Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
P
Pytorchjob
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Requirements
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Locked files
Build
Pipelines
Jobs
Pipeline schedules
Test cases
Artifacts
Deploy
Releases
Package Registry
Container Registry
Model registry
Operate
Environments
Terraform modules
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Code review analytics
Issue analytics
Insights
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
Engin Eren
Pytorchjob
Commits
997a8ccb
Commit
997a8ccb
authored
2 years ago
by
Engin Eren
Browse files
Options
Downloads
Patches
Plain Diff
latest changes from 25.07.2022
parent
b9428108
No related branches found
Branches containing commit
Tags
6.8.0
Tags containing commit
1 merge request
!5
V3
Pipeline
#4275452
passed
2 years ago
Stage: build
Stage: test
Changes
2
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
interactive/control.ipynb
+71
-285
71 additions, 285 deletions
interactive/control.ipynb
interactive/inference/generate.py
+8
-8
8 additions, 8 deletions
interactive/inference/generate.py
with
79 additions
and
293 deletions
interactive/control.ipynb
+
71
−
285
View file @
997a8ccb
source diff could not be displayed: it is too large. Options to address this:
view the blob
.
This diff is collapsed.
Click to expand it.
interactive/inference/generate.py
+
8
−
8
View file @
997a8ccb
...
@@ -23,10 +23,10 @@ def make_shower(eph, Etrue, nEvents):
     ## LOAD ECAL GENERATOR
     mGenE = DCGAN_G(ngf, nz)
     mGenE = torch.nn.parallel.DataParallel(mGenE)
-    exp = 'wganHCAL_pev3'
+    exp = 'wganHCAL_p2critv2'
     #eph_ecal = 42
-    gen_checkpointECAL = torch.load('/eos/user/e/eneren/experiments/' + exp + "_generatorECAL_" + str(eph) + ".pt", map_location=torch.device('cuda'))
+    gen_checkpointECAL = torch.load('/eos/user/e/eneren/experiments/' + exp + "_generatorE_" + str(eph) + ".pt", map_location=torch.device('cuda'))
     mGenE.load_state_dict(gen_checkpointECAL['model_state_dict'])
     mGenE.eval()
     #####
(Reconstructed from the scraped side-by-side diff; removed/added lines inferred from pane order.)
...
@@ -34,13 +34,13 @@ def make_shower(eph, Etrue, nEvents):
     ## LOAD HCAL GENERATOR
     mGenH = Hcal_ecalEMB(ngf, 32, nz).to(device)
     mGenH = torch.nn.parallel.DataParallel(mGenH)
-    expH = 'wganHCAL_pev3'
+    expH = 'wganHCAL_p2critv2'
     #expH = 'wganHCALv1'
     Tensor = torch.cuda.FloatTensor
-    gen_checkpointHCAL = torch.load('/eos/user/e/eneren/experiments/' + expH + "_generator_" + str(eph) + ".pt", map_location=torch.device('cuda'))
+    gen_checkpointHCAL = torch.load('/eos/user/e/eneren/experiments/' + expH + "_generatorH_" + str(eph) + ".pt", map_location=torch.device('cuda'))
     mGenH.load_state_dict(gen_checkpointHCAL['model_state_dict'])
     mGenH.eval()
(Reconstructed from the scraped side-by-side diff; removed/added lines inferred from pane order.)
...
@@ -230,7 +230,7 @@ def fid_scan(showers, nevents, eph_start, eph_end):
     ## LOAD ECAL GENERATOR
     mGenE = DCGAN_G(ngf, nz)
     mGenE = torch.nn.parallel.DataParallel(mGenE)
-    exp = 'wganHCAL_pev3'
+    exp = 'wganHCAL_p2critv2'
     #eph_ecal = 694
(Reconstructed from the scraped side-by-side diff; removed/added lines inferred from pane order.)
...
@@ -240,7 +240,7 @@ def fid_scan(showers, nevents, eph_start, eph_end):
     ## LOAD HCAL GENERATOR
     mGenH = Hcal_ecalEMB(ngf, 32, nz).to(device)
     mGenH = nn.parallel.DataParallel(mGenH)
-    expH = 'wganHCAL_pev3'
+    expH = 'wganHCAL_p2critv2'
     Tensor = torch.cuda.FloatTensor
     Etrue = 50
(Reconstructed from the scraped side-by-side diff; removed/added lines inferred from pane order.)
...
@@ -253,11 +253,11 @@ def fid_scan(showers, nevents, eph_start, eph_end):
     #esum_down = []
     #esum_up = []
-    gen_checkpointHCAL = torch.load('/eos/user/e/eneren/experiments/' + expH + "_generator_" + str(eph) + ".pt", map_location=torch.device('cuda'))
+    gen_checkpointHCAL = torch.load('/eos/user/e/eneren/experiments/' + expH + "_generatorH_" + str(eph) + ".pt", map_location=torch.device('cuda'))
     mGenH.load_state_dict(gen_checkpointHCAL['model_state_dict'])
     mGenH.eval()
-    gen_checkpointECAL = torch.load('/eos/user/e/eneren/experiments/' + exp + "_generatorECAL_" + str(eph) + ".pt", map_location=torch.device('cuda'))
+    gen_checkpointECAL = torch.load('/eos/user/e/eneren/experiments/' + exp + "_generatorE_" + str(eph) + ".pt", map_location=torch.device('cuda'))
     mGenE.load_state_dict(gen_checkpointECAL['model_state_dict'])
     mGenE.eval()
(Reconstructed from the scraped side-by-side diff; removed/added lines inferred from pane order.)
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment