From 81b28b4a7debbc051c6bbaec331b574734f71fce Mon Sep 17 00:00:00 2001
From: eneren <engin.eren@cern.ch>
Date: Fri, 30 Sep 2022 13:46:53 +0000
Subject: [PATCH] Fix indentation error in distributed model wrapping; reduce Worker replicas to 2

---
 pytorch_job_wganSingleGen_ncc.yaml | 2 +-
 wganSingleGen.py                   | 5 ++---
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/pytorch_job_wganSingleGen_ncc.yaml b/pytorch_job_wganSingleGen_ncc.yaml
index b980cec..e1c5db6 100644
--- a/pytorch_job_wganSingleGen_ncc.yaml
+++ b/pytorch_job_wganSingleGen_ncc.yaml
@@ -40,7 +40,7 @@ spec:
                 limits:
                   nvidia.com/gpu: 1
     Worker:
-      replicas: 4
+      replicas: 2
       restartPolicy: OnFailure
       template:
         metadata:
diff --git a/wganSingleGen.py b/wganSingleGen.py
index 1ece4d0..4a92e3a 100644
--- a/wganSingleGen.py
+++ b/wganSingleGen.py
@@ -274,9 +274,8 @@ def run(args):
     print('Critic trainable params:', sum(p.numel() for p in Crit_E_H.parameters() if p.requires_grad))
     print('Generator trainable params:', sum(p.numel() for p in Gen_E_H.parameters() if p.requires_grad))
     
-     if args.world_size > 1: 
-        Distributor = nn.parallel.DistributedDataParallel if use_cuda \
-            else nn.parallel.DistributedDataParallelCPU
+    if args.world_size > 1: 
+        Distributor = nn.parallel.DistributedDataParallel if use_cuda else nn.parallel.DistributedDataParallelCPU
         Crit_E_H = Distributor(Crit_E_H, device_ids=[args.local_rank], output_device=args.local_rank )
         Gen_E_H = Distributor(Gen_E_H, device_ids=[args.local_rank], output_device=args.local_rank )
        
-- 
GitLab
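
Note: the block below is a minimal, self-contained sketch of the conditional DistributedDataParallel wrapping that the fixed lines in wganSingleGen.py perform; it is not taken from the repository. The helper name wrap_distributed, the nn.Linear stand-in for Crit_E_H, and the torchrun-style environment variables (LOCAL_RANK, MASTER_ADDR, ...) are assumptions made for illustration. DistributedDataParallelCPU, used in the patched file, only exists in older PyTorch releases; current releases use DistributedDataParallel for the CPU case as well.

    # Minimal sketch, assuming a torchrun-style launch (env vars set) and a toy model.
    # Mirrors the wrapping done after the indentation fix; names are illustrative.
    import os
    import torch
    import torch.distributed as dist
    import torch.nn as nn

    def wrap_distributed(model: nn.Module, local_rank: int, use_cuda: bool) -> nn.Module:
        """Wrap a model for multi-process training, as the patched block does."""
        if use_cuda:
            model = model.to(local_rank)
            # GPU path: one process per GPU, gradients all-reduced over NCCL.
            return nn.parallel.DistributedDataParallel(
                model, device_ids=[local_rank], output_device=local_rank
            )
        # CPU path: recent PyTorch uses DistributedDataParallel without device_ids;
        # the patch's DistributedDataParallelCPU is only available in older releases.
        return nn.parallel.DistributedDataParallel(model)

    if __name__ == "__main__":
        use_cuda = torch.cuda.is_available()
        local_rank = int(os.environ.get("LOCAL_RANK", 0))
        dist.init_process_group(backend="nccl" if use_cuda else "gloo")
        critic = wrap_distributed(nn.Linear(10, 1), local_rank, use_cuda)  # stand-in for Crit_E_H

The fix itself only dedents the "if args.world_size > 1:" line so that single-process runs (world_size == 1) skip the wrapping entirely instead of raising an IndentationError at import time.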