diff --git a/models/data_loaderFull.py b/models/data_loaderFull.py
index bc8d0386eae5528d28069b5c70912dfab6a97732..0302b3aade5ec799948f0d304992078b98c725cb 100644
--- a/models/data_loaderFull.py
+++ b/models/data_loaderFull.py
@@ -26,13 +26,14 @@ class HDF5Dataset(data.Dataset):
     def __getitem__(self, index):
         # get ECAL part
         x = self.get_data(index)
-        #x = torch.from_numpy(x).float()
+        x = torch.from_numpy(x).float()
         
         ## get HCAL part
         y = self.get_data_hcal(index)
-        #y = torch.from_numpy(y).float()
+        y = torch.from_numpy(y).float()
         
-        e = self.get_energy(index)
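+        ## get energy label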
+        e = torch.from_numpy(self.get_energy(index)).float()
         
         return x, y, e
     
diff --git a/wganHCAL.py b/wganHCAL.py
index f08fd6d1962fb3c4a24f0e627da0fab59ff7d2b9..08b9096b94b7dab5544ff24562b2e6d2c9b5503c 100644
--- a/wganHCAL.py
+++ b/wganHCAL.py
@@ -57,14 +57,12 @@ def train(args, aD, aG, device, train_loader, optimizer_d, optimizer_g, epoch, e
     Tensor = torch.cuda.FloatTensor 
    
     for batch_idx, (dataE, dataH, energy) in enumerate(train_loader):
-        #real_dataECAL = dataE.to(device).unsqueeze(1)
-        real_dataECAL = torch.from_numpy(dataE).to(device).unsqueeze(1).float()
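+        # move to device, add a channel dimension, cast to float32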
+        real_dataECAL = dataE.to(device).unsqueeze(1).float()
         
-        #real_dataHCAL = dataH.to(device).unsqueeze(1)
-        real_dataHCAL = torch.from_numpy(dataH).to(device).unsqueeze(1).float()
+        real_dataHCAL = dataH.to(device).unsqueeze(1).float()
         
-        #real_label = energy.to(device)
-        real_label = torch.from_numpy(energy).to(device).float()
+        real_label = energy.to(device).float()
         
         optimizer_d.zero_grad()
         
@@ -138,8 +138,8 @@ def is_distributed():
 
 def parse_args():
     parser = argparse.ArgumentParser(description='WGAN training on hadron showers')
-    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
-                        help='input batch size for training (default: 100)')
+    parser.add_argument('--batch-size', type=int, default=50, metavar='N',
+                        help='input batch size for training (default: 50)')
     
     parser.add_argument('--nz', type=int, default=100, metavar='N',
                         help='latent space for generator (default: 100)')