From d4c80fb89b6a78100a3abf7d461b16f42abe1f3b Mon Sep 17 00:00:00 2001
From: Debo <debottam.bakshi.gupta@cern.ch>
Date: Thu, 3 Dec 2020 18:58:41 +0100
Subject: [PATCH] Renaming old files and adding provision for batch execution
 in onnx models

---
 .../AthExOnnxRuntime/CMakeLists.txt           |   4 +-
 .../AthenaExamples/AthExOnnxRuntime/README.md |  63 ++--
 .../share/AthExOnnxRuntime_jobOptions.py      |  10 +-
 .../AthExOnnxRuntime/src/CxxApiAlgorithm.cxx  | 198 ------------
 .../AthExOnnxRuntime/src/EvaluateModel.cxx    | 296 ++++++++++++++++++
 .../{CxxApiAlgorithm.h => EvaluateModel.h}    |  20 +-
 .../components/AthExOnnxRuntime_entries.cxx   |   5 +-
 7 files changed, 358 insertions(+), 238 deletions(-)
 delete mode 100644 Control/AthenaExamples/AthExOnnxRuntime/src/CxxApiAlgorithm.cxx
 create mode 100644 Control/AthenaExamples/AthExOnnxRuntime/src/EvaluateModel.cxx
 rename Control/AthenaExamples/AthExOnnxRuntime/src/{CxxApiAlgorithm.h => EvaluateModel.h} (76%)

diff --git a/Control/AthenaExamples/AthExOnnxRuntime/CMakeLists.txt b/Control/AthenaExamples/AthExOnnxRuntime/CMakeLists.txt
index 8361d3a13413..2cf4c6266bf7 100644
--- a/Control/AthenaExamples/AthExOnnxRuntime/CMakeLists.txt
+++ b/Control/AthenaExamples/AthExOnnxRuntime/CMakeLists.txt
@@ -3,9 +3,6 @@
 # Declare the package's name.
 atlas_subdir( AthExOnnxRuntime )
 
-# External dependencies.
-find_package( onnxruntime )
-
 # Component(s) in the package.
 atlas_add_library( AthExOnnxRuntimeLib
    INTERFACE
@@ -25,6 +22,7 @@ atlas_install_joboptions( share/*.py )
 atlas_add_test( AthExOnnxRuntimeJob_serial
    SCRIPT athena.py AthExOnnxRuntime/AthExOnnxRuntime_jobOptions.py )
 
+
 atlas_add_test( AthExOnnxRuntimeJob_mt
    SCRIPT athena.py --threads=2
           AthExOnnxRuntime/AthExOnnxRuntime_jobOptions.py )
diff --git a/Control/AthenaExamples/AthExOnnxRuntime/README.md b/Control/AthenaExamples/AthExOnnxRuntime/README.md
index 34afcb68bea5..79efde7c5724 100644
--- a/Control/AthenaExamples/AthExOnnxRuntime/README.md
+++ b/Control/AthenaExamples/AthExOnnxRuntime/README.md
@@ -4,37 +4,48 @@ This package is meant to hold code demonstrating how to use onnxruntime
 from Athena.
 
 The package loads a MNIST_testModel.onnx model previously trained to recognize 
-handwritten digits in the form of 28x28 pixels from `0 to 9`.
-
-The package further loads test pixel dataset and their labels. CxxApiAlgorithm
+handwritten digits in the form of 28x28 pixels from `0 to 9`. When run in DEBUG
+mode the layout of the input and output layers can be seen
+```
+AthONNX             DEBUG Input 0 :  name= flatten_input:0
+AthONNX             DEBUG Input 0 : num_dims= 3
+AthONNX             DEBUG Input 0 : dim 0= -1
+AthONNX             DEBUG Input 0 : dim 1= 28
+AthONNX             DEBUG Input 0 : dim 2= 28
+AthONNX             DEBUG Output 0 :  name= dense_1/Softmax:0
+AthONNX             DEBUG Output 0 : num_dims= 2
+AthONNX             DEBUG Output 0 : dim 0= -1
+AthONNX             DEBUG Output 0 : dim 1= 10
+``` 
+The `dim 0= -1` always represents the batch size and is ready to take any integer. The model in this
+example package has provision to run in both no-batch mode and batch mode. For no-batch mode `dim 0= 1`; for
+batch mode `dim 0= batch size`.
+
+The package further loads the test pixel dataset and its labels. When not running in batch mode EvaluateModel 
 randomly picks a sample from the dataset and passes through MNIST_testModel.onnx
 model and predicts its value among 0 to 9. The highest probable(score) value should
 match with actual label of the sample e.g.:
 ```
-AthONNX              INFO Label for the input test data  = 9
-AthONNX              INFO Score for class 0 = 3.32061e-09
-
-AthONNX              INFO Score for class 1 = 1.52828e-08
-
-AthONNX              INFO Score for class 2 = 3.08599e-08
-
-AthONNX              INFO Score for class 3 = 1.14768e-07
-
-AthONNX              INFO Score for class 4 = 2.05344e-05
-
-AthONNX              INFO Score for class 5 = 1.54014e-11
-
-AthONNX              INFO Score for class 6 = 1.0932e-10
-
-AthONNX              INFO Score for class 7 = 0.000205113
-
-AthONNX              INFO Score for class 8 = 5.37525e-08
-
-AthONNX              INFO Score for class 9 = 0.999774
-
-AthONNX              INFO Class: 9 has the highest score: 0.999774
+INFO Label for the input test data  = 1
+AthONNX             DEBUG Score for class 0 = 1.07293e-07
+AthONNX             DEBUG Score for class 1 = 0.999818
+AthONNX             DEBUG Score for class 2 = 1.18024e-05
+AthONNX             DEBUG Score for class 3 = 2.53529e-05
+AthONNX             DEBUG Score for class 4 = 4.19157e-06
+AthONNX             DEBUG Score for class 5 = 1.66088e-06
+AthONNX             DEBUG Score for class 6 = 7.7723e-06
+AthONNX             DEBUG Score for class 7 = 6.33801e-05
+AthONNX             DEBUG Score for class 8 = 5.83467e-05
+AthONNX             DEBUG Score for class 9 = 9.74693e-06
+AthONNX              INFO Class: 1 has the highest score: 0.999818
 ```
 The above result can be obtained by running `athena AthExOnnxRuntime_jobOptions.py`
-in your run directory
+in your run directory. Edit AthExOnnxRuntime_jobOptions.py's `AthONNX.DoBatches = False` to run in no-batch
+mode.
+
+To run in batch mode set `AthONNX.DoBatches = True` and provide the number of batches and each batch size via
+`AthONNX.NumberOfBatches` and `AthONNX.SizeOfBatch` respectively.
+
+Note: by default the algorithm will run in no-batch mode
 
 
diff --git a/Control/AthenaExamples/AthExOnnxRuntime/share/AthExOnnxRuntime_jobOptions.py b/Control/AthenaExamples/AthExOnnxRuntime/share/AthExOnnxRuntime_jobOptions.py
index 20b057cc213c..ac1beaef5aeb 100644
--- a/Control/AthenaExamples/AthExOnnxRuntime/share/AthExOnnxRuntime_jobOptions.py
+++ b/Control/AthenaExamples/AthExOnnxRuntime/share/AthExOnnxRuntime_jobOptions.py
@@ -5,17 +5,21 @@ from AthenaCommon.AlgSequence import AlgSequence
 algSequence = AlgSequence()
 
 # Set up the job.
-from AthExOnnxRuntime.AthExOnnxRuntimeConf import AthONNX__CxxApiAlgorithm
+from AthExOnnxRuntime.AthExOnnxRuntimeConf import AthONNX__EvaluateModel
 from AthOnnxruntimeService.AthOnnxruntimeServiceConf import AthONNX__ONNXRuntimeSvc
 
 from AthenaCommon.AppMgr import ServiceMgr
 ServiceMgr += AthONNX__ONNXRuntimeSvc( OutputLevel = DEBUG )
-algSequence += AthONNX__CxxApiAlgorithm("AthONNX")
+algSequence += AthONNX__EvaluateModel("AthONNX")
 
 # Get a	random no. between 0 to	10k for	test sample
 from random import randint
 
 AthONNX = algSequence.AthONNX
 AthONNX.TestSample = randint(0, 9999)
+AthONNX.DoBatches = False
+AthONNX.NumberOfBatches = 1
+AthONNX.SizeOfBatch = 1
+
 # Run for 10 "events".
-theApp.EvtMax = 10
+theApp.EvtMax = 2
diff --git a/Control/AthenaExamples/AthExOnnxRuntime/src/CxxApiAlgorithm.cxx b/Control/AthenaExamples/AthExOnnxRuntime/src/CxxApiAlgorithm.cxx
deleted file mode 100644
index 0efc20418790..000000000000
--- a/Control/AthenaExamples/AthExOnnxRuntime/src/CxxApiAlgorithm.cxx
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-// Local include(s).
-#include "CxxApiAlgorithm.h"
-
-// Framework include(s).
-#include "PathResolver/PathResolver.h"
-
-namespace AthONNX {
-
-   //*******************************************************************
-   // for reading MNIST images
-   std::vector<std::vector<float>> read_mnist_pixel(const std::string &full_path) //function to load test images
-   {
-     std::vector<std::vector<float>> input_tensor_values;
-     input_tensor_values.resize(10000, std::vector<float>(28*28*1));  
-     std::ifstream file (full_path.c_str(), std::ios::binary);
-     int magic_number=0;
-     int number_of_images=0;
-     int n_rows=0;
-     int n_cols=0;
-     file.read((char*)&magic_number,sizeof(magic_number));
-     magic_number= ntohl(magic_number);
-     file.read((char*)&number_of_images,sizeof(number_of_images));
-     number_of_images= ntohl(number_of_images);
-     file.read((char*)&n_rows,sizeof(n_rows));
-     n_rows= ntohl(n_rows);
-     file.read((char*)&n_cols,sizeof(n_cols));
-     n_cols= ntohl(n_cols);
-     for(int i=0;i<number_of_images;++i)
-     {
-        for(int r=0;r<n_rows;++r)
-        {
-           for(int c=0;c<n_cols;++c)
-           {
-             unsigned char temp=0;
-             file.read((char*)&temp,sizeof(temp));
-             input_tensor_values[i][r*n_cols+c]= float(temp)/255;
-           }
-        }
-     }
-     return input_tensor_values;
-   }
-
-   //********************************************************************************
-   // for reading MNIST labels
-   std::vector<int> read_mnist_label(const std::string &full_path) //function to load test labels
-   {
-     std::vector<int> output_tensor_values(1*10000);
-     std::ifstream file (full_path.c_str(), std::ios::binary);
-     int magic_number=0;
-     int number_of_labels=0;
-     file.read((char*)&magic_number,sizeof(magic_number));
-     magic_number= ntohl(magic_number);
-     file.read((char*)&number_of_labels,sizeof(number_of_labels));
-     number_of_labels= ntohl(number_of_labels);
-     for(int i=0;i<number_of_labels;++i)
-     {
-          unsigned char temp=0;
-          file.read((char*)&temp,sizeof(temp));
-          output_tensor_values[i]= int(temp);
-     }
-      return output_tensor_values;
-    }
-
-   StatusCode CxxApiAlgorithm::initialize() {
-
-      // Access the service.
-      ATH_CHECK( m_svc.retrieve() );
-
-      // Find the model file.
-      const std::string modelFileName =
-         PathResolverFindCalibFile( m_modelFileName );
-      const std::string pixelFileName =
-         PathResolverFindCalibFile( m_pixelFileName );
-      const std::string labelFileName =
-         PathResolverFindCalibFile( m_labelFileName );
-      ATH_MSG_INFO( "Using model file: " << modelFileName );
-      ATH_MSG_INFO( "Using pixel file: " << pixelFileName );
-      ATH_MSG_INFO( "Using pixel file: " << labelFileName );
-      // Set up the ONNX Runtime session.
-      Ort::SessionOptions sessionOptions;
-      sessionOptions.SetIntraOpNumThreads( 1 );
-      sessionOptions.SetGraphOptimizationLevel( ORT_ENABLE_BASIC );
-      Ort::AllocatorWithDefaultOptions allocator;  
-      m_session = std::make_unique< Ort::Session >( m_svc->env(),
-                                                    modelFileName.c_str(),
-                                                    sessionOptions );
-      ATH_MSG_INFO( "Created the ONNX Runtime session" );
-      std::vector<int64_t> input_node_dims;
-      size_t num_input_nodes = m_session->GetInputCount();
-      std::vector<const char*> input_node_names(num_input_nodes);
-      for( std::size_t i = 0; i < num_input_nodes; i++ ) {
-        // print input node names
-        char* input_name = m_session->GetInputName(i, allocator);
-        ATH_MSG_DEBUG("Input "<<i<<" : "<<" name= "<<input_name);
-        input_node_names[i] = input_name;
-        // print input node types
-        Ort::TypeInfo type_info = m_session->GetInputTypeInfo(i);
-        auto tensor_info = type_info.GetTensorTypeAndShapeInfo();
-        ONNXTensorElementDataType type = tensor_info.GetElementType();
-        ATH_MSG_DEBUG("Input "<<i<<" : "<<" type= "<<type);
-
-        // print input shapes/dims
-        input_node_dims = tensor_info.GetShape();
-        ATH_MSG_DEBUG("Input "<<i<<" : num_dims= "<<input_node_dims.size());
-        for (std::size_t j = 0; j < input_node_dims.size(); j++){
-           if(input_node_dims[j]<0)
-             input_node_dims[j] =1;
-           ATH_MSG_DEBUG("Input"<<i<<" : dim "<<j<<"= "<<input_node_dims[j]);
-          }  
-         }
-      
-      //output nodes
-     std::vector<int64_t> output_node_dims;
-     size_t num_output_nodes = m_session->GetOutputCount();
-     std::vector<const char*> output_node_names(num_output_nodes);
-
-     for( std::size_t i = 0; i < num_output_nodes; i++ ) {
-     // print output node names
-        char* output_name = m_session->GetOutputName(i, allocator);
-        ATH_MSG_DEBUG("Output "<<i<<" : "<<" name= "<<output_name);
-        output_node_names[i] = output_name;
-
-        Ort::TypeInfo type_info = m_session->GetOutputTypeInfo(i);
-        auto tensor_info = type_info.GetTensorTypeAndShapeInfo();
-        ONNXTensorElementDataType type = tensor_info.GetElementType();
-        ATH_MSG_DEBUG("Output "<<i<<" : "<<" type= "<<type);
-
-      // print output shapes/dims
-        output_node_dims = tensor_info.GetShape();
-        ATH_MSG_DEBUG("Output "<<i<<" : num_dims= "<<output_node_dims.size());
-        for (std::size_t j = 0; j < output_node_dims.size(); j++){
-           if(output_node_dims[j]<0)
-              output_node_dims[j] =1;
-        ATH_MSG_DEBUG("Output"<<i<<" : dim "<<j<<"= "<<output_node_dims[j]);
-       }  
-      }
-     //*************************************************************************
-     // Score the model using sample data, and inspect values
-     //loading input data
-  
-     std::vector<std::vector<float>> input_tensor_values_ = read_mnist_pixel(pixelFileName);
-     std::vector<int> output_tensor_values_ = read_mnist_label(labelFileName);
-  
-     //preparing container to hold input data
-  
-     size_t input_tensor_size = 1*28*28;
-     std::vector<float> input_tensor_values(input_tensor_size);
-     input_tensor_values = input_tensor_values_[m_testSample];
-  
-     //preparing container to hold output data
-     int output_tensor_values = output_tensor_values_[m_testSample]; 
-
-     // create input tensor object from data values
-     auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
-     Ort::Value input_tensor = Ort::Value::CreateTensor<float>(memory_info, input_tensor_values.data(), input_tensor_size, input_node_dims.data(), input_node_dims.size());
-     assert(input_tensor.IsTensor());
-
-     // score model & input tensor, get back output tensor
-     auto output_tensors = m_session->Run(Ort::RunOptions{nullptr}, input_node_names.data(), &input_tensor, input_node_names.size(), output_node_names.data(), output_node_names.size());
-     assert(output_tensors.size() == 1 && output_tensors.front().IsTensor());
-  
-     // Get pointer to output tensor float values
-     float* floatarr = output_tensors.front().GetTensorMutableData<float>();
-     
-     // show  true label for the test input
-     ATH_MSG_INFO("Label for the input test data  = "<<output_tensor_values);
-     float max = -999;
-     int max_index;
-     for (int i = 0; i < 10; i++){
-       ATH_MSG_INFO("Score for class "<<i<<" = "<<floatarr[i]<<std::endl);
-       if (max<floatarr[i]){
-          max = floatarr[i];
-          max_index = i;
-       }
-     }
-     ATH_MSG_INFO("Class: "<<max_index<<" has the highest score: "<<floatarr[max_index]);
-      // Return gracefully.
-      return StatusCode::SUCCESS;
-   }
-
-   StatusCode CxxApiAlgorithm::execute( const EventContext& /*ctx*/ ) const {
-
-      // Return gracefully.
-      return StatusCode::SUCCESS;
-   }
-
-   StatusCode CxxApiAlgorithm::finalize() {
-
-      // Delete the session object.
-      m_session.reset();
-
-      // Return gracefully.
-      return StatusCode::SUCCESS;
-   }
-
-} // namespace AthONNX
diff --git a/Control/AthenaExamples/AthExOnnxRuntime/src/EvaluateModel.cxx b/Control/AthenaExamples/AthExOnnxRuntime/src/EvaluateModel.cxx
new file mode 100644
index 000000000000..f4a9ce0b0f8e
--- /dev/null
+++ b/Control/AthenaExamples/AthExOnnxRuntime/src/EvaluateModel.cxx
@@ -0,0 +1,296 @@
+// Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+
+// Local include(s).
+#include "EvaluateModel.h"
+
+// Framework include(s).
+#include "PathResolver/PathResolver.h"
+
+namespace AthONNX {
+
+   //*******************************************************************
+   // for reading MNIST images
+   std::vector<std::vector<float>> read_mnist_pixel(const std::string &full_path) //function to load test images
+   {
+     std::vector<std::vector<float>> input_tensor_values;
+     input_tensor_values.resize(10000, std::vector<float>(28*28*1));  
+     std::ifstream file (full_path.c_str(), std::ios::binary);
+     int magic_number=0;
+     int number_of_images=0;
+     int n_rows=0;
+     int n_cols=0;
+     file.read((char*)&magic_number,sizeof(magic_number));
+     magic_number= ntohl(magic_number);
+     file.read((char*)&number_of_images,sizeof(number_of_images));
+     number_of_images= ntohl(number_of_images);
+     file.read((char*)&n_rows,sizeof(n_rows));
+     n_rows= ntohl(n_rows);
+     file.read((char*)&n_cols,sizeof(n_cols));
+     n_cols= ntohl(n_cols);
+     for(int i=0;i<number_of_images;++i)
+     {
+        for(int r=0;r<n_rows;++r)
+        {
+           for(int c=0;c<n_cols;++c)
+           {
+             unsigned char temp=0;
+             file.read((char*)&temp,sizeof(temp));
+             input_tensor_values[i][r*n_cols+c]= float(temp)/255;
+           }
+        }
+     }
+     return input_tensor_values;
+   }
+
+   //********************************************************************************
+   // for reading MNIST labels
+   std::vector<int> read_mnist_label(const std::string &full_path) //function to load test labels
+   {
+     std::vector<int> output_tensor_values(1*10000);
+     std::ifstream file (full_path.c_str(), std::ios::binary);
+     int magic_number=0;
+     int number_of_labels=0;
+     file.read((char*)&magic_number,sizeof(magic_number));
+     magic_number= ntohl(magic_number);
+     file.read((char*)&number_of_labels,sizeof(number_of_labels));
+     number_of_labels= ntohl(number_of_labels);
+     for(int i=0;i<number_of_labels;++i)
+     {
+          unsigned char temp=0;
+          file.read((char*)&temp,sizeof(temp));
+          output_tensor_values[i]= int(temp);
+     }
+      return output_tensor_values;
+    }
+
+   StatusCode EvaluateModel::initialize() {
+
+      // Access the service.
+      ATH_CHECK( m_svc.retrieve() );
+
+      /*****
+       The combination of the number of batches and the batch size shouldn't exceed
+       the total sample size, which is 10000 for this example
+      *****/
+      if(m_doBatches && (m_numberOfBatches*m_sizeOfBatch)>10000){
+        ATH_MSG_INFO("The total no. of sample crossed the no. of available sample ....");
+	return StatusCode::FAILURE;
+      }
+      // Find the model file.
+      const std::string modelFileName =
+         PathResolverFindCalibFile( m_modelFileName );
+      const std::string pixelFileName =
+         PathResolverFindCalibFile( m_pixelFileName );
+      const std::string labelFileName =
+         PathResolverFindCalibFile( m_labelFileName );
+      ATH_MSG_INFO( "Using model file: " << modelFileName );
+      ATH_MSG_INFO( "Using pixel file: " << pixelFileName );
+      ATH_MSG_INFO( "Using pixel file: " << labelFileName );
+      // Set up the ONNX Runtime session.
+      Ort::SessionOptions sessionOptions;
+      sessionOptions.SetIntraOpNumThreads( 1 );
+      sessionOptions.SetGraphOptimizationLevel( ORT_ENABLE_BASIC );
+      Ort::AllocatorWithDefaultOptions allocator;  
+      m_session = std::make_unique< Ort::Session >( m_svc->env(),
+                                                    modelFileName.c_str(),
+                                                    sessionOptions );
+      ATH_MSG_INFO( "Created the ONNX Runtime session" );
+      m_input_tensor_values = read_mnist_pixel(pixelFileName);
+      m_output_tensor_values = read_mnist_label(labelFileName);
+
+      // Return gracefully.
+      return StatusCode::SUCCESS;
+   }
+
+   StatusCode EvaluateModel::execute( const EventContext& /*ctx*/ ) const {
+     
+     Ort::AllocatorWithDefaultOptions allocator;
+     auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
+     /************************** Input Nodes *****************************/
+     /*********************************************************************/
+     std::vector<int64_t> input_node_dims;
+     size_t num_input_nodes = m_session->GetInputCount();
+     std::vector<const char*> input_node_names(num_input_nodes);
+     for( std::size_t i = 0; i < num_input_nodes; i++ ) {
+        // print input node names
+        char* input_name = m_session->GetInputName(i, allocator);
+        ATH_MSG_DEBUG("Input "<<i<<" : "<<" name= "<<input_name);
+        input_node_names[i] = input_name;
+        Ort::TypeInfo type_info = m_session->GetInputTypeInfo(i);
+        auto tensor_info = type_info.GetTensorTypeAndShapeInfo();
+        // print input shapes/dims
+        input_node_dims = tensor_info.GetShape();
+        ATH_MSG_DEBUG("Input "<<i<<" : num_dims= "<<input_node_dims.size());
+        for (std::size_t j = 0; j < input_node_dims.size(); j++){
+           ATH_MSG_DEBUG("Input "<<i<<" : dim "<<j<<"= "<<input_node_dims[j]);
+          }  
+       }
+    
+     /************************** Output Nodes *****************************/
+     /*********************************************************************/
+     //output nodes
+     std::vector<int64_t> output_node_dims;
+     size_t num_output_nodes = m_session->GetOutputCount();
+     std::vector<const char*> output_node_names(num_output_nodes);
+
+     for( std::size_t i = 0; i < num_output_nodes; i++ ) {
+     // print output node names
+        char* output_name = m_session->GetOutputName(i, allocator);
+        ATH_MSG_DEBUG("Output "<<i<<" : "<<" name= "<<output_name);
+        output_node_names[i] = output_name;
+
+        Ort::TypeInfo type_info = m_session->GetOutputTypeInfo(i);
+        auto tensor_info = type_info.GetTensorTypeAndShapeInfo();
+
+      // print output shapes/dims
+        output_node_dims = tensor_info.GetShape();
+        ATH_MSG_DEBUG("Output "<<i<<" : num_dims= "<<output_node_dims.size());
+        for (std::size_t j = 0; j < output_node_dims.size(); j++){
+        	ATH_MSG_DEBUG("Output "<<i<<" : dim "<<j<<"= "<<output_node_dims[j]);
+       }  
+      }
+
+    /************************* Score if input is not a batch ********************/
+    /****************************************************************************/
+     if(m_doBatches == false){
+
+        /**************************************************************************************
+         * input_node_dims[0] = -1; the -1 needs to be replaced by the batch size; for no-batch mode it is 1
+         * input_node_dims[1] = 28
+         * input_node_dims[2] = 28
+        ****************************************************************************************/
+
+     	input_node_dims[0] = 1;
+     	output_node_dims[0] = 1;
+ 
+       /***************** Choose an example sample randomly ****************************/  
+     	std::vector<float> input_tensor_values = m_input_tensor_values[m_testSample];
+        // Output label of corresponding m_input_tensor_values[m_testSample]; e.g 0, 1, 2, 3 etc
+        int output_tensor_values = m_output_tensor_values[m_testSample];
+       
+        // For a check that the sample dimension is fully flattened (1x28x28 = 784)
+     	ATH_MSG_DEBUG("Size of Input tensor: "<<input_tensor_values.size()); 
+
+     	/************** Create input tensor object from input data values to feed into your model *********************/
+     	Ort::Value input_tensor = Ort::Value::CreateTensor<float>(memory_info, 
+                                                                  input_tensor_values.data(), 
+                                                                  input_tensor_values.size(),  /*** 1x28x28 = 784 ***/ 
+                                                                  input_node_dims.data(), 
+                                                                  input_node_dims.size());     /*** [1, 28, 28] = 3 ***/
+
+        /********* Convert 784 elements long flattened 1D array to 3D (1, 28, 28) onnx compatible tensor ************/
+        ATH_MSG_DEBUG("Input tensor size after converted to Ort tensor: "<<input_tensor.GetTensorTypeAndShapeInfo().GetShape());     	
+        // Makes sure input tensor has same dimensions as input layer of the model
+        assert(input_tensor.IsTensor()&&
+     		input_tensors.GetTensorTypeAndShapeInfo().GetShape() == input_node_dims.size());
+
+     	/********* Score model by feeding input tensor and get output tensor in return *****************************/
+        auto output_tensor = m_session->Run(Ort::RunOptions{nullptr}, 
+                                             input_node_names.data(), 
+                                             &input_tensor, 
+                                             input_node_names.size(),      /** 1, flatten_input:0 **/ 
+                                             output_node_names.data(), 
+                                             output_node_names.size());    /** 1, dense_1/Softmax:0 **/
+
+        assert(output_tensor.size() == output_node_names.size() && output_tensor.front().IsTensor());
+  
+     	// Get pointer to output tensor float values
+     	float* floatarr = output_tensor.front().GetTensorMutableData<float>();
+
+     	// show  true label for the test input
+     	ATH_MSG_INFO("Label for the input test data  = "<<output_tensor_values);
+     	float max = -999;
+     	int max_index;
+     	for (int i = 0; i < 10; i++){
+       		ATH_MSG_DEBUG("Score for class "<<i<<" = "<<floatarr[i]);
+       		if (max<floatarr[i]){
+          		max = floatarr[i];
+          		max_index = i;
+       		}
+     	}
+     	ATH_MSG_INFO("Class: "<<max_index<<" has the highest score: "<<floatarr[max_index]);
+     
+     } // m_doBatches == false condition ends
+    /************************* Score if input is a batch ********************/
+    /****************************************************************************/
+    else {
+        /**************************************************************************************
+         Similar scoring structure to the non-batch execution, but the 1st dimension needs to be replaced by the batch size;
+         for this example let's take 3 batches with batch size 5
+         * input_node_dims[0] = 5
+         * input_node_dims[1] = 28
+         * input_node_dims[2] = 28
+        ****************************************************************************************/
+        
+     	input_node_dims[0] = m_sizeOfBatch;
+     	output_node_dims[0] = m_sizeOfBatch;   
+     
+	/************************** process multiple batches ********************************/
+     	for (int i = 0; i < m_numberOfBatches; i++) {
+     		ATH_MSG_DEBUG("Processing batch #" << i);
+      		std::vector<float> batch_input_tensor_values;
+  
+      		for (int j = 0; j < m_sizeOfBatch; j++) {
+
+                        /******************For each batch we need a flattened (5 x 28 x 28 = 3920) 1D array******************************/
+        		batch_input_tensor_values.insert(batch_input_tensor_values.end(),m_input_tensor_values[j].begin(),m_input_tensor_values[j].end());
+        	}   
+
+        	Ort::Value batch_input_tensors = Ort::Value::CreateTensor<float>(memory_info,
+                                                                      batch_input_tensor_values.data(),
+                                                                      batch_input_tensor_values.size(), /*** 5x28x28 = 3920 ***/
+                                                                      input_node_dims.data(),
+                                                                      input_node_dims.size());         /*** [5, 28, 28] = 3 ***/
+
+                assert(batch_input_tensors.IsTensor()&&
+                	batch_input_tensors.GetTensorTypeAndShapeInfo().GetShape() == input_node_dims.size());
+   
+        	auto batch_output_tensors = m_session->Run(Ort::RunOptions{nullptr}, 
+                                                   input_node_names.data(), 
+                                                   &batch_input_tensors, 
+                                                   input_node_names.size(),    /** 1, flatten_input:0 **/
+                                                   output_node_names.data(), 
+                                                   output_node_names.size());  /** 1, dense_1/Softmax:0 **/  
+ 
+      		assert(batch_output_tensors.size() == output_names.size() &&
+             		batch_output_tensors.IsTensor() &&
+             		batch_output_tensors.GetTensorTypeAndShapeInfo().GetShape()[0] == m_sizeOfBatch);
+        
+		// Get pointer to output tensor float values
+
+        	ATH_MSG_DEBUG("output vector size: "<<batch_output_tensors[0].GetTensorTypeAndShapeInfo().GetShape());
+        	float* floatarr = batch_output_tensors.front().GetTensorMutableData<float>();
+     
+     		// show  true label for the test input
+		for(int i = 0; i<m_sizeOfBatch; i++){
+     			ATH_MSG_INFO("Label for the input test data  = "<<m_output_tensor_values[i]);
+                	int k = i*10;
+                	float max = -999;
+                	int max_index = 0;
+     			for (int j =k ; j < k+10; j++){
+       				ATH_MSG_INFO("Score for class "<<j-k<<" = "<<floatarr[j]);
+       				if (max<floatarr[j]){
+          				max = floatarr[j];
+          				max_index = j;
+       				}
+     			}
+    	       	ATH_MSG_INFO("Class: "<<max_index-k<<" has the highest score: "<<floatarr[max_index]);
+      		} 
+
+          }
+     } // else/m_doBatches == true condition ends
+    // Return gracefully.
+      return StatusCode::SUCCESS;
+   }
+   StatusCode EvaluateModel::finalize() {
+
+      // Delete the session object.
+      m_session.reset();
+
+      // Return gracefully.
+      return StatusCode::SUCCESS;
+   }
+
+} // namespace AthONNX
+
+
diff --git a/Control/AthenaExamples/AthExOnnxRuntime/src/CxxApiAlgorithm.h b/Control/AthenaExamples/AthExOnnxRuntime/src/EvaluateModel.h
similarity index 76%
rename from Control/AthenaExamples/AthExOnnxRuntime/src/CxxApiAlgorithm.h
rename to Control/AthenaExamples/AthExOnnxRuntime/src/EvaluateModel.h
index 0ebf218804d2..59e0fc5933e6 100644
--- a/Control/AthenaExamples/AthExOnnxRuntime/src/CxxApiAlgorithm.h
+++ b/Control/AthenaExamples/AthExOnnxRuntime/src/EvaluateModel.h
@@ -1,7 +1,7 @@
 // Dear emacs, this is -*- c++ -*-
 // Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-#ifndef ATHEXONNXRUNTIME_CXXAPIALGORITHM_H
-#define ATHEXONNXRUNTIME_CXXAPIALGORITHM_H
+#ifndef ATHEXONNXRUNTIME_EVALUATEMODEL_H
+#define ATHEXONNXRUNTIME_EVALUATEMODEL_H
 
 // Local include(s).
 #include "AthOnnxruntimeService/IONNXRuntimeSvc.h"
@@ -19,6 +19,8 @@
 #include <iostream> 
 #include <fstream>
 #include <arpa/inet.h>
+#include <vector>
+#include <iterator>
 
 namespace AthONNX {
 
@@ -29,7 +31,7 @@ namespace AthONNX {
    /// @author Debottam Bakshi Gupta <Debottam.Bakshi.Gupta@cern.ch>
    /// @author Attila Krasznahorkay <Attila.Krasznahorkay@cern.ch>
    ///
-   class CxxApiAlgorithm : public AthReentrantAlgorithm {
+   class EvaluateModel: public AthReentrantAlgorithm {
 
    public:
       /// Inherit the base class's constructor
@@ -62,6 +64,12 @@ namespace AthONNX {
          "dev/MLTest/2020-03-31/t10k-labels-idx1-ubyte",
          "Name of the label file to load" };
       Gaudi::Property<int> m_testSample {this, "TestSample", 0, "A Random Test Sample"};
+
+      /// The following properties need to be considered if the .onnx model is evaluated in batch mode
+      Gaudi::Property<bool> m_doBatches {this, "DoBatches", false, "Processing events by batches"};
+      Gaudi::Property<int> m_numberOfBatches {this, "NumberOfBatches", 1, "No. of batches to be passed"};
+      Gaudi::Property<int> m_sizeOfBatch {this, "SizeOfBatch", 1, "No. of elements/example in a batch"};
+      
       /// Handle to @c AthONNX::IONNXRuntimeSvc
       ServiceHandle< IONNXRuntimeSvc > m_svc{ this, "ONNXRuntimeSvc",
                                               "AthONNX::ONNXRuntimeSvc",
@@ -71,9 +79,11 @@ namespace AthONNX {
 
       /// The "session" of ONNX Runtime that we'll be using
       std::unique_ptr< Ort::Session > m_session;
+      std::vector<std::vector<float>> m_input_tensor_values;
+      std::vector<int> m_output_tensor_values;
 
-   }; // class CxxApiAlgorithm
+   }; // class EvaluateModel
 
 } // namespace AthONNX
 
-#endif // ATHEXONNXRUNTIME_CXXAPIALGORITHM_H
+#endif // ATHEXONNXRUNTIME_EVALUATEMODEL_H
diff --git a/Control/AthenaExamples/AthExOnnxRuntime/src/components/AthExOnnxRuntime_entries.cxx b/Control/AthenaExamples/AthExOnnxRuntime/src/components/AthExOnnxRuntime_entries.cxx
index ca1006fe95b4..cb25a4bfd9d2 100644
--- a/Control/AthenaExamples/AthExOnnxRuntime/src/components/AthExOnnxRuntime_entries.cxx
+++ b/Control/AthenaExamples/AthExOnnxRuntime/src/components/AthExOnnxRuntime_entries.cxx
@@ -1,7 +1,6 @@
 // Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 // Local include(s).
-#include "../CxxApiAlgorithm.h"
-
+#include "../EvaluateModel.h"
 // Declare the package's components.
-DECLARE_COMPONENT( AthONNX::CxxApiAlgorithm )
+DECLARE_COMPONENT( AthONNX::EvaluateModel )
-- 
GitLab