Commit 56770f22 authored by Debottam Bakshi Gupta, committed by Edward Moyse

improving Flattening algorithm from 2Dto1D to multiDto1D

parent a3ba6247
5 merge requests:
!58791 DataQualityConfigurations: Modify L1Calo config for web display
!51674 Fixing hotSpotInHIST for Run3 HIST
!46784 MuonCondInterface: Enable thread-safety checking.
!46776 Updated LArMonitoring config file for WD to match new files produced using MT
!44962 improving Flattening algorithm from 2Dto1D to multiDto1D
@@ -13,15 +13,21 @@
 namespace AthONNX {
   template<typename T>
-  inline std::vector<T> FlattenInput2D_1D( std::vector<std::vector<T>> features, int size){
-    std::vector<float> Flatten1D(size);
-    for(std::size_t r=0;r<features.size();++r){
-      int n_cols = features[r].size();
-      for(int c=0;c<n_cols;++c){
-        float a = float(features[r][c]);
-        Flatten1D[r*n_cols+c] = a;
-      }
-    }
+  inline std::vector<T> FlattenInput_multiD_1D( std::vector<std::vector<T>> features){
+    // 1. Compute the total size required.
+    int total_size = 0;
+    for (auto& feature : features) total_size += feature.size();
+
+    // 2. Create a vector to hold the data.
+    std::vector<T> Flatten1D;
+    Flatten1D.reserve(total_size);
+
+    // 3. Fill it
+    for (auto& feature : features)
+      for (auto& elem : feature)
+        Flatten1D.push_back(elem);
     return Flatten1D;
   }
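For readers skimming the change, here is a minimal self-contained sketch of the new helper. The function body is copied from the diff above; the main() driver and its ragged test input are illustrative assumptions, not part of the commit. It shows why the explicit size argument could be dropped: the total length is now derived from the input itself, so inner vectors of different lengths are handled too.

#include <cassert>
#include <vector>

namespace AthONNX {
  // Flatten a vector-of-vectors into a single contiguous 1D vector.
  // The output length is computed from the input, so no size argument
  // (the old hard-coded 784) has to be passed in by the caller.
  template<typename T>
  inline std::vector<T> FlattenInput_multiD_1D( std::vector<std::vector<T>> features){
    int total_size = 0;
    for (auto& feature : features) total_size += feature.size();

    std::vector<T> Flatten1D;
    Flatten1D.reserve(total_size);

    for (auto& feature : features)
      for (auto& elem : feature)
        Flatten1D.push_back(elem);
    return Flatten1D;
  }
}

int main(){
  // Illustrative ragged input; a real 28 x 28 sample works the same way.
  std::vector<std::vector<float>> features = {{1.f, 2.f, 3.f}, {4.f}, {5.f, 6.f}};
  std::vector<float> flat = AthONNX::FlattenInput_multiD_1D(features);
  assert(flat.size() == 6);   // 3 + 1 + 2 elements, inferred automatically
  assert(flat[3] == 4.f);     // row-major order is preserved
  return 0;
}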
@@ -167,7 +167,7 @@ namespace AthONNX {
     /***************** Choose an example sample randomly ****************************/
     std::vector<std::vector<float>> input_tensor_values = m_input_tensor_values_notFlat[m_testSample];
-    std::vector<float> flatten = AthONNX::FlattenInput2D_1D(input_tensor_values, 784);
+    std::vector<float> flatten = AthONNX::FlattenInput_multiD_1D(input_tensor_values);
     // Output label of corresponding m_input_tensor_values[m_testSample]; e.g 0, 1, 2, 3 etc
     int output_tensor_values = m_output_tensor_values[m_testSample];
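As a hedged usage note for the single-sample call above: the 28 x 28 = 784 MNIST-style shape is only inferred from the 784 constant the old call hard-coded, and the driver below is a stand-in rather than the real test harness. The point is that the flattened length now follows from the sample's shape instead of being passed in.

#include <cassert>
#include <vector>

// Compact stand-in for AthONNX::FlattenInput_multiD_1D (same behaviour).
template<typename T>
std::vector<T> flattenMultiD(const std::vector<std::vector<T>>& features){
  std::vector<T> out;
  for (const auto& feature : features)
    out.insert(out.end(), feature.begin(), feature.end());
  return out;
}

int main(){
  // One 28 x 28 sample, like an entry of m_input_tensor_values_notFlat.
  std::vector<std::vector<float>> sample(28, std::vector<float>(28, 0.5f));
  std::vector<float> flatten = flattenMultiD(sample);
  // The 784 that used to be passed explicitly now falls out of the shape.
  assert(flatten.size() == 28 * 28);
  return 0;
}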
@@ -235,7 +235,7 @@ namespace AthONNX {
       std::vector<float> batch_input_tensor_values;
       for (int j = l; j < l+m_sizeOfBatch; j++) {
-        std::vector<float> flattened_input = AthONNX::FlattenInput2D_1D(m_input_tensor_values_notFlat[j],784);
+        std::vector<float> flattened_input = AthONNX::FlattenInput_multiD_1D(m_input_tensor_values_notFlat[j]);
         /******************For each batch we need a flattened (5 x 28 x 28 = 3920) 1D array******************************/
         batch_input_tensor_values.insert(batch_input_tensor_values.end(), flattened_input.begin(), flattened_input.end());
       }
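Finally, a sketch of the batch path under the same assumptions: the 5 x 28 x 28 = 3920 figure comes from the comment in the diff, while m_sizeOfBatch and the member containers are replaced by local stand-ins, and the per-sample flatten is written inline instead of calling the helper.

#include <cassert>
#include <vector>

int main(){
  const int sizeOfBatch = 5;  // stand-in for m_sizeOfBatch
  // Stand-in for m_input_tensor_values_notFlat: 5 samples of 28 x 28 pixels.
  std::vector<std::vector<std::vector<float>>> samples(
      sizeOfBatch, std::vector<std::vector<float>>(28, std::vector<float>(28, 0.f)));

  std::vector<float> batch_input_tensor_values;
  for (int j = 0; j < sizeOfBatch; ++j) {
    // Flatten one sample (what FlattenInput_multiD_1D does in the diff) ...
    std::vector<float> flattened_input;
    for (const auto& row : samples[j])
      flattened_input.insert(flattened_input.end(), row.begin(), row.end());
    // ... and append it to the batch-level 1D array.
    batch_input_tensor_values.insert(batch_input_tensor_values.end(),
                                     flattened_input.begin(), flattened_input.end());
  }
  assert(batch_input_tensor_values.size() == 5u * 28u * 28u);  // 3920
  return 0;
}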