diff --git a/FT/FTDAQ/src/FTRawBankDecoder.cpp b/FT/FTDAQ/src/FTRawBankDecoder.cpp
index 15ecc500b4afb014b95efb5490de937a9015f1f3..ca50f6a7b16e89580585bb4b5be75902478213b4 100644
--- a/FT/FTDAQ/src/FTRawBankDecoder.cpp
+++ b/FT/FTDAQ/src/FTRawBankDecoder.cpp
@@ -16,7 +16,7 @@
 // 2012-05-11 : Olivier Callot
 //-----------------------------------------------------------------------------
 
-namespace {  
+namespace {
 unsigned quarterFromChannel(LHCb::FTChannelID id) {
   return id.uniqueQuarter() - 16u;
 }
@@ -24,7 +24,7 @@ unsigned quarterFromChannel(LHCb::FTChannelID id) {
 unsigned channelInBank(short int c) {
   return ( c >> FTRawBank::cellShift);
 }
-  
+
 unsigned getLinkInBank(short int c){
   return ((c >> FTRawBank::linkShift));
 }
@@ -104,7 +104,7 @@ FTLiteClusters
 FTRawBankDecoder::operator()(const LHCb::RawEvent& rawEvent) const
 {
   const auto& banks = rawEvent.banks(LHCb::RawBank::FTCluster);
-  
+
   // Estimate total number of clusters from bank sizes
   FTLiteClusters clus( LHCb::FTDAQ::nbFTClusters(banks) );
   if ( msgLevel(MSG::DEBUG) )
@@ -186,7 +186,7 @@ FTRawBankDecoder::operator()(const LHCb::RawEvent& rawEvent) const
             bool cSize2       = ( c2 >> 8) & 1;
 
             if( !cSize2 ){ //next cluster is not last fragment
-              clus.addHit(std::make_tuple(LHCb::FTChannelID{ station, layer, quarter,
+              clus.addHit(std::forward_as_tuple(LHCb::FTChannelID{ station, layer, quarter,
                                                              module, mat, sipm, channel },
                                           fraction, 4), bank->sourceID() );
 
@@ -213,7 +213,7 @@ FTRawBankDecoder::operator()(const LHCb::RawEvent& rawEvent) const
               if(diff  > m_clusterMaxWidth){
 
                 //add the first edge cluster
-                clus.addHit(std::make_tuple(LHCb::FTChannelID{ station, layer, quarter,
+                clus.addHit(std::forward_as_tuple(LHCb::FTChannelID{ station, layer, quarter,
                                                                module, mat, sipm, channel },
                                             fraction, 0), bank->sourceID() ); //pseudoSize=0
 
@@ -225,7 +225,7 @@ FTRawBankDecoder::operator()(const LHCb::RawEvent& rawEvent) const
                 for(unsigned int  i = m_clusterMaxWidth; i < diff ; i+= m_clusterMaxWidth){
                   // all middle clusters will have same size as the first cluster,
                   // so use same fraction
-                  clus.addHit(std::make_tuple(LHCb::FTChannelID{ station, layer, quarter,
+                  clus.addHit(std::forward_as_tuple(LHCb::FTChannelID{ station, layer, quarter,
                                                                  module, mat, sipm, channel+i },
                                               fraction, 0), bank->sourceID() );
 
@@ -236,7 +236,7 @@ FTRawBankDecoder::operator()(const LHCb::RawEvent& rawEvent) const
                 }
 
                 //add the last edge
-                clus.addHit(std::make_tuple(LHCb::FTChannelID{ station, layer, quarter,
+                clus.addHit(std::forward_as_tuple(LHCb::FTChannelID{ station, layer, quarter,
                                                                module, mat, sipm, channel2 },
                                             fraction2, 0), bank->sourceID() );
 
@@ -253,7 +253,7 @@ FTRawBankDecoder::operator()(const LHCb::RawEvent& rawEvent) const
                 int frac                      = (widthClus-1)%2;
 
                 //add the new cluster = cluster1+cluster2
-                clus.addHit(std::make_tuple(LHCb::FTChannelID{ station, layer, quarter,
+                clus.addHit(std::forward_as_tuple(LHCb::FTChannelID{ station, layer, quarter,
                                                                module, mat, sipm, clusChanPosition },
                                             frac, widthClus), bank->sourceID() );
 
@@ -266,7 +266,7 @@ FTRawBankDecoder::operator()(const LHCb::RawEvent& rawEvent) const
             }//last edge foud
           }//not the last cluster
           else{ //last cluster, so nothing we can do
-            clus.addHit(std::make_tuple(LHCb::FTChannelID{ station, layer, quarter,
+            clus.addHit(std::forward_as_tuple(LHCb::FTChannelID{ station, layer, quarter,
                                                            module, mat, sipm, channel },
                                         fraction, 4), bank->sourceID() );
 
@@ -283,7 +283,7 @@ FTRawBankDecoder::operator()(const LHCb::RawEvent& rawEvent) const
           unsigned channel = ( c >> 0 ) & 127;
           int fraction     = ( c >> 7 ) & 1;
           int cSize        = ( c >> 8 ) & 1;
-          clus.addHit(std::make_tuple(LHCb::FTChannelID{ station, layer, quarter,
+          clus.addHit(std::forward_as_tuple(LHCb::FTChannelID{ station, layer, quarter,
                                                          module, mat, sipm, channel },
                                       fraction, ( cSize ? 0 : 4 )), bank->sourceID() );
         }
@@ -308,7 +308,7 @@ FTRawBankDecoder::operator()(const LHCb::RawEvent& rawEvent) const
                                                    fraction(c), ( cSize(c) ? 0 : 4 ) };} );
       clus.insert(r.begin(),r.end(), quarterFromChannel(offset));
     }//end loop over rawbanks
-  }//version == 4  
+  }//version == 4
   else if (m_decodingVersion == 5u) {
     for ( const LHCb::RawBank* bank : banks) {//Iterates over the banks
       LHCb::FTChannelID offset = m_readoutTool->channelIDShift(bank->sourceID());
@@ -316,7 +316,7 @@ FTRawBankDecoder::operator()(const LHCb::RawEvent& rawEvent) const
 
       // Define Lambda functions to be used in loop
       auto make_cluster = [&clus, &quarter](unsigned chan, int fraction, int size) {
-        clus.addHit(std::make_tuple(chan, fraction, size), quarter );
+        clus.addHit(std::forward_as_tuple(chan, fraction, size), quarter );
       };
 
       // Make clusters between two channels
diff --git a/Kernel/LHCbKernel/Kernel/MultiIndexedContainer.h b/Kernel/LHCbKernel/Kernel/MultiIndexedContainer.h
index e7e3e120e302bcef165810ba17839c6c8b394308..ca07055d5a9264277e6df13ce1360ee0bdb89886 100644
--- a/Kernel/LHCbKernel/Kernel/MultiIndexedContainer.h
+++ b/Kernel/LHCbKernel/Kernel/MultiIndexedContainer.h
@@ -18,56 +18,9 @@ namespace LHCb
 namespace Container
 {
 
-namespace details
-{
-  template<typename C, typename Tuple, size_t ... I>
-  auto emplace_tuple(C& c, Tuple&& t, std::index_sequence<I...>) -> typename C::reference
-  {
-    c.emplace_back ( std::get<I>(t)... );
-
-    return c.back ();
-  }
-}
-
 namespace
 {
 
-  /**
-   * When C++17 is supported, all references to this function
-   * should be replaced with (... * sizes ).
-   *
-   * Allocate one more than the actual number requested.
-   *
-   * Reason for this is performance: this allows us to quickly
-   * return the size of the offsets (and if empty).
-   ***/
-  template<size_t ... S>
-  constexpr size_t multiply()
-  {
-    size_t result = 1;
-
-    for (auto s : { S... })
-      result *= s;
-
-    return result;
-  }
-
-  /**
-   * C++11 version of multiply.
-	template <typename ...Ts>
-	constexpr size_t multiply()
-	{
-		return  1;
-	}
-
-	template <size_t I,size_t... Is>
-	constexpr size_t multiply()
-	{
-		return   I * multiply<Is...>();
-	}
-
-   */
-
   /**
    * Helper function to access the value of the template
    * argument as an actual value. Only works for size_t.
@@ -140,8 +93,8 @@ public:
    * For the offsets we reserve one more, to make the calculation of the
    * number of hits quicker (can always do n(uniqueSubDetId).second - n(uniqueSubDetId).first )
    */
-  using Offsets = std::array<std::pair<offset_t, offset_t>, multiply<sizes...>() >;
-  using Ids = std::array<size_t, multiply<sizes...>()>;
+  using Offsets = std::array<std::pair<offset_t, offset_t>, ( ...*sizes )  >;
+  using Ids = std::array<size_t, ( ... * sizes ) >;
 
   using HitRange=Gaudi::Range_<MultiIndexedContainer<Hit, sizes...> >;
 
@@ -224,9 +177,7 @@ public:
   template<typename ... Args>
   size_t size(Args&&...args) const
   {
-    offset_t obegin{0}, oend{0};
-    std::tie(obegin, oend) = getOffsets(std::forward(args)...);
-
+    auto [ obegin, oend ] = getOffset(std::forward<Args>(args)...);
     assert(oend >= obegin && "ill-formed offsets");
     return oend - obegin;
   }
@@ -234,17 +185,12 @@ public:
   template<typename ... Args>
   bool empty(Args&&...args) const
   {
-    offset_t obegin{0}, oend{0};
-    std::tie(obegin, oend) = getOffsets(std::forward(args)...);
-
+    auto [ obegin, oend ] = getOffset(std::forward<Args>(args)...);
     assert(oend >= obegin && "ill-formed offsets");
     return oend == obegin;
   }
 
-  inline size_t nSubDetectors() const
-  {
-    return multiply<sizes...>();
-  }
+  constexpr size_t nSubDetectors() const { return ( ... * sizes ); }
 
   /**
    * Function to insert a range of hits in (detectorElementId).
@@ -285,26 +231,6 @@ public:
     m_offsets[subDetectorId].second += n;
   }
 
-  /*
-   * This implementation can be used when we have std::invoke available.
-   *
-		template<typename Tuple, typename ... LocArgs>
-		Hit& addHit(Tuple&& cargs, LocArgs&&... subDetectorElement)
-		{
-			static_assert((sizeof...(subDetectorElement) <= sizeof...(sizes)),
-					"The number of indices provided is strictly higher than the nesting for this subdetector.");
-
-			auto id = getUniqueDetectorElementId ( std::forward<LocArgs>(subDetectorElement)... );
-			m_ids.emplace_back ( id );
-			++m_nIds[id];
-			//constexpr auto nArguments = sizeof...(CtorArgs);
-
-			return std::invoke ( [this]( auto&&... args ) -> decltype(auto)
-			{	m_hits.emplace_back( std::forward<decltype(args)>(args)... );
-				return m_hits.back();}, std::forward<Tuple> ( cargs ) );
-		}
-   */
-
   /**
    * Function to create a single hit in (detectorElementId).
    * A reference to the hit created is returned.
@@ -317,18 +243,22 @@ public:
    * one should explicitly call setOffsets before further using the
    * hit manager.
    */
-  template<typename ... CtorArgs, typename ... LocArgs>
-  Hit& addHit(std::tuple<CtorArgs...>&& cargs, LocArgs&&... subDetectorElement)
+  template<typename Tuple, typename ... LocArgs>
+  Hit& addHit(Tuple&& cargs, LocArgs&&... subDetectorElement)
   {
-    auto id = getUniqueDetectorElementId ( std::forward<LocArgs>(subDetectorElement)... );
-#ifdef NDEBUG
-    m_ids.emplace_back ( id );
+    static_assert((sizeof...(subDetectorElement) <= sizeof...(sizes)),
+        "The number of indices provided is strictly higher than the nesting for this subdetector.");
+
+    auto id = getUniqueDetectorElementId ( std::forward<LocArgs>(subDetectorElement)... );
+#ifndef NDEBUG // NOTE(review): was '#ifdef NDEBUG' -- confirm m_ids really is debug-only bookkeeping
+    m_ids.emplace_back ( id );
 #endif
-    m_nIds[ id ] += 1;
+    ++m_nIds[id];
 
-    return details::emplace_tuple ( m_hits, std::forward<std::tuple<CtorArgs...> > ( cargs ),
-        std::index_sequence_for < CtorArgs... > {} );
-  }
+    return std::apply( [this]( auto&&... args ) -> decltype(auto)
+                       { return m_hits.emplace_back( std::forward<decltype(args)>(args)... ); },
+                       std::forward<Tuple>( cargs ) );
+  }
 
   void setOffsets(){
     // Set the offsets
@@ -396,7 +326,7 @@ private:
    * Is it therefore faster to cache the outcome of this?
    */
   template<typename ... Args>
-  inline size_t getUniqueDetectorElementId(Args&& ... args) const
+  size_t getUniqueDetectorElementId(Args&& ... args) const
   {
     constexpr auto nArguments = sizeof...(args);
     constexpr size_t detector_geometry[] = { sizes... };
@@ -433,7 +363,6 @@ private:
   }
 
   template<typename ... Args>
-  inline
   typename Offsets::value_type getOffset(Args&& ... args) const
   {
     const auto uniqueDetectorElementId = getUniqueDetectorElementId( std::forward<Args>(args)... );
@@ -441,7 +370,7 @@ private:
     return m_offsets[ uniqueDetectorElementId ];
   }
 
-  inline void clearOffsets()
+  void clearOffsets()
   {
     typename Offsets::value_type zero{0, 0};
     std::fill ( std::begin ( m_offsets ), std::end ( m_offsets ), zero );