Commit 7e9cfd89 authored by Marco Clemencic's avatar Marco Clemencic

Merge branch 'olupton_EvtStoreSvc' into 'master'

Use local memory pool in EvtStoreSvc

See merge request !1026
parents 0d0bb7cc 32bbddb7
Pipeline #1437327 failed with stages
in 21 minutes and 47 seconds
This diff is collapsed.
......@@ -190,6 +190,9 @@ gaudi_add_unit_test(test_GaudiTimer tests/src/test_GaudiTimer.cpp
gaudi_add_unit_test(test_Counters tests/src/CountersUnitTest.cpp
LINK_LIBRARIES GaudiKernel
TYPE Boost)
gaudi_add_unit_test(test_MonotonicArena tests/src/test_MonotonicArena.cpp
LINK_LIBRARIES GaudiKernel
TYPE Boost)
gaudi_add_compile_test(test_StatusCodeFail tests/src/test_StatusCode_fail.cxx
ERRORS "FAIL01;FAIL02;FAIL03;FAIL04")
......
/***********************************************************************************\
* (c) Copyright 2019-20 CERN for the benefit of the LHCb and ATLAS collaborations *
* *
* This software is distributed under the terms of the Apache version 2 licence, *
* copied verbatim in the file "LICENSE". *
* *
* In applying this licence, CERN does not waive the privileges and immunities *
* granted to it by virtue of its status as an Intergovernmental Organization *
* or submit itself to any jurisdiction. *
\***********************************************************************************/
#pragma once
#include <cstddef>
#include <functional>
#include <type_traits>
namespace Gaudi::Allocator {
/** @class Arena
* @brief Custom allocator holding a pointer to a generic memory resource.
*
* Custom allocator holding a pointer to a memory resource ("arena").
* Allocation and deallocation requests are passed through to the arena.
* Note that the typedefs propagate_on_container_{swap,copy_assignment,move_assignment}
* do not have their default values.
*
* The, optional, third template parameter (DefaultResource) may be used to specify
* a default arena, in which case the allocator is default-constructible. Otherwise,
* a pointer to an arena must be given.
*/
template <typename Resource, typename T, typename DefaultResource = void>
struct Arena {
using value_type = T;
// The allocator must follow the container contents on swap/copy/move assignment;
// otherwise a container could end up returning memory to a different arena than
// the one that served the original allocation.
using propagate_on_container_swap = std::true_type;
using propagate_on_container_copy_assignment = std::true_type;
using propagate_on_container_move_assignment = std::true_type;
/** Construct an allocator using the given memory resource, which must be valid.
*/
constexpr Arena( Resource* resource ) noexcept : m_resource{resource} {}
/** Construct an allocator using the resource provided by DefaultResource.
* This constructor is only enabled if an instance of DefaultResource can be invoked
* with no arguments and yields Resource*.
*/
template <typename D = void, typename = std::enable_if_t<std::is_invocable_r_v<Resource*, DefaultResource>, D>>
Arena() : Arena( std::invoke( DefaultResource{} ) ) {}
/** Converting copy constructor, rebinding U -> T.
* The rebound copy shares the same underlying memory resource.
*/
template <typename U>
constexpr Arena( Arena<Resource, U, DefaultResource> const& other ) noexcept : m_resource{other.m_resource} {}
/** Allocate storage for n objects of type T.
* The byte count is forwarded to the arena, requesting alignof( T ) alignment
* through the arena's allocate<Alignment>( bytes ) interface.
*/
[[nodiscard]] T* allocate( std::size_t n ) {
return reinterpret_cast<T*>( m_resource->template allocate<alignof( T )>( n * sizeof( T ) ) );
}
/** Deallocate storage for n objects.
* Simply forwarded to the arena; the arena decides whether anything happens.
*/
void deallocate( T* p, std::size_t n ) noexcept {
m_resource->deallocate( reinterpret_cast<std::byte*>( p ), n * sizeof( T ) );
}
/** Return a pointer to the memory resource.
*/
[[nodiscard]] Resource* resource() const noexcept { return m_resource; }
// Two allocators compare equal iff they point to the same arena, regardless of
// the value_type they have been rebound to.
template <typename U>
friend constexpr bool operator==( Arena const& lhs, Arena<Resource, U, DefaultResource> const& rhs ) {
return lhs.m_resource == rhs.m_resource;
}
// Standard allocator rebinding support: Arena<..., T> -> Arena<..., U>.
template <typename U>
struct rebind {
using other = Arena<Resource, U, DefaultResource>;
};
private:
// Required for the Arena<Resource, U, DefaultResource> converting copy constructor
// to read other.m_resource across instantiations.
template <typename, typename, typename>
friend struct Arena;
// Non-owning pointer to the arena that serves all requests.
Resource* m_resource{nullptr};
};
/** Inequality for (possibly differently rebound) Arena allocators.
 *  Defined in terms of operator== so the two comparisons can never disagree.
 */
template <typename Resource, typename T, typename U, typename DefaultResource>
inline constexpr bool operator!=( Arena<Resource, T, DefaultResource> const& a,
                                  Arena<Resource, U, DefaultResource> const& b ) {
  bool const equal = ( a == b );
  return !equal;
}
} // namespace Gaudi::Allocator
\ No newline at end of file
/***********************************************************************************\
* (c) Copyright 2019-20 CERN for the benefit of the LHCb and ATLAS collaborations *
* *
* This software is distributed under the terms of the Apache version 2 licence, *
* copied verbatim in the file "LICENSE". *
* *
* In applying this licence, CERN does not waive the privileges and immunities *
* granted to it by virtue of its status as an Intergovernmental Organization *
* or submit itself to any jurisdiction. *
\***********************************************************************************/
#pragma once
#include "Gaudi/Allocator/Arena.h"
#include "GaudiKernel/Kernel.h"
#include <boost/container/small_vector.hpp>
#include <gsl/span>
#include <cstddef>
#include <numeric>
namespace Gaudi::Arena {
namespace details {
  /** Round n up to the nearest multiple of Alignment.
   *  @tparam Alignment the alignment in bytes; must be a power of two, because
   *          the implementation relies on the mask-based round-up bit trick,
   *          which is only valid for power-of-two alignments.
   *  @param  n the byte count to round up.
   *  @return the smallest multiple of Alignment that is >= n.
   */
  template <std::size_t Alignment>
  constexpr std::size_t align_up( std::size_t n ) {
    static_assert( Alignment > 0 && ( Alignment & ( Alignment - 1 ) ) == 0,
                   "align_up requires a power-of-two alignment" );
    return ( n + ( Alignment - 1 ) ) & ~( Alignment - 1 );
  }
} // namespace details
/** @class Monotonic
* @brief A fast memory arena that does not track deallocations.
*
* This is a memory arena suitable for use with Gaudi::Allocators::Arena.
* It allocates memory from an upstream resource in blocks of geometrically
* increasing size and serves allocation requests from those blocks.
* Deallocations are not tracked, so the memory footprint of a Monotonic
* arena increases monotonically until either it is destroyed or its reset()
* method is called.
* All requests are served with alignment specified in the template parameter.
*
* @todo Efficiently support stateful upstream allocators, probably by putting an
* instance of the upstream allocator in a boost::compressed_pair.
* @todo Use the given UpstreamAllocator to serve dynamic allocations required by
* boost::container::small_vector.
*/
template <std::size_t Alignment = alignof( std::max_align_t ), typename UpstreamAllocator = std::allocator<std::byte>>
class Monotonic {
// Restriction could be lifted, see @todo above.
static_assert( std::is_empty_v<UpstreamAllocator>, "Stateful upstream allocators are not yet supported." );
/// Size (in bytes) of the next block to be allocated; always a multiple of Alignment.
std::size_t m_next_block_size{};
/// Number of allocation requests served by this arena.
std::size_t m_allocations{0};
/// Current position in the current block, or nullptr if there is no current block.
std::byte* m_current{nullptr};
/// One byte past the end of the current block, or nullptr if it doesn't exist.
std::byte* m_current_end{nullptr};
/// All memory blocks owned by this arena; the inline capacity of one covers the
/// common case where the first block satisfies every request.
boost::container::small_vector<gsl::span<std::byte>, 1> m_all_blocks;
/// Approximate factor by which each block is larger than its predecessor.
static constexpr std::size_t growth_factor = 2;
public:
static constexpr std::size_t alignment = Alignment;
/** Construct an arena whose first block will have approximately the given size
* (rounded up to a multiple of Alignment).
* This constructor does not trigger any allocation.
*/
Monotonic( std::size_t next_block_size ) noexcept
: m_next_block_size{details::align_up<Alignment>( next_block_size )} {}
~Monotonic() noexcept {
// Only the raw storage is returned upstream; the arena never runs destructors
// of objects that were carved out of its blocks.
for ( auto block : m_all_blocks ) { UpstreamAllocator{}.deallocate( block.data(), block.size() ); }
}
// Allocators will hold pointers to instances of this class, deleting these
// methods makes it harder to accidentally invalidate those pointers...
Monotonic( Monotonic&& ) = delete;
Monotonic( Monotonic const& ) = delete;
Monotonic& operator=( Monotonic&& ) = delete;
Monotonic& operator=( Monotonic const& ) = delete;
/** Return an aligned pointer to n bytes of memory.
* This may trigger allocation from the upstream resource.
*/
template <std::size_t ReqAlign>
std::byte* allocate( std::size_t n ) {
// If the requested alignment was larger we would need to round up
// m_current -- instead of implementing that, just assert it's not
// the case.
static_assert( ReqAlign <= alignment, "Requested alignment too large for this Gaudi::Arena::Monotonic!" );
// Figure out how many bytes we need to allocate; rounding up to Alignment keeps
// m_current aligned after every request.
std::size_t const aligned_n = details::align_up<Alignment>( n );
// Check that we have a current block and this request fits inside it
if ( UNLIKELY( !m_current || m_current + aligned_n > m_current_end ) ) {
// Calculate our next block size; it must at least fit this request
auto next_block_size = std::max( m_next_block_size, aligned_n );
// And update the estimate of what comes after that, following a geometric series
m_next_block_size = details::align_up<Alignment>( growth_factor * next_block_size );
// Allocate the new block and mark it as the current one
m_current = UpstreamAllocator{}.allocate( next_block_size );
m_current_end = m_current + next_block_size;
// Add it to the list of blocks that we'll eventually deallocate
m_all_blocks.emplace_back( m_current, next_block_size );
}
m_allocations++;
// Hand out the old cursor position and advance the cursor past the padded request.
return std::exchange( m_current, m_current + aligned_n );
}
/** Deallocations are not tracked, so this is a no-op!
*/
constexpr void deallocate( std::byte*, std::size_t ) noexcept {}
/** Signal that this arena may start re-using the memory resources.
* - If the arena owns zero blocks, there is no change.
* - If the arena owns one block, it will reset to serving future requests from the
* start of that block.
* - If the arena owns more than one block, it will deallocate all but the first one
* and serve future requests from the start of the remaining block.
*/
void reset() noexcept {
m_allocations = 0;
if ( !m_all_blocks.empty() ) {
// Only re-use the first block, deallocate any others
if ( UNLIKELY( m_all_blocks.size() > 1 ) ) {
for ( std::size_t i = 1; i < m_all_blocks.size(); ++i ) {
UpstreamAllocator{}.deallocate( m_all_blocks[i].data(), m_all_blocks[i].size() );
}
m_all_blocks.resize( 1 );
}
auto reused_block = m_all_blocks.front();
m_current = reused_block.data();
m_current_end = m_current + reused_block.size();
// Restart the geometric series from the size of the block we kept.
m_next_block_size = details::align_up<Alignment>( growth_factor * reused_block.size() );
}
}
/** Query how much memory is owned by this arena, in bytes.
*/
[[nodiscard]] std::size_t capacity() const noexcept {
return std::accumulate( m_all_blocks.begin(), m_all_blocks.end(), 0ul,
[]( std::size_t sum, auto block ) { return sum + block.size(); } );
}
/** Query how much memory was *used* from this arena, in bytes.
* Note: only the free tail of the *current* block is subtracted, so any space
* left unused at the end of earlier blocks counts as used.
*/
[[nodiscard]] std::size_t size() const noexcept { return capacity() - ( m_current_end - m_current ); }
/** Query how many blocks of memory this arena owns.
*/
[[nodiscard]] std::size_t num_blocks() const noexcept { return m_all_blocks.size(); }
/** Query how many allocations this arena has served.
*/
[[nodiscard]] std::size_t num_allocations() const noexcept { return m_allocations; }
};
} // namespace Gaudi::Arena
namespace Gaudi::Allocator {
/** @class MonotonicArena
* @brief Shorthand for Gaudi::Allocator::Arena with Gaudi::Arena::Monotonic resource
*/
template <typename T, typename DefaultResource = void, std::size_t Alignment = alignof( std::max_align_t ),
typename UpstreamAllocator = std::allocator<std::byte>>
using MonotonicArena =
::Gaudi::Allocator::Arena<::Gaudi::Arena::Monotonic<Alignment, UpstreamAllocator>, T, DefaultResource>;
} // namespace Gaudi::Allocator
\ No newline at end of file
/***********************************************************************************\
* (c) Copyright 2019-20 CERN for the benefit of the LHCb and ATLAS collaborations *
* *
* This software is distributed under the terms of the Apache version 2 licence, *
* copied verbatim in the file "LICENSE". *
* *
* In applying this licence, CERN does not waive the privileges and immunities *
* granted to it by virtue of its status as an Intergovernmental Organization *
* or submit itself to any jurisdiction. *
\***********************************************************************************/
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE test_MonotonicArena
#include "Gaudi/Arena/Monotonic.h"
#include <boost/align/align_up.hpp>
#include <boost/test/unit_test.hpp>
#include <cstdlib>
#include <new>
// Global counters tracking allocation traffic through the replaced
// operator new/delete, so the tests below can check how much memory the
// arena requests and releases.
std::size_t memory  = 0; // total bytes requested via ::operator new
std::size_t alloc   = 0; // number of calls to ::operator new
std::size_t dealloc = 0; // number of calls to ::operator delete
// Override global operators for testing purposes.
// A replacement operator new must never return nullptr -- on failure it has to
// throw std::bad_alloc ([new.delete.single]) -- so the malloc result is checked
// instead of being returned blindly.
void* operator new( std::size_t s ) {
  memory += s;
  ++alloc;
  if ( void* p = std::malloc( s ) ) { return p; }
  throw std::bad_alloc{};
}
void operator delete( void* p ) noexcept {
  ++dealloc;
  std::free( p );
}
// Sized variant: the size argument is ignored, std::free needs only the pointer.
void operator delete( void* p, std::size_t ) noexcept {
  ++dealloc;
  std::free( p );
}
// Exercises Gaudi::Arena::Monotonic directly: construction, first allocation,
// growth to a second block, reset, and block re-use. The checks read the global
// operator new/delete counters, so their order matters.
BOOST_AUTO_TEST_CASE( test_arena ) {
memory = alloc = dealloc = 0; // reset the global counters just in case
std::size_t first_block_size = 100;
constexpr std::size_t alignment = 2;
// arena with the default upstream allocator
Gaudi::Arena::Monotonic<alignment> arena{first_block_size};
// no requests served yet, everything should be zero -- construction must not allocate
BOOST_CHECK( alloc == 0 );
BOOST_CHECK( memory == 0 );
BOOST_CHECK( dealloc == 0 );
BOOST_CHECK( arena.size() == 0 );
BOOST_CHECK( arena.capacity() == 0 );
BOOST_CHECK( arena.num_blocks() == 0 );
BOOST_CHECK( arena.num_allocations() == 0 );
// allocate 11 bytes; must fit inside the first block
std::size_t nbytes = 11;
BOOST_CHECK( nbytes <= first_block_size );
auto ptr = arena.allocate<alignment>( nbytes );
BOOST_CHECK( ptr != nullptr );
// check the basics first: exactly one upstream allocation, nothing freed
BOOST_CHECK( alloc == 1 );
BOOST_CHECK( dealloc == 0 );
BOOST_CHECK( arena.size() >= nbytes );
BOOST_CHECK( arena.capacity() == memory );
BOOST_CHECK( arena.capacity() >= first_block_size );
BOOST_CHECK( arena.num_blocks() == 1 );
BOOST_CHECK( arena.num_allocations() == 1 );
// arguably these are implementation details: requests and block sizes are
// rounded up to the arena's alignment
BOOST_CHECK( arena.size() == boost::alignment::align_up( nbytes, alignment ) );
BOOST_CHECK( arena.capacity() == boost::alignment::align_up( first_block_size, alignment ) );
// allocate enough more that the arena will have to allocate a second block
BOOST_CHECK( arena.allocate<alignment>( first_block_size ) != nullptr );
BOOST_CHECK( alloc >= 2 ); // maybe more because of small vector
BOOST_CHECK( dealloc == 0 );
BOOST_CHECK( arena.size() >= nbytes + first_block_size );
BOOST_CHECK( arena.capacity() >= 2 * first_block_size );
BOOST_CHECK( memory >= arena.capacity() );
BOOST_CHECK( arena.num_blocks() == 2 );
BOOST_CHECK( arena.num_allocations() == 2 );
// reset the arena: should free everything except the first block
auto memory_pre_reset = memory;
auto allocs_pre_reset = alloc;
arena.reset();
BOOST_CHECK( dealloc >= 1 ); // should have freed the second block (small vector?)
BOOST_CHECK( alloc == allocs_pre_reset );
BOOST_CHECK( memory == memory_pre_reset );
BOOST_CHECK( arena.size() == 0 );
BOOST_CHECK( arena.capacity() >= first_block_size );
BOOST_CHECK( arena.num_blocks() == 1 );
BOOST_CHECK( arena.num_allocations() == 0 );
// after reset the first block is re-used, so the same pointer comes back
auto ptr2 = arena.allocate<alignment>( nbytes );
BOOST_CHECK( ptr == ptr2 );
}
// Exercises the allocator front-end: a std::vector using MonotonicArena must
// serve its storage from the arena with a single upstream allocation.
BOOST_AUTO_TEST_CASE( test_allocator ) {
  // Reset the global counters so this test is independent of execution order.
  memory = alloc = dealloc = 0;
  auto num_elements = 100;
  auto bytes_needed = num_elements * sizeof( int );
  Gaudi::Arena::Monotonic<> arena{bytes_needed};
  // Vector whose allocator forwards every request to `arena`.
  std::vector<int, Gaudi::Allocator::MonotonicArena<int>> vec{&arena};
  vec.resize( num_elements );
  // Everything fits in the arena's first block: one upstream allocation, fully used.
  BOOST_CHECK( alloc == 1 );
  BOOST_CHECK( arena.size() == bytes_needed );
  BOOST_CHECK( arena.capacity() >= bytes_needed );
  BOOST_CHECK( arena.num_blocks() == 1 );
  BOOST_CHECK( arena.num_allocations() == 1 );
}
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment