From b00cead7cd02146e9f37576dc0418f7ac82d9d7d Mon Sep 17 00:00:00 2001
From: Markus Frank <Markus.Frank@cern.ch>
Date: Thu, 4 Apr 2024 20:16:58 +0200
Subject: [PATCH 1/6] Add basic XZ/LZMA compression example

---
 Online/Dataflow/src/Storage/StorageReader.cpp |  49 +--
 Online/Dataflow/src/Storage/StorageWriter.cpp |  62 +--
 Online/Dataflow/src/components/Delay.cpp      |   6 +-
 Online/Dataflow/src/framework/DiskReader.cpp  |   6 +-
 Online/EventData/include/EventData/RawFile.h  |   3 +
 Online/EventData/src/RawFile.cpp              |   7 +
 Online/FarmConfig/job/EBPass.sh               |   2 +
 Online/GaudiOnline/components/OutputAlg.cpp   |   2 +-
 .../python/GaudiOnlineTests/DF.py             |   9 +-
 .../GaudiOnlineTests/src/TestCompression.cpp  |  72 +++-
 .../dataflow/df_read_mdf_zstd.xml             |  22 +
 .../dataflow/df_read_mdf_zstd_stream.xml      |  22 +
 .../dataflow/df_read_tae_zstd.xml             |  22 +
 .../dataflow/df_read_tae_zstd_stream.xml      |  22 +
 .../dataflow/df_write_mdf_gzip.xml            |   2 +-
 .../dataflow/df_write_mdf_zstd.xml            |  23 ++
 .../dataflow/df_write_tae_gzip.xml            |   2 +-
 .../dataflow/df_write_tae_zstd.xml            |  27 ++
 .../dataflow.qms/df_05_write_mdf_zstd.qmt     |  30 ++
 .../dataflow.qms/df_06_write_tae_zstd.qmt     |  33 ++
 .../dataflow.qms/df_07_read_mdf_zstd.qmt      |  35 ++
 .../df_07_read_mdf_zstd_stream.qmt            |  35 ++
 .../dataflow.qms/df_09_read_tae_zstd.qmt      |  35 ++
 .../df_09_read_tae_zstd_stream.qmt            |  35 ++
 .../rtl_test_compression_lzma.qmt             |  27 ++
 .../rtl_test_compression_zlib.qmt             |   2 +-
 .../rtl_test_compression_zstd.qmt             |  26 ++
 .../rtl_test_decompression_lzma.qmt           |  26 ++
 .../rtl_test_decompression_zlib.qmt           |   1 +
 .../rtl_test_decompression_zstd.qmt           |  26 ++
 .../refs/BASE/rtl_test_compression_lzma.ref   |   2 +
 .../refs/BASE/rtl_test_compression_zlib.ref   |   2 +-
 .../refs/BASE/rtl_test_compression_zstd.ref   |   2 +
 .../refs/BASE/rtl_test_decompression_lzma.ref |   3 +
 .../refs/BASE/rtl_test_decompression_zlib.ref |   1 +
 .../refs/BASE/rtl_test_decompression_zstd.ref |   3 +
 .../CTRL/controller_eval_architecture.ref     | 180 ++++++++
 .../CTRL/controller_read_architecture_xml.ref | 330 +++++++++++++++
 .../tests/refs/DF/df_read_mdf_zstd.ref        |  82 ++++
 .../tests/refs/DF/df_read_mdf_zstd_stream.ref |  65 +++
 .../tests/refs/DF/df_read_tae_zstd.ref        |  82 ++++
 .../tests/refs/DF/df_read_tae_zstd_stream.ref |  65 +++
 .../tests/refs/DF/df_write_mdf_zstd.ref       |  59 +++
 .../tests/refs/DF/df_write_tae_zstd.ref       |  59 +++
 Online/OnlineBase/CMakeLists.txt              |  15 +-
 Online/OnlineBase/include/RTL/Compress.h      |  28 ++
 Online/OnlineBase/src/COMPRESS/Compress.cpp   | 261 ++++++++----
 Online/OnlineBase/src/COMPRESS/posix_gzip.cpp |  36 +-
 Online/OnlineBase/src/COMPRESS/posix_lzma.cpp | 390 +++++++++++-------
 Online/OnlineBase/src/COMPRESS/posix_zstd.cpp |  15 +-
 Online/Testing/python/OnlineTest/__init__.py  |   2 +-
 bin/build_standalone.sh                       |  12 +-
 cmake/OnlineDependencies.cmake                |   1 +
 53 files changed, 1994 insertions(+), 372 deletions(-)
 create mode 100644 Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_zstd.xml
 create mode 100644 Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_zstd_stream.xml
 create mode 100644 Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_zstd.xml
 create mode 100644 Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_zstd_stream.xml
 create mode 100644 Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_mdf_zstd.xml
 create mode 100644 Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_tae_zstd.xml
 create mode 100644 Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_05_write_mdf_zstd.qmt
 create mode 100644
Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_06_write_tae_zstd.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd_stream.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_zstd.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_zstd_stream.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_lzma.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_zstd.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_decompression_lzma.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_decompression_zstd.qmt create mode 100644 Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_lzma.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_zstd.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_lzma.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zstd.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_zstd.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_zstd_stream.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_zstd.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_zstd_stream.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/DF/df_write_mdf_zstd.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/DF/df_write_tae_zstd.ref diff --git a/Online/Dataflow/src/Storage/StorageReader.cpp b/Online/Dataflow/src/Storage/StorageReader.cpp index b3781687d..4d468b91e 100644 --- a/Online/Dataflow/src/Storage/StorageReader.cpp +++ b/Online/Dataflow/src/Storage/StorageReader.cpp @@ -168,6 +168,7 @@ namespace Online { #include <EventData/event_header_t.h> #include <EventData/raw_bank_offline_t.h> #include <Storage/fdb_client.h> +#include <RTL/Compress.h> #include <RTL/strdef.h> /// ROOT include files @@ -363,7 +364,6 @@ int StorageReader::open_file_posix_root(const std::string& loc, Buffer& buffer) return error("Failed to open TFile: %s [Protocol error]", loc.c_str()); } -#include <zlib.h> int StorageReader::open_file_posix_raw(const std::string& loc, Buffer& buffer) { RawFile input(loc); if ( input.open() ) { @@ -371,47 +371,14 @@ int StorageReader::open_file_posix_raw(const std::string& loc, Buffer& buffer) buffer.data.resize(file_length); std::size_t read_length = input.read(&buffer.data.at(0), file_length); if ( read_length == file_length ) { - /// Check if we have a gzip file. 
If YES: on the fly uncompress the stream + /// ZLIB compression: Check first 3 bytes as described in RFC 1950 + /// If YES: on the fly uncompress the stream if ( buffer.data.at(0) == 0x1f && buffer.data.at(1) == 0x8b ) { - constexpr static uint32_t CHUNK = 0x4000; - constexpr static int32_t ENABLE_ZLIB_GZIP = 32; - std::vector<unsigned char> result; - uint8_t out[CHUNK+1]; - int32_t windowBits = 15; - z_stream stream; - - result.reserve(2*file_length); - ::memset(&stream,0,sizeof(stream)); - stream.zalloc = Z_NULL; - stream.zfree = Z_NULL; - stream.opaque = Z_NULL; - stream.next_in = (unsigned char*)&buffer.data[0]; - stream.avail_in = 0; - int status = ::inflateInit2(&stream, windowBits + ENABLE_ZLIB_GZIP); - if ( status < 0 ) { - return error("Failed to initialize zlib/gzip [%s]", - std::error_condition(EINVAL,std::system_category()).message().c_str()); - } - stream.avail_in = file_length; - stream.next_in = (unsigned char*)&buffer.data[0]; - do { - stream.avail_out = CHUNK; - stream.next_out = out; - status = ::inflate (&stream, Z_NO_FLUSH); - switch (status) { - case Z_OK: - case Z_STREAM_END: - break; - case Z_BUF_ERROR: - default: - inflateEnd(&stream); - return error("Failed to initialize zlib/gzip [status: %d]", status); - } - std::copy(out, out+CHUNK-stream.avail_out, std::back_inserter(result)); - } while (stream.avail_out == 0); - ::inflateEnd(&stream); - buffer.data = std::move(result); - result.clear(); + buffer.data = compress::decompress_gzip(std::move(buffer.data)); + } + /// Check ZSTD magic word (see RFC 8878 for details) + else if ( *(uint32_t*)&buffer.data.at(0) == 0xFD2FB528 ) { + buffer.data = compress::decompress_zstd(std::move(buffer.data)); } ++this->m_filesClosed; input.close(); diff --git a/Online/Dataflow/src/Storage/StorageWriter.cpp b/Online/Dataflow/src/Storage/StorageWriter.cpp index 97a9c5d84..b7a84b6b4 100644 --- a/Online/Dataflow/src/Storage/StorageWriter.cpp +++ b/Online/Dataflow/src/Storage/StorageWriter.cpp @@ -87,10 +87,14 @@ namespace { return true; } } +#include <RTL/posix.h> +extern "C" Online::posix_t* posix_gzip_descriptor(); +extern "C" Online::posix_t* posix_zstd_descriptor(); +extern "C" Online::posix_t* posix_lzma_descriptor(); +extern "C" Online::posix_t* posix_lz4_descriptor(); struct StorageWriter::POSIX_FILE { public: - gzFile gzip { nullptr }; RawFile file { }; std::mutex lock { }; uint64_t length { 0 }; @@ -108,38 +112,46 @@ public: ~POSIX_FILE() = default; void close() { - if ( gzip != nullptr ) { - ::gzclose_w(gzip); - gzip = nullptr; - } if ( this->file.isOpen() ) { this->file.close(); } this->length = 0; this->run = 0; } - int enable_gzip_compression(std::size_t buffer_len, int level, int strategy=Z_DEFAULT_STRATEGY) { - if ( this->length != 0 ) { + int enable_posix_compression(posix_t* desc, std::size_t buff_len, int level) { + if ( this->length != 0 ) return -1; - } - if ( this->file.isOpen() ) { - gzip = ::gzdopen(this->file.fileno(), "w"); - } - if ( nullptr == gzip ) { + else if ( this->file.isOpen() ) + this->file.close(); + std::string fn = this->file.name(); + this->file = RawFile(fn, desc); + if ( this->file.openWrite(false) < 0 ) return -1; - } - if ( 0 != ::gzbuffer(gzip, buffer_len) ) { - ::gzclose(gzip); - gzip = nullptr; + else if ( 0 != this->file.set_option("strategy", "default") ) return -1; - } - if ( 0 != ::gzsetparams(gzip, level, strategy) ) { - ::gzclose(gzip); - gzip = nullptr; + else if( 0 != this->file.set_option("compression", std::to_string(level)) ) + return -1; + else if( 0 != 
this->file.set_option("buffer_length",std::to_string(buff_len)) ) + return -1; + else if( 0 != this->file.set_option("apply", "") ) { return -1; } return 0; } + int enable_compression(int typ, std::size_t buff_len, int level) { + if ( typ == DataCompression::NONE ) + return 0; + if ( typ == DataCompression::ZLIB ) + return enable_posix_compression(posix_gzip_descriptor(), buff_len, level); + else if ( typ == DataCompression::ZSTD ) + return enable_posix_compression(posix_zstd_descriptor(), buff_len, level); + else if ( typ == DataCompression::LZMA ) + return enable_posix_compression(posix_lzma_descriptor(), buff_len, level); + else if ( typ == DataCompression::LZ4 ) + return enable_posix_compression(posix_lz4_descriptor(), buff_len, level); + except("StorageWriter", "Not implemented compression for full file compression: %d [int-type]", typ); + return -1; + } int open(const std::string& fname, int verifyNFS) { std::filesystem::path parent = std::filesystem::path(fname).parent_path(); if ( verifyNFS ) { @@ -163,9 +175,6 @@ public: return this->file.isOpen(); } long write(const void* buff, std::size_t len) { - if ( gzip ) { - return ::gzwrite(gzip, buff, len); - } return this->file.write(buff, len); } }; @@ -228,7 +237,7 @@ public: return -1; } /// No-op. Cannot be implemented for ROOT - int enable_gzip_compression(std::size_t, int , int = 0) { + int enable_compression(int, std::size_t, int) { return 0; } const char* name() const { @@ -324,6 +333,7 @@ int StorageWriter::initialize() { } this->m_compressionLevel &= 0xF; this->m_compressionType = DataCompression::NONE; + this->m_compressionTypeName = RTL::str_upper(this->m_compressionTypeName); if ( this->m_compressionTypeName == "NONE" ) this->m_compressionType = DataCompression::NONE; else if ( this->m_compressionTypeName == "ZLIB" ) @@ -1203,7 +1213,9 @@ int StorageWriter::process_posix_buffers(std::mutex& queue_lock, return DF_ERROR; } if ( this->m_fileCompression ) { - if ( 0 != output->enable_gzip_compression(this->m_buffer_size, this->m_compressionLevel) ) { + if ( 0 != output->enable_compression(this->m_compressionType, + this->m_buffer_size, + this->m_compressionLevel) ) { this->warning("FAILED to enable file compression for %s", output->name()); } this->info("File compression enabled for %s", output->name()); diff --git a/Online/Dataflow/src/components/Delay.cpp b/Online/Dataflow/src/components/Delay.cpp index ceef9cd7a..336f3e831 100644 --- a/Online/Dataflow/src/components/Delay.cpp +++ b/Online/Dataflow/src/components/Delay.cpp @@ -131,7 +131,7 @@ void Delay::handle(const DataflowIncident& inc) { if ( inc.type == "DAQ_PAUSE" ) { this->delay("daq_pause", m_pauseDelay); if ( m_autoContinue > 0e0 ) { - always("+++ Got incident %s: trigger auto transition to %s after %.3f seconds", + always("+++ Got incident %s: trigger auto transition to %s after %f seconds", inc.type.c_str(), "RUNNING", m_autoContinue); IocSensor::instance().send(this, CMD_AUTO_CONTINUE, nullptr); } @@ -141,12 +141,12 @@ void Delay::handle(const DataflowIncident& inc) { } else if ( inc.type == "DAQ_RUNNING" ) { if ( m_autoPause > 0e0 ) { - always("+++ Got incident %s: trigger auto transition to %s after %.3f seconds", + always("+++ Got incident %s: trigger auto transition to %s after %f seconds", inc.type.c_str(), "PAUSED", m_autoPause); IocSensor::instance().send(this, CMD_AUTO_PAUSE, nullptr); } if ( m_autoError > 0e0 ) { - always("+++ Got incident %s: trigger auto transition to %s after %.3f seconds", + always("+++ Got incident %s: trigger auto transition to %s 
after %f seconds", inc.type.c_str(), "ERROR", m_autoError); IocSensor::instance().send(this, CMD_AUTO_ERROR, nullptr); } diff --git a/Online/Dataflow/src/framework/DiskReader.cpp b/Online/Dataflow/src/framework/DiskReader.cpp index 1d56f3fbe..f754af871 100644 --- a/Online/Dataflow/src/framework/DiskReader.cpp +++ b/Online/Dataflow/src/framework/DiskReader.cpp @@ -1339,7 +1339,7 @@ int DiskReader::i_run() { uint8_t file_buff[4]; if ( current_input.read(file_buff, sizeof(file_buff)) == sizeof(file_buff) ) { std::string fname = current_input.name(); - // ZLIB compression + // ZLIB compression: Check first 3 bytes as described in RFC 1950 if ( file_buff[0] == 0x1f && file_buff[1] == 0x8b && file_buff[2] == Z_DEFLATED ) { current_input.close(); current_input = RawFile(fname, posix_zlib_descriptor()); @@ -1347,8 +1347,8 @@ int DiskReader::i_run() { continue; } } - // ZSTD compression - else if ( file_buff[0] == uint8_t('Z') && file_buff[1] == uint8_t('S') && file_buff[2] == uint8_t('\1') ) { + // ZSTD compression: Check ZSTD magic word (see RFC 8878 for details) + else if ( *(uint32_t*)file_buff == 0xFD2FB528 ) { current_input.close(); current_input = RawFile(fname, posix_zstd_descriptor()); if ( current_input.open() == -1 ) { diff --git a/Online/EventData/include/EventData/RawFile.h b/Online/EventData/include/EventData/RawFile.h index ebc1fc979..e347b695b 100644 --- a/Online/EventData/include/EventData/RawFile.h +++ b/Online/EventData/include/EventData/RawFile.h @@ -102,6 +102,9 @@ namespace Online { RawFile(int descriptor, bool); /// Initializing constructor RawFile(int descriptor, const posix_t* posix_descriptor); + /// Initializing constructor + RawFile(const std::string& nam, int descriptor, const posix_t* posix_descriptor); + /// Default destructor virtual ~RawFile() = default; /// Assignment diff --git a/Online/EventData/src/RawFile.cpp b/Online/EventData/src/RawFile.cpp index 6c4bbbb9c..d0000cd45 100644 --- a/Online/EventData/src/RawFile.cpp +++ b/Online/EventData/src/RawFile.cpp @@ -68,6 +68,13 @@ RawFile::RawFile(const string& fname, const posix_t* posix_descriptor) m_reuse = false; } +/// Initializing constructor +RawFile::RawFile(const std::string& nam, int descriptor, const posix_t* posix_descriptor) + : m_name(nam), m_fd(descriptor), m_descriptor(posix_descriptor) +{ + m_reuse = false; +} + /// Standard constructor RawFile::RawFile(int descriptor) : m_name() diff --git a/Online/FarmConfig/job/EBPass.sh b/Online/FarmConfig/job/EBPass.sh index a286bd49e..6069903cc 100755 --- a/Online/FarmConfig/job/EBPass.sh +++ b/Online/FarmConfig/job/EBPass.sh @@ -37,6 +37,8 @@ fi; # TAE global if test "${PARTITION_NAME}" = "LHCb"; then export EXPAND_TAE=YES; +elif test "${PARTITION_NAME}" = "RICH"; then + export EXPAND_TAE=YES; fi; # # diff --git a/Online/GaudiOnline/components/OutputAlg.cpp b/Online/GaudiOnline/components/OutputAlg.cpp index 41646c984..eef1bfaf5 100644 --- a/Online/GaudiOnline/components/OutputAlg.cpp +++ b/Online/GaudiOnline/components/OutputAlg.cpp @@ -327,7 +327,7 @@ StatusCode OutputAlg::process(EventContext const& /* ctxt */) const { bool requireODIN = this->m_requireODIN; EventOutput& output = *this->m_output.get(); std::vector<int> mask = this->m_mask_generator.get(); - + if ( use_tae_leaves ) { DataObject* pObj = nullptr; std::vector<IRegistry*> leaves; diff --git a/Online/GaudiOnlineTests/python/GaudiOnlineTests/DF.py b/Online/GaudiOnlineTests/python/GaudiOnlineTests/DF.py index 78f50a714..e6e95bdca 100644 --- a/Online/GaudiOnlineTests/python/GaudiOnlineTests/DF.py +++ 
b/Online/GaudiOnlineTests/python/GaudiOnlineTests/DF.py @@ -851,7 +851,7 @@ def ReaderEx(directory, prefix='*', rescan=0, delay=0.0, wait=20, checksum=None) return task.run() #-------------------------------------------------------------------------- -def Writer(output, max_events=100000, buffer='Events', compression=None, file_compression=None, checksum=None, execute=True): +def Writer(output, max_events=100000, buffer='Events', compression_type=None, compression=None, file_compression=None, checksum=None, execute=True): global online_environment import OnlineEnvBase as online_environment print('INFO: +++ Writer output specs: %s [max. %d events per file]'%(output, max_events, )) @@ -859,12 +859,11 @@ def Writer(output, max_events=100000, buffer='Events', compression=None, file_co task.setup_basics() cl, sel, wr = task.setup_file_writer_task(name='Writer', buffer=buffer, output=output) wr.MaxEvents = max_events - if file_compression and compression: - wr.FileCompression = 1 - wr.CompressionLevel = compression - elif compression: + if compression: wr.CompressionType = compression[0] wr.CompressionLevel = compression[1] + if file_compression: + wr.FileCompression = 1 if checksum and isinstance(checksum, str): checksum = checksum.upper() if checksum == 'ADLER32': diff --git a/Online/GaudiOnlineTests/src/TestCompression.cpp b/Online/GaudiOnlineTests/src/TestCompression.cpp index 918c3fced..a71ee1ad2 100644 --- a/Online/GaudiOnlineTests/src/TestCompression.cpp +++ b/Online/GaudiOnlineTests/src/TestCompression.cpp @@ -23,7 +23,10 @@ #define CHUNK_SIZE 8192 namespace fs = std::filesystem; + namespace { + + /// Test routine to compress input file (args) to output file (args) int rtl_test_compression(int argc, char** argv, Online::posix_t* descriptor) { RTL::CLI cli(argc, argv, []() { ::printf("rtl_test_<type>_compression -option [-option] \n\n" @@ -34,15 +37,15 @@ namespace { " -compression=<leve> Compression level \n" " Technology dependent \n" "\n" ); - ::exit(EINVAL); }); - std::string input, output, strategy = "default", compression = "3"; + std::string input, output, strategy, compression = "3"; cli.getopt("input", 3, input); cli.getopt("output", 3, output); cli.getopt("strategy", 3, strategy); cli.getopt("compression", 3, compression); if ( input.empty() || output.empty() ) { cli.call_help(); + ::exit(EINVAL); } input = RTL::str_expand_env(input); output = RTL::str_expand_env(output); @@ -54,8 +57,8 @@ namespace { std::size_t in_bytes = 0; std::size_t out_bytes = 0; char chunk[CHUNK_SIZE]; - out.set_option("STRATEGY", strategy); - out.set_option("COMPRESSION_LEVEL", compression); + if ( !strategy.empty() ) out.set_option("STRATEGY", strategy); + if ( !compression.empty() ) out.set_option("COMPRESSION_LEVEL", compression); out.set_option("APPLY", ""); do { iret = in.read(chunk, sizeof(chunk)); @@ -70,6 +73,7 @@ namespace { fs::path(output).filename().c_str(), oret); break; } + ::lib_rtl_output(LIB_RTL_ALWAYS, "Failed to read input file %s (iret=%d) after a total of 0x%08X bytes read.", fs::path(input).filename().c_str(), iret, in_bytes); @@ -93,19 +97,23 @@ namespace { return EINVAL; } + /// Test routine to decompress input file (args) to output file (args) int rtl_test_decompression(int argc, char** argv, Online::posix_t* descriptor) { RTL::CLI cli(argc, argv, []() { ::printf("rtl_test_<type>_decompression -option [-option] \n\n" " -input=<file-name> Input file name \n" " -output=<file-name> Output file name \n" + " -source=<file-name> Source file name \n" + " Source will be checked against 
deflated file.\n" "\n" ); - ::exit(EINVAL); }); - std::string input, output; + std::string input, output, source; cli.getopt("input", 3, input); cli.getopt("output", 3, output); + cli.getopt("source", 3, source); if ( input.empty() || output.empty() ) { cli.call_help(); + ::exit(EINVAL); } input = RTL::str_expand_env(input); output = RTL::str_expand_env(output); @@ -143,13 +151,59 @@ namespace { "Decompressed input file %s [0x%08X uncompressed / 0x%08X compressed bytes] to %s with 0x%08X bytes.", fs::path(input).filename().c_str(), in_bytes, stat_buf.st_size, fs::path(output).filename().c_str(), out_bytes); + if ( !source.empty() ) { + Online::RawFile f1(RTL::str_expand_env(source)); + Online::RawFile f2(output); + if ( f1.open() > 0 && f2.open() > 0 ) { + std::size_t len1 = f1.data_size(); + std::size_t len2 = f2.data_size(); + if ( len1 != len2 ) { + ::lib_rtl_output(LIB_RTL_ERROR, + "Decompressed file and source have different size: %s: %ld %s: %ld", + f1.name().c_str(), len1, f2.name().c_str(), len2); + goto Error; + } + std::size_t err_byte = 0; + char c1[1024], c2[1024]; + for( std::size_t i=0; i<len1; ++i ) { + long s1 = f1.read(c1, sizeof(c1)); + long s2 = f2.read(c2, sizeof(c2)); + if ( s1 > 0 && s1 == s2 && ::memcmp(c1, c2, sizeof(c1)) == 0 ) + continue; + if ( ::memcmp(c1, c2, sizeof(c1)) ) + ++err_byte; + if ( s1 < 0 || s2 < 0 ) + break; + if ( s1 == 0 && s2 == 0 ) + break; + } + if ( err_byte > 0 ) { + ::lib_rtl_output(LIB_RTL_ERROR, + "Content check differs by %ld frames. %s: %d %s: %d", + err_byte, f1.name().c_str(), f1.fileno(), f2.name().c_str(), f2.fileno()); + goto Error; + } + ::lib_rtl_output(LIB_RTL_ALWAYS, + "Content check was successful. %s identical to %s", + f1.name().c_str(), f2.name().c_str()); + return 0; + } + ::lib_rtl_output(LIB_RTL_ERROR, + "Failed to open files for content check: %s: %d %s: %d", + f1.name().c_str(), f1.fileno(), f2.name().c_str(), f2.fileno()); + return ENOENT; + Error: + f1.close(); + f2.close(); + return EINVAL; + } return 0; } ::lib_rtl_output(LIB_RTL_ERROR,"Failed to open output file: %s", output.c_str()); - return EINVAL; + return EPERM; } ::lib_rtl_output(LIB_RTL_ERROR,"Failed to open input file: %s", input.c_str()); - return EINVAL; + return ENOENT; } } @@ -159,7 +213,7 @@ namespace { extern "C" int rtl_test_##x##_compression(int argc, char** argv) { \ return rtl_test_compression(argc, argv, posix_##x##_descriptor()); \ } \ - /* Decompress file using ZLIB algorithm */ \ + /* Decompress file using x algorithm */ \ extern "C" int rtl_test_##x##_decompression(int argc, char** argv) { \ return rtl_test_decompression(argc, argv, posix_##x##_descriptor()); \ } diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_zstd.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_zstd.xml new file mode 100644 index 000000000..66d369edc --- /dev/null +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_zstd.xml @@ -0,0 +1,22 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ + + Run dataflow job in batchbench +--> +<tasks_inventory> + <param type="environment" name="DATA_PREFIX" value="df_output_mdf_zstd_0000"/> + + <include ref="../tasks/MBM.xml"/> + <include ref="../tasks/Passthrough.xml"/> + <include ref="../tasks/StorageFileReader.xml"/> + +</tasks_inventory> diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_zstd_stream.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_zstd_stream.xml new file mode 100644 index 000000000..5df6958c1 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_zstd_stream.xml @@ -0,0 +1,22 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + + + Run dataflow job in batchbench +--> +<tasks_inventory> + <param type="environment" name="DATA_PREFIX" value="df_output_mdf_zstd_0000"/> + + <include ref="../tasks/MBM.xml"/> + <include ref="../tasks/Passthrough.xml"/> + <include ref="../tasks/DF-MDF-Reader.xml"/> + +</tasks_inventory> diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_zstd.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_zstd.xml new file mode 100644 index 000000000..dde01fce2 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_zstd.xml @@ -0,0 +1,22 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + + + Run dataflow job in batchbench +--> +<tasks_inventory> + <param type="environment" name="DATA_PREFIX" value="df_output_tae_zstd_0000"/> + + <include ref="../tasks/MBM.xml"/> + <include ref="../tasks/Passthrough.xml"/> + <include ref="../tasks/StorageFileReader.xml"/> + +</tasks_inventory> diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_zstd_stream.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_zstd_stream.xml new file mode 100644 index 000000000..ee5d1ad03 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_zstd_stream.xml @@ -0,0 +1,22 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". 
+ + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + + + Run dataflow job in batchbench +--> +<tasks_inventory> + <param type="environment" name="DATA_PREFIX" value="df_output_tae_zstd_0000"/> + + <include ref="../tasks/MBM.xml"/> + <include ref="../tasks/Passthrough.xml"/> + <include ref="../tasks/DF-MDF-Reader.xml"/> + +</tasks_inventory> diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_mdf_gzip.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_mdf_gzip.xml index a107e2f20..08c81bcdd 100644 --- a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_mdf_gzip.xml +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_mdf_gzip.xml @@ -14,7 +14,7 @@ --> <tasks_inventory> <param type="environment" name="WRITER_FILE_COMPRESSION" value="1"/> - <param type="environment" name="WRITER_COMPRESSION" value="8"/> + <param type="environment" name="WRITER_COMPRESSION" value="['ZLIB',8]"/> <param type="environment" name="WRITER_FILENAME" value="${DATA_DIR}/df_output_mdf_gzip_$env{RUN}_$env{SEQ}.mdf.gz"/> <include ref="../tasks/MBM.xml"/> diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_mdf_zstd.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_mdf_zstd.xml new file mode 100644 index 000000000..90ea596d6 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_mdf_zstd.xml @@ -0,0 +1,23 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ + + Run dataflow job in batchbench +--> +<tasks_inventory> + <param type="environment" name="WRITER_FILE_COMPRESSION" value="1"/> + <param type="environment" name="WRITER_COMPRESSION" value="['ZSTD',8]"/> + <param type="environment" name="WRITER_FILENAME" value="${DATA_DIR}/df_output_mdf_zstd_$env{RUN}_$env{SEQ}.mdf.zst"/> + + <include ref="../tasks/MBM.xml"/> + <include ref="../tasks/MDFGen.xml"/> + <include ref="../tasks/Writer.xml"/> +</tasks_inventory> diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_tae_gzip.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_tae_gzip.xml index e103fe0df..419267616 100644 --- a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_tae_gzip.xml +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_tae_gzip.xml @@ -18,7 +18,7 @@ <param type="environment" name="GEN_HALF_WINDOW" value="7"/> <param type="environment" name="WRITER_MAX_EVENTS" value="20"/> <param type="environment" name="WRITER_FILE_COMPRESSION" value="1"/> - <param type="environment" name="WRITER_COMPRESSION" value="8"/> + <param type="environment" name="WRITER_COMPRESSION" value="['ZLIB',8]"/> <param type="environment" name="WRITER_FILENAME" value="${DATA_DIR}/df_output_tae_gzip_$env{RUN}_$env{SEQ}.tae.gz"/> <include ref="../tasks/MBM.xml"/> diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_tae_zstd.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_tae_zstd.xml new file mode 100644 index 000000000..20bfe9601 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_tae_zstd.xml @@ -0,0 +1,27 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ + + Run dataflow job in batchbench +--> +<tasks_inventory> + <param type="environment" name="GEN_NUM_BURSTS" value="200"/> + <param type="environment" name="GEN_PACKING" value="1"/> + <param type="environment" name="GEN_HALF_WINDOW" value="7"/> + <param type="environment" name="WRITER_MAX_EVENTS" value="20"/> + <param type="environment" name="WRITER_FILE_COMPRESSION" value="1"/> + <param type="environment" name="WRITER_COMPRESSION" value="['ZSTD',8]"/> + <param type="environment" name="WRITER_FILENAME" value="${DATA_DIR}/df_output_tae_zstd_$env{RUN}_$env{SEQ}.tae.zst"/> + + <include ref="../tasks/MBM.xml"/> + <include ref="../tasks/MDFGen.xml"/> + <include ref="../tasks/Writer.xml"/> +</tasks_inventory> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_05_write_mdf_zstd.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_05_write_mdf_zstd.qmt new file mode 100644 index 000000000..65286fd05 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_05_write_mdf_zstd.qmt @@ -0,0 +1,30 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>run_testbench.sh</text></argument> + <argument name="args"><set> + <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_write_mdf_zstd.xml</text> + <text>--working-dir=df_write_mdf_zstd</text> + <text>--producer=MDFGen</text> + </set></argument> + <argument name="ignore_stderr"/> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> +<argument name="validator"><text> +self.collect_logs(self._common_tmpdir + '/df_write_mdf_zstd/*.log') +self.validate_log('Writer_0.log') +</text></argument> + <argument name="reference"><text>../refs/DF/df_write_mdf_zstd.ref</text></argument> + <argument name="prerequisites"><set><tuple> + <text>cleanup.cleanup</text><enumeral>PASS</enumeral> + </tuple></set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_06_write_tae_zstd.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_06_write_tae_zstd.qmt new file mode 100644 index 000000000..72f0375e4 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_06_write_tae_zstd.qmt @@ -0,0 +1,33 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ + + Run dataflow job in batchbench +--> +<extension class="OnlineTest.Test" kind="test"> +<argument name="program"><text>run_testbench.sh</text></argument> +<argument name="args"><set> + <text>${ONLINETESTBENCH}/testbench.py</text> + <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_write_tae_zstd.xml</text> + <text>--working-dir=df_write_tae_zstd</text> + <text>--producer=MDFGen</text> +</set></argument> +<argument name="ignore_stderr"/> +<argument name="use_temp_dir"><enumeral>true</enumeral></argument> +<argument name="validator"><text> +self.collect_logs(self._common_tmpdir + '/df_write_tae_zstd/*.log') +self.validate_log('Writer_0.log') +</text></argument> + <argument name="reference"><text>../refs/DF/df_write_tae_zstd.ref</text></argument> + <argument name="prerequisites"><set><tuple> + <text>cleanup.cleanup</text><enumeral>PASS</enumeral> + </tuple></set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd.qmt new file mode 100644 index 000000000..d82da83c8 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd.qmt @@ -0,0 +1,35 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ + + Run dataflow job in batchbench +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>run_testbench.sh</text></argument> + <argument name="args"><set> + <text>${ONLINETESTBENCH}/testbench.py</text> + <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_read_mdf_zstd.xml</text> + <text>--working-dir=df_read_mdf_zstd</text> + <text>--producer=StorageFileReader</text> + </set></argument> + <argument name="ignore_stderr"/> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="validator"><text> +self.collect_logs(self._common_tmpdir + '/df_read_mdf_zstd/*.log') +RESULT = '+++ StorageFileReader_0.log \n' + result['StorageFileReader_0.log'] + \ + '+++ MBMMON.log \n' + result['MBMMON.log'] +self.validateWithReference(stdout=RESULT) + </text></argument> + <argument name="reference"><text>../refs/DF/df_read_mdf_zstd.ref</text></argument> + <argument name="prerequisites"><set> + <tuple><text>dataflow.df_05_write_mdf_zstd</text><enumeral>PASS</enumeral></tuple> + </set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd_stream.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd_stream.qmt new file mode 100644 index 000000000..b72eb8ec6 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd_stream.qmt @@ -0,0 +1,35 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ + + Run dataflow job in batchbench +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>run_testbench.sh</text></argument> + <argument name="args"><set> + <text>${ONLINETESTBENCH}/testbench.py</text> + <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_read_mdf_zstd_stream.xml</text> + <text>--working-dir=df_read_mdf_zstd_stream</text> + <text>--producer=StorageFileReader</text> + </set></argument> + <argument name="ignore_stderr"/> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="validator"><text> +self.collect_logs(self._common_tmpdir + '/df_read_mdf_zstd_stream/*.log') +RESULT = '+++ Reader_0.log \n' + result['Reader_0.log'] + \ + '+++ MBMMON.log \n' + result['MBMMON.log'] +self.validateWithReference(stdout=RESULT) + </text></argument> + <argument name="reference"><text>../refs/DF/df_read_mdf_zstd_stream.ref</text></argument> + <argument name="prerequisites"><set> + <tuple><text>dataflow.df_05_write_mdf_zstd</text><enumeral>PASS</enumeral></tuple> + </set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_zstd.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_zstd.qmt new file mode 100644 index 000000000..77aad70db --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_zstd.qmt @@ -0,0 +1,35 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- +(c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + +This software is distributed under the terms of the GNU General Public +Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + +In applying this licence, CERN does not waive the privileges and immunities +granted to it by virtue of its status as an Intergovernmental Organization +or submit itself to any jurisdiction. 
+ + +Run dataflow job in batchbench +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>run_testbench.sh</text></argument> + <argument name="args"><set> + <text>${ONLINETESTBENCH}/testbench.py</text> + <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_read_tae_zstd.xml</text> + <text>--working-dir=df_read_tae_zstd</text> + <text>--producer=StorageFileReader</text> + </set></argument> + <argument name="ignore_stderr"/> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="validator"><text> +self.collect_logs(self._common_tmpdir + '/df_read_tae_zstd/*.log') +RESULT = '+++ StorageFileReader_0.log \n' + result['StorageFileReader_0.log'] + \ + '+++ MBMMON.log \n' + result['MBMMON.log'] +self.validateWithReference(stdout=RESULT) + </text></argument> + <argument name="reference"><text>../refs/DF/df_read_tae_zstd.ref</text></argument> + <argument name="prerequisites"><set> + <tuple><text>dataflow.df_06_write_tae_zstd</text><enumeral>PASS</enumeral></tuple> + </set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_zstd_stream.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_zstd_stream.qmt new file mode 100644 index 000000000..7eac2bd90 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_zstd_stream.qmt @@ -0,0 +1,35 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ + + Run dataflow job in batchbench +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>run_testbench.sh</text></argument> + <argument name="args"><set> + <text>${ONLINETESTBENCH}/testbench.py</text> + <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_read_tae_zstd_stream.xml</text> + <text>--working-dir=df_read_tae_zstd_stream</text> + <text>--producer=StorageFileReader</text> + </set></argument> + <argument name="ignore_stderr"/> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="validator"><text> +self.collect_logs(self._common_tmpdir + '/df_read_tae_zstd_stream/*.log') +RESULT = '+++ Reader_0.log \n' + result['Reader_0.log'] + \ + '+++ MBMMON.log \n' + result['MBMMON.log'] +self.validateWithReference(stdout=RESULT) + </text></argument> + <argument name="reference"><text>../refs/DF/df_read_tae_zstd_stream.ref</text></argument> + <argument name="prerequisites"><set> + <tuple><text>dataflow.df_06_write_tae_zstd</text><enumeral>PASS</enumeral></tuple> + </set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_lzma.qmt b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_lzma.qmt new file mode 100644 index 000000000..71ce9112f --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_lzma.qmt @@ -0,0 +1,27 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>online_test</text></argument> + <argument name="args"><set> + <text>rtl_test_lzma_compression</text> + <text>-input=../../../../InstallArea/${BINARY_TAG}/lib/libProperties.so</text> + <text>-output=rtl_test_compression_lzma.output.xz</text> + <text>-compression=default</text> + <text>-strategy=default</text> + </set></argument> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="reference"><text>../refs/BASE/rtl_test_compression_lzma.ref</text></argument> + <argument name="prerequisites"><set><tuple> + <text>cleanup.cleanup</text><enumeral>PASS</enumeral> + </tuple></set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_zlib.qmt b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_zlib.qmt index 39f3c05ef..1b778c930 100644 --- a/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_zlib.qmt +++ b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_zlib.qmt @@ -13,7 +13,7 @@ <extension class="OnlineTest.Test" kind="test"> <argument name="program"><text>online_test</text></argument> <argument name="args"><set> - <text>rtl_test_zlib_decompression</text> + <text>rtl_test_zlib_compression</text> <text>-input=../../../../InstallArea/${BINARY_TAG}/lib/libProperties.so</text> <text>-output=rtl_test_compression_zlib.output.gz</text> <text>-strategy=default</text> diff --git a/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_zstd.qmt b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_zstd.qmt new file mode 100644 index 000000000..00b0b6dc8 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_zstd.qmt @@ -0,0 +1,26 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>online_test</text></argument> + <argument name="args"><set> + <text>rtl_test_zstd_compression</text> + <text>-input=../../../../InstallArea/${BINARY_TAG}/lib/libProperties.so</text> + <text>-output=rtl_test_compression_zstd.output.zst</text> + <text>-compression=default</text> + </set></argument> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="reference"><text>../refs/BASE/rtl_test_compression_zstd.ref</text></argument> + <argument name="prerequisites"><set><tuple> + <text>cleanup.cleanup</text><enumeral>PASS</enumeral> + </tuple></set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_decompression_lzma.qmt b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_decompression_lzma.qmt new file mode 100644 index 000000000..1f78254f3 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_decompression_lzma.qmt @@ -0,0 +1,26 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>online_test</text></argument> + <argument name="args"><set> + <text>rtl_test_lzma_decompression</text> + <text>-input=rtl_test_compression_lzma.output.xz</text> + <text>-output=rtl_test_compression_lzma.output</text> + <text>-source=../../../../InstallArea/${BINARY_TAG}/lib/libProperties.so</text> + </set></argument> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="reference"><text>../refs/BASE/rtl_test_decompression_lzma.ref</text></argument> + <argument name="prerequisites"><set> + <tuple><text>onlinekernel.rtl_test_compression_lzma</text><enumeral>PASS</enumeral></tuple> + </set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_decompression_zlib.qmt b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_decompression_zlib.qmt index fb59ba02c..3a5ccc3b6 100644 --- a/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_decompression_zlib.qmt +++ b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_decompression_zlib.qmt @@ -16,6 +16,7 @@ <text>rtl_test_zlib_decompression</text> <text>-input=rtl_test_compression_zlib.output.gz</text> <text>-output=rtl_test_compression_zlib.output</text> + <text>-source=../../../../InstallArea/${BINARY_TAG}/lib/libProperties.so</text> </set></argument> <argument name="use_temp_dir"><enumeral>true</enumeral></argument> <argument name="reference"><text>../refs/BASE/rtl_test_decompression_zlib.ref</text></argument> diff --git a/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_decompression_zstd.qmt b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_decompression_zstd.qmt new file mode 100644 index 000000000..be72bc698 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_decompression_zstd.qmt @@ -0,0 
+1,26 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>online_test</text></argument> + <argument name="args"><set> + <text>rtl_test_zstd_decompression</text> + <text>-input=rtl_test_compression_zstd.output.zst</text> + <text>-output=rtl_test_compression_zstd.output</text> + <text>-source=../../../../InstallArea/${BINARY_TAG}/lib/libProperties.so</text> + </set></argument> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="reference"><text>../refs/BASE/rtl_test_decompression_zstd.ref</text></argument> + <argument name="prerequisites"><set> + <tuple><text>onlinekernel.rtl_test_compression_zstd</text><enumeral>PASS</enumeral></tuple> + </set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_lzma.ref b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_lzma.ref new file mode 100644 index 000000000..c8845cae3 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_lzma.ref @@ -0,0 +1,2 @@ +Failed to read input file libProperties.so (iret=0) after a total of 0x######## bytes read. +Compressed input file libProperties.so [0x######## bytes] to rtl_test_compression_lzma.output.xz [ 0x######## uncompressed / 0x######## compressed bytes] diff --git a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_zlib.ref b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_zlib.ref index a3bd49554..31bb97aa9 100644 --- a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_zlib.ref +++ b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_zlib.ref @@ -1,2 +1,2 @@ Failed to read input file libProperties.so (iret=0) after a total of 0x######## bytes read. -Decompressed input file libProperties.so [0x######## uncompressed / 0x######## compressed bytes] to rtl_test_compression_zlib.output.gz with 0x######## bytes. +Compressed input file libProperties.so [0x######## bytes] to rtl_test_compression_zlib.output.gz [ 0x######## uncompressed / 0x######## compressed bytes] diff --git a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_zstd.ref b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_zstd.ref new file mode 100644 index 000000000..1589a6d4f --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_zstd.ref @@ -0,0 +1,2 @@ +Failed to read input file libProperties.so (iret=0) after a total of 0x######## bytes read. 
+Compressed input file libProperties.so [0x######## bytes] to rtl_test_compression_zstd.output.zst [ 0x######## uncompressed / 0x######## compressed bytes] diff --git a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_lzma.ref b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_lzma.ref new file mode 100644 index 000000000..46e5e1638 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_lzma.ref @@ -0,0 +1,3 @@ +Failed to read input file rtl_test_compression_lzma.output.xz (iret=-1) after a total of 0x######## bytes read. +Decompressed input file rtl_test_compression_lzma.output.xz [0x######## uncompressed / 0x######## compressed bytes] to rtl_test_compression_lzma.output with 0x######## bytes. +Content check was successful. ../../../../InstallArea/x86_64_v2-el9-gcc13-do0/lib/libProperties.so identical to rtl_test_compression_lzma.output diff --git a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zlib.ref b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zlib.ref index aff9490b9..ba14af31a 100644 --- a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zlib.ref +++ b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zlib.ref @@ -1,2 +1,3 @@ Failed to read input file rtl_test_compression_zlib.output.gz (iret=0) after a total of 0x######## bytes read. Decompressed input file rtl_test_compression_zlib.output.gz [0x######## uncompressed / 0x######## compressed bytes] to rtl_test_compression_zlib.output with 0x######## bytes. +Content check was successful. ../../../../InstallArea/x86_64_v2-el9-gcc13-do0/lib/libProperties.so identical to rtl_test_compression_zlib.output diff --git a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zstd.ref b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zstd.ref new file mode 100644 index 000000000..f743e3b01 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zstd.ref @@ -0,0 +1,3 @@ +Failed to read input file rtl_test_compression_zstd.output.zst (iret=-1) after a total of 0x######## bytes read. +Decompressed input file rtl_test_compression_zstd.output.zst [0x######## uncompressed / 0x######## compressed bytes] to rtl_test_compression_zstd.output with 0x######## bytes. +Content check was successful. 
../../../../InstallArea/x86_64_v2-el9-gcc13-do0/lib/libProperties.so identical to rtl_test_compression_zstd.output diff --git a/Online/GaudiOnlineTests/tests/refs/CTRL/controller_eval_architecture.ref b/Online/GaudiOnlineTests/tests/refs/CTRL/controller_eval_architecture.ref index fa197ddbc..fd4b87f23 100644 --- a/Online/GaudiOnlineTests/tests/refs/CTRL/controller_eval_architecture.ref +++ b/Online/GaudiOnlineTests/tests/refs/CTRL/controller_eval_architecture.ref @@ -210,6 +210,60 @@ df_read_mdf_hash32.xml | I/O parameters: df_read_mdf_hash32.xml | TMO: any=30 load=200 df_read_mdf_hash32.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 +--------------------------------------------------------------- +| Loading architecture file: "df_read_mdf_zstd.xml" ++--------------------------------------------------------------- +df_read_mdf_zstd.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd.xml | Command:runTask.sh +df_read_mdf_zstd.xml | I/O parameters: +df_read_mdf_zstd.xml | TMO: any=20 load=200 +df_read_mdf_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_read_mdf_zstd.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd.xml | Command:runTask.sh +df_read_mdf_zstd.xml | I/O parameters: +df_read_mdf_zstd.xml | TMO: any=30 load=200 +df_read_mdf_zstd.xml | FMC: define=-D nDF_DEBUG=1 wd=-w ${WORKING_DIR} +df_read_mdf_zstd.xml +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd.xml | Command:runTask.sh +df_read_mdf_zstd.xml | I/O parameters: +df_read_mdf_zstd.xml | TMO: any=30 load=200 +df_read_mdf_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- +| Loading architecture file: "df_read_mdf_zstd.xml~" ++--------------------------------------------------------------- +df_read_mdf_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd.xml~ | Command:runTask.sh +df_read_mdf_zstd.xml~ | I/O parameters: +df_read_mdf_zstd.xml~ | TMO: any=20 load=200 +df_read_mdf_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_read_mdf_zstd.xml~ +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd.xml~ | Command:runTask.sh +df_read_mdf_zstd.xml~ | I/O parameters: +df_read_mdf_zstd.xml~ | TMO: any=30 load=200 +df_read_mdf_zstd.xml~ | FMC: define=-D nDF_DEBUG=1 wd=-w ${WORKING_DIR} +df_read_mdf_zstd.xml~ +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd.xml~ | Command:runTask.sh +df_read_mdf_zstd.xml~ | I/O parameters: +df_read_mdf_zstd.xml~ | TMO: any=30 load=200 +df_read_mdf_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D 
ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- +| Loading architecture file: "df_read_mdf_zstd_stream.xml" ++--------------------------------------------------------------- +df_read_mdf_zstd_stream.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd_stream.xml | Command:runTask.sh +df_read_mdf_zstd_stream.xml | I/O parameters: +df_read_mdf_zstd_stream.xml | TMO: any=20 load=200 +df_read_mdf_zstd_stream.xml | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_read_mdf_zstd_stream.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd_stream.xml | Command:runTask.sh +df_read_mdf_zstd_stream.xml | I/O parameters: +df_read_mdf_zstd_stream.xml | TMO: any=30 load=200 +df_read_mdf_zstd_stream.xml | FMC: define=-D nDF_DEBUG=1 wd=-w ${WORKING_DIR} +df_read_mdf_zstd_stream.xml +-- Task:Reader [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd_stream.xml | Command:runTask.sh +df_read_mdf_zstd_stream.xml | I/O parameters: +df_read_mdf_zstd_stream.xml | TMO: any=30 load=200 +df_read_mdf_zstd_stream.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- | Loading architecture file: "df_read_tae.xml" +--------------------------------------------------------------- df_read_tae.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 @@ -282,6 +336,60 @@ df_read_tae_gzip_stream.xml | I/O parameters: df_read_tae_gzip_stream.xml | TMO: any=30 load=200 df_read_tae_gzip_stream.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 +--------------------------------------------------------------- +| Loading architecture file: "df_read_tae_zstd.xml" ++--------------------------------------------------------------- +df_read_tae_zstd.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd.xml | Command:runTask.sh +df_read_tae_zstd.xml | I/O parameters: +df_read_tae_zstd.xml | TMO: any=20 load=200 +df_read_tae_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_read_tae_zstd.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd.xml | Command:runTask.sh +df_read_tae_zstd.xml | I/O parameters: +df_read_tae_zstd.xml | TMO: any=30 load=200 +df_read_tae_zstd.xml | FMC: define=-D nDF_DEBUG=1 wd=-w ${WORKING_DIR} +df_read_tae_zstd.xml +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd.xml | Command:runTask.sh +df_read_tae_zstd.xml | I/O parameters: +df_read_tae_zstd.xml | TMO: any=30 load=200 +df_read_tae_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 
++--------------------------------------------------------------- +| Loading architecture file: "df_read_tae_zstd.xml~" ++--------------------------------------------------------------- +df_read_tae_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd.xml~ | Command:runTask.sh +df_read_tae_zstd.xml~ | I/O parameters: +df_read_tae_zstd.xml~ | TMO: any=20 load=200 +df_read_tae_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_read_tae_zstd.xml~ +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd.xml~ | Command:runTask.sh +df_read_tae_zstd.xml~ | I/O parameters: +df_read_tae_zstd.xml~ | TMO: any=30 load=200 +df_read_tae_zstd.xml~ | FMC: define=-D nDF_DEBUG=1 wd=-w ${WORKING_DIR} +df_read_tae_zstd.xml~ +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd.xml~ | Command:runTask.sh +df_read_tae_zstd.xml~ | I/O parameters: +df_read_tae_zstd.xml~ | TMO: any=30 load=200 +df_read_tae_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- +| Loading architecture file: "df_read_tae_zstd_stream.xml" ++--------------------------------------------------------------- +df_read_tae_zstd_stream.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd_stream.xml | Command:runTask.sh +df_read_tae_zstd_stream.xml | I/O parameters: +df_read_tae_zstd_stream.xml | TMO: any=20 load=200 +df_read_tae_zstd_stream.xml | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_read_tae_zstd_stream.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd_stream.xml | Command:runTask.sh +df_read_tae_zstd_stream.xml | I/O parameters: +df_read_tae_zstd_stream.xml | TMO: any=30 load=200 +df_read_tae_zstd_stream.xml | FMC: define=-D nDF_DEBUG=1 wd=-w ${WORKING_DIR} +df_read_tae_zstd_stream.xml +-- Task:Reader [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd_stream.xml | Command:runTask.sh +df_read_tae_zstd_stream.xml | I/O parameters: +df_read_tae_zstd_stream.xml | TMO: any=30 load=200 +df_read_tae_zstd_stream.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- | Loading architecture file: "df_write_mdf.xml" +--------------------------------------------------------------- df_write_mdf.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 @@ -390,6 +498,42 @@ df_write_mdf_hash32.xml | I/O parameters: df_write_mdf_hash32.xml | TMO: any=30 load=200 df_write_mdf_hash32.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D 
ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 +--------------------------------------------------------------- +| Loading architecture file: "df_write_mdf_zstd.xml" ++--------------------------------------------------------------- +df_write_mdf_zstd.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_write_mdf_zstd.xml | Command:runTask.sh +df_write_mdf_zstd.xml | I/O parameters: +df_write_mdf_zstd.xml | TMO: any=20 load=200 +df_write_mdf_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_write_mdf_zstd.xml +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 +df_write_mdf_zstd.xml | Command:runTask.sh +df_write_mdf_zstd.xml | I/O parameters: +df_write_mdf_zstd.xml | TMO: any=30 load=200 +df_write_mdf_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) define=-D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) define=-D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) define=-D nDF_DEBUG=1 +df_write_mdf_zstd.xml +-- Task:Writer [online,onliners] VIP: false Instances: 1 +df_write_mdf_zstd.xml | Command:runTask.sh +df_write_mdf_zstd.xml | I/O parameters: +df_write_mdf_zstd.xml | TMO: any=30 load=200 +df_write_mdf_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- +| Loading architecture file: "df_write_mdf_zstd.xml~" ++--------------------------------------------------------------- +df_write_mdf_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_write_mdf_zstd.xml~ | Command:runTask.sh +df_write_mdf_zstd.xml~ | I/O parameters: +df_write_mdf_zstd.xml~ | TMO: any=20 load=200 +df_write_mdf_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_write_mdf_zstd.xml~ +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 +df_write_mdf_zstd.xml~ | Command:runTask.sh +df_write_mdf_zstd.xml~ | I/O parameters: +df_write_mdf_zstd.xml~ | TMO: any=30 load=200 +df_write_mdf_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) define=-D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) define=-D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) define=-D nDF_DEBUG=1 +df_write_mdf_zstd.xml~ +-- Task:Writer [online,onliners] VIP: false Instances: 1 +df_write_mdf_zstd.xml~ | Command:runTask.sh +df_write_mdf_zstd.xml~ | I/O parameters: +df_write_mdf_zstd.xml~ | TMO: any=30 load=200 +df_write_mdf_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- | Loading architecture file: "df_write_tae.xml" +--------------------------------------------------------------- df_write_tae.xml +-- Task:MBM 
[online,onliners] VIP: false Instances: 1 @@ -443,3 +587,39 @@ df_write_tae_gzip.xml | Command:runTask.sh df_write_tae_gzip.xml | I/O parameters: df_write_tae_gzip.xml | TMO: any=30 load=200 df_write_tae_gzip.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- +| Loading architecture file: "df_write_tae_zstd.xml" ++--------------------------------------------------------------- +df_write_tae_zstd.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_write_tae_zstd.xml | Command:runTask.sh +df_write_tae_zstd.xml | I/O parameters: +df_write_tae_zstd.xml | TMO: any=20 load=200 +df_write_tae_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_write_tae_zstd.xml +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 +df_write_tae_zstd.xml | Command:runTask.sh +df_write_tae_zstd.xml | I/O parameters: +df_write_tae_zstd.xml | TMO: any=30 load=200 +df_write_tae_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) define=-D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) define=-D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) define=-D nDF_DEBUG=1 +df_write_tae_zstd.xml +-- Task:Writer [online,onliners] VIP: false Instances: 1 +df_write_tae_zstd.xml | Command:runTask.sh +df_write_tae_zstd.xml | I/O parameters: +df_write_tae_zstd.xml | TMO: any=30 load=200 +df_write_tae_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- +| Loading architecture file: "df_write_tae_zstd.xml~" ++--------------------------------------------------------------- +df_write_tae_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_write_tae_zstd.xml~ | Command:runTask.sh +df_write_tae_zstd.xml~ | I/O parameters: +df_write_tae_zstd.xml~ | TMO: any=20 load=200 +df_write_tae_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_write_tae_zstd.xml~ +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 +df_write_tae_zstd.xml~ | Command:runTask.sh +df_write_tae_zstd.xml~ | I/O parameters: +df_write_tae_zstd.xml~ | TMO: any=30 load=200 +df_write_tae_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) define=-D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) define=-D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) define=-D nDF_DEBUG=1 +df_write_tae_zstd.xml~ +-- Task:Writer [online,onliners] VIP: false Instances: 1 +df_write_tae_zstd.xml~ | Command:runTask.sh +df_write_tae_zstd.xml~ | I/O parameters: +df_write_tae_zstd.xml~ | TMO: any=30 load=200 +df_write_tae_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D 
ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 diff --git a/Online/GaudiOnlineTests/tests/refs/CTRL/controller_read_architecture_xml.ref b/Online/GaudiOnlineTests/tests/refs/CTRL/controller_read_architecture_xml.ref index 62a0a1575..9612c47f1 100644 --- a/Online/GaudiOnlineTests/tests/refs/CTRL/controller_read_architecture_xml.ref +++ b/Online/GaudiOnlineTests/tests/refs/CTRL/controller_read_architecture_xml.ref @@ -382,6 +382,105 @@ df_read_mdf_hash32.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WO df_read_mdf_hash32.xml +----------------------------------------------------------------------------------- df_read_mdf_hash32.xml +----------------------------------------------------------------------------------- +--------------------------------------------------------------- +| Loading architecture file: "df_read_mdf_zstd.xml" ++--------------------------------------------------------------- +df_read_mdf_zstd.xml | Include: ../tasks/MBM.xml +df_read_mdf_zstd.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd.xml | Command:runTask.sh +| IO: df_read_mdf_zstd.xml | I/O parameters: +df_read_mdf_zstd.xml | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_read_mdf_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_read_mdf_zstd.xml +----------------------------------------------------------------------------------- +df_read_mdf_zstd.xml +----------------------------------------------------------------------------------- +df_read_mdf_zstd.xml | Include: ../tasks/Passthrough.xml +df_read_mdf_zstd.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd.xml | Command:runTask.sh +| IO: df_read_mdf_zstd.xml | I/O parameters: +df_read_mdf_zstd.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_mdf_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -D nDF_DEBUG=1 -w ${WORKING_DIR} +df_read_mdf_zstd.xml +----------------------------------------------------------------------------------- +df_read_mdf_zstd.xml +----------------------------------------------------------------------------------- +df_read_mdf_zstd.xml | Include: ../tasks/StorageFileReader.xml +df_read_mdf_zstd.xml +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd.xml | Command:runTask.sh +| IO: df_read_mdf_zstd.xml | I/O parameters: +df_read_mdf_zstd.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_mdf_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG1=directory=[os.getenv('DATA_DIR')] -D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') -D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) -D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) -D ARG5=,checksum=os.getenv('DATA_CHECKSUM') -D noDF_DEBUG=1 +df_read_mdf_zstd.xml +----------------------------------------------------------------------------------- +df_read_mdf_zstd.xml +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- +| Loading architecture file: "df_read_mdf_zstd.xml~" ++--------------------------------------------------------------- +df_read_mdf_zstd.xml~ | Include: 
../tasks/MBM.xml +df_read_mdf_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd.xml~ | Command:runTask.sh +| IO: df_read_mdf_zstd.xml~ | I/O parameters: +df_read_mdf_zstd.xml~ | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_read_mdf_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_read_mdf_zstd.xml~ +----------------------------------------------------------------------------------- +df_read_mdf_zstd.xml~ +----------------------------------------------------------------------------------- +df_read_mdf_zstd.xml~ | Include: ../tasks/Passthrough.xml +df_read_mdf_zstd.xml~ +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd.xml~ | Command:runTask.sh +| IO: df_read_mdf_zstd.xml~ | I/O parameters: +df_read_mdf_zstd.xml~ | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_mdf_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -D nDF_DEBUG=1 -w ${WORKING_DIR} +df_read_mdf_zstd.xml~ +----------------------------------------------------------------------------------- +df_read_mdf_zstd.xml~ +----------------------------------------------------------------------------------- +df_read_mdf_zstd.xml~ | Include: ../tasks/StorageFileReader.xml +df_read_mdf_zstd.xml~ +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd.xml~ | Command:runTask.sh +| IO: df_read_mdf_zstd.xml~ | I/O parameters: +df_read_mdf_zstd.xml~ | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_mdf_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG1=directory=[os.getenv('DATA_DIR')] -D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') -D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) -D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) -D ARG5=,checksum=os.getenv('DATA_CHECKSUM') -D noDF_DEBUG=1 +df_read_mdf_zstd.xml~ +----------------------------------------------------------------------------------- +df_read_mdf_zstd.xml~ +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- +| Loading architecture file: "df_read_mdf_zstd_stream.xml" ++--------------------------------------------------------------- +df_read_mdf_zstd_stream.xml | Include: ../tasks/MBM.xml +df_read_mdf_zstd_stream.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd_stream.xml | Command:runTask.sh +| IO: df_read_mdf_zstd_stream.xml | I/O parameters: +df_read_mdf_zstd_stream.xml | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_read_mdf_zstd_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_read_mdf_zstd_stream.xml +----------------------------------------------------------------------------------- +df_read_mdf_zstd_stream.xml +----------------------------------------------------------------------------------- +df_read_mdf_zstd_stream.xml | Include: ../tasks/Passthrough.xml +df_read_mdf_zstd_stream.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd_stream.xml | Command:runTask.sh +| IO: df_read_mdf_zstd_stream.xml | I/O parameters: +df_read_mdf_zstd_stream.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_mdf_zstd_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -D nDF_DEBUG=1 -w ${WORKING_DIR} +df_read_mdf_zstd_stream.xml +----------------------------------------------------------------------------------- +df_read_mdf_zstd_stream.xml 
+----------------------------------------------------------------------------------- +df_read_mdf_zstd_stream.xml | Include: ../tasks/DF-MDF-Reader.xml +df_read_mdf_zstd_stream.xml +-- Task:Reader [online,onliners] VIP: false Instances: 1 +df_read_mdf_zstd_stream.xml | Command:runTask.sh +| IO: df_read_mdf_zstd_stream.xml | I/O parameters: +df_read_mdf_zstd_stream.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_mdf_zstd_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG1=directory=[os.getenv('DATA_DIR')] -D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') -D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) -D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) -D ARG5=,checksum=os.getenv('DATA_CHECKSUM') -D noDF_DEBUG=1 +df_read_mdf_zstd_stream.xml +----------------------------------------------------------------------------------- +df_read_mdf_zstd_stream.xml +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- | Loading architecture file: "df_read_tae.xml" +--------------------------------------------------------------- df_read_tae.xml | Include: ../tasks/MBM.xml @@ -514,6 +613,105 @@ df_read_tae_gzip_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w df_read_tae_gzip_stream.xml +----------------------------------------------------------------------------------- df_read_tae_gzip_stream.xml +----------------------------------------------------------------------------------- +--------------------------------------------------------------- +| Loading architecture file: "df_read_tae_zstd.xml" ++--------------------------------------------------------------- +df_read_tae_zstd.xml | Include: ../tasks/MBM.xml +df_read_tae_zstd.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd.xml | Command:runTask.sh +| IO: df_read_tae_zstd.xml | I/O parameters: +df_read_tae_zstd.xml | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_read_tae_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_read_tae_zstd.xml +----------------------------------------------------------------------------------- +df_read_tae_zstd.xml +----------------------------------------------------------------------------------- +df_read_tae_zstd.xml | Include: ../tasks/Passthrough.xml +df_read_tae_zstd.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd.xml | Command:runTask.sh +| IO: df_read_tae_zstd.xml | I/O parameters: +df_read_tae_zstd.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_tae_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -D nDF_DEBUG=1 -w ${WORKING_DIR} +df_read_tae_zstd.xml +----------------------------------------------------------------------------------- +df_read_tae_zstd.xml +----------------------------------------------------------------------------------- +df_read_tae_zstd.xml | Include: ../tasks/StorageFileReader.xml +df_read_tae_zstd.xml +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd.xml | Command:runTask.sh +| IO: df_read_tae_zstd.xml | I/O parameters: +df_read_tae_zstd.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_tae_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG1=directory=[os.getenv('DATA_DIR')] -D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') -D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) -D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) -D 
ARG5=,checksum=os.getenv('DATA_CHECKSUM') -D noDF_DEBUG=1 +df_read_tae_zstd.xml +----------------------------------------------------------------------------------- +df_read_tae_zstd.xml +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- +| Loading architecture file: "df_read_tae_zstd.xml~" ++--------------------------------------------------------------- +df_read_tae_zstd.xml~ | Include: ../tasks/MBM.xml +df_read_tae_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd.xml~ | Command:runTask.sh +| IO: df_read_tae_zstd.xml~ | I/O parameters: +df_read_tae_zstd.xml~ | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_read_tae_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_read_tae_zstd.xml~ +----------------------------------------------------------------------------------- +df_read_tae_zstd.xml~ +----------------------------------------------------------------------------------- +df_read_tae_zstd.xml~ | Include: ../tasks/Passthrough.xml +df_read_tae_zstd.xml~ +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd.xml~ | Command:runTask.sh +| IO: df_read_tae_zstd.xml~ | I/O parameters: +df_read_tae_zstd.xml~ | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_tae_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -D nDF_DEBUG=1 -w ${WORKING_DIR} +df_read_tae_zstd.xml~ +----------------------------------------------------------------------------------- +df_read_tae_zstd.xml~ +----------------------------------------------------------------------------------- +df_read_tae_zstd.xml~ | Include: ../tasks/StorageFileReader.xml +df_read_tae_zstd.xml~ +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd.xml~ | Command:runTask.sh +| IO: df_read_tae_zstd.xml~ | I/O parameters: +df_read_tae_zstd.xml~ | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_tae_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG1=directory=[os.getenv('DATA_DIR')] -D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') -D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) -D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) -D ARG5=,checksum=os.getenv('DATA_CHECKSUM') -D noDF_DEBUG=1 +df_read_tae_zstd.xml~ +----------------------------------------------------------------------------------- +df_read_tae_zstd.xml~ +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- +| Loading architecture file: "df_read_tae_zstd_stream.xml" ++--------------------------------------------------------------- +df_read_tae_zstd_stream.xml | Include: ../tasks/MBM.xml +df_read_tae_zstd_stream.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd_stream.xml | Command:runTask.sh +| IO: df_read_tae_zstd_stream.xml | I/O parameters: +df_read_tae_zstd_stream.xml | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_read_tae_zstd_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_read_tae_zstd_stream.xml +----------------------------------------------------------------------------------- +df_read_tae_zstd_stream.xml +----------------------------------------------------------------------------------- +df_read_tae_zstd_stream.xml | Include: ../tasks/Passthrough.xml +df_read_tae_zstd_stream.xml +-- Task:Passthrough 
[online,onliners] VIP: false Instances: 1 +df_read_tae_zstd_stream.xml | Command:runTask.sh +| IO: df_read_tae_zstd_stream.xml | I/O parameters: +df_read_tae_zstd_stream.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_tae_zstd_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -D nDF_DEBUG=1 -w ${WORKING_DIR} +df_read_tae_zstd_stream.xml +----------------------------------------------------------------------------------- +df_read_tae_zstd_stream.xml +----------------------------------------------------------------------------------- +df_read_tae_zstd_stream.xml | Include: ../tasks/DF-MDF-Reader.xml +df_read_tae_zstd_stream.xml +-- Task:Reader [online,onliners] VIP: false Instances: 1 +df_read_tae_zstd_stream.xml | Command:runTask.sh +| IO: df_read_tae_zstd_stream.xml | I/O parameters: +df_read_tae_zstd_stream.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_tae_zstd_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG1=directory=[os.getenv('DATA_DIR')] -D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') -D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) -D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) -D ARG5=,checksum=os.getenv('DATA_CHECKSUM') -D noDF_DEBUG=1 +df_read_tae_zstd_stream.xml +----------------------------------------------------------------------------------- +df_read_tae_zstd_stream.xml +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- | Loading architecture file: "df_write_mdf.xml" +--------------------------------------------------------------- df_write_mdf.xml | Include: ../tasks/MBM.xml @@ -712,6 +910,72 @@ df_write_mdf_hash32.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${W df_write_mdf_hash32.xml +----------------------------------------------------------------------------------- df_write_mdf_hash32.xml +----------------------------------------------------------------------------------- +--------------------------------------------------------------- +| Loading architecture file: "df_write_mdf_zstd.xml" ++--------------------------------------------------------------- +df_write_mdf_zstd.xml | Include: ../tasks/MBM.xml +df_write_mdf_zstd.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_write_mdf_zstd.xml | Command:runTask.sh +| IO: df_write_mdf_zstd.xml | I/O parameters: +df_write_mdf_zstd.xml | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_write_mdf_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_write_mdf_zstd.xml +----------------------------------------------------------------------------------- +df_write_mdf_zstd.xml +----------------------------------------------------------------------------------- +df_write_mdf_zstd.xml | Include: ../tasks/MDFGen.xml +df_write_mdf_zstd.xml +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 +df_write_mdf_zstd.xml | Command:runTask.sh +| IO: df_write_mdf_zstd.xml | I/O parameters: +df_write_mdf_zstd.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_write_mdf_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) -D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) -D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) -D nDF_DEBUG=1 +df_write_mdf_zstd.xml +----------------------------------------------------------------------------------- +df_write_mdf_zstd.xml 
+----------------------------------------------------------------------------------- +df_write_mdf_zstd.xml | Include: ../tasks/Writer.xml +df_write_mdf_zstd.xml +-- Task:Writer [online,onliners] VIP: false Instances: 1 +df_write_mdf_zstd.xml | Command:runTask.sh +| IO: df_write_mdf_zstd.xml | I/O parameters: +df_write_mdf_zstd.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_write_mdf_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=os.getenv('WRITER_FILENAME') -D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) -D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) -D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) -D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') -D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) -D noDF_DEBUG=1 +df_write_mdf_zstd.xml +----------------------------------------------------------------------------------- +df_write_mdf_zstd.xml +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- +| Loading architecture file: "df_write_mdf_zstd.xml~" ++--------------------------------------------------------------- +df_write_mdf_zstd.xml~ | Include: ../tasks/MBM.xml +df_write_mdf_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_write_mdf_zstd.xml~ | Command:runTask.sh +| IO: df_write_mdf_zstd.xml~ | I/O parameters: +df_write_mdf_zstd.xml~ | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_write_mdf_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_write_mdf_zstd.xml~ +----------------------------------------------------------------------------------- +df_write_mdf_zstd.xml~ +----------------------------------------------------------------------------------- +df_write_mdf_zstd.xml~ | Include: ../tasks/MDFGen.xml +df_write_mdf_zstd.xml~ +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 +df_write_mdf_zstd.xml~ | Command:runTask.sh +| IO: df_write_mdf_zstd.xml~ | I/O parameters: +df_write_mdf_zstd.xml~ | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_write_mdf_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) -D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) -D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) -D nDF_DEBUG=1 +df_write_mdf_zstd.xml~ +----------------------------------------------------------------------------------- +df_write_mdf_zstd.xml~ +----------------------------------------------------------------------------------- +df_write_mdf_zstd.xml~ | Include: ../tasks/Writer.xml +df_write_mdf_zstd.xml~ +-- Task:Writer [online,onliners] VIP: false Instances: 1 +df_write_mdf_zstd.xml~ | Command:runTask.sh +| IO: df_write_mdf_zstd.xml~ | I/O parameters: +df_write_mdf_zstd.xml~ | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_write_mdf_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=os.getenv('WRITER_FILENAME') -D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) -D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) -D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) -D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') -D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) -D noDF_DEBUG=1 +df_write_mdf_zstd.xml~ +----------------------------------------------------------------------------------- +df_write_mdf_zstd.xml~ 
+----------------------------------------------------------------------------------- ++--------------------------------------------------------------- | Loading architecture file: "df_write_tae.xml" +--------------------------------------------------------------- df_write_tae.xml | Include: ../tasks/MBM.xml @@ -810,3 +1074,69 @@ df_write_tae_gzip.xml | TMO: df_write_tae_gzip.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=os.getenv('WRITER_FILENAME') -D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) -D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) -D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) -D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') -D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) -D noDF_DEBUG=1 df_write_tae_gzip.xml +----------------------------------------------------------------------------------- df_write_tae_gzip.xml +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- +| Loading architecture file: "df_write_tae_zstd.xml" ++--------------------------------------------------------------- +df_write_tae_zstd.xml | Include: ../tasks/MBM.xml +df_write_tae_zstd.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_write_tae_zstd.xml | Command:runTask.sh +| IO: df_write_tae_zstd.xml | I/O parameters: +df_write_tae_zstd.xml | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_write_tae_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_write_tae_zstd.xml +----------------------------------------------------------------------------------- +df_write_tae_zstd.xml +----------------------------------------------------------------------------------- +df_write_tae_zstd.xml | Include: ../tasks/MDFGen.xml +df_write_tae_zstd.xml +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 +df_write_tae_zstd.xml | Command:runTask.sh +| IO: df_write_tae_zstd.xml | I/O parameters: +df_write_tae_zstd.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_write_tae_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) -D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) -D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) -D nDF_DEBUG=1 +df_write_tae_zstd.xml +----------------------------------------------------------------------------------- +df_write_tae_zstd.xml +----------------------------------------------------------------------------------- +df_write_tae_zstd.xml | Include: ../tasks/Writer.xml +df_write_tae_zstd.xml +-- Task:Writer [online,onliners] VIP: false Instances: 1 +df_write_tae_zstd.xml | Command:runTask.sh +| IO: df_write_tae_zstd.xml | I/O parameters: +df_write_tae_zstd.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_write_tae_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=os.getenv('WRITER_FILENAME') -D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) -D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) -D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) -D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') -D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) -D noDF_DEBUG=1 +df_write_tae_zstd.xml +----------------------------------------------------------------------------------- +df_write_tae_zstd.xml +----------------------------------------------------------------------------------- 
++--------------------------------------------------------------- +| Loading architecture file: "df_write_tae_zstd.xml~" ++--------------------------------------------------------------- +df_write_tae_zstd.xml~ | Include: ../tasks/MBM.xml +df_write_tae_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_write_tae_zstd.xml~ | Command:runTask.sh +| IO: df_write_tae_zstd.xml~ | I/O parameters: +df_write_tae_zstd.xml~ | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_write_tae_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_write_tae_zstd.xml~ +----------------------------------------------------------------------------------- +df_write_tae_zstd.xml~ +----------------------------------------------------------------------------------- +df_write_tae_zstd.xml~ | Include: ../tasks/MDFGen.xml +df_write_tae_zstd.xml~ +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 +df_write_tae_zstd.xml~ | Command:runTask.sh +| IO: df_write_tae_zstd.xml~ | I/O parameters: +df_write_tae_zstd.xml~ | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_write_tae_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) -D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) -D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) -D nDF_DEBUG=1 +df_write_tae_zstd.xml~ +----------------------------------------------------------------------------------- +df_write_tae_zstd.xml~ +----------------------------------------------------------------------------------- +df_write_tae_zstd.xml~ | Include: ../tasks/Writer.xml +df_write_tae_zstd.xml~ +-- Task:Writer [online,onliners] VIP: false Instances: 1 +df_write_tae_zstd.xml~ | Command:runTask.sh +| IO: df_write_tae_zstd.xml~ | I/O parameters: +df_write_tae_zstd.xml~ | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_write_tae_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=os.getenv('WRITER_FILENAME') -D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) -D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) -D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) -D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') -D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) -D noDF_DEBUG=1 +df_write_tae_zstd.xml~ +----------------------------------------------------------------------------------- +df_write_tae_zstd.xml~ +----------------------------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_zstd.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_zstd.ref new file mode 100644 index 000000000..19523cfa2 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_zstd.ref @@ -0,0 +1,82 @@ ++++ StorageFileReader_0.log +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ UTGID: ##ID##_StorageFileReader_0 +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ DIM_DNS port:#### +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ DIM_DNS node:localhost +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ OPTIONS: GaudiOnlineTests.DF +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ TASK_TYPE: StorageFileReader +####-##-## TIME-### INFO: Loading library: libDataflowDict +####-##-## TIME-### INFO: Import Online namespace from ROOT +####-##-## TIME-### INFO: +++ Reader directory: ['../data'] file prefix: 'df_output_mdf_zstd_0000' +####-##-## TIME-### INFO: +++ Setting up 
output logger component +####-##-## TIME-### INFO: +++ Starting dataflow process +####-##-## TIME-### Reader INFO Scanned directory ../data: 10 files. +####-##-## TIME-### Delay INFO +++ Got incident DAQ_RUNNING: trigger auto transition to PAUSED after n seconds +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_zstd_0000012345_0001.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_zstd_0000012345_0001.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_zstd_0000012345_0002.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_zstd_0000012345_0002.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_zstd_0000012345_0003.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_zstd_0000012345_0003.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_zstd_0000012345_0004.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_zstd_0000012345_0004.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_zstd_0000012345_0005.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_zstd_0000012345_0005.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_zstd_0000012345_0006.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_zstd_0000012345_0006.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_zstd_0000012345_0007.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_zstd_0000012345_0007.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_zstd_0000012345_0008.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_zstd_0000012345_0008.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_zstd_0000012345_0009.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_zstd_0000012345_0009.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_zstd_0000012345_0010.mdf.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_zstd_0000012345_0010.mdf.zst [0 MB] +####-##-## TIME-### Reader INFO Locking event loop. Waiting for work.... +####-##-## TIME-### Delay INFO +++ Timeout expired: Invoke auto transition to PAUSED. +####-##-## TIME-### Summary INFO (pause) +++++ MBM buffer section sucessfully mapped. 
+####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Buffer "Events_##ID##" Events: Produced:100 Seen:100 Pending:0 Max:50 +####-##-## TIME-### Summary INFO (pause) Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Name Partition Type State Produced %prod #seen %seen Reqs Buffer +####-##-## TIME-### Summary INFO (pause) ##ID##_StorageFileReader_0 feed P 100 100 +####-##-## TIME-### Summary INFO (pause) CONS_ONE feed C 100 100 1 Events_##ID## +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Buffer "Output_##ID##" Events: Produced:100 Seen:0 Pending:0 Max:50 +####-##-## TIME-### Summary INFO (pause) Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Name Partition Type State Produced %prod #seen %seen Reqs Buffer +####-##-## TIME-### Summary INFO (pause) PROD_ONE feed P 100 100 Output_##ID## +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) +++++ MBM summary finished. +####-##-## TIME-### Reader INFO Executed cancellation of pending I/O requests. +####-##-## TIME-### MBM INFO Excluding from buffers. No more buffer access possible. ++++ MBMMON.log ++---------------------------------------------------------------------------------------------------- +| Buffer Manager summary [ (.*)_##ID## ] ++---------------------------------------------------------------------------------------------------- +| +++++ MBM buffer section sucessfully mapped. +| +| ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +| +| Buffer "Events_##ID##" Events: Produced:100 Seen:100 Pending:0 Max:50 +| Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| ##ID##_StorageFileReader_0 feed P #MBM# 100 100 +| CONS_ONE feed C 100 100 1 Events_##ID## +| +| ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +| +| Buffer "Output_##ID##" Events: Produced:100 Seen:0 Pending:0 Max:50 +| Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| PROD_ONE feed P 100 100 Output_##ID## +| +| +++++ MBM summary finished. 
++---------------------------------------------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_zstd_stream.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_zstd_stream.ref new file mode 100644 index 000000000..9bdc9c094 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_zstd_stream.ref @@ -0,0 +1,65 @@ ++++ Reader_0.log +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ UTGID: ##ID##_Reader_0 +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ DIM_DNS port:#### +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ DIM_DNS node:localhost +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ OPTIONS: GaudiOnlineTests.DF +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ TASK_TYPE: Reader +####-##-## TIME-### INFO: Loading library: libDataflowDict +####-##-## TIME-### INFO: Import Online namespace from ROOT +####-##-## TIME-### INFO: +++ Reader directory: ['../data'] file prefix: 'df_output_mdf_zstd_0000' +####-##-## TIME-### INFO: +++ Setting up output logger component +####-##-## TIME-### INFO: +++ Starting dataflow process +####-##-## TIME-### Reader INFO Scanned directory ../data: 10 files. +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0001.mdf.zst +####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0001.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0002.mdf.zst +####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0002.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0003.mdf.zst +####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0003.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0004.mdf.zst +####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0004.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0005.mdf.zst +####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0005.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0006.mdf.zst +####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0006.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0007.mdf.zst +####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0007.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0008.mdf.zst +####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0008.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0009.mdf.zst +####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0009.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0010.mdf.zst +####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0010.mdf.zst: ZSTD FAILED to read 131075 bytes. 
[Read error: Success] +####-##-## TIME-### Reader INFO Rescanning directory list..... +####-##-## TIME-### Reader INFO Scanning directory list disabled! +####-##-## TIME-### Reader INFO Scanning directory list disabled! +####-##-## TIME-### Reader INFO Scanning directory list disabled! +####-##-## TIME-### Reader INFO Sleeping before going to PAUSE.... +####-##-## TIME-### Reader INFO Executed cancellation of pending I/O requests. +####-##-## TIME-### Reader INFO Quitting... +####-##-## TIME-### MBM INFO Excluding from buffers. No more buffer access possible. ++++ MBMMON.log ++---------------------------------------------------------------------------------------------------- +| Buffer Manager summary [ (.*)_##ID## ] ++---------------------------------------------------------------------------------------------------- +| +++++ MBM buffer section sucessfully mapped. +| +| ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +| +| Buffer "Events_##ID##" Events: Produced:1000 Seen:1000 Pending:0 Max:50 +| Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| ##ID##_Reader_0 feed P #MBM# 1000 100 +| CONS_ONE feed C 1000 100 1 Events_##ID## +| +| ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +| +| Buffer "Output_##ID##" Events: Produced:1000 Seen:0 Pending:0 Max:50 +| Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| PROD_ONE feed P 1000 100 Output_##ID## +| +| +++++ MBM summary finished. ++---------------------------------------------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_zstd.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_zstd.ref new file mode 100644 index 000000000..df1db0467 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_zstd.ref @@ -0,0 +1,82 @@ ++++ StorageFileReader_0.log +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ UTGID: ##ID##_StorageFileReader_0 +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ DIM_DNS port:#### +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ DIM_DNS node:localhost +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ OPTIONS: GaudiOnlineTests.DF +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ TASK_TYPE: StorageFileReader +####-##-## TIME-### INFO: Loading library: libDataflowDict +####-##-## TIME-### INFO: Import Online namespace from ROOT +####-##-## TIME-### INFO: +++ Reader directory: ['../data'] file prefix: 'df_output_tae_zstd_0000' +####-##-## TIME-### INFO: +++ Setting up output logger component +####-##-## TIME-### INFO: +++ Starting dataflow process +####-##-## TIME-### Reader INFO Scanned directory ../data: 10 files. 
+####-##-## TIME-### Delay INFO +++ Got incident DAQ_RUNNING: trigger auto transition to PAUSED after n seconds +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_zstd_0000012345_0001.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_zstd_0000012345_0001.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_zstd_0000012345_0002.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_zstd_0000012345_0002.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_zstd_0000012345_0003.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_zstd_0000012345_0003.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_zstd_0000012345_0004.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_zstd_0000012345_0004.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_zstd_0000012345_0005.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_zstd_0000012345_0005.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_zstd_0000012345_0006.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_zstd_0000012345_0006.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_zstd_0000012345_0007.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_zstd_0000012345_0007.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_zstd_0000012345_0008.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_zstd_0000012345_0008.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_zstd_0000012345_0009.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_zstd_0000012345_0009.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_zstd_0000012345_0010.tae.zst [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_zstd_0000012345_0010.tae.zst [0 MB] +####-##-## TIME-### Reader INFO Locking event loop. Waiting for work.... +####-##-## TIME-### Delay INFO +++ Timeout expired: Invoke auto transition to PAUSED. +####-##-## TIME-### Summary INFO (pause) +++++ MBM buffer section sucessfully mapped. 
+####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Buffer "Events_##ID##" Events: Produced:20 Seen:20 Pending:0 Max:50 +####-##-## TIME-### Summary INFO (pause) Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Name Partition Type State Produced %prod #seen %seen Reqs Buffer +####-##-## TIME-### Summary INFO (pause) ##ID##_StorageFileReader_0 feed P 20 100 +####-##-## TIME-### Summary INFO (pause) CONS_ONE feed C 20 100 1 Events_##ID## +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Buffer "Output_##ID##" Events: Produced:20 Seen:0 Pending:0 Max:50 +####-##-## TIME-### Summary INFO (pause) Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Name Partition Type State Produced %prod #seen %seen Reqs Buffer +####-##-## TIME-### Summary INFO (pause) PROD_ONE feed P 20 100 Output_##ID## +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) +++++ MBM summary finished. +####-##-## TIME-### Reader INFO Executed cancellation of pending I/O requests. +####-##-## TIME-### MBM INFO Excluding from buffers. No more buffer access possible. ++++ MBMMON.log ++---------------------------------------------------------------------------------------------------- +| Buffer Manager summary [ (.*)_##ID## ] ++---------------------------------------------------------------------------------------------------- +| +++++ MBM buffer section sucessfully mapped. +| +| ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +| +| Buffer "Events_##ID##" Events: Produced:20 Seen:20 Pending:0 Max:50 +| Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| ##ID##_StorageFileReader_0 feed P #MBM# 20 100 +| CONS_ONE feed C 20 100 1 Events_##ID## +| +| ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +| +| Buffer "Output_##ID##" Events: Produced:20 Seen:0 Pending:0 Max:50 +| Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| PROD_ONE feed P 20 100 Output_##ID## +| +| +++++ MBM summary finished. 
++---------------------------------------------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_zstd_stream.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_zstd_stream.ref new file mode 100644 index 000000000..1d7b422c0 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_zstd_stream.ref @@ -0,0 +1,65 @@ ++++ Reader_0.log +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ UTGID: ##ID##_Reader_0 +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ DIM_DNS port:#### +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ DIM_DNS node:localhost +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ OPTIONS: GaudiOnlineTests.DF +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ TASK_TYPE: Reader +####-##-## TIME-### INFO: Loading library: libDataflowDict +####-##-## TIME-### INFO: Import Online namespace from ROOT +####-##-## TIME-### INFO: +++ Reader directory: ['../data'] file prefix: 'df_output_tae_zstd_0000' +####-##-## TIME-### INFO: +++ Setting up output logger component +####-##-## TIME-### INFO: +++ Starting dataflow process +####-##-## TIME-### Reader INFO Scanned directory ../data: 10 files. +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0001.tae.zst +####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0001.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0002.tae.zst +####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0002.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0003.tae.zst +####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0003.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0004.tae.zst +####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0004.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0005.tae.zst +####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0005.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0006.tae.zst +####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0006.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0007.tae.zst +####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0007.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0008.tae.zst +####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0008.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0009.tae.zst +####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0009.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0010.tae.zst +####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0010.tae.zst: ZSTD FAILED to read 131075 bytes. 
[Read error: Success] +####-##-## TIME-### Reader INFO Rescanning directory list..... +####-##-## TIME-### Reader INFO Scanning directory list disabled! +####-##-## TIME-### Reader INFO Scanning directory list disabled! +####-##-## TIME-### Reader INFO Scanning directory list disabled! +####-##-## TIME-### Reader INFO Sleeping before going to PAUSE.... +####-##-## TIME-### Reader INFO Executed cancellation of pending I/O requests. +####-##-## TIME-### Reader INFO Quitting... +####-##-## TIME-### MBM INFO Excluding from buffers. No more buffer access possible. ++++ MBMMON.log ++---------------------------------------------------------------------------------------------------- +| Buffer Manager summary [ (.*)_##ID## ] ++---------------------------------------------------------------------------------------------------- +| +++++ MBM buffer section sucessfully mapped. +| +| ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +| +| Buffer "Events_##ID##" Events: Produced:200 Seen:200 Pending:0 Max:50 +| Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| ##ID##_Reader_0 feed P #MBM# 200 100 +| CONS_ONE feed C 200 100 1 Events_##ID## +| +| ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +| +| Buffer "Output_##ID##" Events: Produced:200 Seen:0 Pending:0 Max:50 +| Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| PROD_ONE feed P 200 100 Output_##ID## +| +| +++++ MBM summary finished. ++---------------------------------------------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_write_mdf_zstd.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_write_mdf_zstd.ref new file mode 100644 index 000000000..dba737cd6 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_write_mdf_zstd.ref @@ -0,0 +1,59 @@ +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ UTGID: ##ID##_Writer_0 +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ DIM_DNS port:#### +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ DIM_DNS node:localhost +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ OPTIONS: GaudiOnlineTests.DF +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ TASK_TYPE: Writer +####-##-## TIME-### INFO: Loading library: libDataflowDict +####-##-## TIME-### INFO: Import Online namespace from ROOT +####-##-## TIME-### INFO: +++ Writer output specs: ../data/df_output_mdf_zstd_${RUN}_${SEQ}.mdf.zst [max. 100 events per file] +####-##-## TIME-### INFO: +++ Setting up output logger component +####-##-## TIME-### INFO: +++ Starting dataflow process +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_zstd_0000012345_0001.mdf.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_zstd_0000012345_0001.mdf.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_zstd_0000012345_0002.mdf.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_zstd_0000012345_0002.mdf.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_zstd_0000012345_0003.mdf.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_zstd_0000012345_0003.mdf.zst after 0 MB [event-limit]. 
+####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_zstd_0000012345_0004.mdf.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_zstd_0000012345_0004.mdf.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_zstd_0000012345_0005.mdf.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_zstd_0000012345_0005.mdf.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_zstd_0000012345_0006.mdf.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_zstd_0000012345_0006.mdf.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_zstd_0000012345_0007.mdf.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_zstd_0000012345_0007.mdf.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_zstd_0000012345_0008.mdf.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_zstd_0000012345_0008.mdf.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_zstd_0000012345_0009.mdf.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_zstd_0000012345_0009.mdf.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_zstd_0000012345_0010.mdf.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_zstd_0000012345_0010.mdf.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- +####-##-## TIME-### Writer INFO | Overview of monitoring items: +####-##-## TIME-### Writer INFO | Number of events written to output: 1000 +####-##-## TIME-### Writer INFO | Number of events not written and dropped: 0 +####-##-## TIME-### Writer INFO | Number of bursts submitted to output: 1000 +####-##-## TIME-### Writer INFO | Number of files opened to write output: 10 +####-##-## TIME-### Writer INFO | Number of files closed to write output: 10 +####-##-## TIME-### Writer INFO | Number of writte errors: 0 +####-##-## TIME-### Writer INFO | Number of bytes written to output: 608000 +####-##-## TIME-### Writer INFO | Number of bytes non-compressed written to output: 608000 +####-##-## TIME-### Writer INFO | Number of bytes dropped from output: 0 +####-##-## TIME-### Writer INFO | Number of events with a bad header structure: 0 +####-##-## TIME-### Writer INFO | Number of currently active buffers: 0 +####-##-## TIME-### Writer INFO | Number of current todo buffers: 0 +####-##-## TIME-### Writer INFO | Number of currently free buffers: 0 +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- +####-##-## TIME-### Writer INFO | Files successfully written: +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_zstd_0000012345_0001.mdf.zst Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_zstd_0000012345_0002.mdf.zst Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_zstd_0000012345_0003.mdf.zst Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_zstd_0000012345_0004.mdf.zst Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_zstd_0000012345_0005.mdf.zst Run: 12345 ####-##-## TIME-### 100 
events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_zstd_0000012345_0006.mdf.zst Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_zstd_0000012345_0007.mdf.zst Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_zstd_0000012345_0008.mdf.zst Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_zstd_0000012345_0009.mdf.zst Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_zstd_0000012345_0010.mdf.zst Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_write_tae_zstd.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_write_tae_zstd.ref new file mode 100644 index 000000000..231a6b016 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_write_tae_zstd.ref @@ -0,0 +1,59 @@ +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ UTGID: ##ID##_Writer_0 +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ DIM_DNS port:#### +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ DIM_DNS node:localhost +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ OPTIONS: GaudiOnlineTests.DF +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ TASK_TYPE: Writer +####-##-## TIME-### INFO: Loading library: libDataflowDict +####-##-## TIME-### INFO: Import Online namespace from ROOT +####-##-## TIME-### INFO: +++ Writer output specs: ../data/df_output_tae_zstd_${RUN}_${SEQ}.tae.zst [max. 20 events per file] +####-##-## TIME-### INFO: +++ Setting up output logger component +####-##-## TIME-### INFO: +++ Starting dataflow process +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_zstd_0000012345_0001.tae.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_zstd_0000012345_0001.tae.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_zstd_0000012345_0002.tae.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_zstd_0000012345_0002.tae.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_zstd_0000012345_0003.tae.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_zstd_0000012345_0003.tae.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_zstd_0000012345_0004.tae.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_zstd_0000012345_0004.tae.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_zstd_0000012345_0005.tae.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_zstd_0000012345_0005.tae.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_zstd_0000012345_0006.tae.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_zstd_0000012345_0006.tae.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_zstd_0000012345_0007.tae.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_zstd_0000012345_0007.tae.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_zstd_0000012345_0008.tae.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_zstd_0000012345_0008.tae.zst after 0 MB [event-limit]. 
+####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_zstd_0000012345_0009.tae.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_zstd_0000012345_0009.tae.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_zstd_0000012345_0010.tae.zst +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_zstd_0000012345_0010.tae.zst after 0 MB [event-limit]. +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- +####-##-## TIME-### Writer INFO | Overview of monitoring items: +####-##-## TIME-### Writer INFO | Number of events written to output: 200 +####-##-## TIME-### Writer INFO | Number of events not written and dropped: 0 +####-##-## TIME-### Writer INFO | Number of bursts submitted to output: 200 +####-##-## TIME-### Writer INFO | Number of files opened to write output: 10 +####-##-## TIME-### Writer INFO | Number of files closed to write output: 10 +####-##-## TIME-### Writer INFO | Number of writte errors: 0 +####-##-## TIME-### Writer INFO | Number of bytes written to output: 1727200 +####-##-## TIME-### Writer INFO | Number of bytes non-compressed written to output: 1727200 +####-##-## TIME-### Writer INFO | Number of bytes dropped from output: 0 +####-##-## TIME-### Writer INFO | Number of events with a bad header structure: 0 +####-##-## TIME-### Writer INFO | Number of currently active buffers: 0 +####-##-## TIME-### Writer INFO | Number of current todo buffers: 0 +####-##-## TIME-### Writer INFO | Number of currently free buffers: 0 +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- +####-##-## TIME-### Writer INFO | Files successfully written: +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- +####-##-## TIME-### Writer INFO | ../data/df_output_tae_zstd_0000012345_0001.tae.zst Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_zstd_0000012345_0002.tae.zst Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_zstd_0000012345_0003.tae.zst Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_zstd_0000012345_0004.tae.zst Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_zstd_0000012345_0005.tae.zst Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_zstd_0000012345_0006.tae.zst Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_zstd_0000012345_0007.tae.zst Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_zstd_0000012345_0008.tae.zst Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_zstd_0000012345_0009.tae.zst Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_zstd_0000012345_0010.tae.zst Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- diff --git a/Online/OnlineBase/CMakeLists.txt b/Online/OnlineBase/CMakeLists.txt index 6cd87279d..b586727ad 100755 --- a/Online/OnlineBase/CMakeLists.txt +++ b/Online/OnlineBase/CMakeLists.txt @@ -166,51 +166,40 @@ 
online_library(OnlineCompress src/COMPRESS/posix_lzma.cpp src/COMPRESS/posix_lz4.cpp) # -target_link_libraries(OnlineCompress PRIVATE Online::OnlineBase) +target_link_libraries(OnlineCompress PRIVATE Online::OnlineBase ROOT::Core -lz -llzma) # find_path(ZLIB_HEADER_DIR NAMES gzip.h HINTS $ENV{LBENV_PREFIX}/include) -find_path(ZLIB_LIBRARY_DIR NAMES libz.so HINTS $ENV{LBENV_PREFIX}/lib) if( NOT "${ZLIB_HEADER_DIR}" STREQUAL "" ) target_compile_definitions(OnlineCompress PRIVATE ONLINE_HAVE_ZLIB=1) - target_link_libraries(OnlineCompress PRIVATE -lz) - target_link_directories(OnlineCompress PRIVATE ${ZLIB_LIBRARY_DIR}) + target_link_libraries(OnlineCompress PRIVATE ROOT::Core) message(STATUS "ZLIB FOUND. ZLIB compression availible") else() message(STATUS "ZLIB was not found. ZLIB compression not availible") endif() find_path(ZSTD_HEADER_DIR NAMES zstd.h HINTS $ENV{LBENV_PREFIX}/include) -find_path(ZSTD_LIBRARY_DIR NAMES libzstd.so HINTS $ENV{LBENV_PREFIX}/lib) if( NOT "${ZSTD_HEADER_DIR}" STREQUAL "" ) message(STATUS "ZSTD_HEADER_DIR: ${ZSTD_HEADER_DIR}. ZSTD compression availible.") target_include_directories(OnlineCompress PRIVATE ${ZSTD_HEADER_DIR}) target_compile_definitions(OnlineCompress PRIVATE ONLINE_HAVE_ZSTD=1) - target_link_directories(OnlineCompress PRIVATE ${ZSTD_LIBRARY_DIR}) - target_link_libraries(OnlineCompress PRIVATE -lzstd) else() message(STATUS "ZSTD HEADER not found! ZSTD compression not available.") endif() find_path(LZMA_HEADER_DIR NAMES lzma.h HINTS $ENV{LBENV_PREFIX}/include) -find_path(LZMA_LIBRARY_DIR NAMES liblzma.so HINTS $ENV{LBENV_PREFIX}/lib) if( NOT "${LZMA_HEADER_DIR}" STREQUAL "" ) message(STATUS "LZMA_HEADER_DIR: ${LZMA_HEADER_DIR}. LZMA compression availible.") target_include_directories(OnlineCompress PRIVATE ${LZMA_HEADER_DIR}) target_compile_definitions(OnlineCompress PRIVATE ONLINE_HAVE_LZMA=1) - target_link_directories(OnlineCompress PRIVATE ${LZMA_LIBRARY_DIR}) - target_link_libraries(OnlineCompress PRIVATE -llzma) else() message(STATUS "LZMA HEADER not found! LZMA compression not available.") endif() find_path(LZ4_HEADER_DIR NAMES lz4.h HINTS $ENV{LBENV_PREFIX}/include) -find_path(LZ4_LIBRARY_DIR NAMES liblz4.so HINTS $ENV{LBENV_PREFIX}/lib) if( NOT "${LZ4_HEADER_DIR}" STREQUAL "" ) message(STATUS "LZ4_HEADER_DIR: ${LZ4_HEADER_DIR}. LZ4 compression availible.") target_include_directories(OnlineCompress PRIVATE ${LZ4_HEADER_DIR}) target_compile_definitions(OnlineCompress PRIVATE ONLINE_HAVE_LZ4=1) - target_link_directories(OnlineCompress PRIVATE ${LZ4_LIBRARY_DIR}) - target_link_libraries(OnlineCompress PRIVATE -llz4) else() message(STATUS "LZ4 HEADER not found! 
LZ4 compression not available.") endif() diff --git a/Online/OnlineBase/include/RTL/Compress.h b/Online/OnlineBase/include/RTL/Compress.h index 44ff98fa1..22a8effd9 100644 --- a/Online/OnlineBase/include/RTL/Compress.h +++ b/Online/OnlineBase/include/RTL/Compress.h @@ -30,11 +30,39 @@ namespace Online { std::vector<unsigned char> to_vector(const char* data); std::vector<unsigned char> to_vector(const std::string& data); + /// Inflate the response using zstd + std::vector<unsigned char> decompress_zstd(const unsigned char* data, std::size_t len); + + /// Inflate the response using zstd + std::vector<unsigned char> decompress_zstd(const std::vector<unsigned char>& data); + + /// Inflate the response using zstd + std::vector<unsigned char> decompress_zstd(std::vector<unsigned char>&& data); + + /// Inflate the response using gzip + std::vector<unsigned char> decompress_gzip(const std::vector<unsigned char>& data); + + /// Inflate the response using gzip + std::vector<unsigned char> decompress_gzip(std::vector<unsigned char>&& data); + /// decompress a byte buffer std::vector<unsigned char> decompress(const std::string& content_encoding, const std::vector<unsigned char>& data); + /// Inflate the response + std::vector<unsigned char> decompress(const std::string& encoding, + std::vector<unsigned char>&& data); + + /// Inflate the response + std::vector<unsigned char> decompress(const std::string& encoding, + const void* data, + std::size_t data_len); + + /// Inflate the response + std::vector<unsigned char> decompress(const std::string& encoding, + const unsigned char* data, + std::size_t data_len); /// compress a byte buffer std::vector<unsigned char> compress(const std::string& accepted_encoding, const std::vector<unsigned char>& data, diff --git a/Online/OnlineBase/src/COMPRESS/Compress.cpp b/Online/OnlineBase/src/COMPRESS/Compress.cpp index ce7173157..c7c14ea7a 100644 --- a/Online/OnlineBase/src/COMPRESS/Compress.cpp +++ b/Online/OnlineBase/src/COMPRESS/Compress.cpp @@ -15,40 +15,76 @@ // Framework include files #include <RTL/Compress.h> +#include <RTL/rtl.h> // C/C++ include files #include <system_error> #include <sstream> #include <cstring> +#include <cstdint> #include <cerrno> #include <zlib.h> -using namespace std; - namespace { static const std::string base64_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz" "0123456789+/"; - - - static inline bool is_base64(unsigned char c) { + + static inline bool is_base64(uint8_t c) { return (isalnum(c) || (c == '+') || (c == '/')); } } +#if defined(ONLINE_HAVE_ZSTD) +#include <zstd.h> + +/// Namespace for the Online utilities +namespace Online { + /// Namespace for the Online compression utilities + namespace compress { + + /// Inflate the response using zstd + std::vector<uint8_t> decompress_zstd(const uint8_t* data, std::size_t len) { + ZSTD_DCtx* decompress = ::ZSTD_createDCtx(); + std::size_t out_size = ::ZSTD_DStreamOutSize(); + std::vector<uint8_t> buffer, result; + + result.reserve(len); + buffer.resize(out_size); + ::ZSTD_inBuffer input = { data, len, 0 }; + while (input.pos < input.size) { + ZSTD_outBuffer output = { &buffer.at(0), buffer.size(), 0 }; + std::size_t ret = ::ZSTD_decompressStream(decompress, &output , &input); + if ( ZSTD_isError(ret) ) { + ::lib_rtl_output(LIB_RTL_ERROR, "ZSTD FAILED to decompress %ld bytes.", len); + return {}; + } + std::copy(&buffer.at(0), &buffer.at(0)+output.pos, std::back_inserter(result)); + } + ::ZSTD_freeDCtx( decompress ); + return result; + } + } // End namespace compress +} // 
End namespace Online +#else +namespace Online { namespace compress { + std::vector<uint8_t> decompress_zstd(const uint8_t*, std::size_t) { return {}; } + }} +#endif + /// Namespace for the HTTP utilities namespace Online { /// Namespace for the HTTP compression utilities namespace compress { - std::string base64_encode(const unsigned char* bytes_to_encode, unsigned int in_len) { + std::string base64_encode(const uint8_t* bytes_to_encode, unsigned int in_len) { std::string ret; int i = 0; int j = 0; - unsigned char char_array_3[3]; - unsigned char char_array_4[4]; + uint8_t char_array_3[3]; + uint8_t char_array_4[4]; while (in_len--) { char_array_3[i++] = *(bytes_to_encode++); @@ -82,19 +118,19 @@ namespace Online { return ret; } - std::vector<unsigned char> base64_decode(const std::string& encoded_string) { + std::vector<uint8_t> base64_decode(const std::string& encoded_string) { size_t in_len = encoded_string.size(); size_t i = 0; size_t j = 0; int in_ = 0; - unsigned char char_array_4[4], char_array_3[3]; - std::vector<unsigned char> ret; + uint8_t char_array_4[4], char_array_3[3]; + std::vector<uint8_t> ret; while (in_len-- && ( encoded_string[in_] != '=') && is_base64(encoded_string[in_])) { char_array_4[i++] = encoded_string[in_]; in_++; if (i ==4) { for (i = 0; i <4; i++) - char_array_4[i] = static_cast<unsigned char>(base64_chars.find(char_array_4[i])); + char_array_4[i] = static_cast<uint8_t>(base64_chars.find(char_array_4[i])); char_array_3[0] = (char_array_4[0] << 2) + ((char_array_4[1] & 0x30) >> 4); char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2); @@ -110,7 +146,7 @@ namespace Online { for (j = i; j <4; j++) char_array_4[j] = 0; for (j = 0; j <4; j++) - char_array_4[j] = static_cast<unsigned char>(base64_chars.find(char_array_4[j])); + char_array_4[j] = static_cast<uint8_t>(base64_chars.find(char_array_4[j])); char_array_3[0] = (char_array_4[0] << 2) + ((char_array_4[1] & 0x30) >> 4); char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2); @@ -121,6 +157,16 @@ namespace Online { return ret; } + /// Inflate the response using zstd + std::vector<uint8_t> decompress_zstd(const std::vector<uint8_t>& data) { + return decompress_zstd(&data[0], data.size()); + } + + /// Inflate the response using zstd + std::vector<uint8_t> decompress_zstd(std::vector<uint8_t>&& data) { + return decompress_zstd(&data[0], data.size()); + } + #define CHUNK 0x4000 @@ -132,17 +178,17 @@ namespace Online { #define GZIP_ENCODING 16 /// compress a byte buffer - vector<unsigned char> compress(const string& encoding, - const vector<unsigned char>& data, - string& used_encoding) { - vector<unsigned char> result; + std::vector<uint8_t> compress(const std::string& encoding, + const std::vector<uint8_t>& data, + std::string& used_encoding) { + std::vector<uint8_t> result; used_encoding = ""; if ( !encoding.empty() ) { - stringstream str; - bool gzip = encoding.find("gzip") != string::npos; - bool deflate = encoding.find("deflate") != string::npos; + std::stringstream str; + bool gzip = encoding.find("gzip") != std::string::npos; + bool deflate = encoding.find("deflate") != std::string::npos; if ( gzip || deflate ) { - unsigned char out[CHUNK+1]; + uint8_t out[CHUNK+1]; int status, window = windowBits + (gzip ? 
GZIP_ENCODING : 0); z_stream strm; @@ -151,31 +197,31 @@ namespace Online { strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.avail_in = 0; - strm.next_in = (unsigned char*)data.data(); + strm.next_in = (uint8_t*)data.data(); status = ::deflateInit2 (&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, window, 8, Z_DEFAULT_STRATEGY); if ( status < 0 ) { - str << "HTTP [Failed to initialize zlib/gzip " - << make_error_code(errc(EINVAL)).message() << "]"; + str << "Compress [Failed to initialize zlib/gzip " + << std::make_error_code(std::errc(EINVAL)).message() << "]"; goto Default; } strm.avail_in = data.size(); - strm.next_in = (unsigned char*)data.data(); + strm.next_in = (uint8_t*)data.data(); strm.avail_in = data.size(); do { strm.avail_out = CHUNK; strm.next_out = out; status = ::deflate(&strm, Z_FINISH); if ( status < 0 ) { - str << "XMLRPC [Failed to deflate buffer with zlib/gzip " - << make_error_code(errc(EINVAL)).message() << "]"; + str << "Compress [Failed to deflate buffer with zlib/gzip " + << std::make_error_code(std::errc(EINVAL)).message() << "]"; ::deflateEnd (&strm); goto Default; } - copy(out, out+CHUNK-strm.avail_out, back_inserter(result)); + std::copy(out, out+CHUNK-strm.avail_out, std::back_inserter(result)); } while (strm.avail_out == 0); ::deflateEnd(&strm); used_encoding = gzip ? "gzip" : "deflate"; @@ -187,79 +233,122 @@ namespace Online { return result; } + std::vector<uint8_t> decompress_zstream(bool have_gzip, + const uint8_t* data, + std::size_t data_len) { + std::stringstream str; + uint8_t out[CHUNK+1]; + std::vector<uint8_t> result; + z_stream strm; + + result.reserve(1024*1024); + ::memset(&strm,0,sizeof(strm)); + strm.zalloc = Z_NULL; + strm.zfree = Z_NULL; + strm.opaque = Z_NULL; + strm.next_in = (uint8_t*)data; + strm.avail_in = 0; + int window = windowBits + (have_gzip ? 
ENABLE_ZLIB_GZIP : 0); + int status = ::inflateInit2(&strm, window); + if ( status < 0 ) { + str << "XMLRPC [Failed to initialize zlib/gzip " + << std::make_error_code(std::errc(EINVAL)).message() << "]"; + goto Default; + } + strm.avail_in = data_len; + strm.next_in = (uint8_t*)data; + do { + strm.avail_out = CHUNK; + strm.next_out = out; + status = ::inflate (&strm, Z_NO_FLUSH); + switch (status) { + case Z_OK: + case Z_STREAM_END: + break; + case Z_BUF_ERROR: + default: + inflateEnd(&strm); + str << "Compress [Failed inflate buffer with zlib/gzip : " << status; + goto Default; + } + std::copy(out, out+CHUNK-strm.avail_out, back_inserter(result)); + } while (strm.avail_out == 0); + ::inflateEnd(&strm); + return result; + + Default: + result.reserve(data_len); + std::copy(data, data+data_len, back_inserter(result)); + return result; + } + + /// Inflate the response using gzip + std::vector<uint8_t> decompress_gzip(const std::vector<uint8_t>& data) { + return decompress_zstream(true, &data[0], data.size()); + } + + /// Inflate the response using gzip + std::vector<uint8_t> decompress_gzip(std::vector<uint8_t>&& data) { + return decompress_zstream(true, &data[0], data.size()); + } + /// Inflate the response - vector<unsigned char> decompress(const string& encoding, - const vector<unsigned char>& data) { - if ( !encoding.empty() ) { - stringstream str; - bool gzip = encoding.find("gzip") != string::npos; - bool deflate = encoding.find("deflate") != string::npos; - if ( gzip || deflate ) { - vector<unsigned char> result; - unsigned char out[CHUNK+1]; - z_stream strm; + std::vector<uint8_t> decompress(const std::string& encoding, + const std::vector<uint8_t>& data) { + return decompress(encoding, &data[0], data.size()); + } - result.reserve(1024*1024); - ::memset(&strm,0,sizeof(strm)); - strm.zalloc = Z_NULL; - strm.zfree = Z_NULL; - strm.opaque = Z_NULL; - strm.next_in = (unsigned char*)&data[0]; - strm.avail_in = 0; - int window = windowBits + (gzip ? 
ENABLE_ZLIB_GZIP : 0); - int status = ::inflateInit2(&strm, window); - if ( status < 0 ) { - str << "XMLRPC [Failed to initialize zlib/gzip " - << make_error_code(errc(EINVAL)).message() << "]"; - goto Default; - } - strm.avail_in = data.size(); - strm.next_in = (unsigned char*)&data[0]; - do { - strm.avail_out = CHUNK; - strm.next_out = out; - status = ::inflate (&strm, Z_NO_FLUSH); - switch (status) { - case Z_OK: - case Z_STREAM_END: - break; - case Z_BUF_ERROR: - default: - inflateEnd(&strm); - str << "XMLRPC [Failed inflate buffer with zlib/gzip : " << status; - goto Default; - } - copy(out, out+CHUNK-strm.avail_out, back_inserter(result)); - } while (strm.avail_out == 0); - ::inflateEnd(&strm); - return result; - } - else if ( encoding.find("identity") != string::npos ) { + /// Inflate the response + std::vector<uint8_t> decompress(const std::string& encoding, + std::vector<uint8_t>&& data) { + return decompress(encoding, &data[0], data.size()); + } + + /// Inflate the response + std::vector<uint8_t> decompress(const std::string& encoding, + const void* data, + std::size_t data_len) { + return decompress(encoding, (const uint8_t*)data, data_len); + } + + /// Inflate the response + std::vector<uint8_t> decompress(const std::string& encoding, + const uint8_t* data, + std::size_t data_len) { + std::vector<uint8_t> result; + if ( !encoding.empty() ) { + if ( encoding.find("gzip") != std::string::npos ) + return decompress_zstream(true, data, data_len); + else if ( encoding.find("deflate") != std::string::npos ) + return decompress_zstream(false, data, data_len); + else if ( encoding.find("zstd") != std::string::npos ) + return decompress_zstd(data, data_len); + else if ( encoding.find("identity") != std::string::npos ) goto Default; - } } + Default: - return data; + result.reserve(data_len); + std::copy(data, data+data_len, back_inserter(result)); + return result; } - - std::vector<unsigned char> to_vector(const char* data) { - std::vector<unsigned char> r; + std::vector<uint8_t> to_vector(const char* data) { + std::vector<uint8_t> r; if ( data ) { size_t len = ::strlen(data); r.reserve(len+1); - for(const char *c=data; *c; ++c) r.push_back((unsigned char)(*c)); + for(const char *c=data; *c; ++c) r.push_back((uint8_t)(*c)); } return r; } - std::vector<unsigned char> to_vector(const std::string& data) { - std::vector<unsigned char> r; + std::vector<uint8_t> to_vector(const std::string& data) { + std::vector<uint8_t> r; if ( !data.empty() ) { r.reserve(data.length()+1); - for(const char *c=data.c_str(); *c; ++c) r.push_back((unsigned char)(*c)); + for(const char *c=data.c_str(); *c; ++c) r.push_back((uint8_t)(*c)); } return r; } - - } -} + } // End namespace compress +} // End namespace Online diff --git a/Online/OnlineBase/src/COMPRESS/posix_gzip.cpp b/Online/OnlineBase/src/COMPRESS/posix_gzip.cpp index 941c95864..247d10570 100644 --- a/Online/OnlineBase/src/COMPRESS/posix_gzip.cpp +++ b/Online/OnlineBase/src/COMPRESS/posix_gzip.cpp @@ -28,6 +28,8 @@ #include <zlib.h> +// #define POSIX_DEBUG 1 + /// -------------------------------------------------------------------------------- namespace { @@ -36,7 +38,8 @@ namespace { public: int fd { 0 }; gzFile gz_file { nullptr }; - std::string path { }; + std::string path { }; + uint32_t buff_length { 0 }; int level { Z_DEFAULT_COMPRESSION }; int strategy { Z_DEFAULT_STRATEGY }; int applied { 0 }; @@ -73,7 +76,7 @@ namespace { auto i = fileMap().find( fd ); if ( i != fileMap().end() ) { path = i->second.path; - if ( opt.substr(0,4) == "strat" ) { + 
if ( opt.substr(0, 8) == "strategy" ) { std::string val = RTL::str_lower(value); if ( val == "default" ) i->second.strategy = Z_DEFAULT_STRATEGY; @@ -109,20 +112,35 @@ namespace { } return 0; } + else if ( opt.substr(0,6) == "buffer" ) { + char* end = 0; + int len = ::strtol(value, &end, 10); + if ( len >= 0 ) + i->second.buff_length = len; + else + goto Error; + return 0; + } else if ( opt.substr(0, 5) == "apply" ) { gzFile gzip = i->second.gz_file; int ret = ::gzsetparams(gzip, i->second.level, i->second.strategy); - if ( ret == Z_OK ) { - i->second.applied = 1; - return 0; + if ( Z_OK != ret ) { + return -1; } + if ( i->second.buff_length > 0 ) { + ret = ::gzbuffer(gzip, i->second.buff_length); + if ( Z_OK != ret ) { + ::gzclose(gzip); + return -1; + } + } + i->second.applied = 1; + return 0; } } } Error: -#if POSIX_DEBUG ::lib_rtl_output(LIB_RTL_ERROR,"GZIP error: FAILED to set option %s = %s", option, value); -#endif return -1; } /// ------------------------------------------------------------------------------ @@ -169,7 +187,7 @@ namespace { if ( i != fileMap().end() ) { auto& desc = i->second; #if POSIX_DEBUG - ::lib_rtl_output(LIB_RTL_INFO,"+++ Close LZMA file %s", desc.path.c_str()); + ::lib_rtl_output(LIB_RTL_INFO,"+++ Close GZIP file %s", desc.path.c_str()); #endif ::gzclose(desc.gz_file); fileMap().erase(i); @@ -201,7 +219,7 @@ namespace { if ( !i->second.applied ) { if ( Z_OK != ::gzsetparams(gzip, desc.level, desc.strategy) ) { ::lib_rtl_output(LIB_RTL_ERROR, - "%s: LZMA error: FAILED to set options level:%d strategy:%s", + "%s: GZIP error: FAILED to set options level:%d strategy:%s", desc.path.c_str(), desc.level, desc.strategy); } i->second.applied = 1; diff --git a/Online/OnlineBase/src/COMPRESS/posix_lzma.cpp b/Online/OnlineBase/src/COMPRESS/posix_lzma.cpp index bee280666..453242a49 100644 --- a/Online/OnlineBase/src/COMPRESS/posix_lzma.cpp +++ b/Online/OnlineBase/src/COMPRESS/posix_lzma.cpp @@ -35,7 +35,9 @@ /// -------------------------------------------------------------------------------- namespace { - constexpr static std::size_t DEFAULT_BUFFER_SIZE = 512*1024; + static int s_debug = 0; + + constexpr static std::size_t DEFAULT_BUFFER_SIZE = 3*1024*1024; class descriptor { public: using mem_buff_t = Online::mem_buff; @@ -45,11 +47,17 @@ namespace { int level { 0 }; int applied { 0 }; int mode { NONE }; + int debug { s_debug }; std::string path { }; std::size_t in_size { 0 }; - std::size_t out_limit { 32*1024*1024 }; - std::size_t out_size { 0 }; + std::size_t out_limit { 5*1024*1024 }; + std::size_t tmp_size { 0 }; std::size_t position { 0 }; + + struct monitoring { + std::size_t in_total { 0 }; + std::size_t out_disk{ 0 }; + } monitor; lzma_stream stream = LZMA_STREAM_INIT; mem_buff_t in_buffer { }; mem_buff_t out_buffer { }; @@ -61,7 +69,7 @@ namespace { descriptor& operator=(descriptor&& copy) = default; descriptor& operator=(const descriptor& copy) = delete; }; - + using posix_t = Online::posix_t; using FileMap = std::map<int, descriptor>; @@ -80,41 +88,66 @@ namespace { return nullptr; } /// ------------------------------------------------------------------------------ + std::size_t posix_lzma_print(int debug, const char* format, ...) { + if ( debug ) { + va_list args; + va_start( args, format ); + std::size_t len = ::lib_rtl_log(debug > 1 ? 
debug : LIB_RTL_ALWAYS, format, args); + va_end(args); + return len; + } + return 0; + } + /// ------------------------------------------------------------------------------ int posix_lzma_handle_error(descriptor& desc, lzma_ret ret) { const char* path = desc.path.c_str(); switch (ret) { + case LZMA_MEMLIMIT_ERROR: + posix_lzma_print(LIB_RTL_ERROR, + "%s: LZMA error: Memory usage limit reached", path); + errno = ENOMEM; + break; case LZMA_MEM_ERROR: - ::lib_rtl_output(LIB_RTL_ERROR, + posix_lzma_print(LIB_RTL_ERROR, "%s: LZMA error: Memory allocation failed", path); errno = ENOMEM; break; case LZMA_FORMAT_ERROR: - ::lib_rtl_output(LIB_RTL_ERROR, + posix_lzma_print(LIB_RTL_ERROR, "%s: LZMA error: The input is not in the .xz format", path); + errno = EINVAL; break; case LZMA_OPTIONS_ERROR: - ::lib_rtl_output(LIB_RTL_ERROR, - "%s: LZMA error: Unsupported compression options", path); + posix_lzma_print(LIB_RTL_ERROR, + "%s: LZMA options error: Unsupported compression options", path); + errno = EINVAL; break; case LZMA_DATA_ERROR: - ::lib_rtl_output(LIB_RTL_ERROR, - "%s: LZMA error: Compressed file is corrupt", path); + posix_lzma_print(LIB_RTL_ERROR, + "%s: LZMA data error: Compressed file is corrupt", path); + errno = EINVAL; break; case LZMA_BUF_ERROR: - ::lib_rtl_output(LIB_RTL_ERROR, - "%s: LZMA error: Compressed file is truncated " - "or otherwise corrupt", path); + posix_lzma_print(LIB_RTL_ERROR, + "%s: LZMA buffer error: Compressed file is truncated " + "or otherwise corrupt. No progress is possible", path); + errno = ENOSPC; break; case LZMA_UNSUPPORTED_CHECK: - ::lib_rtl_output(LIB_RTL_ERROR, + posix_lzma_print(LIB_RTL_ERROR, "%s: LZMA error: Specified integrity check is not supported", path); errno = EINVAL; break; + case LZMA_PROG_ERROR: + posix_lzma_print(LIB_RTL_ERROR, + "%s: LZMA error: Programming error", path); + errno = EINVAL; + break; case LZMA_OK: return 1; default: - ::lib_rtl_output(LIB_RTL_ERROR,"%s: LZMA error: Unknown error, possibly a bug", path); + posix_lzma_print(LIB_RTL_ERROR,"%s: LZMA error: Unknown error, possibly a bug", path); break; } return 0; @@ -126,14 +159,20 @@ namespace { auto i = fileMap().find( fd ); if ( i != fileMap().end() ) { auto& desc = i->second; - if ( opt.substr(0,8) == "compress" ) { + if ( opt.substr(0,5) == "debug" ) { + std::string val = RTL::str_lower(value); + if ( val[0] == 'y' ) desc.debug = 1; // yes + else if ( val[0] == 't' ) desc.debug = 1; // true + else if ( val[0] == 'n' ) desc.debug = 0; // no + else if ( val[0] == 'f' ) desc.debug = 0; // false + } + else if ( opt.substr(0,8) == "compress" ) { std::string val = RTL::str_lower(value); if ( val == "none" ) val = "0"; else if ( val == "default" ) val = "3"; else if ( val == "high" ) val = "9"; - if ( !::isdigit(val[0]) ) { + if ( !::isdigit(val[0]) ) goto Error; - } desc.level = int(val[0]) - int('0'); if ( ::tolower(val[1]) == 'e' ) { desc.level |= LZMA_PRESET_EXTREME; @@ -144,7 +183,7 @@ namespace { errno = EBADF; } Error: - ::lib_rtl_output(LIB_RTL_ERROR,"LZMA error: FAILED to set option %s = %s", option, value); + posix_lzma_print(LIB_RTL_ERROR, "LZMA error: FAILED to set option %s = %s", option, value); return -1; } /// ------------------------------------------------------------------------------ @@ -152,9 +191,11 @@ namespace { const char* path = desc.path.c_str(); int flags = desc.flags; ::lzma_ret ret = LZMA_OK; + + desc.stream = LZMA_STREAM_INIT; if ( (flags & (O_WRONLY|O_CREAT)) != 0 ) { - lzma_options_lzma opt_lzma2; - if ( ::lzma_lzma_preset(&opt_lzma2, 
LZMA_PRESET_DEFAULT) ) { + lzma_options_lzma opt_lzma; + if ( ::lzma_lzma_preset(&opt_lzma, LZMA_PRESET_DEFAULT) ) { // It should never fail because the default preset // (and presets 0-9 optionally with LZMA_PRESET_EXTREME) // are supported by all stable liblzma versions. @@ -165,7 +206,7 @@ namespace { filters[0].id = LZMA_FILTER_X86; filters[0].options = NULL; filters[1].id = LZMA_FILTER_LZMA2; - filters[1].options = &opt_lzma2; + filters[1].options = &opt_lzma; filters[2].id = LZMA_VLI_UNKNOWN; filters[2].options = NULL; @@ -180,12 +221,11 @@ namespace { } switch (ret) { case LZMA_OK: { - desc.in_buffer.allocate(DEFAULT_BUFFER_SIZE); - desc.tmp_buffer.allocate(DEFAULT_BUFFER_SIZE); + desc.in_buffer.allocate(desc.in_size=DEFAULT_BUFFER_SIZE); + desc.tmp_buffer.allocate(desc.tmp_size=DEFAULT_BUFFER_SIZE); desc.out_buffer.allocate(desc.out_limit); -#if POSIX_DEBUG - ::lib_rtl_output(LIB_RTL_DEBUG,"+++ Opened LZMA file: %s", path); -#endif + desc.applied = 1; + posix_lzma_print(desc.debug, "%s: LZMA: Successfully opened file.", path); return 0; } default: @@ -193,91 +233,120 @@ namespace { ::close(desc.fd); break; } -#if POSIX_DEBUG - ::lib_rtl_output(LIB_RTL_ERROR,"+++ FAILED to open LZMA file: %s", path); -#endif + posix_lzma_print(desc.debug, "%s: LZMA: FAILED to open file", path); return -1; } /// ------------------------------------------------------------------------------ - int posix_lzma_flush(descriptor& desc) { - if ( desc.out_buffer.used() > 0 ) { + int posix_lzma_flush_buffer(descriptor& desc, std::size_t max_len) { + if ( desc.out_buffer.used() >= max_len ) { int64_t ret = posix_t::write_disk(desc.fd, desc.out_buffer.begin(), desc.out_buffer.used()); if ( ret < 0 ) { std::string msg = std::make_error_code(std::errc(errno)).message(); - ::lib_rtl_output(LIB_RTL_ERROR, - "+++ FAILED to write %ld bytes to LZMA file %s [%s]", - desc.out_buffer.used(), desc.path.c_str(), msg.c_str()); + posix_lzma_print(desc.debug, + "%s: LZMA: FAILED to write %ld bytes to file [%s]", + desc.path.c_str(), desc.out_buffer.used(), msg.c_str()); return -1; } + desc.monitor.out_disk += desc.out_buffer.used(); + posix_lzma_print(desc.debug, "%s: LZMA: Wrote %ld bytes to disk. 
[Total: %ld bytes]", + desc.path.c_str(), desc.out_buffer.used(), desc.monitor.out_disk); + desc.out_buffer.set_cursor(0); + desc.position = 0; } - desc.out_buffer.set_cursor(0); - desc.position = 0; return 0; } /// ------------------------------------------------------------------------------ - long posix_lzma_write_buffer(descriptor& desc, uint8_t* buff, std::size_t len) { + long posix_lzma_write_buffer(descriptor& desc, uint8_t* buff, std::size_t len, lzma_action flag) { auto& strm = desc.stream; - long out_size = 0; - strm.next_in = buff; - strm.avail_in = len; - strm.next_out = desc.tmp_buffer.begin(); - strm.avail_out = desc.tmp_buffer.length(); - ::lzma_ret ret = ::lzma_code(&strm, LZMA_RUN); - if ( strm.avail_out == 0 || ret == LZMA_STREAM_END ) { - out_size = desc.tmp_buffer.length() - strm.avail_out; - if ( out_size + desc.out_buffer.used() > desc.out_limit ) { - posix_lzma_flush(desc); + long done = 0; + for (long todo=len; todo > 0; todo = len-done) { + long out_now = strm.total_out; + long in_now = strm.total_in; + strm.avail_in = todo; + strm.next_in = buff + done; + strm.next_out = desc.tmp_buffer.begin(); + strm.avail_out = desc.tmp_buffer.length(); + ::lzma_ret ret = ::lzma_code(&strm, flag); + long done_now = strm.total_in - in_now; + done += done_now; + out_now = strm.total_out - out_now; + if ( !(ret == LZMA_OK || ret == LZMA_STREAM_END) ) { + posix_lzma_handle_error(desc, ret); + std::string msg = std::make_error_code(std::errc(errno)).message(); + posix_lzma_print(desc.debug, + "%s: LZMA: Failed to encode frame with %ld bytes [%s]", + desc.path.c_str(), len, msg.c_str()); + return -1; + } + if ( out_now+desc.out_buffer.used() > desc.out_limit ) { + ::posix_lzma_flush_buffer(desc, desc.out_buffer.used()); + } + if ( out_now > 0 ) { + desc.out_buffer.append(desc.tmp_buffer.begin(), out_now); + } + if ( strm.avail_out == 0 ) { + posix_lzma_print(desc.debug,"%s: LZMA: Output buffer overflow. " + "Stop coding after %ld out of %ld bytes.", + desc.path.c_str(), done, len); + break; } - desc.out_buffer.append(desc.tmp_buffer.begin(), out_size); } - posix_lzma_flush(desc); - return out_size; - } - /// ------------------------------------------------------------------------------ - int posix_lzma_open( const char* path, int flags, ... ) { - va_list args; - va_start(args, flags); - int fd = posix_t::open_disk(path, flags, args); - if ( fd == -1 ) { -#if POSIX_DEBUG - std::string msg = std::make_error_code(std::errc(errno)).message(); - ::lib_rtl_output(LIB_RTL_ERROR,"+++ FAILED to open LZMA file %s [%s]", path, msg.c_str()); -#endif - return -1; + posix_lzma_print(desc.debug, + "%s: LZMA: Encoded %ld bytes with flag:%s Total in:%9ld compressed:%9ld disk:%9ld bytes", + desc.path.c_str(), len, + (flag==LZMA_RUN) ? "LZMA_RUN" : (flag==LZMA_FINISH) ? "LZMA_FINISH" : "????", + strm.total_in, strm.total_out, desc.monitor.out_disk); + if ( flag == LZMA_FINISH ) { + posix_lzma_print(desc.debug, "%s: LZMA: Closing frame: %ld bytes", + desc.path.c_str(), desc.position); } - descriptor desc; - int new_fd = posix_t::new_fd(); - desc.fd = fd; - desc.path = path; - desc.flags = flags; - fileMap()[new_fd] = std::move(desc); - return new_fd; + ::posix_lzma_flush_buffer(desc, flag==LZMA_RUN ? 
desc.out_limit : 0); + return done; } /// ------------------------------------------------------------------------------ - int posix_lzma_close( int fd ) { - auto i = fileMap().find( fd ); + ssize_t posix_lzma_write( int fd, const void* ptr, size_t size ) { + auto i = fileMap().find(fd); if ( i != fileMap().end() ) { auto& desc = i->second; -#if POSIX_DEBUG - ::lib_rtl_output(LIB_RTL_INFO,"+++ Close LZMA file %s", desc.path.c_str()); -#endif - /// - if ( desc.mode == descriptor::WRITING ) { - posix_lzma_write_buffer(desc, desc.in_buffer.begin(), desc.position); - ::lzma_end(&desc.stream); + if ( !desc.applied ) { + if ( posix_lzma_attach(desc) < 0 ) { + return -1; + } } - else if ( desc.mode == descriptor::READING ) { - ::lzma_end(&desc.stream); + uint8_t *pointer = (uint8_t*)ptr; + uint8_t *start = desc.in_buffer.begin()+desc.position; + uint64_t in_length = desc.in_buffer.length(); + uint64_t in_space = in_length - desc.position; + uint64_t in_miss = 0; + if ( in_space >= size ) { + ::memcpy(start, pointer, size); + desc.position += size; + return size; } - if ( desc.fd != 0 ) ::close(desc.fd); - fileMap().erase(i); - return 0; + else if ( in_space > 0 ) { + ::memcpy(start, pointer, in_space); + in_miss = size - in_space; + desc.position += in_space; + pointer += in_space; + } + else { + in_miss = size; + } + int64_t written = posix_lzma_write_buffer(desc, desc.in_buffer.begin(), in_length, LZMA_RUN); + if ( int64_t(in_length) != written ) { + desc.position = 0; + return -1; + } + desc.position = 0; + posix_lzma_print(desc.debug, + "%s: LZMA: Wrote buffer of %8ld bytes. [raw: %9ld encoded: %9ld buffered: %9ld disk: %9ld bytes]", + desc.path.c_str(), in_length, desc.stream.total_in, desc.stream.total_out, + desc.out_buffer.used(), desc.monitor.out_disk); + return in_space + posix_lzma_write(fd, pointer+in_space, in_miss); } - errno = EBADF; -#if POSIX_DEBUG - std::string msg = std::make_error_code(std::errc(errno)).message(); - ::lib_rtl_output(LIB_RTL_ERROR, "%05X: LZMA FAILED to close file [%s]", fd, msg.c_str()); -#endif + std::string msg = std::make_error_code(std::errc(errno=EBADF)).message(); + posix_lzma_print(s_debug, "%05X: LZMA: FAILED to write %ld bytes [%s]", + fd, size, msg.c_str()); return -1; } /// ------------------------------------------------------------------------------ @@ -292,25 +361,22 @@ namespace { } } uint8_t *pointer = (uint8_t*)ptr; - int64_t out_len = size; - int64_t out_pos = desc.position; - int64_t out_left = desc.out_buffer.used() - out_pos; + uint8_t *start = desc.out_buffer.begin() + desc.position; + uint64_t out_left = desc.out_buffer.used() - desc.position; int64_t out_miss = 0; - if ( out_left >= out_len ) { - ::memcpy(pointer, desc.out_buffer.begin()+out_pos, out_len); - out_pos += out_len; - desc.position = out_pos; - return out_len; + if ( out_left >= size ) { + ::memcpy(pointer, start, size); + desc.position += size; + return size; } else if ( out_left > 0 ) { - ::memcpy(pointer, desc.out_buffer.begin()+out_pos, out_left); - out_miss = out_len - out_left; - out_pos += out_left; - desc.position = out_pos; + ::memcpy(pointer, start, out_left); + out_miss = size - out_left; + desc.position += out_left; pointer += out_left; } else { - out_miss = out_len; + out_miss = size; } // We have to freshly populate the output buffer from zero position if ( desc.out_buffer.length() < size ) { @@ -321,85 +387,89 @@ namespace { desc.position = 0; desc.out_buffer.set_cursor(0); for (int64_t len=0; len < out_miss; ) { + auto& strm = desc.stream; long total = 
posix_t::read_disk(desc.fd, desc.in_buffer.begin(), desc.in_size); if ( total <= 0 ) { std::string msg = std::make_error_code(std::errc(errno)).message(); - ::lib_rtl_output(LIB_RTL_INFO, + posix_lzma_print(desc.debug, "%s: LZMA FAILED to read %ld bytes. [Read error: %s]", path, size, msg.c_str()); - return -1; + return (out_left > 0) ? out_left : -1; + } + else if ( total < desc.in_size ) { + posix_lzma_print(desc.debug, + "%s: LZMA LIMITED read %ld bytes. [End-of-file]", path, size); } if ( total <= 0 ) { return out_left > 0 ? out_left : -1; } - ::lib_rtl_output(LIB_RTL_DEBUG, "%s: LZMA Read %ld bytes.", path, total); - auto& strm = desc.stream; - strm.next_in = desc.in_buffer.begin(); - strm.avail_in = total; - strm.next_out = desc.tmp_buffer.begin(); - strm.avail_out = desc.tmp_buffer.length(); - ::lzma_ret ret = ::lzma_code(&strm, LZMA_RUN); - if ( strm.avail_out == 0 || ret == LZMA_STREAM_END ) { - size_t decompress_size = desc.tmp_buffer.length() - strm.avail_out; - desc.out_buffer.append(desc.tmp_buffer.begin(), decompress_size); - len = desc.out_buffer.used(); - continue; - } - if ( !posix_lzma_handle_error(desc, ret) ) { - return out_left; + posix_lzma_print(desc.debug, "%s: LZMA: read %ld bytes from disk.", path, total); + long in_done = 0; + long out_done = 0; + strm.next_in = desc.in_buffer.begin(); + for( strm.avail_in = total; strm.avail_in > 0; ) { + long out_now = strm.total_out; + strm.next_out = desc.tmp_buffer.begin(); + strm.avail_out = desc.tmp_buffer.length(); + ::lzma_ret ret = ::lzma_code(&strm, LZMA_RUN); + long done_now = strm.total_out - out_now; + if ( ret != LZMA_OK ) { + posix_lzma_handle_error(desc, ret); + return -1; + } + if ( done_now > 0 ) { + out_done += done_now; + desc.out_buffer.append(desc.tmp_buffer.begin(), done_now); + } } + len = desc.out_buffer.used(); } return out_left + posix_lzma_read(fd, pointer, out_miss); } - errno = EBADF; -#if POSIX_DEBUG - std::string msg = std::make_error_code(std::errc(errno)).message(); - ::lib_rtl_output(LIB_RTL_ERROR, "%05X: LZMA FAILED to read %ld bytes. [%s]", fd, size, msg.c_str()); -#endif + std::string msg = std::make_error_code(std::errc(errno=EBADF)).message(); + posix_lzma_print(s_debug, "%05X: LZMA FAILED to read %ld bytes. [%s]", fd, size, msg.c_str()); return -1; } /// ------------------------------------------------------------------------------ - ssize_t posix_lzma_write( int fd, const void* ptr, size_t size ) { - auto i = fileMap().find(fd); + int posix_lzma_open( const char* path, int flags, ... 
) { + va_list args; + va_start(args, flags); + int fd = posix_t::open_disk(path, flags, args); + if ( fd == -1 ) { + std::string msg = std::make_error_code(std::errc(errno)).message(); + posix_lzma_print(LIB_RTL_ERROR, "%s: LZMA: FAILED to open file [%s]", path, msg.c_str()); + return -1; + } + descriptor desc; + int new_fd = posix_t::new_fd(); + desc.fd = fd; + desc.path = path; + desc.flags = flags; + fileMap()[new_fd] = std::move(desc); + posix_lzma_print(desc.debug, "%s: LZMA: Opened file fd: %d", path, new_fd); + return new_fd; + } + /// ------------------------------------------------------------------------------ + int posix_lzma_close( int fd ) { + auto i = fileMap().find( fd ); if ( i != fileMap().end() ) { auto& desc = i->second; - if ( !desc.applied ) { - if ( posix_lzma_attach(desc) < 0 ) { - return -1; - } - } - uint8_t *pointer = (uint8_t*)ptr; - int64_t in_miss = 0; - int64_t in_len = size; - int64_t in_left = desc.in_buffer.length() - desc.position; - if ( in_left >= in_len ) { - ::memcpy(desc.in_buffer.begin()+desc.position, pointer, in_len); - desc.position += in_len; - return size; - } - else if ( in_left > 0 ) { - ::memcpy(desc.in_buffer.begin()+desc.position, pointer, in_left); - in_miss = in_len - in_left; - desc.position += in_left; - pointer += in_left; - } - else { - in_miss = size; + posix_lzma_print(desc.debug, "%s: LZMA: Closing file", desc.path.c_str()); + /// + if ( desc.mode == descriptor::WRITING ) { + posix_lzma_write_buffer(desc, desc.in_buffer.begin(), desc.position, LZMA_FINISH); + ::posix_lzma_flush_buffer(desc, desc.out_buffer.used()); + ::lzma_end(&desc.stream); } - if ( posix_lzma_write_buffer(desc, desc.in_buffer.begin(), desc.in_buffer.length()) ) { - std::string msg = std::make_error_code(std::errc(errno)).message(); - ::lib_rtl_output(LIB_RTL_ERROR, - "%s: LZMA error: Failed to write %ld bytes [%s]", - desc.path.c_str(), desc.in_buffer.length(), msg.c_str()); + else if ( desc.mode == descriptor::READING ) { + ::lzma_end(&desc.stream); } - desc.in_buffer.set_cursor(0); - desc.position = 0; - return in_left + posix_lzma_write( fd, pointer, in_miss); + if ( desc.fd != 0 ) ::close(desc.fd); + fileMap().erase(i); + return 0; } - errno = EBADF; -#if POSIX_DEBUG - ::lib_rtl_output(LIB_RTL_ERROR, "%05X: LZMA FAILED to write %ld bytes.", fd, size); -#endif + std::string msg = std::make_error_code(std::errc(errno=EBADF)).message(); + posix_lzma_print(s_debug, "%05X: LZMA: FAILED to close file [%s]", fd, msg.c_str()); return -1; } /// ------------------------------------------------------------------------------ diff --git a/Online/OnlineBase/src/COMPRESS/posix_zstd.cpp b/Online/OnlineBase/src/COMPRESS/posix_zstd.cpp index 76b5ef8d3..fdf4c965d 100644 --- a/Online/OnlineBase/src/COMPRESS/posix_zstd.cpp +++ b/Online/OnlineBase/src/COMPRESS/posix_zstd.cpp @@ -30,7 +30,7 @@ #include <zstd.h> -#define POSIX_DEBUG 1 +//#define POSIX_DEBUG 1 /// -------------------------------------------------------------------------------- namespace { @@ -84,9 +84,11 @@ namespace { auto i = fileMap().find( fd ); if ( i != fileMap().end() ) { auto& desc = i->second; - if ( opt.substr(0,4) == "strat" ) { + if ( opt.substr(0, 8) == "strategy" ) { std::string val = RTL::str_lower(value); - if ( val == "fast" ) + if ( val == "default" ) + desc.cparams.emplace_back(std::make_pair( ZSTD_c_strategy, ZSTD_fast )); + else if ( val == "fast" ) desc.cparams.emplace_back(std::make_pair( ZSTD_c_strategy, ZSTD_fast )); else if ( val == "dfast" ) desc.cparams.emplace_back(std::make_pair( 
ZSTD_c_strategy, ZSTD_dfast )); @@ -108,7 +110,7 @@ namespace { goto Error; return 0; } - else if ( opt.substr(0,8) == "compress" ) { + else if ( opt.substr(0, 8) == "compress" ) { std::string val = RTL::str_lower(value); if ( val == "default" ) desc.cparams.emplace_back(std::make_pair( ZSTD_c_compressionLevel, ZSTD_CLEVEL_DEFAULT )); @@ -122,6 +124,9 @@ namespace { } return 0; } + else if ( opt.substr(0,6) == "buffer" ) { + return 0; + } else if ( opt.substr(0, 5) == "apply" ) { if ( desc.compress ) { for( const auto& p : desc.cparams ) @@ -138,9 +143,7 @@ namespace { errno = EBADF; } Error: -#if POSIX_DEBUG ::lib_rtl_output(LIB_RTL_ERROR,"ZSTD error: FAILED to set option %s = %s", option, value); -#endif return -1; } /// ------------------------------------------------------------------------------ diff --git a/Online/Testing/python/OnlineTest/__init__.py b/Online/Testing/python/OnlineTest/__init__.py index 04dbfe819..d3035bd49 100644 --- a/Online/Testing/python/OnlineTest/__init__.py +++ b/Online/Testing/python/OnlineTest/__init__.py @@ -231,7 +231,7 @@ preprocessor = ( r"\[INFO\]\s*execute bash command: genRunner", r"\[INFO\]\s*execute bash command: python", r"\s*\[QMTEST:IGNORE_LINE\]", - r"\s* Working directory: ", + r"\s*Working directory:", r"ToolSvc.SequencerTimer...", r"JobOptionsSvc\s*INFO Job options successfully read", r"JobOptionsSvc\s*INFO Properties are dumped into", diff --git a/bin/build_standalone.sh b/bin/build_standalone.sh index baf83f979..5eee4114a 100755 --- a/bin/build_standalone.sh +++ b/bin/build_standalone.sh @@ -79,14 +79,14 @@ usage() echo " "; echo " "; echo " ===> Example to use the LCG view from ** Gaudi ** (/cvmfs/lhcb.cern.ch): "; - echo " cd /group/online/dataflow/cmtuser/ONLINE/ONLINE_v7r20 "; - echo " . /cvmfs/lhcb.cern.ch/lib/lcg/releases/LCG_101/gcc/11.1.0/x86_64-centos7/setup.sh "; - echo " ./build_standalone.sh --type Debug0 --tag x86_64-centos7-gcc11-dbg --view /cvmfs/lhcb.cern.ch/lib/lcg/releases/LCG_103 -c -B -I"; + echo " cd /group/online/dataflow/cmtuser/ONLINE/ONLINE_v7r25 "; + echo " . /cvmfs/sft.cern.ch/lcg/releases/gcc/13.1.0/x86_64-el9/setup.sh "; + echo " ./build_standalone.sh --type Debug0 --tag x86_64-el9-gcc13-dbg --view /cvmfs/lhcb.cern.ch/lib/lcg/releases/LCG_105a -c -B -I"; echo " "; echo " ===> Example to use the LCG view from ** SFT ** (/cvmfs/sft.cern.ch): "; - echo " cd /group/online/dataflow/cmtuser/ONLINE/ONLINE_v7r20 "; - echo " . /cvmfs/sft.cern.ch/lcg/releases/gcc/11.1.0/x86_64-centos7/setup.sh "; - echo " ./build_standalone.sh --type Debug0 --tag x86_64-centos7-gcc11-dbg --view /cvmfs/sft.cern.ch/lcg/releases/LCG_103 -c -B -I"; + echo " cd /group/online/dataflow/cmtuser/ONLINE/ONLINE_v7r25 "; + echo " . 
/cvmfs/sft.cern.ch/lcg/releases/gcc/13.1.0/x86_64-el9/setup.sh "; + echo " ./build_standalone.sh --type Debug0 --tag x86_64-el9-gcc13-dbg --view /cvmfs/sft.cern.ch/lcg/releases/LCG_105a -c -B -I"; echo " "; exit 1; } diff --git a/cmake/OnlineDependencies.cmake b/cmake/OnlineDependencies.cmake index 89bac04ed..3bbe90721 100644 --- a/cmake/OnlineDependencies.cmake +++ b/cmake/OnlineDependencies.cmake @@ -35,6 +35,7 @@ find_package(Oracle REQUIRED) find_package(PkgConfig REQUIRED) find_package(nlohmann_json REQUIRED) +pkg_check_modules(libzmq libzmq REQUIRED IMPORTED_TARGET) pkg_check_modules(libzmq libzmq REQUIRED IMPORTED_TARGET) pkg_check_modules(libsodium libsodium IMPORTED_TARGET) # optional -- GitLab From c315c05e6c0058d5aca474c5c55a329f13088934 Mon Sep 17 00:00:00 2001 From: Markus Frank <Markus.Frank@cern.ch> Date: Fri, 5 Apr 2024 18:29:22 +0200 Subject: [PATCH 2/6] Add XZ/LZMA compression examples for dataflow and file streaming --- Online/Dataflow/src/Storage/StorageReader.cpp | 11 +- Online/Dataflow/src/Storage/StorageWriter.cpp | 4 +- Online/Dataflow/src/framework/DiskReader.cpp | 18 +- .../GaudiOnlineTests/src/TestCompression.cpp | 14 +- .../dataflow/df_read_mdf_lzma.xml | 22 + .../dataflow/df_read_mdf_lzma_stream.xml | 22 + .../dataflow/df_read_tae_lzma.xml | 22 + .../dataflow/df_read_tae_lzma_stream.xml | 22 + .../dataflow/df_write_mdf_lzma.xml | 23 + .../dataflow/df_write_tae_lzma.xml | 27 + .../dataflow.qms/df_05_write_mdf_lzma.qmt | 30 + .../dataflow.qms/df_06_write_tae_lzma.qmt | 33 + .../df_07_read_mdf_gzip_stream.qmt | 2 +- .../dataflow.qms/df_07_read_mdf_lzma.qmt | 35 + .../df_07_read_mdf_lzma_stream.qmt | 35 + .../dataflow.qms/df_07_read_mdf_zstd.qmt | 2 +- .../df_07_read_mdf_zstd_stream.qmt | 2 +- .../dataflow.qms/df_09_read_tae_lzma.qmt | 35 + .../df_09_read_tae_lzma_stream.qmt | 35 + .../dataflow.qms/df_09_read_tae_zstd.qmt | 14 +- .../refs/BASE/rtl_test_decompression_lzma.ref | 2 +- .../refs/BASE/rtl_test_decompression_zlib.ref | 2 +- .../refs/BASE/rtl_test_decompression_zstd.ref | 2 +- .../CTRL/controller_eval_architecture.ref | 180 +++-- .../CTRL/controller_read_architecture_xml.ref | 330 ++++++---- .../tests/refs/DF/df_read_mdf_gzip_stream.ref | 22 +- .../tests/refs/DF/df_read_mdf_lzma.ref | 82 +++ .../tests/refs/DF/df_read_mdf_lzma_stream.ref | 55 ++ .../tests/refs/DF/df_read_mdf_zstd_stream.ref | 12 +- .../tests/refs/DF/df_read_tae_lzma.ref | 82 +++ .../tests/refs/DF/df_read_tae_lzma_stream.ref | 55 ++ .../tests/refs/DF/df_read_tae_zstd_stream.ref | 10 - .../tests/refs/DF/df_write_mdf_lzma.ref | 59 ++ .../tests/refs/DF/df_write_tae_lzma.ref | 59 ++ Online/OnlineBase/CMakeLists.txt | 2 +- Online/OnlineBase/include/RTL/Compress.h | 18 + Online/OnlineBase/include/RTL/posix.h | 3 + Online/OnlineBase/src/COMPRESS/Compress.cpp | 90 +++ Online/OnlineBase/src/COMPRESS/posix_gzip.cpp | 2 +- Online/OnlineBase/src/COMPRESS/posix_lz4.cpp | 623 ++++++++++++------ Online/OnlineBase/src/COMPRESS/posix_lzma.cpp | 168 +++-- Online/OnlineBase/src/COMPRESS/posix_zstd.cpp | 151 +++-- Online/OnlineBase/src/RTL/posix.cpp | 13 + 43 files changed, 1838 insertions(+), 592 deletions(-) create mode 100644 Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_lzma.xml create mode 100644 Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_lzma_stream.xml create mode 100644 Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_lzma.xml create mode 100644 Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_lzma_stream.xml create mode 100644 
Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_mdf_lzma.xml create mode 100644 Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_tae_lzma.xml create mode 100644 Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_05_write_mdf_lzma.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_06_write_tae_lzma.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_lzma.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_lzma_stream.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_lzma.qmt create mode 100644 Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_lzma_stream.qmt create mode 100644 Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_lzma.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_lzma_stream.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_lzma.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_lzma_stream.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/DF/df_write_mdf_lzma.ref create mode 100644 Online/GaudiOnlineTests/tests/refs/DF/df_write_tae_lzma.ref diff --git a/Online/Dataflow/src/Storage/StorageReader.cpp b/Online/Dataflow/src/Storage/StorageReader.cpp index 4d468b91e..66d1aa0be 100644 --- a/Online/Dataflow/src/Storage/StorageReader.cpp +++ b/Online/Dataflow/src/Storage/StorageReader.cpp @@ -371,15 +371,22 @@ int StorageReader::open_file_posix_raw(const std::string& loc, Buffer& buffer) buffer.data.resize(file_length); std::size_t read_length = input.read(&buffer.data.at(0), file_length); if ( read_length == file_length ) { + const uint8_t* hdr = &buffer.data.at(0); /// ZLIB compression: Check first 3 bytes as described in RFC 1950 /// If YES: on the fly uncompress the stream - if ( buffer.data.at(0) == 0x1f && buffer.data.at(1) == 0x8b ) { + if ( hdr[0] == 0x1f && hdr[1] == 0x8b ) { buffer.data = compress::decompress_gzip(std::move(buffer.data)); } /// Check ZSTD magic word (see RFC 8878 for details) - else if ( *(uint32_t*)&buffer.data.at(0) == 0xFD2FB528 ) { + else if ( *(uint32_t*)hdr == 0xFD2FB528 ) { buffer.data = compress::decompress_zstd(std::move(buffer.data)); } + /// Check 7-zip/XZ file header: lzma compression + /// (see https://py7zr.readthedocs.io/en/latest/archive_format.html for details) + else if ( hdr[0] == 0xFD && hdr[1] == '7' && hdr[2] == 'z' && + hdr[3] == 'X' && hdr[4] == 'Z' && hdr[5] == 0 ) { + buffer.data = compress::decompress_lzma(std::move(buffer.data)); + } ++this->m_filesClosed; input.close(); return DF_SUCCESS; diff --git a/Online/Dataflow/src/Storage/StorageWriter.cpp b/Online/Dataflow/src/Storage/StorageWriter.cpp index b7a84b6b4..2d6cc32ee 100644 --- a/Online/Dataflow/src/Storage/StorageWriter.cpp +++ b/Online/Dataflow/src/Storage/StorageWriter.cpp @@ -127,11 +127,11 @@ public: this->file = RawFile(fn, desc); if ( this->file.openWrite(false) < 0 ) return -1; - else if ( 0 != this->file.set_option("strategy", "default") ) + else if ( 0 != this->file.set_option("strategy", "default") ) return -1; else if( 0 != this->file.set_option("compression", std::to_string(level)) ) return -1; - else if( 0 != this->file.set_option("buffer_length",std::to_string(buff_len)) ) + else if( 0 != this->file.set_option("write_size", std::to_string(buff_len)) ) return -1; else if( 0 != this->file.set_option("apply", "") ) { return -1; diff --git a/Online/Dataflow/src/framework/DiskReader.cpp 
b/Online/Dataflow/src/framework/DiskReader.cpp index f754af871..b7caa6101 100644 --- a/Online/Dataflow/src/framework/DiskReader.cpp +++ b/Online/Dataflow/src/framework/DiskReader.cpp @@ -1336,10 +1336,10 @@ int DiskReader::i_run() { ::sscanf(ptr, "%010d", &runno); } } - uint8_t file_buff[4]; + uint8_t file_buff[6]; if ( current_input.read(file_buff, sizeof(file_buff)) == sizeof(file_buff) ) { std::string fname = current_input.name(); - // ZLIB compression: Check first 3 bytes as described in RFC 1950 + /// ZLIB compression: Check first 3 bytes as described in RFC 1950 if ( file_buff[0] == 0x1f && file_buff[1] == 0x8b && file_buff[2] == Z_DEFLATED ) { current_input.close(); current_input = RawFile(fname, posix_zlib_descriptor()); @@ -1347,7 +1347,7 @@ int DiskReader::i_run() { continue; } } - // ZSTD compression: Check ZSTD magic word (see RFC 8878 for details) + /// ZSTD compression: Check ZSTD magic word (see RFC 8878 for details) else if ( *(uint32_t*)file_buff == 0xFD2FB528 ) { current_input.close(); current_input = RawFile(fname, posix_zstd_descriptor()); @@ -1355,16 +1355,19 @@ int DiskReader::i_run() { continue; } } -#if 0 - // LZMA compression - else if ( file_buff[0] == uint8_t('X') && file_buff[1] == uint8_t('Z') && file_buff[2] == 0 ) { + /// Check 7-zip/XZ file header: lzma compression + /// (see https://py7zr.readthedocs.io/en/latest/archive_format.html for details) + else if ( file_buff[0] == 0xFD && + file_buff[1] == uint8_t('7') && file_buff[2] == uint8_t('z') && + file_buff[3] == uint8_t('X') && file_buff[4] == uint8_t('Z') && + file_buff[5] == 0 ) { current_input.close(); current_input = RawFile(fname, posix_lzma_descriptor()); if ( current_input.open() == -1 ) { continue; } } - // LZ4 compression + /// LZ4 compression else if ( file_buff[0] == uint8_t('L') && file_buff[1] == uint8_t('4') ) { current_input.close(); current_input = RawFile(fname, posix_lz4_descriptor()); @@ -1372,7 +1375,6 @@ int DiskReader::i_run() { continue; } } -#endif else { current_input.position(0); } diff --git a/Online/GaudiOnlineTests/src/TestCompression.cpp b/Online/GaudiOnlineTests/src/TestCompression.cpp index a71ee1ad2..3816d1b65 100644 --- a/Online/GaudiOnlineTests/src/TestCompression.cpp +++ b/Online/GaudiOnlineTests/src/TestCompression.cpp @@ -47,7 +47,7 @@ namespace { cli.call_help(); ::exit(EINVAL); } - input = RTL::str_expand_env(input); + input = RTL::str_expand_env(input); output = RTL::str_expand_env(output); Online::RawFile in(input); Online::RawFile out(output, descriptor); @@ -115,7 +115,7 @@ namespace { cli.call_help(); ::exit(EINVAL); } - input = RTL::str_expand_env(input); + input = RTL::str_expand_env(input); output = RTL::str_expand_env(output); Online::RawFile in(input, descriptor); Online::RawFile out(output); @@ -154,13 +154,15 @@ namespace { if ( !source.empty() ) { Online::RawFile f1(RTL::str_expand_env(source)); Online::RawFile f2(output); + std::string f1_name = fs::path(f1.name()).filename().string(); + std::string f2_name = fs::path(f2.name()).filename().string(); if ( f1.open() > 0 && f2.open() > 0 ) { std::size_t len1 = f1.data_size(); std::size_t len2 = f2.data_size(); if ( len1 != len2 ) { ::lib_rtl_output(LIB_RTL_ERROR, "Decompressed file and source have different size: %s: %ld %s: %ld", - f1.name().c_str(), len1, f2.name().c_str(), len2); + f1_name.c_str(), len1, f2_name.c_str(), len2); goto Error; } std::size_t err_byte = 0; @@ -180,17 +182,17 @@ namespace { if ( err_byte > 0 ) { ::lib_rtl_output(LIB_RTL_ERROR, "Content check differs by %ld frames. 
%s: %d %s: %d", - err_byte, f1.name().c_str(), f1.fileno(), f2.name().c_str(), f2.fileno()); + err_byte, f1_name.c_str(), f1.fileno(), f2_name.c_str(), f2.fileno()); goto Error; } ::lib_rtl_output(LIB_RTL_ALWAYS, "Content check was successful. %s identical to %s", - f1.name().c_str(), f2.name().c_str()); + f1_name.c_str(), f2_name.c_str()); return 0; } ::lib_rtl_output(LIB_RTL_ERROR, "Failed to open files for content check: %s: %d %s: %d", - f1.name().c_str(), f1.fileno(), f2.name().c_str(), f2.fileno()); + f1_name.c_str(), f1.fileno(), f2_name.c_str(), f2.fileno()); return ENOENT; Error: f1.close(); diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_lzma.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_lzma.xml new file mode 100644 index 000000000..4c9989d48 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_lzma.xml @@ -0,0 +1,22 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + + + Run dataflow job in batchbench +--> +<tasks_inventory> + <param type="environment" name="DATA_PREFIX" value="df_output_mdf_lzma_0000"/> + + <include ref="../tasks/MBM.xml"/> + <include ref="../tasks/Passthrough.xml"/> + <include ref="../tasks/StorageFileReader.xml"/> + +</tasks_inventory> diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_lzma_stream.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_lzma_stream.xml new file mode 100644 index 000000000..d7b790ad1 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_mdf_lzma_stream.xml @@ -0,0 +1,22 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ + + Run dataflow job in batchbench +--> +<tasks_inventory> + <param type="environment" name="DATA_PREFIX" value="df_output_mdf_lzma_0000"/> + + <include ref="../tasks/MBM.xml"/> + <include ref="../tasks/Passthrough.xml"/> + <include ref="../tasks/DF-MDF-Reader.xml"/> + +</tasks_inventory> diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_lzma.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_lzma.xml new file mode 100644 index 000000000..b6fb2e718 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_lzma.xml @@ -0,0 +1,22 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + + + Run dataflow job in batchbench +--> +<tasks_inventory> + <param type="environment" name="DATA_PREFIX" value="df_output_tae_lzma_0000"/> + + <include ref="../tasks/MBM.xml"/> + <include ref="../tasks/Passthrough.xml"/> + <include ref="../tasks/StorageFileReader.xml"/> + +</tasks_inventory> diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_lzma_stream.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_lzma_stream.xml new file mode 100644 index 000000000..7cc921af5 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_read_tae_lzma_stream.xml @@ -0,0 +1,22 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + + + Run dataflow job in batchbench +--> +<tasks_inventory> + <param type="environment" name="DATA_PREFIX" value="df_output_tae_lzma_0000"/> + + <include ref="../tasks/MBM.xml"/> + <include ref="../tasks/Passthrough.xml"/> + <include ref="../tasks/DF-MDF-Reader.xml"/> + +</tasks_inventory> diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_mdf_lzma.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_mdf_lzma.xml new file mode 100644 index 000000000..34c7b9641 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_mdf_lzma.xml @@ -0,0 +1,23 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". 
+ + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + + + Run dataflow job in batchbench +--> +<tasks_inventory> + <param type="environment" name="WRITER_FILE_COMPRESSION" value="1"/> + <param type="environment" name="WRITER_COMPRESSION" value="['LZMA',8]"/> + <param type="environment" name="WRITER_FILENAME" value="${DATA_DIR}/df_output_mdf_lzma_$env{RUN}_$env{SEQ}.mdf.xz"/> + + <include ref="../tasks/MBM.xml"/> + <include ref="../tasks/MDFGen.xml"/> + <include ref="../tasks/Writer.xml"/> +</tasks_inventory> diff --git a/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_tae_lzma.xml b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_tae_lzma.xml new file mode 100644 index 000000000..781109fb0 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/architectures/dataflow/df_write_tae_lzma.xml @@ -0,0 +1,27 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + + + Run dataflow job in batchbench +--> +<tasks_inventory> + <param type="environment" name="GEN_NUM_BURSTS" value="200"/> + <param type="environment" name="GEN_PACKING" value="1"/> + <param type="environment" name="GEN_HALF_WINDOW" value="7"/> + <param type="environment" name="WRITER_MAX_EVENTS" value="20"/> + <param type="environment" name="WRITER_FILE_COMPRESSION" value="1"/> + <param type="environment" name="WRITER_COMPRESSION" value="['LZMA',8]"/> + <param type="environment" name="WRITER_FILENAME" value="${DATA_DIR}/df_output_tae_lzma_$env{RUN}_$env{SEQ}.tae.xz"/> + + <include ref="../tasks/MBM.xml"/> + <include ref="../tasks/MDFGen.xml"/> + <include ref="../tasks/Writer.xml"/> +</tasks_inventory> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_05_write_mdf_lzma.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_05_write_mdf_lzma.qmt new file mode 100644 index 000000000..c75af36b5 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_05_write_mdf_lzma.qmt @@ -0,0 +1,30 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>run_testbench.sh</text></argument> + <argument name="args"><set> + <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_write_mdf_lzma.xml</text> + <text>--working-dir=df_write_mdf_lzma</text> + <text>--producer=MDFGen</text> + </set></argument> + <argument name="ignore_stderr"/> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> +<argument name="validator"><text> +self.collect_logs(self._common_tmpdir + '/df_write_mdf_lzma/*.log') +self.validate_log('Writer_0.log') +</text></argument> + <argument name="reference"><text>../refs/DF/df_write_mdf_lzma.ref</text></argument> + <argument name="prerequisites"><set><tuple> + <text>cleanup.cleanup</text><enumeral>PASS</enumeral> + </tuple></set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_06_write_tae_lzma.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_06_write_tae_lzma.qmt new file mode 100644 index 000000000..1ca2c89c6 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_06_write_tae_lzma.qmt @@ -0,0 +1,33 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + + + Run dataflow job in batchbench +--> +<extension class="OnlineTest.Test" kind="test"> +<argument name="program"><text>run_testbench.sh</text></argument> +<argument name="args"><set> + <text>${ONLINETESTBENCH}/testbench.py</text> + <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_write_tae_lzma.xml</text> + <text>--working-dir=df_write_tae_lzma</text> + <text>--producer=MDFGen</text> +</set></argument> +<argument name="ignore_stderr"/> +<argument name="use_temp_dir"><enumeral>true</enumeral></argument> +<argument name="validator"><text> +self.collect_logs(self._common_tmpdir + '/df_write_tae_lzma/*.log') +self.validate_log('Writer_0.log') +</text></argument> + <argument name="reference"><text>../refs/DF/df_write_tae_lzma.ref</text></argument> + <argument name="prerequisites"><set><tuple> + <text>cleanup.cleanup</text><enumeral>PASS</enumeral> + </tuple></set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_gzip_stream.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_gzip_stream.qmt index 6ede3e9c9..1f83c4d89 100644 --- a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_gzip_stream.qmt +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_gzip_stream.qmt @@ -18,7 +18,7 @@ <text>${ONLINETESTBENCH}/testbench.py</text> <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_read_mdf_gzip_stream.xml</text> <text>--working-dir=df_read_mdf_gzip_stream</text> - <text>--producer=StorageFileReader</text> + <text>--producer=Reader</text> </set></argument> <argument name="ignore_stderr"/> <argument name="use_temp_dir"><enumeral>true</enumeral></argument> diff --git 
a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_lzma.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_lzma.qmt new file mode 100644 index 000000000..731a8d521 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_lzma.qmt @@ -0,0 +1,35 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + + + Run dataflow job in batchbench +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>run_testbench.sh</text></argument> + <argument name="args"><set> + <text>${ONLINETESTBENCH}/testbench.py</text> + <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_read_mdf_lzma.xml</text> + <text>--working-dir=df_read_mdf_lzma</text> + <text>--producer=Reader</text> + </set></argument> + <argument name="ignore_stderr"/> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="validator"><text> +self.collect_logs(self._common_tmpdir + '/df_read_mdf_lzma/*.log') +RESULT = '+++ Reader_0.log \n' + result['StorageFileReader_0.log'] + \ + '+++ MBMMON.log \n' + result['MBMMON.log'] +self.validateWithReference(stdout=RESULT) + </text></argument> + <argument name="reference"><text>../refs/DF/df_read_mdf_lzma.ref</text></argument> + <argument name="prerequisites"><set> + <tuple><text>dataflow.df_05_write_mdf_lzma</text><enumeral>PASS</enumeral></tuple> + </set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_lzma_stream.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_lzma_stream.qmt new file mode 100644 index 000000000..8247d14d8 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_lzma_stream.qmt @@ -0,0 +1,35 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ + + Run dataflow job in batchbench +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>run_testbench.sh</text></argument> + <argument name="args"><set> + <text>${ONLINETESTBENCH}/testbench.py</text> + <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_read_mdf_lzma_stream.xml</text> + <text>--working-dir=df_read_mdf_lzma_stream</text> + <text>--producer=StorageFileReader</text> + </set></argument> + <argument name="ignore_stderr"/> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="validator"><text> +self.collect_logs(self._common_tmpdir + '/df_read_mdf_lzma_stream/*.log') +RESULT = '+++ StorageFileReader_0.log \n' + result['Reader_0.log'] + \ + '+++ MBMMON.log \n' + result['MBMMON.log'] +self.validateWithReference(stdout=RESULT) + </text></argument> + <argument name="reference"><text>../refs/DF/df_read_mdf_lzma_stream.ref</text></argument> + <argument name="prerequisites"><set> + <tuple><text>dataflow.df_05_write_mdf_lzma</text><enumeral>PASS</enumeral></tuple> + </set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd.qmt index d82da83c8..61800a119 100644 --- a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd.qmt +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd.qmt @@ -18,7 +18,7 @@ <text>${ONLINETESTBENCH}/testbench.py</text> <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_read_mdf_zstd.xml</text> <text>--working-dir=df_read_mdf_zstd</text> - <text>--producer=StorageFileReader</text> + <text>--producer=Reader</text> </set></argument> <argument name="ignore_stderr"/> <argument name="use_temp_dir"><enumeral>true</enumeral></argument> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd_stream.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd_stream.qmt index b72eb8ec6..2c4f8882e 100644 --- a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd_stream.qmt +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_07_read_mdf_zstd_stream.qmt @@ -24,7 +24,7 @@ <argument name="use_temp_dir"><enumeral>true</enumeral></argument> <argument name="validator"><text> self.collect_logs(self._common_tmpdir + '/df_read_mdf_zstd_stream/*.log') -RESULT = '+++ Reader_0.log \n' + result['Reader_0.log'] + \ +RESULT = '+++ StorageFileReader_0.log \n' + result['Reader_0.log'] + \ '+++ MBMMON.log \n' + result['MBMMON.log'] self.validateWithReference(stdout=RESULT) </text></argument> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_lzma.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_lzma.qmt new file mode 100644 index 000000000..7b69c0780 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_lzma.qmt @@ -0,0 +1,35 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". 
+ + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + + + Run dataflow job in batchbench +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>run_testbench.sh</text></argument> + <argument name="args"><set> + <text>${ONLINETESTBENCH}/testbench.py</text> + <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_read_tae_lzma.xml</text> + <text>--working-dir=df_read_tae_lzma</text> + <text>--producer=StorageFileReader</text> + </set></argument> + <argument name="ignore_stderr"/> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="validator"><text> +self.collect_logs(self._common_tmpdir + '/df_read_tae_lzma/*.log') +RESULT = '+++ StorageFileReader_0.log \n' + result['StorageFileReader_0.log'] + \ + '+++ MBMMON.log \n' + result['MBMMON.log'] +self.validateWithReference(stdout=RESULT) + </text></argument> + <argument name="reference"><text>../refs/DF/df_read_tae_lzma.ref</text></argument> + <argument name="prerequisites"><set> + <tuple><text>dataflow.df_06_write_tae_lzma</text><enumeral>PASS</enumeral></tuple> + </set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_lzma_stream.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_lzma_stream.qmt new file mode 100644 index 000000000..a7cb25bdc --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_lzma_stream.qmt @@ -0,0 +1,35 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. 
+ + + Run dataflow job in batchbench +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>run_testbench.sh</text></argument> + <argument name="args"><set> + <text>${ONLINETESTBENCH}/testbench.py</text> + <text>--architecture=${GAUDIONLINETESTSROOT}/tests/architectures/dataflow/df_read_tae_lzma_stream.xml</text> + <text>--working-dir=df_read_tae_lzma_stream</text> + <text>--producer=StorageFileReader</text> + </set></argument> + <argument name="ignore_stderr"/> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="validator"><text> +self.collect_logs(self._common_tmpdir + '/df_read_tae_lzma_stream/*.log') +RESULT = '+++ Reader_0.log \n' + result['Reader_0.log'] + \ + '+++ MBMMON.log \n' + result['MBMMON.log'] +self.validateWithReference(stdout=RESULT) + </text></argument> + <argument name="reference"><text>../refs/DF/df_read_tae_lzma_stream.ref</text></argument> + <argument name="prerequisites"><set> + <tuple><text>dataflow.df_06_write_tae_lzma</text><enumeral>PASS</enumeral></tuple> + </set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_zstd.qmt b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_zstd.qmt index 77aad70db..7296c7ef3 100644 --- a/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_zstd.qmt +++ b/Online/GaudiOnlineTests/tests/qmtest/dataflow.qms/df_09_read_tae_zstd.qmt @@ -1,16 +1,16 @@ <?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> <!-- -(c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration -This software is distributed under the terms of the GNU General Public -Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". -In applying this licence, CERN does not waive the privileges and immunities -granted to it by virtue of its status as an Intergovernmental Organization -or submit itself to any jurisdiction. + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. -Run dataflow job in batchbench + Run dataflow job in batchbench --> <extension class="OnlineTest.Test" kind="test"> <argument name="program"><text>run_testbench.sh</text></argument> diff --git a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_lzma.ref b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_lzma.ref index 46e5e1638..646850f88 100644 --- a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_lzma.ref +++ b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_lzma.ref @@ -1,3 +1,3 @@ Failed to read input file rtl_test_compression_lzma.output.xz (iret=-1) after a total of 0x######## bytes read. Decompressed input file rtl_test_compression_lzma.output.xz [0x######## uncompressed / 0x######## compressed bytes] to rtl_test_compression_lzma.output with 0x######## bytes. -Content check was successful. ../../../../InstallArea/x86_64_v2-el9-gcc13-do0/lib/libProperties.so identical to rtl_test_compression_lzma.output +Content check was successful. 
libProperties.so identical to rtl_test_compression_lzma.output diff --git a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zlib.ref b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zlib.ref index ba14af31a..00cd1bc7b 100644 --- a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zlib.ref +++ b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zlib.ref @@ -1,3 +1,3 @@ Failed to read input file rtl_test_compression_zlib.output.gz (iret=0) after a total of 0x######## bytes read. Decompressed input file rtl_test_compression_zlib.output.gz [0x######## uncompressed / 0x######## compressed bytes] to rtl_test_compression_zlib.output with 0x######## bytes. -Content check was successful. ../../../../InstallArea/x86_64_v2-el9-gcc13-do0/lib/libProperties.so identical to rtl_test_compression_zlib.output +Content check was successful. libProperties.so identical to rtl_test_compression_zlib.output diff --git a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zstd.ref b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zstd.ref index f743e3b01..345398b62 100644 --- a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zstd.ref +++ b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_decompression_zstd.ref @@ -1,3 +1,3 @@ Failed to read input file rtl_test_compression_zstd.output.zst (iret=-1) after a total of 0x######## bytes read. Decompressed input file rtl_test_compression_zstd.output.zst [0x######## uncompressed / 0x######## compressed bytes] to rtl_test_compression_zstd.output with 0x######## bytes. -Content check was successful. ../../../../InstallArea/x86_64_v2-el9-gcc13-do0/lib/libProperties.so identical to rtl_test_compression_zstd.output +Content check was successful. 
libProperties.so identical to rtl_test_compression_zstd.output diff --git a/Online/GaudiOnlineTests/tests/refs/CTRL/controller_eval_architecture.ref b/Online/GaudiOnlineTests/tests/refs/CTRL/controller_eval_architecture.ref index fd4b87f23..07a7091dd 100644 --- a/Online/GaudiOnlineTests/tests/refs/CTRL/controller_eval_architecture.ref +++ b/Online/GaudiOnlineTests/tests/refs/CTRL/controller_eval_architecture.ref @@ -210,6 +210,42 @@ df_read_mdf_hash32.xml | I/O parameters: df_read_mdf_hash32.xml | TMO: any=30 load=200 df_read_mdf_hash32.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 +--------------------------------------------------------------- +| Loading architecture file: "df_read_mdf_lzma.xml" ++--------------------------------------------------------------- +df_read_mdf_lzma.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_mdf_lzma.xml | Command:runTask.sh +df_read_mdf_lzma.xml | I/O parameters: +df_read_mdf_lzma.xml | TMO: any=20 load=200 +df_read_mdf_lzma.xml | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_read_mdf_lzma.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_mdf_lzma.xml | Command:runTask.sh +df_read_mdf_lzma.xml | I/O parameters: +df_read_mdf_lzma.xml | TMO: any=30 load=200 +df_read_mdf_lzma.xml | FMC: define=-D nDF_DEBUG=1 wd=-w ${WORKING_DIR} +df_read_mdf_lzma.xml +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 +df_read_mdf_lzma.xml | Command:runTask.sh +df_read_mdf_lzma.xml | I/O parameters: +df_read_mdf_lzma.xml | TMO: any=30 load=200 +df_read_mdf_lzma.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- +| Loading architecture file: "df_read_mdf_lzma_stream.xml" ++--------------------------------------------------------------- +df_read_mdf_lzma_stream.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_mdf_lzma_stream.xml | Command:runTask.sh +df_read_mdf_lzma_stream.xml | I/O parameters: +df_read_mdf_lzma_stream.xml | TMO: any=20 load=200 +df_read_mdf_lzma_stream.xml | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_read_mdf_lzma_stream.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_mdf_lzma_stream.xml | Command:runTask.sh +df_read_mdf_lzma_stream.xml | I/O parameters: +df_read_mdf_lzma_stream.xml | TMO: any=30 load=200 +df_read_mdf_lzma_stream.xml | FMC: define=-D nDF_DEBUG=1 wd=-w ${WORKING_DIR} +df_read_mdf_lzma_stream.xml +-- Task:Reader [online,onliners] VIP: false Instances: 1 +df_read_mdf_lzma_stream.xml | Command:runTask.sh +df_read_mdf_lzma_stream.xml | I/O parameters: +df_read_mdf_lzma_stream.xml | TMO: any=30 load=200 +df_read_mdf_lzma_stream.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D 
ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- | Loading architecture file: "df_read_mdf_zstd.xml" +--------------------------------------------------------------- df_read_mdf_zstd.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 @@ -228,24 +264,6 @@ df_read_mdf_zstd.xml | I/O parameters: df_read_mdf_zstd.xml | TMO: any=30 load=200 df_read_mdf_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 +--------------------------------------------------------------- -| Loading architecture file: "df_read_mdf_zstd.xml~" -+--------------------------------------------------------------- -df_read_mdf_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 -df_read_mdf_zstd.xml~ | Command:runTask.sh -df_read_mdf_zstd.xml~ | I/O parameters: -df_read_mdf_zstd.xml~ | TMO: any=20 load=200 -df_read_mdf_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 -df_read_mdf_zstd.xml~ +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 -df_read_mdf_zstd.xml~ | Command:runTask.sh -df_read_mdf_zstd.xml~ | I/O parameters: -df_read_mdf_zstd.xml~ | TMO: any=30 load=200 -df_read_mdf_zstd.xml~ | FMC: define=-D nDF_DEBUG=1 wd=-w ${WORKING_DIR} -df_read_mdf_zstd.xml~ +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 -df_read_mdf_zstd.xml~ | Command:runTask.sh -df_read_mdf_zstd.xml~ | I/O parameters: -df_read_mdf_zstd.xml~ | TMO: any=30 load=200 -df_read_mdf_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 -+--------------------------------------------------------------- | Loading architecture file: "df_read_mdf_zstd_stream.xml" +--------------------------------------------------------------- df_read_mdf_zstd_stream.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 @@ -336,6 +354,42 @@ df_read_tae_gzip_stream.xml | I/O parameters: df_read_tae_gzip_stream.xml | TMO: any=30 load=200 df_read_tae_gzip_stream.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 +--------------------------------------------------------------- +| Loading architecture file: "df_read_tae_lzma.xml" ++--------------------------------------------------------------- +df_read_tae_lzma.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_tae_lzma.xml | Command:runTask.sh +df_read_tae_lzma.xml | I/O parameters: +df_read_tae_lzma.xml | TMO: any=20 load=200 +df_read_tae_lzma.xml | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_read_tae_lzma.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_tae_lzma.xml | Command:runTask.sh +df_read_tae_lzma.xml 
| I/O parameters: +df_read_tae_lzma.xml | TMO: any=30 load=200 +df_read_tae_lzma.xml | FMC: define=-D nDF_DEBUG=1 wd=-w ${WORKING_DIR} +df_read_tae_lzma.xml +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 +df_read_tae_lzma.xml | Command:runTask.sh +df_read_tae_lzma.xml | I/O parameters: +df_read_tae_lzma.xml | TMO: any=30 load=200 +df_read_tae_lzma.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- +| Loading architecture file: "df_read_tae_lzma_stream.xml" ++--------------------------------------------------------------- +df_read_tae_lzma_stream.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_tae_lzma_stream.xml | Command:runTask.sh +df_read_tae_lzma_stream.xml | I/O parameters: +df_read_tae_lzma_stream.xml | TMO: any=20 load=200 +df_read_tae_lzma_stream.xml | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_read_tae_lzma_stream.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_tae_lzma_stream.xml | Command:runTask.sh +df_read_tae_lzma_stream.xml | I/O parameters: +df_read_tae_lzma_stream.xml | TMO: any=30 load=200 +df_read_tae_lzma_stream.xml | FMC: define=-D nDF_DEBUG=1 wd=-w ${WORKING_DIR} +df_read_tae_lzma_stream.xml +-- Task:Reader [online,onliners] VIP: false Instances: 1 +df_read_tae_lzma_stream.xml | Command:runTask.sh +df_read_tae_lzma_stream.xml | I/O parameters: +df_read_tae_lzma_stream.xml | TMO: any=30 load=200 +df_read_tae_lzma_stream.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- | Loading architecture file: "df_read_tae_zstd.xml" +--------------------------------------------------------------- df_read_tae_zstd.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 @@ -354,24 +408,6 @@ df_read_tae_zstd.xml | I/O parameters: df_read_tae_zstd.xml | TMO: any=30 load=200 df_read_tae_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 +--------------------------------------------------------------- -| Loading architecture file: "df_read_tae_zstd.xml~" -+--------------------------------------------------------------- -df_read_tae_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 -df_read_tae_zstd.xml~ | Command:runTask.sh -df_read_tae_zstd.xml~ | I/O parameters: -df_read_tae_zstd.xml~ | TMO: any=20 load=200 -df_read_tae_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 -df_read_tae_zstd.xml~ +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 -df_read_tae_zstd.xml~ | Command:runTask.sh -df_read_tae_zstd.xml~ | I/O parameters: -df_read_tae_zstd.xml~ | TMO: any=30 load=200 
-df_read_tae_zstd.xml~ | FMC: define=-D nDF_DEBUG=1 wd=-w ${WORKING_DIR} -df_read_tae_zstd.xml~ +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 -df_read_tae_zstd.xml~ | Command:runTask.sh -df_read_tae_zstd.xml~ | I/O parameters: -df_read_tae_zstd.xml~ | TMO: any=30 load=200 -df_read_tae_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D ARG1=directory=[os.getenv('DATA_DIR')] define=-D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') define=-D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) define=-D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) define=-D ARG5=,checksum=os.getenv('DATA_CHECKSUM') define=-D noDF_DEBUG=1 -+--------------------------------------------------------------- | Loading architecture file: "df_read_tae_zstd_stream.xml" +--------------------------------------------------------------- df_read_tae_zstd_stream.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 @@ -498,6 +534,24 @@ df_write_mdf_hash32.xml | I/O parameters: df_write_mdf_hash32.xml | TMO: any=30 load=200 df_write_mdf_hash32.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 +--------------------------------------------------------------- +| Loading architecture file: "df_write_mdf_lzma.xml" ++--------------------------------------------------------------- +df_write_mdf_lzma.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_write_mdf_lzma.xml | Command:runTask.sh +df_write_mdf_lzma.xml | I/O parameters: +df_write_mdf_lzma.xml | TMO: any=20 load=200 +df_write_mdf_lzma.xml | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_write_mdf_lzma.xml +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 +df_write_mdf_lzma.xml | Command:runTask.sh +df_write_mdf_lzma.xml | I/O parameters: +df_write_mdf_lzma.xml | TMO: any=30 load=200 +df_write_mdf_lzma.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) define=-D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) define=-D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) define=-D nDF_DEBUG=1 +df_write_mdf_lzma.xml +-- Task:Writer [online,onliners] VIP: false Instances: 1 +df_write_mdf_lzma.xml | Command:runTask.sh +df_write_mdf_lzma.xml | I/O parameters: +df_write_mdf_lzma.xml | TMO: any=30 load=200 +df_write_mdf_lzma.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- | Loading architecture file: "df_write_mdf_zstd.xml" +--------------------------------------------------------------- df_write_mdf_zstd.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 @@ -516,24 +570,6 @@ df_write_mdf_zstd.xml | I/O parameters: df_write_mdf_zstd.xml | TMO: any=30 load=200 df_write_mdf_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D 
ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 +--------------------------------------------------------------- -| Loading architecture file: "df_write_mdf_zstd.xml~" -+--------------------------------------------------------------- -df_write_mdf_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 -df_write_mdf_zstd.xml~ | Command:runTask.sh -df_write_mdf_zstd.xml~ | I/O parameters: -df_write_mdf_zstd.xml~ | TMO: any=20 load=200 -df_write_mdf_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 -df_write_mdf_zstd.xml~ +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 -df_write_mdf_zstd.xml~ | Command:runTask.sh -df_write_mdf_zstd.xml~ | I/O parameters: -df_write_mdf_zstd.xml~ | TMO: any=30 load=200 -df_write_mdf_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) define=-D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) define=-D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) define=-D nDF_DEBUG=1 -df_write_mdf_zstd.xml~ +-- Task:Writer [online,onliners] VIP: false Instances: 1 -df_write_mdf_zstd.xml~ | Command:runTask.sh -df_write_mdf_zstd.xml~ | I/O parameters: -df_write_mdf_zstd.xml~ | TMO: any=30 load=200 -df_write_mdf_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 -+--------------------------------------------------------------- | Loading architecture file: "df_write_tae.xml" +--------------------------------------------------------------- df_write_tae.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 @@ -588,6 +624,24 @@ df_write_tae_gzip.xml | I/O parameters: df_write_tae_gzip.xml | TMO: any=30 load=200 df_write_tae_gzip.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 +--------------------------------------------------------------- +| Loading architecture file: "df_write_tae_lzma.xml" ++--------------------------------------------------------------- +df_write_tae_lzma.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_write_tae_lzma.xml | Command:runTask.sh +df_write_tae_lzma.xml | I/O parameters: +df_write_tae_lzma.xml | TMO: any=20 load=200 +df_write_tae_lzma.xml | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 +df_write_tae_lzma.xml +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 +df_write_tae_lzma.xml | Command:runTask.sh +df_write_tae_lzma.xml | I/O parameters: +df_write_tae_lzma.xml | TMO: any=30 load=200 +df_write_tae_lzma.xml 
| FMC: wd=-w ${WORKING_DIR} define=-D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) define=-D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) define=-D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) define=-D nDF_DEBUG=1 +df_write_tae_lzma.xml +-- Task:Writer [online,onliners] VIP: false Instances: 1 +df_write_tae_lzma.xml | Command:runTask.sh +df_write_tae_lzma.xml | I/O parameters: +df_write_tae_lzma.xml | TMO: any=30 load=200 +df_write_tae_lzma.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 ++--------------------------------------------------------------- | Loading architecture file: "df_write_tae_zstd.xml" +--------------------------------------------------------------- df_write_tae_zstd.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 @@ -605,21 +659,3 @@ df_write_tae_zstd.xml | Command:runTask.sh df_write_tae_zstd.xml | I/O parameters: df_write_tae_zstd.xml | TMO: any=30 load=200 df_write_tae_zstd.xml | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 -+--------------------------------------------------------------- -| Loading architecture file: "df_write_tae_zstd.xml~" -+--------------------------------------------------------------- -df_write_tae_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 -df_write_tae_zstd.xml~ | Command:runTask.sh -df_write_tae_zstd.xml~ | I/O parameters: -df_write_tae_zstd.xml~ | TMO: any=20 load=200 -df_write_tae_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D noDF_DEBUG=1 -df_write_tae_zstd.xml~ +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 -df_write_tae_zstd.xml~ | Command:runTask.sh -df_write_tae_zstd.xml~ | I/O parameters: -df_write_tae_zstd.xml~ | TMO: any=30 load=200 -df_write_tae_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) define=-D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) define=-D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) define=-D nDF_DEBUG=1 -df_write_tae_zstd.xml~ +-- Task:Writer [online,onliners] VIP: false Instances: 1 -df_write_tae_zstd.xml~ | Command:runTask.sh -df_write_tae_zstd.xml~ | I/O parameters: -df_write_tae_zstd.xml~ | TMO: any=30 load=200 -df_write_tae_zstd.xml~ | FMC: wd=-w ${WORKING_DIR} define=-D ARG0=os.getenv('WRITER_FILENAME') define=-D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) define=-D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) define=-D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) define=-D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') define=-D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) define=-D noDF_DEBUG=1 diff --git a/Online/GaudiOnlineTests/tests/refs/CTRL/controller_read_architecture_xml.ref 
b/Online/GaudiOnlineTests/tests/refs/CTRL/controller_read_architecture_xml.ref index 9612c47f1..770e4b86d 100644 --- a/Online/GaudiOnlineTests/tests/refs/CTRL/controller_read_architecture_xml.ref +++ b/Online/GaudiOnlineTests/tests/refs/CTRL/controller_read_architecture_xml.ref @@ -382,6 +382,72 @@ df_read_mdf_hash32.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WO df_read_mdf_hash32.xml +----------------------------------------------------------------------------------- df_read_mdf_hash32.xml +----------------------------------------------------------------------------------- +--------------------------------------------------------------- +| Loading architecture file: "df_read_mdf_lzma.xml" ++--------------------------------------------------------------- +df_read_mdf_lzma.xml | Include: ../tasks/MBM.xml +df_read_mdf_lzma.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_mdf_lzma.xml | Command:runTask.sh +| IO: df_read_mdf_lzma.xml | I/O parameters: +df_read_mdf_lzma.xml | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_read_mdf_lzma.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_read_mdf_lzma.xml +----------------------------------------------------------------------------------- +df_read_mdf_lzma.xml +----------------------------------------------------------------------------------- +df_read_mdf_lzma.xml | Include: ../tasks/Passthrough.xml +df_read_mdf_lzma.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_mdf_lzma.xml | Command:runTask.sh +| IO: df_read_mdf_lzma.xml | I/O parameters: +df_read_mdf_lzma.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_mdf_lzma.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -D nDF_DEBUG=1 -w ${WORKING_DIR} +df_read_mdf_lzma.xml +----------------------------------------------------------------------------------- +df_read_mdf_lzma.xml +----------------------------------------------------------------------------------- +df_read_mdf_lzma.xml | Include: ../tasks/StorageFileReader.xml +df_read_mdf_lzma.xml +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 +df_read_mdf_lzma.xml | Command:runTask.sh +| IO: df_read_mdf_lzma.xml | I/O parameters: +df_read_mdf_lzma.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_mdf_lzma.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG1=directory=[os.getenv('DATA_DIR')] -D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') -D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) -D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) -D ARG5=,checksum=os.getenv('DATA_CHECKSUM') -D noDF_DEBUG=1 +df_read_mdf_lzma.xml +----------------------------------------------------------------------------------- +df_read_mdf_lzma.xml +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- +| Loading architecture file: "df_read_mdf_lzma_stream.xml" ++--------------------------------------------------------------- +df_read_mdf_lzma_stream.xml | Include: ../tasks/MBM.xml +df_read_mdf_lzma_stream.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_mdf_lzma_stream.xml | Command:runTask.sh +| IO: df_read_mdf_lzma_stream.xml | I/O parameters: +df_read_mdf_lzma_stream.xml | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_read_mdf_lzma_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_read_mdf_lzma_stream.xml 
+----------------------------------------------------------------------------------- +df_read_mdf_lzma_stream.xml +----------------------------------------------------------------------------------- +df_read_mdf_lzma_stream.xml | Include: ../tasks/Passthrough.xml +df_read_mdf_lzma_stream.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_mdf_lzma_stream.xml | Command:runTask.sh +| IO: df_read_mdf_lzma_stream.xml | I/O parameters: +df_read_mdf_lzma_stream.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_mdf_lzma_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -D nDF_DEBUG=1 -w ${WORKING_DIR} +df_read_mdf_lzma_stream.xml +----------------------------------------------------------------------------------- +df_read_mdf_lzma_stream.xml +----------------------------------------------------------------------------------- +df_read_mdf_lzma_stream.xml | Include: ../tasks/DF-MDF-Reader.xml +df_read_mdf_lzma_stream.xml +-- Task:Reader [online,onliners] VIP: false Instances: 1 +df_read_mdf_lzma_stream.xml | Command:runTask.sh +| IO: df_read_mdf_lzma_stream.xml | I/O parameters: +df_read_mdf_lzma_stream.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_mdf_lzma_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG1=directory=[os.getenv('DATA_DIR')] -D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') -D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) -D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) -D ARG5=,checksum=os.getenv('DATA_CHECKSUM') -D noDF_DEBUG=1 +df_read_mdf_lzma_stream.xml +----------------------------------------------------------------------------------- +df_read_mdf_lzma_stream.xml +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- | Loading architecture file: "df_read_mdf_zstd.xml" +--------------------------------------------------------------- df_read_mdf_zstd.xml | Include: ../tasks/MBM.xml @@ -415,39 +481,6 @@ df_read_mdf_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORK df_read_mdf_zstd.xml +----------------------------------------------------------------------------------- df_read_mdf_zstd.xml +----------------------------------------------------------------------------------- +--------------------------------------------------------------- -| Loading architecture file: "df_read_mdf_zstd.xml~" -+--------------------------------------------------------------- -df_read_mdf_zstd.xml~ | Include: ../tasks/MBM.xml -df_read_mdf_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 -df_read_mdf_zstd.xml~ | Command:runTask.sh -| IO: df_read_mdf_zstd.xml~ | I/O parameters: -df_read_mdf_zstd.xml~ | TMO: - Timeout: Any = 20 - Timeout: load = 200 -df_read_mdf_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 -df_read_mdf_zstd.xml~ +----------------------------------------------------------------------------------- -df_read_mdf_zstd.xml~ +----------------------------------------------------------------------------------- -df_read_mdf_zstd.xml~ | Include: ../tasks/Passthrough.xml -df_read_mdf_zstd.xml~ +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 -df_read_mdf_zstd.xml~ | Command:runTask.sh -| IO: df_read_mdf_zstd.xml~ | I/O parameters: -df_read_mdf_zstd.xml~ | TMO: - Timeout: Any = 30 - Timeout: load = 200 -df_read_mdf_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -D nDF_DEBUG=1 -w ${WORKING_DIR} -df_read_mdf_zstd.xml~ 
+----------------------------------------------------------------------------------- -df_read_mdf_zstd.xml~ +----------------------------------------------------------------------------------- -df_read_mdf_zstd.xml~ | Include: ../tasks/StorageFileReader.xml -df_read_mdf_zstd.xml~ +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 -df_read_mdf_zstd.xml~ | Command:runTask.sh -| IO: df_read_mdf_zstd.xml~ | I/O parameters: -df_read_mdf_zstd.xml~ | TMO: - Timeout: Any = 30 - Timeout: load = 200 -df_read_mdf_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG1=directory=[os.getenv('DATA_DIR')] -D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') -D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) -D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) -D ARG5=,checksum=os.getenv('DATA_CHECKSUM') -D noDF_DEBUG=1 -df_read_mdf_zstd.xml~ +----------------------------------------------------------------------------------- -df_read_mdf_zstd.xml~ +----------------------------------------------------------------------------------- -+--------------------------------------------------------------- | Loading architecture file: "df_read_mdf_zstd_stream.xml" +--------------------------------------------------------------- df_read_mdf_zstd_stream.xml | Include: ../tasks/MBM.xml @@ -613,6 +646,72 @@ df_read_tae_gzip_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w df_read_tae_gzip_stream.xml +----------------------------------------------------------------------------------- df_read_tae_gzip_stream.xml +----------------------------------------------------------------------------------- +--------------------------------------------------------------- +| Loading architecture file: "df_read_tae_lzma.xml" ++--------------------------------------------------------------- +df_read_tae_lzma.xml | Include: ../tasks/MBM.xml +df_read_tae_lzma.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_tae_lzma.xml | Command:runTask.sh +| IO: df_read_tae_lzma.xml | I/O parameters: +df_read_tae_lzma.xml | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_read_tae_lzma.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_read_tae_lzma.xml +----------------------------------------------------------------------------------- +df_read_tae_lzma.xml +----------------------------------------------------------------------------------- +df_read_tae_lzma.xml | Include: ../tasks/Passthrough.xml +df_read_tae_lzma.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_tae_lzma.xml | Command:runTask.sh +| IO: df_read_tae_lzma.xml | I/O parameters: +df_read_tae_lzma.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_tae_lzma.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -D nDF_DEBUG=1 -w ${WORKING_DIR} +df_read_tae_lzma.xml +----------------------------------------------------------------------------------- +df_read_tae_lzma.xml +----------------------------------------------------------------------------------- +df_read_tae_lzma.xml | Include: ../tasks/StorageFileReader.xml +df_read_tae_lzma.xml +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 +df_read_tae_lzma.xml | Command:runTask.sh +| IO: df_read_tae_lzma.xml | I/O parameters: +df_read_tae_lzma.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_tae_lzma.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG1=directory=[os.getenv('DATA_DIR')] -D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') -D 
ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) -D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) -D ARG5=,checksum=os.getenv('DATA_CHECKSUM') -D noDF_DEBUG=1 +df_read_tae_lzma.xml +----------------------------------------------------------------------------------- +df_read_tae_lzma.xml +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- +| Loading architecture file: "df_read_tae_lzma_stream.xml" ++--------------------------------------------------------------- +df_read_tae_lzma_stream.xml | Include: ../tasks/MBM.xml +df_read_tae_lzma_stream.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_read_tae_lzma_stream.xml | Command:runTask.sh +| IO: df_read_tae_lzma_stream.xml | I/O parameters: +df_read_tae_lzma_stream.xml | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_read_tae_lzma_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_read_tae_lzma_stream.xml +----------------------------------------------------------------------------------- +df_read_tae_lzma_stream.xml +----------------------------------------------------------------------------------- +df_read_tae_lzma_stream.xml | Include: ../tasks/Passthrough.xml +df_read_tae_lzma_stream.xml +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 +df_read_tae_lzma_stream.xml | Command:runTask.sh +| IO: df_read_tae_lzma_stream.xml | I/O parameters: +df_read_tae_lzma_stream.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_tae_lzma_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -D nDF_DEBUG=1 -w ${WORKING_DIR} +df_read_tae_lzma_stream.xml +----------------------------------------------------------------------------------- +df_read_tae_lzma_stream.xml +----------------------------------------------------------------------------------- +df_read_tae_lzma_stream.xml | Include: ../tasks/DF-MDF-Reader.xml +df_read_tae_lzma_stream.xml +-- Task:Reader [online,onliners] VIP: false Instances: 1 +df_read_tae_lzma_stream.xml | Command:runTask.sh +| IO: df_read_tae_lzma_stream.xml | I/O parameters: +df_read_tae_lzma_stream.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_read_tae_lzma_stream.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG1=directory=[os.getenv('DATA_DIR')] -D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') -D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) -D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) -D ARG5=,checksum=os.getenv('DATA_CHECKSUM') -D noDF_DEBUG=1 +df_read_tae_lzma_stream.xml +----------------------------------------------------------------------------------- +df_read_tae_lzma_stream.xml +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- | Loading architecture file: "df_read_tae_zstd.xml" +--------------------------------------------------------------- df_read_tae_zstd.xml | Include: ../tasks/MBM.xml @@ -646,39 +745,6 @@ df_read_tae_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORK df_read_tae_zstd.xml +----------------------------------------------------------------------------------- df_read_tae_zstd.xml +----------------------------------------------------------------------------------- +--------------------------------------------------------------- -| Loading architecture file: "df_read_tae_zstd.xml~" -+--------------------------------------------------------------- 
-df_read_tae_zstd.xml~ | Include: ../tasks/MBM.xml -df_read_tae_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 -df_read_tae_zstd.xml~ | Command:runTask.sh -| IO: df_read_tae_zstd.xml~ | I/O parameters: -df_read_tae_zstd.xml~ | TMO: - Timeout: Any = 20 - Timeout: load = 200 -df_read_tae_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 -df_read_tae_zstd.xml~ +----------------------------------------------------------------------------------- -df_read_tae_zstd.xml~ +----------------------------------------------------------------------------------- -df_read_tae_zstd.xml~ | Include: ../tasks/Passthrough.xml -df_read_tae_zstd.xml~ +-- Task:Passthrough [online,onliners] VIP: false Instances: 1 -df_read_tae_zstd.xml~ | Command:runTask.sh -| IO: df_read_tae_zstd.xml~ | I/O parameters: -df_read_tae_zstd.xml~ | TMO: - Timeout: Any = 30 - Timeout: load = 200 -df_read_tae_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -D nDF_DEBUG=1 -w ${WORKING_DIR} -df_read_tae_zstd.xml~ +----------------------------------------------------------------------------------- -df_read_tae_zstd.xml~ +----------------------------------------------------------------------------------- -df_read_tae_zstd.xml~ | Include: ../tasks/StorageFileReader.xml -df_read_tae_zstd.xml~ +-- Task:StorageFileReader [online,onliners] VIP: false Instances: 1 -df_read_tae_zstd.xml~ | Command:runTask.sh -| IO: df_read_tae_zstd.xml~ | I/O parameters: -df_read_tae_zstd.xml~ | TMO: - Timeout: Any = 30 - Timeout: load = 200 -df_read_tae_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG1=directory=[os.getenv('DATA_DIR')] -D ARG2=,prefix=os.getenv('DATA_PREFIX','df_output_mdf_0000') -D ARG3=,delay=int(os.getenv('READER_MUDELAY',0)) -D ARG4=,wait=int(os.getenv('READER_WAIT_START',5)) -D ARG5=,checksum=os.getenv('DATA_CHECKSUM') -D noDF_DEBUG=1 -df_read_tae_zstd.xml~ +----------------------------------------------------------------------------------- -df_read_tae_zstd.xml~ +----------------------------------------------------------------------------------- -+--------------------------------------------------------------- | Loading architecture file: "df_read_tae_zstd_stream.xml" +--------------------------------------------------------------- df_read_tae_zstd_stream.xml | Include: ../tasks/MBM.xml @@ -910,6 +976,39 @@ df_write_mdf_hash32.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${W df_write_mdf_hash32.xml +----------------------------------------------------------------------------------- df_write_mdf_hash32.xml +----------------------------------------------------------------------------------- +--------------------------------------------------------------- +| Loading architecture file: "df_write_mdf_lzma.xml" ++--------------------------------------------------------------- +df_write_mdf_lzma.xml | Include: ../tasks/MBM.xml +df_write_mdf_lzma.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_write_mdf_lzma.xml | Command:runTask.sh +| IO: df_write_mdf_lzma.xml | I/O parameters: +df_write_mdf_lzma.xml | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_write_mdf_lzma.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_write_mdf_lzma.xml +----------------------------------------------------------------------------------- +df_write_mdf_lzma.xml +----------------------------------------------------------------------------------- +df_write_mdf_lzma.xml | Include: ../tasks/MDFGen.xml +df_write_mdf_lzma.xml +-- 
Task:MDFGen [online,onliners] VIP: false Instances: 1 +df_write_mdf_lzma.xml | Command:runTask.sh +| IO: df_write_mdf_lzma.xml | I/O parameters: +df_write_mdf_lzma.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_write_mdf_lzma.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) -D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) -D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) -D nDF_DEBUG=1 +df_write_mdf_lzma.xml +----------------------------------------------------------------------------------- +df_write_mdf_lzma.xml +----------------------------------------------------------------------------------- +df_write_mdf_lzma.xml | Include: ../tasks/Writer.xml +df_write_mdf_lzma.xml +-- Task:Writer [online,onliners] VIP: false Instances: 1 +df_write_mdf_lzma.xml | Command:runTask.sh +| IO: df_write_mdf_lzma.xml | I/O parameters: +df_write_mdf_lzma.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_write_mdf_lzma.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=os.getenv('WRITER_FILENAME') -D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) -D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) -D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) -D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') -D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) -D noDF_DEBUG=1 +df_write_mdf_lzma.xml +----------------------------------------------------------------------------------- +df_write_mdf_lzma.xml +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- | Loading architecture file: "df_write_mdf_zstd.xml" +--------------------------------------------------------------- df_write_mdf_zstd.xml | Include: ../tasks/MBM.xml @@ -943,39 +1042,6 @@ df_write_mdf_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WOR df_write_mdf_zstd.xml +----------------------------------------------------------------------------------- df_write_mdf_zstd.xml +----------------------------------------------------------------------------------- +--------------------------------------------------------------- -| Loading architecture file: "df_write_mdf_zstd.xml~" -+--------------------------------------------------------------- -df_write_mdf_zstd.xml~ | Include: ../tasks/MBM.xml -df_write_mdf_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 -df_write_mdf_zstd.xml~ | Command:runTask.sh -| IO: df_write_mdf_zstd.xml~ | I/O parameters: -df_write_mdf_zstd.xml~ | TMO: - Timeout: Any = 20 - Timeout: load = 200 -df_write_mdf_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 -df_write_mdf_zstd.xml~ +----------------------------------------------------------------------------------- -df_write_mdf_zstd.xml~ +----------------------------------------------------------------------------------- -df_write_mdf_zstd.xml~ | Include: ../tasks/MDFGen.xml -df_write_mdf_zstd.xml~ +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 -df_write_mdf_zstd.xml~ | Command:runTask.sh -| IO: df_write_mdf_zstd.xml~ | I/O parameters: -df_write_mdf_zstd.xml~ | TMO: - Timeout: Any = 30 - Timeout: load = 200 -df_write_mdf_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) -D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) -D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) -D 
nDF_DEBUG=1 -df_write_mdf_zstd.xml~ +----------------------------------------------------------------------------------- -df_write_mdf_zstd.xml~ +----------------------------------------------------------------------------------- -df_write_mdf_zstd.xml~ | Include: ../tasks/Writer.xml -df_write_mdf_zstd.xml~ +-- Task:Writer [online,onliners] VIP: false Instances: 1 -df_write_mdf_zstd.xml~ | Command:runTask.sh -| IO: df_write_mdf_zstd.xml~ | I/O parameters: -df_write_mdf_zstd.xml~ | TMO: - Timeout: Any = 30 - Timeout: load = 200 -df_write_mdf_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=os.getenv('WRITER_FILENAME') -D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) -D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) -D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) -D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') -D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) -D noDF_DEBUG=1 -df_write_mdf_zstd.xml~ +----------------------------------------------------------------------------------- -df_write_mdf_zstd.xml~ +----------------------------------------------------------------------------------- -+--------------------------------------------------------------- | Loading architecture file: "df_write_tae.xml" +--------------------------------------------------------------- df_write_tae.xml | Include: ../tasks/MBM.xml @@ -1075,6 +1141,39 @@ df_write_tae_gzip.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WOR df_write_tae_gzip.xml +----------------------------------------------------------------------------------- df_write_tae_gzip.xml +----------------------------------------------------------------------------------- +--------------------------------------------------------------- +| Loading architecture file: "df_write_tae_lzma.xml" ++--------------------------------------------------------------- +df_write_tae_lzma.xml | Include: ../tasks/MBM.xml +df_write_tae_lzma.xml +-- Task:MBM [online,onliners] VIP: false Instances: 1 +df_write_tae_lzma.xml | Command:runTask.sh +| IO: df_write_tae_lzma.xml | I/O parameters: +df_write_tae_lzma.xml | TMO: + Timeout: Any = 20 + Timeout: load = 200 +df_write_tae_lzma.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 +df_write_tae_lzma.xml +----------------------------------------------------------------------------------- +df_write_tae_lzma.xml +----------------------------------------------------------------------------------- +df_write_tae_lzma.xml | Include: ../tasks/MDFGen.xml +df_write_tae_lzma.xml +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 +df_write_tae_lzma.xml | Command:runTask.sh +| IO: df_write_tae_lzma.xml | I/O parameters: +df_write_tae_lzma.xml | TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_write_tae_lzma.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) -D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) -D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) -D nDF_DEBUG=1 +df_write_tae_lzma.xml +----------------------------------------------------------------------------------- +df_write_tae_lzma.xml +----------------------------------------------------------------------------------- +df_write_tae_lzma.xml | Include: ../tasks/Writer.xml +df_write_tae_lzma.xml +-- Task:Writer [online,onliners] VIP: false Instances: 1 +df_write_tae_lzma.xml | Command:runTask.sh +| IO: df_write_tae_lzma.xml | I/O parameters: +df_write_tae_lzma.xml | 
TMO: + Timeout: Any = 30 + Timeout: load = 200 +df_write_tae_lzma.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=os.getenv('WRITER_FILENAME') -D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) -D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) -D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) -D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') -D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) -D noDF_DEBUG=1 +df_write_tae_lzma.xml +----------------------------------------------------------------------------------- +df_write_tae_lzma.xml +----------------------------------------------------------------------------------- ++--------------------------------------------------------------- | Loading architecture file: "df_write_tae_zstd.xml" +--------------------------------------------------------------- df_write_tae_zstd.xml | Include: ../tasks/MBM.xml @@ -1107,36 +1206,3 @@ df_write_tae_zstd.xml | TMO: df_write_tae_zstd.xml | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=os.getenv('WRITER_FILENAME') -D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) -D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) -D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) -D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') -D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) -D noDF_DEBUG=1 df_write_tae_zstd.xml +----------------------------------------------------------------------------------- df_write_tae_zstd.xml +----------------------------------------------------------------------------------- -+--------------------------------------------------------------- -| Loading architecture file: "df_write_tae_zstd.xml~" -+--------------------------------------------------------------- -df_write_tae_zstd.xml~ | Include: ../tasks/MBM.xml -df_write_tae_zstd.xml~ +-- Task:MBM [online,onliners] VIP: false Instances: 1 -df_write_tae_zstd.xml~ | Command:runTask.sh -| IO: df_write_tae_zstd.xml~ | I/O parameters: -df_write_tae_zstd.xml~ | TMO: - Timeout: Any = 20 - Timeout: load = 200 -df_write_tae_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D noDF_DEBUG=1 -df_write_tae_zstd.xml~ +----------------------------------------------------------------------------------- -df_write_tae_zstd.xml~ +----------------------------------------------------------------------------------- -df_write_tae_zstd.xml~ | Include: ../tasks/MDFGen.xml -df_write_tae_zstd.xml~ +-- Task:MDFGen [online,onliners] VIP: false Instances: 1 -df_write_tae_zstd.xml~ | Command:runTask.sh -| IO: df_write_tae_zstd.xml~ | I/O parameters: -df_write_tae_zstd.xml~ | TMO: - Timeout: Any = 30 - Timeout: load = 200 -df_write_tae_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=bursts=int(os.getenv('GEN_NUM_BURSTS',1000)) -D ARG1=,packing=int(os.getenv('GEN_PACKING',1)) -D ARG2=,half_window=int(os.getenv('GEN_HALF_WINDOW',0)) -D nDF_DEBUG=1 -df_write_tae_zstd.xml~ +----------------------------------------------------------------------------------- -df_write_tae_zstd.xml~ +----------------------------------------------------------------------------------- -df_write_tae_zstd.xml~ | Include: ../tasks/Writer.xml -df_write_tae_zstd.xml~ +-- Task:Writer [online,onliners] VIP: false Instances: 1 -df_write_tae_zstd.xml~ | Command:runTask.sh -| IO: df_write_tae_zstd.xml~ | I/O parameters: -df_write_tae_zstd.xml~ | TMO: - Timeout: Any = 30 - Timeout: load = 200 
-df_write_tae_zstd.xml~ | FMC: -u ${PARTITION}_${NAME}_${INSTANCE} -w ${WORKING_DIR} -D ARG0=os.getenv('WRITER_FILENAME') -D ARG1=,max_events=int(os.getenv('WRITER_MAX_EVENTS',100)) -D ARG2=,compression=eval(os.getenv('WRITER_COMPRESSION','None')) -D ARG3=,checksum=os.getenv('WRITER_CHECKSUM_TYPE',None) -D ARG4=,buffer=os.getenv('WRITER_DATA_BUFFER','Events') -D ARG5=,file_compression=int(os.getenv('WRITER_FILE_COMPRESSION','0')) -D noDF_DEBUG=1 -df_write_tae_zstd.xml~ +----------------------------------------------------------------------------------- -df_write_tae_zstd.xml~ +----------------------------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_gzip_stream.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_gzip_stream.ref index 194b4e45b..29a4145e4 100644 --- a/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_gzip_stream.ref +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_gzip_stream.ref @@ -25,8 +25,28 @@ ####-##-## TIME-### Reader INFO Scanning directory list disabled! ####-##-## TIME-### Reader INFO Scanning directory list disabled! ####-##-## TIME-### Reader INFO Sleeping before going to PAUSE.... -####-##-## TIME-### Reader INFO Executed cancellation of pending I/O requests. +####-##-## TIME-### Summary INFO (pause) +++++ MBM buffer section sucessfully mapped. +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Buffer "Events_##ID##" Events: Produced:1000 Seen:1000 Pending:0 Max:50 +####-##-## TIME-### Summary INFO (pause) Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Name Partition Type State Produced %prod #seen %seen Reqs Buffer +####-##-## TIME-### Summary INFO (pause) ##ID##_Reader_0 feed P 1000 100 +####-##-## TIME-### Summary INFO (pause) CONS_ONE feed C 1000 100 1 Events_##ID## +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Buffer "Output_##ID##" Events: Produced:1000 Seen:0 Pending:0 Max:50 +####-##-## TIME-### Summary INFO (pause) Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Name Partition Type State Produced %prod #seen %seen Reqs Buffer +####-##-## TIME-### Summary INFO (pause) PROD_ONE feed P 1000 100 Output_##ID## +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) +++++ MBM summary finished. ####-##-## TIME-### Reader INFO Quitting... +####-##-## TIME-### Reader INFO Executed cancellation of pending I/O requests. ####-##-## TIME-### MBM INFO Excluding from buffers. No more buffer access possible. 
+++ MBMMON.log +---------------------------------------------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_lzma.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_lzma.ref new file mode 100644 index 000000000..d5b4a7909 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_lzma.ref @@ -0,0 +1,82 @@ ++++ Reader_0.log +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ UTGID: ##ID##_StorageFileReader_0 +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ DIM_DNS port:#### +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ DIM_DNS node:localhost +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ OPTIONS: GaudiOnlineTests.DF +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ TASK_TYPE: StorageFileReader +####-##-## TIME-### INFO: Loading library: libDataflowDict +####-##-## TIME-### INFO: Import Online namespace from ROOT +####-##-## TIME-### INFO: +++ Reader directory: ['../data'] file prefix: 'df_output_mdf_lzma_0000' +####-##-## TIME-### INFO: +++ Setting up output logger component +####-##-## TIME-### INFO: +++ Starting dataflow process +####-##-## TIME-### Reader INFO Scanned directory ../data: 10 files. +####-##-## TIME-### Delay INFO +++ Got incident DAQ_RUNNING: trigger auto transition to PAUSED after n seconds +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_lzma_0000012345_0001.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_lzma_0000012345_0001.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_lzma_0000012345_0002.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_lzma_0000012345_0002.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_lzma_0000012345_0003.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_lzma_0000012345_0003.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_lzma_0000012345_0004.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_lzma_0000012345_0004.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_lzma_0000012345_0005.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_lzma_0000012345_0005.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_lzma_0000012345_0006.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_lzma_0000012345_0006.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_lzma_0000012345_0007.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_lzma_0000012345_0007.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_lzma_0000012345_0008.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_lzma_0000012345_0008.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_lzma_0000012345_0009.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_lzma_0000012345_0009.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_mdf_lzma_0000012345_0010.mdf.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_mdf_lzma_0000012345_0010.mdf.xz [0 MB] +####-##-## TIME-### Reader INFO Locking event loop. Waiting for work.... +####-##-## TIME-### Delay INFO +++ Timeout expired: Invoke auto transition to PAUSED. 
+####-##-## TIME-### Summary INFO (pause) +++++ MBM buffer section sucessfully mapped. +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Buffer "Events_##ID##" Events: Produced:100 Seen:100 Pending:0 Max:50 +####-##-## TIME-### Summary INFO (pause) Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Name Partition Type State Produced %prod #seen %seen Reqs Buffer +####-##-## TIME-### Summary INFO (pause) ##ID##_StorageFileReader_0 feed P 100 100 +####-##-## TIME-### Summary INFO (pause) CONS_ONE feed C 100 100 1 Events_##ID## +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Buffer "Output_##ID##" Events: Produced:100 Seen:0 Pending:0 Max:50 +####-##-## TIME-### Summary INFO (pause) Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Name Partition Type State Produced %prod #seen %seen Reqs Buffer +####-##-## TIME-### Summary INFO (pause) PROD_ONE feed P 100 100 Output_##ID## +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) +++++ MBM summary finished. +####-##-## TIME-### Reader INFO Executed cancellation of pending I/O requests. +####-##-## TIME-### MBM INFO Excluding from buffers. No more buffer access possible. ++++ MBMMON.log ++---------------------------------------------------------------------------------------------------- +| Buffer Manager summary [ (.*)_##ID## ] ++---------------------------------------------------------------------------------------------------- +| +++++ MBM buffer section sucessfully mapped. +| +| ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +| +| Buffer "Events_##ID##" Events: Produced:100 Seen:100 Pending:0 Max:50 +| Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| ##ID##_StorageFileReader_0 feed P #MBM# 100 100 +| CONS_ONE feed C 100 100 1 Events_##ID## +| +| ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +| +| Buffer "Output_##ID##" Events: Produced:100 Seen:0 Pending:0 Max:50 +| Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| PROD_ONE feed P 100 100 Output_##ID## +| +| +++++ MBM summary finished. 
++---------------------------------------------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_lzma_stream.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_lzma_stream.ref new file mode 100644 index 000000000..2066425e0 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_lzma_stream.ref @@ -0,0 +1,55 @@ ++++ StorageFileReader_0.log +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ UTGID: ##ID##_Reader_0 +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ DIM_DNS port:#### +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ DIM_DNS node:localhost +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ OPTIONS: GaudiOnlineTests.DF +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ TASK_TYPE: Reader +####-##-## TIME-### INFO: Loading library: libDataflowDict +####-##-## TIME-### INFO: Import Online namespace from ROOT +####-##-## TIME-### INFO: +++ Reader directory: ['../data'] file prefix: 'df_output_mdf_lzma_0000' +####-##-## TIME-### INFO: +++ Setting up output logger component +####-##-## TIME-### INFO: +++ Starting dataflow process +####-##-## TIME-### Reader INFO Scanned directory ../data: 10 files. +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_lzma_0000012345_0001.mdf.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_lzma_0000012345_0002.mdf.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_lzma_0000012345_0003.mdf.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_lzma_0000012345_0004.mdf.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_lzma_0000012345_0005.mdf.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_lzma_0000012345_0006.mdf.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_lzma_0000012345_0007.mdf.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_lzma_0000012345_0008.mdf.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_lzma_0000012345_0009.mdf.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_lzma_0000012345_0010.mdf.xz +####-##-## TIME-### Reader INFO Rescanning directory list..... +####-##-## TIME-### Reader INFO Scanning directory list disabled! +####-##-## TIME-### Reader INFO Scanning directory list disabled! +####-##-## TIME-### Reader INFO Scanning directory list disabled! +####-##-## TIME-### Reader INFO Sleeping before going to PAUSE.... +####-##-## TIME-### Reader INFO Executed cancellation of pending I/O requests. +####-##-## TIME-### Reader INFO Quitting... +####-##-## TIME-### MBM INFO Excluding from buffers. No more buffer access possible. ++++ MBMMON.log ++---------------------------------------------------------------------------------------------------- +| Buffer Manager summary [ (.*)_##ID## ] ++---------------------------------------------------------------------------------------------------- +| +++++ MBM buffer section sucessfully mapped. 
+| +| ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +| +| Buffer "Events_##ID##" Events: Produced:1000 Seen:1000 Pending:0 Max:50 +| Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| ##ID##_Reader_0 feed P #MBM# 1000 100 +| CONS_ONE feed C 1000 100 1 Events_##ID## +| +| ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +| +| Buffer "Output_##ID##" Events: Produced:1000 Seen:0 Pending:0 Max:50 +| Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| PROD_ONE feed P 1000 100 Output_##ID## +| +| +++++ MBM summary finished. ++---------------------------------------------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_zstd_stream.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_zstd_stream.ref index 9bdc9c094..9c4706dcf 100644 --- a/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_zstd_stream.ref +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_read_mdf_zstd_stream.ref @@ -1,4 +1,4 @@ -+++ Reader_0.log ++++ StorageFileReader_0.log ####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ UTGID: ##ID##_Reader_0 ####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ DIM_DNS port:#### ####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ DIM_DNS node:localhost @@ -11,25 +11,15 @@ ####-##-## TIME-### INFO: +++ Starting dataflow process ####-##-## TIME-### Reader INFO Scanned directory ../data: 10 files. ####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0001.mdf.zst -####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0001.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0002.mdf.zst -####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0002.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0003.mdf.zst -####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0003.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0004.mdf.zst -####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0004.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0005.mdf.zst -####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0005.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0006.mdf.zst -####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0006.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0007.mdf.zst -####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0007.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0008.mdf.zst -####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0008.mdf.zst: ZSTD FAILED to read 131075 bytes. 
[Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0009.mdf.zst -####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0009.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_mdf_zstd_0000012345_0010.mdf.zst -####-##-## TIME-### RTL INFO ../data/df_output_mdf_zstd_0000012345_0010.mdf.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Rescanning directory list..... ####-##-## TIME-### Reader INFO Scanning directory list disabled! ####-##-## TIME-### Reader INFO Scanning directory list disabled! diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_lzma.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_lzma.ref new file mode 100644 index 000000000..50f7ba2fc --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_lzma.ref @@ -0,0 +1,82 @@ ++++ StorageFileReader_0.log +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ UTGID: ##ID##_StorageFileReader_0 +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ DIM_DNS port:#### +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ DIM_DNS node:localhost +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ OPTIONS: GaudiOnlineTests.DF +####-##-## TIME-### ##ID##_StorageFileReader_0 [INFO] +++ TASK_TYPE: StorageFileReader +####-##-## TIME-### INFO: Loading library: libDataflowDict +####-##-## TIME-### INFO: Import Online namespace from ROOT +####-##-## TIME-### INFO: +++ Reader directory: ['../data'] file prefix: 'df_output_tae_lzma_0000' +####-##-## TIME-### INFO: +++ Setting up output logger component +####-##-## TIME-### INFO: +++ Starting dataflow process +####-##-## TIME-### Reader INFO Scanned directory ../data: 10 files. 
+####-##-## TIME-### Delay INFO +++ Got incident DAQ_RUNNING: trigger auto transition to PAUSED after n seconds +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_lzma_0000012345_0001.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_lzma_0000012345_0001.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_lzma_0000012345_0002.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_lzma_0000012345_0002.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_lzma_0000012345_0003.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_lzma_0000012345_0003.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_lzma_0000012345_0004.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_lzma_0000012345_0004.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_lzma_0000012345_0005.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_lzma_0000012345_0005.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_lzma_0000012345_0006.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_lzma_0000012345_0006.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_lzma_0000012345_0007.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_lzma_0000012345_0007.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_lzma_0000012345_0008.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_lzma_0000012345_0008.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_lzma_0000012345_0009.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_lzma_0000012345_0009.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Loaded file: ../data/df_output_tae_lzma_0000012345_0010.tae.xz [0 MB] +####-##-## TIME-### Reader WARN Opened ../data/df_output_tae_lzma_0000012345_0010.tae.xz [0 MB] +####-##-## TIME-### Reader INFO Locking event loop. Waiting for work.... +####-##-## TIME-### Delay INFO +++ Timeout expired: Invoke auto transition to PAUSED. +####-##-## TIME-### Summary INFO (pause) +++++ MBM buffer section sucessfully mapped. 
+####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Buffer "Events_##ID##" Events: Produced:20 Seen:20 Pending:0 Max:50 +####-##-## TIME-### Summary INFO (pause) Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Name Partition Type State Produced %prod #seen %seen Reqs Buffer +####-##-## TIME-### Summary INFO (pause) ##ID##_StorageFileReader_0 feed P 20 100 +####-##-## TIME-### Summary INFO (pause) CONS_ONE feed C 20 100 1 Events_##ID## +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Buffer "Output_##ID##" Events: Produced:20 Seen:0 Pending:0 Max:50 +####-##-## TIME-### Summary INFO (pause) Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) Name Partition Type State Produced %prod #seen %seen Reqs Buffer +####-##-## TIME-### Summary INFO (pause) PROD_ONE feed P 20 100 Output_##ID## +####-##-## TIME-### Summary INFO (pause) +####-##-## TIME-### Summary INFO (pause) +++++ MBM summary finished. +####-##-## TIME-### Reader INFO Executed cancellation of pending I/O requests. +####-##-## TIME-### MBM INFO Excluding from buffers. No more buffer access possible. ++++ MBMMON.log ++---------------------------------------------------------------------------------------------------- +| Buffer Manager summary [ (.*)_##ID## ] ++---------------------------------------------------------------------------------------------------- +| +++++ MBM buffer section sucessfully mapped. +| +| ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +| +| Buffer "Events_##ID##" Events: Produced:20 Seen:20 Pending:0 Max:50 +| Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| ##ID##_StorageFileReader_0 feed P #MBM# 20 100 +| CONS_ONE feed C 20 100 1 Events_##ID## +| +| ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +| +| Buffer "Output_##ID##" Events: Produced:20 Seen:0 Pending:0 Max:50 +| Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| PROD_ONE feed P 20 100 Output_##ID## +| +| +++++ MBM summary finished. 
++---------------------------------------------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_lzma_stream.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_lzma_stream.ref new file mode 100644 index 000000000..d2f7e2a9c --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_lzma_stream.ref @@ -0,0 +1,55 @@ ++++ Reader_0.log +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ UTGID: ##ID##_Reader_0 +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ DIM_DNS port:#### +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ DIM_DNS node:localhost +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ OPTIONS: GaudiOnlineTests.DF +####-##-## TIME-### ##ID##_Reader_0 [INFO] +++ TASK_TYPE: Reader +####-##-## TIME-### INFO: Loading library: libDataflowDict +####-##-## TIME-### INFO: Import Online namespace from ROOT +####-##-## TIME-### INFO: +++ Reader directory: ['../data'] file prefix: 'df_output_tae_lzma_0000' +####-##-## TIME-### INFO: +++ Setting up output logger component +####-##-## TIME-### INFO: +++ Starting dataflow process +####-##-## TIME-### Reader INFO Scanned directory ../data: 10 files. +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_lzma_0000012345_0001.tae.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_lzma_0000012345_0002.tae.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_lzma_0000012345_0003.tae.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_lzma_0000012345_0004.tae.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_lzma_0000012345_0005.tae.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_lzma_0000012345_0006.tae.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_lzma_0000012345_0007.tae.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_lzma_0000012345_0008.tae.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_lzma_0000012345_0009.tae.xz +####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_lzma_0000012345_0010.tae.xz +####-##-## TIME-### Reader INFO Rescanning directory list..... +####-##-## TIME-### Reader INFO Scanning directory list disabled! +####-##-## TIME-### Reader INFO Scanning directory list disabled! +####-##-## TIME-### Reader INFO Scanning directory list disabled! +####-##-## TIME-### Reader INFO Sleeping before going to PAUSE.... +####-##-## TIME-### Reader INFO Executed cancellation of pending I/O requests. +####-##-## TIME-### Reader INFO Quitting... +####-##-## TIME-### MBM INFO Excluding from buffers. No more buffer access possible. ++++ MBMMON.log ++---------------------------------------------------------------------------------------------------- +| Buffer Manager summary [ (.*)_##ID## ] ++---------------------------------------------------------------------------------------------------- +| +++++ MBM buffer section sucessfully mapped. 
+| +| ======================== MBM Bufer summary for buffer "Events_##ID##" ======================== +| +| Buffer "Events_##ID##" Events: Produced:200 Seen:200 Pending:0 Max:50 +| Space(kB):[Tot:999969 Free:999969] Users:[Tot:2 Max:30] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| ##ID##_Reader_0 feed P #MBM# 200 100 +| CONS_ONE feed C 200 100 1 Events_##ID## +| +| ======================== MBM Bufer summary for buffer "Output_##ID##" ======================== +| +| Buffer "Output_##ID##" Events: Produced:200 Seen:0 Pending:0 Max:50 +| Space(kB):[Tot:99975 Free:99975] Users:[Tot:1 Max:15] +| +| Name Partition Type State Produced %prod #seen %seen Reqs Buffer +| PROD_ONE feed P 200 100 Output_##ID## +| +| +++++ MBM summary finished. ++---------------------------------------------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_zstd_stream.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_zstd_stream.ref index 1d7b422c0..432418229 100644 --- a/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_zstd_stream.ref +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_read_tae_zstd_stream.ref @@ -11,25 +11,15 @@ ####-##-## TIME-### INFO: +++ Starting dataflow process ####-##-## TIME-### Reader INFO Scanned directory ../data: 10 files. ####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0001.tae.zst -####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0001.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0002.tae.zst -####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0002.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0003.tae.zst -####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0003.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0004.tae.zst -####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0004.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0005.tae.zst -####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0005.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0006.tae.zst -####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0006.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0007.tae.zst -####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0007.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0008.tae.zst -####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0008.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0009.tae.zst -####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0009.tae.zst: ZSTD FAILED to read 131075 bytes. 
[Read error: Success] ####-##-## TIME-### Reader INFO Opened file ../data/df_output_tae_zstd_0000012345_0010.tae.zst -####-##-## TIME-### RTL INFO ../data/df_output_tae_zstd_0000012345_0010.tae.zst: ZSTD FAILED to read 131075 bytes. [Read error: Success] ####-##-## TIME-### Reader INFO Rescanning directory list..... ####-##-## TIME-### Reader INFO Scanning directory list disabled! ####-##-## TIME-### Reader INFO Scanning directory list disabled! diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_write_mdf_lzma.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_write_mdf_lzma.ref new file mode 100644 index 000000000..677dde365 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_write_mdf_lzma.ref @@ -0,0 +1,59 @@ +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ UTGID: ##ID##_Writer_0 +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ DIM_DNS port:#### +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ DIM_DNS node:localhost +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ OPTIONS: GaudiOnlineTests.DF +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ TASK_TYPE: Writer +####-##-## TIME-### INFO: Loading library: libDataflowDict +####-##-## TIME-### INFO: Import Online namespace from ROOT +####-##-## TIME-### INFO: +++ Writer output specs: ../data/df_output_mdf_lzma_${RUN}_${SEQ}.mdf.xz [max. 100 events per file] +####-##-## TIME-### INFO: +++ Setting up output logger component +####-##-## TIME-### INFO: +++ Starting dataflow process +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_lzma_0000012345_0001.mdf.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_lzma_0000012345_0001.mdf.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_lzma_0000012345_0002.mdf.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_lzma_0000012345_0002.mdf.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_lzma_0000012345_0003.mdf.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_lzma_0000012345_0003.mdf.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_lzma_0000012345_0004.mdf.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_lzma_0000012345_0004.mdf.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_lzma_0000012345_0005.mdf.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_lzma_0000012345_0005.mdf.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_lzma_0000012345_0006.mdf.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_lzma_0000012345_0006.mdf.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_lzma_0000012345_0007.mdf.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_lzma_0000012345_0007.mdf.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_lzma_0000012345_0008.mdf.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_lzma_0000012345_0008.mdf.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_lzma_0000012345_0009.mdf.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_lzma_0000012345_0009.mdf.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_mdf_lzma_0000012345_0010.mdf.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_mdf_lzma_0000012345_0010.mdf.xz after 0 MB [event-limit]. 
+####-##-## TIME-### Writer INFO +---------------------------------------------------------------- +####-##-## TIME-### Writer INFO | Overview of monitoring items: +####-##-## TIME-### Writer INFO | Number of events written to output: 1000 +####-##-## TIME-### Writer INFO | Number of events not written and dropped: 0 +####-##-## TIME-### Writer INFO | Number of bursts submitted to output: 1000 +####-##-## TIME-### Writer INFO | Number of files opened to write output: 10 +####-##-## TIME-### Writer INFO | Number of files closed to write output: 10 +####-##-## TIME-### Writer INFO | Number of writte errors: 0 +####-##-## TIME-### Writer INFO | Number of bytes written to output: 608000 +####-##-## TIME-### Writer INFO | Number of bytes non-compressed written to output: 608000 +####-##-## TIME-### Writer INFO | Number of bytes dropped from output: 0 +####-##-## TIME-### Writer INFO | Number of events with a bad header structure: 0 +####-##-## TIME-### Writer INFO | Number of currently active buffers: 0 +####-##-## TIME-### Writer INFO | Number of current todo buffers: 0 +####-##-## TIME-### Writer INFO | Number of currently free buffers: 0 +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- +####-##-## TIME-### Writer INFO | Files successfully written: +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_lzma_0000012345_0001.mdf.xz Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_lzma_0000012345_0002.mdf.xz Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_lzma_0000012345_0003.mdf.xz Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_lzma_0000012345_0004.mdf.xz Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_lzma_0000012345_0005.mdf.xz Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_lzma_0000012345_0006.mdf.xz Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_lzma_0000012345_0007.mdf.xz Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_lzma_0000012345_0008.mdf.xz Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_lzma_0000012345_0009.mdf.xz Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_mdf_lzma_0000012345_0010.mdf.xz Run: 12345 ####-##-## TIME-### 100 events 60800 bytes +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- diff --git a/Online/GaudiOnlineTests/tests/refs/DF/df_write_tae_lzma.ref b/Online/GaudiOnlineTests/tests/refs/DF/df_write_tae_lzma.ref new file mode 100644 index 000000000..f0be88047 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/DF/df_write_tae_lzma.ref @@ -0,0 +1,59 @@ +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ UTGID: ##ID##_Writer_0 +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ DIM_DNS port:#### +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ DIM_DNS node:localhost +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ OPTIONS: GaudiOnlineTests.DF +####-##-## TIME-### ##ID##_Writer_0 [INFO] +++ TASK_TYPE: Writer +####-##-## 
TIME-### INFO: Loading library: libDataflowDict +####-##-## TIME-### INFO: Import Online namespace from ROOT +####-##-## TIME-### INFO: +++ Writer output specs: ../data/df_output_tae_lzma_${RUN}_${SEQ}.tae.xz [max. 20 events per file] +####-##-## TIME-### INFO: +++ Setting up output logger component +####-##-## TIME-### INFO: +++ Starting dataflow process +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_lzma_0000012345_0001.tae.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_lzma_0000012345_0001.tae.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_lzma_0000012345_0002.tae.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_lzma_0000012345_0002.tae.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_lzma_0000012345_0003.tae.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_lzma_0000012345_0003.tae.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_lzma_0000012345_0004.tae.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_lzma_0000012345_0004.tae.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_lzma_0000012345_0005.tae.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_lzma_0000012345_0005.tae.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_lzma_0000012345_0006.tae.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_lzma_0000012345_0006.tae.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_lzma_0000012345_0007.tae.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_lzma_0000012345_0007.tae.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_lzma_0000012345_0008.tae.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_lzma_0000012345_0008.tae.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_lzma_0000012345_0009.tae.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_lzma_0000012345_0009.tae.xz after 0 MB [event-limit]. +####-##-## TIME-### Writer WARN Opened ../data/df_output_tae_lzma_0000012345_0010.tae.xz +####-##-## TIME-### Writer WARN Closed ../data/df_output_tae_lzma_0000012345_0010.tae.xz after 0 MB [event-limit]. 
+####-##-## TIME-### Writer INFO +---------------------------------------------------------------- +####-##-## TIME-### Writer INFO | Overview of monitoring items: +####-##-## TIME-### Writer INFO | Number of events written to output: 200 +####-##-## TIME-### Writer INFO | Number of events not written and dropped: 0 +####-##-## TIME-### Writer INFO | Number of bursts submitted to output: 200 +####-##-## TIME-### Writer INFO | Number of files opened to write output: 10 +####-##-## TIME-### Writer INFO | Number of files closed to write output: 10 +####-##-## TIME-### Writer INFO | Number of writte errors: 0 +####-##-## TIME-### Writer INFO | Number of bytes written to output: 1727200 +####-##-## TIME-### Writer INFO | Number of bytes non-compressed written to output: 1727200 +####-##-## TIME-### Writer INFO | Number of bytes dropped from output: 0 +####-##-## TIME-### Writer INFO | Number of events with a bad header structure: 0 +####-##-## TIME-### Writer INFO | Number of currently active buffers: 0 +####-##-## TIME-### Writer INFO | Number of current todo buffers: 0 +####-##-## TIME-### Writer INFO | Number of currently free buffers: 0 +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- +####-##-## TIME-### Writer INFO | Files successfully written: +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- +####-##-## TIME-### Writer INFO | ../data/df_output_tae_lzma_0000012345_0001.tae.xz Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_lzma_0000012345_0002.tae.xz Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_lzma_0000012345_0003.tae.xz Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_lzma_0000012345_0004.tae.xz Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_lzma_0000012345_0005.tae.xz Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_lzma_0000012345_0006.tae.xz Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_lzma_0000012345_0007.tae.xz Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_lzma_0000012345_0008.tae.xz Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_lzma_0000012345_0009.tae.xz Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO | ../data/df_output_tae_lzma_0000012345_0010.tae.xz Run: 12345 ####-##-## TIME-### 20 events 172720 bytes +####-##-## TIME-### Writer INFO +---------------------------------------------------------------- diff --git a/Online/OnlineBase/CMakeLists.txt b/Online/OnlineBase/CMakeLists.txt index b586727ad..75952fe32 100755 --- a/Online/OnlineBase/CMakeLists.txt +++ b/Online/OnlineBase/CMakeLists.txt @@ -166,7 +166,7 @@ online_library(OnlineCompress src/COMPRESS/posix_lzma.cpp src/COMPRESS/posix_lz4.cpp) # -target_link_libraries(OnlineCompress PRIVATE Online::OnlineBase ROOT::Core -lz -llzma) +target_link_libraries(OnlineCompress PRIVATE Online::OnlineBase ROOT::Core -lz -llzma -llz4) # find_path(ZLIB_HEADER_DIR NAMES gzip.h HINTS $ENV{LBENV_PREFIX}/include) if( NOT "${ZLIB_HEADER_DIR}" STREQUAL "" ) diff --git 
a/Online/OnlineBase/include/RTL/Compress.h b/Online/OnlineBase/include/RTL/Compress.h index 22a8effd9..46692ee30 100644 --- a/Online/OnlineBase/include/RTL/Compress.h +++ b/Online/OnlineBase/include/RTL/Compress.h @@ -39,6 +39,24 @@ namespace Online { /// Inflate the response using zstd std::vector<unsigned char> decompress_zstd(std::vector<unsigned char>&& data); + /// Inflate the response using lzma + std::vector<unsigned char> decompress_lzma(const unsigned char* data, std::size_t len); + + /// Inflate the response using lzma + std::vector<unsigned char> decompress_lzma(const std::vector<unsigned char>& data); + + /// Inflate the response using lzma + std::vector<unsigned char> decompress_lzma(std::vector<unsigned char>&& data); + + /// Inflate the response using lz4 + std::vector<unsigned char> decompress_lz4(const unsigned char* data, std::size_t len); + + /// Inflate the response using lz4 + std::vector<unsigned char> decompress_lz4(const std::vector<unsigned char>& data); + + /// Inflate the response using lz4 + std::vector<unsigned char> decompress_lz4(std::vector<unsigned char>&& data); + /// Inflate the response using gzip std::vector<unsigned char> decompress_gzip(const std::vector<unsigned char>& data); diff --git a/Online/OnlineBase/include/RTL/posix.h b/Online/OnlineBase/include/RTL/posix.h index 977c27b4d..b39cdae93 100644 --- a/Online/OnlineBase/include/RTL/posix.h +++ b/Online/OnlineBase/include/RTL/posix.h @@ -92,6 +92,9 @@ namespace Online { int ( *closedir)( DIR* dirp ) = nullptr; dirent* ( *readdir )( DIR* dirp ) = nullptr; }; + + /// ------------------------------------------------------------------------------ + std::size_t posix_print(int debug, const char* format, ...); } // End namespace Online /// Descriptor for the POSIX file system diff --git a/Online/OnlineBase/src/COMPRESS/Compress.cpp b/Online/OnlineBase/src/COMPRESS/Compress.cpp index c7c14ea7a..7c41d8020 100644 --- a/Online/OnlineBase/src/COMPRESS/Compress.cpp +++ b/Online/OnlineBase/src/COMPRESS/Compress.cpp @@ -36,6 +36,75 @@ namespace { } } +#if defined(ONLINE_HAVE_LZ4) +#include <lz4.h> + +/// Namespace for the Online utilities +namespace Online { + /// Namespace for the Online compression utilities + namespace compress { + std::vector<uint8_t> decompress_lz4(const uint8_t*, std::size_t) { + return {}; + } + } // End namespace compress +} // End namespace Online +#else +namespace Online { namespace compress { + std::vector<uint8_t> decompress_lz4(const uint8_t*, std::size_t) { return {}; } + }} +#endif + + +#if defined(ONLINE_HAVE_LZMA) +#include <lzma.h> + +/// Namespace for the Online utilities +namespace Online { + /// Namespace for the Online compression utilities + namespace compress { + + /// Inflate the response using lzma + std::vector<uint8_t> decompress_lzma(const uint8_t* data, std::size_t len) { + std::vector<uint8_t> buffer, result; + // Initialize the encoder using the custom filter chain. + lzma_stream strm = LZMA_STREAM_INIT; + auto ret = ::lzma_stream_decoder(&strm, UINT64_MAX, LZMA_CONCATENATED); + if ( ret != LZMA_OK ) { + ::lib_rtl_output(LIB_RTL_ERROR, "LZMA: Failed to initialize decoder. 
[%08X]", ret); + return {}; + } + buffer.resize(4*1024*1024); + for( strm.avail_in = len, strm.next_in = data; strm.avail_in > 0; ) { + int64_t out_now = strm.total_out; + strm.next_out = &buffer.at(0); + strm.avail_out = buffer.size(); + ::lzma_ret ret = ::lzma_code(&strm, LZMA_RUN); + if ( !(ret == LZMA_OK || ret == LZMA_STREAM_END) ) { + std::string msg = std::make_error_code(std::errc(errno=EINVAL)).message(); + ::lib_rtl_output(LIB_RTL_ERROR, "LZMA: Failed to decode frame with %ld bytes [%s]", + len, msg.c_str()); + return {}; + } + else if ( strm.avail_out == 0 ) { + ::lib_rtl_output(LIB_RTL_ERROR, + "LZMA: Output buffer overflow. Stop coding after %ld out of %ld bytes.", + len-strm.avail_in, len); + return {}; + } + out_now = strm.total_out - out_now; + std::copy(&buffer.at(0), &buffer.at(0)+out_now, std::back_inserter(result)); + } + ::lzma_end(&strm); + return result; + } + } // End namespace compress +} // End namespace Online +#else +namespace Online { namespace compress { + std::vector<uint8_t> decompress_zstd(const uint8_t*, std::size_t) { return {}; } + }} +#endif + #if defined(ONLINE_HAVE_ZSTD) #include <zstd.h> @@ -167,6 +236,25 @@ namespace Online { return decompress_zstd(&data[0], data.size()); } + /// Inflate the response using lzma + std::vector<uint8_t> decompress_lzma(const std::vector<uint8_t>& data) { + return decompress_lzma(&data[0], data.size()); + } + + /// Inflate the response using lzma + std::vector<uint8_t> decompress_lzma(std::vector<uint8_t>&& data) { + return decompress_lzma(&data[0], data.size()); + } + + /// Inflate the response using lz4 + std::vector<uint8_t> decompress_lz4(const std::vector<uint8_t>& data) { + return decompress_lz4(&data[0], data.size()); + } + + /// Inflate the response using lz4 + std::vector<uint8_t> decompress_lz4(std::vector<uint8_t>&& data) { + return decompress_lz4(&data[0], data.size()); + } #define CHUNK 0x4000 @@ -323,6 +411,8 @@ namespace Online { return decompress_zstream(false, data, data_len); else if ( encoding.find("zstd") != std::string::npos ) return decompress_zstd(data, data_len); + else if ( encoding.find("lz4") != std::string::npos ) + return decompress_lz4(data, data_len); else if ( encoding.find("identity") != std::string::npos ) goto Default; } diff --git a/Online/OnlineBase/src/COMPRESS/posix_gzip.cpp b/Online/OnlineBase/src/COMPRESS/posix_gzip.cpp index 247d10570..019fada43 100644 --- a/Online/OnlineBase/src/COMPRESS/posix_gzip.cpp +++ b/Online/OnlineBase/src/COMPRESS/posix_gzip.cpp @@ -112,7 +112,7 @@ namespace { } return 0; } - else if ( opt.substr(0,6) == "buffer" ) { + else if ( opt.substr(0,9) == "read_size" || opt.substr(0,10) == "write_size" ) { char* end = 0; int len = ::strtol(value, &end, 10); if ( len >= 0 ) diff --git a/Online/OnlineBase/src/COMPRESS/posix_lz4.cpp b/Online/OnlineBase/src/COMPRESS/posix_lz4.cpp index d21668dd6..876f430ca 100644 --- a/Online/OnlineBase/src/COMPRESS/posix_lz4.cpp +++ b/Online/OnlineBase/src/COMPRESS/posix_lz4.cpp @@ -13,44 +13,71 @@ // Author : M.Frank // //========================================================================== + /// Framework include files +#include <RTL/rtl.h> #include <RTL/posix.h> #include <RTL/strdef.h> +#include <CPP/mem_buff.h> /// C/C++ include files -#include <cstdio> -#include <cstring> -#include <sys/stat.h> -#include <sys/types.h> -#include <fcntl.h> - #include <map> -#include <cctype> -#include <iostream> - -#include <unistd.h> +#include <vector> +#include <fcntl.h> #define O_BINARY 0 #if defined(ONLINE_HAVE_LZ4) -#include <zlib.h> 
+#include <lzma.h> +#include <lz4.h> +#include <lz4frame.h> + +#define POSIX_DEBUG 1 /// -------------------------------------------------------------------------------- namespace { + + static int s_debug = 0; + + constexpr static std::size_t MBYTE = 1024*1024; + constexpr static std::size_t DEFAULT_BUFFER_SIZE = 3*MBYTE; + constexpr static std::size_t DEFAULT_WRITE_SIZE = 5*MBYTE; + class descriptor { public: - int fd { 0 }; - FILE* file { nullptr }; - gzFile gz_file { nullptr }; - std::string path { }; - int level { Z_DEFAULT_COMPRESSION }; - int strategy { Z_DEFAULT_STRATEGY }; - int applied { 0 }; - descriptor(int f, gzFile gz, const std::string& p) : fd(f), gz_file(gz), path(p) {} + using mem_buff_t = Online::mem_buff; + int32_t fd { 0 }; + int32_t flags { 0 }; + int32_t applied { 0 }; + int32_t debug { s_debug }; + int32_t level { 0 }; + uint32_t autoflush { 0 }; + std::string path { }; + std::size_t in_size { DEFAULT_BUFFER_SIZE }; + std::size_t out_limit { DEFAULT_WRITE_SIZE }; + std::size_t tmp_size { DEFAULT_BUFFER_SIZE }; + std::size_t position { 0 }; + std::size_t out_disk { 0 }; + std::size_t out_total { 0 }; + std::size_t in_total { 0 }; + + mem_buff_t in_buffer { }; + mem_buff_t out_buffer { }; + mem_buff_t tmp_buffer { }; + + LZ4F_cctx* compress { nullptr }; + LZ4F_dctx* decompress { nullptr }; + + public: + /// Default constructor descriptor() = default; + /// Move constructor descriptor(descriptor&& copy) = default; - descriptor(const descriptor& copy) = delete; + /// Move assignemtn descriptor& operator=(descriptor&& copy) = default; + /// Disable copy constructor + descriptor(const descriptor& copy) = delete; + /// Disable copy assignment descriptor& operator=(const descriptor& copy) = delete; }; @@ -59,274 +86,449 @@ namespace { /// ------------------------------------------------------------------------------ FileMap& fileMap() { - static FileMap s_fileMap; - return s_fileMap; + static std::unique_ptr<FileMap> s_fileMap = std::make_unique<FileMap>(); + return *(s_fileMap.get()); } - + /// ------------------------------------------------------------------------------ + descriptor* lzma_descriptor(int fd) { + auto i = fileMap().find( fd ); + if ( i != fileMap().end() ) { + return &i->second; + } + errno = EBADF; + return nullptr; + } + /// ------------------------------------------------------------------------------ + auto posix_lz4_print = Online::posix_print; + std::pair<bool, int> parse_digit(const std::string& val) { + if ( !::isdigit(val[0]) ) { + errno = EINVAL; + return { false, 0 }; + } + return { true, int(val[0]) - int('0') }; + } +#if 0 + /// ------------------------------------------------------------------------------ + std::size_t get_block_size(const LZ4F_frameInfo_t* info) { + switch (info->blockSizeID) { + case LZ4F_default: + case LZ4F_max64KB: return 1 << 16; + case LZ4F_max256KB: return 1 << 18; + case LZ4F_max1MB: return 1 << 20; + case LZ4F_max4MB: return 1 << 22; + default: + printf("Impossible with expected frame specification (<=v1.6.1)\n"); + exit(1); + } + } +#endif /// ------------------------------------------------------------------------------ int posix_lz4_set_option(int fd, const char* option, const char* value) { + errno = EINVAL; if ( option ) { - std::string path; std::string opt = RTL::str_lower(option); + if ( opt == "global_debug" ) { + std::string val = RTL::str_lower(value); + if ( val[0] == 'y' ) s_debug = 1; // yes + else if ( val[0] == 't' ) s_debug = 1; // true + else if ( val[0] == 'n' ) s_debug = 0; // no + else if ( 
val[0] == 'f' ) s_debug = 0; // false + return 0; + } + auto i = fileMap().find( fd ); - if ( i != fileMap().end() ) { - path = i->second.path; - if ( opt.substr(0,4) == "strat" ) { + if ( i != fileMap().end() ) { + auto& desc = i->second; + if ( opt.substr(0,5) == "debug" ) { std::string val = RTL::str_lower(value); - if ( val == "default" ) - i->second.strategy = Z_DEFAULT_STRATEGY; - else if ( val == "fixed" ) - i->second.strategy = Z_FIXED; - else if ( val == "huffman" ) - i->second.strategy = Z_HUFFMAN_ONLY; - else if ( val == "rle" ) - i->second.strategy = Z_RLE; - else if ( val == "filtered" ) - i->second.strategy = Z_FILTERED; - else - goto Error; + if ( val[0] == 'y' ) desc.debug = 1; // yes + else if ( val[0] == 't' ) desc.debug = 1; // true + else if ( val[0] == 'n' ) desc.debug = 0; // no + else if ( val[0] == 'f' ) desc.debug = 0; // false return 0; } else if ( opt.substr(0,8) == "compress" ) { std::string val = RTL::str_lower(value); - if ( val == "none" ) - i->second.level = Z_NO_COMPRESSION; - else if ( val == "best" ) - i->second.level = Z_BEST_COMPRESSION; - else if ( val == "fast" ) - i->second.level = Z_BEST_SPEED; - else if ( val == "default" ) - i->second.level = Z_DEFAULT_COMPRESSION; - else { - char* end = 0; - int level = ::strtol(val.c_str(), &end, 10); - if ( level >= 0 && level <= 9 ) - i->second.level = level; - else - goto Error; - } + if ( val == "none" ) val = "0"; + else if ( val == "default" ) val = "5"; + else if ( val == "high" ) val = "9e"; + auto comp = parse_digit(val); + if ( !comp.first ) goto Error; + desc.level = comp.second; return 0; } - else if ( opt.substr(0, 5) == "apply" ) { - gzFile lz4 = i->second.gz_file; - int ret = ::gzsetparams(lz4, i->second.level, i->second.strategy); - if ( ret == Z_OK ) { - i->second.applied = 1; - return 0; - } + else if ( opt.substr(0,6) == "read_size" ) { + int64_t len = ::atol(RTL::str_lower(value).c_str()); + if ( 0 == len ) goto Error; + desc.in_size = len; + return 0; } + else if ( opt.substr(0,6) == "write_size" ) { + int64_t len = ::atol(RTL::str_lower(value).c_str()); + if ( 0 == len ) goto Error; + desc.out_limit = len; + return 0; + } + return 0; } + errno = EBADF; } Error: -#if POSIX_DEBUG - std::cout << fd << " +++ FAILED to set option " << option << " = " << value << std::endl; -#endif + posix_lz4_print(LIB_RTL_ERROR, "LZMA error: FAILED to set option %s = %s", option, value); return -1; } /// ------------------------------------------------------------------------------ - int posix_lz4_attach(int fd, const char* path, int flags) { - gzFile lz4 = nullptr; + int posix_lz4_attach(descriptor& desc) { + const char* path = desc.path.c_str(); + int flags = desc.flags; + + if ( (flags & (O_WRONLY|O_CREAT)) != 0 ) { - lz4 = ::gzdopen(fd, "w"); - } - else if ( (flags & O_APPEND) != 0 ) { - lz4 = ::gzdopen(fd, "a"); + LZ4F_preferences_t prefs = { + { LZ4F_max256KB, + LZ4F_blockLinked, + LZ4F_noContentChecksum, + LZ4F_frame, + 0 /* unknown content size */, + 0 /* no dictID */ , + LZ4F_noBlockChecksum }, + desc.level, /* compression level; 0 == default */ + desc.autoflush, /* autoflush */ + 0, /* favor decompression speed */ + { 0, 0, 0 }, /* reserved, must be set to 0 */ + }; + desc.tmp_size = ::LZ4F_compressBound(desc.in_size, &prefs); // large enough for any input <= desc.in_size + std::size_t ctx = ::LZ4F_createCompressionContext(&desc.compress, LZ4F_VERSION); + if ( LZ4F_isError(ctx) ) { + posix_lz4_print(desc.debug, "%s: LZ4: FAILED to initialize compressor: %ld", path, ctx); + return -1; + } + if ( 
desc.in_size < LZ4F_HEADER_SIZE_MAX ) desc.in_size = LZ4F_HEADER_SIZE_MAX; + desc.in_buffer.allocate(desc.in_size); + desc.tmp_buffer.allocate(desc.tmp_size); + desc.out_buffer.allocate(desc.out_limit); + // write frame header + uint64_t status = ::LZ4F_compressBegin(desc.compress, + desc.tmp_buffer.begin(), + desc.tmp_buffer.length(), + &prefs); + if ( ::LZ4F_isError(status) ) { + posix_lz4_print(desc.debug, "%s: LZ4: Failed to start compression: error %ld [%s]", + path, status, ::LZ4F_getErrorName(status)); + return -1; + } + posix_lz4_print(desc.debug, "%s: LZ4: Buffer size is %ld bytes, header size %u bytes", + path, desc.tmp_buffer.length(), status); + desc.out_buffer.append(desc.tmp_buffer.begin(), status); } else if ( flags == O_RDONLY || flags == ( O_BINARY | O_RDONLY ) ) { - lz4 = ::gzdopen(fd, "r"); + uint64_t status = ::LZ4F_createDecompressionContext(&desc.decompress, LZ4F_VERSION); + if ( ::LZ4F_isError(status) ) { + posix_lz4_print(desc.debug, "%s: LZ4: LZ4F_dctx creation error: %s\n", path, ::LZ4F_getErrorName(status)); + } + LZ4F_frameInfo_t info; + desc.in_buffer.allocate(desc.in_size); + desc.tmp_buffer.allocate(desc.tmp_size); + desc.out_buffer.allocate(desc.out_limit); + size_t consumedSize = desc.tmp_size; + status = ::LZ4F_getFrameInfo(desc.decompress, &info, desc.in_buffer.begin(), &consumedSize); + if ( ::LZ4F_isError(status) ) { + posix_lz4_print(desc.debug, "%s: LZ4: LZ4F_getFrameInfo error: %s\n", path, ::LZ4F_getErrorName(status)); + return -1; + } } - if ( lz4 == nullptr ) { - ::close(fd); -#if POSIX_DEBUG - std::cout << fd << " +++ FAILED to attach lz4 file." << path << std::endl; -#endif - return -1; + desc.applied = 1; + posix_lz4_print(desc.debug, "%s: LZ4: Successfully opened file.", path); + return 0; + } + /// ------------------------------------------------------------------------------ + int posix_lz4_flush_buffer(descriptor& desc, std::size_t max_len) { + if ( desc.out_buffer.used() >= max_len ) { + int64_t ret = posix_t::write_disk(desc.fd, desc.out_buffer.begin(), desc.out_buffer.used()); + if ( ret < 0 ) { + std::string msg = std::make_error_code(std::errc(errno)).message(); + posix_lz4_print(desc.debug, + "%s: LZ4: FAILED to write %ld bytes to file [%s]", + desc.path.c_str(), desc.out_buffer.used(), msg.c_str()); + return -1; + } + desc.out_disk += desc.out_buffer.used(); + posix_lz4_print(desc.debug, "%s: LZ4: Wrote %ld bytes to disk. [Total: %ld bytes]", + desc.path.c_str(), desc.out_buffer.used(), desc.out_disk); + desc.out_buffer.set_cursor(0); + desc.position = 0; } - int fd_desc = posix_t::new_fd(); - fileMap()[fd_desc] = ::descriptor(fd, lz4, path); -#if POSIX_DEBUG - std::cout << fd_desc << " +++ Opened lz4 file: " << path << std::endl; -#endif - return fd_desc; + return 0; } /// ------------------------------------------------------------------------------ - int posix_lz4_open( const char* path, int flags, ... 
) { - int mode = 0; - int fd = -1; - if (flags & O_CREAT) { - va_list arg; - va_start(arg, flags); - mode = va_arg(arg, int); - va_end(arg); - fd = ::open(path, flags, mode); + long posix_lz4_write_buffer(descriptor& desc, uint8_t* buff, std::size_t len) { + for(long done=0, out_now=0, todo=len; done < todo; done += out_now ) { + out_now = std::min(desc.tmp_size, len-done); + std::size_t status = ::LZ4F_compressUpdate(desc.compress, + desc.tmp_buffer.begin(), + desc.tmp_buffer.length(), + buff + done, + out_now, + nullptr); + if ( ::LZ4F_isError(status) ) { + posix_lz4_print(desc.debug, "%s: LZ4: Compression failed: error %X [%s]", + desc.path.c_str(), status, ::LZ4F_getErrorName(status)); + return -1; + } + if ( out_now+desc.out_buffer.used() > desc.out_limit ) { + ::posix_lz4_flush_buffer(desc, desc.out_buffer.used()); + } + if ( out_now > 0 ) { + desc.out_total += out_now; + desc.out_buffer.append(desc.tmp_buffer.begin(), out_now); + } + } + posix_lz4_print(desc.debug, + "%s: LZ4: Encoded %ld bytes. Total in:%9ld compressed:%9ld disk:%9ld bytes", + desc.path.c_str(), len, desc.in_total, desc.out_total, desc.out_disk); + return int64_t(len); + } + /// ------------------------------------------------------------------------------ + ssize_t posix_lz4_write( int fd, const void* ptr, size_t size ) { + auto i = fileMap().find(fd); + if ( i != fileMap().end() ) { + auto& desc = i->second; + if ( !desc.applied ) { + if ( posix_lz4_attach(desc) < 0 ) { + return -1; + } + } + uint8_t *pointer = (uint8_t*)ptr; + uint8_t *start = desc.in_buffer.begin()+desc.position; + uint64_t in_length = desc.in_buffer.length(); + uint64_t in_space = in_length - desc.position; + uint64_t in_miss = 0; + if ( in_space >= size ) { + ::memcpy(start, pointer, size); + desc.position += size; + desc.in_total += size; + return size; + } + else if ( in_space > 0 ) { + ::memcpy(start, pointer, in_space); + in_miss = size - in_space; + desc.position += in_space; + desc.in_total += in_space; + pointer += in_space; + } + else { + in_miss = size; + } + int64_t written = posix_lz4_write_buffer(desc, desc.in_buffer.begin(), in_length); + if ( int64_t(in_length) != written ) { + desc.position = 0; + return -1; + } + desc.position = 0; + posix_lz4_print(desc.debug, + "%s: LZ4: Wrote buffer of %8ld bytes. 
[raw: %9ld encoded: %9ld buffered: %9ld disk: %9ld bytes]", + desc.path.c_str(), in_length, desc.in_total, desc.out_total, + desc.out_buffer.used(), desc.out_disk); + return in_space + posix_lz4_write(fd, pointer+in_space, in_miss); } - else { - fd = ::open(path, flags); + std::string msg = std::make_error_code(std::errc(errno=EBADF)).message(); + posix_lz4_print(s_debug, "%05X: LZ4: FAILED to write %ld bytes [%s]", + fd, size, msg.c_str()); + return -1; + } + /// ------------------------------------------------------------------------------ + ssize_t posix_lz4_read( int fd, void* ptr, size_t size ) { + auto i = fileMap().find( fd ); + if ( i != fileMap().end() ) { + auto& desc = i->second; + const char* path = desc.path.c_str(); + if ( !desc.applied ) { + if ( posix_lz4_attach(desc) < 0 ) { + return -1; + } + } + uint8_t *pointer = (uint8_t*)ptr; + uint8_t *start = desc.out_buffer.begin() + desc.position; + uint64_t out_left = desc.out_buffer.used() - desc.position; + int64_t out_miss = 0; + if ( out_left >= size ) { + ::memcpy(pointer, start, size); + desc.position += size; + return size; + } + else if ( out_left > 0 ) { + ::memcpy(pointer, start, out_left); + out_miss = size - out_left; + desc.position += out_left; + pointer += out_left; + } + else { + out_miss = size; + } + // We have to freshly populate the output buffer from zero position + if ( desc.out_buffer.length() < size ) { + desc.out_buffer.allocate(size*1.5); + } + /// + /// Now fill new decompressed buffer: + desc.position = 0; + desc.out_buffer.set_cursor(0); + for (int64_t len=0; len < out_miss; ) { + int64_t total = posix_t::read_disk(desc.fd, desc.in_buffer.begin(), desc.in_size); + if ( total <= 0 ) { + std::string msg = std::make_error_code(std::errc(errno)).message(); + posix_lz4_print(desc.debug, + "%s: LZMA FAILED to read %ld bytes. [Read error: %s]", + path, size, msg.c_str()); + return (out_left > 0) ? out_left : -1; + } + else if ( total < int64_t(desc.in_size) ) { + posix_lz4_print(desc.debug, "%s: LZMA LIMITED read %ld bytes. [End-of-file]", path, size); + } + if ( total <= 0 ) { + return out_left > 0 ? out_left : -1; + } + posix_lz4_print(desc.debug, "%s: LZ4: read %ld bytes from disk.", path, total); + for(uint8_t *next=desc.in_buffer.begin(), *end = next + total; next < end; ) { + uint64_t in_size = end - next; + uint64_t out_size = desc.tmp_buffer.length(); + uint64_t ret = ::LZ4F_decompress(desc.decompress, + desc.tmp_buffer.begin(), + &out_size, + next, + &in_size, + /* LZ4F_decompressOptions_t */ nullptr); + if ( ::LZ4F_isError(ret) ) { + printf("Decompression error: %s\n", LZ4F_getErrorName(ret)); + return (out_left > 0) ? out_left : -1; + } + next += in_size; + if ( out_size > 0 ) { + desc.out_buffer.append(desc.tmp_buffer.begin(), out_size); + } + } + len = desc.out_buffer.used(); + } + return out_left + posix_lz4_read(fd, pointer, out_miss); } + std::string msg = std::make_error_code(std::errc(errno=EBADF)).message(); + posix_lz4_print(s_debug, "%05X: LZMA FAILED to read %ld bytes. [%s]", fd, size, msg.c_str()); + return -1; + } + /// ------------------------------------------------------------------------------ + int posix_lz4_open( const char* path, int flags, ... 
) { + va_list args; + va_start(args, flags); + int fd = posix_t::open_disk(path, flags, args); if ( fd == -1 ) { -#if POSIX_DEBUG - std::cout << desc << " +++ FAILED to open lz4 file: " << path << std::endl; -#endif + std::string msg = std::make_error_code(std::errc(errno)).message(); + posix_lz4_print(LIB_RTL_ERROR, "%s: LZ4: FAILED to open file [%s]", path, msg.c_str()); return -1; } - return posix_lz4_attach(fd, path, flags); + descriptor desc; + int new_fd = posix_t::new_fd(); + desc.fd = fd; + desc.path = path; + desc.flags = flags; + fileMap()[new_fd] = std::move(desc); + posix_lz4_print(desc.debug, "%s: LZ4: Opened file fd: %d", path, new_fd); + return new_fd; } /// ------------------------------------------------------------------------------ int posix_lz4_close( int fd ) { auto i = fileMap().find( fd ); if ( i != fileMap().end() ) { -#if POSIX_DEBUG - std::cout << fd << " +++ Close lz4 file. " << std::endl; -#endif - ::gzclose(i->second.gz_file); + auto& desc = i->second; + posix_lz4_print(desc.debug, "%s: LZ4: Closing file", desc.path.c_str()); + if ( desc.compress ) { + if ( desc.position > 0 ) { + ::posix_lz4_flush_buffer(desc, desc.position); + } + std::size_t status = ::LZ4F_compressEnd(desc.compress, + desc.out_buffer.begin(), + desc.out_buffer.length(), + NULL); + desc.position = status; + ::posix_lz4_flush_buffer(desc, desc.position); + ::LZ4F_freeCompressionContext(desc.compress); + desc.compress = nullptr; + } + else if ( desc.decompress ) { + ::LZ4F_freeDecompressionContext(desc.decompress); + desc.decompress = nullptr; + } + if ( desc.fd != 0 ) ::close(desc.fd); fileMap().erase(i); return 0; } -#if POSIX_DEBUG - std::cout << fd << " +++ FAILED to close lz4 file. " << std::endl; -#endif + std::string msg = std::make_error_code(std::errc(errno=EBADF)).message(); + posix_lz4_print(s_debug, "%05X: LZ4: FAILED to close file [%s]", fd, msg.c_str()); return -1; } /// ------------------------------------------------------------------------------ int posix_lz4_access( const char* nam, int mode ) { - return ::access(nam, mode); + return posix_t::access_disk(nam, mode); } /// ------------------------------------------------------------------------------ int posix_lz4_unlink( const char* name) { - return ::unlink(name); + return posix_t::unlink_disk(name); } /// ------------------------------------------------------------------------------ int64_t posix_lz4_lseek64( int fd, int64_t offset, int how ) { - auto i = fileMap().find( fd ); - if ( i != fileMap().end() ) { - gzFile lz4 = i->second.gz_file; - return ::gzseek(lz4, offset, how); - } - return -1; + auto* dsc = lzma_descriptor(fd); + return dsc ? posix_t::lseek64_disk(dsc->fd, offset, how) : -1; } /// ------------------------------------------------------------------------------ long posix_lz4_lseek( int fd, long offset, int how ) { return (long)posix_lz4_lseek64( fd, offset, how ); } /// ------------------------------------------------------------------------------ - ssize_t posix_lz4_read( int fd, void* ptr, size_t size ) { - auto i = fileMap().find( fd ); - if ( i != fileMap().end() ) { - gzFile lz4 = i->second.gz_file; - ssize_t ret = ::gzread(lz4, ptr, size); -#if POSIX_DEBUG - std::cout << fd << " [" << i->second.path - << "] +++ Read " << size << " bytes. ret=" << ret << std::endl; -#endif - return ret; - } -#if POSIX_DEBUG - std::cout << fd << " [" << i->second.path - << "] +++ FAILED to read " << size << " bytes." 
<< std::endl; -#endif - return -1; + int posix_lz4_fstat( int fd, struct stat* statbuf ) { + auto* dsc = lzma_descriptor(fd); + return dsc ? posix_t::fstat_disk(dsc->fd, statbuf) : -1; } /// ------------------------------------------------------------------------------ - ssize_t posix_lz4_write( int fd, const void* ptr, size_t size ) { - auto i = fileMap().find( fd ); - if ( i != fileMap().end() ) { - gzFile lz4 = i->second.gz_file; - if ( !i->second.applied ) { - if ( Z_OK != ::gzsetparams(lz4, i->second.level, i->second.strategy) ) { -#if POSIX_DEBUG - std::cout << fd << " [" << i->second.path - << "] FAILED to set parameters." << std::endl; -#endif - } - i->second.applied = 1; - } - ssize_t ret = ::gzwrite(lz4, ptr, size); -#if POSIX_DEBUG - std::cout << fd << " [" << i->second.path - << "] +++ Wrote " << size << " bytes. ret=" << ret << std::endl; -#endif - return ret; - } -#if POSIX_DEBUG - std::cout << fd << " [" << i->second.path - << "] +++ FAILED to write " << size << " bytes." << std::endl; -#endif - return -1; + int posix_lz4_fstat64( int fd, struct stat64* statbuf ) { + auto* dsc = lzma_descriptor(fd); + return dsc ? posix_t::fstat64_disk(dsc->fd, statbuf) : -1; } /// ------------------------------------------------------------------------------ - int posix_lz4_fileno( FILE* file ) { - return posix_t::fileno_specific(file); + FILE* posix_lz4_fopen(const char* path, const char* mode) { + auto [fd, flags] = posix_t::fopen_disk(path, mode); + auto* dsc = lzma_descriptor(fd); + return dsc ? (FILE*)long(posix_lz4_attach(*dsc)) : nullptr; } /// ------------------------------------------------------------------------------ - long posix_lz4_ftell( FILE* stream ) { - int fd = (int)(long(stream)&0xFFFF); - auto i = fileMap().find( fd ); - if ( i != fileMap().end() ) { - gzFile lz4 = i->second.gz_file; - return ::gztell(lz4); - } - return -1; + int posix_lz4_fclose( FILE* file ) { + return posix_lz4_close(posix_t::fileno_specific(file)); } /// ------------------------------------------------------------------------------ - FILE* posix_lz4_fopen(const char* path, const char* mode) { - FILE* file = ::fopen(path, mode); - if ( file ) { - int fd = ::dup(::fileno(file)); - ::fclose(file); - char m = ::tolower(mode[0]); - int flags = 0; - if ( m == 'w' ) flags = O_WRONLY; - else if ( m == 'r' ) flags = O_RDONLY; - else if ( m == 'a' ) flags = O_APPEND; - return (FILE*)long(posix_lz4_attach(fd, path, flags)); - } - return nullptr; + int posix_lz4_fileno( FILE* file ) { + return posix_t::fileno_specific(file); } /// ------------------------------------------------------------------------------ - int posix_lz4_fclose( FILE* file ) { - int fd = (int)(long(file)&0xFFFF); - return posix_lz4_close(fd); + long posix_lz4_ftell( FILE* stream ) { + return posix_lz4_lseek(posix_t::fileno_specific(stream), 0, SEEK_CUR); } /// ------------------------------------------------------------------------------ int posix_lz4_fseek( FILE* file, int64_t offset, int how ) { - int fd = (int)(long(file)&0xFFFF); - return posix_lz4_lseek(fd, offset, how); + return posix_lz4_lseek(posix_t::fileno_specific(file), offset, how); } /// ------------------------------------------------------------------------------ size_t posix_lz4_fread( void* ptr, size_t size, size_t nitems, FILE* file ) { - int fd = (int)(long(file)&0xFFFF); - return posix_lz4_read(fd, ptr, nitems*size); + return posix_lz4_read(posix_t::fileno_specific(file), ptr, nitems*size); } /// 
------------------------------------------------------------------------------ size_t posix_lz4_fwrite( const void* ptr, size_t size, size_t nitems, FILE* file ) { - int fd = (int)(long(file)&0xFFFF); - return posix_lz4_write(fd, ptr, nitems*size); - } - /// ------------------------------------------------------------------------------ - int posix_lz4_stat( const char* path, struct stat* statbuf ) { - return ::stat(path, statbuf); + return posix_lz4_write(posix_t::fileno_specific(file), ptr, nitems*size); } /// ------------------------------------------------------------------------------ - int posix_lz4_stat64( const char* path, struct stat64* statbuf ) { - return ::stat64(path, statbuf); - } - /// ------------------------------------------------------------------------------ - int posix_lz4_fstat( int fd, struct stat* statbuf ) { - auto i = fileMap().find( fd ); - if ( i != fileMap().end() ) { - // gzFile lz4 = i->second.gz_file; - return ::fstat(i->second.fd, statbuf); - } - return -1; + int posix_lz4_stat( const char* path, struct stat* statbuf ) { + return posix_t::stat_disk(path, statbuf); } /// ------------------------------------------------------------------------------ - int posix_lz4_fstat64( int fd, struct stat64* statbuf ) { - auto i = fileMap().find( fd ); - if ( i != fileMap().end() ) { - // gzFile lz4 = i->second.gz_file; - return ::fstat64(i->second.fd, statbuf); - } - return -1; + int posix_lz4_stat64( const char* path, struct stat64* statbuf ) { + return posix_t::stat64_disk(path, statbuf); } } // end anonymous namespace @@ -357,7 +559,7 @@ extern "C" Online::posix_t* posix_lz4_descriptor() { p.fseek = posix_lz4_fseek; p.ftell = posix_lz4_ftell; p.fileno = posix_lz4_fileno; - + p.directory = Online::posix_t::COMPLETE; p.rmdir = ::rmdir; p.mkdir = ::mkdir; @@ -374,3 +576,4 @@ extern "C" Online::posix_t* posix_lz4_descriptor() { throw std::runtime_error("No LZ4 compression availible!"); } #endif +/// -------------------------------------------------------------------------------- diff --git a/Online/OnlineBase/src/COMPRESS/posix_lzma.cpp b/Online/OnlineBase/src/COMPRESS/posix_lzma.cpp index 453242a49..02cb5fffa 100644 --- a/Online/OnlineBase/src/COMPRESS/posix_lzma.cpp +++ b/Online/OnlineBase/src/COMPRESS/posix_lzma.cpp @@ -37,36 +37,42 @@ namespace { static int s_debug = 0; - constexpr static std::size_t DEFAULT_BUFFER_SIZE = 3*1024*1024; + constexpr static std::size_t MBYTE = 1024*1024; + constexpr static std::size_t DEFAULT_BUFFER_SIZE = 3*MBYTE; + constexpr static std::size_t DEFAULT_WRITE_SIZE = 5*MBYTE; + class descriptor { public: using mem_buff_t = Online::mem_buff; enum { NONE=0, WRITING=1, READING=2 }; int fd { 0 }; int flags { 0 }; - int level { 0 }; int applied { 0 }; int mode { NONE }; int debug { s_debug }; + int preset { LZMA_PRESET_DEFAULT }; std::string path { }; - std::size_t in_size { 0 }; - std::size_t out_limit { 5*1024*1024 }; - std::size_t tmp_size { 0 }; + std::size_t in_size { DEFAULT_BUFFER_SIZE }; + std::size_t out_limit { DEFAULT_WRITE_SIZE }; + std::size_t tmp_size { DEFAULT_BUFFER_SIZE }; + std::size_t out_disk { 0 }; std::size_t position { 0 }; - struct monitoring { - std::size_t in_total { 0 }; - std::size_t out_disk{ 0 }; - } monitor; - lzma_stream stream = LZMA_STREAM_INIT; mem_buff_t in_buffer { }; mem_buff_t out_buffer { }; mem_buff_t tmp_buffer { }; + lzma_stream stream = LZMA_STREAM_INIT; + public: + /// Default constructor descriptor() = default; + /// Move constructor descriptor(descriptor&& copy) = default; - descriptor(const 
descriptor& copy) = delete; + /// Move assignemtn descriptor& operator=(descriptor&& copy) = default; + /// Disable copy constructor + descriptor(const descriptor& copy) = delete; + /// Disable copy assignment descriptor& operator=(const descriptor& copy) = delete; }; @@ -88,16 +94,7 @@ namespace { return nullptr; } /// ------------------------------------------------------------------------------ - std::size_t posix_lzma_print(int debug, const char* format, ...) { - if ( debug ) { - va_list args; - va_start( args, format ); - std::size_t len = ::lib_rtl_log(debug > 1 ? debug : LIB_RTL_ALWAYS, format, args); - va_end(args); - return len; - } - return 0; - } + auto posix_lzma_print = Online::posix_print; /// ------------------------------------------------------------------------------ int posix_lzma_handle_error(descriptor& desc, lzma_ret ret) { const char* path = desc.path.c_str(); @@ -152,38 +149,70 @@ namespace { } return 0; } + std::pair<bool, int> parse_digit(const std::string& val) { + if ( !::isdigit(val[0]) ) { + errno = EINVAL; + return { false, 0 }; + } + return { true, int(val[0]) - int('0') }; + } /// ------------------------------------------------------------------------------ int posix_lzma_set_option(int fd, const char* option, const char* value) { + const char* path = "Unknown-file-descriptor"; + errno = EINVAL; if ( option ) { std::string opt = RTL::str_lower(option); + if ( opt == "global_debug" ) { + std::string val = RTL::str_lower(value); + if ( val[0] == 'y' ) s_debug = 1; // yes + else if ( val[0] == 't' ) s_debug = 1; // true + else if ( val[0] == 'n' ) s_debug = 0; // no + else if ( val[0] == 'f' ) s_debug = 0; // false + return 0; + } + auto i = fileMap().find( fd ); - if ( i != fileMap().end() ) { + if ( i != fileMap().end() ) { auto& desc = i->second; + path = desc.path.c_str(); if ( opt.substr(0,5) == "debug" ) { std::string val = RTL::str_lower(value); if ( val[0] == 'y' ) desc.debug = 1; // yes else if ( val[0] == 't' ) desc.debug = 1; // true else if ( val[0] == 'n' ) desc.debug = 0; // no else if ( val[0] == 'f' ) desc.debug = 0; // false + return 0; } else if ( opt.substr(0,8) == "compress" ) { std::string val = RTL::str_lower(value); if ( val == "none" ) val = "0"; - else if ( val == "default" ) val = "3"; - else if ( val == "high" ) val = "9"; - if ( !::isdigit(val[0]) ) - goto Error; - desc.level = int(val[0]) - int('0'); - if ( ::tolower(val[1]) == 'e' ) { - desc.level |= LZMA_PRESET_EXTREME; - } + else if ( val == "default" ) val = "5"; + else if ( val == "high" ) val = "9e"; + auto comp = parse_digit(val); + if ( !comp.first ) goto Error; + desc.preset = comp.second; + if ( val.length()>1 && val[1] == 'e') desc.preset |= LZMA_PRESET_EXTREME; + return 0; + } + else if ( opt.substr(0,6) == "read_size" ) { + int64_t len = ::atol(RTL::str_lower(value).c_str()); + if ( 0 == len ) goto Error; + desc.tmp_size = len; + desc.in_size = len; + return 0; + } + else if ( opt.substr(0,6) == "write_size" ) { + int64_t len = ::atol(RTL::str_lower(value).c_str()); + if ( 0 == len ) goto Error; + desc.out_limit = len; + return 0; } return 0; } errno = EBADF; } Error: - posix_lzma_print(LIB_RTL_ERROR, "LZMA error: FAILED to set option %s = %s", option, value); + posix_lzma_print(LIB_RTL_ERROR, "%s: LZMA: FAILED to set option %s = %s", path, option, value); return -1; } /// ------------------------------------------------------------------------------ @@ -195,7 +224,7 @@ namespace { desc.stream = LZMA_STREAM_INIT; if ( (flags & (O_WRONLY|O_CREAT)) != 0 ) { 
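/// --------------------------------------------------------------------------------
/// Editor's note -- illustrative sketch, not part of the patch.  The write branch
/// below configures an encoder from a preset (lzma_lzma_preset) and then pushes
/// data through lzma_code(), buffering the compressed stream until 'write_size'
/// bytes have accumulated before hitting the disk.  The same streaming pattern,
/// reduced to a one-shot file writer via the lzma_easy_encoder() convenience
/// wrapper, is sketched here; the helper name xz_write_file is an assumption for
/// illustration only.
#include <lzma.h>
#include <cstdint>
#include <cstdio>
#include <vector>

bool xz_write_file(const char* path, const uint8_t* data, std::size_t len, uint32_t preset)  {
  lzma_stream strm = LZMA_STREAM_INIT;
  if ( ::lzma_easy_encoder(&strm, preset, LZMA_CHECK_CRC64) != LZMA_OK )
    return false;                                     // e.g. preset = 6 | LZMA_PRESET_EXTREME
  std::FILE* out = std::fopen(path, "wb");
  if ( !out )  { ::lzma_end(&strm); return false; }
  std::vector<uint8_t> chunk(1 << 20);                // 1 MB scratch output buffer
  strm.next_in  = data;
  strm.avail_in = len;
  lzma_ret rc = LZMA_OK;
  while ( rc != LZMA_STREAM_END )  {                  // all input is known up front
    strm.next_out  = chunk.data();
    strm.avail_out = chunk.size();
    rc = ::lzma_code(&strm, LZMA_FINISH);
    if ( rc != LZMA_OK && rc != LZMA_STREAM_END ) break;
    std::fwrite(chunk.data(), 1, chunk.size() - strm.avail_out, out);
  }
  ::lzma_end(&strm);
  std::fclose(out);
  return rc == LZMA_STREAM_END;
}
/// --------------------------------------------------------------------------------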
lzma_options_lzma opt_lzma; - if ( ::lzma_lzma_preset(&opt_lzma, LZMA_PRESET_DEFAULT) ) { + if ( ::lzma_lzma_preset(&opt_lzma, desc.preset) ) { // It should never fail because the default preset // (and presets 0-9 optionally with LZMA_PRESET_EXTREME) // are supported by all stable liblzma versions. @@ -221,8 +250,8 @@ namespace { } switch (ret) { case LZMA_OK: { - desc.in_buffer.allocate(desc.in_size=DEFAULT_BUFFER_SIZE); - desc.tmp_buffer.allocate(desc.tmp_size=DEFAULT_BUFFER_SIZE); + desc.in_buffer.allocate(desc.in_size); + desc.tmp_buffer.allocate(desc.tmp_size); desc.out_buffer.allocate(desc.out_limit); desc.applied = 1; posix_lzma_print(desc.debug, "%s: LZMA: Successfully opened file.", path); @@ -247,9 +276,9 @@ namespace { desc.path.c_str(), desc.out_buffer.used(), msg.c_str()); return -1; } - desc.monitor.out_disk += desc.out_buffer.used(); + desc.out_disk += desc.out_buffer.used(); posix_lzma_print(desc.debug, "%s: LZMA: Wrote %ld bytes to disk. [Total: %ld bytes]", - desc.path.c_str(), desc.out_buffer.used(), desc.monitor.out_disk); + desc.path.c_str(), desc.out_buffer.used(), desc.out_disk); desc.out_buffer.set_cursor(0); desc.position = 0; } @@ -258,16 +287,44 @@ namespace { /// ------------------------------------------------------------------------------ long posix_lzma_write_buffer(descriptor& desc, uint8_t* buff, std::size_t len, lzma_action flag) { auto& strm = desc.stream; - long done = 0; - for (long todo=len; todo > 0; todo = len-done) { - long out_now = strm.total_out; - long in_now = strm.total_in; + for( strm.avail_in = len, strm.next_in = buff; strm.avail_in > 0; ) { + int64_t out_now = strm.total_out; + strm.next_out = desc.tmp_buffer.begin(); + strm.avail_out = desc.tmp_buffer.length(); + ::lzma_ret ret = ::lzma_code(&strm, flag); + if ( !(ret == LZMA_OK || ret == LZMA_STREAM_END) ) { + posix_lzma_handle_error(desc, ret); + std::string msg = std::make_error_code(std::errc(errno)).message(); + posix_lzma_print(LIB_RTL_ERROR, + "%s: LZMA: Failed to encode frame with %ld bytes [%s]", + desc.path.c_str(), len, msg.c_str()); + return -1; + } + else if ( strm.avail_out == 0 ) { + posix_lzma_print(LIB_RTL_ERROR,"%s: LZMA: Output buffer overflow. " + "Stop coding after %ld out of %ld bytes.", + desc.path.c_str(), len-strm.avail_in, len); + return -1; + } + out_now = strm.total_out - out_now; + if ( out_now+desc.out_buffer.used() > desc.out_limit ) { + ::posix_lzma_flush_buffer(desc, desc.out_buffer.used()); + } + if ( out_now > 0 ) { + desc.out_buffer.append(desc.tmp_buffer.begin(), out_now); + } + } +#if 0 + int64_t done = 0; + for (int64_t todo=len; todo > 0; todo = len-done) { + int64_t out_now = strm.total_out; + int64_t in_now = strm.total_in; strm.avail_in = todo; strm.next_in = buff + done; strm.next_out = desc.tmp_buffer.begin(); strm.avail_out = desc.tmp_buffer.length(); ::lzma_ret ret = ::lzma_code(&strm, flag); - long done_now = strm.total_in - in_now; + int64_t done_now = strm.total_in - in_now; done += done_now; out_now = strm.total_out - out_now; if ( !(ret == LZMA_OK || ret == LZMA_STREAM_END) ) { @@ -291,17 +348,18 @@ namespace { break; } } +#endif posix_lzma_print(desc.debug, "%s: LZMA: Encoded %ld bytes with flag:%s Total in:%9ld compressed:%9ld disk:%9ld bytes", desc.path.c_str(), len, (flag==LZMA_RUN) ? "LZMA_RUN" : (flag==LZMA_FINISH) ? 
"LZMA_FINISH" : "????", - strm.total_in, strm.total_out, desc.monitor.out_disk); + strm.total_in, strm.total_out, desc.out_disk); if ( flag == LZMA_FINISH ) { posix_lzma_print(desc.debug, "%s: LZMA: Closing frame: %ld bytes", desc.path.c_str(), desc.position); } ::posix_lzma_flush_buffer(desc, flag==LZMA_RUN ? desc.out_limit : 0); - return done; + return len; } /// ------------------------------------------------------------------------------ ssize_t posix_lzma_write( int fd, const void* ptr, size_t size ) { @@ -341,7 +399,7 @@ namespace { posix_lzma_print(desc.debug, "%s: LZMA: Wrote buffer of %8ld bytes. [raw: %9ld encoded: %9ld buffered: %9ld disk: %9ld bytes]", desc.path.c_str(), in_length, desc.stream.total_in, desc.stream.total_out, - desc.out_buffer.used(), desc.monitor.out_disk); + desc.out_buffer.used(), desc.out_disk); return in_space + posix_lzma_write(fd, pointer+in_space, in_miss); } std::string msg = std::make_error_code(std::errc(errno=EBADF)).message(); @@ -388,7 +446,7 @@ namespace { desc.out_buffer.set_cursor(0); for (int64_t len=0; len < out_miss; ) { auto& strm = desc.stream; - long total = posix_t::read_disk(desc.fd, desc.in_buffer.begin(), desc.in_size); + int64_t total = posix_t::read_disk(desc.fd, desc.in_buffer.begin(), desc.in_size); if ( total <= 0 ) { std::string msg = std::make_error_code(std::errc(errno)).message(); posix_lzma_print(desc.debug, @@ -396,7 +454,7 @@ namespace { path, size, msg.c_str()); return (out_left > 0) ? out_left : -1; } - else if ( total < desc.in_size ) { + else if ( total < int64_t(desc.in_size) ) { posix_lzma_print(desc.debug, "%s: LZMA LIMITED read %ld bytes. [End-of-file]", path, size); } @@ -404,22 +462,19 @@ namespace { return out_left > 0 ? out_left : -1; } posix_lzma_print(desc.debug, "%s: LZMA: read %ld bytes from disk.", path, total); - long in_done = 0; - long out_done = 0; strm.next_in = desc.in_buffer.begin(); for( strm.avail_in = total; strm.avail_in > 0; ) { - long out_now = strm.total_out; - strm.next_out = desc.tmp_buffer.begin(); - strm.avail_out = desc.tmp_buffer.length(); - ::lzma_ret ret = ::lzma_code(&strm, LZMA_RUN); - long done_now = strm.total_out - out_now; + int64_t out_now = strm.total_out; + strm.next_out = desc.tmp_buffer.begin(); + strm.avail_out = desc.tmp_buffer.length(); + ::lzma_ret ret = ::lzma_code(&strm, LZMA_RUN); + int64_t done = strm.total_out - out_now; if ( ret != LZMA_OK ) { posix_lzma_handle_error(desc, ret); return -1; } - if ( done_now > 0 ) { - out_done += done_now; - desc.out_buffer.append(desc.tmp_buffer.begin(), done_now); + if ( done > 0 ) { + desc.out_buffer.append(desc.tmp_buffer.begin(), done); } } len = desc.out_buffer.used(); @@ -466,6 +521,7 @@ namespace { } if ( desc.fd != 0 ) ::close(desc.fd); fileMap().erase(i); + desc.mode = descriptor::NONE; return 0; } std::string msg = std::make_error_code(std::errc(errno=EBADF)).message(); diff --git a/Online/OnlineBase/src/COMPRESS/posix_zstd.cpp b/Online/OnlineBase/src/COMPRESS/posix_zstd.cpp index fdf4c965d..d890864af 100644 --- a/Online/OnlineBase/src/COMPRESS/posix_zstd.cpp +++ b/Online/OnlineBase/src/COMPRESS/posix_zstd.cpp @@ -35,17 +35,23 @@ /// -------------------------------------------------------------------------------- namespace { + static int s_debug = 0; + + constexpr static std::size_t MBYTE = 1024*1024; + constexpr static std::size_t DEFAULT_WRITE_SIZE = 5*MBYTE; + class descriptor { public: using mem_buff_t = Online::mem_buff; int fd { 0 }; + int debug { s_debug }; int level { 0 }; int applied { 0 }; - std::string path { 
}; + std::string path { }; std::size_t in_size { 0 }; - std::size_t out_limit { 32*1024*1024 }; std::size_t out_size { 0 }; - std::size_t position { 0 }; + std::size_t out_limit { DEFAULT_WRITE_SIZE }; + std::size_t position { 0 }; ZSTD_CCtx* compress { nullptr }; ZSTD_DCtx* decompress { nullptr }; mem_buff_t in_buffer { }; @@ -64,14 +70,16 @@ namespace { using posix_t = Online::posix_t; using FileMap = std::map<int, descriptor>; /// ------------------------------------------------------------------------------ - FileMap& fileMap() { + auto posix_zstd_print = Online::posix_print; + /// ------------------------------------------------------------------------------ + FileMap& fileMap() { static std::unique_ptr<FileMap> s_fileMap = std::make_unique<FileMap>(); return *(s_fileMap.get()); } /// ------------------------------------------------------------------------------ - descriptor* zstd_descriptor(int fd) { + descriptor* zstd_descriptor(int fd) { auto i = fileMap().find( fd ); - if ( i != fileMap().end() ) { + if ( i != fileMap().end() ) { return &i->second; } errno = EBADF; @@ -79,12 +87,31 @@ namespace { } /// ------------------------------------------------------------------------------ int posix_zstd_set_option(int fd, const char* option, const char* value) { - if ( option ) { + const char* path = "Unknown-file-descriptor"; + if ( option ) { std::string opt = RTL::str_lower(option); + if ( opt == "global_debug" ) { + std::string val = RTL::str_lower(value); + if ( val[0] == 'y' ) s_debug = 1; // yes + else if ( val[0] == 't' ) s_debug = 1; // true + else if ( val[0] == 'n' ) s_debug = 0; // no + else if ( val[0] == 'f' ) s_debug = 0; // false + return 0; + } + auto i = fileMap().find( fd ); if ( i != fileMap().end() ) { auto& desc = i->second; - if ( opt.substr(0, 8) == "strategy" ) { + path = desc.path.c_str(); + if ( opt.substr(0,5) == "debug" ) { + std::string val = RTL::str_lower(value); + if ( val[0] == 'y' ) desc.debug = 1; // yes + else if ( val[0] == 't' ) desc.debug = 1; // true + else if ( val[0] == 'n' ) desc.debug = 0; // no + else if ( val[0] == 'f' ) desc.debug = 0; // false + return 0; + } + else if ( opt.substr(0, 8) == "strategy" ) { std::string val = RTL::str_lower(value); if ( val == "default" ) desc.cparams.emplace_back(std::make_pair( ZSTD_c_strategy, ZSTD_fast )); @@ -110,7 +137,7 @@ namespace { goto Error; return 0; } - else if ( opt.substr(0, 8) == "compress" ) { + else if ( opt.substr(0, 8) == "compress" ) { std::string val = RTL::str_lower(value); if ( val == "default" ) desc.cparams.emplace_back(std::make_pair( ZSTD_c_compressionLevel, ZSTD_CLEVEL_DEFAULT )); @@ -124,15 +151,21 @@ namespace { } return 0; } - else if ( opt.substr(0,6) == "buffer" ) { + else if ( opt.substr(0,9) == "read_size" ) { + return 0; + } + else if ( opt.substr(0,10) == "write_size" ) { + long len = ::atol(RTL::str_lower(value).c_str()); + if ( 0 == len ) goto Error; + desc.out_limit = len; return 0; } else if ( opt.substr(0, 5) == "apply" ) { - if ( desc.compress ) { + if ( desc.compress ) { for( const auto& p : desc.cparams ) ZSTD_CCtx_setParameter(desc.compress, p.first, p.second); } - else if ( desc.decompress ) { + else if ( desc.decompress ) { for( const auto& p : desc.dparams ) ZSTD_DCtx_setParameter(desc.decompress, p.first, p.second); } @@ -143,7 +176,7 @@ namespace { errno = EBADF; } Error: - ::lib_rtl_output(LIB_RTL_ERROR,"ZSTD error: FAILED to set option %s = %s", option, value); + posix_zstd_print(LIB_RTL_ERROR,"%s: ZSTD: FAILED to set option %s = %s", path, option, value); 
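/// --------------------------------------------------------------------------------
/// Editor's note -- illustrative sketch, not part of the patch.  The zstd writer
/// path below feeds data to ZSTD_compressStream2() with ZSTD_e_continue while
/// events are being written and finishes the frame with ZSTD_e_end on close
/// (see posix_zstd_close), buffering output until 'write_size' bytes are due for
/// disk.  A minimal version of that streaming loop is sketched here; the helper
/// name zstd_write_chunk and the direct fwrite() to a FILE* are assumptions for
/// illustration only.
#include <zstd.h>
#include <cstdio>
#include <vector>

bool zstd_write_chunk(std::FILE* out, ZSTD_CCtx* cctx,
                      const void* data, std::size_t len, bool last)  {
  ZSTD_inBuffer input = { data, len, 0 };
  ZSTD_EndDirective mode = last ? ZSTD_e_end : ZSTD_e_continue;
  std::vector<char> scratch(ZSTD_CStreamOutSize());   // recommended output chunk size
  std::size_t remaining = 0;
  do  {
    ZSTD_outBuffer output = { scratch.data(), scratch.size(), 0 };
    remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
    if ( ZSTD_isError(remaining) ) return false;
    if ( std::fwrite(scratch.data(), 1, output.pos, out) != output.pos ) return false;
  } while ( last ? (remaining != 0) : (input.pos < input.size) );
  return true;
}

/// Typical use: create the context once per file and set the level, e.g.
///   ZSTD_CCtx* cctx = ZSTD_createCCtx();
///   ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 5);
/// which mirrors the 'compress' / 'apply' options handled above.
/// --------------------------------------------------------------------------------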
return -1; } /// ------------------------------------------------------------------------------ @@ -151,7 +184,7 @@ namespace { descriptor desc; desc.fd = fd; desc.path = path; - if ( (flags & (O_WRONLY|O_CREAT)) != 0 ) { + if ( (flags & (O_WRONLY|O_CREAT)) != 0 ) { desc.compress = ::ZSTD_createCCtx(); desc.in_size = ::ZSTD_CStreamInSize(); desc.out_size = ::ZSTD_CStreamOutSize(); @@ -167,28 +200,24 @@ namespace { desc.tmp_buffer.allocate(desc.out_size); desc.out_buffer.allocate(std::max(desc.out_limit, desc.out_size)); } - if ( desc.compress == nullptr && desc.decompress == nullptr ) { + if ( desc.compress == nullptr && desc.decompress == nullptr ) { ::close(fd); -#if POSIX_DEBUG - ::lib_rtl_output(LIB_RTL_ERROR,"+++ FAILED to open ZSTD file: %s", path); -#endif + posix_zstd_print(desc.debug,"%s: ZSTD: FAILED to open file", path); return -1; } int the_fd = posix_t::new_fd(); fileMap()[the_fd] = std::move(desc); -#if POSIX_DEBUG - ::lib_rtl_output(LIB_RTL_DEBUG,"+++ Opened ZSTD file: %s", path); -#endif + posix_zstd_print(desc.debug,"%s: ZSTD: Successfully opened file", path); return the_fd; } /// ------------------------------------------------------------------------------ - int posix_zstd_flush(descriptor& desc) { - if ( desc.out_buffer.used() > 0 ) { + int posix_zstd_flush(descriptor& desc) { + if ( desc.out_buffer.used() > 0 ) { int64_t ret = posix_t::write_disk(desc.fd, desc.out_buffer.begin(), desc.out_buffer.used()); if ( ret < 0 ) { std::string msg = std::make_error_code(std::errc(errno)).message(); - ::lib_rtl_output(LIB_RTL_ERROR,"%s: +++ FAILED to write %ld bytes. [Write error: %s]", + posix_zstd_print(desc.debug,"%s: ZSTD: FAILED to write %ld bytes. [Write error: %s]", desc.path.c_str(), desc.out_buffer.used(), msg.c_str()); return -1; } @@ -198,37 +227,32 @@ namespace { return 0; } /// ------------------------------------------------------------------------------ - int posix_zstd_open( const char* path, int flags, ... ) { + int posix_zstd_open( const char* path, int flags, ... 
) { va_list args; va_start(args, flags); int fd = posix_t::open_disk(path, flags, args); if ( fd == -1 ) { std::string msg = std::make_error_code(std::errc(errno)).message(); - ::lib_rtl_output(LIB_RTL_ERROR,"+++ FAILED to open ZSTD file %s [%s]", path, msg.c_str()); + posix_zstd_print(s_debug,"%s: ZSTD: FAILED to open file [%s]", path, msg.c_str()); return -1; } return posix_zstd_attach(fd, path, flags); } /// ------------------------------------------------------------------------------ - int posix_zstd_close( int fd ) { + int posix_zstd_close( int fd ) { auto i = fileMap().find( fd ); - if ( i != fileMap().end() ) { + if ( i != fileMap().end() ) { auto& desc = i->second; -#if POSIX_DEBUG - ::lib_rtl_output(LIB_RTL_INFO,"+++ Close ZSTD file %s", desc.path.c_str()); -#endif + posix_zstd_print(desc.debug,"%s: ZSTD: Closing file", desc.path.c_str()); /// - if ( desc.compress ) { + if ( desc.compress ) { ::ZSTD_inBuffer input = { desc.in_buffer.begin(), desc.position, 0 }; do { ::ZSTD_outBuffer output = { desc.tmp_buffer.begin(), desc.tmp_buffer.length(), 0 }; std::size_t ret = ::ZSTD_compressStream2(desc.compress, &output, &input, ZSTD_e_end); - if ( ZSTD_isError(ret) ) { -#if POSIX_DEBUG - ::lib_rtl_output(LIB_RTL_ERROR, - "+++ FAILED to compress %ld bytes to ZSTD file %s", - desc.out_buffer.used(), desc.path.c_str()); -#endif + if ( ZSTD_isError(ret) ) { + posix_zstd_print(desc.debug,"%s: ZSTD: FAILED to compress %ld bytes to file", + desc.path.c_str(), desc.out_buffer.used()); errno = EIO; return -1; } @@ -242,7 +266,7 @@ namespace { ::ZSTD_freeCCtx( desc.compress ); } /// - if ( desc.decompress ) { + if ( desc.decompress ) { ::ZSTD_freeDCtx( desc.decompress ); } /// @@ -253,16 +277,14 @@ namespace { return 0; } errno = EBADF; -#if POSIX_DEBUG std::string msg = std::make_error_code(std::errc(errno)).message(); - ::lib_rtl_output(LIB_RTL_ERROR, "%05X: ZSTD FAILED to close file [%s]", fd, msg.c_str()); -#endif + posix_zstd_print(s_debug, "%05X: ZSTD: FAILED to close file [%s]", fd, msg.c_str()); return -1; } /// ------------------------------------------------------------------------------ - ssize_t posix_zstd_read( int fd, void* ptr, size_t size ) { + ssize_t posix_zstd_read( int fd, void* ptr, size_t size ) { auto i = fileMap().find( fd ); - if ( i != fileMap().end() ) { + if ( i != fileMap().end() ) { auto& desc = i->second; uint8_t *pointer = (uint8_t*)ptr; int64_t out_len = size; @@ -286,29 +308,32 @@ namespace { out_miss = out_len; } // We have to freshly populate the output buffer from zero position - if ( desc.out_buffer.length() < size ) { + if ( desc.out_buffer.length() < size ) { desc.out_buffer.allocate(size*1.5); } /// Now fill new decompressed buffer: desc.position = 0; desc.out_buffer.set_cursor(0); - for (int64_t len=0; len < out_miss; ) { + for (int64_t len=0; len < out_miss; ) { int64_t total = posix_t::read_disk(desc.fd, desc.in_buffer.begin(), desc.in_size); - if ( total <= 0 ) { + if ( total < 0 ) { std::string msg = std::make_error_code(std::errc(errno)).message(); - ::lib_rtl_output(LIB_RTL_INFO, - "%s: ZSTD FAILED to read %ld bytes. [Read error: %s]", - desc.path.c_str(), desc.in_size, msg.c_str()); + posix_zstd_print(desc.debug, + "%s: ZSTD: FAILED to read %ld bytes. [Read error: %d/%s]", + desc.path.c_str(), desc.in_size, errno, msg.c_str()); + return out_left > 0 ? out_left : -1; + } + else if ( total == 0 ) { return out_left > 0 ? 
out_left : -1; } - ::lib_rtl_output(LIB_RTL_DEBUG,"%s: +++ Read %ld bytes", desc.path.c_str(), total); + posix_zstd_print(desc.debug, "%s: ZSTD: Read %ld bytes", desc.path.c_str(), total); ::ZSTD_inBuffer input = { desc.in_buffer.ptr(), std::size_t(total), 0 }; while (input.pos < input.size) { ZSTD_outBuffer output = { desc.tmp_buffer.begin(), desc.tmp_buffer.length(), 0 }; std::size_t ret = ::ZSTD_decompressStream(desc.decompress, &output , &input); if ( ZSTD_isError(ret) ) { - ::lib_rtl_output(LIB_RTL_INFO, - "%s: ZSTD FAILED to decompress %ld bytes.", + posix_zstd_print(desc.debug, + "%s: ZSTD: FAILED to decompress %ld bytes.", desc.path.c_str(), desc.in_size); return out_left > 0 ? out_left : -1; } @@ -319,16 +344,12 @@ namespace { return out_left + posix_zstd_read(fd, pointer, out_miss); } errno = EBADF; -#if POSIX_DEBUG std::string msg = std::make_error_code(std::errc(errno)).message(); - ::lib_rtl_output(LIB_RTL_INFO, - "%05X: ZSTD FAILED to load %ld bytes [%s]", - fd, size, msg.c_str()); -#endif + posix_zstd_print(s_debug, "%05X: ZSTD FAILED to load %ld bytes [%s]", fd, size, msg.c_str()); return -1; } /// ------------------------------------------------------------------------------ - ssize_t posix_zstd_write( int fd, const void* ptr, size_t size ) { + ssize_t posix_zstd_write( int fd, const void* ptr, size_t size ) { auto i = fileMap().find( fd ); if ( i != fileMap().end() ) { auto& desc = i->second; @@ -354,9 +375,9 @@ namespace { do { ::ZSTD_outBuffer output = { desc.tmp_buffer.begin(), desc.tmp_buffer.length(), 0 }; std::size_t ret = ::ZSTD_compressStream2(desc.compress, &output, &input, ZSTD_e_continue); - if ( ::ZSTD_isError(ret) ) { - ::lib_rtl_output(LIB_RTL_INFO, - "%s: ZSTD FAILED to compress %ld bytes [compression error]", + if ( ::ZSTD_isError(ret) ) { + posix_zstd_print(desc.debug, + "%s: ZSTD: FAILED to compress %ld bytes [compression error]", desc.path.c_str(), size); return -1; } @@ -372,12 +393,8 @@ namespace { return ret + in_left; } errno = EBADF; -#if POSIX_DEBUG std::string msg = std::make_error_code(std::errc(ENOENT)).message(); - ::lib_rtl_output(LIB_RTL_INFO, - "%05X: ZSTD FAILED to save %ld bytes [%s]", - fd, size, msg.c_str()); -#endif + posix_zstd_print(s_debug, "%05X: ZSTD: FAILED to save %ld bytes [%s]", fd, size, msg.c_str()); return -1; } /// ------------------------------------------------------------------------------ diff --git a/Online/OnlineBase/src/RTL/posix.cpp b/Online/OnlineBase/src/RTL/posix.cpp index e0a8527ac..a47c41e9e 100644 --- a/Online/OnlineBase/src/RTL/posix.cpp +++ b/Online/OnlineBase/src/RTL/posix.cpp @@ -17,6 +17,7 @@ /// Framework include files #include <RTL/posix.h> +#include <RTL/rtl.h> /// C/C++ include files #include <sys/stat.h> @@ -70,6 +71,18 @@ const Online::posix_t* native_posix_descriptor() { return &p; } +/// ------------------------------------------------------------------------------ +std::size_t Online::posix_print(int debug, const char* format, ...) { + if ( debug ) { + va_list args; + va_start( args, format ); + std::size_t len = ::lib_rtl_log(debug > 1 ? 
debug : LIB_RTL_ALWAYS, format, args); + va_end(args); + return len; + } + return 0; +} + /// ------------------------------------------------------------------------------ Online::posix_t::posix_t() { } -- GitLab From f391778e6c4642a767c5856ec849b56572b59a3d Mon Sep 17 00:00:00 2001 From: Markus Frank <Markus.Frank@cern.ch> Date: Fri, 5 Apr 2024 22:56:06 +0200 Subject: [PATCH 3/6] Add basic LZ4 compression example --- Online/Dataflow/src/Storage/StorageReader.cpp | 7 +- Online/Dataflow/src/Storage/StorageWriter.cpp | 8 +- Online/Dataflow/src/framework/DiskReader.cpp | 13 +- .../GaudiOnlineTests/src/TestCompression.cpp | 2 +- .../rtl_test_compression_lz4.qmt | 26 ++++ .../refs/BASE/rtl_test_compression_lz4.ref | 2 + Online/OnlineBase/include/RTL/posix.h | 27 ++-- Online/OnlineBase/src/COMPRESS/posix_gzip.cpp | 6 +- Online/OnlineBase/src/COMPRESS/posix_lz4.cpp | 125 +++++++++++------- Online/OnlineBase/src/COMPRESS/posix_lzma.cpp | 7 +- Online/OnlineBase/src/COMPRESS/posix_zstd.cpp | 7 +- Online/OnlineBase/src/RTL/posix.cpp | 4 + Online/PcSrv/src/CacheDBApi.cpp | 4 +- Online/PcSrv/src/TaskDBApi.cpp | 2 +- 14 files changed, 155 insertions(+), 85 deletions(-) create mode 100644 Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_lz4.qmt create mode 100644 Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_lz4.ref diff --git a/Online/Dataflow/src/Storage/StorageReader.cpp b/Online/Dataflow/src/Storage/StorageReader.cpp index 66d1aa0be..7d7b252f8 100644 --- a/Online/Dataflow/src/Storage/StorageReader.cpp +++ b/Online/Dataflow/src/Storage/StorageReader.cpp @@ -387,7 +387,12 @@ int StorageReader::open_file_posix_raw(const std::string& loc, Buffer& buffer) hdr[3] == 'X' && hdr[4] == 'Z' && hdr[5] == 0 ) { buffer.data = compress::decompress_lzma(std::move(buffer.data)); } - ++this->m_filesClosed; + /// Check LZ4 magic word in first 4 bytes + /// (See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md) + else if ( *(uint32_t*)hdr == 0x184D2204 ) { + buffer.data = compress::decompress_lz4(std::move(buffer.data)); + } + ++this->m_filesClosed; input.close(); return DF_SUCCESS; } diff --git a/Online/Dataflow/src/Storage/StorageWriter.cpp b/Online/Dataflow/src/Storage/StorageWriter.cpp index 2d6cc32ee..ce4ba8e23 100644 --- a/Online/Dataflow/src/Storage/StorageWriter.cpp +++ b/Online/Dataflow/src/Storage/StorageWriter.cpp @@ -88,10 +88,10 @@ namespace { } } #include <RTL/posix.h> -extern "C" Online::posix_t* posix_gzip_descriptor(); -extern "C" Online::posix_t* posix_zstd_descriptor(); -extern "C" Online::posix_t* posix_lzma_descriptor(); -extern "C" Online::posix_t* posix_lz4_descriptor(); +Online::posix_t* posix_gzip_descriptor(); +Online::posix_t* posix_zstd_descriptor(); +Online::posix_t* posix_lzma_descriptor(); +Online::posix_t* posix_lz4_descriptor(); struct StorageWriter::POSIX_FILE { public: diff --git a/Online/Dataflow/src/framework/DiskReader.cpp b/Online/Dataflow/src/framework/DiskReader.cpp index b7caa6101..62b9639f8 100644 --- a/Online/Dataflow/src/framework/DiskReader.cpp +++ b/Online/Dataflow/src/framework/DiskReader.cpp @@ -44,10 +44,10 @@ using namespace Online; #ifndef Z_DEFLATED #define Z_DEFLATED 8 #endif -extern "C" Online::posix_t* posix_zlib_descriptor(); -extern "C" Online::posix_t* posix_zstd_descriptor(); -extern "C" Online::posix_t* posix_lzma_descriptor(); -extern "C" Online::posix_t* posix_lz4_descriptor(); +Online::posix_t* posix_zlib_descriptor(); +Online::posix_t* posix_zstd_descriptor(); +Online::posix_t* posix_lzma_descriptor(); 
+Online::posix_t* posix_lz4_descriptor(); namespace { static constexpr int peek_size = int(8*sizeof(int)); @@ -1367,8 +1367,9 @@ int DiskReader::i_run() { continue; } } - /// LZ4 compression - else if ( file_buff[0] == uint8_t('L') && file_buff[1] == uint8_t('4') ) { + /// LZ4 compression: Check magic number 0x184D2204 in first 4 bytes + /// (See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md) + else if ( *(uint32_t*)file_buff == 0x184D2204 ) { current_input.close(); current_input = RawFile(fname, posix_lz4_descriptor()); if ( current_input.open() == -1 ) { diff --git a/Online/GaudiOnlineTests/src/TestCompression.cpp b/Online/GaudiOnlineTests/src/TestCompression.cpp index 3816d1b65..de16a48f8 100644 --- a/Online/GaudiOnlineTests/src/TestCompression.cpp +++ b/Online/GaudiOnlineTests/src/TestCompression.cpp @@ -210,7 +210,7 @@ namespace { } #define TEST_COMPRESSION(x) \ - extern "C" Online::posix_t* posix_##x##_descriptor(); \ + Online::posix_t* posix_##x##_descriptor(); \ /* Compress file using x algorithm */ \ extern "C" int rtl_test_##x##_compression(int argc, char** argv) { \ return rtl_test_compression(argc, argv, posix_##x##_descriptor()); \ diff --git a/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_lz4.qmt b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_lz4.qmt new file mode 100644 index 000000000..cbb906f2b --- /dev/null +++ b/Online/GaudiOnlineTests/tests/qmtest/onlinekernel.qms/rtl_test_compression_lz4.qmt @@ -0,0 +1,26 @@ +<?xml version="1.0" ?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'> +<!-- + (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration + + This software is distributed under the terms of the GNU General Public + Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". + + In applying this licence, CERN does not waive the privileges and immunities + granted to it by virtue of its status as an Intergovernmental Organization + or submit itself to any jurisdiction. + +--> +<extension class="OnlineTest.Test" kind="test"> + <argument name="program"><text>online_test</text></argument> + <argument name="args"><set> + <text>rtl_test_lz4_compression</text> + <text>-input=../../../../InstallArea/${BINARY_TAG}/lib/libProperties.so</text> + <text>-output=rtl_test_compression_lz4.output.lz4</text> + <text>-compression=default</text> + </set></argument> + <argument name="use_temp_dir"><enumeral>true</enumeral></argument> + <argument name="reference"><text>../refs/BASE/rtl_test_compression_lz4.ref</text></argument> + <argument name="prerequisites"><set><tuple> + <text>cleanup.cleanup</text><enumeral>PASS</enumeral> + </tuple></set></argument> +</extension> diff --git a/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_lz4.ref b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_lz4.ref new file mode 100644 index 000000000..644ee9878 --- /dev/null +++ b/Online/GaudiOnlineTests/tests/refs/BASE/rtl_test_compression_lz4.ref @@ -0,0 +1,2 @@ +Failed to read input file libProperties.so (iret=0) after a total of 0x######## bytes read. 
+Compressed input file libProperties.so [0x######## bytes] to rtl_test_compression_lz4.output.lz4 [ 0x######## uncompressed / 0x######## compressed bytes] diff --git a/Online/OnlineBase/include/RTL/posix.h b/Online/OnlineBase/include/RTL/posix.h index b39cdae93..260335807 100644 --- a/Online/OnlineBase/include/RTL/posix.h +++ b/Online/OnlineBase/include/RTL/posix.h @@ -46,21 +46,22 @@ namespace Online { posix_t& operator=(const posix_t& copy) = delete; static std::pair<int,int> fopen_disk(const char* path, const char* mode); - static int open_disk ( const char* path, int flags, va_list& args ); - static long read_disk ( int fd, void* ptr, std::size_t len ); - static long write_disk ( int fd, const void* ptr, std::size_t len ); - static int access_disk ( const char* nam, int mode ); - static int unlink_disk ( const char* nam ); - static int lseek_disk ( int fd, long offset, int how ); - static int lseek64_disk( int fd, int64_t offset, int how ); - static int fstat_disk ( int fd, struct stat* statbuf ); - static int fstat64_disk( int fd, struct stat64* statbuf ); - static int stat_disk ( const char* path, struct stat* statbuf ); - static int stat64_disk ( const char* path, struct stat64* statbuf ); + static int open_disk ( const char* path, int flags, va_list& args ); + static long read_disk ( int fd, void* ptr, std::size_t len ); + static long write_disk ( int fd, const void* ptr, std::size_t len ); + static int access_disk ( const char* nam, int mode ); + static int unlink_disk ( const char* nam ); + static int truncate_disk ( int fd, std::size_t len ); + static int lseek_disk ( int fd, long offset, int how ); + static int lseek64_disk ( int fd, int64_t offset, int how ); + static int fstat_disk ( int fd, struct stat* statbuf ); + static int fstat64_disk ( int fd, struct stat64* statbuf ); + static int stat_disk ( const char* path, struct stat* statbuf ); + static int stat64_disk ( const char* path, struct stat64* statbuf ); static int fileno_specific ( const FILE* stream ); - static int new_fd (); + static int new_fd ( ); /// Set option to specific implemetation of the descriptor - int ( *set_option )(int fd, const char* what, const char* value) = nullptr; + int ( *set_option ) (int fd, const char* what, const char* value) = nullptr; int unbuffered; int ( *open )( const char* filepath, int flags, ...) 
= nullptr; diff --git a/Online/OnlineBase/src/COMPRESS/posix_gzip.cpp b/Online/OnlineBase/src/COMPRESS/posix_gzip.cpp index 019fada43..c0919b51e 100644 --- a/Online/OnlineBase/src/COMPRESS/posix_gzip.cpp +++ b/Online/OnlineBase/src/COMPRESS/posix_gzip.cpp @@ -299,7 +299,7 @@ namespace { } // end anonymous namespace /// -------------------------------------------------------------------------------- -extern "C" Online::posix_t* posix_zlib_descriptor() { +Online::posix_t* posix_zlib_descriptor() { static Online::posix_t p; if ( !p.open ) { p.set_option = posix_gzip_set_option; @@ -338,12 +338,12 @@ extern "C" Online::posix_t* posix_zlib_descriptor() { /// -------------------------------------------------------------------------------- #else #include <stdexcept> -extern "C" Online::posix_t* posix_zlib_descriptor() { +Online::posix_t* posix_zlib_descriptor() { throw std::runtime_error("No ZLIB compression availible!"); } #endif /// -------------------------------------------------------------------------------- -extern "C" Online::posix_t* posix_gzip_descriptor() { +Online::posix_t* posix_gzip_descriptor() { return posix_zlib_descriptor(); } /// -------------------------------------------------------------------------------- diff --git a/Online/OnlineBase/src/COMPRESS/posix_lz4.cpp b/Online/OnlineBase/src/COMPRESS/posix_lz4.cpp index 876f430ca..8b7569578 100644 --- a/Online/OnlineBase/src/COMPRESS/posix_lz4.cpp +++ b/Online/OnlineBase/src/COMPRESS/posix_lz4.cpp @@ -32,12 +32,10 @@ #include <lz4.h> #include <lz4frame.h> -#define POSIX_DEBUG 1 - /// -------------------------------------------------------------------------------- namespace { - static int s_debug = 0; + static int s_debug = 1; constexpr static std::size_t MBYTE = 1024*1024; constexpr static std::size_t DEFAULT_BUFFER_SIZE = 3*MBYTE; @@ -107,9 +105,8 @@ namespace { } return { true, int(val[0]) - int('0') }; } -#if 0 /// ------------------------------------------------------------------------------ - std::size_t get_block_size(const LZ4F_frameInfo_t* info) { + std::size_t posix_lz4_block_size(const LZ4F_frameInfo_t* info) { switch (info->blockSizeID) { case LZ4F_default: case LZ4F_max64KB: return 1 << 16; @@ -121,7 +118,6 @@ namespace { exit(1); } } -#endif /// ------------------------------------------------------------------------------ int posix_lz4_set_option(int fd, const char* option, const char* value) { errno = EINVAL; @@ -174,17 +170,41 @@ namespace { errno = EBADF; } Error: - posix_lz4_print(LIB_RTL_ERROR, "LZMA error: FAILED to set option %s = %s", option, value); + posix_lz4_print(LIB_RTL_ERROR, "LZ4 error: FAILED to set option %s = %s", option, value); return -1; } /// ------------------------------------------------------------------------------ + bool posix_lz4_decompress(descriptor& desc, uint64_t len) { + for(uint8_t *next=desc.in_buffer.begin(), *end=next + len; next < end; ) { + uint64_t in_size = end - next; + uint64_t out_size = desc.tmp_buffer.length(); + uint64_t ret = ::LZ4F_decompress(desc.decompress, + desc.tmp_buffer.begin(), + &out_size, + next, + &in_size, + /* LZ4F_decompressOptions_t */ nullptr); + if ( ::LZ4F_isError(ret) ) { + posix_lz4_print(desc.debug, "%s: LZ4: Decompression error: %s", + desc.path.c_str(), ::LZ4F_getErrorName(ret)); + return false; + } + next += in_size; + if ( out_size > 0 ) { + desc.out_buffer.append(desc.tmp_buffer.begin(), out_size); + } + } + return true; + } + /// ------------------------------------------------------------------------------ int posix_lz4_attach(descriptor& 
desc) { const char* path = desc.path.c_str(); int flags = desc.flags; + if ( desc.in_size < LZ4F_HEADER_SIZE_MAX ) desc.in_size = LZ4F_HEADER_SIZE_MAX; if ( (flags & (O_WRONLY|O_CREAT)) != 0 ) { - LZ4F_preferences_t prefs = { + static LZ4F_preferences_t prefs = { { LZ4F_max256KB, LZ4F_blockLinked, LZ4F_noContentChecksum, @@ -199,11 +219,10 @@ namespace { }; desc.tmp_size = ::LZ4F_compressBound(desc.in_size, &prefs); // large enough for any input <= desc.in_size std::size_t ctx = ::LZ4F_createCompressionContext(&desc.compress, LZ4F_VERSION); - if ( LZ4F_isError(ctx) ) { + if ( ::LZ4F_isError(ctx) ) { posix_lz4_print(desc.debug, "%s: LZ4: FAILED to initialize compressor: %ld", path, ctx); return -1; } - if ( desc.in_size < LZ4F_HEADER_SIZE_MAX ) desc.in_size = LZ4F_HEADER_SIZE_MAX; desc.in_buffer.allocate(desc.in_size); desc.tmp_buffer.allocate(desc.tmp_size); desc.out_buffer.allocate(desc.out_limit); @@ -220,22 +239,47 @@ namespace { posix_lz4_print(desc.debug, "%s: LZ4: Buffer size is %ld bytes, header size %u bytes", path, desc.tmp_buffer.length(), status); desc.out_buffer.append(desc.tmp_buffer.begin(), status); + posix_t::truncate_disk(desc.fd, 0); } else if ( flags == O_RDONLY || flags == ( O_BINARY | O_RDONLY ) ) { uint64_t status = ::LZ4F_createDecompressionContext(&desc.decompress, LZ4F_VERSION); if ( ::LZ4F_isError(status) ) { posix_lz4_print(desc.debug, "%s: LZ4: LZ4F_dctx creation error: %s\n", path, ::LZ4F_getErrorName(status)); } + char buff[LZ4F_HEADER_SIZE_MAX]; LZ4F_frameInfo_t info; - desc.in_buffer.allocate(desc.in_size); desc.tmp_buffer.allocate(desc.tmp_size); desc.out_buffer.allocate(desc.out_limit); - size_t consumedSize = desc.tmp_size; - status = ::LZ4F_getFrameInfo(desc.decompress, &info, desc.in_buffer.begin(), &consumedSize); - if ( ::LZ4F_isError(status) ) { + int64_t total = posix_t::read_disk(desc.fd, buff, sizeof(buff)); + if ( total <= 0 ) { + std::string msg = std::make_error_code(std::errc(errno)).message(); + posix_lz4_print(desc.debug, + "%s: LZ4 FAILED to open compressed file. read %ld bytes. [Read error: %s]", + path, desc.in_buffer.length(), msg.c_str()); + return -1; + } + std::size_t consumed_size = sizeof(buff); + status = ::LZ4F_getFrameInfo(desc.decompress, &info, buff, &consumed_size); + if ( ::LZ4F_isError(status) ) { posix_lz4_print(desc.debug, "%s: LZ4: LZ4F_getFrameInfo error: %s\n", path, ::LZ4F_getErrorName(status)); return -1; } + std::size_t block_size = posix_lz4_block_size(&info); + desc.position = 0; + desc.in_buffer.allocate(desc.in_size = block_size); + desc.in_buffer.append(buff+consumed_size, total-consumed_size); + total = posix_t::read_disk(desc.fd, desc.in_buffer.ptr(), desc.in_size-consumed_size); + if ( total <= 0 ) { + std::string msg = std::make_error_code(std::errc(errno)).message(); + posix_lz4_print(desc.debug, + "%s: LZ4 FAILED to open compressed file. read %ld bytes. 
[Read error: %s]", + path, desc.in_buffer.length(), msg.c_str()); + return -1; + } + desc.in_buffer.set_cursor(desc.in_size); + if ( !posix_lz4_decompress(desc, desc.in_size) ) { + return -1; + } } desc.applied = 1; posix_lz4_print(desc.debug, "%s: LZ4: Successfully opened file.", path); @@ -261,9 +305,9 @@ namespace { return 0; } /// ------------------------------------------------------------------------------ - long posix_lz4_write_buffer(descriptor& desc, uint8_t* buff, std::size_t len) { + long posix_lz4_compress_buffer(descriptor& desc, uint8_t* buff, std::size_t len) { for(long done=0, out_now=0, todo=len; done < todo; done += out_now ) { - out_now = std::min(desc.tmp_size, len-done); + out_now = std::min(desc.tmp_buffer.length(), len-done); std::size_t status = ::LZ4F_compressUpdate(desc.compress, desc.tmp_buffer.begin(), desc.tmp_buffer.length(), @@ -275,12 +319,12 @@ namespace { desc.path.c_str(), status, ::LZ4F_getErrorName(status)); return -1; } - if ( out_now+desc.out_buffer.used() > desc.out_limit ) { + if ( status+desc.out_buffer.used() > desc.out_limit ) { ::posix_lz4_flush_buffer(desc, desc.out_buffer.used()); } if ( out_now > 0 ) { desc.out_total += out_now; - desc.out_buffer.append(desc.tmp_buffer.begin(), out_now); + desc.out_buffer.append(desc.tmp_buffer.begin(), status); } } posix_lz4_print(desc.debug, @@ -319,7 +363,7 @@ namespace { else { in_miss = size; } - int64_t written = posix_lz4_write_buffer(desc, desc.in_buffer.begin(), in_length); + int64_t written = posix_lz4_compress_buffer(desc, desc.in_buffer.begin(), in_length); if ( int64_t(in_length) != written ) { desc.position = 0; return -1; @@ -378,41 +422,26 @@ namespace { if ( total <= 0 ) { std::string msg = std::make_error_code(std::errc(errno)).message(); posix_lz4_print(desc.debug, - "%s: LZMA FAILED to read %ld bytes. [Read error: %s]", + "%s: LZ4 FAILED to read %ld bytes. [Read error: %s]", path, size, msg.c_str()); return (out_left > 0) ? out_left : -1; } else if ( total < int64_t(desc.in_size) ) { - posix_lz4_print(desc.debug, "%s: LZMA LIMITED read %ld bytes. [End-of-file]", path, size); + posix_lz4_print(desc.debug, "%s: LZ4 LIMITED read %ld bytes. [End-of-file]", path, size); } if ( total <= 0 ) { return out_left > 0 ? out_left : -1; } posix_lz4_print(desc.debug, "%s: LZ4: read %ld bytes from disk.", path, total); - for(uint8_t *next=desc.in_buffer.begin(), *end = next + total; next < end; ) { - uint64_t in_size = end - next; - uint64_t out_size = desc.tmp_buffer.length(); - uint64_t ret = ::LZ4F_decompress(desc.decompress, - desc.tmp_buffer.begin(), - &out_size, - next, - &in_size, - /* LZ4F_decompressOptions_t */ nullptr); - if ( ::LZ4F_isError(ret) ) { - printf("Decompression error: %s\n", LZ4F_getErrorName(ret)); - return (out_left > 0) ? out_left : -1; - } - next += in_size; - if ( out_size > 0 ) { - desc.out_buffer.append(desc.tmp_buffer.begin(), out_size); - } + if ( !posix_lz4_decompress(desc, total) ) { + return (out_left > 0) ? out_left : -1; } len = desc.out_buffer.used(); } return out_left + posix_lz4_read(fd, pointer, out_miss); } std::string msg = std::make_error_code(std::errc(errno=EBADF)).message(); - posix_lz4_print(s_debug, "%05X: LZMA FAILED to read %ld bytes. [%s]", fd, size, msg.c_str()); + posix_lz4_print(s_debug, "%05X: LZ4 FAILED to read %ld bytes. 
[%s]", fd, size, msg.c_str()); return -1; } /// ------------------------------------------------------------------------------ @@ -442,15 +471,19 @@ namespace { posix_lz4_print(desc.debug, "%s: LZ4: Closing file", desc.path.c_str()); if ( desc.compress ) { if ( desc.position > 0 ) { - ::posix_lz4_flush_buffer(desc, desc.position); + int64_t len = desc.position; + if ( len != posix_lz4_compress_buffer(desc, desc.in_buffer.begin(), len) ) { + return -1; + } } std::size_t status = ::LZ4F_compressEnd(desc.compress, - desc.out_buffer.begin(), - desc.out_buffer.length(), + desc.tmp_buffer.begin(), + desc.tmp_buffer.length(), NULL); - desc.position = status; - ::posix_lz4_flush_buffer(desc, desc.position); + desc.out_buffer.append(desc.tmp_buffer.begin(), status); + ::posix_lz4_flush_buffer(desc, 1); ::LZ4F_freeCompressionContext(desc.compress); + desc.position = 0; desc.compress = nullptr; } else if ( desc.decompress ) { @@ -533,7 +566,7 @@ namespace { } // end anonymous namespace /// -------------------------------------------------------------------------------- -extern "C" Online::posix_t* posix_lz4_descriptor() { +Online::posix_t* posix_lz4_descriptor() { static Online::posix_t p; if ( !p.open ) { p.set_option = posix_lz4_set_option; @@ -572,7 +605,7 @@ extern "C" Online::posix_t* posix_lz4_descriptor() { /// -------------------------------------------------------------------------------- #else #include <stdexcept> -extern "C" Online::posix_t* posix_lz4_descriptor() { +Online::posix_t* posix_lz4_descriptor() { throw std::runtime_error("No LZ4 compression availible!"); } #endif diff --git a/Online/OnlineBase/src/COMPRESS/posix_lzma.cpp b/Online/OnlineBase/src/COMPRESS/posix_lzma.cpp index 02cb5fffa..78803ca8c 100644 --- a/Online/OnlineBase/src/COMPRESS/posix_lzma.cpp +++ b/Online/OnlineBase/src/COMPRESS/posix_lzma.cpp @@ -30,8 +30,6 @@ #include <lzma.h> -#define POSIX_DEBUG 1 - /// -------------------------------------------------------------------------------- namespace { @@ -242,6 +240,7 @@ namespace { // Initialize the encoder using the custom filter chain. ret = ::lzma_stream_encoder(&desc.stream, filters, LZMA_CHECK_CRC64); desc.mode = descriptor::WRITING; + posix_t::truncate_disk(desc.fd, 0); } else if ( flags == O_RDONLY || flags == ( O_BINARY | O_RDONLY ) ) { // Initialize the encoder using the custom filter chain. 
@@ -596,7 +595,7 @@ namespace { } // end anonymous namespace /// -------------------------------------------------------------------------------- -extern "C" Online::posix_t* posix_lzma_descriptor() { +Online::posix_t* posix_lzma_descriptor() { static Online::posix_t p; if ( !p.open ) { p.set_option = posix_lzma_set_option; @@ -635,7 +634,7 @@ extern "C" Online::posix_t* posix_lzma_descriptor() { /// -------------------------------------------------------------------------------- #else #include <stdexcept> -extern "C" Online::posix_t* posix_lzma_descriptor() { +Online::posix_t* posix_lzma_descriptor() { throw std::runtime_error("No LZMA compression availible!"); } #endif diff --git a/Online/OnlineBase/src/COMPRESS/posix_zstd.cpp b/Online/OnlineBase/src/COMPRESS/posix_zstd.cpp index d890864af..8e5d41e13 100644 --- a/Online/OnlineBase/src/COMPRESS/posix_zstd.cpp +++ b/Online/OnlineBase/src/COMPRESS/posix_zstd.cpp @@ -30,8 +30,6 @@ #include <zstd.h> -//#define POSIX_DEBUG 1 - /// -------------------------------------------------------------------------------- namespace { @@ -191,6 +189,7 @@ namespace { desc.in_buffer.allocate(desc.in_size); desc.tmp_buffer.allocate(desc.out_size); desc.out_buffer.allocate(std::max(desc.out_limit, desc.out_size)); + posix_t::truncate_disk(desc.fd, 0); } else if ( flags == O_RDONLY || flags == ( O_BINARY | O_RDONLY ) ) { desc.decompress = ::ZSTD_createDCtx(); @@ -464,7 +463,7 @@ namespace { } // end anonymous namespace /// -------------------------------------------------------------------------------- -extern "C" Online::posix_t* posix_zstd_descriptor() { +Online::posix_t* posix_zstd_descriptor() { static Online::posix_t p; if ( !p.open ) { p.set_option = posix_zstd_set_option; @@ -503,7 +502,7 @@ extern "C" Online::posix_t* posix_zstd_descriptor() { /// -------------------------------------------------------------------------------- #else #include <stdexcept> -extern "C" Online::posix_t* posix_zstd_descriptor() { +Online::posix_t* posix_zstd_descriptor() { throw std::runtime_error("No ZSTD compression availible!"); } #endif diff --git a/Online/OnlineBase/src/RTL/posix.cpp b/Online/OnlineBase/src/RTL/posix.cpp index a47c41e9e..6ed6aff40 100644 --- a/Online/OnlineBase/src/RTL/posix.cpp +++ b/Online/OnlineBase/src/RTL/posix.cpp @@ -100,6 +100,10 @@ int Online::posix_t::fileno_specific(const FILE* file) { return fd; } /// ------------------------------------------------------------------------------ +int Online::posix_t::truncate_disk ( int fd, std::size_t len ) { + return ::ftruncate(fd, len); +} +/// ------------------------------------------------------------------------------ int Online::posix_t::access_disk ( const char* nam, int mode ) { return ::access(nam, mode); } diff --git a/Online/PcSrv/src/CacheDBApi.cpp b/Online/PcSrv/src/CacheDBApi.cpp index 255927189..8980281f7 100644 --- a/Online/PcSrv/src/CacheDBApi.cpp +++ b/Online/PcSrv/src/CacheDBApi.cpp @@ -48,7 +48,7 @@ void CacheDBApi::check() const { /// Attach snapshot object void CacheDBApi::setSnapShot(string&& data) { try { - vector<unsigned char> xml(Online::compress::decompress("gzip", std::move(Online::compress::base64_decode(data)))); + vector<unsigned char> xml(Online::compress::decompress("gzip", Online::compress::base64_decode(data))); dd4hep::xml::DocumentHolder doc(dd4hep::xml::DocumentHandler().parse((char*)&xml[0],xml.size())); xml_h root = doc.root().child(_rpcU(param)).child(_rpcU(value)); unique_ptr<Snapshot> s(new Snapshot(xmlrpc::XmlCoder(root).to<taskdb::Snapshot>())); @@ -59,7 +59,7 
@@ void CacheDBApi::setSnapShot(string&& data) { m_connectTime = chrono::system_clock::now(); } catch(const exception& e) { - vector<unsigned char> xml(Online::compress::decompress("gzip", std::move(Online::compress::base64_decode(data)))); + vector<unsigned char> xml(Online::compress::decompress("gzip", Online::compress::base64_decode(data))); cout << "XML Record [exception]:" << e.what() << endl; cout << (char*)&xml[0] << flush << endl << flush; } diff --git a/Online/PcSrv/src/TaskDBApi.cpp b/Online/PcSrv/src/TaskDBApi.cpp index b8e44044c..998ac90b3 100644 --- a/Online/PcSrv/src/TaskDBApi.cpp +++ b/Online/PcSrv/src/TaskDBApi.cpp @@ -153,7 +153,7 @@ string TaskDBApi::getCacheRaw(long stamp) const { /// Retrieve the database cache in XML form vector<unsigned char> TaskDBApi::getCache(long stamp) const { string data = getCacheRaw(stamp); - return Online::compress::decompress("gzip", std::move(Online::compress::base64_decode(data))); + return Online::compress::decompress("gzip", Online::compress::base64_decode(data)); } /// Access TaskDB to retrieve the tasks for this node -- GitLab From 77c5e3d745456d17cb1ea8475dee84ef88a2210d Mon Sep 17 00:00:00 2001 From: Markus Frank <markus.frank@cern.ch> Date: Sat, 6 Apr 2024 08:08:22 +0200 Subject: [PATCH 4/6] Update posix_lz4.cpp --- Online/OnlineBase/src/COMPRESS/posix_lz4.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Online/OnlineBase/src/COMPRESS/posix_lz4.cpp b/Online/OnlineBase/src/COMPRESS/posix_lz4.cpp index 8b7569578..f60f8fcf8 100644 --- a/Online/OnlineBase/src/COMPRESS/posix_lz4.cpp +++ b/Online/OnlineBase/src/COMPRESS/posix_lz4.cpp @@ -35,7 +35,7 @@ /// -------------------------------------------------------------------------------- namespace { - static int s_debug = 1; + static int s_debug = 0; constexpr static std::size_t MBYTE = 1024*1024; constexpr static std::size_t DEFAULT_BUFFER_SIZE = 3*MBYTE; -- GitLab From 2e975ce00400481afffa60979f39f13cad4c5404 Mon Sep 17 00:00:00 2001 From: Markus Frank <Markus.Frank@cern.ch> Date: Sat, 6 Apr 2024 08:55:33 +0200 Subject: [PATCH 5/6] Remove shadow warning --- Online/OnlineBase/src/COMPRESS/Compress.cpp | 24 ++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/Online/OnlineBase/src/COMPRESS/Compress.cpp b/Online/OnlineBase/src/COMPRESS/Compress.cpp index 7c41d8020..ec63f8004 100644 --- a/Online/OnlineBase/src/COMPRESS/Compress.cpp +++ b/Online/OnlineBase/src/COMPRESS/Compress.cpp @@ -67,8 +67,8 @@ namespace Online { std::vector<uint8_t> decompress_lzma(const uint8_t* data, std::size_t len) { std::vector<uint8_t> buffer, result; // Initialize the encoder using the custom filter chain. - lzma_stream strm = LZMA_STREAM_INIT; - auto ret = ::lzma_stream_decoder(&strm, UINT64_MAX, LZMA_CONCATENATED); + lzma_stream strm = LZMA_STREAM_INIT; + ::lzma_ret ret = ::lzma_stream_decoder(&strm, UINT64_MAX, LZMA_CONCATENATED); if ( ret != LZMA_OK ) { ::lib_rtl_output(LIB_RTL_ERROR, "LZMA: Failed to initialize decoder. 
[%08X]", ret); return {}; @@ -78,7 +78,7 @@ namespace Online { int64_t out_now = strm.total_out; strm.next_out = &buffer.at(0); strm.avail_out = buffer.size(); - ::lzma_ret ret = ::lzma_code(&strm, LZMA_RUN); + ret = ::lzma_code(&strm, LZMA_RUN); if ( !(ret == LZMA_OK || ret == LZMA_STREAM_END) ) { std::string msg = std::make_error_code(std::errc(errno=EINVAL)).message(); ::lib_rtl_output(LIB_RTL_ERROR, "LZMA: Failed to decode frame with %ld bytes [%s]", @@ -86,6 +86,7 @@ namespace Online { return {}; } else if ( strm.avail_out == 0 ) { + errno = ENOSPC; ::lib_rtl_output(LIB_RTL_ERROR, "LZMA: Output buffer overflow. Stop coding after %ld out of %ld bytes.", len-strm.avail_in, len); @@ -126,6 +127,7 @@ namespace Online { ZSTD_outBuffer output = { &buffer.at(0), buffer.size(), 0 }; std::size_t ret = ::ZSTD_decompressStream(decompress, &output , &input); if ( ZSTD_isError(ret) ) { + errno = EINVAL; ::lib_rtl_output(LIB_RTL_ERROR, "ZSTD FAILED to decompress %ld bytes.", len); return {}; } @@ -272,7 +274,6 @@ namespace Online { std::vector<uint8_t> result; used_encoding = ""; if ( !encoding.empty() ) { - std::stringstream str; bool gzip = encoding.find("gzip") != std::string::npos; bool deflate = encoding.find("deflate") != std::string::npos; if ( gzip || deflate ) { @@ -292,8 +293,7 @@ namespace Online { window, 8, Z_DEFAULT_STRATEGY); if ( status < 0 ) { - str << "Compress [Failed to initialize zlib/gzip " - << std::make_error_code(std::errc(EINVAL)).message() << "]"; + errno = ENOSPC; goto Default; } strm.avail_in = data.size(); @@ -304,8 +304,7 @@ namespace Online { strm.next_out = out; status = ::deflate(&strm, Z_FINISH); if ( status < 0 ) { - str << "Compress [Failed to deflate buffer with zlib/gzip " - << std::make_error_code(std::errc(EINVAL)).message() << "]"; + errno = ENOSPC; ::deflateEnd (&strm); goto Default; } @@ -324,7 +323,6 @@ namespace Online { std::vector<uint8_t> decompress_zstream(bool have_gzip, const uint8_t* data, std::size_t data_len) { - std::stringstream str; uint8_t out[CHUNK+1]; std::vector<uint8_t> result; z_stream strm; @@ -339,8 +337,7 @@ namespace Online { int window = windowBits + (have_gzip ? 
ENABLE_ZLIB_GZIP : 0); int status = ::inflateInit2(&strm, window); if ( status < 0 ) { - str << "XMLRPC [Failed to initialize zlib/gzip " - << std::make_error_code(std::errc(EINVAL)).message() << "]"; + errno = EINVAL; goto Default; } strm.avail_in = data_len; @@ -354,9 +351,12 @@ namespace Online { case Z_STREAM_END: break; case Z_BUF_ERROR: + errno = ENOSPC; + inflateEnd(&strm); + goto Default; default: + errno = EINVAL; inflateEnd(&strm); - str << "Compress [Failed inflate buffer with zlib/gzip : " << status; goto Default; } std::copy(out, out+CHUNK-strm.avail_out, back_inserter(result)); -- GitLab From 86536a93dfa5e413757f238bd7f636df159630d9 Mon Sep 17 00:00:00 2001 From: Markus Frank <markus.frank@cern.ch> Date: Sat, 6 Apr 2024 13:40:25 +0200 Subject: [PATCH 6/6] Update CMakeLists.txt --- Online/OnlineBase/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Online/OnlineBase/CMakeLists.txt b/Online/OnlineBase/CMakeLists.txt index 9c6f3b27b..66c52102b 100755 --- a/Online/OnlineBase/CMakeLists.txt +++ b/Online/OnlineBase/CMakeLists.txt @@ -166,7 +166,7 @@ online_library(OnlineCompress src/COMPRESS/posix_lzma.cpp src/COMPRESS/posix_lz4.cpp) # -target_link_libraries(OnlineCompress PRIVATE Online::OnlineBase ROOT::Core -lz -llzma -llz4) +target_link_libraries(OnlineCompress PRIVATE Online::OnlineBase) # if( TARGET PkgConfig::zlib ) message(STATUS "ZLIB compression availible") -- GitLab
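
Illustrative sketch (not part of the patches above): the readers touched by this series dispatch on the leading magic bytes of the input before choosing a decompressor — gzip (0x1F 0x8B), XZ (FD 37 7A 58 5A 00, i.e. "\xFD" "7zXZ" "\0"), zstd (little-endian 0xFD2FB528) and the LZ4 frame magic 0x184D2204 checked in StorageReader.cpp and DiskReader.cpp. The standalone C++ sketch below only identifies the container format and returns a tag; the wrapper name and return convention are hypothetical and it is not code from the repository.

// format_probe.cpp -- illustrative only, not part of the patch series.
// Identifies the compression container of a raw byte buffer from its
// leading magic bytes, mirroring the checks added to the readers:
//   gzip : 1F 8B
//   XZ   : FD 37 7A 58 5A 00   ("\xFD" "7zXZ" "\0")
//   zstd : 28 B5 2F FD         (little-endian 0xFD2FB528)
//   LZ4  : 04 22 4D 18         (little-endian 0x184D2204, LZ4 frame format)
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

std::string probe_compression(const std::vector<uint8_t>& data) {
  if ( data.size() < 6 ) return "unknown";
  const uint8_t* hdr = data.data();
  uint32_t word = 0;
  std::memcpy(&word, hdr, sizeof(word));   // avoid the unaligned *(uint32_t*) cast
  if ( hdr[0] == 0x1F && hdr[1] == 0x8B )
    return "gzip";
  if ( hdr[0] == 0xFD && hdr[1] == '7' && hdr[2] == 'z' &&
       hdr[3] == 'X'  && hdr[4] == 'Z' && hdr[5] == 0 )
    return "xz/lzma";
  if ( word == 0xFD2FB528u )               // ZSTD frame magic
    return "zstd";
  if ( word == 0x184D2204u )               // LZ4 frame magic
    return "lz4";
  return "unknown";                        // assume uncompressed payload
}

Like the patched DiskReader check, the 32-bit comparisons assume a little-endian host; the byte-wise gzip and XZ tests are endian-neutral.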
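
A second illustrative sketch on the option handling in posix_zstd_set_option: string options such as compress=default or strategy=default are not applied immediately, they are queued in desc.cparams and only pushed into the ZSTD context once the "apply" option is issued. The standalone snippet below reproduces that collect-then-apply pattern using only the public libzstd API; the variable names are hypothetical and it is not code from the repository.

// deferred_zstd_params.cpp -- illustrative sketch of the collect-then-apply
// parameter handling used by posix_zstd_set_option; standalone, libzstd only.
#include <utility>
#include <vector>
#include <zstd.h>

int main() {
  // Analogous to desc.cparams: options merely queue (parameter, value) pairs.
  std::vector<std::pair<ZSTD_cParameter,int>> cparams;
  cparams.emplace_back(ZSTD_c_compressionLevel, ZSTD_CLEVEL_DEFAULT);  // "compress=default"
  cparams.emplace_back(ZSTD_c_strategy, ZSTD_fast);                    // "strategy=default"
  // "apply" then pushes the queued parameters into the compression context in one go.
  ZSTD_CCtx* cctx = ZSTD_createCCtx();
  for ( const auto& p : cparams )
    ZSTD_CCtx_setParameter(cctx, p.first, p.second);
  ZSTD_freeCCtx(cctx);
  return 0;
}

Deferring the ZSTD_CCtx_setParameter calls until "apply" lets callers set options in any order before the context is configured, which is the behaviour the qmtest option strings rely on.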