scouting-demonstrator / scdaq · Commits

Commit b18359a8
authored 1 year ago by Giovanna Lazzari Miotto

ref: ci: Invert test outcome default to fail

parent 9794073e
Merge request: !91 ref: ci: Invert outcome default to fail
Pipeline #6920530 passed (stages: check, build, run)

Showing 1 changed file: scripts/test_filedma.sh, with 52 additions and 17 deletions
@@ -92,7 +92,7 @@ function run_filedma_test() {
     output_dir=$(awk -F ":" '/^output_filename_base:/{printf $2}' $config_file)
     output_prefix=$(awk -F ":" '/^output_filename_prefix:/{printf $2}' $config_file)
-    local test_failed=0
+    local test_failed=1
 
     echo "${FUNCNAME}: starting SCDAQ test with ${test_id} P5 capture file"
     echo "${FUNCNAME}: timeout ${timeout_secs}s ./${SCDAQ_PATH} --config ${config_file} --nstreams ${num_streams}"
     timeout ${timeout_secs}s ./"${SCDAQ_PATH}" --config ${config_file} --nstreams ${num_streams} | uniq -uc
@@ -102,7 +102,6 @@ function run_filedma_test() {
     # We expect a TIMEOUT failure because `scdaq` is not normally supposed to return.
     if [[ "${ret_status}" -ne 124 ]]; then
         echo "${FUNCNAME}: SCDAQ test for ${test_id} FAILED with exit code ${ret_status}!"
-        test_failed=1
     # Check if output file was correctly generated
     elif [[ ${expects_output} = true ]]; then
         # Find file matching name pattern under the output directory, and excluding input data files
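
Note on the `-ne 124` check above: GNU coreutils `timeout` exits with status 124 when it has to kill the command at the deadline, so status 124 here means scdaq ran for the whole window without crashing, which is the expected outcome for a process that normally never returns. A minimal sketch of that convention (command and duration are illustrative, not from the repository):

    # `timeout` returns 124 when it kills the command at the deadline;
    # any other status is the command's own (early) exit code.
    timeout 5s sleep 30
    ret_status=$?
    if [[ "${ret_status}" -ne 124 ]]; then
        echo "command exited early with status ${ret_status}"
    else
        echo "command was still running at the deadline (expected for a DAQ process)"
    fi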
@@ -125,25 +124,42 @@ function run_filedma_test() {
             # Hash-verify contents
             if check_hashes ${bytes_to_hash} ${output_file} ${ref_hash_file}; then
                 echo "${FUNCNAME}: SCDAQ test PASSED for ${test_id}"
+                test_failed=0
             else
                 echo "${FUNCNAME}: SCDAQ test for ${test_id} FAILED"
-                test_failed=1
             fi
         fi
+    else
+        # Execution ended by timeout, and not checking for output files
+        test_failed=0
     fi
 
     return ${test_failed}
 }
 
+function echo_bool () {
+    var=$1
+    if [[ -z $var || $var -eq 1 ]]; then
+        echo "X"
+    else
+        echo "Ü"
+    fi
+}
+
 function run_all_tests () {
     local test_description=$1
     local expects_output=${EXPECTS_OUTPUT}
     declare -i timeout_secs=${MAX_RUNTIME_SECS}
     readonly test_description timeout_secs expects_output
 
-    local gmt_conf_failed gmt_json5_failed calo_conf_failed calo_json5_failed
-    local passthrough_conf_failed passthrough_json5_failed multistream_conf_failed
+    local gmt_conf_failed=1 gmt_json5_failed=1
+    local calo_conf_failed=1 calo_json5_failed=1
+    local passthrough_conf_failed=1 passthrough_json5_failed=1
+    local multistream_conf_failed=1 multistream_json5_failed=1
 
     echo "${FUNCNAME}: running ${test_description}"
 
+    failure_statuses=(gmt_conf_failed calo_conf_failed passthrough_conf_failed)
     # Legacy configuration files
     run_filedma_test "GMT" ${timeout_secs} 0 "${CONFIG_DIR}/scdaq-gmt.conf"
     gmt_conf_failed=$?
     run_filedma_test "CALO" ${timeout_secs} 0 "${CONFIG_DIR}/scdaq-calo.conf"
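
This hunk carries the core of the change: `test_failed` and the per-configuration `*_failed` variables now start at 1 and are only reset to 0 once a success condition is explicitly verified, so a skipped check or an unreached branch reports failure instead of silently passing. A stripped-down sketch of the same fail-closed pattern, with hypothetical names:

    run_check() {
        # Default to failure; flip to success only after an explicit check passes.
        local check_failed=1
        if some_verification_step; then   # hypothetical helper standing in for the real checks
            check_failed=0
        fi
        return ${check_failed}
    }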
@@ -151,6 +167,9 @@ function run_all_tests () {
     run_filedma_test "PASS0_GMT" ${timeout_secs} 0 "${CONFIG_DIR}/scdaq-passthrough.conf"
     passthrough_conf_failed=$?
 
     # JSON5 configuration files
+    failure_statuses+=(gmt_json5_failed calo_json5_failed passthrough_json5_failed)
     run_filedma_test "GMT" ${timeout_secs} 0 "${CONFIG_DIR}/filedma-gmt.json5"
     gmt_json5_failed=$?
     run_filedma_test "CALO" ${timeout_secs} 0 "${CONFIG_DIR}/filedma-calo.json5"
@@ -159,6 +178,7 @@ function run_all_tests () {
     passthrough_json5_failed=$?
 
     if [[ ${expects_output} = false ]]; then
+        failure_statuses+=(multistream_conf_failed multistream_json5_failed)
         # We evaluate multi-stream operation with two parallel pipelines
         run_filedma_test "MULTI_PASS" ${timeout_secs} 2 "${CONFIG_DIR}/scdaq-passthrough.conf"
         multistream_conf_failed=$?
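
The `failure_statuses` array is grown with `+=( ... )` as each group of tests is scheduled, and it stores the names of the result variables rather than their values, so the report loop added in the next hunk can look each result up by name. A small illustration of that append-and-collect idiom (variable names here are placeholders):

    failure_statuses=(first_failed second_failed)   # seed the list
    failure_statuses+=(third_failed)                # append further entries later
    echo "${#failure_statuses[@]} tracked results: ${failure_statuses[*]}"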
@@ -166,24 +186,37 @@ function run_all_tests () {
         multistream_json5_failed=$?
     fi
 
-    local failed_any=0
-    echo "${FUNCNAME}: error report for ${test_description} (0 = SUCCESS)"
+    declare -i some_failed=0
+    for status in "${failure_statuses[@]}"; do
+        if [[ ! ${!status} =~ ^[01]$ ]]; then
+            echo "${FUNCNAME}: ${test_description} ===> ERROR: Invalid return value for $status"
+            some_failed=1
+        fi
+        if (( $status != 0 )); then
+            echo "${FUNCNAME}: ${test_description} ===> FAILED ($status)!"
+            some_failed=1
+        fi
+    done
+
+    echo "${FUNCNAME}: error report for ${test_description} (Ü = SÜCCESS)"
     echo "                   conf | json5"
-    echo "-- GMT         :   ${gmt_conf_failed}   |   ${gmt_json5_failed}"
-    echo "-- Calo        :   ${calo_conf_failed}   |   ${calo_json5_failed}"
-    echo "-- Passthrough :   ${passthrough_conf_failed}   |   ${passthrough_json5_failed}"
+    echo "-- GMT         :   $(echo_bool ${gmt_conf_failed})   |   $(echo_bool ${gmt_json5_failed})"
+    echo "-- Calo        :   $(echo_bool ${calo_conf_failed})   |   $(echo_bool ${calo_json5_failed})"
+    echo "-- Passthrough :   $(echo_bool ${passthrough_conf_failed})   |   $(echo_bool ${passthrough_json5_failed})"
 
     if [[ ${expects_output} = false ]]; then
-        echo "-- Multi-stream:   ${multistream_conf_failed}   |   ${multistream_json5_failed}"
-        failed_any=${multistream_conf_failed}+${multistream_json5_failed}
+        echo "-- Multi-stream:   $(echo_bool ${multistream_conf_failed})   |   $(echo_bool ${multistream_json5_failed})"
     fi
 
-    failed_any=${failed_any}+${gmt_json5_failed}+${gmt_conf_failed}+${calo_json5_failed}+${calo_conf_failed}+${passthrough_json5_failed}+${passthrough_conf_failed}
-    if [[ $failed_any -ne 0 ]]; then
-        echo "${FUNCNAME}: ${test_description} ===> FAILED!"
-        exit 1
-    else
+    if (( $some_failed == 0 )); then
         echo "${FUNCNAME}: ${test_description} ===> SUCCESS"
     fi
+    return $some_failed
 }
 
+function exit_on_failure {
+    if [[ $? -ne 0 ]]; then
+        exit 1
+    fi
+}
+
 function main {
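
Two bash features carry the new report loop: `${!status}` is indirect expansion, which dereferences the variable whose name is stored in `status`, and inside `(( ... ))` a bare variable name is likewise evaluated as a variable reference, so `(( $status != 0 ))` ends up testing the value of, say, `gmt_conf_failed`. A self-contained illustration (names are made up for the example):

    gmt_conf_failed=0
    calo_conf_failed=1
    for status in gmt_conf_failed calo_conf_failed; do
        echo "$status holds ${!status}"        # indirect expansion: prints 0, then 1
        if (( $status != 0 )); then            # arithmetic context dereferences the name again
            echo "$status marks a failure"
        fi
    done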
@@ -193,9 +226,11 @@ function main {
     # Test 1: time-bound operation, checks for crashes
     MODIFY_CONFIG=false
     EXPECTS_OUTPUT=false
     run_all_tests "Test 1 (operation) - ${MAX_RUNTIME_SECS}-second runs, no output files"
+    exit_on_failure $?
 
     # Test 2: validates file output for basic runs
     MODIFY_CONFIG=true
     EXPECTS_OUTPUT=true
     run_all_tests "Test 2 (regression) - ${MAX_RUNTIME_SECS}-second runs, check output files"
+    exit_on_failure $?
 
     echo "${FUNCNAME}: all tests finished"
 }
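
A note on the new `exit_on_failure $?` calls: the function body tests `$?` rather than its `$1` argument, and that still works because `$?` at the top of a function body reflects the exit status of the last command executed before the call (here, `run_all_tests`); passing `$?` explicitly just makes the intent visible at the call site. A minimal reproduction of the behaviour:

    check_status() {
        # $? here is still the exit status of the command run just before the call.
        if [[ $? -ne 0 ]]; then
            echo "previous command failed"
        fi
    }

    false          # last command before the call exits non-zero
    check_status   # prints "previous command failed"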