Verified commit 61a5bae5 authored by Alex Iribarren
Run tests non-ironically

parent b988d112
@@ -10,6 +10,7 @@ variables:
   TEST_VIRTUAL: 'False'
   TEST_UNMANAGED: 'True'
   TEST_PUPPET: 'False'
+  TEST_INSTALLHOST: 'False'
   DELETE_FAILURES: 'True'
   PUPPET_ENVIRONMENT: 'qa'
   PUPPET_HOSTGROUP: 'playground/imageci'
@@ -13,6 +13,7 @@ Puppet jobs have to be run on custom runners (hostgroup `punch/cirunners`) manag
 * `TEST_PHYSICAL`: Test using physical machines. Defaults to `False`.
 * `TEST_UNMANAGED`: Create unmanaged machines. Defaults to `True`.
 * `TEST_PUPPET`: Create Puppet-managed machines. Defaults to `False`.
+* `TEST_INSTALLHOST`: Test physical machines with `ai-installhost`. Defaults to `False`.
 * `TEST_OS7`: Test the CC7 image. Defaults to `True`.
 * `TEST_OS8`: Test the C8 image. Defaults to `True`.
 * `TEST_OS8s`: Test the CS8 image. Defaults to `True`.
@@ -29,9 +30,10 @@ Note that `True`/`False` are case-sensitive: specifying `true` will not work.
 In order to run any tests, there must be *at least one* `True` value in *each* of the following columns:
 
 | **OS version** | **Machine type**   | **Environment**  |
-|----------------|------------------|------------------|
+|----------------|--------------------|------------------|
 | `TEST_OS7`     | `TEST_VIRTUAL`     | `TEST_UNMANAGED` |
 | `TEST_OS8`     | `TEST_PHYSICAL`    | `TEST_PUPPET`    |
+| `TEST_OS8s`    | `TEST_INSTALLHOST` |                  |
 
 If you start a pipeline without specifying any variables, no tests will run (both machine type variables are `False` by default).
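For example (a sketch that is not part of this commit; the trigger token, project ID and branch are placeholders), the new installhost jobs can be selected by passing the variable when starting a pipeline, e.g. through the GitLab pipeline trigger API:

# Hypothetical invocation: TOKEN, PROJECT_ID and the ref are placeholders, not values from this repository.
curl --request POST \
     --form "token=${TOKEN}" \
     --form "ref=master" \
     --form "variables[TEST_INSTALLHOST]=True" \
     "https://gitlab.cern.ch/api/v4/projects/${PROJECT_ID}/trigger/pipeline"

Since the OS variables default to `True` and the other machine-type variables default to `False`, this would run only the three `*_installhost` jobs added below.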
@@ -34,3 +34,12 @@ c8_puppet_virtual:
   only:
     variables:
       - $TEST_OS8 == 'True' && $TEST_VIRTUAL == 'True' && $TEST_PUPPET == 'True'
+
+c8_installhost:
+  extends: .test_installhost
+  variables:
+    OS: 8
+    ARCH: x86_64
+  only:
+    variables:
+      - $TEST_OS8 == 'True' && $TEST_INSTALLHOST == 'True'
@@ -34,3 +34,12 @@ cs8_puppet_virtual:
   only:
     variables:
       - $TEST_OS8s == 'True' && $TEST_VIRTUAL == 'True' && $TEST_PUPPET == 'True'
+
+cs8_installhost:
+  extends: .test_installhost
+  variables:
+    OS: 8s
+    ARCH: x86_64
+  only:
+    variables:
+      - $TEST_OS8s == 'True' && $TEST_INSTALLHOST == 'True'
@@ -34,3 +34,12 @@ cc7_puppet_virtual:
   only:
     variables:
       - $TEST_OS7 == 'True' && $TEST_VIRTUAL == 'True' && $TEST_PUPPET == 'True'
+
+cc7_installhost:
+  extends: .test_installhost
+  variables:
+    OS: 7
+    ARCH: x86_64
+  only:
+    variables:
+      - $TEST_OS7 == 'True' && $TEST_INSTALLHOST == 'True'
@@ -77,3 +77,15 @@
     - yum install -y openssh-clients jq
     - echo bash testallthethings.sh -i "${IMAGE}" -f "m2.small" -p
     - bash testallthethings.sh -i "${IMAGE}" -f "m2.small" -p
+
+.test_installhost:
+  extends: .openstack
+  image: 'gitlab-registry.cern.ch/ai-config-team/ai-tools'
+  tags:
+    - puppet
+  script:
+    - export OS_PROJECT_NAME="${PROJECT_PHYSICAL}"
+    - yum install -y openssh-clients jq
+    - echo bash test-nonironic.sh
+    - bash test-nonironic.sh
+  allow_failure: true
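To debug the new jobs outside CI, roughly the same steps can be reproduced by hand from the ai-tools container (a sketch, assuming valid OpenStack credentials; `PROJECT_PHYSICAL` and the Puppet variables come from the pipeline configuration, and the values below are the ones the `c8_installhost` job sets):

# Rough manual equivalent of the c8_installhost job; a sketch, not part of this commit.
export OS_PROJECT_NAME="${PROJECT_PHYSICAL}"   # OpenStack project that holds the physical flavors
export OS=8 ARCH=x86_64                        # values set by the c8_installhost job
export PUPPET_ENVIRONMENT=qa PUPPET_HOSTGROUP=playground/imageci
bash test-nonironic.sh

The `test-nonironic.sh` script that these jobs run is the new file added below.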
#!/bin/bash
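# test-nonironic.sh: exercise ai-installhost on physical machines (the non-Ironic path).
# For each flavor: create a host, switch it in Foreman to the target OS, medium and the
# TEST_RAID partition table, run ai-installhost, then verify SSH access and log the
# resulting disk layout.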
source common.sh
# Hardcoding UEFI flavors to test
[ -z "$FLAVOR" ] && FLAVOR=("p1.dl8200652.S513-V-IP562.linux-ci" "p1.dl8200652.S513-V-IP562.linux-ci-raid")
CI_PIPELINE_ID="${CI_PIPELINE_ID:-manual}"
# Delete failures by default
[[ "${DELETE_FAILURES,,}" == "false" ]] && DELETE_FAILURES=false || DELETE_FAILURES=true
TESTTYPE="installhost"
# How often and how many times to retry creating a machine with a "busy" flavor
MIN_RETRY_TIME=300 # 5 minutes
MIN_RETRY_COUNT=12 # 5*12 => 1 hour
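# aimsDelete(host, test group, test name): deregister the host from AIMS.
# The host is kept instead when no result was recorded for the test, or when
# DELETE_FAILURES is false and the test neither passed nor was skipped.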
function aimsDelete() {
local p_name="$1"
local flavor="$2"
local test="$3"
local delete=true
# Figure out if we should really delete or not
local result=`testGetResult ${flavor} ${test}`
if [[ "$DELETE_FAILURES" == false && $result -ne $TEST_PASS && $result -ne $TEST_SKIPPED || $result == "" ]]; then
delete=false
fi
if [[ "$delete" != false ]]; then
t_Log "Deleting ${p_name} from AIMS"
cmd="aims2 delhost ${p_name}"
t_BoldGreen "${cmd}"
eval ${cmd}
[[ $? -eq 0 ]] && return 0
fi
}
t_Log "Openstack flavors to test:"
for f in "${FLAVOR[@]}"; do
t_Log " ${f}"
# Create entries for the tests we're going to run
testCreate "${TESTTYPE}-${f}" "create"
testCreate "${TESTTYPE}-${f}" "install"
testCreate "${TESTTYPE}-${f}" "ssh_access"
done
# Poor man's FIFO queue
TOBETESTED=("${FLAVOR[@]}")
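# BUSY_FLAVORS maps flavor -> "<epoch of last attempt> <retry count>" (space-separated, parsed with cut below)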
declare -A BUSY_FLAVORS
GLOBAL_RETURN=0
while [[ ${#TOBETESTED[@]} -gt 0 ]]; do
# Pop the first flavor
flavor=${TOBETESTED[0]}
TOBETESTED=("${TOBETESTED[@]:1}")
PREV_TIME=`echo "${BUSY_FLAVORS[$flavor]}" | cut -d' ' -f1`
COUNT=`echo "${BUSY_FLAVORS[$flavor]}" | cut -d' ' -f2`
# If we've already retried too much, give up
if [[ $COUNT -gt $MIN_RETRY_COUNT ]]; then
testAddComments "${TESTTYPE}-${flavor}" "create" "All machines of this type were busy"
continue
fi
if [[ ! -z $PREV_TIME ]]; then
testClearComments "${TESTTYPE}-${flavor}" "create"
# Wait at least $MIN_RETRY_TIME before trying again. Add up to 30 seconds to avoid deadlocks
DELAY=$(( $MIN_RETRY_TIME - (`date +%s` - $PREV_TIME) + ($RANDOM % 30) ))
if [[ $DELAY -gt 0 ]]; then
t_Log "Waiting for $DELAY seconds..."
sleep $DELAY
fi
fi
p_name="`mktemp -u "citest-${CI_PIPELINE_ID}-XXXXX" | tr '[:upper:]' '[:lower:]'`"
t_Log "Trying to create ${p_name}, a machine of flavor ${flavor}"
# We have to set some stuff based on the $OS
if [[ "$OS" == "7" ]]; then
AI_OS="--cc7"
MEDIUM="CentOS mirror"
FOREMAN_OS="CentOS 7.0"
elif [[ "$OS" == "8" ]]; then
AI_OS="--c8"
MEDIUM="CentOS8"
FOREMAN_OS="CentOS 8.0"
else
AI_OS="--cs8"
MEDIUM="CentOSStream8"
FOREMAN_OS="CentOS Stream 8"
fi
# Foreman may deadlock if we try to create too many machines at once, so wait a random bit
DELAY=$(($RANDOM % 10))
t_Log "Wait ${DELAY} seconds"
sleep $DELAY
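# ai-bs creates a Puppet-managed machine of the requested flavor and registers it in Foreman under the hostgroup/environment configured above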
cmd="ai-bs $AI_OS --nova-sshkey imageci --landb-responsible imageci --nova-property centos_test_cleanup=true --nova-property puppet_managed=true --nova-flavor '${flavor}' --foreman-hostgroup '${PUPPET_HOSTGROUP}' --foreman-environment '${PUPPET_ENVIRONMENT}' '${p_name}'"
t_BoldGreen "${cmd}"
testStart "${TESTTYPE}-${flavor}" "create"
cmd_output=$(eval ${cmd} 2>&1)
if [[ $? -ne 0 ]]; then
# Unable to create the server, remove it from the list
t_Log "Unable to create server of flavor ${flavor}"
testEnd "${TESTTYPE}-${flavor}" "create" $TEST_ERROR
testAddComments "${TESTTYPE}-${flavor}" "create" "Unable to create server"
# Add the error to the comments
testAddComments "${TESTTYPE}-${flavor}" "create" "${cmd_output}"
continue
fi
# Wait for the server to be ready or to fail. Retry every 60 seconds, up to 30 times => wait 30min
waitFor 30 60 isServerCreated ${p_name}
if [[ $? -eq 1 ]]; then
t_Log "Timed out while creating a server of flavor ${flavor}"
testEnd "${TESTTYPE}-${flavor}" "create" $TEST_ERROR
testAddComments "${TESTTYPE}-${flavor}" "create" "Timed out while creating server"
deleteServer "${p_name}" "${TESTTYPE}-${flavor}" "create"
continue
fi
show=$(openstack server show ${p_name} -c status -c fault --format json)
status=$(echo "$show" | jq -r '.status')
error=$(echo "$show" | jq -r '.fault.message')
if [[ $status == 'ERROR' ]]; then
t_Log "Fault message: $error"
testEnd "${TESTTYPE}-${flavor}" "create" $TEST_ERROR
openstack server list
if [[ $error =~ "No valid host was found" ]]; then
# Is this the first time we've tried this flavor?
COUNT=`echo "${BUSY_FLAVORS[$flavor]}" | cut -d' ' -f2`
if [[ -z "$COUNT" ]]; then
# Yep, never tried it before.
# There are either no machines of that hardware type available, or we're already testing all the ones we have
# Let's check if we already have a machine of that hw type created
existing=0
while IFS= read -r LINE; do
[[ -z "$LINE" ]] && break
existing=1
ID="`echo $LINE | cut -d' ' -f1`"
NAME="`echo $LINE | cut -d' ' -f2`"
t_Log " $NAME ($ID) already exists with that flavor"
done <<< "$(openstack server list -c ID -c Name -c Flavor -c Status --format value | grep -v ERROR | grep -v "${p_name}" | grep "${flavor}")"
if [[ $existing -eq 1 ]]; then
# Yep, there's another machine, so we can try to test this flavor again later
TOBETESTED+=("${flavor}")
COUNT=`echo "${BUSY_FLAVORS[$flavor]}" | cut -d' ' -f2`
BUSY_FLAVORS[${flavor}]="`date +%s` $(($COUNT+1))"
else
# No machine of the type currently exists
t_Log " No other machine found, maybe none are available with this flavor?"
testAddComments "${TESTTYPE}-${flavor}" "create" "No machines of this type found"
fi
else
# If we tried it before, that's because at some point we determined it was possible
# to create a machine of this type, so keep trying
TOBETESTED+=("${flavor}")
COUNT=`echo "${BUSY_FLAVORS[$flavor]}" | cut -d' ' -f2`
BUSY_FLAVORS[${flavor}]="`date +%s` $(($COUNT+1))"
fi
else
# If we can't even create the machine, we've failed
t_CheckExitStatus 1
GLOBAL_RETURN=$((GLOBAL_RETURN+1))
t_Log "Unexpected error while creating machine: $error"
testAddComments "${TESTTYPE}-${flavor}" "create" "$error"
fi
deleteServer "${p_name}" "${TESTTYPE}-${flavor}" "create"
else
testEnd "${TESTTYPE}-${flavor}" "create" $TEST_PASS
t_Log "Machine created successfully!"
openstack server show "${p_name}" --max-width 100
t_Log "Let's prepare this machine for ai-installhost"
# First we have to change the OS, medium and set the partition table we want to test
cmd="ai-foreman updatehost -p TEST_RAID -m '${MEDIUM}' -o '${FOREMAN_OS}' '${p_name}'"
t_BoldGreen "${cmd}"
testStart "${TESTTYPE}-${flavor}" "install"
cmd_output=$(eval ${cmd} 2>&1)
if [[ $? -ne 0 ]]; then
# Unable to update the host in Foreman, give up on this flavor
t_Log "Unable to change partition table of flavor ${flavor}"
testEnd "${TESTTYPE}-${flavor}" "install" $TEST_ERROR
testAddComments "${TESTTYPE}-${flavor}" "install" "Unable to change partition table"
# Add the error to the comments
testAddComments "${TESTTYPE}-${flavor}" "install" "${cmd_output}"
deleteServer "${p_name}" "${TESTTYPE}-${flavor}" "install"
continue
fi
# Now let's do the actual ai-installhost
t_Log "Let's try to ai-installhost this machine"
cmd="ai-installhost --reboot -m uefi '${p_name}'"
t_BoldGreen "${cmd}"
cmd_output=$(eval ${cmd} 2>&1)
if [[ $? -ne 0 ]]; then
# ai-installhost failed, give up on this flavor
t_Log "Unable to ai-installhost server of flavor ${flavor}"
testEnd "${TESTTYPE}-${flavor}" "install" $TEST_ERROR
testAddComments "${TESTTYPE}-${flavor}" "install" "Unable to ai-installhost server"
# Add the error to the comments
testAddComments "${TESTTYPE}-${flavor}" "install" "${cmd_output}"
deleteServer "${p_name}" "${TESTTYPE}-${flavor}" "install"
continue
fi
cmd="aims2 showhost --full '${p_name}'"
t_BoldGreen "${cmd}"
cmd_output=$(eval ${cmd} 2>&1)
if [[ $? -ne 0 ]]; then
testEnd "${TESTTYPE}-${flavor}" "install" $TEST_ERROR
testAddComments "${TESTTYPE}-${flavor}" "install" "AIMS error"
# Add the error to the comments
testAddComments "${TESTTYPE}-${flavor}" "install" "${cmd_output}"
aimsDelete "${p_name}" "${TESTTYPE}-${flavor}" "install"
deleteServer "${p_name}" "${TESTTYPE}-${flavor}" "install"
continue
fi
testEnd "${TESTTYPE}-${flavor}" "install" $TEST_PASS
t_Log "Machine should hopefully install now"
t_Log "Trying to SSH into the machine"
# Sleep a bit to give the machine time to go down, otherwise the next test might fail immediately
sleep 2m
testStart "${TESTTYPE}-${flavor}" "ssh_access"
# Wait for the machine to really be up. Retry every 30 seconds (+ 10 seconds SSH timeout), up to 80 times => wait 40min
waitFor 80 30 isServerUp "${p_name}"
# If we can't SSH into the machine, we've failed
g=$?
GLOBAL_RETURN=$((GLOBAL_RETURN+g))
t_CheckExitStatus $g
if [[ $g -ne 0 ]]; then
testEnd "${TESTTYPE}-${flavor}" "ssh_access" $TEST_FAIL
testAddComments "${TESTTYPE}-${flavor}" "ssh_access" "Unable to SSH into the machine"
aimsDelete "${p_name}" "${TESTTYPE}-${flavor}" "ssh_access"
deleteServer "${p_name}" "${TESTTYPE}-${flavor}" "ssh_access"
continue
fi
testEnd "${TESTTYPE}-${flavor}" "ssh_access" $TEST_PASS
# Log the disk layout
runOnServer "${p_name}" "fdisk -l; cat /proc/mdstat"
t_CheckExitStatus $?
# Remove the server
aimsDelete "${p_name}" "${TESTTYPE}-${flavor}" "ssh_access"
deleteServer "${p_name}" "${TESTTYPE}-${flavor}" "ssh_access"
fi
done
testShowResults "ai-installhost"
testCreateJUnit
exit $GLOBAL_RETURN