#!/bin/sh

# Make sure to kill this script and all of its child processes on exit, since
# any console output may interfere with LAVA signal handling, which is based
# on the console log.
cleanup() {
  if [ "$BACKGROUND_PIDS" = "" ]; then
    return 0
  fi

  set +x
  echo "Killing all child processes"
  for pid in $BACKGROUND_PIDS
  do
    kill "$pid" 2>/dev/null || true
  done

  # Sleep just a little to give enough time for subprocesses to be gracefully
  # killed. Then apply a SIGKILL if necessary.
  sleep 5
  for pid in $BACKGROUND_PIDS
  do
    kill -9 "$pid" 2>/dev/null || true
  done

  BACKGROUND_PIDS=
  set -x
}
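# Run cleanup on interrupt, termination, and normal exit so that no background
# child outlives this script.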
trap cleanup INT TERM EXIT

# Space-separated list of the PIDs of the processes started in the background
# by this script.
BACKGROUND_PIDS=

# Second-stage init, used to set up devices and our job environment before
# running tests.

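# Sourced (not executed) so the job's environment variables are set in this
# shell.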
. /set-job-env-vars.sh
set -ex
# Set up any devices required by the jobs
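# HWCI_KERNEL_MODULES is a comma-separated list of module names; each entry is
# loaded individually with modprobe.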
[ -z "$HWCI_KERNEL_MODULES" ] || {
  echo -n "$HWCI_KERNEL_MODULES" | xargs -d, -n1 /usr/sbin/modprobe
}
#
# Load the KVM module specific to the detected CPU virtualization extensions:
# - vmx for Intel VT
# - svm for AMD-V
#
# Additionally, download the kernel image to boot the VM via HWCI_TEST_SCRIPT.
#
if [ "$HWCI_KVM" = "true" ]; then
  unset KVM_KERNEL_MODULE
  grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel || {
    grep -qs '\bsvm\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_amd
  }

  [ -z "${KVM_KERNEL_MODULE}" ] && \
    echo "WARNING: Failed to detect CPU virtualization extensions" || \
    modprobe ${KVM_KERNEL_MODULE}

  mkdir -p /lava-files
  wget -S --progress=dot:giga -O /lava-files/${KERNEL_IMAGE_NAME} \
    "${KERNEL_IMAGE_BASE_URL}/${KERNEL_IMAGE_NAME}"
fi
# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
# it in /install
ln -sf $CI_PROJECT_DIR/install /install
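# Point the dynamic linker at the freshly built Mesa libraries, and the GL
# loader at the matching DRI drivers.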
export LD_LIBRARY_PATH=/install/lib
export LIBGL_DRIVERS_PATH=/install/lib/dri
# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
export XDG_CACHE_HOME=/tmp
# Make sure Python can find all our imports
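# Exporting the interpreter's default sys.path means any Python helper this
# job launches resolves the same modules, regardless of how it is invoked.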
export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")
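# Make GPU/CPU clocking as deterministic as possible for frequency-sensitive
# jobs.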
if [ "$HWCI_FREQ_MAX" = "true" ]; then
  # Ensure initialization of the DRM device (needed by MSM)
  head -0 /dev/dri/renderD128

  # Disable GPU frequency scaling
  DEVFREQ_GOVERNOR=`find /sys/devices -name governor | grep gpu || true`
  test -z "$DEVFREQ_GOVERNOR" || echo performance > $DEVFREQ_GOVERNOR || true

  # Disable CPU frequency scaling
  echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true

  # Disable GPU runtime power management
  GPU_AUTOSUSPEND=`find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1`
  test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true

  # Lock Intel GPU frequency to 70% of the maximum allowed by hardware
  # and enable throttling detection & reporting.
  # Additionally, set the upper limit for CPU scaling frequency to 65% of the
  # maximum permitted, as an additional measure to mitigate thermal throttling.
  ./intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
fi
# Increase freedreno hangcheck timer because it's right at the edge of the
# spilling tests timing out (and some traces, too)
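# (dri/128 in debugfs corresponds to the first render node, /dev/dri/renderD128.)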
if [ -n "$FREEDRENO_HANGCHECK_MS" ]; then
  echo $FREEDRENO_HANGCHECK_MS | tee -a /sys/kernel/debug/dri/128/hangcheck_period_ms
fi
# Start a little daemon to capture the first devcoredump we encounter. (They
# expire after 5 minutes, so we poll for them).
/capture-devcoredump.sh &
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
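# $! is the PID of the job just started; record it so cleanup() can kill the
# devcoredump poller on exit.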
# If we want Xorg to be running for the test, then start it up before the
# HWCI_TEST_SCRIPT. We have to use xinit to start X (otherwise, without
# -displayfd, you can race against Xorg's startup), but since xinit eats its
# client's return code, we run it in the background and poll for readiness
# instead.
if [ -n "$HWCI_START_XORG" ]; then
  echo "touch /xorg-started; sleep 100000" > /xorg-script
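  # -noreset keeps Xorg alive after its last client disconnects, -s 0 disables
  # the screensaver, and -dpms turns off display power management.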
  env \
    xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &
  BACKGROUND_PIDS="$! $BACKGROUND_PIDS"

  # Wait for xorg to be ready for connections.
  for i in 1 2 3 4 5; do
    if [ -e /xorg-started ]; then
      break
    fi
    sleep 5
  done
  export DISPLAY=:0
fi
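# Run the test script with errexit disabled so a failing test suite doesn't
# abort this script before the results are collected; capture its exit code.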
RESULT=fail
set +e
sh -c "$HWCI_TEST_SCRIPT"
EXIT_CODE=$?
set -e
# Let's make sure the results are always stored in the current working directory
mv -f ${CI_PROJECT_DIR}/results ./ 2>/dev/null || true
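# Trace artifacts are only worth keeping when the run failed.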
[ ${EXIT_CODE} -ne 0 ] || rm -rf results/trace/"$PIGLIT_REPLAY_DEVICE_NAME"
# Make sure that capture-devcoredump is done before we start trying to tar up
# artifacts -- if it's writing while tar is reading, tar will throw an error and
# kill the job.
cleanup
# upload artifacts
if [ -n "$MINIO_RESULTS_UPLOAD" ]; then
  tar -czf results.tar.gz results/
  ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}"
  ci-fairy minio cp results.tar.gz minio://"$MINIO_RESULTS_UPLOAD"/results.tar.gz
fi
# We still need to echo the "hwci: mesa" message, as some scripts rely on it,
# such as the Python ones inside the bare-metal folder.
[ ${EXIT_CODE} -eq 0 ] && RESULT=pass
set +x
echo "hwci: mesa: $RESULT"
# Sleep a bit to avoid kernel dump messages interleaving with the LAVA ENDTC signal
sleep 1
exit $EXIT_CODE