2022-12-11 16:46:41 +00:00
|
|
|
#!/bin/bash
|
2023-05-20 23:47:05 +01:00
|
|
|
# shellcheck disable=SC1090
|
|
|
|
# shellcheck disable=SC1091
|
|
|
|
# shellcheck disable=SC2086 # we want word splitting
|
|
|
|
# shellcheck disable=SC2155
|
2020-03-03 22:38:09 +00:00
|
|
|
|
2023-04-27 22:45:40 +01:00
|
|
|
# Second-stage init, used to set up devices and our job environment before
|
|
|
|
# running tests.
|
|
|
|
|
2023-09-27 13:37:25 +01:00
|
|
|
shopt -s extglob

# Kill this script and every child process it spawned when we exit: stray
# console output from leftover children can interfere with LAVA signal
# handling, which is based on the log console.
cleanup() {
  # Nothing to do if no background jobs were ever registered.
  [ -n "$BACKGROUND_PIDS" ] || return 0

  set +x
  echo "Killing all child processes"

  # First, a polite SIGTERM to every registered child.
  for child in $BACKGROUND_PIDS; do
    kill "$child" 2>/dev/null || true
  done

  # Give the subprocesses a moment to shut down gracefully, then follow up
  # with SIGKILL for anything still alive.
  sleep 5
  for child in $BACKGROUND_PIDS; do
    kill -9 "$child" 2>/dev/null || true
  done

  BACKGROUND_PIDS=
  set -x
}
trap cleanup INT TERM EXIT
|
|
|
|
|
|
|
|
# Space-separated list of PIDs for every process this script starts in the
# background.
BACKGROUND_PIDS=

# Pull in the job environment from whichever of these files exist on this
# platform (DUT vs. container paths differ).
for env_file in '/dut-env-vars.sh' '/set-job-env-vars.sh' './set-job-env-vars.sh'; do
  if [ -f "$env_file" ]; then
    source "$env_file"
  fi
done
. "$SCRIPTS_DIR"/setup-test-env.sh

set -ex
|
|
|
|
|
2021-06-11 15:50:15 +01:00
|
|
|
# Set up any devices required by the jobs: HWCI_KERNEL_MODULES is a
# comma-separated list of kernel module names to load.
[ -z "$HWCI_KERNEL_MODULES" ] || {
  # xargs -d, does the comma splitting, so shell word splitting/globbing on
  # the module list is never wanted: quote the expansion and use printf
  # instead of the non-portable 'echo -n'.
  printf '%s' "$HWCI_KERNEL_MODULES" | xargs -d, -n1 /usr/sbin/modprobe
}
|
|
|
|
|
2022-11-04 12:16:40 +00:00
|
|
|
# Set up ZRAM as compressed swap, when the kernel supports it.
HWCI_ZRAM_SIZE=2G
if ! /sbin/zramctl --find --size $HWCI_ZRAM_SIZE -a zstd; then
  echo "zram: skipping, not supported"
else
  mkswap /dev/zram0
  swapon /dev/zram0
  echo "zram: $HWCI_ZRAM_SIZE activated"
fi
|
|
|
|
|
2022-02-22 14:58:47 +00:00
|
|
|
#
# Load the KVM module specific to the detected CPU virtualization extensions:
# - vmx for Intel VT
# - svm for AMD-V
#
# Additionally, download the kernel image to boot the VM via HWCI_TEST_SCRIPT.
#
if [ "$HWCI_KVM" = "true" ]; then
  unset KVM_KERNEL_MODULE
  # Detect which virtualization extension the CPU advertises.  Plain if/elif
  # keeps 'set -e' from aborting the script when neither flag is present:
  # the previous '{ grep && ... } || { grep && ... }' chain returned non-zero
  # in that case and killed the script before the warning below could print.
  if grep -qs '\bvmx\b' /proc/cpuinfo; then
    KVM_KERNEL_MODULE=kvm_intel
  elif grep -qs '\bsvm\b' /proc/cpuinfo; then
    KVM_KERNEL_MODULE=kvm_amd
  fi

  if [ -z "${KVM_KERNEL_MODULE}" ]; then
    echo "WARNING: Failed to detect CPU virtualization extensions"
  else
    modprobe ${KVM_KERNEL_MODULE}
  fi

  mkdir -p /lava-files
  curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
    -o "/lava-files/${KERNEL_IMAGE_NAME}" \
    "${KERNEL_IMAGE_BASE}/amd64/${KERNEL_IMAGE_NAME}"
fi
|
ci: add testing for VC4 drivers (Raspberry Pi 3)
This tests OpenGL ES 2.0 CTS suite with VC4 drivers, through baremetal
Raspberry Pi 3 devices.
The devices are connected to a switch that supports Power over Ethernet
(PoE), so the devices can be started/stopped through the switch, and
also to a host that runs the GitLab runner through serial-to-USB cables,
to monitor the devices to know when the testing finishes.
The Raspberries use a network boot, using NFS and TFTP. For the root
filesystem, they use the one created in the armhf container. For the
kernel/modules case, this is handled externally. Currently it is using
the same kernel/modules that come with the Raspberry Pi OS. In future we
could build them in the same armhf container.
At this moment we only test armhf architecture, as this is the default
one suggested by the Raspberry Pi Foundation. In future we could also
add testing for arm64 architecture.
Finally, for the very rare occasions where the Raspberry Pi 3 device is
booted but no data is received, it retries the testing for a second
time, powering off and on the device in the process.
v2:
- Remove commit that exists capture devcoredump (Eric)
- Squash remaining commits in one (Andres)
v3:
- Add missing boot timeout check (Juan)
v4:
- Use locks when running the PoE on/off script (Eric)
- Use a timeout for serial read (Eric)
v5:
- Rename stage to "raspberrypi" (Eric)
- Bump up arm64_test tag (Eric)
v6:
- Make serial buffer timeout optional (Juan)
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Juan A. Suarez Romero <jasuarez@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7628>
2020-11-15 19:57:55 +00:00
|
|
|
|
2021-06-11 15:54:34 +01:00
|
|
|
# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
# it in /install.  Quote the expansion so a project dir with spaces (or an
# unset variable) can't word-split into multiple ln arguments.
ln -sf "$CI_PROJECT_DIR"/install /install
export LD_LIBRARY_PATH=/install/lib
export LIBGL_DRIVERS_PATH=/install/lib/dri

# https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22495#note_1876691
# The navi21 boards seem to have trouble with ld.so.cache, so try explicitly
# telling it to look in /usr/local/lib.
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH":/usr/local/lib

# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
export XDG_CACHE_HOME=/tmp

# Make sure Python can find all our imports
export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")
|
|
|
|
|
2023-09-27 13:37:25 +01:00
|
|
|
# If we need to specify a driver, it means several drivers could pick up this gpu;
# ensure that the other driver can't accidentally be used
if [ -n "$MESA_LOADER_DRIVER_OVERRIDE" ]; then
  # extglob pattern (enabled by 'shopt -s extglob' near the top of this
  # script): delete every *_dri.so whose name is NOT the override driver.
  # The expansion is deliberately left unquoted so !(...) keeps its pattern
  # meaning.
  rm /install/lib/dri/!($MESA_LOADER_DRIVER_OVERRIDE)_dri.so
fi
# Log which DRI drivers remain, to aid debugging driver-selection problems.
ls -1 /install/lib/dri/*_dri.so
|
2023-09-27 13:37:25 +01:00
|
|
|
|
2021-06-11 17:09:17 +01:00
|
|
|
if [ "$HWCI_FREQ_MAX" = "true" ]; then
  # Ensure initialization of the DRM device (needed by MSM)
  head -0 /dev/dri/renderD128

  # Disable GPU frequency scaling
  DEVFREQ_GOVERNOR=$(find /sys/devices -name governor | grep gpu || true)
  if [ -n "$DEVFREQ_GOVERNOR" ]; then
    echo performance > $DEVFREQ_GOVERNOR || true
  fi

  # Disable CPU frequency scaling
  echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true

  # Disable GPU runtime power management
  GPU_AUTOSUSPEND=$(find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1)
  if [ -n "$GPU_AUTOSUSPEND" ]; then
    echo -1 > $GPU_AUTOSUSPEND || true
  fi

  # Lock Intel GPU frequency to 70% of the maximum allowed by hardware
  # and enable throttling detection & reporting.
  # Additionally, set the upper limit for CPU scaling frequency to 65% of the
  # maximum permitted, as an additional measure to mitigate thermal throttling.
  /intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
fi
|
|
|
|
|
2023-07-17 07:54:58 +01:00
|
|
|
# Start a little daemon to capture sysfs records and produce a JSON file
if [ ! -x /kdl.sh ]; then
  echo "kdl.sh not found!"
else
  echo "launch kdl.sh!"
  /kdl.sh &
  # Register the daemon so cleanup() can reap it on exit.
  BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
fi
|
|
|
|
|
2022-03-16 22:50:00 +00:00
|
|
|
# Increase freedreno hangcheck timer because it's right at the edge of the
# spilling tests timing out (and some traces, too)
[ -z "$FREEDRENO_HANGCHECK_MS" ] || \
  echo $FREEDRENO_HANGCHECK_MS | tee -a /sys/kernel/debug/dri/128/hangcheck_period_ms
|
|
|
|
|
2020-07-22 21:59:49 +01:00
|
|
|
# Start a little daemon to capture the first devcoredump we encounter. (They
# expire after 5 minutes, so we poll for them).
if test -x /capture-devcoredump.sh; then
  /capture-devcoredump.sh &
  # Register it so cleanup() can terminate it before artifacts are tarred.
  BACKGROUND_PIDS="${!} ${BACKGROUND_PIDS}"
fi
|
2020-07-22 21:59:49 +01:00
|
|
|
|
2021-01-22 00:19:43 +00:00
|
|
|
# If we want Xorg to be running for the test, then we start it up before the
# HWCI_TEST_SCRIPT because we need to use xinit to start X (otherwise
# without using -displayfd you can race with Xorg's startup), but xinit will eat
# your client's return code
if [ -n "$HWCI_START_XORG" ]; then
  echo "touch /xorg-started; sleep 100000" > /xorg-script
  env \
    VK_DRIVER_FILES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$(uname -m).json" \
    xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &
  BACKGROUND_PIDS="$! $BACKGROUND_PIDS"

  # Wait (up to 5 x 5s) for xorg to be ready for connections.
  xorg_tries=0
  while [ $xorg_tries -lt 5 ] && [ ! -e /xorg-started ]; do
    sleep 5
    xorg_tries=$((xorg_tries + 1))
  done
  export DISPLAY=:0
fi
|
|
|
|
|
2022-11-17 19:13:42 +00:00
|
|
|
if [ -n "$HWCI_START_WESTON" ]; then
  # XWayland's X11 socket; fall back to X1 when Xorg already owns X0.
  if [ -n "$HWCI_START_XORG" ]; then
    echo "Please consider dropping HWCI_START_XORG and instead using Weston XWayland for testing."
    WESTON_X11_SOCK="/tmp/.X11-unix/X1"
  else
    WESTON_X11_SOCK="/tmp/.X11-unix/X0"
  fi
  export WAYLAND_DISPLAY=wayland-0

  # Display server is Weston Xwayland when HWCI_START_XORG is not set or Xorg when it's
  export DISPLAY=:0
  mkdir -p /tmp/.X11-unix

  env \
    VK_DRIVER_FILES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$(uname -m).json" \
    weston -Bheadless-backend.so --use-gl -Swayland-0 --xwayland --idle-time=0 &
  BACKGROUND_PIDS="$! $BACKGROUND_PIDS"

  # Block until XWayland's socket appears.
  until [ -S "$WESTON_X11_SOCK" ]; do
    sleep 1
  done
fi
|
|
|
|
|
2022-02-17 01:11:49 +00:00
|
|
|
# Run the actual test script.  'set -e' is suspended so a failing test suite
# doesn't abort this init script before artifacts are collected; the status
# is captured in EXIT_CODE and propagated at the very end of the script.
set +e
bash -c ". $SCRIPTS_DIR/setup-test-env.sh && $HWCI_TEST_SCRIPT"
EXIT_CODE=$?
set -e

# Let's make sure the results are always stored in current working directory
mv -f ${CI_PROJECT_DIR}/results ./ 2>/dev/null || true

# On success, drop the piglit replay traces -- they are only kept to help
# debug failing runs.
[ ${EXIT_CODE} -ne 0 ] || rm -rf results/trace/"$PIGLIT_REPLAY_DEVICE_NAME"

# Make sure that capture-devcoredump is done before we start trying to tar up
# artifacts -- if it's writing while tar is reading, tar will throw an error and
# kill the job.
cleanup
|
|
|
|
|
2021-06-11 17:14:21 +01:00
|
|
|
# upload artifacts, when an S3 destination was provided for this job
if [ -n "$S3_RESULTS_UPLOAD" ]; then
  tar --zstd -cf results.tar.zst results/
  ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" results.tar.zst "https://${S3_RESULTS_UPLOAD}/results.tar.zst"
fi
|
|
|
|
|
2022-02-17 01:11:49 +00:00
|
|
|
# We still need to echo the hwci: mesa message, as some scripts rely on it, such
# as the python ones inside the bare-metal folder
# (the '&& ... ||' chain is safe here: a plain assignment cannot fail)
[ ${EXIT_CODE} -eq 0 ] && RESULT=pass || RESULT=fail

# Stop command tracing so the result lines below stay clean on the console.
set +x

# Print the final result; both bare-metal and LAVA look for this string to get
# the result of our run, so try really hard to get it out rather than losing
# the run. The device gets shut down right at this point, and a630 seems to
# enjoy corrupting the last line of serial output before shutdown.
for _ in $(seq 0 3); do echo "hwci: mesa: $RESULT"; sleep 1; echo; done

# Propagate the test script's status as this script's exit code.
exit $EXIT_CODE
|