mesa/.gitlab-ci/lava/lava.yml.jinja2
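
{#
   Jinja2 template for the LAVA job definition that Mesa CI submits for each
   hardware test job; the {{ ... }} values are filled in when the job is
   generated. A rendered job deploys a kernel and NFS rootfs, boots the
   device, and runs the Mesa test plan as an inline shell definition.
#}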

job_name: "mesa: {{ pipeline_info }}"
device_type: {{ device_type }}
context:
extra_nfsroot_args: " init=/init rootwait minio_results={{ job_artifacts_base }}"
timeouts:
job:
minutes: 30
priority: 75
visibility:
group:
- "Collabora+fdo"
{% if tags %}
{% set lavatags = tags.split(',') %}
tags:
{% for tag in lavatags %}
- {{ tag }}
{% endfor %}
{% endif %}
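# The job consists of three LAVA actions run in order: deploy the kernel and
# rootfs over TFTP/NFS, boot the device, then run the inline test definition.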
actions:
- deploy:
    timeout:
      minutes: 10
    to: tftp
    kernel:
      url: {{ base_system_url_prefix }}/{{ kernel_image_name }}
{% if kernel_image_type %}
      {{ kernel_image_type }}
{% endif %}
    nfsrootfs:
      url: {{ base_system_url_prefix }}/lava-rootfs.tgz
      compression: gz
{% if dtb %}
    dtb:
      url: {{ base_system_url_prefix }}/{{ dtb }}.dtb
{% endif %}
    os: oe
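# Boot the deployed kernel and wait for the shell prompt on the console.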
- boot:
    timeout:
      minutes: 25
    method: {{ boot_method }}
{% if boot_method == "fastboot" %}
{#
   For fastboot, LAVA doesn't know how to unpack the rootfs/apply overlay/repack,
   so we transfer the overlay over the network after boot.
#}
    transfer_overlay:
      download_command: wget -S --progress=dot:giga
      unpack_command: tar -C / -xzf
{% else %}
    commands: nfs
{% endif %}
    prompts:
    - 'lava-shell:'
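# The test action runs an inline Lava-Test shell definition on the booted device.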
- test:
    timeout:
      minutes: 30
    failure_retry: 1
    definitions:
    - repository:
        metadata:
          format: Lava-Test Test Definition 1.0
          name: mesa
          description: "Mesa test plan"
          os:
          - oe
          scope:
          - functional
        run:
          steps:
          # A bunch of setup we have to do before we can pull anything
          - cd /
          - mount -t proc none /proc
          - mount -t sysfs none /sys
          - mount -t devtmpfs none /dev || echo possibly already mounted
          - mkdir -p /dev/pts
          - mount -t devpts devpts /dev/pts
          - mkdir -p /dev/shm
          - mount -t tmpfs tmpfs /dev/shm
          - mount -t tmpfs tmpfs /tmp
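          # Bring up networking (DNS, the NFS server doubling as caching proxy)
          # and set the clock over NTP, retrying a few times.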
- echo "nameserver 8.8.8.8" > /etc/resolv.conf
- echo "$NFS_SERVER_IP caching-proxy" >> /etc/hosts
- for i in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done
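          # Pull in the Mesa build artifacts and the per-job rootfs overlay.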
          - mkdir -p $CI_PROJECT_DIR
          - wget -S --progress=dot:giga -O- {{ mesa_build_url }} | tar -xz -C $CI_PROJECT_DIR
          - wget -S --progress=dot:giga -O- {{ job_rootfs_overlay_url }} | tar -xz -C /
          - . /set-job-env-vars.sh
          - ln -sf $CI_PROJECT_DIR/install /install
          # Set up our devices
          - '[ -z "$HWCI_KERNEL_MODULES" ] || (echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe)'
          # Disable GPU frequency scaling
          - DEVFREQ_GOVERNOR=`find /sys/devices -name governor | grep gpu || true`
          - echo performance > $DEVFREQ_GOVERNOR || true
          # Disable CPU frequency scaling
          - echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true
          # Disable GPU runtime PM
          - GPU_AUTOSUSPEND=`find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1`
          - echo -1 > $GPU_AUTOSUSPEND || true
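          # Watch for kernel devcoredumps (e.g. GPU crash dumps) in the background.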
- ./capture-devcoredump.sh &
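          # The job JWT lets ci-fairy authenticate for the results upload below.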
          - export CI_JOB_JWT="{{ jwt }}"
          # Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
          - export XDG_CACHE_HOME=/tmp
          - export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")
          - export LD_LIBRARY_PATH=/install/lib/
          - export LIBGL_DRIVERS_PATH=/install/lib/dri
          # If we want Xorg to be running for the test, then we start it up before the
          # HWCI_TEST_SCRIPT, because we need to use xinit to start X (otherwise,
          # without -displayfd, you can race with Xorg's startup), but xinit will eat
          # your client's return code
          - "if [ -n \"$HWCI_START_XORG\" ]; then
              echo 'touch /xorg-started; sleep 100000' > /xorg-script;
              env xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &
              for i in 1 2 3 4 5; do
                if [ -e /xorg-started ]; then
                  break;
                fi;
                sleep 5;
              done;
              export DISPLAY=:0;
            fi"
- "export RESULT=fail;
if sh $HWCI_TEST_SCRIPT; then
export RESULT=pass;
fi"
- "MINIO=$(cat /proc/cmdline | tr ' ' '\n' | grep minio_results | cut -d '=' -f 2 || true)
if [ -n $MINIO ]; then
tar -czf results.tar.gz results/;
ci-fairy minio login $CI_JOB_JWT;
ci-fairy minio cp results.tar.gz minio://$MINIO/results.tar.gz;
fi"
- "echo hwci: mesa: $RESULT"
        parse:
          pattern: 'hwci: (?P<test_case_id>\S*):\s+(?P<result>(pass|fail))'
      from: inline
      name: mesa
      path: inline/mesa.yaml