mesa/.gitlab-ci/lava.yml.jinja2


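{#
   LAVA job definition for Mesa CI, rendered from this Jinja2 template. The
   {{ ... }} placeholders (device_type, gpu_version, mesa_url, and so on) are
   presumably filled in by the submitting CI script before the job is posted
   to the LAVA scheduler.
#}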
job_name: mesa-{{ test_suite }}-{{ deqp_version }}-{{ gpu_version }} {{ pipeline_info }}
device_type: {{ device_type }}
context:
  extra_nfsroot_args: " init=/init rootwait"
timeouts:
  job:
    minutes: 30
priority: 75
visibility:
  group:
    - "Collabora+fdo"
{% if tags %}
{% set lavatags = tags.split(',') %}
tags:
{% for tag in lavatags %}
  - {{ tag }}
{% endfor %}
{% endif %}
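{# Three actions run in order: deploy the kernel and rootfs over TFTP/NFS,
   boot the device, then run the inline test definition. #}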
actions:
- deploy:
    timeout:
      minutes: 10
    to: tftp
    kernel:
      url: {{ base_artifacts_url }}/{{ kernel_image_name }}
{% if kernel_image_type %}
      {{ kernel_image_type }}
{% endif %}
    nfsrootfs:
      url: {{ base_artifacts_url }}/lava-rootfs.tgz
      compression: gz
{% if dtb %}
    dtb:
      url: {{ base_artifacts_url }}/{{ dtb }}.dtb
{% endif %}
    os: oe
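{# Boot the deployed kernel and wait for the rootfs shell prompt. #}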
- boot:
    timeout:
      minutes: 25
    method: {{ boot_method }}
{% if boot_method == "fastboot" %}
{#
   For fastboot, LAVA doesn't know how to unpack the rootfs/apply overlay/repack,
   so we transfer the overlay over the network after boot.
#}
    transfer_overlay:
      download_command: wget -S --progress=dot:giga
      unpack_command: tar -C / -xzf
{% else %}
    commands: nfs
{% endif %}
    prompts:
      - 'lava-shell:'
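{# The test is an inline Lava-Test definition: a list of shell steps run on
   the booted device, with results scraped from the console output by the
   parse pattern at the bottom of this file. #}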
- test:
    timeout:
      minutes: 30
    failure_retry: 1
    definitions:
    - repository:
        metadata:
          format: Lava-Test Test Definition 1.0
          name: mesa
          description: "Mesa test plan"
          os:
          - oe
          scope:
          - functional
        run:
          steps:
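          # The rootfs comes up bare, so mount the pseudo-filesystems and
          # tmpfs scratch space by hand, then set up DNS and sync the clock.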
          - mount -t proc none /proc
          - mount -t sysfs none /sys
          - mount -t devtmpfs none /dev || echo possibly already mounted
          - mkdir -p /dev/pts
          - mount -t devpts devpts /dev/pts
          - mkdir -p /dev/shm
          - mount -t tmpfs tmpfs /dev/shm
          - mount -t tmpfs tmpfs /tmp
          - echo "nameserver 8.8.8.8" > /etc/resolv.conf
          - echo "$NFS_SERVER_IP caching-proxy" >> /etc/hosts
          - for i in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done
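          # Best-effort GPU tuning (each step tolerates missing sysfs nodes):
          # pin the devfreq governor to performance, and write -1 to
          # autosuspend_delay_ms to keep the GPU from runtime-suspending
          # mid-run.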
          - modprobe amdgpu || true
          - DEVFREQ_GOVERNOR=`find /sys/devices -name governor | grep gpu || true`
          - echo performance > $DEVFREQ_GOVERNOR || true
          - GPU_AUTOSUSPEND=`find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1`
          - echo -1 > $GPU_AUTOSUSPEND || true
{% if env_vars %}
          - export {{ env_vars }}
{% endif %}
          # runner script assumes some stuff is in pwd
          - cd /
          - wget -S --progress=dot:giga -O- {{ mesa_url }} | tar -xz
          - mkdir -p $CI_PROJECT_DIR
          - ln -sf /install $CI_PROJECT_DIR/install
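          # The rest of the environment drives the dEQP and piglit runners;
          # the per-job values are baked in when the Jinja2 template is
          # rendered.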
          - export DEQP_NO_SAVE_RESULTS=1
          - export GPU_VERSION={{ gpu_version }}
          - export DEQP_VER={{ deqp_version }}
          # Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
          - export XDG_CACHE_HOME=/tmp
          - export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")
          - export PIGLIT_REPLAY_EXTRA_ARGS="--keep-image"
          - export PIGLIT_REPLAY_REFERENCE_IMAGES_BASE_URL="/mesa-tracie-results/${CI_PROJECT_PATH}"
          - export PIGLIT_REPLAY_ARTIFACTS_BASE_URL="/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}/${CI_JOB_ID}"
          - export MINIO_ARTIFACTS_PATH="minio://${MINIO_HOST}/${PIGLIT_REPLAY_ARTIFACTS_BASE_URL}/"
          - export PIGLIT_REPLAY_DESCRIPTION_FILE="/install/traces-${DRIVER_NAME}.yml"
          - export PIGLIT_REPLAY_DEVICE_NAME=gl-{{ gpu_version }}
          - export PIGLIT_RESULTS={{ gpu_version }}-${PIGLIT_PROFILES}
          - export LIBGL_DRIVERS_PATH=/install/lib/dri
          # If we want Xorg to be running for the test, then we start it up
          # before the LAVA_TEST_SCRIPT, because we need to use xinit to start
          # X (otherwise, without using -displayfd, you can race with Xorg's
          # startup), but xinit will eat your client's return code.
          - "if [ -n \"$LAVA_START_XORG\" ]; then
               echo 'touch /xorg-started; sleep 100000' > /xorg-script;
               env LD_LIBRARY_PATH=/install/lib/ xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -dpms -logfile /Xorg.0.log &
               for i in 1 2 3 4 5; do
                 if [ -e /xorg-started ]; then
                   break;
                 fi;
                 sleep 5;
               done;
               export DISPLAY=:0;
             fi"
- "if sh $LAVA_TEST_SCRIPT; then
export RESULT=pass;
else
export RESULT=fail;
fi"
- "if [ -d results ]; then
tar -czf results.tar.gz results/;
ci-fairy minio login $CI_JOB_JWT;
ci-fairy minio cp results.tar.gz $MINIO_ARTIFACTS_PATH/results.tar.gz;
fi"
- "echo mesa: $RESULT"
        parse:
          pattern: '(?P<test_case_id>\S*):\s+(?P<result>(pass|fail))'
      from: inline
      name: mesa
      path: inline/mesa.yaml