ci/lava: Provide list of overlays to submitter

Instead of providing a hardcoded set of arguments, allow overlays to be
added to the submitter script. Passing Python dicts as string
representations and relying on coercion from strings is far from great,
but it is the only mechanism fire offers, so this is the pragmatic choice.

Signed-off-by: Daniel Stone <daniels@collabora.com>
Co-authored-by: Guilherme Gallo <guilherme.gallo@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/31882>
This commit is contained in:
Daniel Stone
2024-08-29 00:22:19 +01:00
committed by Marge Bot
parent f32a2de26d
commit f44970173d
9 changed files with 67 additions and 81 deletions

View File

@@ -37,31 +37,25 @@ ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://$
section_switch variables "Environment variables passed through to device:"
cat results/job-rootfs-overlay/set-job-env-vars.sh
ARTIFACT_URL="https://${PIPELINE_ARTIFACTS_BASE}/${LAVA_S3_ARTIFACT_NAME:?}.tar.zst"
section_switch lava_submit "Submitting job for scheduling"
touch results/lava.log
tail -f results/lava.log &
PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
submit \
--farm "${FARM}" \
--device-type "${DEVICE_TYPE}" \
--boot-method "${BOOT_METHOD}" \
--job-timeout-min ${JOB_TIMEOUT:-30} \
--dump-yaml \
--pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
--rootfs-url "https://${BASE_SYSTEM_HOST_PATH}/lava-rootfs.tar.zst" \
--rootfs-url "${ROOTFS_URL}" \
--kernel-url-prefix "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}" \
--kernel-external "${EXTERNAL_KERNEL_TAG}" \
--build-url "${ARTIFACT_URL}" \
--job-rootfs-overlay-url "https://${JOB_ROOTFS_OVERLAY_PATH}" \
--job-timeout-min ${JOB_TIMEOUT:-30} \
--first-stage-init artifacts/ci-common/init-stage1.sh \
--ci-project-dir "${CI_PROJECT_DIR}" \
--device-type "${DEVICE_TYPE}" \
--farm "${FARM}" \
--dtb-filename "${DTB}" \
--jwt-file "${S3_JWT_FILE}" \
--kernel-image-name "${KERNEL_IMAGE_NAME}" \
--kernel-image-type "${KERNEL_IMAGE_TYPE}" \
--boot-method "${BOOT_METHOD}" \
--visibility-group "${VISIBILITY_GROUP}" \
--lava-tags "${LAVA_TAGS}" \
--mesa-job-name "$CI_JOB_NAME" \
@@ -70,4 +64,17 @@ PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
--project-name "${CI_PROJECT_NAME}" \
--starting-section "${CURRENT_SECTION}" \
--job-submitted-at "${CI_JOB_STARTED_AT}" \
- append-overlay \
--name=mesa-build \
--url="https://${PIPELINE_ARTIFACTS_BASE}/${LAVA_S3_ARTIFACT_NAME:?}.tar.zst" \
--compression=zstd \
--path="${CI_PROJECT_DIR}" \
--format=tar \
- append-overlay \
--name=job-overlay \
--url="https://${JOB_ROOTFS_OVERLAY_PATH}" \
--compression=gz \
--path="/" \
--format=tar \
- submit \
>> results/lava.log

View File

@@ -15,10 +15,10 @@ import pathlib
import sys
import time
from collections import defaultdict
from dataclasses import dataclass, fields
from dataclasses import dataclass, field, fields
from datetime import datetime, timedelta, UTC
from os import environ, getenv, path
from typing import Any, Optional
from os import environ, getenv
from typing import Any, Optional, Self
import fire
from lavacli.utils import flow_yaml as lava_yaml
@@ -396,11 +396,9 @@ class PathResolver:
@dataclass
class LAVAJobSubmitter(PathResolver):
boot_method: str
ci_project_dir: str
device_type: str
farm: str
job_timeout_min: int # The job timeout in minutes
build_url: str = None
dtb_filename: str = None
dump_yaml: bool = False # Whether to dump the YAML payload to stdout
first_stage_init: str = None
@@ -415,27 +413,62 @@ class LAVAJobSubmitter(PathResolver):
rootfs_url: str = None
validate_only: bool = False # Whether to only validate the job, not execute it
visibility_group: str = None # Only affects LAVA farm maintainers
job_rootfs_overlay_url: str = None
structured_log_file: pathlib.Path = None # Log file path with structured LAVA log
ssh_client_image: str = None # x86_64 SSH client image to follow the job's output
project_name: str = None # Project name to be used in the job name
starting_section: str = None # GitLab section used to start
job_submitted_at: [str | datetime] = None
__structured_log_context = contextlib.nullcontext() # Structured Logger context
_overlays: dict = field(default_factory=dict, init=False)
def __post_init__(self) -> None:
def __post_init__(self) -> Self:
super().__post_init__()
# Remove mesa job names with spaces, which breaks the lava-test-case command
self.mesa_job_name = self.mesa_job_name.split(" ")[0]
if not self.structured_log_file:
return
if self.structured_log_file:
self.__structured_log_context = StructuredLoggerWrapper(self).logger_context()
if self.job_submitted_at:
self.job_submitted_at = datetime.fromisoformat(self.job_submitted_at)
self.__structured_log_context = StructuredLoggerWrapper(self).logger_context()
self.proxy = setup_lava_proxy()
return self
def append_overlay(
self, compression: str, name: str, path: str, url: str, format: str = "tar"
) -> Self:
"""
Append an overlay to the LAVA job definition.
Args:
compression (str): The compression type of the overlay (e.g., "gz", "xz").
name (str): The name of the overlay.
path (str): The path where the overlay should be applied.
url (str): The URL from where the overlay can be downloaded.
format (str, optional): The format of the overlay (default is "tar").
Returns:
Self: The instance of LAVAJobSubmitter with the overlay appended.
"""
self._overlays[name] = {
"compression": compression,
"format": format,
"path": path,
"url": url,
}
return self
def print(self) -> Self:
"""
Prints the dictionary representation of the instance and returns the instance itself.
Returns:
Self: The instance of the class.
"""
print(self.__dict__)
return self
def __prepare_submission(self) -> str:
# Overwrite the timeout for the testcases with the value offered by the
# user. The testcase running time should be at least 4 times greater than

View File

@@ -65,20 +65,7 @@ class LAVAJobDefinition:
"url": f"{args.rootfs_url}",
"compression": "zstd",
"format": "tar",
"overlays": {
"mesa-build": {
"url": self.job_submitter.build_url,
"compression": "zstd",
"format": "tar",
"path": self.job_submitter.ci_project_dir
},
"job-metadata": {
"url": args.job_rootfs_overlay_url,
"compression": "gz",
"format": "tar",
"path": "/"
}
}
"overlays": args._overlays,
}
values = self.generate_metadata()

View File

@@ -25,17 +25,7 @@ actions:
url: None
compression: zstd
format: tar
overlays:
mesa-build:
url: null # aka None
compression: zstd
format: tar
path: /ci/project/dir
job-metadata:
url: null # aka None
compression: gz
format: tar
path: /
overlays: {}
namespace: dut
- deploy:
timeout:

View File

@@ -25,17 +25,7 @@ actions:
url: None
compression: zstd
format: tar
overlays:
mesa-build:
url: null # aka None
compression: zstd
format: tar
path: /ci/project/dir
job-metadata:
url: null # aka None
compression: gz
format: tar
path: /
overlays: {}
- deploy:
timeout:
minutes: 5
@@ -94,6 +84,7 @@ actions:
run:
steps:
- echo test FASTBOOT
- export CURRENT_SECTION=dut_boot
- set -e
- echo Could not find jwt file, disabling S3 requests...
- sed -i '/S3_RESULTS_UPLOAD/d' /set-job-env-vars.sh

View File

@@ -28,17 +28,7 @@ actions:
url: None
compression: zstd
format: tar
overlays:
mesa-build:
url: null # aka None
compression: zstd
format: tar
path: /ci/project/dir
job-metadata:
url: null # aka None
compression: gz
format: tar
path: /
overlays: {}
dtb:
url: None/my_dtb_filename.dtb
namespace: dut

View File

@@ -28,17 +28,7 @@ actions:
url: None
compression: zstd
format: tar
overlays:
mesa-build:
url: null # aka None
compression: zstd
format: tar
path: /ci/project/dir
job-metadata:
url: null # aka None
compression: gz
format: tar
path: /
overlays: {}
dtb:
url: None/my_dtb_filename.dtb
- boot:

View File

@@ -73,7 +73,6 @@ def lava_job_submitter(
mock_setup_lava_proxy.return_value = mock_proxy()
yield LAVAJobSubmitter(
boot_method="test_boot",
ci_project_dir="test_dir",
device_type="test_device",
farm="test_farm",
job_timeout_min=1,

View File

@@ -54,7 +54,6 @@ def job_submitter_factory(mode: Literal["UBOOT", "FASTBOOT"], shell_file):
return LAVAJobSubmitter(
boot_method=boot_method,
ci_project_dir="/ci/project/dir",
device_type=device_type,
farm="test_farm",
dtb_filename="my_dtb_filename",