ci/lava: Create LogFollower and move logging methods

- Create LogFollower to capture the LAVA log and process it, adding
  GitlabSection markers and color treatment to it
- Break the log up further by opening new GitLab sections between test cases
- Implement LogFollower as a context manager to deal with incomplete LAVA
  jobs
- Use the template method pattern to simplify GitLab log section management
- Fix section timestamps

Signed-off-by: Guilherme Gallo <guilherme.gallo@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16323>
Author: Guilherme Gallo
Date: 2022-06-23 21:43:00 -03:00
Committed by: Marge Bot
Parent: c86ba3640f
Commit: 2569d7d7df
9 changed files with 512 additions and 424 deletions
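Taken together, the bullets above imply a usage pattern along these lines. This is a minimal sketch based only on the API the tests in this commit exercise (feed, flush, manage_gl_sections); the with-block behavior is assumed from the context-manager bullet, and lava_log_lines is a hypothetical placeholder:

from lava.utils.lava_log import LogFollower

lava_log_lines = []  # placeholder for parsed LAVA YAML log entries

# Assumed: __exit__ closes any GitLab section left open by an
# incomplete LAVA job, per the commit message.
with LogFollower() as lf:
    lf.feed(lava_log_lines)
    for line in lf.flush():
        print(line)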


@@ -0,0 +1,51 @@
from unittest.mock import MagicMock, patch

import pytest
import yaml
from freezegun import freeze_time

from .lava.helpers import generate_testsuite_result, jobs_logs_response


@pytest.fixture
def mock_sleep():
    """Mock time.sleep to make tests faster"""
    with patch("time.sleep", return_value=None):
        yield


@pytest.fixture
def frozen_time(mock_sleep):
    with freeze_time() as frozen_time:
        yield frozen_time


RESULT_GET_TESTJOB_RESULTS = [{"metadata": {"result": "test"}}]


@pytest.fixture
def mock_proxy():
    def create_proxy_mock(
        job_results=RESULT_GET_TESTJOB_RESULTS,
        testsuite_results=[generate_testsuite_result()],
        **kwargs
    ):
        proxy_mock = MagicMock()
        proxy_submit_mock = proxy_mock.scheduler.jobs.submit
        proxy_submit_mock.return_value = "1234"

        proxy_results_mock = proxy_mock.results.get_testjob_results_yaml
        proxy_results_mock.return_value = yaml.safe_dump(job_results)

        proxy_test_suites_mock = proxy_mock.results.get_testsuite_results_yaml
        proxy_test_suites_mock.return_value = yaml.safe_dump(testsuite_results)

        proxy_logs_mock = proxy_mock.scheduler.jobs.logs
        proxy_logs_mock.return_value = jobs_logs_response()

        # Forward any extra kwargs onto the logs mock (e.g. side_effect)
        for key, value in kwargs.items():
            setattr(proxy_logs_mock, key, value)

        return proxy_mock

    yield create_proxy_mock
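A hypothetical sketch of how a test consumes this fixture: any keyword argument is forwarded onto the logs mock by the loop above, so a side_effect generator can script the whole log stream (generate_n_logs and retriable_follow_job come from the helpers and submitter modules used by the tests in this commit):

def test_example(mock_sleep, mock_proxy):
    # Hypothetical usage: script the log stream via side_effect.
    proxy = mock_proxy(side_effect=generate_n_logs(n=3))
    retriable_follow_job(proxy, "")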


@@ -5,19 +5,6 @@ from typing import Callable, Generator, Iterable, Tuple, Union
import yaml
from freezegun import freeze_time

from lava.utils.lava_log import (
    DEFAULT_GITLAB_SECTION_TIMEOUTS,
    FALLBACK_GITLAB_SECTION_TIMEOUT,
    LogSectionType,
)


def section_timeout(section_type: LogSectionType) -> int:
    return int(
        DEFAULT_GITLAB_SECTION_TIMEOUTS.get(
            section_type, FALLBACK_GITLAB_SECTION_TIMEOUT
        ).total_seconds()
    )
def create_lava_yaml_msg(
@@ -26,32 +13,44 @@ def create_lava_yaml_msg(
return {"dt": str(dt()), "msg": msg, "lvl": lvl}
def jobs_logs_response(finished=False, msg=None, **kwargs) -> Tuple[bool, str]:
    timed_msg = create_lava_yaml_msg(**kwargs)


def generate_testsuite_result(
    name="test-mesa-ci", result="pass", metadata_extra=None, extra=None
):
    if metadata_extra is None:
        metadata_extra = {}
    if extra is None:
        extra = {}
    return {"metadata": {"result": result, **metadata_extra}, "name": name}


def jobs_logs_response(
    finished=False, msg=None, lvl="target", result=None
) -> Tuple[bool, str]:
    timed_msg = {"dt": str(datetime.now()), "msg": "New message", "lvl": lvl}
    if result:
        timed_msg["lvl"] = "target"
        timed_msg["msg"] = f"hwci: mesa: {result}"
    logs = [timed_msg] if msg is None else msg
    return finished, yaml.safe_dump(logs)
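Each tuple this helper returns mirrors the shape the proxy mock serves for LAVA's scheduler.jobs.logs call: a finished flag plus a YAML-encoded list of log lines. For illustration:

finished, raw_log = jobs_logs_response(finished=True, result="pass")
assert finished is True
assert yaml.safe_load(raw_log)[0]["msg"] == "hwci: mesa: pass"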
def message_generator_new(
    messages: dict[LogSectionType, Iterable[int]]
) -> Iterable[tuple[dict, Iterable[int]]]:
    default = [1]
    for section_type in LogSectionType:
        delay = messages.get(section_type, default)
        yield mock_lava_signal(section_type), delay


def message_generator():
    for section_type in LogSectionType:
        yield mock_lava_signal(section_type)


def level_generator():
    # Tests all known levels by default
    yield from cycle(("results", "feedback", "warning", "error", "debug", "target"))


def generate_n_logs(
    n=0,
    n=1,
    tick_fn: Union[Generator, Iterable[int], int] = 1,
    message_fn=message_generator,
    level_fn=level_generator,
    result="pass",
):
    """Simulate a log partitioned in n components"""
    level_gen = level_fn()
    if isinstance(tick_fn, Generator):
        tick_gen = tick_fn
    elif isinstance(tick_fn, Iterable):
@@ -62,12 +61,15 @@ def generate_n_logs(
    with freeze_time(datetime.now()) as time_travel:
        tick_sec: int = next(tick_gen)
        while True:
            # Simulate a complete run given by message_fn
            for msg in message_fn():
                yield jobs_logs_response(finished=False, msg=[msg])
                time_travel.tick(tick_sec)

            # Simulate a scenario where the target job is waiting to be started
            for _ in range(n - 1):
                level: str = next(level_gen)
                yield jobs_logs_response(finished=True)
                time_travel.tick(tick_sec)
                yield jobs_logs_response(finished=False, msg=[], lvl=level)
                time_travel.tick(tick_sec)

            yield jobs_logs_response(finished=True, result=result)
def to_iterable(tick_fn):
@@ -78,30 +80,3 @@ def to_iterable(tick_fn):
    else:
        tick_gen = cycle((tick_fn,))
    return tick_gen
def mock_logs(
    messages={},
):
    with freeze_time(datetime.now()) as time_travel:
        # Simulate a complete run following the section order produced by
        # message_generator_new
        for msg, tick_list in message_generator_new(messages):
            for tick_sec in tick_list:
                yield jobs_logs_response(finished=False, msg=[msg])
                time_travel.tick(tick_sec)

        yield jobs_logs_response(finished=True)
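A hypothetical usage sketch (mock_proxy comes from the test fixtures; the tick list is illustrative): repeat the TEST_CASE signal over three one-second ticks while every other section keeps the default single tick.

proxy = mock_proxy(
    side_effect=mock_logs(messages={LogSectionType.TEST_CASE: [1, 1, 1]})
)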
def mock_lava_signal(type: LogSectionType) -> dict[str, str]:
    return {
        LogSectionType.TEST_CASE: create_lava_yaml_msg(
            msg="<STARTTC> case", lvl="debug"
        ),
        LogSectionType.TEST_SUITE: create_lava_yaml_msg(
            msg="<STARTRUN> suite", lvl="debug"
        ),
        LogSectionType.LAVA_POST_PROCESSING: create_lava_yaml_msg(
            msg="<LAVA_SIGNAL_ENDTC case>", lvl="target"
        ),
    }.get(type, create_lava_yaml_msg())
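For instance, the TEST_CASE entry yields the debug-level signal that the log follower treats as a test-case boundary:

assert mock_lava_signal(LogSectionType.TEST_CASE)["msg"] == "<STARTTC> case"
assert mock_lava_signal(LogSectionType.TEST_CASE)["lvl"] == "debug"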


@@ -25,85 +25,28 @@
import xmlrpc.client
from contextlib import nullcontext as does_not_raise
from datetime import datetime
from itertools import cycle, repeat
from typing import Callable, Generator, Iterable, Tuple, Union
from unittest.mock import MagicMock, patch
from itertools import repeat
import pytest
import yaml
from freezegun import freeze_time
from lava.exceptions import MesaCIException, MesaCIRetryError, MesaCITimeoutError
from lava.exceptions import MesaCIException, MesaCIRetryError
from lava.lava_job_submitter import (
    DEVICE_HANGING_TIMEOUT_SEC,
    NUMBER_OF_RETRIES_TIMEOUT_DETECTION,
    LAVAJob,
    filter_debug_messages,
    fix_lava_color_log,
    fix_lava_gitlab_section_log,
    follow_job_execution,
    hide_sensitive_data,
    retriable_follow_job,
)
from .lava.helpers import (
    create_lava_yaml_msg,
    generate_n_logs,
    generate_testsuite_result,
    jobs_logs_response,
)
NUMBER_OF_MAX_ATTEMPTS = NUMBER_OF_RETRIES_TIMEOUT_DETECTION + 1
def create_lava_yaml_msg(
    dt: Callable = datetime.now, msg="test", lvl="target"
) -> dict[str, str]:
    return {"dt": str(dt()), "msg": msg, "lvl": lvl}


def jobs_logs_response(finished=False, msg=None, lvl="target", result=None) -> Tuple[bool, str]:
    timed_msg = {"dt": str(datetime.now()), "msg": "New message", "lvl": lvl}
    if result:
        timed_msg["lvl"] = "target"
        timed_msg["msg"] = f"hwci: mesa: {result}"
    logs = [timed_msg] if msg is None else msg
    return finished, yaml.safe_dump(logs)


RESULT_GET_TESTJOB_RESULTS = [{"metadata": {"result": "test"}}]


def generate_testsuite_result(name="test-mesa-ci", result="pass", metadata_extra=None, extra=None):
    if metadata_extra is None:
        metadata_extra = {}
    if extra is None:
        extra = {}
    return {"metadata": {"result": result, **metadata_extra}, "name": name}
@pytest.fixture
def mock_proxy():
    def create_proxy_mock(
        job_results=RESULT_GET_TESTJOB_RESULTS,
        testsuite_results=[generate_testsuite_result()],
        **kwargs
    ):
        proxy_mock = MagicMock()
        proxy_submit_mock = proxy_mock.scheduler.jobs.submit
        proxy_submit_mock.return_value = "1234"
        proxy_results_mock = proxy_mock.results.get_testjob_results_yaml
        proxy_results_mock.return_value = yaml.safe_dump(job_results)
        proxy_test_suites_mock = proxy_mock.results.get_testsuite_results_yaml
        proxy_test_suites_mock.return_value = yaml.safe_dump(testsuite_results)
        proxy_logs_mock = proxy_mock.scheduler.jobs.logs
        proxy_logs_mock.return_value = jobs_logs_response()
        for key, value in kwargs.items():
            setattr(proxy_logs_mock, key, value)
        return proxy_mock

    yield create_proxy_mock
@pytest.fixture
def mock_proxy_waiting_time(mock_proxy):
    def update_mock_proxy(frozen_time, **kwargs):
@@ -118,19 +61,6 @@ def mock_proxy_waiting_time(mock_proxy):
    return update_mock_proxy
@pytest.fixture
def mock_sleep():
    """Mock time.sleep to make tests faster"""
    with patch("time.sleep", return_value=None):
        yield


@pytest.fixture
def frozen_time(mock_sleep):
    with freeze_time() as frozen_time:
        yield frozen_time
@pytest.mark.parametrize("exception", [RuntimeError, SystemError, KeyError])
def test_submit_and_follow_respects_exceptions(mock_sleep, mock_proxy, exception):
    with pytest.raises(MesaCIException):
@@ -139,35 +69,6 @@ def test_submit_and_follow_respects_exceptions(mock_sleep, mock_proxy, exception
        follow_job_execution(job)
def level_generator():
    # Tests all known levels by default
    yield from cycle(("results", "feedback", "warning", "error", "debug", "target"))


def generate_n_logs(n=1, tick_fn: Union[Generator, Iterable[int], int] = 1, level_fn=level_generator, result="pass"):
    """Simulate a log partitioned in n components"""
    level_gen = level_fn()
    if isinstance(tick_fn, Generator):
        tick_gen = tick_fn
    elif isinstance(tick_fn, Iterable):
        tick_gen = cycle(tick_fn)
    else:
        tick_gen = cycle((tick_fn,))

    with freeze_time(datetime.now()) as time_travel:
        tick_sec: int = next(tick_gen)
        while True:
            # Simulate a scenario where the target job is waiting to be started
            for _ in range(n - 1):
                level: str = next(level_gen)
                time_travel.tick(tick_sec)
                yield jobs_logs_response(finished=False, msg=[], lvl=level)

            time_travel.tick(tick_sec)
            yield jobs_logs_response(finished=True, result=result)
NETWORK_EXCEPTION = xmlrpc.client.ProtocolError("", 0, "test", {})
XMLRPC_FAULT = xmlrpc.client.Fault(0, "test")
@@ -248,7 +149,6 @@ PROXY_SCENARIOS = {
}
@patch("time.sleep", return_value=None) # mock sleep to make test faster
@pytest.mark.parametrize(
"side_effect, expectation, job_result, proxy_args",
PROXY_SCENARIOS.values(),
@@ -296,38 +196,6 @@ def test_simulate_a_long_wait_to_start_a_job(
    assert delta_time.total_seconds() >= wait_time
SENSITIVE_DATA_SCENARIOS = {
    "no sensitive data tagged": (
        ["bla bla", "mytoken: asdkfjsde1341=="],
        ["bla bla", "mytoken: asdkfjsde1341=="],
        "HIDEME",
    ),
    "sensitive data tagged": (
        ["bla bla", "mytoken: asdkfjsde1341== # HIDEME"],
        ["bla bla"],
        "HIDEME",
    ),
    "sensitive data tagged with custom word": (
        ["bla bla", "mytoken: asdkfjsde1341== # DELETETHISLINE", "third line"],
        ["bla bla", "third line"],
        "DELETETHISLINE",
    ),
}


@pytest.mark.parametrize(
    "input, expectation, tag",
    SENSITIVE_DATA_SCENARIOS.values(),
    ids=SENSITIVE_DATA_SCENARIOS.keys(),
)
def test_hide_sensitive_data(input, expectation, tag):
    yaml_data = yaml.safe_dump(input)
    yaml_result = hide_sensitive_data(yaml_data, tag)
    result = yaml.safe_load(yaml_result)
    assert result == expectation
CORRUPTED_LOG_SCENARIOS = {
"too much subsequent corrupted data": (
[(False, "{'msg': 'Incomplete}")] * 100 + [jobs_logs_response(True)],
@@ -353,120 +221,6 @@ def test_log_corruption(mock_sleep, data_sequence, expected_exception, mock_prox
        retriable_follow_job(proxy_mock, "")
COLOR_MANGLED_SCENARIOS = {
    "Mangled error message at target level": (
        create_lava_yaml_msg(msg="[0m[0m[31mERROR - dEQP error: ", lvl="target"),
        "\x1b[0m\x1b[0m\x1b[31mERROR - dEQP error: ",
    ),
    "Mangled pass message at target level": (
        create_lava_yaml_msg(
            msg="[0mPass: 26718, ExpectedFail: 95, Skip: 25187, Duration: 8:18, Remaining: 13",
            lvl="target",
        ),
        "\x1b[0mPass: 26718, ExpectedFail: 95, Skip: 25187, Duration: 8:18, Remaining: 13",
    ),
    "Mangled error message with bold formatting at target level": (
        create_lava_yaml_msg(msg="[1;31mReview the image changes...", lvl="target"),
        "\x1b[1;31mReview the image changes...",
    ),
    "Mangled error message with high intensity background at target level": (
        create_lava_yaml_msg(msg="[100mReview the image changes...", lvl="target"),
        "\x1b[100mReview the image changes...",
    ),
    "Mangled error message with underline+bg color+fg color at target level": (
        create_lava_yaml_msg(msg="[4;41;97mReview the image changes...", lvl="target"),
        "\x1b[4;41;97mReview the image changes...",
    ),
    "Bad input for color code.": (
        create_lava_yaml_msg(
            msg="[4;97 This message is missing the `m`.", lvl="target"
        ),
        "[4;97 This message is missing the `m`.",
    ),
}


@pytest.mark.parametrize(
    "message, fixed_message",
    COLOR_MANGLED_SCENARIOS.values(),
    ids=COLOR_MANGLED_SCENARIOS.keys(),
)
def test_fix_lava_color_log(message, fixed_message):
    fix_lava_color_log(message)
    assert message["msg"] == fixed_message


GITLAB_SECTION_MANGLED_SCENARIOS = {
    "Mangled section_start at target level": (
        create_lava_yaml_msg(
            msg="[0Ksection_start:1652658415:deqp[collapsed=false][0Kdeqp-runner",
            lvl="target",
        ),
        "\x1b[0Ksection_start:1652658415:deqp[collapsed=false]\r\x1b[0Kdeqp-runner",
    ),
    "Mangled section_start at target level with header with spaces": (
        create_lava_yaml_msg(
            msg="[0Ksection_start:1652658415:deqp[collapsed=false][0Kdeqp runner stats",
            lvl="target",
        ),
        "\x1b[0Ksection_start:1652658415:deqp[collapsed=false]\r\x1b[0Kdeqp runner stats",
    ),
    "Mangled section_end at target level": (
        create_lava_yaml_msg(
            msg="[0Ksection_end:1652658415:test_setup[0K",
            lvl="target",
        ),
        "\x1b[0Ksection_end:1652658415:test_setup\r\x1b[0K",
    ),
}


@pytest.mark.parametrize(
    "message, fixed_message",
    GITLAB_SECTION_MANGLED_SCENARIOS.values(),
    ids=GITLAB_SECTION_MANGLED_SCENARIOS.keys(),
)
def test_fix_lava_gitlab_section_log(message, fixed_message):
    fix_lava_gitlab_section_log(message)
    assert message["msg"] == fixed_message


LAVA_DEBUG_SPAM_MESSAGES = {
    "Listened to connection in debug level": (
        create_lava_yaml_msg(
            msg="Listened to connection for namespace 'common' for up to 1s",
            lvl="debug",
        ),
        True,
    ),
    "Listened to connection in debug level - v2": (
        create_lava_yaml_msg(
            msg="Listened to connection for namespace 'prepare' for up to 9s",
            lvl="debug",
        ),
        True,
    ),
    "Listened to connection in target level": (
        create_lava_yaml_msg(
            msg="Listened to connection for namespace 'common' for up to 1s",
            lvl="target",
        ),
        False,
    ),
}


@pytest.mark.parametrize(
    "message, expectation",
    LAVA_DEBUG_SPAM_MESSAGES.values(),
    ids=LAVA_DEBUG_SPAM_MESSAGES.keys(),
)
def test_filter_debug_messages(message, expectation):
    assert filter_debug_messages(message) == expectation
LAVA_RESULT_LOG_SCENARIOS = {
    # the submitter should accept xtrace logs
    "Bash xtrace echo with kmsg interleaving": (


@@ -22,8 +22,20 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from datetime import datetime
import pytest
from lava.utils.lava_log import GitlabSection
import yaml
from lava.utils.lava_log import (
    GitlabSection,
    LogFollower,
    filter_debug_messages,
    fix_lava_color_log,
    fix_lava_gitlab_section_log,
    hide_sensitive_data,
)
from ..lava.helpers import create_lava_yaml_msg, does_not_raise
GITLAB_SECTION_SCENARIOS = {
"start collapsed": (
@@ -58,3 +70,207 @@ def test_gitlab_section(method, collapsed, expectation):
    gs.get_timestamp = lambda: "mock_date"
    result = getattr(gs, method)()
    assert result == expectation
def test_gl_sections():
    lines = [
        {
            "dt": datetime.now(),
            "lvl": "debug",
            "msg": "Received signal: <STARTRUN> 0_mesa 5971831_1.3.2.3.1",
        },
        {
            "dt": datetime.now(),
            "lvl": "debug",
            "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
        },
        {
            "dt": datetime.now(),
            "lvl": "target",
            "msg": "<LAVA_SIGNAL_ENDTC mesa-ci_iris-kbl-traces>",
        },
    ]
    lf = LogFollower()
    for line in lines:
        lf.manage_gl_sections(line)
    parsed_lines = lf.flush()

    # STARTRUN opens the test-suite section, expanded
    assert "section_start" in parsed_lines[0]
    assert "collapsed=true" not in parsed_lines[0]
    # STARTTC ends the previous section and opens a test-case section, expanded
    assert "section_end" in parsed_lines[1]
    assert "section_start" in parsed_lines[2]
    assert "collapsed=true" not in parsed_lines[2]
    # ENDTC ends the test-case section and opens the LAVA post-processing
    # section, which starts collapsed
    assert "section_end" in parsed_lines[3]
    assert "section_start" in parsed_lines[4]
    assert "collapsed=true" in parsed_lines[4]
def test_log_follower_flush():
    lines = [
        {
            "dt": datetime.now(),
            "lvl": "debug",
            "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
        },
        {
            "dt": datetime.now(),
            "lvl": "target",
            "msg": "<LAVA_SIGNAL_ENDTC mesa-ci_iris-kbl-traces>",
        },
    ]
    lf = LogFollower()
    lf.feed(lines)
    parsed_lines = lf.flush()
    # flush() drains the buffer: a second call yields nothing until new
    # lines are fed
    empty = lf.flush()
    lf.feed(lines)
    repeated_parsed_lines = lf.flush()

    assert parsed_lines
    assert not empty
    assert repeated_parsed_lines
SENSITIVE_DATA_SCENARIOS = {
    "no sensitive data tagged": (
        ["bla bla", "mytoken: asdkfjsde1341=="],
        ["bla bla", "mytoken: asdkfjsde1341=="],
        "HIDEME",
    ),
    "sensitive data tagged": (
        ["bla bla", "mytoken: asdkfjsde1341== # HIDEME"],
        ["bla bla"],
        "HIDEME",
    ),
    "sensitive data tagged with custom word": (
        ["bla bla", "mytoken: asdkfjsde1341== # DELETETHISLINE", "third line"],
        ["bla bla", "third line"],
        "DELETETHISLINE",
    ),
}


@pytest.mark.parametrize(
    "input, expectation, tag",
    SENSITIVE_DATA_SCENARIOS.values(),
    ids=SENSITIVE_DATA_SCENARIOS.keys(),
)
def test_hide_sensitive_data(input, expectation, tag):
    yaml_data = yaml.safe_dump(input)
    yaml_result = hide_sensitive_data(yaml_data, tag)
    result = yaml.safe_load(yaml_result)
    assert result == expectation
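These scenarios pin down the contract: any line carrying the tag is dropped wholesale, everything else passes through untouched. A minimal sketch that would satisfy them (an assumption, not necessarily the submitter's actual implementation):

def hide_sensitive_data_sketch(log_yaml: str, hide_tag: str = "HIDEME") -> str:
    # Drop every line that carries the sensitive-data tag.
    return "".join(
        line for line in log_yaml.splitlines(keepends=True) if hide_tag not in line
    )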
COLOR_MANGLED_SCENARIOS = {
    "Mangled error message at target level": (
        create_lava_yaml_msg(msg="[0m[0m[31mERROR - dEQP error: ", lvl="target"),
        "\x1b[0m\x1b[0m\x1b[31mERROR - dEQP error: ",
    ),
    "Mangled pass message at target level": (
        create_lava_yaml_msg(
            msg="[0mPass: 26718, ExpectedFail: 95, Skip: 25187, Duration: 8:18, Remaining: 13",
            lvl="target",
        ),
        "\x1b[0mPass: 26718, ExpectedFail: 95, Skip: 25187, Duration: 8:18, Remaining: 13",
    ),
    "Mangled error message with bold formatting at target level": (
        create_lava_yaml_msg(msg="[1;31mReview the image changes...", lvl="target"),
        "\x1b[1;31mReview the image changes...",
    ),
    "Mangled error message with high intensity background at target level": (
        create_lava_yaml_msg(msg="[100mReview the image changes...", lvl="target"),
        "\x1b[100mReview the image changes...",
    ),
    "Mangled error message with underline+bg color+fg color at target level": (
        create_lava_yaml_msg(msg="[4;41;97mReview the image changes...", lvl="target"),
        "\x1b[4;41;97mReview the image changes...",
    ),
    "Bad input for color code.": (
        create_lava_yaml_msg(
            msg="[4;97 This message is missing the `m`.", lvl="target"
        ),
        "[4;97 This message is missing the `m`.",
    ),
}


@pytest.mark.parametrize(
    "message, fixed_message",
    COLOR_MANGLED_SCENARIOS.values(),
    ids=COLOR_MANGLED_SCENARIOS.keys(),
)
def test_fix_lava_color_log(message, fixed_message):
    fix_lava_color_log(message)
    assert message["msg"] == fixed_message
GITLAB_SECTION_MANGLED_SCENARIOS = {
    "Mangled section_start at target level": (
        create_lava_yaml_msg(
            msg="[0Ksection_start:1652658415:deqp[collapsed=false][0Kdeqp-runner",
            lvl="target",
        ),
        "\x1b[0Ksection_start:1652658415:deqp[collapsed=false]\r\x1b[0Kdeqp-runner",
    ),
    "Mangled section_start at target level with header with spaces": (
        create_lava_yaml_msg(
            msg="[0Ksection_start:1652658415:deqp[collapsed=false][0Kdeqp runner stats",
            lvl="target",
        ),
        "\x1b[0Ksection_start:1652658415:deqp[collapsed=false]\r\x1b[0Kdeqp runner stats",
    ),
    "Mangled section_end at target level": (
        create_lava_yaml_msg(
            msg="[0Ksection_end:1652658415:test_setup[0K",
            lvl="target",
        ),
        "\x1b[0Ksection_end:1652658415:test_setup\r\x1b[0K",
    ),
}


@pytest.mark.parametrize(
    "message, fixed_message",
    GITLAB_SECTION_MANGLED_SCENARIOS.values(),
    ids=GITLAB_SECTION_MANGLED_SCENARIOS.keys(),
)
def test_fix_lava_gitlab_section_log(message, fixed_message):
    fix_lava_gitlab_section_log(message)
    assert message["msg"] == fixed_message
LAVA_DEBUG_SPAM_MESSAGES = {
    "Listened to connection in debug level": (
        create_lava_yaml_msg(
            msg="Listened to connection for namespace 'common' for up to 1s",
            lvl="debug",
        ),
        True,
    ),
    "Listened to connection in debug level - v2": (
        create_lava_yaml_msg(
            msg="Listened to connection for namespace 'prepare' for up to 9s",
            lvl="debug",
        ),
        True,
    ),
    "Listened to connection in target level": (
        create_lava_yaml_msg(
            msg="Listened to connection for namespace 'common' for up to 1s",
            lvl="target",
        ),
        False,
    ),
}


@pytest.mark.parametrize(
    "message, expectation",
    LAVA_DEBUG_SPAM_MESSAGES.values(),
    ids=LAVA_DEBUG_SPAM_MESSAGES.keys(),
)
def test_filter_debug_messages(message, expectation):
    assert filter_debug_messages(message) == expectation
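The spam scenarios differ only by level: the same "Listened to connection" text is filtered at debug level but kept at target level. The simplest predicate satisfying them (the real function may also match the message text):

def filter_debug_messages_sketch(line: dict[str, str]) -> bool:
    # True means: LAVA debug chatter, drop it from the GitLab log.
    return line["lvl"] == "debug"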