Source code for tests.utils.scp_cli_utils

# SPDX-FileCopyrightText: <text>Copyright 2026 Arm Limited
# and/or its affiliates <open-source-office@arm.com></text>
#
# SPDX-License-Identifier: MIT
#

from __future__ import annotations


import logging
import re
import time
from typing import Any, Dict, List, Optional


import pexpect

logger = logging.getLogger(__name__)


class ScpCliUtils:
    """
    SCP Debugger CLI utility class.

    Provides helpers to interact with the SCP CLI:

    - Enter or exit CLI
    - Capture logs between test markers
    """

    DEFAULT_TIMEOUT = 120

    def enter_scp_cli(
        self, session: pexpect.spawn, pattern: str, prompt: str, timeout: int
    ) -> str:
        """
        Send Ctrl+E and wait for the SCP CLI prompt to appear.

        :param session: pexpect session connected to the console
        :param pattern: Regex pattern to identify successful entry into CLI
        :param prompt: Regex pattern to identify the CLI prompt after entry
        :param timeout: Timeout waiting for patterns
        :returns: Captured output from the CLI entry process
        :raises AssertionError: If expected patterns are not found within
            the timeout
        """
        session.sendcontrol("e")
        time.sleep(1.0)
        try:
            session.expect([pattern], timeout=timeout)
            data_1 = session.after or ""
            time.sleep(0.5)
            session.expect(prompt, timeout=timeout)
            data_2 = session.after or ""
            data = f"{data_1}\n{data_2}"
            return self._ensure_str(data).strip()
        except (pexpect.TIMEOUT, pexpect.EOF) as exc:
            raise AssertionError("Timeout: Failed to enter SCP CLI") from exc

    def exit_scp_cli(
        self, session: pexpect.spawn, pattern: str, timeout: int
    ) -> str:
        """
        Send Ctrl+D, ensure the exit banner is printed, and return it.

        :param session: pexpect session connected to the console
        :param pattern: Regex pattern to identify successful exit from CLI
        :param timeout: Timeout for waiting for the exit pattern
        :returns: Captured output from the CLI exit process
        :raises AssertionError: If the expected exit pattern is not found
            within the timeout
        """
        try:
            session.sendcontrol("d")
            time.sleep(0.5)
            session.expect([pattern], timeout=timeout)
            data = session.after or ""
            return self._ensure_str(data).strip()
        except (pexpect.TIMEOUT, pexpect.EOF) as exc:
            raise AssertionError(
                "Timeout: Failed to detect CLI exit marker"
            ) from exc

    def capture_integration_logs(
        self,
        manager: Any,
        session: pexpect.spawn,
        test_name: str,
        timeout: int,
    ) -> str:
        """
        Capture logs emitted between the Start/End markers of a test.

        :param manager: The console manager to use for pattern matching
        :param session: pexpect session connected to the console
        :param test_name: The name of the test, used to identify log markers
        :param timeout: Timeout for waiting for log markers
        :returns: Captured logs between the Start and End markers
        :raises AssertionError: If the expected log markers are not found
            within the timeout
        """
        start = rf"\[INTEGRATION_TEST\]\s+Start:\s*{re.escape(test_name)}"
        end = rf"\[INTEGRATION_TEST\]\s+End:\s*{re.escape(test_name)}"
        try:
            matched, buffer = manager._expect_pattern(session, start, timeout)
            assert matched, (
                f"Timeout waiting for pattern: {start}\n"
                f"Console output:\n{buffer}"
            )
            time.sleep(1.0)
            matched, buffer = manager._expect_pattern(session, end, timeout)
            assert matched, (
                f"Timeout waiting for pattern: {end}\n"
                f"Console output:\n{buffer}"
            )
            data = session.before or ""
            return self._ensure_str(data).strip()
        except (pexpect.TIMEOUT, pexpect.EOF) as exc:
            raise AssertionError(
                f"Timeout while capturing logs for test '{test_name}'"
            ) from exc

    def _ensure_str(self, data: object) -> str:
        """
        Ensure that the given data is returned as a string.

        If the input is of type ``bytes``, it is decoded using UTF-8 with
        errors ignored. For all other types, the value is converted using
        ``str()``.

        :param data: Value to normalize into a string representation.
        """
        if isinstance(data, bytes):
            return data.decode(errors="ignore")
        return str(data)
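

# Illustrative usage of ScpCliUtils (a sketch, not part of the module).
# The spawn command, entry banner, and prompt regexes below are
# hypothetical placeholders; real values depend on the platform console.
#
#   session = pexpect.spawn("telnet localhost 5000", encoding="utf-8")
#   cli = ScpCliUtils()
#   banner = cli.enter_scp_cli(
#       session,
#       pattern=r"CLI Debugger",  # hypothetical entry banner
#       prompt=r">",              # hypothetical CLI prompt regex
#       timeout=ScpCliUtils.DEFAULT_TIMEOUT,
#   )
#   # ... run CLI commands, then leave the CLI ...
#   cli.exit_scp_cli(session, pattern=r"Exiting CLI", timeout=60)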


class ScpTestUtils:
    """Reusable SCP test helpers."""

    DEFAULT_TIMEOUT = 120

    # Matches per-test result lines:
    #   ./path/file.c:111:test_name:PASS|FAIL|IGNORE
    TEST_RESULT_RE = (
        r"(?P<file>.+?):(?P<line>\d+):"
        r"(?P<name>[^:]+):(?P<status>PASS|FAIL|IGNORE)"
    )

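    # Illustrative match against a result line of the documented form:
    #
    #   m = re.search(ScpTestUtils.TEST_RESULT_RE,
    #                 "./tests/test_clock.c:111:test_clock_set_rate:PASS")
    #   m.group("name")   -> "test_clock_set_rate"
    #   m.group("status") -> "PASS"
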
    def _build_test_patterns(self, test_name: str) -> tuple[str, str]:
        """
        Build regular expression patterns for an integration test.

        The provided test name is escaped to ensure safe inclusion in a
        regular expression. The returned patterns match log lines of the
        form::

            [INTEGRATION_TEST] Start: <test_name>
            [INTEGRATION_TEST] End: <test_name>

        :param test_name: Name of the integration test.
        :returns: Tuple containing the start and end regex patterns.
        """
        escaped = re.escape(test_name)
        start = rf"\[INTEGRATION_TEST\]\s+Start:\s*{escaped}"
        end = rf"\[INTEGRATION_TEST\]\s+End:\s*{escaped}"
        return start, end

    def _wait_for_pattern(
        self,
        session: pexpect.spawn,
        pattern: str,
        timeout: int,
        manager: Any,
    ) -> None:
        """
        Wait for a specific pattern to appear in the console session.

        If a ``manager`` object providing an ``_expect_pattern`` method is
        supplied, it is used for pattern matching. Otherwise, the method
        falls back to direct ``pexpect`` matching.

        :param session: Active pexpect session connected to the console.
        :param pattern: Regular expression pattern to wait for.
        :param timeout: Maximum time in seconds to wait for the pattern.
        :param manager: Optional console manager implementing
            ``_expect_pattern``.
        :raises AssertionError: If the expected pattern is not matched
            within the given timeout when using the manager.
        :raises pexpect.TIMEOUT: If direct ``pexpect`` matching times out.
        :raises pexpect.EOF: If the session ends unexpectedly.
        """
        if manager and hasattr(manager, "_expect_pattern"):
            matched, buffer = manager._expect_pattern(
                session,
                pattern,
                timeout,
            )
            assert matched, (
                f"Timeout waiting for pattern: {pattern}\n"
                f"Console output:\n{buffer}"
            )
        else:
            session.expect(pattern, timeout=timeout)

    def _normalize_output(self, data: Any) -> str:
        """
        Normalize raw console output into a clean string.

        This helper ensures that data returned from pexpect or other
        subprocess interactions is consistently represented as a string.
        If the input is of type ``bytes``, it is decoded using UTF-8 with
        errors ignored. The result is then converted to a string (if not
        already) and stripped of leading and trailing whitespace.

        :param data: Raw console output to normalize.
        :returns: Cleaned string representation of the input.
        """
        if isinstance(data, bytes):
            data = data.decode(errors="ignore")
        return str(data).strip()

    def capture_logs(
        self,
        session: pexpect.spawn,
        test_name: str,
        timeout: Optional[int] = None,
        manager: Any = None,
    ) -> str:
        """
        Capture integration test logs between Start/End markers.

        This method waits for the integration test start and end markers
        corresponding to the given ``test_name`` and extracts all console
        output emitted between those markers. The method supports both
        direct ``pexpect`` pattern matching and manager-assisted matching
        (when a console manager providing ``_expect_pattern`` is supplied).

        :param session: Active pexpect session connected to the console.
        :param test_name: Name of the integration test.
        :param timeout: Optional timeout in seconds. If not provided,
            ``DEFAULT_TIMEOUT`` is used.
        :param manager: Optional console manager providing
            ``_expect_pattern`` for matching.
        :returns: Captured console output between test start and end
            markers.
        :raises AssertionError: If the expected start or end markers are
            not detected within the timeout.
        """
        tmo = timeout or self.DEFAULT_TIMEOUT
        start, end = self._build_test_patterns(test_name)
        try:
            self._wait_for_pattern(session, start, tmo, manager)
            time.sleep(1.0)
            self._wait_for_pattern(session, end, tmo, manager)
            return self._normalize_output(session.before)
        except (pexpect.TIMEOUT, pexpect.EOF) as exc:
            raise AssertionError(
                f"Timeout while capturing logs for test '{test_name}'"
            ) from exc

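    # Illustrative call (hypothetical session and test name):
    #
    #   utils = ScpTestUtils()
    #   output = utils.capture_logs(session, "scp_clock_test", timeout=60)
    #
    # With no manager supplied, the markers are matched directly via
    # pexpect; passing a console manager routes matching through its
    # _expect_pattern helper instead.
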
    def _compute_width(self, runs: List[Dict[str, Any]]) -> int:
        """
        Compute the display width required for the RUN column in the
        summary.

        The width is determined by the longest run name in the provided
        list. A minimum width of 12 characters is enforced to keep the
        table formatting readable.

        :param runs: List of per-run summary dictionaries containing at
            least a ``"name"`` key.
        :returns: Calculated column width for the RUN field.
        """
        return max(
            12,
            max(len(r["name"]) for r in runs + [{"name": "OVERALL"}]),
        )

    def _compute_totals(self, runs: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Compute aggregated totals across multiple test runs.

        This method sums the total number of executed, passed, failed, and
        ignored tests from the provided run summaries and determines an
        overall status.

        :param runs: List of per-run summary dictionaries containing the
            keys ``"total"``, ``"passed"``, ``"failures"``, ``"ignored"``,
            and optionally ``"ok"``.
        :returns: Dictionary containing aggregated totals and overall
            status. Keys include ``"total"``, ``"passed"``, ``"failures"``,
            ``"ignored"``, and ``"status"``.
        """
        total = sum(r["total"] for r in runs)
        passed = sum(r["passed"] for r in runs)
        failures = sum(r["failures"] for r in runs)
        ignored = sum(r["ignored"] for r in runs)
        status = (
            "PASS"
            if failures == 0
            and ignored == 0
            and all(r.get("ok", False) for r in runs)
            else "FAIL"
        )
        return {
            "total": total,
            "passed": passed,
            "failures": failures,
            "ignored": ignored,
            "status": status,
        }

    def _build_header(
        self,
        title: str,
        meta: Dict[str, Any],
    ) -> List[str]:
        """
        Build the formatted header section for the combined summary output.

        The header includes:

        - A top separator line
        - The provided title
        - Optional metadata fields (platform, port, suite)
        - A separator line below the metadata

        :param title: Title displayed at the top of the summary.
        :param meta: Dictionary containing optional metadata fields.
        :returns: List of formatted header lines.
        """
        line = "=" * 78
        sep = "-" * 78
        lines = [line, title]
        meta_parts = [
            f"{k.upper()}={v}"
            for k, v in meta.items()
            if k in ("platform", "port", "suite") and v is not None
        ]
        if meta_parts:
            lines.append(" " + " ".join(meta_parts))
        lines.append(sep)
        return lines

    def _build_table(
        self,
        runs: List[Dict[str, Any]],
        totals: Dict[str, Any],
        width_name: int,
    ) -> List[str]:
        """
        Build the formatted summary table section.

        This method generates a fixed-width, human-readable table showing:

        - Per-run statistics (total, passed, failed, ignored, result)
        - An aggregated "OVERALL" summary row

        The table layout is aligned using the provided column width for the
        RUN column to ensure consistent formatting.

        :param runs: List of per-run summary dictionaries. Each dictionary
            must contain the keys ``name``, ``total``, ``passed``,
            ``failures``, ``ignored``, and ``status``.
        :param totals: Dictionary containing aggregated totals and overall
            status values.
        :param width_name: Width used to left-align the ``RUN`` column.
        :returns: List of formatted table lines.
        """
        sep = "-" * 78
        lines: List[str] = []
        lines.append(
            f"{'RUN':<{width_name}} "
            f"{'TOTAL':>5} "
            f"{'PASS':>5} "
            f"{'FAIL':>5} "
            f"{'IGN':>5} "
            f"{'RESULT':>6}"
        )
        lines.append(sep)
        for r in runs:
            lines.append(
                f"{r['name']:<{width_name}} "
                f"{r['total']:>5} "
                f"{r['passed']:>5} "
                f"{r['failures']:>5} "
                f"{r['ignored']:>5} "
                f"{r['status']:>6}"
            )
        lines.append(sep)
        lines.append(
            f"{'OVERALL':<{width_name}} "
            f"{totals['total']:>5} "
            f"{totals['passed']:>5} "
            f"{totals['failures']:>5} "
            f"{totals['ignored']:>5} "
            f"{totals['status']:>6}"
        )
        return lines

    def _build_appendix(
        self,
        runs: List[Dict[str, Any]],
        overall_status: str,
    ) -> List[str]:
        """
        Build an optional appendix section listing failed or ignored tests.

        The appendix is included only when:

        - The overall status is not "PASS", or
        - At least one run contains failed or ignored test entries.

        For each run with issues, the appendix lists the specific failed
        and/or ignored test names to provide additional detail.

        :param runs: List of per-run summary dictionaries. Each dictionary
            may contain ``failed_tests`` and ``ignored_tests`` keys.
        :param overall_status: The computed overall test status (typically
            ``"PASS"`` or ``"FAIL"``).
        :returns: List of formatted appendix lines. Returns an empty list
            if the appendix should be omitted.
        """
        if self._should_skip_appendix(runs, overall_status):
            return []
        lines: List[str] = ["", "Appendix: failed/ignored tests", "-" * 78]
        for run in runs:
            lines.extend(self._format_run_details(run))
        return lines

    def _should_skip_appendix(
        self,
        runs: List[Dict[str, Any]],
        overall_status: str,
    ) -> bool:
        """
        Determine whether the appendix section should be omitted.

        :param runs: List of per-run summary dictionaries. Each dictionary
            may contain ``failed_tests`` and ``ignored_tests`` keys.
        :param overall_status: The computed overall test status.
        :returns: ``True`` if the appendix should be omitted, else
            ``False``.
        """
        if overall_status != "PASS":
            return False
        return not any(
            r.get("failed_tests") or r.get("ignored_tests") for r in runs
        )

    def _format_run_details(self, run: Dict[str, Any]) -> List[str]:
        """
        Format detailed failure/ignore information for a single run.

        This method generates a list of lines describing failed and/or
        ignored tests for the given run. If no such tests exist, an empty
        list is returned.

        :param run: Dictionary containing per-run summary information.
            Expected keys include ``name``, ``failed_tests`` and
            ``ignored_tests``.
        :returns: List of formatted strings describing failed and/or
            ignored tests. Returns an empty list if none exist.
        """
        failed = run.get("failed_tests") or []
        ignored = run.get("ignored_tests") or []
        if not failed and not ignored:
            return []
        name = run.get("name", "<unknown>")
        lines = [f"{name}:"]
        if failed:
            lines.append(" FAIL: " + ", ".join(failed))
        if ignored:
            lines.append(" IGN : " + ", ".join(ignored))
        return lines

    def format_combined_summary(
        self,
        runs: List[Dict[str, Any]],
        title: str = "SCP Integration Test Summary",
        meta: Optional[Dict[str, Any]] = None,
    ) -> str:
        """
        Generate a formatted combined summary for multiple test runs.

        This method aggregates per-run results, computes overall totals,
        and produces a structured, human-readable summary string.

        :param runs: List of per-run summary dictionaries. Each dictionary
            must contain keys such as ``name``, ``total``, ``passed``,
            ``failures``, ``ignored``, and ``status``.
        :param title: Title displayed at the top of the summary. Defaults
            to ``"SCP Integration Test Summary"``.
        :param meta: Optional metadata dictionary containing fields such
            as ``platform``, ``port``, or ``suite``.
        :returns: A newline-separated formatted summary string.
        """
        meta = meta or {}
        totals = self._compute_totals(runs)
        width_name = self._compute_width(runs)
        lines: List[str] = []
        lines.extend(self._build_header(title, meta))
        lines.extend(self._build_table(runs, totals, width_name))
        lines.extend(self._build_appendix(runs, totals["status"]))
        return "\n".join(lines)

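    # Illustrative input/output (hypothetical run names and counts):
    #
    #   runs = [
    #       {"name": "clock", "total": 10, "passed": 10, "failures": 0,
    #        "ignored": 0, "status": "PASS", "ok": True},
    #       {"name": "power", "total": 8, "passed": 7, "failures": 1,
    #        "ignored": 0, "status": "FAIL", "ok": False,
    #        "failed_tests": ["test_power_off"]},
    #   ]
    #   print(ScpTestUtils().format_combined_summary(
    #       runs, meta={"platform": "fvp", "suite": "smoke"}))
    #
    # This prints the header, a per-run table with an OVERALL row, and an
    # appendix naming test_power_off as a failure.
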
    def _parse_overall_summary(
        self, text: str, test_name: str
    ) -> tuple[int, int, int, int]:
        """
        Parse the overall test summary line from captured output.

        :param text: Full captured console output containing the summary
            line.
        :param test_name: The test being parsed. Used for error reporting.
        :returns: A tuple containing
            ``(total_tests, failures, ignored, passed)``.
        :raises AssertionError: If ``SUMMARY_RE`` is not defined on the
            class.
        :raises AssertionError: If the expected summary line is not found
            in the provided text.
        """
        summary_re = getattr(self, "SUMMARY_RE", None)
        if not summary_re:
            raise AssertionError(
                f"{test_name}: SUMMARY_RE is not defined on the test class"
            )
        match = re.search(summary_re, text)
        if not match:
            raise AssertionError(f"{test_name}: Missing overall summary line")
        total = int(match.group("total"))
        failures = int(match.group("failures"))
        ignored = int(match.group("ignored"))
        passed = total - failures - ignored
        return total, failures, ignored, passed

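    # SUMMARY_RE is expected to be supplied by the concrete test class. A
    # plausible pattern (a sketch, not defined here) for a summary line
    # such as "10 Tests 1 Failures 0 Ignored" would be:
    #
    #   SUMMARY_RE = (
    #       r"(?P<total>\d+)\s+Tests\s+"
    #       r"(?P<failures>\d+)\s+Failures\s+"
    #       r"(?P<ignored>\d+)\s+Ignored"
    #   )
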
    def _parse_test_results(self, text: str) -> List[Dict[str, str]]:
        """
        Extract individual test result entries from captured output.

        :param text: Full captured console output containing per-test
            results.
        :returns: A list of dictionaries with keys ``"name"`` and
            ``"status"`` representing individual test outcomes.
        """
        keys = ("name", "status")
        return [
            {k: match.group(k) for k in keys}
            for match in re.finditer(self.TEST_RESULT_RE, text)
        ]

    def _validate_results(
        self,
        test_name: str,
        ok_present: bool,
        failures: int,
        ignored: int,
        failed_tests: list,
        ignored_tests: list,
    ) -> None:
        """
        Validate parsed test results and raise an error if validation
        fails.

        :param test_name: Name of the integration test being validated.
        :param ok_present: Indicates whether the ``OK`` marker was
            detected.
        :param failures: Number of failed tests reported in the summary.
        :param ignored: Number of ignored tests reported in the summary.
        :param failed_tests: List of individual failed test names.
        :param ignored_tests: List of individual ignored test names.
        :raises AssertionError: If the ``OK`` marker is missing or if any
            tests failed or were ignored.
        """
        if ok_present and failures == 0 and ignored == 0:
            return
        log = getattr(self, "logger", logger)
        if failed_tests:
            log.error(
                "%s: failed tests: %s",
                test_name,
                ", ".join(failed_tests),
            )
        if ignored_tests:
            log.error(
                "%s: ignored tests: %s",
                test_name,
                ", ".join(ignored_tests),
            )
        if not ok_present:
            raise AssertionError(f"{test_name}: Missing OK marker")
        raise AssertionError(
            f"{test_name}: One or more tests failed or were ignored"
        )

    def _collect_problem_tests(
        self, results: List[Dict[str, str]]
    ) -> tuple[List[str], List[str]]:
        """
        Collect failed and ignored test names from parsed test results.

        This method iterates over parsed per-test result entries and
        separates tests marked as ``FAIL`` and ``IGNORE`` into two lists.

        :param results: Parsed per-test result entries, typically returned
            by :meth:`_parse_test_results`.
        :returns: A tuple containing ``(failed_tests, ignored_tests)``.
        """
        failed = []
        ignored = []
        for r in results:
            if r["status"] == "FAIL":
                failed.append(r["name"])
            elif r["status"] == "IGNORE":
                ignored.append(r["name"])
        return failed, ignored

    def summarize_results(
        self, text: str, test_name: str, *, raise_on_fail: bool = True
    ) -> Dict[str, Any]:
        """
        Generate a structured summary from raw integration test output.

        :param text: Full captured console output of the integration test.
        :param test_name: Name of the test being summarized.
        :param raise_on_fail: Whether to raise an exception if validation
            fails. Defaults to ``True``.
        :returns: Dictionary containing structured summary information,
            including totals, status, and problematic test names.
        :raises AssertionError: If validation fails and ``raise_on_fail``
            is ``True``.
        """
        ok_present = bool(re.search(r"\bOK\b", text))
        # Defaults so we can still show a table even if parsing fails
        summary: Dict[str, Any] = {
            "name": test_name,
            "status": "FAIL",
            "ok": ok_present,
            "total": 0,
            "passed": 0,
            "failures": 0,
            "ignored": 0,
            "failed_tests": [],
            "ignored_tests": [],
        }
        try:
            total, failures, ignored, passed = self._parse_overall_summary(
                text, test_name
            )
            test_results = self._parse_test_results(text)
            failed_tests, ignored_tests = self._collect_problem_tests(
                test_results,
            )
            status = (
                "PASS"
                if ok_present and failures == 0 and ignored == 0
                else "FAIL"
            )
            summary.update(
                {
                    "status": status,
                    "total": total,
                    "passed": passed,
                    "failures": failures,
                    "ignored": ignored,
                    "failed_tests": failed_tests,
                    "ignored_tests": ignored_tests,
                }
            )
            # Only raise if the caller wants strict behavior
            if raise_on_fail:
                self._validate_results(
                    test_name,
                    ok_present,
                    failures,
                    ignored,
                    failed_tests,
                    ignored_tests,
                )
            return summary
        except AssertionError as e:
            summary["error"] = str(e)
            if raise_on_fail:
                raise
            return summary
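
    # End-to-end sketch (hypothetical console text; assumes a subclass
    # that defines SUMMARY_RE as sketched above):
    #
    #   text = (
    #       "./tests/test_clock.c:111:test_clock_set_rate:PASS\n"
    #       "1 Tests 0 Failures 0 Ignored\n"
    #       "OK\n"
    #   )
    #   run = utils.summarize_results(text, "scp_clock_test",
    #                                 raise_on_fail=False)
    #   print(utils.format_combined_summary([run]))

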
__all__ = ["ScpCliUtils", "ScpTestUtils"]