From 4ed9d9fecf914452c875a37e8d2444ae93e44aee Mon Sep 17 00:00:00 2001 From: PranjalManhgaye Date: Sun, 8 Mar 2026 11:11:39 +0530 Subject: [PATCH] Archive and compare iterations.log for implicit coupling (fixes #440) --- changelog-entries/440.md | 1 + tools/tests/README.md | 2 +- tools/tests/generate_reference_results.py | 11 +++ tools/tests/systemtests/Systemtest.py | 103 +++++++++++++++++++++- 4 files changed, 115 insertions(+), 2 deletions(-) create mode 100644 changelog-entries/440.md diff --git a/changelog-entries/440.md b/changelog-entries/440.md new file mode 100644 index 000000000..21fd24971 --- /dev/null +++ b/changelog-entries/440.md @@ -0,0 +1 @@ +- Archive `precice-*-iterations.log` files into `iterations-logs/` and compare them by SHA-256 hash against reference data for implicit-coupling regression checks; reference hashes are stored in `.iterations-hashes.json` sidecar files (fixes [#440](https://github.com/precice/tutorials/issues/440)). diff --git a/tools/tests/README.md b/tools/tests/README.md index 5675c47b6..e090c6d25 100644 --- a/tools/tests/README.md +++ b/tools/tests/README.md @@ -105,7 +105,7 @@ In this case, building and running seems to work out, but the tests fail because The easiest way to debug a systemtest run is first to have a look at the output written into the action on GitHub. If this does not provide enough hints, the next step is to download the generated `system_tests_run__` artifact. Note that by default this will only be generated if the systemtests fail. -Inside the archive, a test-specific subfolder like `flow-over-heated-plate_fluid-openfoam-solid-fenics_2023-11-19-211723` contains two log files: a `stderr.log` and `stdout.log`. This can be a starting point for a further investigation. +Inside the archive, a test-specific subfolder like `flow-over-heated-plate_fluid-openfoam-solid-fenics_2023-11-19-211723` contains two log files: a `stderr.log` and `stdout.log`. This can be a starting point for a further investigation. 
For implicit-coupling runs, `precice-*-iterations.log` files are collected into `iterations-logs/` and compared by hash against reference data (when a corresponding `.iterations-hashes.json` sidecar exists); a mismatch fails the test. ## Adding new tests diff --git a/tools/tests/generate_reference_results.py b/tools/tests/generate_reference_results.py index 055e7b31c..c4e16f266 100644 --- a/tools/tests/generate_reference_results.py +++ b/tools/tests/generate_reference_results.py @@ -15,6 +15,7 @@ from paths import PRECICE_TUTORIAL_DIR, PRECICE_TESTS_RUN_DIR, PRECICE_TESTS_DIR, PRECICE_REL_OUTPUT_DIR import time +import json def create_tar_gz(source_folder: Path, output_filename: Path): @@ -139,6 +140,16 @@ def main(): raise RuntimeError( f"Error executing: \n {systemtest} \n Could not find result folder {reference_result_folder}\n Probably the tutorial did not run through properly. Please check corresponding logs") + # Write iterations.log hashes sidecar for implicit-coupling regression checks (issue #440) + collected = systemtest._collect_iterations_logs(systemtest.get_system_test_dir()) + if collected: + hashes = { + rel: Systemtest._sha256_file(p) for rel, p in collected + } + sidecar = systemtest.reference_result.path.with_suffix(".iterations-hashes.json") + sidecar.write_text(json.dumps(hashes, sort_keys=True, indent=2)) + logging.info(f"Wrote iterations hashes for {systemtest.reference_result.path.name}") + # write readme for tutorial in reference_result_per_tutorial.keys(): reference_results_dir = tutorial.path / "reference-results" diff --git a/tools/tests/systemtests/Systemtest.py b/tools/tests/systemtests/Systemtest.py index 6abc5a029..73474c932 100644 --- a/tools/tests/systemtests/Systemtest.py +++ b/tools/tests/systemtests/Systemtest.py @@ -1,11 +1,15 @@ +import hashlib +import json import subprocess -from typing import List, Dict, Optional +from typing import List, Dict, Optional, Tuple from jinja2 import Environment, FileSystemLoader from dataclasses 
@staticmethod
def _sha256_file(path: Path) -> str:
    """Return the SHA-256 hex digest of the file at *path*."""
    digest = hashlib.sha256()
    with open(path, "rb") as stream:
        # Stream in 128 KiB chunks so large logs never sit fully in memory.
        for chunk in iter(lambda: stream.read(128 * 1024), b""):
            digest.update(chunk)
    return digest.hexdigest()

def _collect_iterations_logs(
    self, system_test_dir: Path
) -> List[Tuple[str, Path]]:
    """
    Collect precice-*-iterations.log files from case dirs.
    Returns list of (relative_path, absolute_path) e.g. ("solid-fenics/precice-Solid-iterations.log", path).
    """
    found: List[Tuple[str, Path]] = []
    for case in self.case_combination.cases:
        case_name = Path(case.path).name
        case_dir = system_test_dir / case_name
        if not case_dir.exists():
            # Case may not have run (or produced no output dir); skip silently.
            continue
        found.extend(
            (f"{case_name}/{log_file.name}", log_file)
            for log_file in case_dir.glob("precice-*-iterations.log")
            if log_file.is_file()
        )
    return found

def __archive_iterations_logs(self):
    """
    Copy precice-*-iterations.log from case dirs into iterations-logs/
    so they are available in CI artifacts (issue #440).
    """
    logs = self._collect_iterations_logs(self.system_test_dir)
    if not logs:
        return
    target = self.system_test_dir / ITERATIONS_LOGS_DIR
    target.mkdir(exist_ok=True)
    # With more than one log, prefix each copy with its case-dir name so
    # files from different cases cannot clash in the flat archive folder.
    needs_prefix = len(logs) > 1
    for rel, source in logs:
        rel_path = Path(rel)
        archived_name = (
            f"{rel_path.parent.name}_{rel_path.name}" if needs_prefix else rel_path.name
        )
        shutil.copy2(source, target / archived_name)
    logging.debug(f"Archived {len(logs)} iterations log(s) to {target} for {self}")

def __compare_iterations_hashes(self) -> bool:
    """
    Compare current iterations.log hashes against reference sidecar.
    Returns True if comparison passes (or is skipped). Returns False if hashes differ.
    """
    sidecar = self.reference_result.path.with_suffix(".iterations-hashes.json")
    if not sidecar.exists():
        # No sidecar next to the reference archive: nothing to compare against.
        return True
    try:
        expected_hashes = json.loads(sidecar.read_text())
    except (json.JSONDecodeError, OSError) as e:
        # Unreadable reference data degrades to a skip, not a failure.
        logging.warning(f"Could not read iterations hashes from {sidecar}: {e}")
        return True
    if not expected_hashes:
        return True
    actual = {
        rel: self._sha256_file(log_path)
        for rel, log_path in self._collect_iterations_logs(self.system_test_dir)
    }
    for rel, expected in expected_hashes.items():
        if rel not in actual:
            logging.critical(
                f"Missing iterations log {rel} (expected from reference); {self} fails"
            )
            return False
        if actual[rel] != expected:
            logging.critical(
                f"Hash mismatch for {rel} (iterations.log regression); {self} fails"
            )
            return False
    if len(actual) != len(expected_hashes):
        # Every reference log matched, but the run produced additional ones.
        extra = set(actual) - set(expected_hashes)
        logging.critical(
            f"Unexpected iterations log(s) {extra}; {self} fails"
        )
        return False
    return True
f"Iterations.log hash comparison failed (regression), {self} failed" + ) + return SystemtestResult( + False, + std_out, + std_err, + self, + build_time=docker_build_result.runtime, + solver_time=docker_run_result.runtime, + fieldcompare_time=0) + fieldcompare_result = self._run_field_compare() std_out.extend(fieldcompare_result.stdout_data) std_err.extend(fieldcompare_result.stderr_data)