diff --git a/.github/scripts/js/e2e/report/cluster-report.js b/.github/scripts/js/e2e/report/cluster-report.js new file mode 100644 index 0000000000..637a0e4c54 --- /dev/null +++ b/.github/scripts/js/e2e/report/cluster-report.js @@ -0,0 +1,357 @@ +// Copyright 2026 Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const fs = require("fs"); + +const { findSingleMatchingFile } = require("./shared/fs-utils"); +const { parseGinkgoReport } = require("./shared/ginkgo-report-utils"); +const { + archivedReportPattern, + buildClusterStatus, + buildReportSummary, + buildTestStatus, + reportFileName, + zeroMetrics, +} = require("./shared/report-model"); + +/** + * @typedef {Record} StageResults + */ + +/** + * @typedef {Record} StageUrls + */ + +/** + * @typedef {Object} ClusterReportCore + * @property {function(string): void} info + * @property {function(string): void} warning + * @property {function(string, string): void} setOutput + */ + +/** + * @typedef {Object} ClusterReportContext + * @property {string} serverUrl + * @property {{ owner: string, repo: string }} repo + * @property {string|number} runId + * @property {string} [ref] + */ + +/** + * @typedef {Object} ClusterReportConfig + * @property {string} storageType + * @property {string} pipelineJobName + * @property {string} reportsDir + * @property {string} reportFile + * @property {StageResults} stageResults + * @property {StageUrls} [stageJobUrls] + */ + +/** + * @typedef {Object} ClusterReportParams + 
* @property {ClusterReportCore} core + * @property {ClusterReportContext} context + * @property {any} [github] + * @property {ClusterReportConfig} [config] + */ + +const workflowStages = [ + { name: "bootstrap", displayName: "Bootstrap cluster", needsJobId: "bootstrap" }, + { name: "configure-sdn", displayName: "Configure SDN", needsJobId: "configure-sdn" }, + { name: "storage-setup", displayName: "Configure storage", needsJobId: "configure-storage" }, + { name: "virtualization-setup", displayName: "Configure Virtualization", needsJobId: "configure-virtualization" }, + { name: "e2e-test", displayName: "E2E test", needsJobId: "e2e-test" }, +]; + +function readClusterReportConfigFromEnv(env = process.env) { + const storageType = String(env.STORAGE_TYPE || "").trim(); + + return { + storageType, + pipelineJobName: String(env.PIPELINE_JOB_NAME || "").trim(), + reportsDir: env.REPORTS_DIR || "test/e2e", + reportFile: env.REPORT_FILE || reportFileName(storageType), + }; +} + +function requireClusterReportConfig(config) { + if (!config.storageType) { + throw new Error("buildClusterReport requires storageType"); + } + + if (!config.reportsDir) { + throw new Error("buildClusterReport requires reportsDir"); + } + + if (!config.reportFile) { + throw new Error("buildClusterReport requires reportFile"); + } + + return { ...config }; +} + +function getWorkflowRunUrl(context) { + return `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`; +} + +function getBranchName(context) { + return String(context.ref || "").replace(/^refs\/heads\//, ""); +} + +async function listWorkflowRunJobs(github, context) { + if (!github || !github.rest || !github.rest.actions) { + throw new Error("buildClusterReport requires github client"); + } + + const params = { + owner: context.repo.owner, + repo: context.repo.repo, + run_id: context.runId, + per_page: 100, + }; + + if (github.paginate) { + return 
github.paginate(github.rest.actions.listJobsForWorkflowRun, params); + } + + const response = await github.rest.actions.listJobsForWorkflowRun(params); + return response.data.jobs || []; +} + +function findWorkflowJob(jobs, pipelineJobName, jobName) { + const nestedJobName = pipelineJobName ? `${pipelineJobName} / ${jobName}` : ""; + + return ( + jobs.find((job) => job.name === nestedJobName) || + jobs.find((job) => job.name === jobName) || + jobs.find((job) => String(job.name || "").endsWith(` / ${jobName}`)) + ); +} + +function readStageResultsFromEnv(env = process.env) { + let needs = {}; + try { + needs = JSON.parse(env.NEEDS_CONTEXT || "{}"); + } catch { + // malformed JSON — treat all stages as skipped + } + + const stageResults = {}; + for (const { name, needsJobId } of workflowStages) { + stageResults[name] = String((needs[needsJobId] || {}).result || "").trim() || "skipped"; + } + return stageResults; +} + +async function readStageJobUrlsFromApi(github, context, config, core) { + const jobs = await listWorkflowRunJobs(github, context); + const stageJobUrls = {}; + + for (const { name, displayName } of workflowStages) { + const job = findWorkflowJob(jobs, config.pipelineJobName, displayName); + if (job) { + stageJobUrls[name] = job.html_url || ""; + } else { + core.warning(`Unable to find workflow job "${displayName}" for E2E report`); + } + } + + return stageJobUrls; +} + +function findGinkgoReport(config) { + const rawReportPattern = archivedReportPattern(config.storageType); + + return findSingleMatchingFile( + config.reportsDir, + rawReportPattern, + "Ginkgo JSON report" + ); +} + +function parseGinkgoReportFile(rawReportPath, core) { + if (!rawReportPath) { + return { + metrics: zeroMetrics(), + failedTests: [], + startedAt: null, + source: "empty", + }; + } + + core.info(`Found Ginkgo JSON report: ${rawReportPath}`); + try { + return { + ...parseGinkgoReport(fs.readFileSync(rawReportPath, "utf8")), + source: "ginkgo-json", + }; + } catch (error) { + 
core.warning( + `Unable to parse Ginkgo JSON report ${rawReportPath}: ${error.message}` + ); + return { + metrics: zeroMetrics(), + failedTests: [], + startedAt: null, + source: "ginkgo-json-invalid", + }; + } +} + +function buildReportPayload({ + config, + context, + fallbackWorkflowRunUrl, + branchName, + parsedReport, + rawReportPath, +}) { + const clusterStatus = buildClusterStatus(config.stageResults); + const testStatus = buildTestStatus( + config.stageResults["e2e-test"], + parsedReport.source, + clusterStatus, + parsedReport.metrics + ); + const reportSummary = buildReportSummary( + config.storageType, + clusterStatus, + testStatus + ); + const workflowRunUrl = getReportJobUrl( + reportSummary, + config.stageJobUrls, + fallbackWorkflowRunUrl + ); + + return { + schemaVersion: 1, + cluster: config.storageType, + storageType: config.storageType, + reportKind: reportSummary.reportKind, + status: reportSummary.status, + statusMessage: reportSummary.statusMessage, + failedStage: reportSummary.failedStage, + failedStageLabel: reportSummary.failedStageLabel, + failedJobName: reportSummary.failedJobName, + workflowRunId: String(context.runId), + workflowRunUrl, + branch: branchName, + clusterStatus, + testStatus, + startedAt: parsedReport.startedAt, + metrics: parsedReport.metrics, + failedTests: parsedReport.failedTests, + sourceReport: rawReportPath, + reportSource: parsedReport.source, + }; +} + +function getReportJobUrl( + reportSummary, + stageJobUrls = {}, + fallbackWorkflowRunUrl +) { + if (reportSummary.failedStage && stageJobUrls[reportSummary.failedStage]) { + return stageJobUrls[reportSummary.failedStage]; + } + + if (stageJobUrls["e2e-test"]) { + return stageJobUrls["e2e-test"]; + } + + return fallbackWorkflowRunUrl; +} + +/** + * Exposes the generated report fields as GitHub Actions step outputs. + * + * @param {Record} report Final cluster report payload. + * @param {string} reportFile Path to the written JSON report file. 
+ * @param {ClusterReportCore} core GitHub core API. + */ +function setReportOutputs(report, reportFile, core) { + core.setOutput("report_file", reportFile); + core.setOutput("report_kind", report.reportKind || ""); + core.setOutput("status", report.status || ""); + core.setOutput("failed_stage", report.failedStage || ""); + core.setOutput("failed_stage_label", report.failedStageLabel || ""); + core.setOutput("workflow_run_url", report.workflowRunUrl || ""); + core.setOutput("branch", report.branch || ""); +} + +/** + * Builds a per-cluster JSON report from workflow stage results and an optional + * raw Ginkgo JSON report, writes it to disk, and publishes step outputs. + * + * @param {ClusterReportParams} params GitHub script dependencies. + * @returns {Promise>} Generated cluster report. + * @throws {Error} If config is incomplete or the report file cannot be written. + */ +async function buildClusterReport({ core, context, github, config } = {}) { + const resolvedConfig = requireClusterReportConfig( + config || readClusterReportConfigFromEnv() + ); + + if (!resolvedConfig.stageResults) { + resolvedConfig.stageResults = readStageResultsFromEnv(); + } + + if (!resolvedConfig.stageJobUrls && github) { + resolvedConfig.stageJobUrls = await readStageJobUrlsFromApi( + github, + context, + resolvedConfig, + core + ); + } + + const fallbackWorkflowRunUrl = getWorkflowRunUrl(context); + const branchName = getBranchName(context); + const rawReportPath = findGinkgoReport(resolvedConfig); + + if (!rawReportPath) { + core.warning( + `Ginkgo JSON report was not found for ${resolvedConfig.storageType} under ${resolvedConfig.reportsDir}` + ); + } + + const parsedReport = parseGinkgoReportFile(rawReportPath, core); + const report = buildReportPayload({ + config: resolvedConfig, + context, + fallbackWorkflowRunUrl, + branchName, + parsedReport, + rawReportPath, + }); + + try { + fs.writeFileSync( + resolvedConfig.reportFile, + `${JSON.stringify(report, null, 2)}\n` + ); + } catch 
(error) { + throw new Error( + `Unable to write cluster report file ${resolvedConfig.reportFile}: ${error.message}` + ); + } + + setReportOutputs(report, resolvedConfig.reportFile, core); + core.info(`Created report file: ${resolvedConfig.reportFile}`); + core.info(JSON.stringify(report, null, 2)); + + return report; +} + +module.exports = buildClusterReport; +module.exports.readClusterReportConfigFromEnv = readClusterReportConfigFromEnv; diff --git a/.github/scripts/js/e2e/report/cluster-report.test.js b/.github/scripts/js/e2e/report/cluster-report.test.js new file mode 100644 index 0000000000..88f7cba81a --- /dev/null +++ b/.github/scripts/js/e2e/report/cluster-report.test.js @@ -0,0 +1,805 @@ +// Copyright 2026 Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const fs = require("fs"); +const os = require("os"); +const path = require("path"); + +const buildClusterReport = require("./cluster-report"); +const { readClusterReportConfigFromEnv } = require("./cluster-report"); +const { parseGinkgoReport } = require("./shared/ginkgo-report-utils"); +const { buildClusterStatus } = require("./shared/report-model"); + +/** + * Creates a mocked GitHub Actions core object for unit tests. + * + * @returns {{ + * info: jest.Mock, + * warning: jest.Mock, + * debug: jest.Mock, + * setOutput: jest.Mock + * }} Mocked core object. 
+ */ +function createCore() { + return { + info: jest.fn(), + warning: jest.fn(), + debug: jest.fn(), + setOutput: jest.fn(), + }; +} + +/** + * Creates a minimal GitHub Actions context object for unit tests. + * + * @returns {{ + * serverUrl: string, + * repo: { owner: string, repo: string }, + * runId: string, + * ref: string + * }} Mocked context object. + */ +function createContext() { + return { + serverUrl: "https://github.com", + repo: { owner: "test", repo: "repo" }, + runId: "12345", + ref: "refs/heads/main", + }; +} + +/** + * Creates a minimal GitHub API client mock for workflow job discovery. + * + * @param {Record} jobConclusions Job conclusion by job name. + * @returns {Record} Mocked GitHub client. + */ +function createGithub(jobConclusions) { + const jobs = Object.entries(jobConclusions).map( + ([name, conclusion], index) => ({ + name, + conclusion, + html_url: `https://github.com/test/repo/actions/runs/12345/job/${ + index + 1 + }`, + }) + ); + + return { + rest: { + actions: { + listJobsForWorkflowRun: jest.fn().mockResolvedValue({ + data: { jobs }, + }), + }, + }, + }; +} + +/** + * Runs a test body inside a temporary directory and removes it afterwards. + * + * @template T + * @param {function(string): (Promise|T)} testFn Test body. + * @returns {Promise} Test result. + */ +async function withTempDir(testFn) { + const tempDir = fs.mkdtempSync( + path.join(os.tmpdir(), "cluster-report-test-") + ); + try { + return await testFn(tempDir); + } finally { + fs.rmSync(tempDir, { recursive: true, force: true }); + } +} + +/** + * Creates explicit cluster report config for unit tests. + * + * @param {Partial>} [overrides={}] Config overrides. + * @returns {Record} Cluster report config. 
+ */ +function createClusterConfig(overrides = {}) { + return { + storageType: "replicated", + reportsDir: "test/e2e", + reportFile: "e2e_report_replicated.json", + ...overrides, + stageResults: { + bootstrap: "success", + "configure-sdn": "success", + "storage-setup": "success", + "virtualization-setup": "success", + "e2e-test": "success", + ...(overrides.stageResults || {}), + }, + }; +} + +/** + * @typedef {Object} SpecReportOptions + * @property {string[]} [containerHierarchyTexts] + * @property {Array} [containerHierarchyLabels] + * @property {string} [leafNodeText] + * @property {string} [leafNodeType] + * @property {string[]} [leafNodeLabels] + * @property {string} [state] + * @property {string} [startTime] + * @property {string} [endTime] + * @property {Record|undefined} [failure] + */ + +/** + * Creates a synthetic Ginkgo spec report for parser tests. + * + * @param {SpecReportOptions} [options={}] Spec overrides. + * @returns {Record} Synthetic spec report. + */ +function createSpecReport({ + containerHierarchyTexts = [], + containerHierarchyLabels = [], + leafNodeText = "", + leafNodeType = "It", + leafNodeLabels = [], + state = "passed", + startTime = "2026-04-15T09:30:44Z", + endTime = "2026-04-15T09:31:44Z", + failure = undefined, +} = {}) { + return { + ContainerHierarchyTexts: containerHierarchyTexts, + ContainerHierarchyLocations: [], + ContainerHierarchyLabels: containerHierarchyLabels, + LeafNodeType: leafNodeType, + LeafNodeLocation: {}, + LeafNodeLabels: leafNodeLabels, + LeafNodeText: leafNodeText, + State: state, + StartTime: startTime, + EndTime: endTime, + RunTime: 60000000000, + ParallelProcess: 1, + ...(failure ? { Failure: failure } : {}), + }; +} + +/** + * Creates a serialized single-suite Ginkgo report for unit tests. + * + * @param {{ startedAt: string, specs: Array> }} params Report contents. + * @returns {string} JSON-serialized report. 
+ */ +function createGinkgoReport({ startedAt, specs }) { + return JSON.stringify( + [ + { + SuitePath: "/tmp/test/e2e", + SuiteDescription: "Tests", + SuiteSucceeded: false, + StartTime: startedAt, + EndTime: "2026-04-15T10:00:00Z", + RunTime: 1800000000000, + SpecReports: specs, + }, + ], + null, + 2 + ); +} + +describe("cluster-report", () => { + afterEach(() => { + delete process.env.STORAGE_TYPE; + delete process.env.REPORTS_DIR; + delete process.env.REPORT_FILE; + delete process.env.PIPELINE_JOB_NAME; + delete process.env.NEEDS_CONTEXT; + }); + + test("requires storage type when config is absent", async () => { + await expect( + buildClusterReport({ + core: createCore(), + context: createContext(), + }) + ).rejects.toThrow("buildClusterReport requires storageType"); + }); + + test("determines cluster setup status from explicit stage results", () => { + expect( + buildClusterStatus({ + bootstrap: "success", + "configure-sdn": "failure", + "storage-setup": "skipped", + "virtualization-setup": "skipped", + }) + ).toMatchObject({ + status: "failure", + stage: "configure-sdn", + stageLabel: "CONFIGURE SDN", + reason: "cluster-stage-failure", + }); + }); + + test("builds report from explicit config without reading env", async () => + withTempDir(async (tempDir) => { + const reportFile = path.join(tempDir, "explicit-report.json"); + + const report = await buildClusterReport({ + core: createCore(), + context: createContext(), + config: { + storageType: "nfs", + reportsDir: tempDir, + reportFile, + stageResults: { + bootstrap: "success", + "configure-sdn": "failure", + "storage-setup": "skipped", + "virtualization-setup": "skipped", + "e2e-test": "skipped", + }, + }, + }); + + expect(report.cluster).toBe("nfs"); + expect(report.workflowRunUrl).toBe( + "https://github.com/test/repo/actions/runs/12345" + ); + expect(report.branch).toBe("main"); + expect(report.clusterStatus).toMatchObject({ + status: "failure", + stage: "configure-sdn", + }); + 
expect(JSON.parse(fs.readFileSync(reportFile, "utf8")).cluster).toBe( + "nfs" + ); + })); + + test("builds report from environment config", async () => + withTempDir(async (tempDir) => { + const reportFile = path.join(tempDir, "env-report.json"); + process.env.STORAGE_TYPE = "replicated"; + process.env.PIPELINE_JOB_NAME = "E2E Pipeline (Replicated)"; + process.env.REPORTS_DIR = tempDir; + process.env.REPORT_FILE = reportFile; + process.env.NEEDS_CONTEXT = JSON.stringify({ + "bootstrap": { result: "success" }, + "configure-sdn": { result: "success" }, + "configure-storage": { result: "success" }, + "configure-virtualization": { result: "success" }, + "e2e-test": { result: "success" }, + }); + + expect(readClusterReportConfigFromEnv()).toMatchObject({ + storageType: "replicated", + pipelineJobName: "E2E Pipeline (Replicated)", + reportsDir: tempDir, + reportFile, + }); + + const report = await buildClusterReport({ + core: createCore(), + context: createContext(), + }); + + expect(report.cluster).toBe("replicated"); + expect(report.workflowRunUrl).toBe( + "https://github.com/test/repo/actions/runs/12345" + ); + expect(report.branch).toBe("main"); + expect(JSON.parse(fs.readFileSync(reportFile, "utf8")).cluster).toBe( + "replicated" + ); + })); + + test("reads stage results from env vars", async () => + withTempDir(async (tempDir) => { + const reportFile = path.join(tempDir, "env-report.json"); + process.env.STORAGE_TYPE = "nfs"; + process.env.PIPELINE_JOB_NAME = "E2E Pipeline (NFS)"; + process.env.REPORTS_DIR = tempDir; + process.env.REPORT_FILE = reportFile; + process.env.NEEDS_CONTEXT = JSON.stringify({ + "bootstrap": { result: "success" }, + "configure-sdn": { result: "failure" }, + "configure-storage": { result: "skipped" }, + "configure-virtualization": { result: "skipped" }, + "e2e-test": { result: "skipped" }, + }); + + const report = await buildClusterReport({ + core: createCore(), + context: createContext(), + }); + + 
expect(report.clusterStatus).toMatchObject({ + status: "failure", + stage: "configure-sdn", + }); + // No github — falls back to workflow run URL + expect(report.workflowRunUrl).toBe( + "https://github.com/test/repo/actions/runs/12345" + ); + })); + + test("fetches job URLs from GitHub API", async () => + withTempDir(async (tempDir) => { + const reportFile = path.join(tempDir, "env-report.json"); + process.env.STORAGE_TYPE = "nfs"; + process.env.PIPELINE_JOB_NAME = "E2E Pipeline (NFS)"; + process.env.REPORTS_DIR = tempDir; + process.env.REPORT_FILE = reportFile; + process.env.NEEDS_CONTEXT = JSON.stringify({ + "bootstrap": { result: "success" }, + "configure-sdn": { result: "failure" }, + "configure-storage": { result: "skipped" }, + "configure-virtualization": { result: "skipped" }, + "e2e-test": { result: "skipped" }, + }); + + const report = await buildClusterReport({ + core: createCore(), + context: createContext(), + github: createGithub({ + "E2E Pipeline (NFS) / Bootstrap cluster": "success", + "E2E Pipeline (NFS) / Configure SDN": "failure", + "E2E Pipeline (NFS) / Configure storage": "skipped", + "E2E Pipeline (NFS) / Configure Virtualization": "skipped", + "E2E Pipeline (NFS) / E2E test": "skipped", + }), + }); + + expect(report.clusterStatus).toMatchObject({ + status: "failure", + stage: "configure-sdn", + }); + // github provided — URL points to the specific failed job + expect(report.workflowRunUrl).toBe( + "https://github.com/test/repo/actions/runs/12345/job/2" + ); + })); + + test("works without github (no job URLs)", async () => + withTempDir(async (tempDir) => { + const reportFile = path.join(tempDir, "env-report.json"); + process.env.STORAGE_TYPE = "replicated"; + process.env.REPORTS_DIR = tempDir; + process.env.REPORT_FILE = reportFile; + process.env.NEEDS_CONTEXT = JSON.stringify({ + "bootstrap": { result: "success" }, + "configure-sdn": { result: "success" }, + "configure-storage": { result: "success" }, + "configure-virtualization": { result: 
"success" }, + "e2e-test": { result: "success" }, + }); + + const report = await buildClusterReport({ + core: createCore(), + context: createContext(), + // no github + }); + + expect(report.cluster).toBe("replicated"); + // stageJobUrls is empty — falls back to workflow run URL + expect(report.workflowRunUrl).toBe( + "https://github.com/test/repo/actions/runs/12345" + ); + })); + + test("marks Ginkgo JSON with failed specs as failed", async () => + withTempDir(async (tempDir) => { + const rawReportPath = path.join( + tempDir, + "e2e_report_replicated_2026-04-15.json" + ); + fs.writeFileSync( + rawReportPath, + createGinkgoReport({ + startedAt: "2026-04-15T09:30:44Z", + specs: [ + createSpecReport({ + leafNodeType: "SynchronizedBeforeSuite", + state: "passed", + }), + createSpecReport({ + containerHierarchyTexts: ["Suite"], + leafNodeText: "passes", + state: "passed", + }), + createSpecReport({ + containerHierarchyTexts: ["Suite"], + leafNodeText: "fails & burns", + state: "failed", + leafNodeLabels: ["Slow"], + }), + createSpecReport({ + containerHierarchyTexts: ["Other"], + leafNodeText: "errors ", + state: "timedout", + }), + createSpecReport({ + leafNodeText: "skipped", + state: "skipped", + }), + ], + }) + ); + + const reportFile = path.join(tempDir, "report.json"); + const config = createClusterConfig({ + reportsDir: tempDir, + reportFile, + }); + + const core = createCore(); + const report = await buildClusterReport({ + core, + context: createContext(), + config, + }); + + expect(report.reportKind).toBe("tests"); + expect(report.failedStage).toBe("e2e-test"); + expect(report.clusterStatus).toMatchObject({ + status: "success", + stage: "ready", + stageLabel: "CLUSTER READY", + }); + expect(report.testStatus).toMatchObject({ + status: "failure", + reason: "ginkgo-failed", + }); + expect(report.metrics).toEqual({ + passed: 1, + failed: 1, + errors: 1, + skipped: 1, + total: 4, + successRate: 25, + }); + expect(report.failedTests).toEqual([ + "[It] Suite fails & 
burns [Slow]", + "[It] Other errors ", + ]); + expect(report.reportSource).toBe("ginkgo-json"); + expect(report.sourceReport).toBe(rawReportPath); + expect(JSON.parse(fs.readFileSync(reportFile, "utf8")).reportKind).toBe( + "tests" + ); + expect(core.setOutput).toHaveBeenCalledWith("report_file", reportFile); + expect(core.setOutput).toHaveBeenCalledWith("report_kind", "tests"); + expect(core.setOutput).toHaveBeenCalledWith("status", "failure"); + expect(core.setOutput).toHaveBeenCalledWith("failed_stage", "e2e-test"); + expect(core.setOutput).toHaveBeenCalledWith( + "failed_stage_label", + "E2E TEST" + ); + expect(core.setOutput).toHaveBeenCalledWith( + "workflow_run_url", + "https://github.com/test/repo/actions/runs/12345" + ); + expect(core.setOutput).toHaveBeenCalledWith("branch", "main"); + })); + + test("fails when multiple matching Ginkgo JSON reports exist", async () => + withTempDir(async (tempDir) => { + const firstReportPath = path.join( + tempDir, + "nested", + "e2e_report_replicated_2026-04-15.json" + ); + const secondReportPath = path.join( + tempDir, + "e2e_report_replicated_2026-04-16.json" + ); + fs.mkdirSync(path.dirname(firstReportPath), { recursive: true }); + + fs.writeFileSync( + firstReportPath, + createGinkgoReport({ + startedAt: "2026-04-15T09:30:44Z", + specs: [ + createSpecReport({ leafNodeText: "old pass", state: "passed" }), + ], + }) + ); + fs.writeFileSync( + secondReportPath, + createGinkgoReport({ + startedAt: "2026-04-16T09:30:44Z", + specs: [ + createSpecReport({ leafNodeText: "latest pass", state: "passed" }), + ], + }) + ); + + const reportFile = path.join(tempDir, "report.json"); + const config = createClusterConfig({ + reportsDir: tempDir, + reportFile, + }); + + await expect( + buildClusterReport({ + core: createCore(), + context: createContext(), + config, + }) + ).rejects.toThrow("Expected a single Ginkgo JSON report, but found 2"); + expect(fs.existsSync(reportFile)).toBe(false); + })); + + test("falls back to 
missing-report status when raw Ginkgo JSON is invalid", async () => + withTempDir(async (tempDir) => { + const rawReportPath = path.join( + tempDir, + "e2e_report_replicated_2026-04-15.json" + ); + fs.writeFileSync(rawReportPath, "{not-valid-json"); + + const reportFile = path.join(tempDir, "report.json"); + const config = createClusterConfig({ + reportsDir: tempDir, + reportFile, + }); + + const core = createCore(); + const report = await buildClusterReport({ + core, + context: createContext(), + config, + }); + + expect(report.reportKind).toBe("artifact-missing"); + expect(report.failedStage).toBe("artifact-missing"); + expect(report.status).toBe("missing"); + expect(report.reportSource).toBe("ginkgo-json-invalid"); + expect(report.testStatus).toMatchObject({ + status: "missing", + reason: "ginkgo-report-invalid", + }); + expect(core.warning).toHaveBeenCalledWith( + expect.stringContaining("Unable to parse Ginkgo JSON report") + ); + })); + + test("throws a descriptive error when writing the cluster report fails", async () => + withTempDir(async (tempDir) => { + const reportFile = path.join(tempDir, "report.json"); + const config = createClusterConfig({ + reportsDir: tempDir, + reportFile, + }); + + const writeSpy = jest + .spyOn(fs, "writeFileSync") + .mockImplementation(() => { + throw new Error("disk full"); + }); + + try { + await expect( + buildClusterReport({ + core: createCore(), + context: createContext(), + config, + }) + ).rejects.toThrow( + `Unable to write cluster report file ${reportFile}: disk full` + ); + } finally { + writeSpy.mockRestore(); + } + })); + + test("parses CI-like nfs counts from Ginkgo JSON and ignores non-It specs", () => { + const specs = [ + createSpecReport({ + leafNodeType: "SynchronizedBeforeSuite", + state: "passed", + }), + ]; + + for (let index = 1; index <= 90; index += 1) { + specs.push( + createSpecReport({ + containerHierarchyTexts: ["PassingSuite"], + leafNodeText: `passed ${index}`, + state: "passed", + }) + ); + } + + 
specs.push( + createSpecReport({ + containerHierarchyTexts: [ + "VirtualMachineOperationRestore", + "restores a virtual machine from a snapshot", + ], + containerHierarchyLabels: [["Slow"], []], + leafNodeText: + "BestEffort restore mode; automatic restart approval mode; manual run policy", + state: "failed", + }) + ); + + for (let index = 2; index <= 7; index += 1) { + specs.push( + createSpecReport({ + containerHierarchyTexts: ["FailingSuite"], + leafNodeText: `failed ${index}`, + state: "failed", + }) + ); + } + + specs.push( + createSpecReport({ + containerHierarchyTexts: ["SkippedSuite"], + leafNodeText: "skipped with reason", + state: "skipped", + failure: { + Message: "skip reason must not turn into a failure metric", + }, + }) + ); + + for (let index = 2; index <= 34; index += 1) { + specs.push( + createSpecReport({ + containerHierarchyTexts: ["SkippedSuite"], + leafNodeText: `skipped ${index}`, + state: "skipped", + }) + ); + } + + const parsed = parseGinkgoReport( + createGinkgoReport({ + startedAt: "2026-04-28T03:11:27.708387575Z", + specs, + }) + ); + + expect(parsed.metrics).toEqual({ + passed: 90, + failed: 7, + errors: 0, + skipped: 34, + total: 131, + successRate: 68.7, + }); + expect(parsed.startedAt).toBe("2026-04-28T03:11:27.708387575Z"); + expect(parsed.failedTests).toHaveLength(7); + expect(parsed.failedTests).toContain( + "[It] VirtualMachineOperationRestore restores a virtual machine from a snapshot BestEffort restore mode; automatic restart approval mode; manual run policy [Slow]" + ); + }); + + test("reports configure-sdn as the failed pre-E2E phase", async () => + withTempDir(async (tempDir) => { + const reportFile = path.join(tempDir, "report.json"); + const config = createClusterConfig({ + reportsDir: tempDir, + reportFile, + stageResults: { + "configure-sdn": "failure", + "storage-setup": "skipped", + "virtualization-setup": "skipped", + "e2e-test": "skipped", + }, + }); + + const report = await buildClusterReport({ + core: 
createCore(), + context: createContext(), + config, + }); + + expect(report.reportKind).toBe("stage-failure"); + expect(report.failedStage).toBe("configure-sdn"); + expect(report.failedStageLabel).toBe("CONFIGURE SDN"); + expect(report.status).toBe("failure"); + expect(report.clusterStatus).toMatchObject({ + status: "failure", + stage: "configure-sdn", + reason: "cluster-stage-failure", + }); + expect(report.testStatus).toMatchObject({ + status: "not-run", + reason: "cluster-stage-failure", + }); + })); + + test("marks missing artifacts when test stage is successful but no reports were found", async () => + withTempDir(async (tempDir) => { + const reportFile = path.join(tempDir, "report.json"); + const config = createClusterConfig({ + reportsDir: tempDir, + reportFile, + }); + + const report = await buildClusterReport({ + core: createCore(), + context: createContext(), + config, + }); + + expect(report.reportKind).toBe("artifact-missing"); + expect(report.failedStage).toBe("artifact-missing"); + expect(report.failedStageLabel).toBe("TEST REPORTS NOT FOUND"); + expect(report.status).toBe("missing"); + expect(report.clusterStatus.status).toBe("success"); + expect(report.testStatus).toMatchObject({ + status: "missing", + reason: "ginkgo-report-missing", + }); + })); + + test("keeps cancelled test stage when no reports were found", async () => + withTempDir(async (tempDir) => { + const reportFile = path.join(tempDir, "report.json"); + const config = createClusterConfig({ + reportsDir: tempDir, + reportFile, + stageResults: { + "e2e-test": "cancelled", + }, + }); + + const report = await buildClusterReport({ + core: createCore(), + context: createContext(), + config, + }); + + expect(report.reportKind).toBe("tests"); + expect(report.failedStage).toBe("e2e-test"); + expect(report.failedStageLabel).toBe("E2E TEST"); + expect(report.status).toBe("cancelled"); + expect(report.clusterStatus.status).toBe("success"); + expect(report.testStatus).toMatchObject({ + status: 
"cancelled", + reason: "e2e-cancelled", + }); + })); + + test("keeps failed test stage when no reports were found", async () => + withTempDir(async (tempDir) => { + const reportFile = path.join(tempDir, "report.json"); + const config = createClusterConfig({ + reportsDir: tempDir, + reportFile, + stageResults: { + "e2e-test": "failure", + }, + }); + + const report = await buildClusterReport({ + core: createCore(), + context: createContext(), + config, + }); + + expect(report.reportKind).toBe("tests"); + expect(report.failedStage).toBe("e2e-test"); + expect(report.failedStageLabel).toBe("E2E TEST"); + expect(report.status).toBe("failure"); + expect(report.clusterStatus.status).toBe("success"); + expect(report.testStatus).toMatchObject({ + status: "failure", + reason: "ginkgo-report-missing", + }); + })); +}); diff --git a/.github/scripts/js/e2e/report/messenger-report.js b/.github/scripts/js/e2e/report/messenger-report.js new file mode 100644 index 0000000000..ff46d175d1 --- /dev/null +++ b/.github/scripts/js/e2e/report/messenger-report.js @@ -0,0 +1,155 @@ +// Copyright 2026 Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const fs = require("fs"); + +const { listMatchingFiles } = require("./shared/fs-utils"); +const { REPORT_FILE_PATTERN } = require("./shared/report-model"); +const { makeThreadedReportInLoop } = require("./messenger/loop-client"); +const { readMessengerConfigFromEnv } = require("./messenger/config"); +const { + createMissingReport, + getReportClusterKey, +} = require("./messenger/model"); +const { + buildMainMessage, + buildThreadMessages, +} = require("./messenger/markdown"); + +/** + * @typedef {Object} MessengerReportCore + * @property {function(string): void} warning + * @property {function(string): void} [info] + * @property {function(string, string): void} [setOutput] + */ + +/** + * @typedef {Object} MessengerMessagesParams + * @property {string} reportsDir + * @property {string[]} configuredClusters + * @property {MessengerReportCore} core + */ + +/** + * @typedef {Object} RenderMessengerReportParams + * @property {MessengerReportCore} core + * @property {string} [reportsDir] + */ + +/** + * Loads report JSON files from disk and injects synthetic reports for clusters + * whose artifacts are missing. + * + * The result is ordered as follows: + * 1. Configured clusters in their declared order (missing ones get synthetic reports). + * 2. Any extra clusters found on disk, sorted alphabetically. + * + * @param {string} reportsDir Directory containing `e2e_report_*.json`. + * @param {string[]} configuredClusters Clusters expected in the final report. + * @param {MessengerReportCore} core GitHub core API. + * @returns {Array>} Ordered cluster reports. 
+ */ +function readReports(reportsDir, configuredClusters, core) { + const reportFiles = listMatchingFiles(reportsDir, REPORT_FILE_PATTERN); + const reportsByCluster = new Map(); + + for (const reportFile of reportFiles) { + try { + const report = JSON.parse(fs.readFileSync(reportFile, "utf8")); + const clusterName = getReportClusterKey(report); + if (!clusterName) { + // cluster-report.js always writes storageType; a missing key means + // the file is corrupt or was not produced by this pipeline. + throw new Error(`report is missing storageType/cluster fields`); + } + reportsByCluster.set(clusterName, report); + } catch (error) { + core.warning(`Unable to load ${reportFile}: ${error.message}`); + } + } + + // Configured clusters first, in declared order; missing ones get synthetic reports. + const result = configuredClusters.map( + (name) => reportsByCluster.get(name) ?? createMissingReport(name) + ); + + // Any extra clusters not in the configured list, sorted alphabetically. + const configuredSet = new Set(configuredClusters); + const extras = []; + for (const [key, report] of reportsByCluster) { + if (!configuredSet.has(key)) { + extras.push(report); + } + } + extras.sort((a, b) => + getReportClusterKey(a).localeCompare(getReportClusterKey(b)) + ); + + return [...result, ...extras]; +} + +/** + * Reads cluster reports from disk and builds both messenger message bodies. + * + * @param {MessengerMessagesParams} params Message rendering inputs. + * @returns {{ + * message: string, + * threadMessage: string, + * threadMessages: string[] + * }} Rendered markdown payloads. 
+ */ +function buildMessengerMessages({ reportsDir, configuredClusters, core }) { + const orderedReports = readReports(reportsDir, configuredClusters, core); + const threadMessages = buildThreadMessages(orderedReports); + return { + message: buildMainMessage(orderedReports), + threadMessage: threadMessages.join("\n\n"), + threadMessages, + }; +} + +/** + * Entry point used by `actions/github-script` to render and optionally publish + * the aggregated E2E messenger report. + * + * @param {RenderMessengerReportParams} params GitHub script dependencies. + * @returns {Promise<{ + * message: string, + * threadMessage: string, + * threadMessages: string[] + * }>} Rendered messages. + */ +async function renderMessengerReport({ core, reportsDir }) { + const config = readMessengerConfigFromEnv(); + const { message, threadMessage, threadMessages } = buildMessengerMessages({ + reportsDir: reportsDir || config.reportsDir, + configuredClusters: config.configuredClusters, + core, + }); + + core.info(message); + core.setOutput("message", message); + core.setOutput("thread_message", threadMessage); + core.setOutput("thread_messages", JSON.stringify(threadMessages)); + + if (config.loop) { + try { + await makeThreadedReportInLoop({ message, threadMessages, loop: config.loop }, core); + } catch (error) { + core.warning(`Unable to deliver report to Loop API: ${error.message}`); + } + } + + return { message, threadMessage, threadMessages }; +} + +module.exports = renderMessengerReport; diff --git a/.github/scripts/js/e2e/report/messenger-report.test.js b/.github/scripts/js/e2e/report/messenger-report.test.js new file mode 100644 index 0000000000..5d22d3073f --- /dev/null +++ b/.github/scripts/js/e2e/report/messenger-report.test.js @@ -0,0 +1,683 @@ +// Copyright 2026 Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const fs = require("fs"); +const os = require("os"); +const path = require("path"); + +const renderMessengerReport = require("./messenger-report"); +const { readMessengerConfigFromEnv } = require("./messenger/config"); + +/** + * Creates a mocked GitHub Actions core object for unit tests. + * + * @returns {{ + * info: jest.Mock, + * warning: jest.Mock, + * debug: jest.Mock, + * setOutput: jest.Mock + * }} Mocked core object. + */ +function createCore() { + return { + info: jest.fn(), + warning: jest.fn(), + debug: jest.fn(), + setOutput: jest.fn(), + }; +} + +/** + * Runs a test body inside a temporary directory and removes it afterwards. + * + * @template T + * @param {function(string): (Promise|T)} testFn Test body. + * @returns {Promise} Test result. 
+ */ +async function withTempDir(testFn) { + const tempDir = fs.mkdtempSync( + path.join(os.tmpdir(), "messenger-report-test-") + ); + try { + return await testFn(tempDir); + } finally { + fs.rmSync(tempDir, { recursive: true, force: true }); + } +} + +describe("messenger-report", () => { + afterEach(() => { + delete process.env.REPORTS_DIR; + delete process.env.EXPECTED_STORAGE_TYPES; + delete process.env.LOOP_API_BASE_URL; + delete process.env.LOOP_CHANNEL_ID; + delete process.env.LOOP_TOKEN; + delete global.fetch; + }); + + test("reads normalized messenger config from env", () => { + const config = readMessengerConfigFromEnv({ + REPORTS_DIR: "custom-reports", + LOOP_API_BASE_URL: "https://loop.example.invalid/api/v4/", + LOOP_CHANNEL_ID: " channel-id ", + LOOP_TOKEN: " token ", + }); + + expect(config).toEqual({ + reportsDir: "custom-reports", + configuredClusters: ["replicated", "nfs"], + loop: { + apiUrl: "https://loop.example.invalid/api/v4/posts", + channelId: "channel-id", + token: "token", + }, + }); + }); + + test("returns null loop config when no Loop credentials are set", () => { + const config = readMessengerConfigFromEnv({}); + + expect(config.loop).toBeNull(); + }); + + test("throws when Loop credentials are only partially configured", () => { + expect(() => + readMessengerConfigFromEnv({ + LOOP_API_BASE_URL: "https://loop.example.invalid", + // LOOP_CHANNEL_ID and LOOP_TOKEN intentionally absent + }) + ).toThrow("LOOP_CHANNEL_ID, LOOP_TOKEN, and LOOP_API_BASE_URL are required"); + }); + + test("uses default configured clusters when env override is absent", () => { + const config = readMessengerConfigFromEnv({}); + + expect(config.configuredClusters).toEqual(["replicated", "nfs"]); + expect(config.reportsDir).toBe("downloaded-artifacts"); + }); + + test("renders test results, stage failures, and per-cluster thread replies", async () => + withTempDir(async (tempDir) => { + fs.writeFileSync( + path.join(tempDir, "e2e_report_replicated.json"), + 
JSON.stringify({ + cluster: "replicated", + storageType: "replicated", + reportKind: "tests", + branch: "main", + workflowRunUrl: "https://example.invalid/replicated", + startedAt: "2026-04-15T09:30:44", + metrics: { + passed: 12, + skipped: 2, + failed: 1, + errors: 0, + total: 15, + successRate: 80, + }, + failedTests: ["[It] fails"], + }) + ); + + fs.writeFileSync( + path.join(tempDir, "e2e_report_nfs.json"), + JSON.stringify({ + cluster: "nfs", + storageType: "nfs", + reportKind: "stage-failure", + branch: "main", + workflowRunUrl: "https://example.invalid/nfs", + failedStage: "configure-sdn", + failedStageLabel: "CONFIGURE SDN", + metrics: { + passed: 0, + failed: 0, + errors: 0, + total: 0, + successRate: 0, + }, + failedTests: [], + }) + ); + + process.env.REPORTS_DIR = tempDir; + process.env.EXPECTED_STORAGE_TYPES = '["replicated","nfs"]'; + + const result = await renderMessengerReport({ core: createCore() }); + + expect(result.message).toContain("### Test results"); + expect(result.message).toContain( + "| [replicated](https://example.invalid/replicated) | 12 | 2 | 1 | 0 | 15 | 80.00% |" + ); + expect(result.message).toContain("### Cluster failures"); + expect(result.message).toContain( + "- [nfs](https://example.invalid/nfs): CONFIGURE SDN" + ); + expect(result.message).not.toContain("### Failed tests"); + expect(result.threadMessages).toEqual([ + "### Failed tests\n\n**replicated**\n\n| Test group |\n|---|\n| fails |", + ]); + expect(result.threadMessage).toContain("### Failed tests"); + expect(result.threadMessage).toContain("**replicated**"); + expect(result.threadMessage).toContain("| Test group |"); + expect(result.threadMessage).toContain("| fails |"); + expect(result.threadMessage).not.toContain("**nfs**\n|"); + })); + + test("creates artifact-missing entry for absent cluster report", async () => + withTempDir(async (tempDir) => { + process.env.REPORTS_DIR = tempDir; + process.env.EXPECTED_STORAGE_TYPES = '["replicated"]'; + + const result = await 
renderMessengerReport({ core: createCore() }); + + expect(result.message).toContain("### Missing reports"); + expect(result.message).toContain( + "- replicated: ⚠️ E2E REPORT ARTIFACT NOT FOUND" + ); + expect(result.threadMessage).toBe(""); + expect(result.threadMessages).toEqual([]); + })); + + test("warns and skips report files that are missing storageType/cluster fields", async () => + withTempDir(async (tempDir) => { + fs.writeFileSync( + path.join(tempDir, "e2e_report_corrupt.json"), + JSON.stringify({ + reportKind: "stage-failure", + failedStage: "configure-sdn", + failedStageLabel: "CONFIGURE SDN", + status: "failure", + // no storageType / cluster fields + }) + ); + + fs.writeFileSync( + path.join(tempDir, "e2e_report_nfs.json"), + JSON.stringify({ + cluster: "nfs", + storageType: "nfs", + reportKind: "tests", + branch: "main", + workflowRunUrl: "https://example.invalid/nfs", + startedAt: "2026-04-15T09:30:44", + metrics: { + passed: 8, + skipped: 1, + failed: 1, + errors: 0, + total: 10, + successRate: 80, + }, + failedTests: ["[It] nfs fails"], + }) + ); + + process.env.REPORTS_DIR = tempDir; + process.env.EXPECTED_STORAGE_TYPES = '["nfs"]'; + + const core = createCore(); + const result = await renderMessengerReport({ core }); + + // The valid "nfs" report is still rendered normally. + expect(result.message).toContain("### Test results"); + // The corrupt file is dropped; no phantom entry appears in the output. + expect(result.message).not.toContain("corrupt"); + // A warning is emitted so the problem is visible in CI logs. 
+ expect(core.warning).toHaveBeenCalledWith( + expect.stringContaining("report is missing storageType/cluster fields") + ); + })); + + test("splits failed tests into separate thread messages per cluster", async () => + withTempDir(async (tempDir) => { + fs.writeFileSync( + path.join(tempDir, "e2e_report_replicated.json"), + JSON.stringify({ + cluster: "replicated", + storageType: "replicated", + reportKind: "tests", + branch: "main", + workflowRunUrl: "https://example.invalid/replicated", + startedAt: "2026-04-15T09:30:44", + metrics: { + passed: 12, + skipped: 0, + failed: 1, + errors: 0, + total: 13, + successRate: 92.31, + }, + failedTests: ["[It] replicated fails"], + }) + ); + + fs.writeFileSync( + path.join(tempDir, "e2e_report_nfs.json"), + JSON.stringify({ + cluster: "nfs", + storageType: "nfs", + reportKind: "tests", + branch: "main", + workflowRunUrl: "https://example.invalid/nfs", + startedAt: "2026-04-15T09:30:44", + metrics: { + passed: 8, + skipped: 1, + failed: 1, + errors: 0, + total: 10, + successRate: 80, + }, + failedTests: ["[It] nfs fails"], + }) + ); + + process.env.REPORTS_DIR = tempDir; + process.env.EXPECTED_STORAGE_TYPES = '["replicated","nfs"]'; + + const result = await renderMessengerReport({ core: createCore() }); + + expect(result.threadMessages).toEqual([ + "### Failed tests\n\n**replicated**\n\n| Test group |\n|---|\n| replicated |", + "**nfs**\n\n| Test group |\n|---|\n| nfs |", + ]); + })); + + test("groups failed tests by top-level describe name", async () => + withTempDir(async (tempDir) => { + fs.writeFileSync( + path.join(tempDir, "e2e_report_nfs.json"), + JSON.stringify({ + cluster: "nfs", + storageType: "nfs", + reportKind: "tests", + branch: "main", + workflowRunUrl: "https://example.invalid/nfs", + startedAt: "2026-04-15T09:30:44", + metrics: { + passed: 90, + skipped: 34, + failed: 7, + errors: 0, + total: 131, + successRate: 68.7, + }, + failedTests: [ + "[It] VirtualMachineOperationRestore restores a virtual machine from 
a snapshot BestEffort restore mode; manual restart approval mode; always on unless stopped manually run policy [Slow]", + "[It] VirtualMachineOperationRestore restores a virtual machine from a snapshot Strict restore mode; manual restart approval mode; always on unless stopped manually run policy [Slow]", + "[It] VirtualMachineOperationRestore restores a virtual machine from a snapshot BestEffort restore mode; manual restart approval mode; always on unless stopped manually run policy; with resource deletion [Slow]", + "[It] VirtualMachineOperationRestore restores a virtual machine from a snapshot Strict restore mode; manual restart approval mode; always on unless stopped manually run policy; with resource deletion [Slow]", + "[It] VirtualMachineOperationRestore restores a virtual machine from a snapshot BestEffort restore mode; automatic restart approval mode; always on unless stopped manually run policy [Slow]", + "[It] VirtualMachineOperationRestore restores a virtual machine from a snapshot BestEffort restore mode; automatic restart approval mode; manual run policy [Slow]", + "[It] VirtualMachineAdditionalNetworkInterfaces verifies interface name persistence after removing middle ClusterNetwork should preserve interface name after removing middle ClusterNetwork and rebooting", + ], + }) + ); + + process.env.REPORTS_DIR = tempDir; + process.env.EXPECTED_STORAGE_TYPES = '["nfs"]'; + + const result = await renderMessengerReport({ core: createCore() }); + + expect(result.threadMessages).toEqual([ + [ + "### Failed tests", + "", + "**nfs**", + "", + "| Test group |", + "|---|", + "| VirtualMachineOperationRestore |", + "| VirtualMachineAdditionalNetworkInterfaces |", + ].join("\n"), + ]); + })); + + test("renders cluster status from downloaded report artifact", async () => + withTempDir(async (tempDir) => { + fs.writeFileSync( + path.join(tempDir, "e2e_report_replicated.json"), + JSON.stringify({ + cluster: "replicated", + storageType: "replicated", + branch: "main", 
+ workflowRunUrl: "https://example.invalid/replicated", + clusterStatus: { + status: "failure", + stage: "configure-sdn", + stageLabel: "CONFIGURE SDN", + message: "❌ CONFIGURE SDN FAILED", + reason: "cluster-stage-failure", + }, + testStatus: { + status: "not-run", + reason: "cluster-stage-failure", + message: + "E2E tests were not run because cluster setup did not finish", + }, + metrics: { + passed: 0, + failed: 0, + errors: 0, + total: 0, + successRate: 0, + }, + failedTests: [], + }) + ); + + process.env.REPORTS_DIR = tempDir; + + const result = await renderMessengerReport({ core: createCore() }); + + expect(result.message).not.toContain("Branch: `main`"); + expect(result.message).toContain("### Cluster failures"); + expect(result.message).toContain( + "- [replicated](https://example.invalid/replicated): ❌ CONFIGURE SDN FAILED" + ); + expect(result.threadMessage).toBe(""); + expect(result.threadMessages).toEqual([]); + })); + + test("shows branch line for non-main branches", async () => + withTempDir(async (tempDir) => { + fs.writeFileSync( + path.join(tempDir, "e2e_report_replicated.json"), + JSON.stringify({ + cluster: "replicated", + storageType: "replicated", + branch: "release-1.2", + clusterStatus: { + status: "failure", + stage: "configure-sdn", + stageLabel: "CONFIGURE SDN", + message: "❌ CONFIGURE SDN FAILED", + reason: "cluster-stage-failure", + }, + testStatus: { + status: "not-run", + reason: "cluster-stage-failure", + message: + "E2E tests were not run because cluster setup did not finish", + }, + metrics: { + passed: 0, + failed: 0, + errors: 0, + total: 0, + successRate: 0, + }, + failedTests: [], + }) + ); + + process.env.REPORTS_DIR = tempDir; + + const result = await renderMessengerReport({ core: createCore() }); + + expect(result.message).toContain("Branch: `release-1.2`"); + })); + + test("renders missing test report status from downloaded report artifact", async () => + withTempDir(async (tempDir) => { + fs.writeFileSync( + 
path.join(tempDir, "e2e_report_replicated.json"), + JSON.stringify({ + cluster: "replicated", + storageType: "replicated", + branch: "main", + workflowRunUrl: "https://example.invalid/replicated", + clusterStatus: { + status: "success", + stage: "ready", + stageLabel: "CLUSTER READY", + message: "✅ CLUSTER READY", + reason: "", + }, + testStatus: { + status: "missing", + reason: "ginkgo-report-missing", + message: "⚠️ E2E TEST REPORT NOT FOUND", + }, + metrics: { + passed: 0, + failed: 0, + errors: 0, + total: 0, + successRate: 0, + }, + failedTests: [], + }) + ); + + process.env.REPORTS_DIR = tempDir; + + const result = await renderMessengerReport({ core: createCore() }); + + expect(result.message).toContain("### Missing reports"); + expect(result.message).toContain( + "- [replicated](https://example.invalid/replicated): ⚠️ E2E TEST REPORT NOT FOUND" + ); + expect(result.threadMessage).toBe(""); + expect(result.threadMessages).toEqual([]); + })); + + test("posts main report and per-cluster failed tests thread via Loop API", async () => + withTempDir(async (tempDir) => { + fs.writeFileSync( + path.join(tempDir, "e2e_report_replicated.json"), + JSON.stringify({ + cluster: "replicated", + storageType: "replicated", + reportKind: "tests", + branch: "main", + workflowRunUrl: "https://example.invalid/replicated", + startedAt: "2026-04-15T09:30:44", + metrics: { + passed: 10, + skipped: 1, + failed: 1, + errors: 0, + total: 12, + successRate: 83.33, + }, + failedTests: ["[It] fails"], + }) + ); + + process.env.REPORTS_DIR = tempDir; + process.env.EXPECTED_STORAGE_TYPES = '["replicated"]'; + process.env.LOOP_API_BASE_URL = "https://loop.example.invalid"; + process.env.LOOP_CHANNEL_ID = "channel-id"; + process.env.LOOP_TOKEN = "loop-token"; + + global.fetch = jest + .fn() + .mockResolvedValueOnce({ + ok: true, + status: 201, + text: async () => JSON.stringify({ id: "root-post-id" }), + }) + .mockResolvedValueOnce({ + ok: true, + status: 201, + text: async () => 
JSON.stringify({ id: "thread-post-id" }), + }); + + const result = await renderMessengerReport({ core: createCore() }); + + expect(global.fetch).toHaveBeenCalledTimes(2); + expect(global.fetch).toHaveBeenNthCalledWith( + 1, + "https://loop.example.invalid/api/v4/posts", + expect.objectContaining({ + method: "POST", + headers: expect.objectContaining({ + Authorization: "Bearer loop-token", + "Content-Type": "application/json", + }), + }) + ); + expect(JSON.parse(global.fetch.mock.calls[0][1].body)).toEqual({ + channel_id: "channel-id", + message: result.message, + }); + expect(JSON.parse(global.fetch.mock.calls[1][1].body)).toEqual({ + channel_id: "channel-id", + message: + "### Failed tests\n\n**replicated**\n\n| Test group |\n|---|\n| fails |", + root_id: "root-post-id", + }); + })); + + test("warns when Loop API returns an empty response body (no post id)", async () => + withTempDir(async (tempDir) => { + fs.writeFileSync( + path.join(tempDir, "e2e_report_replicated.json"), + JSON.stringify({ + cluster: "replicated", + storageType: "replicated", + reportKind: "tests", + branch: "main", + workflowRunUrl: "https://example.invalid/replicated", + startedAt: "2026-04-15T09:30:44", + metrics: { + passed: 11, + skipped: 0, + failed: 0, + errors: 0, + total: 11, + successRate: 100, + }, + failedTests: [], + }) + ); + + process.env.REPORTS_DIR = tempDir; + process.env.EXPECTED_STORAGE_TYPES = '["replicated"]'; + process.env.LOOP_API_BASE_URL = "https://loop.example.invalid"; + process.env.LOOP_CHANNEL_ID = "channel-id"; + process.env.LOOP_TOKEN = "loop-token"; + + const core = createCore(); + global.fetch = jest.fn().mockResolvedValue({ + ok: true, + status: 201, + text: async () => "", + }); + + await renderMessengerReport({ core }); + + // Empty body → no post id → thread replies cannot be sent → warning emitted. 
+ expect(global.fetch).toHaveBeenCalledTimes(1); + expect(core.warning).toHaveBeenCalledWith( + expect.stringContaining("Loop API did not return a post id") + ); + // Report outputs are still set because the message was built before sending. + expect(core.setOutput).toHaveBeenCalledWith("thread_messages", "[]"); + })); + + test("warns when Loop API returns a non-JSON response body (no post id)", async () => + withTempDir(async (tempDir) => { + fs.writeFileSync( + path.join(tempDir, "e2e_report_replicated.json"), + JSON.stringify({ + cluster: "replicated", + storageType: "replicated", + reportKind: "tests", + branch: "main", + workflowRunUrl: "https://example.invalid/replicated", + startedAt: "2026-04-15T09:30:44", + metrics: { + passed: 11, + skipped: 0, + failed: 0, + errors: 0, + total: 11, + successRate: 100, + }, + failedTests: [], + }) + ); + + process.env.REPORTS_DIR = tempDir; + process.env.EXPECTED_STORAGE_TYPES = '["replicated"]'; + process.env.LOOP_API_BASE_URL = "https://loop.example.invalid"; + process.env.LOOP_CHANNEL_ID = "channel-id"; + process.env.LOOP_TOKEN = "loop-token"; + + const core = createCore(); + global.fetch = jest.fn().mockResolvedValue({ + ok: true, + status: 201, + text: async () => "not-json", + }); + + await renderMessengerReport({ core }); + + // Non-JSON body → parse warning → no post id → delivery warning. + expect(global.fetch).toHaveBeenCalledTimes(1); + expect(core.warning).toHaveBeenCalledWith( + expect.stringContaining("Loop API returned a non-JSON response body") + ); + expect(core.warning).toHaveBeenCalledWith( + expect.stringContaining("Loop API did not return a post id") + ); + // Report outputs are still set because the message was built before sending. 
+ expect(core.setOutput).toHaveBeenCalledWith("thread_messages", "[]"); + })); + + test("logs readable Loop API errors for failed responses", async () => + withTempDir(async (tempDir) => { + fs.writeFileSync( + path.join(tempDir, "e2e_report_replicated.json"), + JSON.stringify({ + cluster: "replicated", + storageType: "replicated", + reportKind: "tests", + branch: "main", + workflowRunUrl: "https://example.invalid/replicated", + startedAt: "2026-04-15T09:30:44", + metrics: { + passed: 11, + skipped: 0, + failed: 0, + errors: 0, + total: 11, + successRate: 100, + }, + failedTests: [], + }) + ); + + process.env.REPORTS_DIR = tempDir; + process.env.EXPECTED_STORAGE_TYPES = '["replicated"]'; + process.env.LOOP_API_BASE_URL = "https://loop.example.invalid"; + process.env.LOOP_CHANNEL_ID = "channel-id"; + process.env.LOOP_TOKEN = "loop-token"; + + const core = createCore(); + global.fetch = jest.fn().mockResolvedValue({ + ok: false, + status: 500, + text: async () => "server exploded", + }); + + await renderMessengerReport({ core }); + + expect(global.fetch).toHaveBeenCalledTimes(1); + expect(core.warning).toHaveBeenCalledWith( + "Unable to deliver report to Loop API: Loop API request failed with status 500: server exploded" + ); + })); +}); diff --git a/.github/scripts/js/e2e/report/messenger/config.js b/.github/scripts/js/e2e/report/messenger/config.js new file mode 100644 index 0000000000..12fa7dc659 --- /dev/null +++ b/.github/scripts/js/e2e/report/messenger/config.js @@ -0,0 +1,112 @@ +// Copyright 2026 Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * Normalizes the configured Loop API base URL to the `/api/v4/posts` endpoint. + * + * @param {string} value Raw Loop API base URL. + * @returns {string} Normalized posts endpoint URL or an empty string. + */ +function normalizeLoopApiBaseUrl(value) { + const trimmedValue = String(value || "") + .trim() + .replace(/\/+$/, ""); + + if (!trimmedValue) { + return ""; + } + + if (trimmedValue.endsWith("/api/v4/posts")) { + return trimmedValue; + } + + if (trimmedValue.endsWith("/api/v4")) { + return `${trimmedValue}/posts`; + } + + return `${trimmedValue}/api/v4/posts`; +} + +// Fallback used only when EXPECTED_STORAGE_TYPES is not set (e.g. local runs or tests). +// In CI the list is passed explicitly via the EXPECTED_STORAGE_TYPES env variable. +const defaultConfiguredClusters = ["replicated", "nfs"]; + +/** + * Parses the configured cluster list passed via workflow environment variables. + * Returns the default cluster list when the value is absent, is not valid JSON, + * or parses to a non-array value (e.g. an object `{}`). + * + * @param {string} value JSON-encoded array of cluster names, e.g. '["replicated","nfs"]'. + * @returns {string[]} Ordered cluster names. + */ +function parseConfiguredClusters(value) { + try { + const parsed = JSON.parse(value || "{}"); + return Array.isArray(parsed) ? parsed : defaultConfiguredClusters; + } catch { + return defaultConfiguredClusters; + } +} + +/** + * Reads Loop credentials from the environment. + * + * Returns `null` when none of the Loop variables are set, indicating that the + * messenger integration is intentionally disabled (e.g. local runs or forks). + * Throws when only some variables are present — that is always a configuration + * mistake and should surface as an error rather than a silent no-op. + * + * @param {NodeJS.ProcessEnv} [env=process.env] Environment variables source. 
+ * @returns {{ apiUrl: string, channelId: string, token: string } | null} + */ +function readLoopConfig(env = process.env) { + const apiUrl = normalizeLoopApiBaseUrl(env.LOOP_API_BASE_URL); + const channelId = String(env.LOOP_CHANNEL_ID || "").trim(); + const token = String(env.LOOP_TOKEN || "").trim(); + + if (!apiUrl && !channelId && !token) { + return null; + } + if (!apiUrl || !channelId || !token) { + throw new Error( + "LOOP_CHANNEL_ID, LOOP_TOKEN, and LOOP_API_BASE_URL are required" + ); + } + return { apiUrl, channelId, token }; +} + +/** + * Reads messenger configuration from the environment. + * + * @param {NodeJS.ProcessEnv} [env=process.env] Environment variables source. + * @returns {{ + * reportsDir: string, + * configuredClusters: string[], + * loop: { apiUrl: string, channelId: string, token: string } | null + * }} Normalized messenger configuration. + */ +function readMessengerConfigFromEnv(env = process.env) { + const configuredClusters = env.EXPECTED_STORAGE_TYPES + ? parseConfiguredClusters(env.EXPECTED_STORAGE_TYPES) + : defaultConfiguredClusters; + + return { + reportsDir: env.REPORTS_DIR || "downloaded-artifacts", + configuredClusters, + loop: readLoopConfig(env), + }; +} + +module.exports = { + readLoopConfig, + readMessengerConfigFromEnv, +}; diff --git a/.github/scripts/js/e2e/report/messenger/loop-client.js b/.github/scripts/js/e2e/report/messenger/loop-client.js new file mode 100644 index 0000000000..b30b62b003 --- /dev/null +++ b/.github/scripts/js/e2e/report/messenger/loop-client.js @@ -0,0 +1,142 @@ +// Copyright 2026 Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @typedef {Object} LoopClientCore + * @property {function(string): void} warning + * @property {function(string): void} [info] + * @property {function(string, string): void} [setOutput] + */ + +/** + * @typedef {Object} LoopPostRequest + * @property {string} apiUrl + * @property {string} channelId + * @property {string} token + * @property {string} message + * @property {string} [rootId] + */ + +/** + * @typedef {Object} LoopPublishParams + * @property {string} message + * @property {string[]} threadMessages + * @property {{ apiUrl: string, channelId: string, token: string }} loop + */ + +/** + * Parses a Loop API response body if it is JSON, otherwise returns an empty + * object and emits a warning for diagnostics. + * + * @param {string} responseText Raw response body. + * @param {LoopClientCore} core GitHub core API. + * @returns {Record} Parsed response payload or an empty object. + */ +function parseLoopApiPayload(responseText, core) { + if (!responseText) { + return {}; + } + + try { + return JSON.parse(responseText); + } catch (error) { + core.warning( + `Loop API returned a non-JSON response body: ${error.message}` + ); + return {}; + } +} + +/** + * Sends a single post to Loop and returns the parsed API payload. + * + * @param {LoopPostRequest} request Loop API request payload. + * @param {LoopClientCore} core GitHub core API. + * @returns {Promise>} Parsed Loop API response. 
+ */ +async function postToLoopApi( + { apiUrl, channelId, token, message, rootId }, + core +) { + const response = await fetch(apiUrl, { + method: "POST", + headers: { + Authorization: `Bearer ${token}`, + "Content-Type": "application/json", + }, + body: JSON.stringify({ + channel_id: channelId, + message, + ...(rootId ? { root_id: rootId } : {}), + }), + }); + const responseText = await response.text(); + + if (!response.ok) { + throw new Error( + `Loop API request failed with status ${response.status}: ${responseText}` + ); + } + + const payload = parseLoopApiPayload(responseText, core); + core.info(`Loop API accepted report with status ${response.status}`); + return payload; +} + +/** + * Publishes the main report and optional failed-tests thread to Loop. + * + * @param {LoopPublishParams} params Message payload and Loop credentials. + * @param {LoopClientCore} core GitHub core API. + * @returns {Promise} + */ +async function makeThreadedReportInLoop({ message, threadMessages, loop }, core) { + const rootPost = await postToLoopApi( + { + apiUrl: loop.apiUrl, + channelId: loop.channelId, + token: loop.token, + message, + }, + core + ); + + if (!rootPost.id) { + throw new Error( + "Loop API did not return a post id; thread replies cannot be attached" + ); + } + + let lastReplyPost = null; + for (const replyMessage of threadMessages) { + lastReplyPost = await postToLoopApi( + { + apiUrl: loop.apiUrl, + channelId: loop.channelId, + token: loop.token, + message: replyMessage, + rootId: rootPost.id, + }, + core + ); + } + + core.setOutput("root_post_id", rootPost.id || ""); + core.setOutput( + "thread_post_id", + lastReplyPost && lastReplyPost.id ? 
lastReplyPost.id : "" + ); +} + +module.exports = { + makeThreadedReportInLoop, +}; diff --git a/.github/scripts/js/e2e/report/messenger/markdown.js b/.github/scripts/js/e2e/report/messenger/markdown.js new file mode 100644 index 0000000000..75aa427452 --- /dev/null +++ b/.github/scripts/js/e2e/report/messenger/markdown.js @@ -0,0 +1,251 @@ +// Copyright 2026 Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const { + getReportClusterKey, + getReportDate, + isClusterFailureReport, + isMissingReport, + isTestResultReport, +} = require("./model"); + +function sanitizeCell(value) { + return String(value || "—") + .replace(/\|/g, "\\|") + .replace(/\r?\n/g, " ") + .trim(); +} + +function sanitizeListItem(value) { + return String(value || "") + .replace(/\r?\n/g, " ") + .trim(); +} + +function formatRate(value) { + const rate = Number(value || 0); + return `${Number.isFinite(rate) ? rate.toFixed(2) : "0.00"}%`; +} + +function formatClusterLink(report) { + const clusterName = sanitizeCell(report.cluster || report.storageType); + return report.workflowRunUrl + ? 
`[${clusterName}](${report.workflowRunUrl})` + : clusterName; +} + +function splitReportsBySection(orderedReports) { + const testsReports = orderedReports.filter( + (report) => isTestResultReport(report) && getReportClusterKey(report) + ); + const stageFailureReports = orderedReports.filter( + (report) => isClusterFailureReport(report) && getReportClusterKey(report) + ); + const missingReports = orderedReports.filter( + (report) => + isMissingReport(report) && + !isClusterFailureReport(report) && + getReportClusterKey(report) + ); + + return { + testsReports, + stageFailureReports, + missingReports, + }; +} + +function renderBranchLine(orderedReports) { + const branches = Array.from( + new Set(orderedReports.map((report) => report.branch).filter(Boolean)) + ); + + return branches.length === 1 && branches[0] !== "main" + ? [`Branch: \`${branches[0]}\``, ""] + : []; +} + +function renderTestResultsSection(testsReports) { + const lines = []; + + if (testsReports.length > 0) { + lines.push("### Test results"); + lines.push(""); + lines.push( + "| Cluster | ✅ Passed | ⏭️ Skipped | ❌ Failed | ⚠️ Errors | Total | Success Rate |" + ); + lines.push("|---|---:|---:|---:|---:|---:|---:|"); + + for (const report of testsReports) { + const metrics = report.metrics || {}; + lines.push( + `| ${formatClusterLink(report)} | ${metrics.passed || 0} | ${ + metrics.skipped || 0 + } | ${metrics.failed || 0} | ${metrics.errors || 0} | ${ + metrics.total || 0 + } | ${formatRate(metrics.successRate)} |` + ); + } + + lines.push(""); + } + + return lines; +} + +function renderClusterFailuresSection(stageFailureReports) { + const lines = []; + + if (stageFailureReports.length > 0) { + lines.push("### Cluster failures"); + lines.push(""); + + for (const report of stageFailureReports) { + lines.push( + `- ${formatClusterLink(report)}: ${sanitizeListItem( + (report.clusterStatus && report.clusterStatus.message) || + report.statusMessage || + report.failedStageLabel || + report.failedStage + )}` 
+ ); + } + + lines.push(""); + } + + return lines; +} + +function renderMissingReportsSection(missingReports) { + const lines = []; + + if (missingReports.length > 0) { + lines.push("### Missing reports"); + lines.push(""); + + for (const report of missingReports) { + lines.push( + `- ${formatClusterLink(report)}: ${sanitizeListItem( + report.statusMessage || + (report.testStatus && report.testStatus.message) || + (report.clusterStatus && report.clusterStatus.message) || + report.failedStageLabel + )}` + ); + } + + lines.push(""); + } + + return lines; +} + +/** + * Builds the main E2E messenger report body. + * + * @param {Array>} orderedReports Cluster reports in display order. + * @returns {string} Markdown message body. + */ +function buildMainMessage(orderedReports) { + const reportDate = getReportDate(orderedReports); + const { testsReports, stageFailureReports, missingReports } = + splitReportsBySection(orderedReports); + const lines = [ + `## :dvp: DVP | E2E on nested clusters | ${reportDate}`, + "", + ...renderBranchLine(orderedReports), + ...renderClusterFailuresSection(stageFailureReports), + ...renderMissingReportsSection(missingReports), + ...renderTestResultsSection(testsReports), + ]; + + return lines.join("\n").trim(); +} + +function hasFailedTests(report) { + if (Array.isArray(report.failedTests) && report.failedTests.length > 0) { + return true; + } + + return Boolean( + (report.testStatus && report.testStatus.status === "failure") || + (report.metrics && report.metrics.failed) || + (report.metrics && report.metrics.errors) + ); +} + +function getFailedTestGroupName(testName) { + const normalizedName = sanitizeListItem(testName).replace( + /^\[[^\]]+\]\s*/, + "" + ); + const [groupName] = normalizedName.split(/\s+/, 1); + return groupName || "Unknown"; +} + +function summarizeFailedTestGroups(failedTests) { + return [...new Set(failedTests.map(getFailedTestGroupName))]; +} + +function renderFailedTestsThreadMessage(report) { + const clusterName = 
sanitizeListItem(report.cluster || report.storageType); + const lines = [`**${clusterName}**`]; + + if (Array.isArray(report.failedTests) && report.failedTests.length > 0) { + const failedGroups = summarizeFailedTestGroups(report.failedTests); + lines.push(""); + lines.push("| Test group |"); + lines.push("|---|"); + for (const groupName of failedGroups) { + lines.push(`| ${sanitizeCell(groupName)} |`); + } + } else { + lines.push( + `- ${ + sanitizeListItem(report.testStatus && report.testStatus.message) || + "No testcase-level failures were collected, but the E2E stage reported failures." + }` + ); + } + + return lines.join("\n"); +} + +/** + * Builds optional failed-tests thread messages for clusters with failed tests. + * + * @param {Array>} orderedReports Cluster reports in display order. + * @returns {string[]} Markdown thread message bodies. + */ +function buildThreadMessages(orderedReports) { + const testsReports = orderedReports.filter((report) => + isTestResultReport(report) + ); + const failedTestReports = testsReports.filter(hasFailedTests); + + if (failedTestReports.length === 0) { + return []; + } + + return failedTestReports.map((report, index) => { + const clusterMessage = renderFailedTestsThreadMessage(report); + return index === 0 + ? ["### Failed tests", clusterMessage].join("\n\n") + : clusterMessage; + }); +} + +module.exports = { + buildMainMessage, + buildThreadMessages, +}; diff --git a/.github/scripts/js/e2e/report/messenger/model.js b/.github/scripts/js/e2e/report/messenger/model.js new file mode 100644 index 0000000000..e3ca960c29 --- /dev/null +++ b/.github/scripts/js/e2e/report/messenger/model.js @@ -0,0 +1,95 @@ +// Copyright 2026 Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const { + buildStatusMessage, + isClusterFailureReport, + isMissingReport, + isTestResultReport, + zeroMetrics, +} = require("../shared/report-model"); + +const genericArtifactMissingLabel = "E2E REPORT ARTIFACT NOT FOUND"; + +/** + * Creates a synthetic cluster report when the expected JSON artifact is absent. + * + * This allows the final messenger message to stay informative even when the + * report-preparation step failed or never produced an artifact. + * + * @param {string} clusterName Cluster or storage name. + * @returns {Record} Synthetic report payload. + */ +function createMissingReport(clusterName) { + return { + schemaVersion: 1, + cluster: clusterName, + storageType: clusterName, + reportKind: "artifact-missing", + status: "missing", + statusMessage: buildStatusMessage("missing", genericArtifactMissingLabel), + failedStage: "artifact-missing", + failedStageLabel: genericArtifactMissingLabel, + branch: "", + workflowRunUrl: "", + clusterStatus: { + status: "missing", + stage: "artifact-missing", + stageLabel: genericArtifactMissingLabel, + message: buildStatusMessage("missing", genericArtifactMissingLabel), + reason: "cluster-report-artifact-missing", + }, + testStatus: { + status: "not-run", + reason: "cluster-report-artifact-missing", + message: + "E2E status is unavailable because cluster report artifact was not found", + }, + metrics: zeroMetrics(), + failedTests: [], + reportSource: "missing-artifact", + }; +} + +/** + * Picks a report date from the first report that exposes `startedAt`. 
+ * + * @param {Array>} reports Available cluster reports. + * @returns {string} ISO date string (`YYYY-MM-DD`). + */ +function getReportDate(reports) { + const datedReport = reports.find((report) => report.startedAt); + if (!datedReport) { + return new Date().toISOString().slice(0, 10); + } + + return String(datedReport.startedAt).slice(0, 10); +} + +/** + * Extracts the normalized cluster key from a report payload. + * + * @param {Record} report Cluster report payload. + * @returns {string} Cluster key or an empty string when it is missing. + */ +function getReportClusterKey(report) { + return String(report.storageType || report.cluster || "").trim(); +} + +module.exports = { + createMissingReport, + getReportClusterKey, + getReportDate, + isClusterFailureReport, + isMissingReport, + isTestResultReport, +}; diff --git a/.github/scripts/js/e2e/report/shared/fs-utils.js b/.github/scripts/js/e2e/report/shared/fs-utils.js new file mode 100644 index 0000000000..1b66b7a3ff --- /dev/null +++ b/.github/scripts/js/e2e/report/shared/fs-utils.js @@ -0,0 +1,82 @@ +// Copyright 2026 Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+const fs = require("fs");
+const path = require("path");
+
+/**
+ * Depth-first walk that appends files matching `filePattern` to `acc`.
+ *
+ * Entries are visited in `localeCompare` order so results are deterministic.
+ * A missing `dirPath` is treated as "no matches" rather than an error.
+ *
+ * @param {string} dirPath Directory to scan.
+ * @param {RegExp} filePattern Regular expression applied to file base names.
+ * @param {string[]} acc Accumulator receiving matching paths under `dirPath`.
+ */
+function collectMatchingFiles(dirPath, filePattern, acc) {
+  if (!fs.existsSync(dirPath)) {
+    return;
+  }
+
+  let entries;
+  try {
+    entries = fs
+      .readdirSync(dirPath, { withFileTypes: true })
+      .sort((left, right) => left.name.localeCompare(right.name));
+  } catch (error) {
+    // Wrap the raw fs error so the failing directory is part of the message.
+    throw new Error(`Unable to scan directory ${dirPath}: ${error.message}`);
+  }
+
+  for (const entry of entries) {
+    const fullPath = path.join(dirPath, entry.name);
+    if (entry.isDirectory()) {
+      // Recurse into subdirectories; only file base names are matched.
+      collectMatchingFiles(fullPath, filePattern, acc);
+    } else if (filePattern.test(entry.name)) {
+      acc.push(fullPath);
+    }
+  }
+}
+
+/**
+ * Recursively collects files whose base name matches the provided pattern.
+ *
+ * @param {string} dirPath Directory to scan.
+ * @param {RegExp} filePattern Regular expression applied to file names.
+ * @returns {string[]} Matching file paths.
+ */
+function listMatchingFiles(dirPath, filePattern) {
+  const acc = [];
+  collectMatchingFiles(dirPath, filePattern, acc);
+  return acc;
+}
+
+/**
+ * Resolves a single file matching the provided pattern.
+ *
+ * @param {string} dirPath Directory containing candidate files.
+ * @param {RegExp} filePattern Pattern matching the expected file name.
+ * @param {string} [description="file"] Human-readable file kind for errors.
+ * @returns {string|null} Matching file path or null when no match exists.
+ * @throws {Error} When more than one matching file is found.
+ */ +function findSingleMatchingFile(dirPath, filePattern, description = "file") { + const matchingFiles = listMatchingFiles(dirPath, filePattern); + if (matchingFiles.length === 0) { + return null; + } + + if (matchingFiles.length > 1) { + throw new Error( + `Expected a single ${description}, but found ${ + matchingFiles.length + }: ${matchingFiles.join(", ")}` + ); + } + + return matchingFiles[0]; +} + +module.exports = { + findSingleMatchingFile, + listMatchingFiles, +}; diff --git a/.github/scripts/js/e2e/report/shared/fs-utils.test.js b/.github/scripts/js/e2e/report/shared/fs-utils.test.js new file mode 100644 index 0000000000..0ab496fbbe --- /dev/null +++ b/.github/scripts/js/e2e/report/shared/fs-utils.test.js @@ -0,0 +1,66 @@ +// Copyright 2026 Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const fs = require("fs"); +const os = require("os"); +const path = require("path"); + +const { listMatchingFiles } = require("./fs-utils"); + +/** + * Runs a test body inside a temporary directory and removes it afterwards. + * + * @template T + * @param {function(string): (Promise|T)} testFn Test body. + * @returns {Promise} Test result. 
+ */ +async function withTempDir(testFn) { + const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), "fs-utils-test-")); + try { + return await testFn(tempDir); + } finally { + fs.rmSync(tempDir, { recursive: true, force: true }); + } +} + +describe("fs-utils", () => { + test("returns sorted matching files recursively", async () => + withTempDir((tempDir) => { + const nestedDir = path.join(tempDir, "nested"); + fs.mkdirSync(nestedDir, { recursive: true }); + fs.writeFileSync(path.join(tempDir, "b.json"), "{}\n"); + fs.writeFileSync(path.join(tempDir, "a.txt"), "nope\n"); + fs.writeFileSync(path.join(nestedDir, "a.json"), "{}\n"); + + expect(listMatchingFiles(tempDir, /\.json$/)).toEqual([ + path.join(tempDir, "b.json"), + path.join(nestedDir, "a.json"), + ]); + })); + + test("throws a descriptive error when a directory cannot be scanned", async () => + withTempDir((tempDir) => { + const readdirSpy = jest + .spyOn(fs, "readdirSync") + .mockImplementation(() => { + throw new Error("permission denied"); + }); + + try { + expect(() => listMatchingFiles(tempDir, /\.json$/)).toThrow( + `Unable to scan directory ${tempDir}: permission denied` + ); + } finally { + readdirSpy.mockRestore(); + } + })); +}); diff --git a/.github/scripts/js/e2e/report/shared/ginkgo-report-utils.js b/.github/scripts/js/e2e/report/shared/ginkgo-report-utils.js new file mode 100644 index 0000000000..5cec91699b --- /dev/null +++ b/.github/scripts/js/e2e/report/shared/ginkgo-report-utils.js @@ -0,0 +1,171 @@ +// Copyright 2026 Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +const { zeroMetrics } = require("./report-model"); + +/** + * @typedef {Object} GinkgoMetrics + * @property {number} passed + * @property {number} failed + * @property {number} errors + * @property {number} skipped + * @property {number} total + * @property {number} successRate + */ + +/** + * Normalizes a value into an array. + * + * @param {any} value Input value. + * @returns {any[]} Array view of the input. + */ +function toArray(value) { + if (!value) { + return []; + } + + return Array.isArray(value) ? value : [value]; +} + +/** + * Flattens nested Ginkgo label arrays into a stable, unique list. + * + * @param {Array|string[]|null} labelGroups Raw label data. + * @returns {string[]} Flattened unique labels. + */ +function flattenLabels(labelGroups) { + const seen = new Set(); + const labels = []; + + for (const group of toArray(labelGroups)) { + for (const label of toArray(group)) { + const normalizedLabel = String(label || "").trim(); + if (normalizedLabel && !seen.has(normalizedLabel)) { + seen.add(normalizedLabel); + labels.push(normalizedLabel); + } + } + } + + return labels; +} + +/** + * Builds a human-readable test name close to the JUnit testcase naming that + * existing reports already expose to messenger output. + * + * @param {Record} specReport Raw Ginkgo spec report entry. + * @returns {string} Formatted test name. 
+ */ +function formatSpecName(specReport) { + const nodeType = String(specReport.LeafNodeType || "Spec").trim(); + const hierarchyParts = toArray(specReport.ContainerHierarchyTexts) + .map((part) => String(part || "").trim()) + .filter(Boolean); + const leafText = String(specReport.LeafNodeText || "").trim(); + const labels = [...new Set([ + ...flattenLabels(specReport.ContainerHierarchyLabels), + ...flattenLabels(specReport.LeafNodeLabels), + ])]; + const labelSuffix = labels.map((label) => `[${label}]`).join(" "); + const body = [...hierarchyParts, leafText].filter(Boolean).join(" "); + + return [`[${nodeType}]`, body, labelSuffix] + .filter(Boolean) + .join(" ") + .replace(/\s+/g, " ") + .trim(); +} + +/** + * Maps a raw Ginkgo spec state into the metrics bucket used by the final + * messenger report. + * + * @param {string} state Raw `SpecReport.State` value. + * @returns {"passed"|"failed"|"errors"|"skipped"} Metrics key. + */ +function getMetricKeyForState(state) { + const normalizedState = String(state || "") + .trim() + .toLowerCase(); + + if (normalizedState === "passed") { + return "passed"; + } + + if (normalizedState === "failed") { + return "failed"; + } + + if (normalizedState === "skipped" || normalizedState === "pending") { + return "skipped"; + } + + return "errors"; +} + +/** + * Parses a Ginkgo JSON report into metrics and failed test names used by the + * markdown report. + * + * @param {string} jsonContent Raw JSON content. + * @returns {{ + * metrics: GinkgoMetrics, + * failedTests: string[], + * startedAt: string|null + * }} Parsed report payload. 
+ */
+function parseGinkgoReport(jsonContent) {
+  // NOTE(review): JSON.parse throws on malformed input; callers appear to
+  // map that to the "ginkgo-json-invalid" report source — confirm against
+  // shared/report-model.js.
+  const suites = toArray(JSON.parse(jsonContent));
+  const metrics = zeroMetrics();
+  const failedTests = [];
+  // Use the first suite that exposes StartTime; a report may hold several
+  // suites and not all of them carry a timestamp.
+  const startedAt =
+    suites.find((suite) => suite && suite.StartTime)?.StartTime || null;
+
+  for (const suite of suites) {
+    for (const specReport of toArray(suite && suite.SpecReports)) {
+      // SpecReports can contain suite-level setup/teardown entries
+      // (BeforeSuite, AfterSuite, etc.) in addition to regular specs.
+      // `Specify` is a pure alias for `It` and serializes to the same
+      // "It" value. We only count actual spec nodes in the metrics.
+      if (String(specReport && specReport.LeafNodeType) !== "It") {
+        continue;
+      }
+
+      metrics.total += 1;
+      const metricKey = getMetricKeyForState(specReport.State);
+      metrics[metricKey] += 1;
+
+      if (metricKey === "failed" || metricKey === "errors") {
+        const specName = formatSpecName(specReport);
+        if (specName) {
+          failedTests.push(specName);
+        }
+      }
+    }
+  }
+
+  // Success rate covers counted "It" nodes only, rounded to two decimals.
+  metrics.successRate =
+    metrics.total > 0
+      ? Number(((metrics.passed / metrics.total) * 100).toFixed(2))
+      : 0;
+
+  return {
+    metrics,
+    // De-duplicate failed test names before returning.
+    failedTests: Array.from(new Set(failedTests)),
+    startedAt,
+  };
+}
+
+module.exports = {
+  parseGinkgoReport,
+};
diff --git a/.github/scripts/js/e2e/report/shared/report-model.js b/.github/scripts/js/e2e/report/shared/report-model.js
new file mode 100644
index 0000000000..d0f713c098
--- /dev/null
+++ b/.github/scripts/js/e2e/report/shared/report-model.js
@@ -0,0 +1,285 @@
+// Copyright 2026 Flant JSC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +/** Matches every `e2e_report_*.json` file produced by the pipeline. */ +const REPORT_FILE_PATTERN = /^e2e_report_.*\.json$/; + +/** + * Returns the canonical report file name for a given storage type. + * @param {string} storageType + * @returns {string} + */ +function reportFileName(storageType) { + return `e2e_report_${storageType}.json`; +} + +/** + * Returns a regex that matches dated archive copies of a report file, + * e.g. `e2e_report_replicated_2026-04-15.json`. + * @param {string} storageType + * @returns {RegExp} + */ +function archivedReportPattern(storageType) { + const escaped = storageType.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); + return new RegExp(`^e2e_report_${escaped}_.*\\.json$`); +} + +const stageMessage = { + "bootstrap": "BOOTSTRAP CLUSTER", + "configure-sdn": "CONFIGURE SDN", + "storage-setup": "STORAGE SETUP", + "virtualization-setup": "VIRTUALIZATION SETUP", + "e2e-test": "E2E TEST", + "ready": "CLUSTER READY", + "artifact-missing": "TEST REPORTS NOT FOUND", +}; + +const clusterSetupStages = [ + "bootstrap", + "configure-sdn", + "storage-setup", + "virtualization-setup", +]; + +function zeroMetrics() { + return { + passed: 0, + failed: 0, + errors: 0, + skipped: 0, + total: 0, + successRate: 0, + }; +} + +function buildStatusMessage(status, stageLabel) { + if (status === "success") { + return `✅ ${stageLabel}`; + } + + if (status === "cancelled") { + return `⚠️ ${stageLabel} CANCELLED`; + } + + if (status === "skipped") { + return `⚠️ ${stageLabel} SKIPPED`; + } + + if (status === "missing") { + return `⚠️ ${stageLabel}`; + } + + if (status === "not-run") { + return `⚠️ ${stageLabel} NOT RUN`; + } + + return `❌ ${stageLabel} FAILED`; +} + +function normalizeJobResult(resultValue) { + const result = String(resultValue || "success").trim(); + if (result === "cancelled" || result === "skipped" || result === "success") { + return 
result; + } + + return "failure"; +} + +function buildClusterStatus(stageResults) { + for (const stageName of clusterSetupStages) { + const stageResult = normalizeJobResult(stageResults[stageName]); + if (stageResult !== "success") { + const stageLabel = stageMessage[stageName] || stageName; + const status = + stageResult === "cancelled" + ? "cancelled" + : stageResult === "skipped" + ? "skipped" + : "failure"; + return { + status, + stage: stageName, + stageLabel, + message: buildStatusMessage(stageResult, stageLabel), + reason: `cluster-stage-${status}`, + }; + } + } + + return { + status: "success", + stage: "ready", + stageLabel: stageMessage.ready, + message: buildStatusMessage("success", stageMessage.ready), + reason: "", + }; +} + +function buildTestStatus( + testResult, + reportSource, + clusterStatus, + metrics = {} +) { + const stageLabel = stageMessage["e2e-test"]; + + if (clusterStatus.status !== "success") { + return { + status: "not-run", + reason: `cluster-stage-${clusterStatus.status}`, + message: "E2E tests were not run because cluster setup did not finish", + }; + } + + const normalizedResult = normalizeJobResult(testResult); + + if (reportSource === "ginkgo-json") { + const hasReportedFailures = + Number(metrics.failed || 0) > 0 || Number(metrics.errors || 0) > 0; + const status = + normalizedResult === "success" && hasReportedFailures + ? "failure" + : normalizedResult; + + return { + status, + reason: status === "success" ? "" : "ginkgo-failed", + message: + status === "success" + ? 
"✅ E2E TESTS PASSED" + : buildStatusMessage(status, stageLabel), + }; + } + + if (reportSource === "ginkgo-json-invalid") { + return { + status: "missing", + reason: "ginkgo-report-invalid", + message: "⚠️ E2E TEST REPORT IS INVALID", + }; + } + + if (normalizedResult === "success") { + return { + status: "missing", + reason: "ginkgo-report-missing", + message: "⚠️ E2E TEST REPORT NOT FOUND", + }; + } + + if (normalizedResult === "cancelled") { + return { + status: "cancelled", + reason: "e2e-cancelled", + message: buildStatusMessage("cancelled", stageLabel), + }; + } + + if (normalizedResult === "skipped") { + return { + status: "not-run", + reason: "e2e-skipped", + message: buildStatusMessage("not-run", stageLabel), + }; + } + + return { + status: "failure", + reason: "ginkgo-report-missing", + message: "❌ E2E TESTS FAILED, GINKGO REPORT NOT FOUND", + }; +} + +function buildReportSummary(storageType, clusterStatus, testStatus) { + if (clusterStatus.status !== "success") { + return { + failedStage: clusterStatus.stage, + failedStageLabel: clusterStatus.stageLabel, + failedJobName: `${clusterStatus.stageLabel} (${storageType})`, + reportKind: "stage-failure", + status: clusterStatus.status, + statusMessage: clusterStatus.message, + }; + } + + if (testStatus.status === "missing") { + const stageLabel = stageMessage["artifact-missing"]; + return { + failedStage: "artifact-missing", + failedStageLabel: stageLabel, + failedJobName: `E2E test (${storageType})`, + reportKind: "artifact-missing", + status: "missing", + statusMessage: testStatus.message, + }; + } + + return { + failedStage: testStatus.status === "success" ? "success" : "e2e-test", + failedStageLabel: + testStatus.status === "success" ? 
"SUCCESS" : stageMessage["e2e-test"], + failedJobName: `E2E test (${storageType})`, + reportKind: "tests", + status: testStatus.status, + statusMessage: testStatus.message, + }; +} + +function isMissingReport(report) { + return ( + (report.testStatus && report.testStatus.status === "missing") || + (report.clusterStatus && report.clusterStatus.status === "missing") || + report.reportKind === "artifact-missing" || + report.failedStage === "artifact-missing" || + report.status === "missing" + ); +} + +function isClusterFailureReport(report) { + if (report.clusterStatus) { + return ( + report.clusterStatus.status !== "success" && + report.clusterStatus.status !== "missing" + ); + } + + return report.reportKind !== "tests" && !isMissingReport(report); +} + +function isTestResultReport(report) { + if (report.clusterStatus && report.clusterStatus.status !== "success") { + return false; + } + + if (report.testStatus) { + return ( + report.testStatus.status !== "not-run" && + report.testStatus.status !== "missing" + ); + } + + return report.reportKind === "tests"; +} + +module.exports = { + archivedReportPattern, + buildClusterStatus, + buildReportSummary, + buildStatusMessage, + buildTestStatus, + isClusterFailureReport, + isMissingReport, + isTestResultReport, + REPORT_FILE_PATTERN, + reportFileName, + zeroMetrics, +}; diff --git a/.github/scripts/js/eslint.config.cjs b/.github/scripts/js/eslint.config.cjs new file mode 100644 index 0000000000..4b2e3fb424 --- /dev/null +++ b/.github/scripts/js/eslint.config.cjs @@ -0,0 +1,34 @@ +module.exports = [ + { + ignores: ['node_modules/**'], + }, + { + files: ['**/*.js'], + languageOptions: { + ecmaVersion: 2022, + sourceType: 'commonjs', + globals: { + __dirname: 'readonly', + afterEach: 'readonly', + beforeEach: 'readonly', + Buffer: 'readonly', + console: 'readonly', + describe: 'readonly', + expect: 'readonly', + fetch: 'readonly', + global: 'readonly', + jest: 'readonly', + module: 'readonly', + process: 'readonly', + 
require: 'readonly', + setTimeout: 'readonly', + test: 'readonly', + }, + }, + rules: { + 'consistent-return': 'error', + 'no-shadow': 'error', + 'no-unused-vars': ['error', {argsIgnorePattern: '^_'}], + }, + }, +]; diff --git a/.github/scripts/js/package.json b/.github/scripts/js/package.json index 6a8471c4e1..7d57cc285c 100644 --- a/.github/scripts/js/package.json +++ b/.github/scripts/js/package.json @@ -4,7 +4,8 @@ "description": "", "main": "index.js", "scripts": { - "fmt": "prettier --write ./*.js", + "fmt": "prettier --write \"e2e/report/**/*.js\"", + "lint": "eslint \"e2e/report/**/*.js\"", "test": "jest" }, "keywords": [], @@ -15,6 +16,7 @@ "@actions/github": "^5.1.1", "@octokit/graphql": "^4.8.0", "@types/node": "^16.11.11", + "eslint": "^10.2.1", "jest": "28.1.2", "prettier": "^2.5.0" } diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index 0bf8ab0e0f..80144a7003 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -420,6 +420,7 @@ jobs: uses: ./.github/workflows/e2e-reusable-pipeline.yml with: storage_type: replicated + pipeline_job_name: "E2E Pipeline (Replicated)" nested_storageclass_name: nested-thin-r1 nested_cluster_network_name: cn-4006-for-e2e-test branch: main @@ -446,6 +447,7 @@ jobs: uses: ./.github/workflows/e2e-reusable-pipeline.yml with: storage_type: nfs + pipeline_job_name: "E2E Pipeline (NFS)" nested_storageclass_name: nfs nested_cluster_network_name: cn-4006-for-e2e-test branch: main @@ -472,8 +474,6 @@ jobs: - e2e-replicated - e2e-nfs if: ${{ always()}} - env: - STORAGE_TYPES: '["replicated", "nfs"]' steps: - uses: actions/checkout@v4 @@ -487,254 +487,14 @@ jobs: merge-multiple: false - name: Send results to channel - run: | - # Map storage types to CSI names - get_csi_name() { - local storage_type=$1 - case "$storage_type" in - "replicated") - echo "replicated.csi.storage.deckhouse.io" - ;; - "nfs") - echo "nfs.csi.storage.deckhouse.io" - ;; - *) - echo "$storage_type" 
- ;; - esac - } - - # Function to load and parse report from artifact - # Outputs: file content to stdout, debug messages to stderr - # Works with pattern-based artifact download (e2e-report-*) - # Artifacts are organized as: downloaded-artifacts/e2e-report--/e2e_report_.json - load_report_from_artifact() { - local storage_type=$1 - local base_path="downloaded-artifacts/" - - echo "[INFO] Searching for report for storage type: $storage_type" >&2 - echo "[DEBUG] Base path: $base_path" >&2 - - if [ ! -d "$base_path" ]; then - echo "[WARN] Base path does not exist: $base_path" >&2 - return 1 - fi - - local report_file="" - - # First, search in artifact directories matching pattern: e2e-report--* - # Pattern downloads create subdirectories named after the artifact - # e.g., downloaded-artifacts/e2e-report-replicated-/e2e_report_replicated.json - echo "[DEBUG] Searching in artifact directories matching pattern: e2e-report-${storage_type}-*" >&2 - local artifact_dir=$(find "$base_path" -type d -name "e2e-report-${storage_type}-*" 2>/dev/null | head -1) - if [ -n "$artifact_dir" ]; then - echo "[DEBUG] Found artifact dir: $artifact_dir" >&2 - report_file=$(find "$artifact_dir" -name "e2e_report_*.json" -type f 2>/dev/null | head -1) - if [ -n "$report_file" ] && [ -f "$report_file" ]; then - echo "[INFO] Found report file in artifact dir: $report_file" >&2 - cat "$report_file" - return 0 - fi - fi - - # Fallback: search for file by name pattern anywhere in base_path - echo "[DEBUG] Searching for file: e2e_report_${storage_type}.json" >&2 - report_file=$(find "$base_path" -type f -name "e2e_report_${storage_type}.json" 2>/dev/null | head -1) - if [ -n "$report_file" ] && [ -f "$report_file" ]; then - echo "[INFO] Found report file by name: $report_file" >&2 - cat "$report_file" - return 0 - fi - - echo "[WARN] Could not load report artifact for $storage_type" >&2 - return 1 - } - - # Function to create failure summary JSON (fallback) - create_failure_summary() { - local 
storage_type=$1 - local stage=$2 - local run_id=$3 - local csi=$(get_csi_name "$storage_type") - local date=$(date +"%Y-%m-%d") - local time=$(date +"%H:%M:%S") - local branch="${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" - local link="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${run_id:-${GITHUB_RUN_ID}}" - - # Map stage to status message - local status_msg - case "$stage" in - "bootstrap") - status_msg=":x: BOOTSTRAP CLUSTER FAILED" - ;; - "storage-setup") - status_msg=":x: STORAGE SETUP FAILED" - ;; - "virtualization-setup") - status_msg=":x: VIRTUALIZATION SETUP FAILED" - ;; - "e2e-test") - status_msg=":x: E2E TEST FAILED" - ;; - *) - status_msg=":question: UNKNOWN" - ;; - esac - - jq -n \ - --arg csi "$csi" \ - --arg date "$date" \ - --arg time "$time" \ - --arg branch "$branch" \ - --arg status "$status_msg" \ - --arg link "$link" \ - '{CSI: $csi, Date: $date, StartTime: $time, Branch: $branch, Status: $status, Passed: 0, Failed: 0, Pending: 0, Skipped: 0, Link: $link}' - } - - - # Parse summary JSON and add to table - parse_summary() { - local summary_json=$1 - local storage_type=$2 - - if [ -z "$summary_json" ] || [ "$summary_json" == "null" ] || [ "$summary_json" == "" ]; then - echo "Warning: Empty summary for $storage_type" - return - fi - - # Try to parse as JSON (handle both JSON string and already parsed JSON) - if ! 
echo "$summary_json" | jq empty 2>/dev/null; then - echo "Warning: Invalid JSON for $storage_type: $summary_json" - echo "[DEBUG] json: $summary_json" - return - fi - - # Parse JSON fields - csi_raw=$(echo "$summary_json" | jq -r '.CSI // empty' 2>/dev/null) - if [ -z "$csi_raw" ] || [ "$csi_raw" == "null" ] || [ "$csi_raw" == "" ]; then - csi=$(get_csi_name "$storage_type") - else - csi="$csi_raw" - fi - - date=$(echo "$summary_json" | jq -r '.Date // ""' 2>/dev/null) - time=$(echo "$summary_json" | jq -r '.StartTime // ""' 2>/dev/null) - branch=$(echo "$summary_json" | jq -r '.Branch // ""' 2>/dev/null) - status=$(echo "$summary_json" | jq -r '.Status // ":question: UNKNOWN"' 2>/dev/null) - passed=$(echo "$summary_json" | jq -r '.Passed // 0' 2>/dev/null) - failed=$(echo "$summary_json" | jq -r '.Failed // 0' 2>/dev/null) - pending=$(echo "$summary_json" | jq -r '.Pending // 0' 2>/dev/null) - skipped=$(echo "$summary_json" | jq -r '.Skipped // 0' 2>/dev/null) - link=$(echo "$summary_json" | jq -r '.Link // ""' 2>/dev/null) - - # Set defaults if empty - [ -z "$passed" ] && passed=0 - [ -z "$failed" ] && failed=0 - [ -z "$pending" ] && pending=0 - [ -z "$skipped" ] && skipped=0 - [ -z "$status" ] && status=":question: UNKNOWN" - - # Format link - use CSI name as fallback if link is empty - if [ -z "$link" ] || [ "$link" == "" ]; then - link_text="$csi" - else - link_text="[:link: $csi]($link)" - fi - - # Add row to table - markdown_table+="| $link_text | $status | $passed | $failed | $pending | $skipped | $date | $time | $branch |\n" - } - - # Initialize markdown table - echo "[INFO] Generate markdown table" - markdown_table="" - header="| CSI | Status | Passed | Failed | Pending | Skipped | Date | Time | Branch|\n" - separator="|---|---|---|---|---|---|---|---|---|\n" - markdown_table+="$header" - markdown_table+="$separator" - - # Get current date for header - DATE=$(date +"%Y-%m-%d") - COMBINED_SUMMARY="## :dvp: **DVP | E2E on a nested cluster | $DATE**\n\n" - - 
echo "[INFO] Get storage types" - readarray -t storage_types < <(echo "$STORAGE_TYPES" | jq -r '.[]') - echo "[INFO] Storage types: " "${storage_types[@]}" - - echo "[INFO] Generate summary for each storage type" - for storage in "${storage_types[@]}"; do - echo "[INFO] Processing $storage" - - # Try to load report from artifact - # Debug messages go to stderr (visible in logs), JSON content goes to stdout - echo "[INFO] Attempting to load report for $storage" - structured_report=$(load_report_from_artifact "$storage" || true) - - if [ -n "$structured_report" ]; then - # Check if it's valid JSON - if echo "$structured_report" | jq empty 2>/dev/null; then - echo "[INFO] Report is valid JSON for $storage" - else - echo "[WARN] Report is not valid JSON for $storage" - echo "[DEBUG] Raw report content (first 200 chars):" - echo "$structured_report" | head -c 200 - echo "" - structured_report="" - fi - fi - - if [ -n "$structured_report" ] && echo "$structured_report" | jq empty 2>/dev/null; then - # Extract report data from structured file - report_json=$(echo "$structured_report" | jq -c '.report // empty') - failed_stage=$(echo "$structured_report" | jq -r '.failed_stage // empty') - workflow_run_id=$(echo "$structured_report" | jq -r '.workflow_run_id // empty') - - echo "[INFO] Loaded report for $storage (failed_stage: ${failed_stage}, run_id: ${workflow_run_id})" - - # Validate and parse report - if [ -n "$report_json" ] && [ "$report_json" != "" ] && [ "$report_json" != "null" ]; then - if echo "$report_json" | jq empty 2>/dev/null; then - echo "[INFO] Found valid report for $storage" - parse_summary "$report_json" "$storage" - else - echo "[WARN] Invalid report JSON for $storage, using failed stage info" - # Fallback to failed stage - if [ -n "$failed_stage" ] && [ "$failed_stage" != "" ] && [ "$failed_stage" != "success" ]; then - failed_summary=$(create_failure_summary "$storage" "$failed_stage" "$workflow_run_id") - parse_summary "$failed_summary" "$storage" 
- else - csi=$(get_csi_name "$storage") - markdown_table+="| $csi | :warning: INVALID REPORT | 0 | 0 | 0 | 0 | — | — | — |\n" - fi - fi - else - # No report in structured file, use failed stage - if [ -n "$failed_stage" ] && [ "$failed_stage" != "" ] && [ "$failed_stage" != "success" ]; then - echo "[INFO] Stage '$failed_stage' failed for $storage" - failed_summary=$(create_failure_summary "$storage" "$failed_stage" "$workflow_run_id") - parse_summary "$failed_summary" "$storage" - else - csi=$(get_csi_name "$storage") - markdown_table+="| $csi | :warning: NO REPORT | 0 | 0 | 0 | 0 | — | — | — |\n" - fi - fi - else - # Artifact not found or invalid, show warning - echo "[WARN] Could not load report artifact for $storage" - csi=$(get_csi_name "$storage") - markdown_table+="| $csi | :warning: ARTIFACT NOT FOUND | 0 | 0 | 0 | 0 | — | — | — |\n" - fi - done - - echo "[INFO] Combined summary" - COMBINED_SUMMARY+="${markdown_table}\n" - - echo -e "$COMBINED_SUMMARY" - - # Send to channel if webhook is configured - echo "[INFO] Send to webhook" - if [ -n "$LOOP_WEBHOOK_URL" ]; then - curl --request POST --header 'Content-Type: application/json' --data "{\"text\": \"${COMBINED_SUMMARY}\"}" "$LOOP_WEBHOOK_URL" - fi + id: render-report + uses: actions/github-script@v7 env: - LOOP_WEBHOOK_URL: ${{ secrets.LOOP_WEBHOOK_URL }} + EXPECTED_STORAGE_TYPES: '["replicated","nfs"]' + LOOP_API_BASE_URL: ${{ secrets.LOOP_API_BASE_URL }} + LOOP_CHANNEL_ID: ${{ secrets.LOOP_CHANNEL_ID }} + LOOP_TOKEN: ${{ secrets.LOOP_TOKEN }} + with: + script: | + const renderMessengerReport = require('./.github/scripts/js/e2e/report/messenger-report'); + await renderMessengerReport({core}); diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index d332a54246..2df5763ebc 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -113,6 +113,11 @@ on: type: string default: 
"https://mirror.hetzner.com/ubuntu/packages" description: "APT mirror base URL (without trailing slash)" + pipeline_job_name: + required: false + type: string + default: "" + description: "Display name of the calling pipeline job in the parent workflow (e.g. 'E2E Pipeline (Replicated)'). Used to resolve per-stage job URLs in the report." secrets: DEV_REGISTRY_DOCKER_CFG: required: true @@ -122,11 +127,6 @@ on: required: true BOOTSTRAP_DEV_PROXY: required: true - outputs: - artifact-name: - description: "Name of the uploaded artifact with E2E report" - value: ${{ jobs.prepare-report.outputs.artifact-name }} - env: BRANCH: ${{ inputs.branch }} VIRTUALIZATION_TAG: ${{ inputs.virtualization_tag }} @@ -1306,13 +1306,8 @@ jobs: USB_SUPPORTED: ${{ steps.detect-k8s-version.outputs.usb-supported }} working-directory: ./test/e2e/ run: | - GINKGO_RESULT=$(mktemp -p $RUNNER_TEMP) DATE=$(date +"%Y-%m-%d") - START_TIME=$(date +"%H:%M:%S") - summary_file_name_junit="e2e_summary_${CSI}_${DATE}.xml" - ginkgo_json_report="ginkgo_report_${CSI}_${DATE}.json" - summary_file_name_json="e2e_summary_${CSI}_${DATE}.json" - FOCUS="${{ inputs.e2e_focus_tests }}" + e2e_report_file="e2e_report_${CSI}_${DATE}.json" cp -a legacy/testdata /tmp/testdata @@ -1325,12 +1320,12 @@ jobs: ./scripts/precheck-prepare_ci.sh set +e + FOCUS="${{ inputs.e2e_focus_tests }}" GINKGO_ARGS=( -v --race --timeout="$TIMEOUT" - --json-report="$ginkgo_json_report" - --junit-report="$summary_file_name_junit" + --json-report="$e2e_report_file" ) if [ -n "${LABELS:-}" ]; then @@ -1341,67 +1336,22 @@ jobs: GINKGO_ARGS+=(--focus="$FOCUS") fi - go tool ginkgo "${GINKGO_ARGS[@]}" | tee $GINKGO_RESULT + go tool ginkgo "${GINKGO_ARGS[@]}" GINKGO_EXIT_CODE=$? set -e - RESULT=$(sed -e "s/\x1b\[[0-9;]*m//g" $GINKGO_RESULT | grep --color=never -E "FAIL!|SUCCESS!") - if [[ $RESULT == FAIL!* ]]; then - RESULT_STATUS=":x: FAIL!" - elif [[ $RESULT == SUCCESS!* ]]; then - RESULT_STATUS=":white_check_mark: SUCCESS!" 
- else - RESULT_STATUS=":question: UNKNOWN" - fi - - PASSED=$(echo "$RESULT" | grep -oP "\d+(?= Passed)") - FAILED=$(echo "$RESULT" | grep -oP "\d+(?= Failed)") - PENDING=$(echo "$RESULT" | grep -oP "\d+(?= Pending)") - SKIPPED=$(echo "$RESULT" | grep -oP "\d+(?= Skipped)") - - SUMMARY=$(jq -n \ - --arg csi "$CSI" \ - --arg date "$DATE" \ - --arg startTime "$START_TIME" \ - --arg branch "${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" \ - --arg status "$RESULT_STATUS" \ - --argjson passed "$PASSED" \ - --argjson failed "$FAILED" \ - --argjson pending "$PENDING" \ - --argjson skipped "$SKIPPED" \ - --arg link "$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" \ - '{ - CSI: $csi, - Date: $date, - StartTime: $startTime, - Branch: $branch, - Status: $status, - Passed: $passed, - Failed: $failed, - Pending: $pending, - Skipped: $skipped, - Link: $link - }' - ) - - echo "$SUMMARY" - echo "summary=$(echo "$SUMMARY" | jq -c .)" >> $GITHUB_OUTPUT - echo $SUMMARY > "${summary_file_name_json}" - echo "[INFO] Exit code: $GINKGO_EXIT_CODE" exit $GINKGO_EXIT_CODE - - name: Upload summary test results (junit/xml) + - name: Upload summary test results (json) uses: actions/upload-artifact@v4 id: e2e-report-artifact if: always() && steps.e2e-report.outcome != 'skipped' with: - name: e2e-test-results-${{ inputs.storage_type }}-${{ github.run_id }}-${{ steps.vars.outputs.e2e-start-time }} + name: e2e-test-results-${{ inputs.storage_type }}-${{ github.run_id }}-${{ inputs.date_start }} path: | - test/e2e/e2e_summary_*.json - test/e2e/ginkgo_report_*.json - test/e2e/e2e_summary_*.xml - test/e2e/*junit*.xml + test/e2e/e2e_report_*.json if-no-files-found: ignore + overwrite: true retention-days: 3 - name: Upload resources from failed tests @@ -1418,12 +1368,11 @@ jobs: runs-on: ubuntu-latest needs: - bootstrap + - configure-sdn - configure-storage - configure-virtualization - e2e-test if: always() - outputs: - artifact-name: ${{ 
steps.set-artifact-name.outputs.artifact-name }} steps: - uses: actions/checkout@v4 @@ -1436,154 +1385,19 @@ jobs: - name: Determine failed stage and prepare report id: determine-stage - run: | - # Get branch name - BRANCH_NAME="${{ github.head_ref || github.ref_name }}" - if [ -z "$BRANCH_NAME" ] || [ "$BRANCH_NAME" == "refs/heads/" ]; then - BRANCH_NAME="${{ github.ref_name }}" - fi - - # Function to create failure summary JSON with proper job URL - create_failure_summary() { - local stage=$1 - local status_msg=$2 - local job_name=$3 - local csi="${{ inputs.storage_type }}" - local date=$(date +"%Y-%m-%d") - local start_time=$(date +"%H:%M:%S") - local branch="$BRANCH_NAME" - # Create URL pointing to the failed job in the workflow run - # Format: https://github.com/{owner}/{repo}/actions/runs/{run_id} - # The job name will be visible in the workflow run view - local link="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - - jq -n \ - --arg csi "$csi" \ - --arg date "$date" \ - --arg startTime "$start_time" \ - --arg branch "$branch" \ - --arg status "$status_msg" \ - --arg link "$link" \ - '{ - CSI: $csi, - Date: $date, - StartTime: $startTime, - Branch: $branch, - Status: $status, - Passed: 0, - Failed: 0, - Pending: 0, - Skipped: 0, - Link: $link - }' - } - - # Summary report is a flat JSON object; Ginkgo JSON report is an array. - is_summary_report() { - jq -e ' - type == "object" and - has("Status") and - has("Passed") and - has("Failed") and - has("Pending") and - has("Skipped") - ' >/dev/null 2>&1 - } - - # Try to find and load E2E test report - E2E_REPORT_FILE="" - REPORT_JSON="" - - # Search for the generated summary file and ignore the raw Ginkgo JSON report. - E2E_REPORT_FILE=$(find test/e2e -type f \ - -name "e2e_summary_${{ inputs.storage_type }}_*.json" \ - ! 
-name "*-ginkgo-report.json" \ - 2>/dev/null | sort | head -1) - - if [ -n "$E2E_REPORT_FILE" ] && [ -f "$E2E_REPORT_FILE" ]; then - echo "[INFO] Found E2E report file: $E2E_REPORT_FILE" - REPORT_JSON=$(jq -c . "$E2E_REPORT_FILE") - if echo "$REPORT_JSON" | is_summary_report; then - echo "[INFO] Loaded summary report from file" - echo "$REPORT_JSON" | jq . - else - echo "[WARN] Ignoring non-summary E2E report file: $E2E_REPORT_FILE" - REPORT_JSON="" - fi - fi - - # Function to process a stage - process_stage() { - local result_value="$1" - local stage_name="$2" - local status_msg="$3" - local job_name="$4" - local is_e2e_test="${5:-false}" - - if [ "$result_value" != "success" ]; then - FAILED_STAGE="$stage_name" - FAILED_JOB_NAME="$job_name (${{ inputs.storage_type }})" - - if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then - REPORT_JSON=$(create_failure_summary "$stage_name" "$status_msg" "$FAILED_JOB_NAME") - elif [ "$is_e2e_test" == "true" ]; then - # Special handling for e2e-test: update status if needed - CURRENT_STATUS=$(echo "$REPORT_JSON" | jq -r '.Status // ""') - if [[ "$CURRENT_STATUS" != *"FAIL"* ]] && [[ "$CURRENT_STATUS" != *"SUCCESS"* ]]; then - REPORT_JSON=$(echo "$REPORT_JSON" | jq -c '.Status = ":x: E2E TEST FAILED"') - fi - fi - return 0 # Stage failed - fi - return 1 # Stage succeeded - } - - # Determine which stage failed and prepare report - FAILED_STAGE="" - FAILED_JOB_NAME="" - - if process_stage "${{ needs.bootstrap.result }}" "bootstrap" ":x: BOOTSTRAP CLUSTER FAILED" "Bootstrap cluster"; then - : # Stage failed, handled in function - elif process_stage "${{ needs.configure-storage.result }}" "storage-setup" ":x: STORAGE SETUP FAILED" "Configure storage"; then - : # Stage failed, handled in function - elif process_stage "${{ needs.configure-virtualization.result }}" "virtualization-setup" ":x: VIRTUALIZATION SETUP FAILED" "Configure Virtualization"; then - : # Stage failed, handled in function - elif process_stage "${{ 
needs.e2e-test.result }}" "e2e-test" ":x: E2E TEST FAILED" "E2E test" "true"; then - : # Stage failed, handled in function - else - # All stages succeeded - FAILED_STAGE="success" - FAILED_JOB_NAME="E2E test (${{ inputs.storage_type }})" - if [ -z "$REPORT_JSON" ] || [ "$REPORT_JSON" == "" ]; then - REPORT_JSON=$(create_failure_summary "success" ":white_check_mark: SUCCESS!" "$FAILED_JOB_NAME") - fi - fi - - # Create structured report file with metadata - REPORT_FILE="e2e_report_${{ inputs.storage_type }}.json" - # Parse REPORT_JSON to ensure it's valid JSON before using it - REPORT_JSON_PARSED=$(echo "$REPORT_JSON" | jq -c .) - jq -n \ - --argjson report "$REPORT_JSON_PARSED" \ - --arg storage_type "${{ inputs.storage_type }}" \ - --arg failed_stage "$FAILED_STAGE" \ - --arg failed_job_name "$FAILED_JOB_NAME" \ - --arg workflow_run_id "${{ github.run_id }}" \ - --arg workflow_run_url "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ - '{ - storage_type: $storage_type, - failed_stage: $failed_stage, - failed_job_name: $failed_job_name, - workflow_run_id: $workflow_run_id, - workflow_run_url: $workflow_run_url, - report: $report - }' > "$REPORT_FILE" - - echo "report_file=$REPORT_FILE" >> $GITHUB_OUTPUT - echo "[INFO] Created report file: $REPORT_FILE" - echo "[INFO] Failed stage: $FAILED_STAGE" - echo "[INFO] Failed job: $FAILED_JOB_NAME" - cat "$REPORT_FILE" | jq . 
+ uses: actions/github-script@v7 + env: + STORAGE_TYPE: ${{ inputs.storage_type }} + PIPELINE_JOB_NAME: ${{ inputs.pipeline_job_name }} + NEEDS_CONTEXT: ${{ toJSON(needs) }} + with: + script: | + const buildClusterReport = require('./.github/scripts/js/e2e/report/cluster-report'); + await buildClusterReport({ + core, + context, + github, + }); - name: Upload E2E report artifact id: upload-artifact @@ -1591,15 +1405,9 @@ jobs: with: name: e2e-report-${{ inputs.storage_type }}-${{ github.run_id }}-${{ inputs.date_start }} path: ${{ steps.determine-stage.outputs.report_file }} + overwrite: true retention-days: 3 - - name: Set artifact name output - id: set-artifact-name - run: | - ARTIFACT_NAME="e2e-report-${{ inputs.storage_type }}-${{ github.run_id }}-${{ inputs.date_start }}" - echo "artifact-name=$ARTIFACT_NAME" >> $GITHUB_OUTPUT - echo "[INFO] Artifact name: $ARTIFACT_NAME" - undeploy-cluster: name: Undeploy cluster runs-on: ubuntu-latest @@ -1609,7 +1417,7 @@ jobs: - configure-storage - configure-virtualization - e2e-test - if: (cancelled() || success()) && (needs.configure-sdn.result == 'success') + if: cancelled() || success() steps: - uses: actions/checkout@v4