From e78912f79b7a67f7a30930cab14a5394266be72f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Wed, 13 May 2026 18:07:35 -0300 Subject: [PATCH 1/4] ci: add daily LoC report workflow Adds a scheduled workflow that counts Rust LoC in the workspace and publishes a report to Slack daily and to Telegram weekly (Monday UTC). Day-over-day deltas are computed against the previous run's report, cached on the branch under `loc-report--` keys. Required secrets: ETHLAMBDA_GENERAL_SLACK_WEBHOOK prod Slack channel ETHLAMBDA_TEST_SLACK_WEBHOOK test Slack channel (manual runs) TELEGRAM_BOT_TOKEN bot token TELEGRAM_ETHLAMBDA_CHAT_ID prod Telegram chat TELEGRAM_ETHLAMBDA_TEST_CHAT_ID test Telegram chat (manual runs) --- .github/scripts/generate_loc_report.sh | 152 +++++++++++++++++++++++++ .github/scripts/publish_slack.sh | 19 ++++ .github/scripts/publish_telegram.sh | 28 +++++ .github/workflows/daily_loc_report.yml | 104 +++++++++++++++++ 4 files changed, 303 insertions(+) create mode 100755 .github/scripts/generate_loc_report.sh create mode 100755 .github/scripts/publish_slack.sh create mode 100755 .github/scripts/publish_telegram.sh create mode 100644 .github/workflows/daily_loc_report.yml diff --git a/.github/scripts/generate_loc_report.sh b/.github/scripts/generate_loc_report.sh new file mode 100755 index 00000000..b6bdf518 --- /dev/null +++ b/.github/scripts/generate_loc_report.sh @@ -0,0 +1,152 @@ +#!/usr/bin/env bash +# +# Counts Rust lines of code in the ethlambda workspace and produces report +# files for Slack, Telegram, and the GitHub Actions step summary. +# +# Inputs (optional): +# loc_report.json.old Previous run's report — used to compute deltas. +# +# Outputs: +# loc_report.json Machine-readable report for caching. +# loc_report_slack.json Slack Block Kit payload (daily message). +# loc_report_telegram.txt Telegram HTML body (weekly message). 
+# loc_report_github.txt Plain-text block for the workflow step summary. + +set -euo pipefail + +OLD_REPORT="loc_report.json.old" +NEW_REPORT="loc_report.json" + +count_loc() { + # Count Rust lines of code under $1. Excludes common non-product folders. + # If the path has no Rust files, returns 0. + # `-t Rust` (short form) is accepted by tokei v12 and v14. + tokei "$1" -t Rust --output json \ + -e tests -e benches -e examples 2>/dev/null \ + | jq '.Rust.code // 0' +} + +# Enumerate workspace members through cargo so the list stays in sync +# with Cargo.toml automatically. +CRATE_DIRS=$( + cargo metadata --no-deps --format-version 1 \ + | jq -r '.packages[] | .manifest_path | sub("/Cargo.toml$"; "")' \ + | sort +) + +CRATES_JSON='[]' +TOTAL=0 +while IFS= read -r dir; do + [[ -z "$dir" ]] && continue + rel="${dir#"$PWD/"}" + src="${dir}/src" + if [[ -d "$src" ]]; then + loc=$(count_loc "$src") + else + loc=0 + fi + TOTAL=$((TOTAL + loc)) + CRATES_JSON=$(jq --arg path "$rel" --argjson loc "$loc" \ + '. + [{path: $path, loc: $loc}]' <<< "$CRATES_JSON") +done <<< "$CRATE_DIRS" + +CRATES_JSON=$(jq 'sort_by(-.loc)' <<< "$CRATES_JSON") + +jq -n --argjson total "$TOTAL" --argjson crates "$CRATES_JSON" \ + '{total: $total, crates: $crates}' > "$NEW_REPORT" + +# Resolve previous totals (defaulting to current → zero deltas on first run). +OLD_TOTAL=$TOTAL +OLD_CRATES_JSON=$CRATES_JSON +if [[ -f "$OLD_REPORT" ]]; then + OLD_TOTAL=$(jq '.total' "$OLD_REPORT") + OLD_CRATES_JSON=$(jq '.crates' "$OLD_REPORT") +fi + +format_diff() { + local cur=$1 old=$2 + if (( cur > old )); then echo "(+$((cur - old)))" + elif (( cur < old )); then echo "(-$((old - cur)))" + else echo "" + fi +} + +TOTAL_DIFF=$(format_diff "$TOTAL" "$OLD_TOTAL") +COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)} +SHORT_SHA=${COMMIT_SHA:0:7} +DATE_UTC=$(date -u +"%Y-%m-%d") + +# Build per-crate annotated rows once and reuse for every format. +ROWS_JSON=$(jq --argjson old "$OLD_CRATES_JSON" ' + map( + . 
as $c + | ($old | map(select(.path == $c.path)) | .[0].loc // 0) as $old_loc + | . + { + old_loc: $old_loc, + diff: ($c.loc - $old_loc) + } + ) +' <<< "$CRATES_JSON") + +format_diff_jq=' + def diff_str: + if . > 0 then "(+" + (. | tostring) + ")" + elif . < 0 then "(-" + ((. | -.) | tostring) + ")" + else "" end; +' + +# GitHub step summary (plain text inside a code block). +{ + echo '```' + echo "ethlambda lines of code (${DATE_UTC}, ${SHORT_SHA})" + echo "============================================" + echo "Total Rust LoC: ${TOTAL} ${TOTAL_DIFF}" + echo + echo "Per-crate" + echo "---------" + jq -r "$format_diff_jq"' + .[] | "\(.path): \(.loc) \(.diff | diff_str)" + ' <<< "$ROWS_JSON" + echo + echo "Excluded folders: tests/, benches/, examples/" + echo '```' +} > loc_report_github.txt + +# Slack Block Kit payload. +CRATES_MRKDWN=$(jq -r "$format_diff_jq"' + map("*\(.path)*: \(.loc) \(.diff | diff_str)") | join("\n") +' <<< "$ROWS_JSON") + +SUMMARY_TEXT=$(printf '*Total Rust LoC:* %s %s\n_Date:_ %s • _Commit:_ `%s`' \ + "$TOTAL" "$TOTAL_DIFF" "$DATE_UTC" "$SHORT_SHA") + +jq -n \ + --arg summary "$SUMMARY_TEXT" \ + --arg crates "$CRATES_MRKDWN" \ + '{ + blocks: [ + { type: "header", text: { type: "plain_text", text: "Daily ethlambda LoC Report" } }, + { type: "divider" }, + { type: "section", text: { type: "mrkdwn", text: $summary } }, + { type: "header", text: { type: "plain_text", text: "Per-crate" } }, + { type: "section", text: { type: "mrkdwn", text: $crates } }, + { type: "context", elements: [ + { type: "mrkdwn", text: "_Excluded folders: tests/, benches/, examples/_" } + ]} + ] + }' > loc_report_slack.json + +# Telegram (HTML parse mode). 
+{ + echo "Weekly ethlambda LoC Report" + echo "Date: ${DATE_UTC} • Commit: ${SHORT_SHA}" + echo + echo "Total Rust LoC: ${TOTAL} ${TOTAL_DIFF}" + echo + echo "Per-crate" + jq -r "$format_diff_jq"' + .[] | "\(.path): \(.loc) \(.diff | diff_str)" + ' <<< "$ROWS_JSON" + echo + echo "Excluded folders: tests/, benches/, examples/" +} > loc_report_telegram.txt diff --git a/.github/scripts/publish_slack.sh b/.github/scripts/publish_slack.sh new file mode 100755 index 00000000..f6929009 --- /dev/null +++ b/.github/scripts/publish_slack.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# +# POSTs a Slack Block Kit payload to an incoming webhook. +# +# Usage: publish_slack.sh + +set -euo pipefail + +WEBHOOK_URL="${1:?webhook URL required}" +PAYLOAD_FILE="${2:?payload file required}" + +if [[ -z "$WEBHOOK_URL" ]]; then + echo "::error::Slack webhook URL resolved to an empty value — check the secret configured for this trigger (scheduled vs manual)" + exit 1 +fi + +curl --fail-with-body -X POST "$WEBHOOK_URL" \ + -H 'Content-Type: application/json; charset=utf-8' \ + --data @"$PAYLOAD_FILE" diff --git a/.github/scripts/publish_telegram.sh b/.github/scripts/publish_telegram.sh new file mode 100755 index 00000000..a124df62 --- /dev/null +++ b/.github/scripts/publish_telegram.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# +# POSTs the contents of a file as an HTML-formatted Telegram message. +# +# Required env: +# TELEGRAM_BOT_TOKEN Bot token used to authenticate the request. +# TELEGRAM_ETHLAMBDA_CHAT_ID Destination chat ID. 
+# +# Usage: publish_telegram.sh + +set -euo pipefail + +MESSAGE_FILE="${1:?message file required}" + +if [[ -z "${TELEGRAM_BOT_TOKEN:-}" ]]; then + echo "::error::TELEGRAM_BOT_TOKEN secret is not set — skipping Telegram post" + exit 1 +fi + +if [[ -z "${TELEGRAM_ETHLAMBDA_CHAT_ID:-}" ]]; then + echo "::error::TELEGRAM_ETHLAMBDA_CHAT_ID resolved to an empty value — check that the appropriate secret is configured for this trigger (scheduled vs manual)" + exit 1 +fi + +curl --fail-with-body -X POST "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/sendMessage" \ + -d chat_id="$TELEGRAM_ETHLAMBDA_CHAT_ID" \ + -d parse_mode=HTML \ + --data-urlencode text="$(cat "$MESSAGE_FILE")" diff --git a/.github/workflows/daily_loc_report.yml b/.github/workflows/daily_loc_report.yml new file mode 100644 index 00000000..0e1e37a5 --- /dev/null +++ b/.github/workflows/daily_loc_report.yml @@ -0,0 +1,104 @@ +name: Daily Lines of Code Report + +on: + schedule: + # Every day at UTC midnight (Slack daily, Telegram on Monday only) + - cron: "0 0 * * *" + workflow_dispatch: + inputs: + target: + description: "Where to post (test channel/chat or prod)" + required: true + default: "test" + type: choice + options: + - test + - prod + post_telegram: + description: "Also post to Telegram on this manual run" + required: false + default: false + type: boolean + +permissions: + contents: read + actions: write + +env: + CARGO_NET_GIT_FETCH_WITH_CLI: "true" + CARGO_NET_RETRY: "10" + +jobs: + loc: + name: Count ethlambda LoC and publish report + runs-on: ubuntu-latest + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Setup Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: "1.92.0" + + - name: Setup cache + uses: Swatinem/rust-cache@v2 + + - name: Install tokei + run: cargo install tokei --locked --version 12.1.2 + + - name: Restore previous LoC report + id: cache-loc-report + uses: actions/cache/restore@v5 + with: + path: loc_report.json + key: loc-report-${{ 
github.ref_name }}-${{ github.run_id }}
          restore-keys: |
            loc-report-${{ github.ref_name }}-

      - name: Stash previous report as .old for delta computation
        # `cache-matched-key` is non-empty whenever the restore step actually
        # recovered a report (exact or restore-keys prefix match). The previous
        # guard checked `cache-hit != ''`, but `cache-hit` is only 'true' on an
        # exact primary-key match — impossible here since the key embeds the
        # unique run_id — and it is 'false' (non-empty) on a complete miss, so
        # the `mv` ran on the very first run and failed the job.
        if: steps.cache-loc-report.outputs.cache-matched-key != ''
        run: mv loc_report.json loc_report.json.old

      - name: Generate LoC report
        run: bash .github/scripts/generate_loc_report.sh

      - name: Save new LoC report to cache
        if: success()
        uses: actions/cache/save@v5
        with:
          path: loc_report.json
          key: loc-report-${{ github.ref_name }}-${{ github.run_id }}

      - name: Post results to workflow summary
        run: cat loc_report_github.txt >> "$GITHUB_STEP_SUMMARY"

      - name: Post to Slack
        env:
          # Prod webhook on scheduled runs or explicit target=prod; test otherwise.
          SLACK_WEBHOOK: >-
            ${{ (github.event_name == 'schedule' || inputs.target == 'prod')
            && secrets.ETHLAMBDA_GENERAL_SLACK_WEBHOOK
            || secrets.ETHLAMBDA_TEST_SLACK_WEBHOOK }}
        run: bash .github/scripts/publish_slack.sh "$SLACK_WEBHOOK" loc_report_slack.json

      - name: Post to Telegram (weekly, or manual opt-in)
        env:
          TELEGRAM_BOT_TOKEN: ${{ secrets.TELEGRAM_BOT_TOKEN }}
          TELEGRAM_ETHLAMBDA_CHAT_ID: >-
            ${{ (github.event_name == 'schedule' || inputs.target == 'prod')
            && secrets.TELEGRAM_ETHLAMBDA_CHAT_ID
            || secrets.TELEGRAM_ETHLAMBDA_TEST_CHAT_ID }}
        run: |
          # Scheduled runs only post to Telegram on Monday (UTC).
          # Manual runs require post_telegram=true to opt in.
          if [[ "${{ github.event_name }}" == "schedule" ]]; then
            day_of_week=$(date -u +%u) # 1=Monday ..
7=Sunday + if [[ "$day_of_week" != "1" ]]; then + echo "Skipping Telegram post (scheduled run, only sent on Monday)" + exit 0 + fi + elif [[ "${{ inputs.post_telegram }}" != "true" ]]; then + echo "Skipping Telegram post (manual run, post_telegram not enabled)" + exit 0 + fi + bash .github/scripts/publish_telegram.sh loc_report_telegram.txt From 1442cb5f5eb045f72041749da9a62cea4f292153 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Fri, 15 May 2026 12:14:56 -0300 Subject: [PATCH 2/4] ci(loc): switch to cargo-warloc, split main vs tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the tokei + jq pipeline with cargo-warloc, which parses Rust with `syn` and natively classifies inline `#[cfg(test)]` blocks as test code. The report now shows per-crate counts (no tests) and two totals at the bottom: with and without tests. Reasons: - tokei can only exclude directory names, so unit tests written inline (the dominant pattern here) leaked into the "production" LoC. - Per-crate scanning of crate roots was also missing build.rs. Implementation moves from bash+jq to a small Python script — Python 3 ships on ubuntu-latest, so no extra setup is needed beyond the cargo-warloc install. 
--- .github/scripts/generate_loc_report.py | 191 +++++++++++++++++++++++++ .github/scripts/generate_loc_report.sh | 152 -------------------- .github/workflows/daily_loc_report.yml | 6 +- 3 files changed, 194 insertions(+), 155 deletions(-) create mode 100755 .github/scripts/generate_loc_report.py delete mode 100755 .github/scripts/generate_loc_report.sh diff --git a/.github/scripts/generate_loc_report.py b/.github/scripts/generate_loc_report.py new file mode 100755 index 00000000..44209a47 --- /dev/null +++ b/.github/scripts/generate_loc_report.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 +""" +Counts Rust lines of code in the ethlambda workspace via cargo-warloc and +produces report files for Slack, Telegram, and the GitHub Actions step summary. + +`cargo warloc` reports per-file `main`/`tests` line counts using a Rust AST +parser, so inline `#[cfg(test)]` blocks are correctly classified as test code. + +Inputs (optional): + loc_report.json.old Previous run's report. Used to compute deltas. + +Outputs: + loc_report.json Machine-readable report for caching. + loc_report_slack.json Slack Block Kit payload (daily). + loc_report_telegram.txt Telegram HTML body (weekly). + loc_report_github.txt Plain-text block for the workflow step summary. 
+""" + +from __future__ import annotations + +import html +import json +import os +import subprocess +from datetime import datetime, timezone +from pathlib import Path + + +def _run(cmd: list[str]) -> str: + return subprocess.check_output(cmd, text=True) + + +def warloc_by_file() -> dict: + return json.loads(_run(["cargo", "warloc", "--by-file", "-o", "json"])) + + +def workspace_crates() -> list[str]: + md = json.loads(_run(["cargo", "metadata", "--no-deps", "--format-version", "1"])) + cwd = os.getcwd() + "/" + crates = [] + for pkg in md["packages"]: + path = pkg["manifest_path"][: -len("/Cargo.toml")] + if path.startswith(cwd): + path = path[len(cwd):] + crates.append(path) + # Sort longest first so longest-prefix match wins when grouping files. + crates.sort(key=len, reverse=True) + return crates + + +def group_by_crate(by_file: dict, crates: list[str]) -> dict[str, dict[str, int]]: + buckets = {c: {"main": 0, "tests": 0} for c in crates} + for raw_path, stats in by_file["files"].items(): + path = raw_path[2:] if raw_path.startswith("./") else raw_path + owner = next((c for c in crates if path.startswith(c + "/")), None) + if owner is None: + continue + buckets[owner]["main"] += stats["main"]["code"] + buckets[owner]["tests"] += stats["tests"]["code"] + return buckets + + +def format_diff(cur: int, old: int) -> str: + if cur > old: + return f"(+{cur - old})" + if cur < old: + return f"(-{old - cur})" + return "" + + +def main() -> None: + by_file = warloc_by_file() + crates = workspace_crates() + buckets = group_by_crate(by_file, crates) + + rows = [ + {"path": c, "main": b["main"], "tests": b["tests"]} + for c, b in buckets.items() + ] + rows.sort(key=lambda r: -r["main"]) + + total_main = sum(r["main"] for r in rows) + total_tests = sum(r["tests"] for r in rows) + total_with_tests = total_main + total_tests + + new_report = { + "total_main": total_main, + "total_tests": total_tests, + "total_with_tests": total_with_tests, + "crates": rows, + } + 
Path("loc_report.json").write_text(json.dumps(new_report)) + + # Resolve previous values (default = current → blank deltas on first run). + old_path = Path("loc_report.json.old") + if old_path.exists(): + old = json.loads(old_path.read_text()) + old_main = old.get("total_main", total_main) + old_with = old.get("total_with_tests", total_with_tests) + old_crates = {c["path"]: c["main"] for c in old.get("crates", [])} + else: + old_main = total_main + old_with = total_with_tests + old_crates = {r["path"]: r["main"] for r in rows} + + main_diff = format_diff(total_main, old_main) + with_diff = format_diff(total_with_tests, old_with) + + sha = os.environ.get("GITHUB_SHA") or _run(["git", "rev-parse", "HEAD"]).strip() + short = sha[:7] + date_utc = datetime.now(timezone.utc).strftime("%Y-%m-%d") + + per_crate = [] + for r in rows: + old_loc = old_crates.get(r["path"], r["main"]) + per_crate.append({ + "path": r["path"], + "loc": r["main"], + "diff": format_diff(r["main"], old_loc), + }) + + # --- GitHub step summary ------------------------------------------------- + gh_lines = [ + "```", + f"ethlambda lines of code ({date_utc}, {short})", + "============================================", + "", + "Per-crate (no tests)", + "--------------------", + ] + gh_lines += [f"{r['path']}: {r['loc']} {r['diff']}".rstrip() for r in per_crate] + gh_lines += [ + "", + f"Total Rust LoC (no tests): {total_main} {main_diff}".rstrip(), + f"Total Rust LoC (with tests): {total_with_tests} {with_diff}".rstrip(), + "```", + ] + Path("loc_report_github.txt").write_text("\n".join(gh_lines) + "\n") + + # --- Slack Block Kit ------------------------------------------------------ + per_crate_slack = "\n".join( + f"*{r['path']}*: {r['loc']} {r['diff']}".rstrip() for r in per_crate + ) + totals_slack = ( + f"*Total (no tests):* {total_main} {main_diff}".rstrip() + + "\n" + + f"*Total (with tests):* {total_with_tests} {with_diff}".rstrip() + ) + slack_payload = { + "blocks": [ + {"type": "header", + 
"text": {"type": "plain_text", "text": "Daily ethlambda LoC Report"}}, + {"type": "section", + "text": {"type": "mrkdwn", + "text": f"_Date:_ {date_utc} • _Commit:_ `{short}`"}}, + {"type": "divider"}, + {"type": "header", + "text": {"type": "plain_text", "text": "Per-crate (no tests)"}}, + {"type": "section", + "text": {"type": "mrkdwn", "text": per_crate_slack}}, + {"type": "divider"}, + {"type": "section", + "text": {"type": "mrkdwn", "text": totals_slack}}, + ] + } + Path("loc_report_slack.json").write_text(json.dumps(slack_payload)) + + # --- Telegram (HTML parse mode) ------------------------------------------ + def esc(s: str) -> str: + return html.escape(s, quote=False) + + tg_lines = [ + "Weekly ethlambda LoC Report", + f"Date: {date_utc} • Commit: {esc(short)}", + "", + "Per-crate (no tests)", + ] + tg_lines += [ + f"{esc(r['path'])}: {r['loc']} {r['diff']}".rstrip() + for r in per_crate + ] + tg_lines += [ + "", + f"Total Rust LoC (no tests): {total_main} {main_diff}".rstrip(), + f"Total Rust LoC (with tests): {total_with_tests} {with_diff}".rstrip(), + ] + Path("loc_report_telegram.txt").write_text("\n".join(tg_lines) + "\n") + + +if __name__ == "__main__": + main() diff --git a/.github/scripts/generate_loc_report.sh b/.github/scripts/generate_loc_report.sh deleted file mode 100755 index b6bdf518..00000000 --- a/.github/scripts/generate_loc_report.sh +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env bash -# -# Counts Rust lines of code in the ethlambda workspace and produces report -# files for Slack, Telegram, and the GitHub Actions step summary. -# -# Inputs (optional): -# loc_report.json.old Previous run's report — used to compute deltas. -# -# Outputs: -# loc_report.json Machine-readable report for caching. -# loc_report_slack.json Slack Block Kit payload (daily message). -# loc_report_telegram.txt Telegram HTML body (weekly message). -# loc_report_github.txt Plain-text block for the workflow step summary. 
- -set -euo pipefail - -OLD_REPORT="loc_report.json.old" -NEW_REPORT="loc_report.json" - -count_loc() { - # Count Rust lines of code under $1. Excludes common non-product folders. - # If the path has no Rust files, returns 0. - # `-t Rust` (short form) is accepted by tokei v12 and v14. - tokei "$1" -t Rust --output json \ - -e tests -e benches -e examples 2>/dev/null \ - | jq '.Rust.code // 0' -} - -# Enumerate workspace members through cargo so the list stays in sync -# with Cargo.toml automatically. -CRATE_DIRS=$( - cargo metadata --no-deps --format-version 1 \ - | jq -r '.packages[] | .manifest_path | sub("/Cargo.toml$"; "")' \ - | sort -) - -CRATES_JSON='[]' -TOTAL=0 -while IFS= read -r dir; do - [[ -z "$dir" ]] && continue - rel="${dir#"$PWD/"}" - src="${dir}/src" - if [[ -d "$src" ]]; then - loc=$(count_loc "$src") - else - loc=0 - fi - TOTAL=$((TOTAL + loc)) - CRATES_JSON=$(jq --arg path "$rel" --argjson loc "$loc" \ - '. + [{path: $path, loc: $loc}]' <<< "$CRATES_JSON") -done <<< "$CRATE_DIRS" - -CRATES_JSON=$(jq 'sort_by(-.loc)' <<< "$CRATES_JSON") - -jq -n --argjson total "$TOTAL" --argjson crates "$CRATES_JSON" \ - '{total: $total, crates: $crates}' > "$NEW_REPORT" - -# Resolve previous totals (defaulting to current → zero deltas on first run). -OLD_TOTAL=$TOTAL -OLD_CRATES_JSON=$CRATES_JSON -if [[ -f "$OLD_REPORT" ]]; then - OLD_TOTAL=$(jq '.total' "$OLD_REPORT") - OLD_CRATES_JSON=$(jq '.crates' "$OLD_REPORT") -fi - -format_diff() { - local cur=$1 old=$2 - if (( cur > old )); then echo "(+$((cur - old)))" - elif (( cur < old )); then echo "(-$((old - cur)))" - else echo "" - fi -} - -TOTAL_DIFF=$(format_diff "$TOTAL" "$OLD_TOTAL") -COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)} -SHORT_SHA=${COMMIT_SHA:0:7} -DATE_UTC=$(date -u +"%Y-%m-%d") - -# Build per-crate annotated rows once and reuse for every format. -ROWS_JSON=$(jq --argjson old "$OLD_CRATES_JSON" ' - map( - . as $c - | ($old | map(select(.path == $c.path)) | .[0].loc // 0) as $old_loc - | . 
+ { - old_loc: $old_loc, - diff: ($c.loc - $old_loc) - } - ) -' <<< "$CRATES_JSON") - -format_diff_jq=' - def diff_str: - if . > 0 then "(+" + (. | tostring) + ")" - elif . < 0 then "(-" + ((. | -.) | tostring) + ")" - else "" end; -' - -# GitHub step summary (plain text inside a code block). -{ - echo '```' - echo "ethlambda lines of code (${DATE_UTC}, ${SHORT_SHA})" - echo "============================================" - echo "Total Rust LoC: ${TOTAL} ${TOTAL_DIFF}" - echo - echo "Per-crate" - echo "---------" - jq -r "$format_diff_jq"' - .[] | "\(.path): \(.loc) \(.diff | diff_str)" - ' <<< "$ROWS_JSON" - echo - echo "Excluded folders: tests/, benches/, examples/" - echo '```' -} > loc_report_github.txt - -# Slack Block Kit payload. -CRATES_MRKDWN=$(jq -r "$format_diff_jq"' - map("*\(.path)*: \(.loc) \(.diff | diff_str)") | join("\n") -' <<< "$ROWS_JSON") - -SUMMARY_TEXT=$(printf '*Total Rust LoC:* %s %s\n_Date:_ %s • _Commit:_ `%s`' \ - "$TOTAL" "$TOTAL_DIFF" "$DATE_UTC" "$SHORT_SHA") - -jq -n \ - --arg summary "$SUMMARY_TEXT" \ - --arg crates "$CRATES_MRKDWN" \ - '{ - blocks: [ - { type: "header", text: { type: "plain_text", text: "Daily ethlambda LoC Report" } }, - { type: "divider" }, - { type: "section", text: { type: "mrkdwn", text: $summary } }, - { type: "header", text: { type: "plain_text", text: "Per-crate" } }, - { type: "section", text: { type: "mrkdwn", text: $crates } }, - { type: "context", elements: [ - { type: "mrkdwn", text: "_Excluded folders: tests/, benches/, examples/_" } - ]} - ] - }' > loc_report_slack.json - -# Telegram (HTML parse mode). 
-{ - echo "Weekly ethlambda LoC Report" - echo "Date: ${DATE_UTC} • Commit: ${SHORT_SHA}" - echo - echo "Total Rust LoC: ${TOTAL} ${TOTAL_DIFF}" - echo - echo "Per-crate" - jq -r "$format_diff_jq"' - .[] | "\(.path): \(.loc) \(.diff | diff_str)" - ' <<< "$ROWS_JSON" - echo - echo "Excluded folders: tests/, benches/, examples/" -} > loc_report_telegram.txt diff --git a/.github/workflows/daily_loc_report.yml b/.github/workflows/daily_loc_report.yml index 0e1e37a5..eb298dd4 100644 --- a/.github/workflows/daily_loc_report.yml +++ b/.github/workflows/daily_loc_report.yml @@ -44,8 +44,8 @@ jobs: - name: Setup cache uses: Swatinem/rust-cache@v2 - - name: Install tokei - run: cargo install tokei --locked --version 12.1.2 + - name: Install cargo-warloc + run: cargo install cargo-warloc --locked --version 0.1.1 - name: Restore previous LoC report id: cache-loc-report @@ -61,7 +61,7 @@ jobs: run: mv loc_report.json loc_report.json.old - name: Generate LoC report - run: bash .github/scripts/generate_loc_report.sh + run: python3 .github/scripts/generate_loc_report.py - name: Save new LoC report to cache if: success() From 6e0af1694e94dd78b6e1098de794f5673c2c7226 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Fri, 15 May 2026 12:32:51 -0300 Subject: [PATCH 3/4] ci(loc): read Slack webhook from env, not argv Passing the webhook URL as `$1` made it visible in the process list (`ps aux`, `/proc//cmdline`) for the duration of the curl call. Switch to reading it from the `SLACK_WEBHOOK` env variable in the script, matching the existing `publish_telegram.sh` pattern. 
--- .github/scripts/publish_slack.sh | 15 +++++++++------ .github/workflows/daily_loc_report.yml | 2 +- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/scripts/publish_slack.sh b/.github/scripts/publish_slack.sh index f6929009..a99459c2 100755 --- a/.github/scripts/publish_slack.sh +++ b/.github/scripts/publish_slack.sh @@ -2,18 +2,21 @@ # # POSTs a Slack Block Kit payload to an incoming webhook. # -# Usage: publish_slack.sh +# Required env: +# SLACK_WEBHOOK Incoming-webhook URL. Read from the env (not argv) so it +# doesn't leak into the process list. +# +# Usage: publish_slack.sh set -euo pipefail -WEBHOOK_URL="${1:?webhook URL required}" -PAYLOAD_FILE="${2:?payload file required}" +PAYLOAD_FILE="${1:?payload file required}" -if [[ -z "$WEBHOOK_URL" ]]; then - echo "::error::Slack webhook URL resolved to an empty value — check the secret configured for this trigger (scheduled vs manual)" +if [[ -z "${SLACK_WEBHOOK:-}" ]]; then + echo "::error::SLACK_WEBHOOK resolved to an empty value — check the secret configured for this trigger (scheduled vs manual)" exit 1 fi -curl --fail-with-body -X POST "$WEBHOOK_URL" \ +curl --fail-with-body -X POST "$SLACK_WEBHOOK" \ -H 'Content-Type: application/json; charset=utf-8' \ --data @"$PAYLOAD_FILE" diff --git a/.github/workflows/daily_loc_report.yml b/.github/workflows/daily_loc_report.yml index eb298dd4..0df914d2 100644 --- a/.github/workflows/daily_loc_report.yml +++ b/.github/workflows/daily_loc_report.yml @@ -79,7 +79,7 @@ jobs: ${{ (github.event_name == 'schedule' || inputs.target == 'prod') && secrets.ETHLAMBDA_GENERAL_SLACK_WEBHOOK || secrets.ETHLAMBDA_TEST_SLACK_WEBHOOK }} - run: bash .github/scripts/publish_slack.sh "$SLACK_WEBHOOK" loc_report_slack.json + run: bash .github/scripts/publish_slack.sh loc_report_slack.json - name: Post to Telegram (weekly, or manual opt-in) env: From 89e2f1f7b56dbf95c84dff9cafb6bc56ad14b248 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= 
<47506558+MegaRedHand@users.noreply.github.com> Date: Fri, 15 May 2026 15:07:31 -0300 Subject: [PATCH 4/4] ci(loc): exclude test-fixtures crate and rpc test_driver.rs cargo-warloc classifies these as production code because they aren't inside a `tests/` dir or a `#[cfg(test)]` block, but they exist solely to support testing: - `crates/common/test-fixtures` is a fixtures crate consumed only by other crates' tests. - `crates/net/rpc/src/test_driver.rs` implements the Hive lean spec-assets test driver endpoints. Both are now folded into the "tests" bucket so they no longer inflate the "no tests" total. Definitions are two frozenset constants at the top of the script for easy extension. --- .github/scripts/generate_loc_report.py | 27 ++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/.github/scripts/generate_loc_report.py b/.github/scripts/generate_loc_report.py index 44209a47..cb7f5e5d 100755 --- a/.github/scripts/generate_loc_report.py +++ b/.github/scripts/generate_loc_report.py @@ -26,6 +26,20 @@ from pathlib import Path +# Crates whose entire contents are test infrastructure and should never +# appear in the "no tests" totals or per-crate listing. +TEST_ONLY_CRATES = frozenset({ + "crates/common/test-fixtures", +}) + +# Individual files that are test infrastructure but live next to production +# code inside an otherwise-production crate. Their lines are folded into the +# owning crate's `tests` bucket. 
+TEST_ONLY_FILES = frozenset({ + "crates/net/rpc/src/test_driver.rs", +}) + + def _run(cmd: list[str]) -> str: return subprocess.check_output(cmd, text=True) @@ -55,8 +69,13 @@ def group_by_crate(by_file: dict, crates: list[str]) -> dict[str, dict[str, int] owner = next((c for c in crates if path.startswith(c + "/")), None) if owner is None: continue - buckets[owner]["main"] += stats["main"]["code"] - buckets[owner]["tests"] += stats["tests"]["code"] + is_test_only = owner in TEST_ONLY_CRATES or path in TEST_ONLY_FILES + if is_test_only: + # All lines from this file/crate count as tests. + buckets[owner]["tests"] += stats["main"]["code"] + stats["tests"]["code"] + else: + buckets[owner]["main"] += stats["main"]["code"] + buckets[owner]["tests"] += stats["tests"]["code"] return buckets @@ -112,6 +131,10 @@ def main() -> None: per_crate = [] for r in rows: + # Test-only crates fold their lines into the tests bucket and have + # main == 0; skip them in the per-crate "no tests" listing. + if r["main"] == 0: + continue old_loc = old_crates.get(r["path"], r["main"]) per_crate.append({ "path": r["path"],