Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 38 additions & 0 deletions report/details.qmd
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,26 @@ from vip.reporting import load_results

data = load_results(Path("results.json"))

from vip.gherkin import parse_feature_file

# Cache of parsed feature files, keyed by the .feature path derived from a
# test's nodeid, so each file is read and parsed at most once per render.
_feature_cache: dict[str, dict] = {}


def _get_feature(nodeid: str) -> dict | None:
    """Return parsed feature data for a test's nodeid, or None.

    The nodeid looks like "tests/connect/test_auth.py::test_login"; the
    matching feature file is the same path with a .feature suffix.
    """
    module_path = nodeid.partition("::")[0]
    feature_path = module_path.rsplit(".", 1)[0] + ".feature"
    cached = _feature_cache.get(feature_path)
    if cached is None:
        # Feature files live relative to the report directory's parent.
        candidate = Path("..") / feature_path
        cached = parse_feature_file(candidate) if candidate.exists() else {}
        _feature_cache[feature_path] = cached
    # A missing/empty feature is cached as {} but reported to callers as None.
    return cached or None


if data.total == 0:
display(Markdown("> **No results found.** Run: `pytest --vip-report=report/results.json`"))
else:
Expand All @@ -35,5 +55,23 @@ else:
lines.append(f"| `{short}` | {icon} | {duration} |")
lines.append("")

# Show BDD scenario steps per test in expandable details.
for item in items:
    # Map the test module back to its parsed .feature file; tests with no
    # feature file (or an empty one) contribute nothing here.
    feature = _get_feature(item.nodeid)
    if not feature or not feature.get("scenarios"):
        continue
    # Only BDD-style tests carry a scenario title; skip plain tests.
    scenario_title = item.scenario_title
    if not scenario_title:
        continue
    for sc in feature["scenarios"]:
        # Match the test to its scenario by title; render only scenarios
        # that actually have steps.
        if sc["title"] == scenario_title and sc.get("steps"):
            # Bare test name (text after "::") labels the <details> summary.
            short = item.nodeid.split("::")[-1] if "::" in item.nodeid else item.nodeid
            lines.append(f"<details><summary><code>{short}</code> — BDD steps</summary>\n")
            lines.append(f"**Scenario:** {sc['title']}\n")
            for step in sc["steps"]:
                lines.append(f"- `{step}`")
            lines.append("\n</details>\n")
            # At most one scenario per test; stop at the first match.
            break

display(Markdown("\n".join(lines)))
```
59 changes: 58 additions & 1 deletion report/index.qmd
Original file line number Diff line number Diff line change
Expand Up @@ -88,14 +88,71 @@ else:
```{python}
#| echo: false

from vip.reporting import load_troubleshooting

# Per-scenario troubleshooting hints, keyed by Gherkin scenario title.
hints = load_troubleshooting(Path("../tests/troubleshooting.toml"))

failures = [r for r in data.results if r.outcome == "failed"]
if not failures:
    display(Markdown("No failures."))
else:
    lines = []
    for f in failures:
        lines.append(f"### `{f.nodeid}`\n")

        # Look up troubleshooting hints by scenario title.
        hint = hints.get(f.scenario_title, {}) if f.scenario_title else {}

        if f.scenario_title:
            desc = f" — {f.feature_description}" if f.feature_description else ""
            lines.append(f"**What was tested:** {f.scenario_title}{desc}\n")

        # FIX: the traceback was previously appended twice — once as a bare
        # code block and again under the "**Error:**" heading. Emit it once,
        # with the heading.
        if f.longrepr:
            lines.append(f"**Error:**\n```\n{f.longrepr}\n```\n")

        if hint:
            if hint.get("likely_causes"):
                lines.append("**Likely causes:**\n")
                for cause in hint["likely_causes"]:
                    lines.append(f"- {cause}")
                lines.append("")

            if hint.get("suggested_steps"):
                lines.append("**Suggested next steps:**\n")
                for i, step in enumerate(hint["suggested_steps"], 1):
                    lines.append(f"{i}. {step}")
                lines.append("")

            if hint.get("docs_url"):
                lines.append(f"**Documentation:** [{hint['docs_url']}]({hint['docs_url']})\n")

    display(Markdown("\n".join(lines)))
```

```{python}
#| echo: false

import json as _json

# Write a machine-readable companion to the rendered failure section so the
# failures (with their troubleshooting hints) can be consumed by other tools.
if failures:
    export_payload = {
        "deployment": data.deployment_name,
        "generated_at": data.generated_at,
        "failures": [],
    }
    records = export_payload["failures"]
    for failed in failures:
        # Hints are keyed by scenario title; non-BDD tests have none.
        matched = hints.get(failed.scenario_title, {}) if failed.scenario_title else {}
        troubleshooting = None
        if matched:
            troubleshooting = {
                "likely_causes": matched.get("likely_causes", []),
                "suggested_steps": matched.get("suggested_steps", []),
                "docs_url": matched.get("docs_url"),
            }
        records.append(
            {
                "test": failed.nodeid,
                "scenario": failed.scenario_title,
                "feature": failed.feature_description,
                # Cap the traceback so the export stays small.
                "error_summary": (failed.longrepr or "")[:500],
                "troubleshooting": troubleshooting,
            }
        )
    Path("failures.json").write_text(_json.dumps(export_payload, indent=2) + "\n")
    display(Markdown(f"_Wrote {len(failures)} failure(s) to `failures.json`._"))
```
Comment on lines +132 to +158
Copy link

Copilot AI Mar 11, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This code block sets #| output: false but then calls display(...) to announce that failures.json was written; with Quarto/Jupyter this output is typically suppressed, so the message won’t render. If you want users to see the note, drop output: false (keep echo: false), or remove the display(...) call if the block is meant to be silent.

Copilot uses AI. Check for mistakes.
111 changes: 111 additions & 0 deletions selftests/test_gherkin.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
"""Tests for vip.gherkin module."""

from __future__ import annotations

from vip.gherkin import parse_feature_file


class TestParseFeatureFile:
    """Tests for parse_feature_file: tags, descriptions, scenarios, and steps."""

    def test_basic_feature(self, tmp_path):
        """A tagged feature with a description and one scenario parses fully."""
        feature = tmp_path / "test_basic.feature"
        feature.write_text(
            "@connect\n"
            "Feature: Connect authentication\n"
            " As a Posit Team administrator\n"
            " I want to verify Connect auth\n"
            "\n"
            " Scenario: User can log in\n"
            " Given Connect is accessible\n"
            " When a user enters credentials\n"
            " Then the user is authenticated\n"
        )
        parsed = parse_feature_file(feature)
        assert parsed["title"] == "Connect authentication"
        assert parsed["marker"] == "connect"
        assert "Posit Team administrator" in parsed["description"]
        scenarios = parsed["scenarios"]
        assert len(scenarios) == 1
        assert scenarios[0]["title"] == "User can log in"
        assert len(scenarios[0]["steps"]) == 3

    def test_multiple_scenarios(self, tmp_path):
        """Each Scenario keyword starts a new entry, in file order."""
        feature = tmp_path / "test_multi.feature"
        feature.write_text(
            "@prerequisites\n"
            "Feature: Components are reachable\n"
            "\n"
            " Scenario: Connect is reachable\n"
            " Given Connect is configured\n"
            " When I request the health endpoint\n"
            " Then the server responds OK\n"
            "\n"
            " Scenario: Workbench is reachable\n"
            " Given Workbench is configured\n"
            " When I request the health endpoint\n"
            " Then the server responds OK\n"
        )
        scenarios = parse_feature_file(feature)["scenarios"]
        assert len(scenarios) == 2
        assert scenarios[0]["title"] == "Connect is reachable"
        assert scenarios[1]["title"] == "Workbench is reachable"

    def test_and_but_steps(self, tmp_path):
        """And/But continuation keywords are kept as steps with their prefix."""
        feature = tmp_path / "test_steps.feature"
        feature.write_text(
            "@connect\n"
            "Feature: Steps test\n"
            "\n"
            " Scenario: Complex steps\n"
            " Given a precondition\n"
            " And another precondition\n"
            " When something happens\n"
            " Then result is expected\n"
            " But not this other thing\n"
        )
        steps = parse_feature_file(feature)["scenarios"][0]["steps"]
        assert len(steps) == 5
        assert steps[1].startswith("And ")
        assert steps[4].startswith("But ")

    def test_relative_to(self, tmp_path):
        """The reported file path is made relative to the given root."""
        nested = tmp_path / "tests" / "connect"
        nested.mkdir(parents=True)
        feature = nested / "test_auth.feature"
        feature.write_text("@connect\nFeature: Auth\n\n Scenario: Login\n Given ready\n")
        parsed = parse_feature_file(feature, relative_to=tmp_path)
        assert parsed["file"] == "tests/connect/test_auth.feature"

    def test_no_description(self, tmp_path):
        """A feature with no narrative lines yields an empty description."""
        feature = tmp_path / "test_no_desc.feature"
        feature.write_text(
            "@security\n"
            "Feature: HTTPS enforcement\n"
            "\n"
            " Scenario: All endpoints use HTTPS\n"
            " Given the server URL\n"
            " Then the scheme is HTTPS\n"
        )
        parsed = parse_feature_file(feature)
        assert parsed["title"] == "HTTPS enforcement"
        assert parsed["description"] == ""

    def test_scenario_outline(self, tmp_path):
        """Scenario Outline parses as a single scenario; Examples rows are not steps."""
        feature = tmp_path / "test_outline.feature"
        feature.write_text(
            "@connect\n"
            "Feature: Outline test\n"
            "\n"
            " Scenario Outline: Deploy <type> content\n"
            " Given Connect is accessible\n"
            " When I deploy a <type> bundle\n"
            " Then the content is running\n"
            "\n"
            " Examples:\n"
            " | type |\n"
            " | shiny |\n"
            " | rmd |\n"
        )
        scenarios = parse_feature_file(feature)["scenarios"]
        assert len(scenarios) == 1
        assert scenarios[0]["title"] == "Deploy <type> content"
        assert len(scenarios[0]["steps"]) == 3
21 changes: 21 additions & 0 deletions selftests/test_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,27 @@ def test_interactive_auth_option_registered(self, selftest_pytester):
result = selftest_pytester.runpytest("--help")
result.stdout.fnmatch_lines(["*--interactive-auth*"])

def test_json_report_includes_scenario_fields(self, selftest_pytester):
    """Results JSON includes scenario_title and feature_description keys."""
    selftest_pytester.makepyfile(
        """
        def test_plain():
            assert True
        """
    )
    results_path = selftest_pytester.path / "results.json"
    selftest_pytester.runpytest(
        "--vip-config=vip.toml",
        f"--vip-report={results_path}",
    )
    report = json.loads(results_path.read_text())
    entry = report["results"][0]
    # Non-BDD tests should have the keys present but set to None.
    assert "scenario_title" in entry
    assert "feature_description" in entry
    assert entry["scenario_title"] is None
    assert entry["feature_description"] is None

def test_interactive_auth_requires_connect_url(self, selftest_pytester):
"""--interactive-auth fails fast when Connect URL is not configured."""
selftest_pytester.makepyfile(
Expand Down
91 changes: 90 additions & 1 deletion selftests/test_reporting.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

from __future__ import annotations

from vip.reporting import ProductInfo, ReportData, TestResult, load_results
from vip.reporting import ProductInfo, ReportData, TestResult, load_results, load_troubleshooting


class TestTestResult:
Expand All @@ -21,6 +21,21 @@ def test_category_from_nested_nodeid(self):
)
assert r.category == "workbench"

def test_optional_fields_default_none(self):
    """BDD metadata fields default to None when not supplied."""
    result = TestResult(nodeid="a", outcome="passed")
    assert result.scenario_title is None
    assert result.feature_description is None

def test_optional_fields_set(self):
    """Explicitly supplied BDD metadata round-trips through the constructor."""
    result = TestResult(
        nodeid="a",
        outcome="passed",
        scenario_title="User can log in",
        feature_description="Connect authentication",
    )
    assert result.scenario_title == "User can log in"
    assert result.feature_description == "Connect authentication"


class TestReportData:
def test_counts(self):
Expand Down Expand Up @@ -113,6 +128,80 @@ def test_missing_file_returns_empty(self, tmp_path):
assert rd.deployment_name == "Posit Team"
assert rd.products == []

def test_load_with_optional_fields(self, tmp_path):
    """load_results keeps scenario metadata and leaves it None when absent."""
    import json

    payload = {
        "deployment_name": "Test",
        "generated_at": "2026-01-01T00:00:00+00:00",
        "exit_status": 0,
        "products": {},
        "results": [
            {
                "nodeid": "tests/connect/test_auth.py::test_login",
                "outcome": "failed",
                "duration": 1.0,
                "longrepr": "AssertionError",
                "markers": ["connect"],
                "scenario_title": "User can log in via the web UI",
                "feature_description": "Connect authentication",
            },
            {
                "nodeid": "tests/connect/test_auth.py::test_api",
                "outcome": "passed",
                "duration": 0.5,
                "markers": ["connect"],
            },
        ],
    }
    results_path = tmp_path / "results.json"
    results_path.write_text(json.dumps(payload))
    report = load_results(results_path)
    with_meta, without_meta = report.results[0], report.results[1]
    assert with_meta.scenario_title == "User can log in via the web UI"
    assert with_meta.feature_description == "Connect authentication"
    assert without_meta.scenario_title is None
    assert without_meta.feature_description is None


class TestLoadTroubleshooting:
    """Tests for load_troubleshooting's TOML parsing and failure modes."""

    def test_load_valid_toml(self, tmp_path):
        """A well-formed file yields one hint entry per scenario table."""
        cfg = tmp_path / "troubleshooting.toml"
        cfg.write_text(
            '["Connect server is reachable"]\n'
            'summary = "Verifies HTTP connectivity"\n'
            'likely_causes = ["Connect is not running", "Wrong URL"]\n'
            'suggested_steps = ["Check systemctl status"]\n'
            'docs_url = "https://docs.example.com"\n'
        )
        loaded = load_troubleshooting(cfg)
        assert "Connect server is reachable" in loaded
        hint = loaded["Connect server is reachable"]
        assert hint["summary"] == "Verifies HTTP connectivity"
        assert len(hint["likely_causes"]) == 2
        assert len(hint["suggested_steps"]) == 1
        assert hint["docs_url"] == "https://docs.example.com"

    def test_missing_file_returns_empty(self, tmp_path):
        """A nonexistent path yields an empty mapping rather than raising."""
        assert load_troubleshooting(tmp_path / "nonexistent.toml") == {}

    def test_multiple_scenarios(self, tmp_path):
        """Every top-level table becomes its own keyed entry."""
        cfg = tmp_path / "troubleshooting.toml"
        cfg.write_text(
            '["Scenario A"]\nsummary = "A"\nlikely_causes = []\nsuggested_steps = []\n\n'
            '["Scenario B"]\nsummary = "B"\nlikely_causes = []\nsuggested_steps = []\n'
        )
        loaded = load_troubleshooting(cfg)
        assert len(loaded) == 2
        assert "Scenario A" in loaded
        assert "Scenario B" in loaded

    def test_malformed_toml_returns_empty(self, tmp_path):
        """Invalid TOML is reported as an empty mapping, not an exception."""
        cfg = tmp_path / "bad.toml"
        cfg.write_text("this is not valid [[ toml {{")
        assert load_troubleshooting(cfg) == {}


class TestProductInfo:
def test_defaults(self):
Expand Down
Loading
Loading