add scheduled daily test workflow with auto issue management. add --json flag to test-apps.py
This commit is contained in:
187
scripts/process-test-results.py
Normal file
187
scripts/process-test-results.py
Normal file
@@ -0,0 +1,187 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Process test-apps.py JSON output and manage GitHub issues.
|
||||
|
||||
Creates issues for newly failing apps, closes issues for recovered apps.
|
||||
Designed to run in GitHub Actions as part of the scheduled test workflow.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
from typing import Any
|
||||
|
||||
from help_formatter import StyledHelpFormatter
|
||||
|
||||
ISSUE_LABEL = "automated-test-failure"
|
||||
TITLE_PREFIX = "[Automated Test Failure]"
|
||||
|
||||
|
||||
def _run_gh(args: list[str]) -> subprocess.CompletedProcess:
    """Invoke the GitHub CLI with *args*, capturing stdout/stderr as text.

    Never raises on a non-zero exit status; callers inspect the returned
    CompletedProcess themselves.
    """
    command = ["gh"]
    command.extend(args)
    return subprocess.run(command, capture_output=True, text=True)
|
||||
|
||||
|
||||
def _parse_gh_json(result: subprocess.CompletedProcess) -> list | None:
|
||||
if result.returncode != 0 or not result.stdout.strip():
|
||||
return None
|
||||
try:
|
||||
return json.loads(result.stdout)
|
||||
except json.JSONDecodeError:
|
||||
return None
|
||||
|
||||
|
||||
def _ensure_label_exists() -> None:
    """Create ISSUE_LABEL in the repository if it is not already present.

    Best-effort: when `gh label list` fails or returns unparseable output
    (labels is None) we still attempt the create, which is a harmless
    error if the label turns out to exist already.
    """
    result = _run_gh(["label", "list", "--search", ISSUE_LABEL, "--json", "name"])
    labels = _parse_gh_json(result)
    # Original code duplicated the create call in two branches; a single
    # guarded call covers both "list failed" and "label missing".
    if labels is not None and any(label["name"] == ISSUE_LABEL for label in labels):
        return
    _run_gh([
        "label", "create", ISSUE_LABEL,
        "--description", "Automatically created when a scheduled app test fails",
        "--color", "d93f0b",
    ])
|
||||
|
||||
|
||||
def _find_open_issue(app_name: str) -> int | None:
    """Search for an open issue matching this app. Returns issue number or None."""
    wanted_title = f"{TITLE_PREFIX} {app_name}"
    listing = _run_gh([
        "issue", "list",
        "--label", ISSUE_LABEL,
        "--state", "open",
        "--search", f"{wanted_title} in:title",
        "--json", "number,title",
    ])
    issues = _parse_gh_json(listing) or []
    # The search is fuzzy; double-check the app name really appears in the title.
    matches = (issue["number"] for issue in issues if app_name in issue.get("title", ""))
    return next(matches, None)
|
||||
|
||||
|
||||
def _create_issue(app: dict[str, Any], run_url: str) -> None:
    """File a GitHub issue describing a failing app.

    Args:
        app: Result dict with app_name, app_id, source, url and optional
            error / warnings keys (shape produced by TestResult.to_dict).
        run_url: Workflow-run URL; the link line is omitted when empty so
            the body never contains a dead "[Workflow run]()" link.
    """
    title = f"{TITLE_PREFIX} {app['app_name']}"
    body = (
        f"The scheduled test run detected a failure for **{app['app_name']}**.\n\n"
        "| Field | Value |\n"
        "|-------|-------|\n"
        f"| App ID | `{app['app_id']}` |\n"
        f"| Source | {app['source']} |\n"
        f"| URL | {app['url']} |\n"
        f"| Error | {app.get('error', 'unknown')} |\n\n"
    )
    if app.get("warnings"):
        body += "**Warnings:**\n"
        for w in app["warnings"]:
            body += f"- {w}\n"
        body += "\n"
    if run_url:
        body += f"[Workflow run]({run_url})\n"

    _run_gh([
        "issue", "create",
        "--title", title,
        "--body", body,
        "--label", ISSUE_LABEL,
    ])
    print(f" Created issue: {title}")
|
||||
|
||||
|
||||
def _close_issue(issue_number: int, app_name: str, run_url: str) -> None:
    """Close a failure-tracking issue with a short recovery comment."""
    comment = "\n\n".join([
        f"**{app_name}** is passing again in the latest scheduled test run.",
        f"[Workflow run]({run_url})",
    ])
    _run_gh(["issue", "close", str(issue_number), "--comment", comment])
    print(f" Closed issue #{issue_number}: {app_name} recovered")
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: read a results JSON file and reconcile GitHub issues.

    Opens issues for newly failing apps, closes issues for recovered apps.
    Returns 1 when the results file cannot be loaded or the run itself
    reported an error, 0 otherwise.
    """
    parser = argparse.ArgumentParser(
        description="Process test results JSON and manage GitHub issues.",
        formatter_class=StyledHelpFormatter,
    )
    parser.add_argument(
        "results_file",
        help="Path to test-results.json from test-apps.py --json",
    )
    parser.add_argument(
        "--run-url",
        default="",
        help="URL of the GitHub Actions workflow run",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Print planned actions without creating or closing issues",
    )
    opts = parser.parse_args()

    try:
        with open(opts.results_file, "r", encoding="utf-8") as handle:
            payload = json.load(handle)
    except (json.JSONDecodeError, FileNotFoundError) as exc:
        print(f"Error loading results file: {exc}")
        return 1

    # test-apps.py --json emits a top-level "error" key when the whole run failed.
    if "error" in payload:
        print(f"Test run error: {payload['error']}")
        return 1

    entries = payload.get("results", [])
    if not entries:
        print("No test results to process.")
        return 0

    dry_run = opts.dry_run
    if dry_run:
        print("DRY RUN: no issues will be created or closed\n")
    else:
        _ensure_label_exists()

    failing = [entry for entry in entries if not entry["passed"]]
    passing = [entry for entry in entries if entry["passed"]]

    summary = payload.get("summary", {})
    total_n = summary.get("total", len(entries))
    pass_n = summary.get("passed", len(passing))
    fail_n = summary.get("failed", len(failing))
    print(f"Processing {total_n} results: {pass_n} passed, {fail_n} failed")

    # Newly failing apps: open an issue unless one is already tracking it.
    for entry in failing:
        if dry_run:
            print(f" Would create issue: {TITLE_PREFIX} {entry['app_name']}")
            print(f" Error: {entry.get('error', 'unknown')}")
            continue
        open_issue = _find_open_issue(entry["app_name"])
        if open_issue:
            print(f" Skipped {entry['app_name']}: open issue #{open_issue} already exists")
        else:
            _create_issue(entry, opts.run_url)

    # Recovered apps: close any open tracking issue.
    for entry in passing:
        if dry_run:
            print(f" Would check/close issue for: {entry['app_name']} (passing)")
            continue
        open_issue = _find_open_issue(entry["app_name"])
        if open_issue:
            _close_issue(open_issue, entry["app_name"], opts.run_url)

    return 0
|
||||
|
||||
|
||||
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main())
|
||||
@@ -181,6 +181,22 @@ class TestResult:
|
||||
self.warnings: list[str] = []
|
||||
self.duration_ms = 0
|
||||
|
||||
def to_dict(self) -> dict[str, Any]:
    """Serialize this test result into a JSON-compatible dict.

    Key order matches the original hand-written mapping so downstream
    JSON output is byte-for-byte stable.
    """
    field_names = (
        "app_name",
        "app_id",
        "source",
        "url",
        "passed",
        "version",
        "apk_count",
        "apk_urls",
        "preferred_apk_index",
        "error",
        "warnings",
        "duration_ms",
    )
    return {name: getattr(self, name) for name in field_names}
|
||||
|
||||
def __repr__(self) -> str:
|
||||
status = "PASS" if self.passed else "FAIL"
|
||||
return f"{status}: {self.app_name} ({self.source})"
|
||||
@@ -571,6 +587,15 @@ def print_result(
|
||||
print(f" APK: {url}")
|
||||
|
||||
|
||||
def _print_json_error(message: str) -> None:
|
||||
output = {
|
||||
"summary": {"total": 0, "passed": 0, "failed": 0, "warned": 0, "wall_time_ms": 0, "cumulative_time_ms": 0},
|
||||
"results": [],
|
||||
"error": message,
|
||||
}
|
||||
print(json.dumps(output, indent=2))
|
||||
|
||||
|
||||
def main() -> int:
|
||||
load_dotenv()
|
||||
|
||||
@@ -609,6 +634,11 @@ def main() -> int:
|
||||
default=8,
|
||||
help="Number of parallel workers (default: 8, use 1 for serial)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--json",
|
||||
action="store_true",
|
||||
help="Output results as JSON (for CI/scripting)",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
json_file = args.file
|
||||
@@ -617,12 +647,16 @@ def main() -> int:
|
||||
verbose = args.verbose
|
||||
show_apks = args.apks
|
||||
workers = max(args.jobs, 1)
|
||||
json_output = args.json
|
||||
|
||||
try:
|
||||
with open(json_file, "r", encoding="utf-8") as f:
|
||||
data = json.load(f)
|
||||
except (json.JSONDecodeError, FileNotFoundError) as e:
|
||||
print(f"Error loading {json_file}: {e}")
|
||||
if json_output:
|
||||
_print_json_error(f"Error loading {json_file}: {e}")
|
||||
else:
|
||||
print(f"Error loading {json_file}: {e}")
|
||||
return 1
|
||||
|
||||
apps = data.get("apps", [])
|
||||
@@ -632,21 +666,27 @@ def main() -> int:
|
||||
apps = [a for a in apps if name_filter in a.get("name", "").lower()]
|
||||
|
||||
if not apps:
|
||||
print("No apps matched the filter.")
|
||||
if json_output:
|
||||
_print_json_error("No apps matched the filter.")
|
||||
else:
|
||||
print("No apps matched the filter.")
|
||||
return 1
|
||||
|
||||
has_token = bool(os.environ.get("GITHUB_TOKEN"))
|
||||
github_count = sum(1 for a in apps if _effective_source(a) == "GitHub")
|
||||
if github_count > 0 and not has_token:
|
||||
print(
|
||||
f"\033[33mNote\033[0m: {github_count} GitHub apps to test, "
|
||||
"but GITHUB_TOKEN is not set. You may hit rate limits.\n"
|
||||
" Set it with: export GITHUB_TOKEN=<your_token>\n"
|
||||
)
|
||||
if not json_output:
|
||||
has_token = bool(os.environ.get("GITHUB_TOKEN"))
|
||||
github_count = sum(1 for a in apps if _effective_source(a) == "GitHub")
|
||||
if github_count > 0 and not has_token:
|
||||
print(
|
||||
f"\033[33mNote\033[0m: {github_count} GitHub apps to test, "
|
||||
"but GITHUB_TOKEN is not set. You may hit rate limits.\n"
|
||||
" Set it with: export GITHUB_TOKEN=<your_token>\n"
|
||||
)
|
||||
|
||||
serial = workers == 1 or len(apps) == 1
|
||||
mode = "serial" if serial else f"{workers} workers"
|
||||
print(f"Testing {len(apps)} app(s) ({mode})...\n")
|
||||
|
||||
if not json_output:
|
||||
mode = "serial" if serial else f"{workers} workers"
|
||||
print(f"Testing {len(apps)} app(s) ({mode})...\n")
|
||||
|
||||
wall_start = time.monotonic()
|
||||
|
||||
@@ -655,7 +695,8 @@ def main() -> int:
|
||||
for app in apps:
|
||||
result = test_app(app)
|
||||
results.append(result)
|
||||
print_result(result, verbose=verbose, show_apks=show_apks)
|
||||
if not json_output:
|
||||
print_result(result, verbose=verbose, show_apks=show_apks)
|
||||
else:
|
||||
result_map: dict[str, TestResult] = {}
|
||||
with ThreadPoolExecutor(max_workers=workers) as pool:
|
||||
@@ -663,10 +704,10 @@ def main() -> int:
|
||||
for future in as_completed(futures):
|
||||
result = future.result()
|
||||
result_map[result.app_id] = result
|
||||
# Print in original order
|
||||
results = [result_map[app["id"]] for app in apps]
|
||||
for result in results:
|
||||
print_result(result, verbose=verbose, show_apks=show_apks)
|
||||
if not json_output:
|
||||
for result in results:
|
||||
print_result(result, verbose=verbose, show_apks=show_apks)
|
||||
|
||||
wall_ms = int((time.monotonic() - wall_start) * 1000)
|
||||
passed = sum(1 for r in results if r.passed)
|
||||
@@ -674,15 +715,29 @@ def main() -> int:
|
||||
warned = sum(1 for r in results if r.warnings)
|
||||
sum_time = sum(r.duration_ms for r in results)
|
||||
|
||||
print(f"\n{'=' * 60}")
|
||||
print(f"Results: {passed} passed, {failed} failed, {warned} with warnings")
|
||||
print(f"Time: {wall_ms / 1000:.1f}s wall, {sum_time / 1000:.1f}s cumulative")
|
||||
if json_output:
|
||||
output = {
|
||||
"summary": {
|
||||
"total": len(results),
|
||||
"passed": passed,
|
||||
"failed": failed,
|
||||
"warned": warned,
|
||||
"wall_time_ms": wall_ms,
|
||||
"cumulative_time_ms": sum_time,
|
||||
},
|
||||
"results": [r.to_dict() for r in results],
|
||||
}
|
||||
print(json.dumps(output, indent=2))
|
||||
else:
|
||||
print(f"\n{'=' * 60}")
|
||||
print(f"Results: {passed} passed, {failed} failed, {warned} with warnings")
|
||||
print(f"Time: {wall_ms / 1000:.1f}s wall, {sum_time / 1000:.1f}s cumulative")
|
||||
|
||||
if failed > 0:
|
||||
print(f"\nFailed apps:")
|
||||
for r in results:
|
||||
if not r.passed:
|
||||
print(f" - {r.app_name}: {r.error}")
|
||||
if failed > 0:
|
||||
print(f"\nFailed apps:")
|
||||
for r in results:
|
||||
if not r.passed:
|
||||
print(f" - {r.app_name}: {r.error}")
|
||||
|
||||
return 1 if failed > 0 else 0
|
||||
|
||||
|
||||
Reference in New Issue
Block a user