Skip to content

Checker

checker

Check engine — orchestrates all checks and produces ProjectResult.

CheckEngine

Orchestrates project checks and produces results.

Source code in packages/axm-init/src/axm_init/core/checker.py
class CheckEngine:
    """Runs the configured checks against a project and builds a ProjectResult."""

    def __init__(self, project_path: Path, *, category: str | None = None) -> None:
        self.category = category
        self.project_path = project_path.resolve()
        self.context = detect_context(self.project_path)
        self.workspace_root = find_workspace_root(self.project_path)

    def _is_excluded(self, check_name: str, exclusions: set[str]) -> bool:
        """Return True when *check_name* starts with any excluded prefix."""
        for prefix in exclusions:
            if check_name.startswith(prefix):
                return True
        return False

    def _filter_checks(
        self,
        checks_to_run: dict[str, list[Callable[[Path], CheckResult]]],
        exclusions: set[str],
    ) -> tuple[list[Callable[[Path], CheckResult]], list[CheckResult], list[str]]:
        """Apply context-aware filtering, exclusions, and redirects."""
        runnable: list[Callable[[Path], CheckResult]] = []
        skipped_results: list[CheckResult] = []
        skipped_names: list[str] = []

        at_workspace_root = self.context == ProjectContext.WORKSPACE
        is_member = self.context == ProjectContext.MEMBER

        for category, fns in checks_to_run.items():
            for fn in fns:
                name = _get_check_name(fn)

                # User-configured exclusions win over everything else.
                if name and self._is_excluded(name, exclusions):
                    skipped_results.append(_make_excluded_result(name, category))
                    skipped_names.append(name)
                elif category == "workspace" and not at_workspace_root:
                    # Workspace-only checks never apply outside a workspace.
                    pass
                elif at_workspace_root and name in SKIP_FOR_WORKSPACE:
                    # Some checks are meaningless at the workspace root itself.
                    pass
                elif (
                    is_member
                    and name in REDIRECT_FOR_MEMBER
                    and self.workspace_root is not None
                ):
                    # CI/tooling checks for members are evaluated at the root.
                    runnable.append(_redirect_to_root(fn, self.workspace_root))
                else:
                    runnable.append(fn)

        return runnable, skipped_results, skipped_names

    def run(self) -> ProjectResult:
        """Run all checks (or filtered by category) and return result."""
        if not self.category:
            selected = ALL_CHECKS
        else:
            if self.category not in VALID_CATEGORIES:
                valid = ", ".join(sorted(VALID_CATEGORIES))
                raise ValueError(
                    f"Unknown category '{self.category}'. Valid: {valid}"
                )
            selected = {self.category: ALL_CHECKS[self.category]}

        runnable, excluded_results, excluded_names = self._filter_checks(
            selected, load_exclusions(self.project_path)
        )

        def _execute(fn: Callable[[Path], CheckResult]) -> CheckResult:
            # Every check receives the same project path.
            return fn(self.project_path)

        with ThreadPoolExecutor(max_workers=8) as pool:
            outcomes = list(pool.map(_execute, runnable))

        # Excluded checks are still reported, just not executed.
        outcomes.extend(excluded_results)

        return ProjectResult.from_checks(
            self.project_path,
            outcomes,
            context=self.context.value,
            workspace_root=self.workspace_root,
            excluded_checks=excluded_names,
        )
run()

Run all checks (or filtered by category) and return result.

Source code in packages/axm-init/src/axm_init/core/checker.py
def run(self) -> ProjectResult:
    """Run all checks (or filtered by category) and return result."""
    if not self.category:
        selected = ALL_CHECKS
    else:
        if self.category not in VALID_CATEGORIES:
            valid = ", ".join(sorted(VALID_CATEGORIES))
            raise ValueError(f"Unknown category '{self.category}'. Valid: {valid}")
        selected = {self.category: ALL_CHECKS[self.category]}

    runnable, excluded_results, excluded_names = self._filter_checks(
        selected, load_exclusions(self.project_path)
    )

    def _execute(fn: Callable[[Path], CheckResult]) -> CheckResult:
        # Every check receives the same project path.
        return fn(self.project_path)

    with ThreadPoolExecutor(max_workers=8) as pool:
        outcomes = list(pool.map(_execute, runnable))

    # Excluded checks are still reported, just not executed.
    outcomes.extend(excluded_results)

    return ProjectResult.from_checks(
        self.project_path,
        outcomes,
        context=self.context.value,
        workspace_root=self.workspace_root,
        excluded_checks=excluded_names,
    )

format_agent(result)

Agent-optimized output: passed_count=N, failed=full detail.

Minimizes tokens by replacing the full passed-check list with a count. Only failures carry actionable detail.

Source code in packages/axm-init/src/axm_init/core/checker.py
def format_agent(result: ProjectResult) -> dict[str, Any]:
    """Agent-optimized output: passed_count=N, failed=full detail.

    Replaces the list of passing checks with a simple count to keep the
    payload small; only failing checks carry full, actionable detail.
    """
    root = result.workspace_root

    passed_total = 0
    for check in result.checks:
        if check.passed:
            passed_total += 1

    failed_rows = []
    for failure in result.failures:
        failed_rows.append(
            {
                "name": failure.name,
                "message": failure.message,
                "details": failure.details,
                "fix": failure.fix,
            }
        )

    return {
        "score": result.score,
        "grade": result.grade.value,
        "context": result.context,
        "workspace_root": str(root) if root else None,
        "excluded_checks": result.excluded_checks,
        "passed_count": passed_total,
        "failed": failed_rows,
    }

format_json(result)

Format check result as JSON-serializable dict.

Source code in packages/axm-init/src/axm_init/core/checker.py
def format_json(result: ProjectResult) -> dict[str, Any]:
    """Format check result as JSON-serializable dict."""
    root = result.workspace_root

    category_map = {}
    for cat_name, cat_score in result.categories.items():
        category_map[cat_name] = {"earned": cat_score.earned, "total": cat_score.total}

    check_rows = []
    for check in result.checks:
        check_rows.append(
            {
                "name": check.name,
                "category": check.category,
                "passed": check.passed,
                "earned": check.earned,
                "weight": check.weight,
                "message": check.message,
            }
        )

    failure_rows = []
    for failure in result.failures:
        failure_rows.append(
            {
                "name": failure.name,
                "weight": failure.weight,
                "message": failure.message,
                "details": failure.details,
                "fix": failure.fix,
            }
        )

    return {
        "project": str(result.project_path),
        "score": result.score,
        "grade": result.grade.value,
        "context": result.context,
        "workspace_root": str(root) if root else None,
        "excluded_checks": result.excluded_checks,
        "categories": category_map,
        "checks": check_rows,
        "failures": failure_rows,
    }

format_report(result, *, verbose=False)

Format check result as human-readable report.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `result` | `ProjectResult` | Project check result. | *required* |
| `verbose` | `bool` | If True, list every individual check. If False (default), only show summary for passing categories and detail for failures. | `False` |
Source code in packages/axm-init/src/axm_init/core/checker.py
def format_report(result: ProjectResult, *, verbose: bool = False) -> str:
    """Render a check result as a human-readable text report.

    Args:
        result: Project check result.
        verbose: If True, list every individual check.
            If False (default), only show summary for passing categories
            and detail for failures.
    """
    out: list[str] = [
        f"📋 AXM Check — {result.project_path.name}",
        f"   Path: {result.project_path}",
    ]

    if result.context:
        context_line = f"   Context: {result.context.upper()}"
        if result.workspace_root:
            context_line = f"{context_line} (root: {result.workspace_root})"
        out.append(context_line)

    out.append("")

    # One section per category; detail level is controlled by `verbose`.
    for name, score in result.categories.items():
        members = [c for c in result.checks if c.category == name]
        out.append(f"  {name} ({score.earned}/{score.total})")
        out.extend(_format_category_checks(members, verbose=verbose))
        out.append("")

    # Overall score line, decorated with a grade emoji.
    emoji_by_grade = {"A": "🏆", "B": "✅", "C": "⚠️", "D": "🔧", "F": "❌"}
    badge = emoji_by_grade.get(result.grade.value, "")
    out.append(f"  Score: {result.score}/100 — Grade {result.grade.value} {badge}")
    out.append("")

    # Failure detail section, only when something failed.
    if result.failures:
        out.extend(_format_failures(result.failures))

    return "\n".join(out)