Skip to content

Index

rules

Rules subpackage — modular project rule implementations.

BareExceptRule dataclass

Bases: ProjectRule

Detect bare except clauses (except: without type).

Source code in packages/axm-audit/src/axm_audit/core/rules/practices.py
@dataclass
@register_rule("practices")
class BareExceptRule(ProjectRule):
    """Detect bare except clauses (``except:`` without an exception type)."""

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "PRACTICE_BARE_EXCEPT"

    def check(self, project_path: Path) -> CheckResult:
        """Check for bare except clauses in the project."""
        # Short-circuit when the standard src/ layout is missing.
        precheck = self.check_src(project_path)
        if precheck is not None:
            return precheck

        src_root = project_path / "src"
        offenders: list[dict[str, str | int]] = []

        for source_file in get_python_files(src_root):
            ast_cache = get_ast_cache()
            if ast_cache:
                module = ast_cache.get_or_parse(source_file)
            else:
                module = parse_file_safe(source_file)
            if module is None:
                continue
            self._find_bare_excepts(module, source_file, src_root, offenders)

        total = len(offenders)
        ok = total == 0
        # Each bare except costs 20 points, floored at zero.
        score = max(0, 100 - total * 20)

        return CheckResult(
            rule_id=self.rule_id,
            passed=ok,
            message=f"{total} bare except(s) found",
            severity=Severity.INFO if ok else Severity.WARNING,
            details={
                "bare_except_count": total,
                "locations": offenders,
                "score": score,
            },
            fix_hint=None
            if ok
            else "Use specific exception types (e.g., except ValueError:)",
        )

    def _find_bare_excepts(
        self,
        tree: ast.Module,
        path: Path,
        src_path: Path,
        bare_excepts: list[dict[str, str | int]],
    ) -> None:
        """Find bare except clauses in a syntax tree."""
        # Compute the src-relative name once; it is the same for every hit.
        rel_name = str(path.relative_to(src_path))
        for node in ast.walk(tree):
            if not isinstance(node, ast.ExceptHandler):
                continue
            if node.type is not None:
                # Typed handler, e.g. ``except ValueError:`` — fine.
                continue
            bare_excepts.append({"file": rel_name, "line": node.lineno})
rule_id property

Unique identifier for this rule.

check(project_path)

Check for bare except clauses in the project.

Source code in packages/axm-audit/src/axm_audit/core/rules/practices.py
def check(self, project_path: Path) -> CheckResult:
    """Check for bare except clauses in the project."""
    # Bail out immediately if the project has no usable src/ layout.
    skipped = self.check_src(project_path)
    if skipped is not None:
        return skipped

    src_root = project_path / "src"
    found: list[dict[str, str | int]] = []

    for py_path in get_python_files(src_root):
        ast_cache = get_ast_cache()
        tree = (
            ast_cache.get_or_parse(py_path)
            if ast_cache
            else parse_file_safe(py_path)
        )
        if tree is not None:
            self._find_bare_excepts(tree, py_path, src_root, found)

    n = len(found)
    ok = n == 0
    # 20-point deduction per bare except, never below zero.
    penalty_score = max(0, 100 - n * 20)

    return CheckResult(
        rule_id=self.rule_id,
        passed=ok,
        message=f"{n} bare except(s) found",
        severity=Severity.INFO if ok else Severity.WARNING,
        details={
            "bare_except_count": n,
            "locations": found,
            "score": penalty_score,
        },
        fix_hint=None
        if ok
        else "Use specific exception types (e.g., except ValueError:)",
    )

BlockingIORule dataclass

Bases: ProjectRule

Detect blocking I/O anti-patterns.

Finds:

- time.sleep() inside async def functions.
- HTTP calls (requests.* / httpx.*) without timeout= kwarg.

Source code in packages/axm-audit/src/axm_audit/core/rules/practices.py
@dataclass
@register_rule("practices")
class BlockingIORule(ProjectRule):
    """Detect blocking I/O anti-patterns.

    Finds:
    - ``time.sleep()`` inside ``async def`` functions.
    - HTTP calls (``requests.*`` / ``httpx.*``) without ``timeout=`` kwarg.
    """

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "PRACTICE_BLOCKING_IO"

    def check(self, project_path: Path) -> CheckResult:
        """Check for blocking I/O patterns in the project.

        Returns a ``CheckResult`` scoring 100 minus 15 points per violation.
        """
        early = self.check_src(project_path)
        if early is not None:
            return early

        src_path = project_path / "src"

        violations: list[dict[str, str | int]] = []

        for path in get_python_files(src_path):
            cache = get_ast_cache()
            tree = cache.get_or_parse(path) if cache else parse_file_safe(path)
            if tree is None:
                # Unparseable file — skip rather than fail the whole rule.
                continue
            rel = str(path.relative_to(src_path))
            self._check_async_sleep(tree, rel, violations)
            self._check_http_no_timeout(tree, rel, violations)

        count = len(violations)
        passed = count == 0
        # Each violation costs 15 points, floored at zero.
        score = max(0, 100 - count * 15)

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=f"{count} blocking-IO violation(s) found",
            severity=Severity.WARNING if not passed else Severity.INFO,
            details={"violations": violations, "score": score},
            fix_hint=(
                "Use asyncio.sleep() instead of time.sleep() in async context; "
                "add timeout= to HTTP calls"
            )
            if not passed
            else None,
        )

    # -- private helpers -------------------------------------------------------

    @staticmethod
    def _check_async_sleep(
        tree: ast.Module,
        rel: str,
        violations: list[dict[str, str | int]],
    ) -> None:
        """Find ``time.sleep()`` calls that execute in async context.

        Only each ``async def``'s own scope is scanned: nested function
        definitions are NOT descended into, because a ``time.sleep()``
        inside a nested *sync* ``def`` does not block the enclosing
        coroutine, and a nested ``async def`` is visited on its own by
        the outer ``ast.walk`` (descending into it here would report
        its violations twice).
        """
        for node in ast.walk(tree):
            if not isinstance(node, ast.AsyncFunctionDef):
                continue
            # Breadth-first traversal bounded to this function's scope.
            pending: list[ast.AST] = list(node.body)
            index = 0
            while index < len(pending):
                child = pending[index]
                index += 1
                if isinstance(child, ast.FunctionDef | ast.AsyncFunctionDef):
                    # New scope — handled separately (or not async at all).
                    continue
                pending.extend(ast.iter_child_nodes(child))
                if (
                    isinstance(child, ast.Call)
                    and isinstance(child.func, ast.Attribute)
                    and child.func.attr == "sleep"
                    and isinstance(child.func.value, ast.Name)
                    and child.func.value.id == "time"
                ):
                    violations.append(
                        {
                            "file": rel,
                            "line": child.lineno,
                            "issue": "time.sleep in async",
                        }
                    )

    @staticmethod
    def _check_http_no_timeout(
        tree: ast.Module,
        rel: str,
        violations: list[dict[str, str | int]],
    ) -> None:
        """Find HTTP calls without ``timeout=`` keyword argument."""
        for node in ast.walk(tree):
            # Only attribute calls whose method name is a known HTTP verb.
            if not (
                isinstance(node, ast.Call)
                and isinstance(node.func, ast.Attribute)
                and node.func.attr in _HTTP_METHODS
            ):
                continue

            # ``_is_http_call`` narrows the receiver to requests.*/httpx.*.
            if not _is_http_call(node.func.value):
                continue

            has_timeout = any(kw.arg == "timeout" for kw in node.keywords)
            if not has_timeout:
                violations.append(
                    {
                        "file": rel,
                        "line": node.lineno,
                        "issue": "HTTP call without timeout",
                    }
                )
rule_id property

Unique identifier for this rule.

check(project_path)

Check for blocking I/O patterns in the project.

Source code in packages/axm-audit/src/axm_audit/core/rules/practices.py
def check(self, project_path: Path) -> CheckResult:
    """Check for blocking I/O patterns in the project."""
    # Missing src/ layout short-circuits the whole check.
    gate = self.check_src(project_path)
    if gate is not None:
        return gate

    source_root = project_path / "src"
    findings: list[dict[str, str | int]] = []

    for py_file in get_python_files(source_root):
        ast_cache = get_ast_cache()
        if ast_cache:
            module = ast_cache.get_or_parse(py_file)
        else:
            module = parse_file_safe(py_file)
        if module is None:
            continue
        rel_name = str(py_file.relative_to(source_root))
        self._check_async_sleep(module, rel_name, findings)
        self._check_http_no_timeout(module, rel_name, findings)

    total = len(findings)
    ok = total == 0
    # 15 points lost per violation, never below zero.
    score = max(0, 100 - total * 15)

    return CheckResult(
        rule_id=self.rule_id,
        passed=ok,
        message=f"{total} blocking-IO violation(s) found",
        severity=Severity.INFO if ok else Severity.WARNING,
        details={"violations": findings, "score": score},
        fix_hint=None
        if ok
        else (
            "Use asyncio.sleep() instead of time.sleep() in async context; "
            "add timeout= to HTTP calls"
        ),
    )

CircularImportRule dataclass

Bases: ProjectRule

Detect circular imports via import graph + Tarjan's SCC algorithm.

Source code in packages/axm-audit/src/axm_audit/core/rules/architecture.py
@dataclass
@register_rule("architecture")
class CircularImportRule(ProjectRule):
    """Detect circular imports via import graph + Tarjan's SCC algorithm."""

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "ARCH_CIRCULAR"

    def check(self, project_path: Path) -> CheckResult:
        """Check for circular imports in the project."""
        precheck = self.check_src(project_path)
        if precheck is not None:
            return precheck

        cycles, score = self._analyze_cycles(project_path / "src")
        ok = len(cycles) == 0

        return CheckResult(
            rule_id=self.rule_id,
            passed=ok,
            message=f"{len(cycles)} circular import(s) found",
            severity=Severity.INFO if ok else Severity.ERROR,
            details={"cycles": cycles, "score": score},
            fix_hint="Break cycles by using lazy imports or restructuring"
            if cycles
            else None,
        )

    def _analyze_cycles(self, src_path: Path) -> tuple[list[list[str]], int]:
        """Build import graph and detect cycles."""
        # Each detected cycle costs 20 points, floored at zero.
        cycles = _tarjan_scc(self._build_import_graph(src_path))
        return cycles, max(0, 100 - len(cycles) * 20)

    def _build_import_graph(self, src_path: Path) -> dict[str, set[str]]:
        """Build the module import graph from source files."""
        graph: dict[str, set[str]] = defaultdict(set)

        for source_file in get_python_files(src_path):
            # Package markers are excluded from the graph.
            if source_file.name == "__init__.py":
                continue
            ast_cache = get_ast_cache()
            tree = (
                ast_cache.get_or_parse(source_file)
                if ast_cache
                else parse_file_safe(source_file)
            )
            if tree is None:
                continue
            module_name = _get_module_name(source_file, src_path)
            if not module_name:
                continue
            # Touching graph[module_name] registers the node even when it
            # imports nothing, so isolated modules still appear.
            graph[module_name].update(_extract_imports(tree))

        return dict(graph)
rule_id property

Unique identifier for this rule.

check(project_path)

Check for circular imports in the project.

Source code in packages/axm-audit/src/axm_audit/core/rules/architecture.py
def check(self, project_path: Path) -> CheckResult:
    """Check for circular imports in the project."""
    precheck = self.check_src(project_path)
    if precheck is not None:
        return precheck

    cycles, score = self._analyze_cycles(project_path / "src")
    n_cycles = len(cycles)

    return CheckResult(
        rule_id=self.rule_id,
        passed=n_cycles == 0,
        message=f"{n_cycles} circular import(s) found",
        severity=Severity.INFO if n_cycles == 0 else Severity.ERROR,
        details={"cycles": cycles, "score": score},
        fix_hint=None
        if not cycles
        else "Break cycles by using lazy imports or restructuring",
    )

ComplexityRule dataclass

Bases: ProjectRule

Analyse cyclomatic complexity via radon Python API.

Scoring: 100 - (high_complexity_count * 10), min 0. High complexity = CC >= 10 (industry standard).

Falls back to radon cc --json subprocess when the Python API is not importable (e.g. auditing a project that does not declare radon in its own dev dependencies).

Source code in packages/axm-audit/src/axm_audit/core/rules/complexity.py
@dataclass
@register_rule("complexity")
class ComplexityRule(ProjectRule):
    """Analyse cyclomatic complexity via radon Python API.

    Scoring: 100 - (high_complexity_count * 10), min 0.
    High complexity = CC >= 10 (industry standard).

    Falls back to ``radon cc --json`` subprocess when the Python API
    is not importable (e.g. auditing a project that does not declare
    ``radon`` in its own dev dependencies).
    """

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "QUALITY_COMPLEXITY"

    def check(self, project_path: Path) -> CheckResult:
        """Check project complexity with radon.

        Only the project's ``src/`` directory is analysed; ``check_src``
        short-circuits with its own result when that layout is missing.
        """
        early = self.check_src(project_path)
        if early is not None:
            return early

        src_path = project_path / "src"

        # Try Python API first, fall back to subprocess
        cc_visit = _try_import_radon()
        if cc_visit is not None:
            return self._check_via_api(src_path, cc_visit)

        return self._check_via_subprocess(src_path)

    # ------------------------------------------------------------------
    # Private helpers
    # ------------------------------------------------------------------

    def _check_via_api(
        self,
        src_path: Path,
        cc_visit: Callable[..., list[Any]],
    ) -> CheckResult:
        """Analyse complexity using the radon Python API.

        ``cc_visit`` is injected by the caller (``_try_import_radon``)
        so the API-vs-subprocess decision stays in ``check``.
        """
        high_complexity_count = 0
        all_functions: list[dict[str, str | int]] = []

        for py_file in src_path.rglob("*.py"):
            try:
                source = py_file.read_text(encoding="utf-8")
                blocks = cc_visit(source)
            except (SyntaxError, UnicodeDecodeError):
                # Unparseable or non-UTF-8 file — skip rather than abort.
                continue

            for block in blocks:
                # Guard against radon result objects lacking a complexity
                # attribute; only scored blocks are counted.
                if not hasattr(block, "complexity"):
                    continue
                cc: int = block.complexity
                if cc >= COMPLEXITY_THRESHOLD:
                    high_complexity_count += 1
                    # Qualify methods with their class name for readability.
                    classname = getattr(block, "classname", "")
                    name = f"{classname}.{block.name}" if classname else block.name
                    all_functions.append(
                        {
                            "file": py_file.name,
                            "function": name,
                            "cc": cc,
                        }
                    )

        return self._build_result(high_complexity_count, all_functions)

    def _check_via_subprocess(self, src_path: Path) -> CheckResult:
        """Analyse complexity by shelling out to ``radon cc --json``."""
        radon_bin = shutil.which("radon")
        if radon_bin is None:
            # Neither the API nor the CLI exists: fail loudly rather than
            # silently report a perfect score.
            return CheckResult(
                rule_id=self.rule_id,
                passed=False,
                message=("radon not found — complexity analysis skipped"),
                severity=Severity.ERROR,
                details={"score": 0},
                fix_hint=(
                    "Run 'uv sync' at workspace root or "
                    "'uv pip install axm-audit' to make radon available"
                ),
            )

        try:
            proc = subprocess.run(  # noqa: S603
                [radon_bin, "cc", "--json", str(src_path)],
                capture_output=True,
                text=True,
                check=False,
            )
            # Empty stdout is treated as "no data" rather than a JSON error.
            data: dict[str, list[dict[str, object]]] = (
                json.loads(proc.stdout) if proc.stdout.strip() else {}
            )
        except (json.JSONDecodeError, OSError) as exc:
            logger.warning(
                "radon cc --json failed: %s",
                exc,
                exc_info=True,
            )
            return CheckResult(
                rule_id=self.rule_id,
                passed=False,
                message="radon cc --json failed",
                severity=Severity.ERROR,
                details={"score": 0},
                fix_hint="Check radon installation",
            )

        return self._process_radon_output(data)

    def _process_radon_output(
        self, data: dict[str, list[dict[str, object]]]
    ) -> CheckResult:
        """Process JSON output from radon cc.

        ``data`` maps file paths to the per-block dicts emitted by
        ``radon cc --json``.
        """
        high_complexity_count = 0
        all_functions: list[dict[str, str | int]] = []

        for file_path, blocks in data.items():
            file_name = Path(file_path).name
            for block in blocks:
                # Non-dict entries are skipped defensively.
                if not isinstance(block, dict):
                    continue
                raw_cc = block.get("complexity", 0)
                # Coerce to int; assumes any str value here is numeric
                # (int() would raise otherwise) — TODO confirm radon output.
                cc = int(raw_cc) if isinstance(raw_cc, int | float | str) else 0
                if cc >= COMPLEXITY_THRESHOLD:
                    high_complexity_count += 1
                    raw_name = str(block.get("name", ""))
                    classname = str(block.get("classname", ""))
                    # Qualify methods with their class name, as in the API path.
                    name = f"{classname}.{raw_name}" if classname else raw_name
                    all_functions.append(
                        {
                            "file": file_name,
                            "function": name,
                            "cc": cc,
                        }
                    )

        return self._build_result(high_complexity_count, all_functions)

    def _build_result(
        self,
        high_complexity_count: int,
        all_functions: list[dict[str, str | int]],
    ) -> CheckResult:
        """Build the final ``CheckResult`` from computed metrics."""
        # Worst offenders first, ranked by cyclomatic complexity.
        top_offenders = sorted(all_functions, key=lambda x: x["cc"], reverse=True)
        # 10-point deduction per high-complexity function, floored at zero.
        score = max(0, 100 - high_complexity_count * 10)
        passed = score >= PASS_THRESHOLD

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=(
                f"Complexity score: {score}/100 "
                f"({high_complexity_count} high-complexity functions)"
            ),
            severity=Severity.WARNING if not passed else Severity.INFO,
            details={
                "high_complexity_count": high_complexity_count,
                "top_offenders": top_offenders,
                "score": score,
            },
            fix_hint=(
                "Refactor complex functions into smaller units"
                if high_complexity_count > 0
                else None
            ),
        )
rule_id property

Unique identifier for this rule.

check(project_path)

Check project complexity with radon.

Source code in packages/axm-audit/src/axm_audit/core/rules/complexity.py
def check(self, project_path: Path) -> CheckResult:
    """Check project complexity with radon."""
    gate = self.check_src(project_path)
    if gate is not None:
        return gate

    source_dir = project_path / "src"

    # Prefer the in-process radon API; shell out only when unavailable.
    api_visit = _try_import_radon()
    if api_visit is None:
        return self._check_via_subprocess(source_dir)
    return self._check_via_api(source_dir, api_visit)

CouplingMetricRule dataclass

Bases: ProjectRule

Measure module coupling via fan-in/fan-out analysis.

Scores based on the number of modules whose fan-out exceeds the threshold: score = 100 - N(over) * 5.

Source code in packages/axm-audit/src/axm_audit/core/rules/architecture.py
@dataclass
@register_rule("architecture")
class CouplingMetricRule(ProjectRule):
    """Measure module coupling via fan-in/fan-out analysis.

    Scores based on the number of modules whose fan-out exceeds
    the threshold: ``score = 100 - N(over) * 5``.
    """

    fan_out_threshold: int = 10

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "ARCH_COUPLING"

    def check(self, project_path: Path) -> CheckResult:
        """Check coupling metrics for the project."""
        precheck = self.check_src(project_path)
        if precheck is not None:
            return precheck

        metrics = _compute_coupling_metrics(
            project_path / "src", self.fan_out_threshold
        )
        n_over: int = metrics["n_over_threshold"]
        over: list[dict[str, Any]] = metrics["over_threshold"]
        avg: float = metrics["avg_coupling"]
        # 5-point deduction per over-threshold module, floored at zero.
        score = max(0, 100 - n_over * 5)

        if n_over == 0:
            msg = (
                "Coupling: 0 modules above threshold "
                f"(max fan-out: {metrics['max_fan_out']})"
            )
        else:
            msg = (
                f"Coupling: {n_over} module(s) above threshold "
                f"(-{n_over * 5} pts)"
            )

        # Hint lists each offending module with its fan-out.
        hint = None
        if over:
            bullets = "\n".join(
                f"  \u2022 {m['module']} (fan-out: {m['fan_out']})" for m in over
            )
            hint = "Reduce imports in:\n" + bullets

        return CheckResult(
            rule_id=self.rule_id,
            passed=n_over == 0,
            message=msg,
            severity=Severity.INFO if n_over == 0 else Severity.WARNING,
            details={
                "max_fan_out": metrics["max_fan_out"],
                "max_fan_in": metrics["max_fan_in"],
                "avg_coupling": round(avg, 2),
                "score": score,
                "n_over_threshold": n_over,
                "over_threshold": over,
            },
            fix_hint=hint,
        )
rule_id property

Unique identifier for this rule.

check(project_path)

Check coupling metrics for the project.

Source code in packages/axm-audit/src/axm_audit/core/rules/architecture.py
def check(self, project_path: Path) -> CheckResult:
    """Check coupling metrics for the project."""
    gate = self.check_src(project_path)
    if gate is not None:
        return gate

    metrics = _compute_coupling_metrics(
        project_path / "src", self.fan_out_threshold
    )
    n_over: int = metrics["n_over_threshold"]
    over: list[dict[str, Any]] = metrics["over_threshold"]
    avg: float = metrics["avg_coupling"]
    score = max(0, 100 - n_over * 5)

    # Message shape differs between the clean and failing cases.
    if n_over == 0:
        msg = (
            "Coupling: 0 modules above threshold "
            f"(max fan-out: {metrics['max_fan_out']})"
        )
    else:
        msg = (
            f"Coupling: {n_over} module(s) above threshold "
            f"(-{n_over * 5} pts)"
        )

    hint = None
    if over:
        bullets = "\n".join(
            f"  \u2022 {m['module']} (fan-out: {m['fan_out']})" for m in over
        )
        hint = "Reduce imports in:\n" + bullets

    return CheckResult(
        rule_id=self.rule_id,
        passed=n_over == 0,
        message=msg,
        severity=Severity.INFO if n_over == 0 else Severity.WARNING,
        details={
            "max_fan_out": metrics["max_fan_out"],
            "max_fan_in": metrics["max_fan_in"],
            "avg_coupling": round(avg, 2),
            "score": score,
            "n_over_threshold": n_over,
            "over_threshold": over,
        },
        fix_hint=hint,
    )

DeadCodeRule

Bases: ProjectRule

Detect dead (unreferenced) code using axm-ast.

Gracefully skips if axm-ast is not available in the environment.

Scoring: 100 - (dead_symbols_count * 5), min 0.

Source code in packages/axm-audit/src/axm_audit/core/rules/dead_code.py
@register_rule("lint")
class DeadCodeRule(ProjectRule):
    """Detect dead (unreferenced) code using axm-ast.

    Gracefully skips if axm-ast is not available in the environment.

    Scoring: 100 - (dead_symbols_count * 5), min 0.
    """

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "QUALITY_DEAD_CODE"

    def _skip(self, reason: str) -> CheckResult:
        """Return graceful skip result."""
        return CheckResult(
            rule_id=self.rule_id,
            passed=True,  # Passing so it doesn't fail the build
            message=f"Skipped: {reason}",
            severity=Severity.INFO,
            details={"skipped": True, "reason": reason, "score": 100.0},
        )

    def check(self, project_path: Path) -> CheckResult:
        """Check for dead code using axm-ast dead-code via subprocess."""
        availability = self._check_availability(project_path)
        if availability is not None:
            return availability

        result = self._run_analysis(project_path)
        if isinstance(result, CheckResult):
            return result

        dead_symbols = self._parse_dead_symbols(result)
        if dead_symbols is None:
            return CheckResult(
                rule_id=self.rule_id,
                passed=False,
                message="Failed to parse axm-ast output",
                severity=Severity.ERROR,
                details={
                    "stdout": result.stdout,
                    "stderr": result.stderr,
                    "score": 0.0,
                },
            )
        return self._build_result(dead_symbols)

    def _check_availability(self, project_path: Path) -> CheckResult | None:
        """Return a skip result if axm-ast is not on the PATH, else None."""
        if shutil.which("axm-ast") is None:
            return self._skip("axm-ast is not available in the environment")
        return None

    def _run_analysis(
        self,
        project_path: Path,
    ) -> subprocess.CompletedProcess[str] | CheckResult:
        """Run axm-ast dead-code directly (resolved from PATH, not target venv)."""
        try:
            return subprocess.run(  # noqa: S603
                ["axm-ast", "dead-code", str(project_path), "--json"],  # noqa: S607
                capture_output=True,
                text=True,
                check=False,
                timeout=300,
            )
        except (FileNotFoundError, subprocess.TimeoutExpired):
            return CheckResult(
                rule_id=self.rule_id,
                passed=False,
                message="Failed to execute axm-ast",
                severity=Severity.ERROR,
                details={"score": 0.0},
                fix_hint="Ensure axm-ast is installed in the audit environment",
            )

    def _parse_dead_symbols(
        self,
        result: subprocess.CompletedProcess[str],
    ) -> list[dict[str, str]] | None:
        """Parse JSON output from axm-ast, returning the dead symbols list.

        Returns ``None`` when the output is not valid JSON.
        """
        try:
            out = result.stdout or "[]"
            data = json.loads(out)
        except json.JSONDecodeError:
            return None

        if isinstance(data, dict):
            symbols: list[dict[str, str]] = data.get("dead_symbols", [])
            return symbols
        return data if isinstance(data, list) else []

    def _build_result(self, dead_symbols: list[dict[str, str]]) -> CheckResult:
        """Build a CheckResult from the dead symbols list."""
        dead_count = len(dead_symbols)
        score = max(0.0, 100.0 - (dead_count * 5.0))
        passed = dead_count == 0

        message = (
            "No dead code detected."
            if passed
            else f"Found {dead_count} dead (unreferenced) symbol(s)."
        )

        details: dict[str, object] = {
            "score": score,
            "dead_count": dead_count,
            "symbols": dead_symbols,
        }

        if dead_symbols:
            details["top_offenders"] = dead_symbols[:10]

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=message,
            severity=Severity.WARNING if dead_count > 0 else Severity.INFO,
            details=details,
            fix_hint="Remove dead code or mark exported in __all__ if public API",
        )
rule_id property

Unique identifier for this rule.

check(project_path)

Check for dead code using axm-ast dead-code via subprocess.

Source code in packages/axm-audit/src/axm_audit/core/rules/dead_code.py
def check(self, project_path: Path) -> CheckResult:
    """Check for dead code using axm-ast dead-code via subprocess."""
    skip_result = self._check_availability(project_path)
    if skip_result is not None:
        return skip_result

    outcome = self._run_analysis(project_path)
    if isinstance(outcome, CheckResult):
        # Subprocess could not be executed; pass the error through.
        return outcome

    symbols = self._parse_dead_symbols(outcome)
    if symbols is not None:
        return self._build_result(symbols)

    # None means the tool emitted something that was not valid JSON.
    return CheckResult(
        rule_id=self.rule_id,
        passed=False,
        message="Failed to parse axm-ast output",
        severity=Severity.ERROR,
        details={
            "stdout": outcome.stdout,
            "stderr": outcome.stderr,
            "score": 0.0,
        },
    )

DependencyAuditRule dataclass

Bases: ProjectRule

Scan dependencies for known vulnerabilities via pip-audit.

Scoring: 100 - (vuln_count * 15), min 0.

Source code in packages/axm-audit/src/axm_audit/core/rules/dependencies.py
@dataclass
@register_rule("deps")
class DependencyAuditRule(ProjectRule):
    """Scan dependencies for known vulnerabilities via pip-audit.

    Scoring: 100 - (vuln_count * 15), min 0.
    """

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "DEPS_AUDIT"

    def check(self, project_path: Path) -> CheckResult:
        """Check dependencies for known CVEs."""
        try:
            report = _run_pip_audit(project_path)
        except FileNotFoundError:
            # The pip-audit binary is missing entirely.
            return CheckResult(
                rule_id=self.rule_id,
                passed=False,
                message="pip-audit not available",
                severity=Severity.ERROR,
                details={"vuln_count": 0, "score": 0},
                fix_hint="Install with: uv add --dev pip-audit",
            )
        except RuntimeError as exc:
            # pip-audit ran but failed; surface its own error message.
            return CheckResult(
                rule_id=self.rule_id,
                passed=False,
                message=str(exc),
                severity=Severity.ERROR,
                details={"vuln_count": 0, "score": 0},
                fix_hint="Check pip-audit installation: uv run pip-audit --version",
            )

        findings = _parse_vulns(report)
        total = len(findings)
        # 15 points lost per vulnerable package, floored at zero.
        score = max(0, 100 - total * 15)
        ok = score >= PASS_THRESHOLD

        if total == 0:
            summary = "No known vulnerabilities"
        else:
            summary = f"{total} vulnerable package(s) found"

        return CheckResult(
            rule_id=self.rule_id,
            passed=ok,
            message=summary,
            severity=Severity.INFO if ok else Severity.WARNING,
            details={
                "vuln_count": total,
                "score": score,
                "top_vulns": [_summarize_vuln(v) for v in findings[:5]],
            },
            fix_hint="Run: pip-audit --fix to remediate" if total > 0 else None,
        )
rule_id property

Unique identifier for this rule.

check(project_path)

Check dependencies for known CVEs.

Source code in packages/axm-audit/src/axm_audit/core/rules/dependencies.py
def check(self, project_path: Path) -> CheckResult:
    """Check dependencies for known CVEs.

    Returns an ERROR result when pip-audit is missing or fails; otherwise
    scores 100 minus 15 per vulnerable package, floored at zero.
    """
    failure: tuple[str, str] | None = None
    data = None
    try:
        data = _run_pip_audit(project_path)
    except FileNotFoundError:
        failure = (
            "pip-audit not available",
            "Install with: uv add --dev pip-audit",
        )
    except RuntimeError as exc:
        failure = (
            str(exc),
            "Check pip-audit installation: uv run pip-audit --version",
        )

    if failure is not None:
        msg, hint = failure
        return CheckResult(
            rule_id=self.rule_id,
            passed=False,
            message=msg,
            severity=Severity.ERROR,
            details={"vuln_count": 0, "score": 0},
            fix_hint=hint,
        )

    vulns = _parse_vulns(data)
    vuln_count = len(vulns)
    # 15 points off per vulnerable package, floored at zero.
    score = max(0, 100 - vuln_count * 15)
    passed = score >= PASS_THRESHOLD

    if vuln_count:
        summary = f"{vuln_count} vulnerable package(s) found"
    else:
        summary = "No known vulnerabilities"

    return CheckResult(
        rule_id=self.rule_id,
        passed=passed,
        message=summary,
        severity=Severity.INFO if passed else Severity.WARNING,
        details={
            "vuln_count": vuln_count,
            "score": score,
            "top_vulns": [_summarize_vuln(v) for v in vulns[:5]],
        },
        fix_hint="Run: pip-audit --fix to remediate" if vuln_count else None,
    )

DependencyHygieneRule dataclass

Bases: ProjectRule

Check for unused/missing/transitive dependencies via deptry.

Scoring: 100 - (issue_count * 10), min 0.

Source code in packages/axm-audit/src/axm_audit/core/rules/dependencies.py
@dataclass
@register_rule("deps")
class DependencyHygieneRule(ProjectRule):
    """Check for unused/missing/transitive dependencies via deptry.

    Scoring: 100 - (issue_count * 10), min 0.
    """

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "DEPS_HYGIENE"

    def check(self, project_path: Path) -> CheckResult:
        """Check dependency hygiene with deptry.

        Returns:
            CheckResult scoring 100 minus 10 per issue (floored at zero),
            or an ERROR result when deptry cannot be run or parsed.
        """
        try:
            issues = _run_deptry(project_path)
        except FileNotFoundError:
            return self._tool_failure(
                "deptry not available",
                "Install with: uv add --dev deptry",
            )
        except (RuntimeError, json.JSONDecodeError) as exc:
            if isinstance(exc, RuntimeError):
                msg = f"deptry failed: {exc}"
            else:
                msg = "deptry output parse error"
            return self._tool_failure(
                msg,
                "Check deptry installation: uv run deptry --version",
            )

        issue_count = len(issues)
        # 10 points off per dependency issue, floored at zero.
        score = max(0, 100 - issue_count * 10)
        passed = score >= PASS_THRESHOLD

        if issue_count:
            summary = f"{issue_count} dependency issue(s) found"
        else:
            summary = "Clean dependencies (0 issues)"

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=summary,
            severity=Severity.INFO if passed else Severity.WARNING,
            details={
                "issue_count": issue_count,
                "score": score,
                "top_issues": [_format_issue(i) for i in issues[:5]],
            },
            fix_hint="Run: deptry . to see details" if issue_count else None,
        )

    def _tool_failure(self, message: str, hint: str) -> CheckResult:
        """Build an ERROR-severity result for a deptry run that failed."""
        return CheckResult(
            rule_id=self.rule_id,
            passed=False,
            message=message,
            severity=Severity.ERROR,
            details={"issue_count": 0, "score": 0},
            fix_hint=hint,
        )
rule_id property

Unique identifier for this rule.

check(project_path)

Check dependency hygiene with deptry.

Source code in packages/axm-audit/src/axm_audit/core/rules/dependencies.py
def check(self, project_path: Path) -> CheckResult:
    """Check dependency hygiene with deptry.

    Tool failures are reported as ERROR results instead of raising.
    """
    error_details = {"issue_count": 0, "score": 0}
    try:
        issues = _run_deptry(project_path)
    except FileNotFoundError:
        return CheckResult(
            rule_id=self.rule_id,
            passed=False,
            message="deptry not available",
            severity=Severity.ERROR,
            details=error_details,
            fix_hint="Install with: uv add --dev deptry",
        )
    except (RuntimeError, json.JSONDecodeError) as exc:
        msg = (
            f"deptry failed: {exc}"
            if isinstance(exc, RuntimeError)
            else "deptry output parse error"
        )
        return CheckResult(
            rule_id=self.rule_id,
            passed=False,
            message=msg,
            severity=Severity.ERROR,
            details=error_details,
            fix_hint="Check deptry installation: uv run deptry --version",
        )

    issue_count = len(issues)
    # 10 points off per dependency issue, floored at zero.
    score = max(0, 100 - issue_count * 10)
    passed = score >= PASS_THRESHOLD

    if issue_count:
        summary = f"{issue_count} dependency issue(s) found"
    else:
        summary = "Clean dependencies (0 issues)"

    return CheckResult(
        rule_id=self.rule_id,
        passed=passed,
        message=summary,
        severity=Severity.INFO if passed else Severity.WARNING,
        details={
            "issue_count": issue_count,
            "score": score,
            "top_issues": [_format_issue(i) for i in issues[:5]],
        },
        fix_hint="Run: deptry . to see details" if issue_count else None,
    )

DiffSizeRule dataclass

Bases: ProjectRule

Warn when uncommitted changes are too large.

Encourages smaller, focused commits/PRs.

Scoring: 100 if ≤ ideal lines changed, degrading linearly to 0 at max lines. Defaults: ideal=400, max=1200. Overridable via [tool.axm-audit] in pyproject.toml.

Gracefully skips if not in a git repository or git is not installed.

Source code in packages/axm-audit/src/axm_audit/core/rules/quality.py
@dataclass
@register_rule("lint")
class DiffSizeRule(ProjectRule):
    """Warn when uncommitted changes are too large.

    Encourages smaller, focused commits/PRs.

    Scoring: 100 if at most *ideal* lines changed, degrading linearly to 0
    at *max* lines.  Defaults: ideal=400, max=1200.  Overridable via
    ``[tool.axm-audit]`` in ``pyproject.toml``.

    Gracefully skips if not in a git repository or git is not installed.
    """

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "QUALITY_DIFF_SIZE"

    def check(self, project_path: Path) -> CheckResult:
        """Check uncommitted diff size."""
        if shutil.which("git") is None:
            return self._skip("git not installed")
        if self._is_git_repo(project_path):
            return self._measure_diff(project_path)
        return self._skip("not a git repo")

    # -- private helpers -------------------------------------------------------

    def _skip(self, reason: str) -> CheckResult:
        """Return a graceful skip result, counted as a pass with full score."""
        return CheckResult(
            rule_id=self.rule_id,
            passed=True,
            message=f"{reason} — diff size check skipped",
            severity=Severity.INFO,
            details={"lines_changed": 0, "score": 100},
        )

    @staticmethod
    def _is_git_repo(project_path: Path) -> bool:
        """Check whether *project_path* is inside a git repository."""
        try:
            proc = subprocess.run(
                ["git", "rev-parse", "--git-dir"],
                cwd=str(project_path),
                capture_output=True,
                text=True,
                check=False,
            )
        except OSError:
            # git itself blew up (e.g. not executable): treat as "no repo".
            return False
        return proc.returncode == 0

    def _measure_diff(self, project_path: Path) -> CheckResult:
        """Run ``git diff --stat HEAD`` and score the result."""
        try:
            proc = subprocess.run(
                ["git", "diff", "--stat", "HEAD"],
                cwd=str(project_path),
                capture_output=True,
                text=True,
                check=False,
            )
        except OSError:
            return self._skip("git command failed")

        stat_output = proc.stdout.strip()
        if not stat_output:
            # Empty --stat output: nothing uncommitted, full score.
            return CheckResult(
                rule_id=self.rule_id,
                passed=True,
                message="No uncommitted changes",
                severity=Severity.INFO,
                details={"lines_changed": 0, "score": 100},
            )

        ideal, max_lines = _read_diff_config(project_path)
        lines_changed = self._parse_stat(stat_output)
        score = self._compute_score(lines_changed, ideal, max_lines)
        passed = score >= PASS_THRESHOLD

        hint = None
        if not passed:
            hint = f"Consider splitting into smaller commits (< {ideal} lines ideal)"

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=f"Diff size: {lines_changed} lines changed (score {score}/100)",
            severity=Severity.INFO if passed else Severity.WARNING,
            details={"lines_changed": lines_changed, "score": score},
            fix_hint=hint,
        )

    @staticmethod
    def _parse_stat(stdout: str) -> int:
        """Extract total lines changed from ``git diff --stat`` output."""
        # The totals appear on the final line of --stat output; earlier
        # lines are per-file breakdowns.
        summary = stdout.strip().split("\n")[-1]
        match = _DIFF_STAT_RE.search(summary)
        if match is None:
            return 0
        # Groups 2/3 are insertions/deletions; either may be absent.
        return int(match.group(2) or 0) + int(match.group(3) or 0)

    @staticmethod
    def _compute_score(
        lines_changed: int,
        ideal: int = _DIFF_IDEAL,
        max_lines: int = _DIFF_MAX,
    ) -> int:
        """Compute score from lines changed: 100→0 over [ideal, max_lines]."""
        if lines_changed <= ideal:
            return 100
        if lines_changed >= max_lines:
            return 0
        # Linear interpolation between the two thresholds.
        return int(100 - (lines_changed - ideal) * 100 / (max_lines - ideal))
rule_id property

Unique identifier for this rule.

check(project_path)

Check uncommitted diff size.

Source code in packages/axm-audit/src/axm_audit/core/rules/quality.py
def check(self, project_path: Path) -> CheckResult:
    """Check uncommitted diff size.

    Skips gracefully when git is missing or this is not a git repo.
    """
    if shutil.which("git") is None:
        return self._skip("git not installed")
    if self._is_git_repo(project_path):
        return self._measure_diff(project_path)
    return self._skip("not a git repo")

DocstringCoverageRule dataclass

Bases: ProjectRule

Calculate docstring coverage for public functions.

Public functions are those not starting with underscore.

Source code in packages/axm-audit/src/axm_audit/core/rules/practices.py
@dataclass
@register_rule("practices")
class DocstringCoverageRule(ProjectRule):
    """Calculate docstring coverage for public functions.

    Public functions are those not starting with underscore.
    """

    # Minimum fraction of public functions that must carry a docstring.
    min_coverage: float = 0.80

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "PRACTICE_DOCSTRING"

    def check(self, project_path: Path) -> CheckResult:
        """Check docstring coverage in the project.

        Args:
            project_path: Project root; sources are scanned under ``src/``.

        Returns:
            CheckResult with coverage ratio, counts, and missing locations.
        """
        early = self.check_src(project_path)
        if early is not None:
            return early

        src_path = project_path / "src"
        documented, missing = self._analyze_docstrings(src_path)
        return self._build_result(documented, missing)

    def _build_result(
        self,
        documented: int,
        missing: list[str],
    ) -> CheckResult:
        """Build CheckResult from docstring analysis."""
        total = documented + len(missing)
        # A project with no public functions counts as fully covered.
        coverage = documented / total if total > 0 else 1.0
        passed = coverage >= self.min_coverage
        score = int(coverage * 100)

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=f"Docstring coverage: {coverage:.0%} ({documented}/{total})",
            severity=Severity.WARNING if not passed else Severity.INFO,
            details={
                "coverage": round(coverage, 2),
                "total": total,
                "documented": documented,
                "missing": missing,
                "score": score,
            },
            fix_hint="Add docstrings to public functions" if missing else None,
        )

    def _analyze_docstrings(self, src_path: Path) -> tuple[int, list[str]]:
        """Analyze docstring coverage in source files.

        Returns:
            Tuple of (documented_count, list of missing function locations).
        """
        documented = 0
        missing: list[str] = []
        # Hoisted out of the loop: the cache handle is loop-invariant.
        cache = get_ast_cache()

        for path in get_python_files(src_path):
            tree = cache.get_or_parse(path) if cache else parse_file_safe(path)
            if tree is None:
                # Unparseable file: skip rather than fail the whole rule.
                continue

            rel_path = path.relative_to(src_path)
            for node in ast.walk(tree):
                if not isinstance(node, ast.FunctionDef | ast.AsyncFunctionDef):
                    continue
                if node.name.startswith("_"):
                    # Underscore-prefixed names (private and dunder) are exempt.
                    continue

                if self._has_docstring(node):
                    documented += 1
                else:
                    missing.append(f"{rel_path}:{node.name}")

        return documented, missing

    def _has_docstring(self, node: ast.FunctionDef | ast.AsyncFunctionDef) -> bool:
        """Check if a function node has a docstring."""
        if not node.body:
            return False
        first = node.body[0]
        # A docstring is a bare string-constant expression as the first statement.
        return (
            isinstance(first, ast.Expr)
            and isinstance(first.value, ast.Constant)
            and isinstance(first.value.value, str)
        )
rule_id property

Unique identifier for this rule.

check(project_path)

Check docstring coverage in the project.

Source code in packages/axm-audit/src/axm_audit/core/rules/practices.py
def check(self, project_path: Path) -> CheckResult:
    """Check docstring coverage in the project."""
    # Early result from the shared src/ precondition check, if any.
    early = self.check_src(project_path)
    if early is not None:
        return early

    documented, missing = self._analyze_docstrings(project_path / "src")
    return self._build_result(documented, missing)

DuplicationRule dataclass

Bases: ProjectRule

Detect copy-pasted code via AST structure hashing.

Extracts function and method bodies, normalises them by stripping variable names and locations, hashes each body, and reports groups whose hash appears more than once.

Scoring: 100 - (dup_groups * 10), min 0.

Source code in packages/axm-audit/src/axm_audit/core/rules/duplication.py
@dataclass
@register_rule("architecture")
class DuplicationRule(ProjectRule):
    """Detect copy-pasted code via AST structure hashing.

    Extracts function and method bodies, normalises them by stripping
    variable names and locations, hashes each body, and reports
    groups whose hash appears more than once.

    Scoring: ``100 - (dup_groups * 10)``, min 0.
    """

    # Minimum body length (in lines) for a function to be considered.
    min_lines: int = _MIN_DUP_LINES

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "ARCH_DUPLICATION"

    def check(self, project_path: Path) -> CheckResult:
        """Check for code duplication in the project.

        Returns:
            CheckResult scoring 100 minus 10 per duplicate pair, floored at 0.
        """
        early = self.check_src(project_path)
        if early is not None:
            return early

        src_path = project_path / "src"

        clones = self._find_duplicates(src_path)
        dup_count = len(clones)
        score = max(0, 100 - dup_count * 10)
        passed = dup_count == 0

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=f"{dup_count} duplicate block(s) found",
            severity=Severity.WARNING if not passed else Severity.INFO,
            # Cap the reported clone list to keep result payloads small.
            details={"dup_count": dup_count, "clones": clones[:20], "score": score},
            fix_hint=(
                "Extract duplicated code into shared functions" if not passed else None
            ),
        )

    def _find_duplicates(self, src_path: Path) -> list[dict[str, str]]:
        """Hash function bodies and find duplicates.

        Returns:
            One source/target record per duplicate, pairing the first
            occurrence of a hash with each later occurrence.
        """
        seen = self._collect_function_hashes(src_path)

        clones: list[dict[str, str]] = []
        for entries in seen.values():
            if len(entries) < _MIN_CLONE_GROUP:
                continue
            first = entries[0]
            for other in entries[1:]:
                clones.append(
                    {
                        "source": f"{first[0]}:{first[1]}:{first[2]}",
                        "target": f"{other[0]}:{other[1]}:{other[2]}",
                    }
                )
        return clones

    def _collect_function_hashes(
        self,
        src_path: Path,
    ) -> dict[str, list[tuple[str, str, int]]]:
        """Scan source files and hash each function body.

        Returns:
            Mapping of body hash -> list of (relative path, name, lineno).
        """
        seen: dict[str, list[tuple[str, str, int]]] = defaultdict(list)
        # Hoisted out of the loop: the cache handle is loop-invariant.
        _cache = _get_ast_cache()

        for path in _get_python_files(src_path):
            tree = _cache.get_or_parse(path) if _cache else _parse_file_safe(path)
            if tree is None:
                continue
            rel = str(path.relative_to(src_path))
            self._hash_functions_in_tree(tree, rel, seen)

        return seen

    def _hash_functions_in_tree(
        self,
        tree: ast.Module,
        rel: str,
        seen: dict[str, list[tuple[str, str, int]]],
    ) -> None:
        """Hash each function body in a single AST and add to *seen*."""
        for node in ast.walk(tree):
            if not isinstance(node, ast.FunctionDef | ast.AsyncFunctionDef):
                continue
            end = getattr(node, "end_lineno", None) or node.lineno
            # Functions shorter than min_lines are below the dedup threshold.
            if end - node.lineno + 1 < self.min_lines:
                continue
            body_str = _normalize_ast(node)
            # MD5 is a fast fingerprint here, not a security primitive.
            h = hashlib.md5(body_str.encode(), usedforsecurity=False).hexdigest()
            seen[h].append((rel, node.name, node.lineno))
rule_id property

Unique identifier for this rule.

check(project_path)

Check for code duplication in the project.

Source code in packages/axm-audit/src/axm_audit/core/rules/duplication.py
def check(self, project_path: Path) -> CheckResult:
    """Check for code duplication in the project."""
    early = self.check_src(project_path)
    if early is not None:
        return early

    clones = self._find_duplicates(project_path / "src")
    dup_count = len(clones)
    # 10 points off per duplicate pair, floored at zero.
    score = max(0, 100 - dup_count * 10)
    passed = not clones

    hint = None
    if not passed:
        hint = "Extract duplicated code into shared functions"

    return CheckResult(
        rule_id=self.rule_id,
        passed=passed,
        message=f"{dup_count} duplicate block(s) found",
        severity=Severity.INFO if passed else Severity.WARNING,
        details={"dup_count": dup_count, "clones": clones[:20], "score": score},
        fix_hint=hint,
    )

FormattingRule dataclass

Bases: ProjectRule

Run ruff format --check and score based on unformatted file count.

Scoring: 100 - (unformatted_count * 5), min 0.

Source code in packages/axm-audit/src/axm_audit/core/rules/quality.py
@dataclass
@register_rule("lint")
class FormattingRule(ProjectRule):
    """Run ``ruff format --check`` and score based on unformatted file count.

    Scoring: 100 - (unformatted_count * 5), min 0.
    """

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "QUALITY_FORMAT"

    def check(self, project_path: Path) -> CheckResult:
        """Check project formatting with ruff format --check."""
        early = self.check_src(project_path)
        if early is not None:
            return early

        targets, checked = _get_audit_targets(project_path)

        result = run_in_project(
            ["ruff", "format", "--check", *targets],
            project_path,
            with_packages=["ruff"],
            capture_output=True,
            text=True,
            check=False,
        )

        unformatted_files = self._parse_unformatted_files(result)
        unformatted_count = len(unformatted_files)

        # Five points off per unformatted file, floored at zero.
        score = max(0, 100 - unformatted_count * 5)
        passed = score >= PASS_THRESHOLD

        hint = None
        if unformatted_files:
            hint = f"Run: ruff format {checked}"

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=f"Format score: {score}/100 ({unformatted_count} unformatted)",
            severity=Severity.INFO if passed else Severity.WARNING,
            details={
                "unformatted_count": unformatted_count,
                "unformatted_files": unformatted_files[:20],
                "score": score,
                "checked": checked,
            },
            fix_hint=hint,
        )

    @staticmethod
    def _parse_unformatted_files(
        result: subprocess.CompletedProcess[str],
    ) -> list[str]:
        """Extract unformatted file paths from ruff format --check output."""
        if result.returncode == 0:
            # Exit code 0: everything already formatted.
            return []
        paths: list[str] = []
        for raw in result.stdout.strip().split("\n"):
            stripped = raw.strip()
            # Skip blanks and diagnostic lines; keep only file entries.
            if not stripped or raw.startswith(("error", "warning")):
                continue
            paths.append(stripped)
        return paths
rule_id property

Unique identifier for this rule.

check(project_path)

Check project formatting with ruff format --check.

Source code in packages/axm-audit/src/axm_audit/core/rules/quality.py
def check(self, project_path: Path) -> CheckResult:
    """Check project formatting with ruff format --check."""
    early = self.check_src(project_path)
    if early is not None:
        return early

    targets, checked = _get_audit_targets(project_path)

    proc = run_in_project(
        ["ruff", "format", "--check", *targets],
        project_path,
        with_packages=["ruff"],
        capture_output=True,
        text=True,
        check=False,
    )

    unformatted_files = self._parse_unformatted_files(proc)
    unformatted_count = len(unformatted_files)
    # Five points off per unformatted file, floored at zero.
    score = max(0, 100 - unformatted_count * 5)
    passed = score >= PASS_THRESHOLD

    return CheckResult(
        rule_id=self.rule_id,
        passed=passed,
        message=f"Format score: {score}/100 ({unformatted_count} unformatted)",
        severity=Severity.INFO if passed else Severity.WARNING,
        details={
            "unformatted_count": unformatted_count,
            "unformatted_files": unformatted_files[:20],
            "score": score,
            "checked": checked,
        },
        fix_hint=f"Run: ruff format {checked}" if unformatted_count else None,
    )

GodClassRule dataclass

Bases: ProjectRule

Detect god classes (too many lines or methods).

Source code in packages/axm-audit/src/axm_audit/core/rules/architecture.py
@dataclass
@register_rule("architecture")
class GodClassRule(ProjectRule):
    """Detect god classes (too many lines or methods)."""

    # A class exceeding either threshold is flagged.
    max_lines: int = 500
    max_methods: int = 15

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "ARCH_GOD_CLASS"

    def check(self, project_path: Path) -> CheckResult:
        """Check for god classes in the project.

        Returns:
            CheckResult scoring 100 minus 15 per god class, floored at zero.
        """
        early = self.check_src(project_path)
        if early is not None:
            return early

        src_path = project_path / "src"

        god_classes = self._find_god_classes(src_path)

        score = max(0, 100 - len(god_classes) * 15)
        passed = len(god_classes) == 0

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=f"{len(god_classes)} god class(es) found",
            severity=Severity.WARNING if not passed else Severity.INFO,
            details={"god_classes": god_classes, "score": score},
            fix_hint="Split large classes into smaller, focused classes"
            if god_classes
            else None,
        )

    def _find_god_classes(self, src_path: Path) -> list[dict[str, str | int]]:
        """Identify god classes in the source directory."""
        god_classes: list[dict[str, str | int]] = []
        # Hoisted out of the loop: the cache handle is loop-invariant.
        cache = get_ast_cache()

        for path in get_python_files(src_path):
            tree = cache.get_or_parse(path) if cache else parse_file_safe(path)
            if tree is None:
                # Unparseable file: skip rather than abort the rule.
                continue

            for node in ast.walk(tree):
                if isinstance(node, ast.ClassDef):
                    self._check_class_node(node, path, src_path, god_classes)

        return god_classes

    def _check_class_node(
        self,
        node: ast.ClassDef,
        file_path: Path,
        src_root: Path,
        results: list[dict[str, str | int]],
    ) -> None:
        """Analyze a single class node for god class metrics.

        Appends a record to *results* when the class exceeds either the
        line-count or method-count threshold.
        """
        # end_lineno may be missing or None; treat that as 0 lines.
        end = getattr(node, "end_lineno", None)
        lines = end - node.lineno + 1 if end else 0

        # Count only direct (sync and async) methods in the class body.
        methods = sum(
            1
            for child in node.body
            if isinstance(child, ast.FunctionDef | ast.AsyncFunctionDef)
        )

        if lines > self.max_lines or methods > self.max_methods:
            results.append(
                {
                    "name": node.name,
                    "file": str(file_path.relative_to(src_root)),
                    "lines": lines,
                    "methods": methods,
                }
            )
rule_id property

Unique identifier for this rule.

check(project_path)

Check for god classes in the project.

Source code in packages/axm-audit/src/axm_audit/core/rules/architecture.py
def check(self, project_path: Path) -> CheckResult:
    """Check for god classes in the project."""
    early = self.check_src(project_path)
    if early is not None:
        return early

    god_classes = self._find_god_classes(project_path / "src")
    count = len(god_classes)
    # 15 points off per god class, floored at zero.
    score = max(0, 100 - count * 15)
    passed = count == 0

    hint = None
    if god_classes:
        hint = "Split large classes into smaller, focused classes"

    return CheckResult(
        rule_id=self.rule_id,
        passed=passed,
        message=f"{count} god class(es) found",
        severity=Severity.INFO if passed else Severity.WARNING,
        details={"god_classes": god_classes, "score": score},
        fix_hint=hint,
    )

LintingRule dataclass

Bases: ProjectRule

Run ruff and score based on issue count.

Scoring: 100 - (issue_count * 2), min 0.

Source code in packages/axm-audit/src/axm_audit/core/rules/quality.py
@dataclass
@register_rule("lint")
class LintingRule(ProjectRule):
    """Run ruff and score based on issue count.

    Scoring: 100 - (issue_count * 2), min 0.
    """

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "QUALITY_LINT"

    def check(self, project_path: Path) -> CheckResult:
        """Check project linting with ruff on src/ and tests/."""
        early = self.check_src(project_path)
        if early is not None:
            return early
        targets, checked = _get_audit_targets(project_path)

        result = run_in_project(
            ["ruff", "check", "--output-format=json", *targets],
            project_path,
            with_packages=["ruff"],
            capture_output=True,
            text=True,
            check=False,
        )

        raw = result.stdout.strip()
        issues = []
        if raw:
            try:
                issues = json.loads(raw)
            except json.JSONDecodeError:
                # Unparseable tool output is treated as zero issues.
                issues = []

        issue_count = len(issues)
        # Two points off per issue, floored at zero.
        score = max(0, 100 - issue_count * 2)
        passed = score >= PASS_THRESHOLD

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=f"Lint score: {score}/100 ({issue_count} issues)",
            severity=Severity.INFO if passed else Severity.WARNING,
            details={
                "issue_count": issue_count,
                "score": score,
                "checked": checked,
                "issues": self._format_issues(issues),
            },
            fix_hint=f"Run: ruff check --fix {checked}" if issue_count > 0 else None,
        )

    @staticmethod
    def _format_issues(issues: list) -> list[dict[str, str | int]]:
        """Shape the first 20 ruff violations for agent-mode consumers."""
        return [
            {
                "file": i.get("filename", ""),
                "line": i.get("location", {}).get("row", 0),
                "code": i.get("code", ""),
                "message": i.get("message", ""),
            }
            for i in issues[:20]
        ]
rule_id property

Unique identifier for this rule.

check(project_path)

Check project linting with ruff on src/ and tests/.

Source code in packages/axm-audit/src/axm_audit/core/rules/quality.py
def check(self, project_path: Path) -> CheckResult:
    """Check project linting with ruff on src/ and tests/."""
    early = self.check_src(project_path)
    if early is not None:
        return early
    targets, checked = _get_audit_targets(project_path)

    proc = run_in_project(
        ["ruff", "check", "--output-format=json", *targets],
        project_path,
        with_packages=["ruff"],
        capture_output=True,
        text=True,
        check=False,
    )

    try:
        issues = json.loads(proc.stdout) if proc.stdout.strip() else []
    except json.JSONDecodeError:
        issues = []

    issue_count = len(issues)
    # Two points off per issue, floored at zero.
    score = max(0, 100 - issue_count * 2)
    passed = score >= PASS_THRESHOLD

    # Keep at most 20 individual violations for agent mode.
    formatted_issues: list[dict[str, str | int]] = []
    for item in issues[:20]:
        formatted_issues.append(
            {
                "file": item.get("filename", ""),
                "line": item.get("location", {}).get("row", 0),
                "code": item.get("code", ""),
                "message": item.get("message", ""),
            }
        )

    return CheckResult(
        rule_id=self.rule_id,
        passed=passed,
        message=f"Lint score: {score}/100 ({issue_count} issues)",
        severity=Severity.INFO if passed else Severity.WARNING,
        details={
            "issue_count": issue_count,
            "score": score,
            "checked": checked,
            "issues": formatted_issues,
        },
        fix_hint=f"Run: ruff check --fix {checked}" if issues else None,
    )

LoggingPresenceRule dataclass

Bases: ProjectRule

Verify that substantial source modules import logging.

Exempts __init__.py, _version.py, and modules with fewer than 5 top-level definitions (functions + classes).

Source code in packages/axm-audit/src/axm_audit/core/rules/practices.py
@dataclass
@register_rule("practices")
class LoggingPresenceRule(ProjectRule):
    """Verify that substantial source modules import logging.

    Exempts ``__init__.py``, ``_version.py``, and modules with fewer
    than 5 top-level definitions (functions + classes).
    """

    # Minimum top-level defs for a module to count as "substantial".
    min_defs: int = 5

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "PRACTICE_LOGGING"

    def check(self, project_path: Path) -> CheckResult:
        """Check logging presence in source modules."""
        early = self.check_src(project_path)
        if early is not None:
            return early

        src_path = project_path / "src"

        without_logging, total_checked = self._scan_logging_coverage(src_path)

        if total_checked == 0:
            return CheckResult(
                rule_id=self.rule_id,
                passed=True,
                message="No substantial modules to check",
                severity=Severity.INFO,
                details={"without_logging": [], "score": 100},
            )

        covered = total_checked - len(without_logging)
        coverage = covered / total_checked
        score = int(coverage * 100)
        passed = len(without_logging) == 0

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=f"Logging coverage: {coverage:.0%} ({covered}/{total_checked})",
            severity=Severity.WARNING if not passed else Severity.INFO,
            details={"without_logging": without_logging, "score": score},
            fix_hint="Add import logging to modules" if not passed else None,
        )

    # Known pure-data base classes (AST name check).
    _DATA_BASES: frozenset[str] = frozenset({"BaseModel", "TypedDict", "Enum"})

    def _should_check_module(
        self,
        path: Path,
        tree: ast.Module,
    ) -> bool:
        """Determine if a module is substantial enough to require logging."""
        if path.name in {"__init__.py", "_version.py"}:
            return False
        top_level = [
            node
            for node in ast.iter_child_nodes(tree)
            if isinstance(node, ast.FunctionDef | ast.AsyncFunctionDef | ast.ClassDef)
        ]
        if len(top_level) < self.min_defs:
            return False

        # Pure data module exemption: skip modules where every top-level
        # definition is a class inheriting from a single known data base.
        classes = [n for n in top_level if isinstance(n, ast.ClassDef)]
        if classes and len(classes) == len(top_level):
            if all(self._is_pure_data_class(c) for c in classes):
                return False

        return True

    def _is_pure_data_class(self, node: ast.ClassDef) -> bool:
        """Check if a class is a pure data definition (single data-base inheritance)."""
        if len(node.bases) != 1:
            return False
        base = node.bases[0]
        return isinstance(base, ast.Name) and base.id in self._DATA_BASES

    def _scan_logging_coverage(
        self,
        src_path: Path,
    ) -> tuple[list[str], int]:
        """Scan modules and return (without_logging, total_checked)."""
        without_logging: list[str] = []
        total_checked = 0

        # Fix: resolve the AST cache once — it is loop-invariant, and the
        # previous per-file lookup repeated the same call for every module.
        cache = get_ast_cache()
        for path in get_python_files(src_path):
            tree = cache.get_or_parse(path) if cache else parse_file_safe(path)
            if tree is None:
                continue
            if not self._should_check_module(path, tree):
                continue

            total_checked += 1
            if not self._has_logging_import(tree):
                without_logging.append(str(path.relative_to(src_path)))

        return without_logging, total_checked

    @staticmethod
    def _has_logging_import(tree: ast.Module) -> bool:
        """Check if the module imports ``logging`` or ``structlog``."""
        for node in ast.iter_child_nodes(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    if alias.name in {"logging", "structlog"}:
                        return True
            elif isinstance(node, ast.ImportFrom):
                if node.module in {"logging", "structlog"}:
                    return True
        return False
rule_id property

Unique identifier for this rule.

check(project_path)

Check logging presence in source modules.

Source code in packages/axm-audit/src/axm_audit/core/rules/practices.py
def check(self, project_path: Path) -> CheckResult:
    """Check logging presence in source modules."""
    skip = self.check_src(project_path)
    if skip is not None:
        return skip

    src_path = project_path / "src"
    without_logging, total_checked = self._scan_logging_coverage(src_path)

    # Nothing substantial to audit: trivially pass with a full score.
    if total_checked == 0:
        return CheckResult(
            rule_id=self.rule_id,
            passed=True,
            message="No substantial modules to check",
            severity=Severity.INFO,
            details={"without_logging": [], "score": 100},
        )

    missing = len(without_logging)
    covered = total_checked - missing
    coverage = covered / total_checked
    passed = missing == 0

    return CheckResult(
        rule_id=self.rule_id,
        passed=passed,
        message=f"Logging coverage: {coverage:.0%} ({covered}/{total_checked})",
        severity=Severity.INFO if passed else Severity.WARNING,
        details={"without_logging": without_logging, "score": int(coverage * 100)},
        fix_hint=None if passed else "Add import logging to modules",
    )

ProjectRule

Bases: ABC

Base class for project invariants.

Each rule defines a single check that a project must satisfy.

Source code in packages/axm-audit/src/axm_audit/core/rules/base.py
class ProjectRule(ABC):
    """Base class for project invariants.

    Every concrete rule implements exactly one check that a project
    must satisfy.
    """

    @property
    @abstractmethod
    def rule_id(self) -> str:
        """Unique identifier for this rule."""

    @property
    def category(self) -> str:
        """Scoring category, injected automatically by ``@register_rule``.

        Valid values: ``lint``, ``type``, ``complexity``, ``security``,
        ``deps``, ``testing``, ``architecture``, ``practices``,
        ``structure``, ``tooling``.
        """
        # Rules not registered yet fall back to an empty category.
        return getattr(self, "_registered_category", "")

    @abstractmethod
    def check(self, project_path: Path) -> CheckResult:
        """Run this rule against a project.

        Args:
            project_path: Root directory of the project under audit.

        Returns:
            A CheckResult carrying pass/fail status and a message.
        """

    def check_src(self, project_path: Path) -> CheckResult | None:
        """Return an early ``CheckResult`` when ``src/`` is absent.

        Intended as the first statement of ``check()`` to remove
        boilerplate::

            early = self.check_src(project_path)
            if early is not None:
                return early

        Returns:
            ``None`` when ``src/`` exists (the rule should continue),
            otherwise a passing ``CheckResult``.
        """
        if (project_path / "src").exists():
            return None
        return CheckResult(
            rule_id=self.rule_id,
            passed=True,
            message="src/ directory not found",
            severity=Severity.INFO,
            details={"score": 100},
        )

    @classmethod
    def get_instances(cls) -> list[ProjectRule]:
        """Instantiate this rule.

        Subclasses whose constructor takes parameters (e.g.
        ``ToolAvailabilityRule``) override this hook.

        Returns:
            List of rule instances — ``[cls()]`` by default.
        """
        return [cls()]
category property

Scoring category, auto-injected by @register_rule.

Valid values: lint, type, complexity, security, deps, testing, architecture, practices, structure, tooling.

rule_id abstractmethod property

Unique identifier for this rule.

check(project_path) abstractmethod

Execute the check against a project.

Parameters:

Name Type Description Default
project_path Path

Root directory of the project to check.

required

Returns:

Type Description
CheckResult

CheckResult with pass/fail status and message.

Source code in packages/axm-audit/src/axm_audit/core/rules/base.py
@abstractmethod
def check(self, project_path: Path) -> CheckResult:
    """Run this rule against a project.

    Args:
        project_path: Root directory of the project under audit.

    Returns:
        A CheckResult carrying pass/fail status and a message.
    """
check_src(project_path)

Return an early CheckResult if src/ does not exist.

Call this at the top of check() to eliminate boilerplate::

early = self.check_src(project_path)
if early is not None:
    return early

Returns:

Type Description
CheckResult | None

None if src/ exists (rule should continue).

CheckResult | None

A passing CheckResult if src/ is missing.

Source code in packages/axm-audit/src/axm_audit/core/rules/base.py
def check_src(self, project_path: Path) -> CheckResult | None:
    """Return an early ``CheckResult`` when ``src/`` is absent.

    Intended as the first statement of ``check()`` to remove
    boilerplate::

        early = self.check_src(project_path)
        if early is not None:
            return early

    Returns:
        ``None`` when ``src/`` exists (the rule should continue),
        otherwise a passing ``CheckResult``.
    """
    if (project_path / "src").exists():
        return None
    return CheckResult(
        rule_id=self.rule_id,
        passed=True,
        message="src/ directory not found",
        severity=Severity.INFO,
        details={"score": 100},
    )
get_instances() classmethod

Instantiate this rule.

Override in subclasses that require constructor parameters (e.g. ToolAvailabilityRule).

Returns:

Type Description
list[ProjectRule]

List of rule instances — [cls()] by default.

Source code in packages/axm-audit/src/axm_audit/core/rules/base.py
@classmethod
def get_instances(cls) -> list[ProjectRule]:
    """Instantiate this rule.

    Subclasses whose constructor takes parameters (e.g.
    ``ToolAvailabilityRule``) override this hook.

    Returns:
        List of rule instances — ``[cls()]`` by default.
    """
    return [cls()]

PyprojectCompletenessRule dataclass

Bases: ProjectRule

Validate PEP 621 field completeness in pyproject.toml.

Checks 9 fields: name, version/dynamic, description, requires-python, license, authors, classifiers, urls, readme. Scoring: (fields_present / 9) x 100.

Source code in packages/axm-audit/src/axm_audit/core/rules/structure.py
@dataclass
@register_rule("structure")
class PyprojectCompletenessRule(ProjectRule):
    """Validate PEP 621 field completeness in pyproject.toml.

    Nine fields are inspected: name, version/dynamic, description,
    requires-python, license, authors, classifiers, urls, readme.
    Scoring: (fields_present / 9) x 100.
    """

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "STRUCTURE_PYPROJECT"

    def check(self, project_path: Path) -> CheckResult:
        """Check pyproject.toml completeness."""
        pyproject_path = project_path / "pyproject.toml"

        # Missing file: hard failure with a zero score.
        if not pyproject_path.exists():
            return CheckResult(
                rule_id=self.rule_id,
                passed=False,
                message="pyproject.toml not found",
                severity=Severity.ERROR,
                details={"fields_present": 0, "total_fields": 9, "score": 0},
                fix_hint="Create pyproject.toml with PEP 621 metadata",
            )

        # Unreadable or malformed TOML is likewise a hard failure.
        try:
            parsed = tomllib.loads(pyproject_path.read_text())
        except (tomllib.TOMLDecodeError, OSError):
            return CheckResult(
                rule_id=self.rule_id,
                passed=False,
                message="pyproject.toml parse error",
                severity=Severity.ERROR,
                details={"fields_present": 0, "total_fields": 9, "score": 0},
                fix_hint="Fix pyproject.toml syntax",
            )

        present = _count_fields(parsed.get("project", {}))
        score = int((present / _TOTAL_FIELDS) * 100)
        complete = score >= PASS_THRESHOLD

        return CheckResult(
            rule_id=self.rule_id,
            passed=complete,
            message=f"pyproject.toml completeness: {present}/{_TOTAL_FIELDS} fields",
            severity=Severity.INFO if complete else Severity.WARNING,
            details={
                "fields_present": present,
                "total_fields": _TOTAL_FIELDS,
                "score": score,
            },
            fix_hint=None if complete else "Add missing PEP 621 fields to [project]",
        )
rule_id property

Unique identifier for this rule.

check(project_path)

Check pyproject.toml completeness.

Source code in packages/axm-audit/src/axm_audit/core/rules/structure.py
def check(self, project_path: Path) -> CheckResult:
    """Check pyproject.toml completeness."""
    pyproject_path = project_path / "pyproject.toml"

    # Missing file: hard failure with a zero score.
    if not pyproject_path.exists():
        return CheckResult(
            rule_id=self.rule_id,
            passed=False,
            message="pyproject.toml not found",
            severity=Severity.ERROR,
            details={"fields_present": 0, "total_fields": 9, "score": 0},
            fix_hint="Create pyproject.toml with PEP 621 metadata",
        )

    # Unreadable or malformed TOML is likewise a hard failure.
    try:
        parsed = tomllib.loads(pyproject_path.read_text())
    except (tomllib.TOMLDecodeError, OSError):
        return CheckResult(
            rule_id=self.rule_id,
            passed=False,
            message="pyproject.toml parse error",
            severity=Severity.ERROR,
            details={"fields_present": 0, "total_fields": 9, "score": 0},
            fix_hint="Fix pyproject.toml syntax",
        )

    present = _count_fields(parsed.get("project", {}))
    score = int((present / _TOTAL_FIELDS) * 100)
    complete = score >= PASS_THRESHOLD

    return CheckResult(
        rule_id=self.rule_id,
        passed=complete,
        message=f"pyproject.toml completeness: {present}/{_TOTAL_FIELDS} fields",
        severity=Severity.INFO if complete else Severity.WARNING,
        details={
            "fields_present": present,
            "total_fields": _TOTAL_FIELDS,
            "score": score,
        },
        fix_hint=None if complete else "Add missing PEP 621 fields to [project]",
    )

SecurityPatternRule dataclass

Bases: ProjectRule

Detect hardcoded secrets via regex patterns.

Source code in packages/axm-audit/src/axm_audit/core/rules/practices.py
@dataclass
@register_rule("security")
class SecurityPatternRule(ProjectRule):
    """Detect hardcoded secrets via regex patterns."""

    # Case-insensitive assignments of string literals to secret-like names.
    patterns: list[str] = field(
        default_factory=lambda: [
            r"password\s*=\s*[\"'][^\"']+[\"']",
            r"secret\s*=\s*[\"'][^\"']+[\"']",
            r"api_key\s*=\s*[\"'][^\"']+[\"']",
            r"token\s*=\s*[\"'][^\"']+[\"']",
        ]
    )

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "PRACTICE_SECURITY"

    def check(self, project_path: Path) -> CheckResult:
        """Check for hardcoded secrets in the project.

        Scans every Python file under ``src/`` against each secret
        pattern and records file/line locations.
        Scoring: 100 - (match_count * 25), min 0.
        """
        early = self.check_src(project_path)
        if early is not None:
            return early

        src_path = project_path / "src"

        # Fix: compile each pattern once up front instead of passing the
        # raw string to re.finditer per file; keep the raw text alongside
        # for deriving the report label.
        compiled = [
            (pattern, re.compile(pattern, re.IGNORECASE))
            for pattern in self.patterns
        ]

        matches: list[dict[str, str | int]] = []
        for path in get_python_files(src_path):
            try:
                content = path.read_text()
            except (OSError, UnicodeDecodeError):
                continue

            for pattern, regex in compiled:
                key_name = pattern.split(r"\s*")[0]  # Just the key name
                for match in regex.finditer(content):
                    # Line number = newlines before the match start + 1.
                    matches.append(
                        {
                            "file": str(path.relative_to(src_path)),
                            "line": content[: match.start()].count("\n") + 1,
                            "pattern": key_name,
                        }
                    )

        count = len(matches)
        passed = count == 0
        score = max(0, 100 - count * 25)

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=f"{count} potential secret(s) found",
            severity=Severity.ERROR if not passed else Severity.INFO,
            details={"secret_count": count, "matches": matches, "score": score},
            fix_hint="Use environment variables or secret managers"
            if not passed
            else None,
        )
rule_id property

Unique identifier for this rule.

check(project_path)

Check for hardcoded secrets in the project.

Source code in packages/axm-audit/src/axm_audit/core/rules/practices.py
def check(self, project_path: Path) -> CheckResult:
    """Check for hardcoded secrets in the project."""
    skip = self.check_src(project_path)
    if skip is not None:
        return skip

    src_path = project_path / "src"
    findings: list[dict[str, str | int]] = []

    for path in get_python_files(src_path):
        try:
            text = path.read_text()
        except (OSError, UnicodeDecodeError):
            continue

        rel_name = str(path.relative_to(src_path))
        for pattern in self.patterns:
            key_name = pattern.split(r"\s*")[0]  # Just the key name
            for hit in re.finditer(pattern, text, re.IGNORECASE):
                # Line number = newlines before the match start + 1.
                findings.append(
                    {
                        "file": rel_name,
                        "line": text[: hit.start()].count("\n") + 1,
                        "pattern": key_name,
                    }
                )

    count = len(findings)
    passed = count == 0
    score = max(0, 100 - count * 25)

    return CheckResult(
        rule_id=self.rule_id,
        passed=passed,
        message=f"{count} potential secret(s) found",
        severity=Severity.INFO if passed else Severity.ERROR,
        details={"secret_count": count, "matches": findings, "score": score},
        fix_hint=None if passed else "Use environment variables or secret managers",
    )

SecurityRule dataclass

Bases: ProjectRule

Run Bandit and score based on vulnerability severity.

Scoring: 100 - (high_count * 15 + medium_count * 5), min 0.

Source code in packages/axm-audit/src/axm_audit/core/rules/security.py
@dataclass
@register_rule("security")
class SecurityRule(ProjectRule):
    """Run Bandit and score based on vulnerability severity.

    Scoring: 100 - (high_count * 15 + medium_count * 5), min 0.
    """

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "QUALITY_SECURITY"

    def check(self, project_path: Path) -> CheckResult:
        """Check project security with Bandit."""
        skip = self.check_src(project_path)
        if skip is not None:
            return skip

        try:
            data = _run_bandit(project_path / "src", project_path)
        except FileNotFoundError:
            # Bandit binary is not installed at all.
            return CheckResult(
                rule_id=self.rule_id,
                passed=False,
                message="bandit not available",
                severity=Severity.ERROR,
                details={"high_count": 0, "medium_count": 0, "score": 0},
                fix_hint="Install with: uv add --dev bandit",
            )
        except RuntimeError as exc:
            # Bandit ran but produced an unusable result.
            return CheckResult(
                rule_id=self.rule_id,
                passed=False,
                message=str(exc),
                severity=Severity.ERROR,
                details={"high_count": 0, "medium_count": 0, "score": 0},
                fix_hint="Check bandit installation: uv run bandit --version",
            )

        return _build_security_result(self.rule_id, data.get("results", []))
rule_id property

Unique identifier for this rule.

check(project_path)

Check project security with Bandit.

Source code in packages/axm-audit/src/axm_audit/core/rules/security.py
def check(self, project_path: Path) -> CheckResult:
    """Check project security with Bandit."""
    skip = self.check_src(project_path)
    if skip is not None:
        return skip

    try:
        data = _run_bandit(project_path / "src", project_path)
    except FileNotFoundError:
        # Bandit binary is not installed at all.
        return CheckResult(
            rule_id=self.rule_id,
            passed=False,
            message="bandit not available",
            severity=Severity.ERROR,
            details={"high_count": 0, "medium_count": 0, "score": 0},
            fix_hint="Install with: uv add --dev bandit",
        )
    except RuntimeError as exc:
        # Bandit ran but produced an unusable result.
        return CheckResult(
            rule_id=self.rule_id,
            passed=False,
            message=str(exc),
            severity=Severity.ERROR,
            details={"high_count": 0, "medium_count": 0, "score": 0},
            fix_hint="Check bandit installation: uv run bandit --version",
        )

    return _build_security_result(self.rule_id, data.get("results", []))

TestCoverageRule dataclass

Bases: ProjectRule

Check test coverage via pytest-cov.

Scoring: coverage percentage directly (e.g., 90% → score 90). Pass threshold: 90%.

Source code in packages/axm-audit/src/axm_audit/core/rules/coverage.py
@dataclass
@register_rule("testing")
class TestCoverageRule(ProjectRule):
    """Check test coverage via pytest-cov.

    Scoring: coverage percentage directly (e.g., 90% → score 90).
    Pass threshold: 90%.
    """

    min_coverage: float = 90.0

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "QUALITY_COVERAGE"

    def check(self, project_path: Path) -> CheckResult:
        """Check test coverage and capture failures with pytest-cov.

        Delegates to ``run_tests(mode='compact')`` from the shared
        test runner for structured output, then converts the result
        to a ``CheckResult``.
        """
        from axm_audit.core.test_runner import run_tests

        return self._report_to_result(
            run_tests(project_path, mode="compact", stop_on_first=False)
        )

    def _report_to_result(self, report: TestReport) -> CheckResult:
        """Convert a ``TestReport`` to a ``CheckResult``."""
        # Backwards-compatible failure payload.
        failures: list[dict[str, str]] = [
            {"test": f.test, "traceback": f.message} for f in report.failures
        ]

        # Missing coverage data means pytest-cov never ran.
        if report.coverage is None:
            return CheckResult(
                rule_id=self.rule_id,
                passed=False,
                message="No coverage data (pytest-cov not configured)",
                severity=Severity.WARNING,
                details={"coverage": 0.0, "score": 0, "failures": failures},
                fix_hint="Add pytest-cov: uv add --dev pytest-cov",
            )

        coverage_pct = report.coverage
        score = int(coverage_pct)
        failing = report.failed + report.errors
        has_failures = failing > 0
        passed = not has_failures and coverage_pct >= self.min_coverage

        if has_failures:
            message = (
                f"Test coverage: {coverage_pct:.0f}% ({failing} test(s) failed)"
            )
        else:
            message = f"Test coverage: {coverage_pct:.0f}% ({score}/100)"

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=message,
            severity=Severity.INFO if passed else Severity.WARNING,
            details={
                "coverage": coverage_pct,
                "score": score,
                "failures": failures,
            },
            fix_hint=self._generate_fix_hints(has_failures, coverage_pct),
        )

    def _generate_fix_hints(
        self, has_failures: bool, coverage_pct: float
    ) -> str | None:
        """Generate fix hints based on failures and coverage."""
        hints: list[str] = []
        if has_failures:
            hints.append("Fix failing tests")
        if coverage_pct < self.min_coverage:
            hints.append(f"Increase test coverage to >= {self.min_coverage:.0f}%")
        if not hints:
            return None
        return "; ".join(hints)
rule_id property

Unique identifier for this rule.

check(project_path)

Check test coverage and capture failures with pytest-cov.

Delegates to run_tests(mode='compact') from the shared test runner for structured output, then converts the result to a CheckResult.

Source code in packages/axm-audit/src/axm_audit/core/rules/coverage.py
def check(self, project_path: Path) -> CheckResult:
    """Check test coverage and capture failures with pytest-cov.

    Delegates to ``run_tests(mode='compact')`` from the shared
    test runner for structured output, then converts the result
    to a ``CheckResult``.
    """
    from axm_audit.core.test_runner import run_tests

    return self._report_to_result(
        run_tests(project_path, mode="compact", stop_on_first=False)
    )

TestMirrorRule dataclass

Bases: ProjectRule

Check that every source module has a corresponding test file.

For each src/<pkg>/foo.py, looks for tests/**/test_foo.py anywhere in the test tree (supports flat and nested layouts).

Private modules (leading underscores) are matched with the prefix stripped: _facade.py matches test_facade.py or test__facade.py.

Scoring: 100 - (missing_count * 15), min 0.

Source code in packages/axm-audit/src/axm_audit/core/rules/practices.py
@dataclass
@register_rule("practices")
class TestMirrorRule(ProjectRule):
    """Check that every source module has a corresponding test file.

    For each ``src/<pkg>/foo.py``, a ``tests/**/test_foo.py`` is
    accepted anywhere in the test tree (flat and nested layouts both
    work).

    Private modules (leading underscores) match with the prefix
    stripped: ``_facade.py`` matches ``test_facade.py`` or
    ``test__facade.py``.

    Scoring: 100 - (missing_count * 15), min 0.
    """

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "PRACTICE_TEST_MIRROR"

    def check(self, project_path: Path) -> CheckResult:
        """Check test file coverage for source modules."""
        skip = self.check_src(project_path)
        if skip is not None:
            return skip

        missing = self._find_untested_modules(
            project_path / "src", project_path / "tests"
        )

        if not missing:
            return CheckResult(
                rule_id=self.rule_id,
                passed=True,
                message="All source modules have test files",
                severity=Severity.INFO,
            )

        missing_count = len(missing)
        score = max(0, 100 - missing_count * 15)
        passed = score >= 90  # noqa: PLR2004

        # Hint names only the first five missing test files.
        hint_files = ", ".join(f"tests/test_{m}" for m in missing[:5])
        if missing_count > 5:  # noqa: PLR2004
            hint_files += f" (+{missing_count - 5} more)"

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=f"{missing_count} source module(s) without tests",
            severity=Severity.INFO if passed else Severity.WARNING,
            details={"missing": missing, "score": score},
            fix_hint=f"Create test files: {hint_files}",
        )

    @staticmethod
    def _collect_source_modules(src_path: Path) -> list[str]:
        """Collect non-exempt Python module basenames from ``src/``."""
        modules: list[str] = []
        for pkg_dir in src_path.iterdir():
            if not pkg_dir.is_dir() or pkg_dir.name == "__pycache__":
                continue
            modules.extend(
                py_file.name
                for py_file in pkg_dir.rglob("*.py")
                if py_file.name not in _TEST_MIRROR_EXEMPT
            )
        return modules

    @staticmethod
    def _collect_test_basenames(tests_path: Path) -> set[str]:
        """Collect all ``test_*.py`` basenames from the test tree."""
        if not tests_path.exists():
            return set()
        return {path.name for path in tests_path.rglob("test_*.py")}

    @classmethod
    def _find_untested_modules(
        cls,
        src_path: Path,
        tests_path: Path,
    ) -> list[str]:
        """Find source modules without corresponding test files.

        Args:
            src_path: The ``src/`` directory.
            tests_path: The ``tests/`` directory.

        Returns:
            List of module basenames (e.g. ``["foo.py", "bar.py"]``)
            that have no matching ``test_*.py`` file.
        """
        source_modules = cls._collect_source_modules(src_path)
        if not source_modules:
            return []

        known_tests = cls._collect_test_basenames(tests_path)

        untested: list[str] = []
        for name in sorted(set(source_modules)):
            bare = name.lstrip("_")
            # Accept either the stripped or the literal basename form.
            if f"test_{bare}" in known_tests or f"test_{name}" in known_tests:
                continue
            untested.append(name)
        return untested
rule_id property

Unique identifier for this rule.

check(project_path)

Check test file coverage for source modules.

Source code in packages/axm-audit/src/axm_audit/core/rules/practices.py
def check(self, project_path: Path) -> CheckResult:
    """Check test file coverage for source modules."""
    skip = self.check_src(project_path)
    if skip is not None:
        return skip

    missing = self._find_untested_modules(
        project_path / "src", project_path / "tests"
    )

    if not missing:
        return CheckResult(
            rule_id=self.rule_id,
            passed=True,
            message="All source modules have test files",
            severity=Severity.INFO,
        )

    missing_count = len(missing)
    score = max(0, 100 - missing_count * 15)
    passed = score >= 90  # noqa: PLR2004

    # Hint names only the first five missing test files.
    hint_files = ", ".join(f"tests/test_{m}" for m in missing[:5])
    if missing_count > 5:  # noqa: PLR2004
        hint_files += f" (+{missing_count - 5} more)"

    return CheckResult(
        rule_id=self.rule_id,
        passed=passed,
        message=f"{missing_count} source module(s) without tests",
        severity=Severity.INFO if passed else Severity.WARNING,
        details={"missing": missing, "score": score},
        fix_hint=f"Create test files: {hint_files}",
    )

ToolAvailabilityRule dataclass

Bases: ProjectRule

Check if a required CLI tool is available on PATH.

Source code in packages/axm-audit/src/axm_audit/core/rules/tooling.py
@dataclass
@register_rule("tooling")
class ToolAvailabilityRule(ProjectRule):
    """Check if a required CLI tool is available on PATH."""

    tool_name: str = ""
    # When critical, a missing tool is an ERROR; otherwise a WARNING.
    critical: bool = True

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "TOOL_" + self.tool_name.upper()

    @classmethod
    def get_instances(cls) -> list[ProjectRule]:
        """Return one instance per required tool."""
        return [cls(tool_name=name) for name in _REQUIRED_TOOLS]

    def check(self, project_path: Path) -> CheckResult:
        """Check if the tool is available on the system PATH."""
        _ = project_path  # Unused: a PATH lookup needs no project context.

        if shutil.which(self.tool_name) is None:
            return CheckResult(
                rule_id=self.rule_id,
                passed=False,
                message=f"{self.tool_name} not found",
                severity=Severity.ERROR if self.critical else Severity.WARNING,
                fix_hint=f"Install with: uv tool install {self.tool_name}",
            )

        return CheckResult(
            rule_id=self.rule_id,
            passed=True,
            message=f"{self.tool_name} found",
            severity=Severity.INFO,
        )
rule_id property

Unique identifier for this rule.

check(project_path)

Check if the tool is available on the system PATH.

Source code in packages/axm-audit/src/axm_audit/core/rules/tooling.py
def check(self, project_path: Path) -> CheckResult:
    """Check if the tool is available on the system PATH."""
    _ = project_path  # The project path is irrelevant; we only probe PATH.

    if shutil.which(self.tool_name) is not None:
        return CheckResult(
            rule_id=self.rule_id,
            passed=True,
            message=f"{self.tool_name} found",
            severity=Severity.INFO,
        )

    return CheckResult(
        rule_id=self.rule_id,
        passed=False,
        message=f"{self.tool_name} not found",
        severity=Severity.ERROR if self.critical else Severity.WARNING,
        fix_hint=f"Install with: uv tool install {self.tool_name}",
    )
get_instances() classmethod

Return one instance per required tool.

Source code in packages/axm-audit/src/axm_audit/core/rules/tooling.py
@classmethod
def get_instances(cls) -> list[ProjectRule]:
    """Return one instance per required tool."""
    instances: list[ProjectRule] = []
    for tool in _REQUIRED_TOOLS:
        instances.append(cls(tool_name=tool))
    return instances

TypeCheckRule dataclass

Bases: ProjectRule

Run mypy with zero-tolerance for errors.

Scoring: 100 - (error_count * 5), min 0. Pass/fail: any error means failure (matches pre-commit mypy hook).

Source code in packages/axm-audit/src/axm_audit/core/rules/quality.py
@dataclass
@register_rule("type")
class TypeCheckRule(ProjectRule):
    """Run mypy with zero-tolerance for errors.

    Scoring: 100 - (error_count * 5), min 0.
    Pass/fail: any error means failure (matches pre-commit mypy hook).
    """

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "QUALITY_TYPE"

    def check(self, project_path: Path) -> CheckResult:
        """Check project type hints with mypy on src/ and tests/."""
        early = self.check_src(project_path)
        if early is not None:
            return early

        targets, checked = _get_audit_targets(project_path)

        # With --output json mypy emits one JSON diagnostic per line.
        result = run_in_project(
            ["mypy", "--no-error-summary", "--output", "json", *targets],
            project_path,
            capture_output=True,
            text=True,
            check=False,
        )

        error_count, errors = self._parse_mypy_errors(result.stdout)
        passed = error_count == 0
        # Every error deducts 5 points, floored at zero.
        score = max(0, 100 - error_count * 5)

        fix_hint = None
        if error_count > 0:
            fix_hint = "Add type hints to functions and fix type errors"

        return CheckResult(
            rule_id=self.rule_id,
            passed=passed,
            message=f"Type score: {score}/100 ({error_count} errors)",
            severity=Severity.INFO if passed else Severity.WARNING,
            details={
                "error_count": error_count,
                "score": score,
                "checked": checked,
                "errors": errors,
            },
            fix_hint=fix_hint,
        )

    @staticmethod
    def _parse_mypy_errors(
        stdout: str,
    ) -> tuple[int, list[dict[str, str | int]]]:
        """Parse mypy JSON output and extract errors."""
        errors: list[dict[str, str | int]] = []
        for raw in stdout.splitlines():
            line = raw.strip()
            if not line:
                continue
            try:
                entry = json.loads(line)
            except json.JSONDecodeError:
                # Skip any non-JSON noise in the output.
                continue
            if entry.get("severity") != "error":
                continue
            errors.append(
                {
                    "file": entry.get("file", ""),
                    "line": entry.get("line", 0),
                    "message": entry.get("message", ""),
                    "code": entry.get("code", ""),
                }
            )
        return len(errors), errors
rule_id property

Unique identifier for this rule.

check(project_path)

Check project type hints with mypy on src/ and tests/.

Source code in packages/axm-audit/src/axm_audit/core/rules/quality.py
def check(self, project_path: Path) -> CheckResult:
    """Check project type hints with mypy on src/ and tests/."""
    early = self.check_src(project_path)
    if early is not None:
        return early

    targets, checked = _get_audit_targets(project_path)

    # With --output json mypy emits one JSON diagnostic per line.
    result = run_in_project(
        ["mypy", "--no-error-summary", "--output", "json", *targets],
        project_path,
        capture_output=True,
        text=True,
        check=False,
    )

    error_count, errors = self._parse_mypy_errors(result.stdout)
    passed = error_count == 0
    # Every error deducts 5 points, floored at zero.
    score = max(0, 100 - error_count * 5)

    fix_hint = None
    if error_count > 0:
        fix_hint = "Add type hints to functions and fix type errors"

    return CheckResult(
        rule_id=self.rule_id,
        passed=passed,
        message=f"Type score: {score}/100 ({error_count} errors)",
        severity=Severity.INFO if passed else Severity.WARNING,
        details={
            "error_count": error_count,
            "score": score,
            "checked": checked,
            "errors": errors,
        },
        fix_hint=fix_hint,
    )

get_registry()

Return the current rule registry (read-only view).

Callers must ensure that rule modules have been imported before calling this function so that @register_rule decorators have fired.

Source code in packages/axm-audit/src/axm_audit/core/rules/base.py
def get_registry() -> dict[str, list[type[ProjectRule]]]:
    """Return a snapshot of the current rule registry.

    Callers must ensure that rule modules have been imported before
    calling this function so that ``@register_rule`` decorators have
    fired.

    Returns:
        A mapping of category name to registered rule classes.  The
        mapping and its bucket lists are copies, so mutating the result
        cannot corrupt the internal registry (the documented
        "read-only view" — previously the live registry was leaked).
    """
    # Copy both the dict and each bucket list to keep internal state safe.
    return {category: list(rules) for category, rules in _RULE_REGISTRY.items()}

register_rule(category)

Class decorator that registers a rule in the auto-discovery registry.

Also injects _registered_category on the class so that ProjectRule.category resolves automatically.

Parameters:

- category (str, required): Unified category (e.g. "lint", "security").
Returns:

- Callable[[type[ProjectRule]], type[ProjectRule]]: A decorator that returns the unmodified class — it only appends the class to the registry and sets the _registered_category attribute.

Source code in packages/axm-audit/src/axm_audit/core/rules/base.py
def register_rule(category: str) -> Callable[[type[ProjectRule]], type[ProjectRule]]:
    """Class decorator that registers a rule in the auto-discovery registry.

    Also injects ``_registered_category`` on the class so that
    ``ProjectRule.category`` resolves automatically.

    Args:
        category: Unified category (e.g. ``"lint"``, ``"security"``).

    Returns:
        The unmodified class — the decorator only appends to the registry
        and sets the ``_registered_category`` attribute.
    """

    def _decorator(cls: type[ProjectRule]) -> type[ProjectRule]:
        # Stamp the category so ProjectRule.category resolves automatically.
        cls._registered_category = category  # type: ignore[attr-defined]
        registered = _RULE_REGISTRY.setdefault(category, [])
        # Guard against double registration (e.g. repeated module imports).
        if cls not in registered:
            registered.append(cls)
        return cls

    return _decorator