Skip to content

Index

axm_audit

axm-audit: Code auditing and quality rules for Python projects.

This package provides comprehensive project auditing capabilities including:

- Structure validation (files, directories)
- Quality checks (linting, type checking, complexity)
- Security analysis (Bandit integration, secrets detection)
- Dependency scanning (pip-audit, deptry)
- Test coverage enforcement (pytest-cov)
- Architecture analysis (circular imports, god classes, coupling)
- Best practices enforcement (docstrings, security patterns)

Example

from axm_audit import audit_project
from pathlib import Path

>>> result = audit_project(Path("."))
>>> print(f"Score: {result.quality_score}/100 — Grade {result.grade}")
Score: 95.0/100 — Grade A

AuditResult

Bases: BaseModel

Aggregated result of a project audit.

Contains all individual check results and computed summary.

Source code in packages/axm-audit/src/axm_audit/models/results.py
class AuditResult(BaseModel):
    """Aggregated result of a project audit.

    Holds every individual check outcome together with derived
    summary fields (pass/fail counts, weighted score, letter grade).
    """

    project_path: str | None = Field(
        default=None, description="Path to the audited project"
    )
    checks: list[CheckResult] = Field(default_factory=list)

    @computed_field  # type: ignore[prop-decorator]
    @property
    def success(self) -> bool:
        """True if all checks passed."""
        return not any(not c.passed for c in self.checks)

    @computed_field  # type: ignore[prop-decorator]
    @property
    def total(self) -> int:
        """Total number of checks."""
        return len(self.checks)

    @computed_field  # type: ignore[prop-decorator]
    @property
    def failed(self) -> int:
        """Number of failed checks."""
        return sum(1 for c in self.checks if not c.passed)

    @computed_field  # type: ignore[prop-decorator]
    @property
    def quality_score(self) -> float | None:
        """Weighted average across 8 code-quality categories.

        Categories and weights:
            Linting (20%), Type Safety (15%), Complexity (15%),
            Security (10%), Dependencies (10%), Testing (15%),
            Architecture (10%), Practices (5%).

        Structure is NOT scored here (handled by axm-init).
        Returns None if no scored checks are present.
        """
        weights = {
            "lint": 0.20,
            "type": 0.15,
            "complexity": 0.15,
            "security": 0.10,
            "deps": 0.10,
            "testing": 0.15,
            "architecture": 0.10,
            "practices": 0.05,
        }

        # Bucket raw scores per category, taken from each check's
        # `details["score"]` (only for checks tagged with a known category).
        buckets: dict[str, list[float]] = {}
        for check in self.checks:
            cat = check.category
            if cat and cat in weights and check.details:
                raw = check.details.get("score")
                if raw is not None:
                    buckets.setdefault(cat, []).append(float(raw))

        if not buckets:
            return None

        # Average each category, weight it, then renormalize by the
        # weights actually present so a filtered audit (e.g. only "lint")
        # is not penalized for categories that never ran.
        weighted_total = 0.0
        present_weight = 0.0
        for cat, weight in weights.items():
            vals = buckets.get(cat)
            if vals:
                weighted_total += (sum(vals) / len(vals)) * weight
                present_weight += weight

        if present_weight <= 0:
            return None
        return round(weighted_total / present_weight, 1)

    @computed_field  # type: ignore[prop-decorator]
    @property
    def grade(self) -> str | None:
        """Letter grade derived from quality_score.

        A >= 90, B >= 80, C >= 70, D >= 60, F < 60.
        Returns None if quality_score is None.
        """
        score = self.quality_score
        if score is None:
            return None
        # Walk the thresholds from best to worst; first match wins.
        for floor, letter in (
            (_GRADE_A, "A"),
            (_GRADE_B, "B"),
            (_GRADE_C, "C"),
            (_GRADE_D, "D"),
        ):
            if score >= floor:
                return letter
        return "F"

    model_config = {"extra": "forbid"}
failed property

Number of failed checks.

grade property

Letter grade derived from quality_score.

A >= 90, B >= 80, C >= 70, D >= 60, F < 60. Returns None if quality_score is None.

quality_score property

Weighted average across 8 code-quality categories.

Categories and weights

Linting (20%), Type Safety (15%), Complexity (15%), Security (10%), Dependencies (10%), Testing (15%), Architecture (10%), Practices (5%).

Structure is NOT scored here (handled by axm-init). Returns None if no scored checks are present.

success property

True if all checks passed.

total property

Total number of checks.

CheckResult

Bases: BaseModel

Result of a single compliance check.

Designed for machine parsing by AI Agents.

Source code in packages/axm-audit/src/axm_audit/models/results.py
class CheckResult(BaseModel):
    """Result of a single compliance check.

    Designed for machine parsing by AI Agents.
    """

    # Stable identifier of the rule that produced this result.
    rule_id: str = Field(..., description="Unique identifier for the rule")
    # Overall outcome; AuditResult aggregates these into pass/fail counts.
    passed: bool = Field(..., description="Whether the check passed")
    # Short human-readable summary of what was found.
    message: str = Field(..., description="Human-readable result message")
    # Defaults to ERROR; see Severity for the blocking semantics.
    severity: Severity = Field(default=Severity.ERROR, description="Severity level")
    # Optional structured payload; AuditResult.quality_score reads
    # details["score"] when present.
    details: dict[str, Any] | None = Field(
        default=None, description="Structured data (cycles, metrics)"
    )
    # Optional actionable remediation text for the failing rule.
    fix_hint: str | None = Field(default=None, description="Actionable fix suggestion")
    # Scoring category key (e.g. "lint", "type"); set by the auditor,
    # not by the rule itself.
    category: str | None = Field(
        default=None, description="Scoring category (injected by auditor)"
    )

    # Reject unknown fields so malformed results fail loudly.
    model_config = {"extra": "forbid"}

Severity

Bases: StrEnum

Severity level for check results.

Source code in packages/axm-audit/src/axm_audit/models/results.py
class Severity(StrEnum):
    """Severity level for check results."""

    ERROR = "error"  # Blocks audit pass
    WARNING = "warning"  # Non-blocking issue
    INFO = "info"  # Informational only

audit_project(project_path, category=None, quick=False)

Audit a project against Python 2026 standards.

Rules execute in parallel via ThreadPoolExecutor for speed. Each rule is isolated — one failure does not prevent others. An ASTCache is shared across rules to avoid redundant parsing.

Parameters:

Name Type Description Default
project_path Path

Root directory of the project to audit.

required
category str | None

Optional category filter.

None
quick bool

If True, run only lint + type checks.

False

Returns:

Type Description
AuditResult

AuditResult containing all check results.

Raises:

Type Description
FileNotFoundError

If project_path does not exist.

Source code in packages/axm-audit/src/axm_audit/core/auditor.py
def audit_project(
    project_path: Path,
    category: str | None = None,
    quick: bool = False,
) -> AuditResult:
    """Audit a project against Python 2026 standards.

    Rules execute in parallel via ThreadPoolExecutor for speed.
    Each rule is isolated — one failure does not prevent others.
    An ``ASTCache`` is shared across rules to avoid redundant parsing.

    Args:
        project_path: Root directory of the project to audit.
        category: Optional category filter.
        quick: If True, run only lint + type checks.

    Returns:
        AuditResult containing all check results.

    Raises:
        FileNotFoundError: If project_path does not exist.
    """
    if not project_path.exists():
        raise FileNotFoundError(f"Project path does not exist: {project_path}")

    selected_rules = get_rules_for_category(category, quick)

    # One AST cache is installed for the duration of the run and torn
    # down afterwards, even if a worker raises.
    shared_cache = ASTCache()
    set_ast_cache(shared_cache)
    try:

        def run_rule(rule):
            return _safe_check(rule, project_path)

        with concurrent.futures.ThreadPoolExecutor() as executor:
            results = list(executor.map(run_rule, selected_rules))
    finally:
        set_ast_cache(None)

    return AuditResult(project_path=str(project_path), checks=results)

get_rules_for_category(category, quick=False)

Get rules for a specific category or all rules.

Parameters:

Name Type Description Default
category str | None

Filter to specific category, or None for all.

required
quick bool

If True, only lint + type checks.

False

Returns:

Type Description
list[ProjectRule]

List of rule instances to run.

Raises:

Type Description
ValueError

If category is not valid.

Source code in packages/axm-audit/src/axm_audit/core/auditor.py
def get_rules_for_category(
    category: str | None, quick: bool = False
) -> list[ProjectRule]:
    """Get rules for a specific category or all rules.

    Args:
        category: Filter to specific category, or None for all.
        quick: If True, only lint + type checks.

    Returns:
        List of rule instances to run.

    Raises:
        ValueError: If category is not valid.
    """
    _ensure_registry_loaded()

    # Quick mode short-circuits everything else: just lint + type.
    if quick:
        from axm_audit.core.rules.quality import LintingRule, TypeCheckRule

        return [LintingRule(), TypeCheckRule()]

    # Reject unknown category names before consulting the registry.
    if category is not None and category not in VALID_CATEGORIES:
        raise ValueError(
            f"Invalid category: {category}. "
            f"Valid categories: {', '.join(sorted(VALID_CATEGORIES))}"
        )

    if not category:
        return _build_all_rules()

    # Expand each registered rule class into its concrete instances.
    selected: list[ProjectRule] = []
    for rule_cls in get_registry().get(category, []):
        selected.extend(rule_cls.get_instances())
    return selected