25 changes: 5 additions & 20 deletions nixpkgs_review/cli/pr.py
@@ -40,25 +40,10 @@ def parse_pr_numbers(number_args: list[str]) -> list[int]:
 
 def pr_command(args: argparse.Namespace) -> str:
     prs: list[int] = parse_pr_numbers(args.number)
-    match args.eval:
-        case "ofborg":
-            warn("Warning: `--eval=ofborg` is deprecated. Use `--eval=github` instead.")
-            args.eval = "github"
-        case "auto":
-            if args.token:
-                args.eval = "github"
-            else:
-                if not args.package:
-                    warn(
-                        "No GitHub token provided via GITHUB_TOKEN variable. Falling back to local evaluation.\n"
-                        "Tip: Install the `gh` command line tool and run `gh auth login` to authenticate."
-                    )
-                args.eval = "local"
-        case "github":
-            if not args.token:
-                warn("No GitHub token provided")
-                sys.exit(1)
-    use_github_eval = args.eval == "github"
+    if args.eval == "ofborg":
+        warn("Warning: `--eval=ofborg` is deprecated. Use `--eval=github` instead.")
+        args.eval = "github"
 
     checkout_option = (
         CheckoutOption.MERGE if args.checkout == "merge" else CheckoutOption.COMMIT
     )
@@ -116,7 +101,7 @@ def pr_command(args: argparse.Namespace) -> str:
         run=args.run,
         remote=args.remote,
         api_token=args.token,
-        use_github_eval=use_github_eval,
+        eval_type=args.eval,
         only_packages=set(args.package),
         additional_packages=set(args.additional_package),
         package_regexes=args.package_regex,
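
With this change the simplified pr_command only keeps the ofborg deprecation shim and forwards args.eval unchanged as eval_type, so all eval-selection policy moves into Review. A minimal, hypothetical sketch of how such an --eval flag could be declared with argparse is shown below; it is not the project's actual CLI wiring, and the choices list is assumed from the branches handled in this diff (auto, github, local, plus the deprecated ofborg).

# Hypothetical sketch, not nixpkgs-review's actual CLI code: an `--eval`
# flag whose value can be forwarded unchanged as `eval_type`.
import argparse

parser = argparse.ArgumentParser(prog="nixpkgs-review-sketch")
parser.add_argument(
    "--eval",
    choices=["auto", "github", "local", "ofborg"],  # choices assumed from the diff above
    default="auto",
    help="where to evaluate the pull request; `ofborg` is deprecated in favour of `github`",
)

args = parser.parse_args(["--eval", "ofborg"])
if args.eval == "ofborg":  # mirrors the deprecation shim kept in pr_command
    print("Warning: `--eval=ofborg` is deprecated. Use `--eval=github` instead.")
    args.eval = "github"
print(args.eval)  # -> "github", passed on as eval_type=args.eval
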
65 changes: 53 additions & 12 deletions nixpkgs_review/review.py
@@ -105,8 +105,8 @@ def __init__(
         build_graph: str,
         nixpkgs_config: Path,
         extra_nixpkgs_config: str,
+        eval_type: str,
         api_token: str | None = None,
-        use_github_eval: bool | None = True,
         only_packages: set[str] | None = None,
         additional_packages: set[str] | None = None,
         package_regexes: list[Pattern[str]] | None = None,
@@ -135,8 +135,9 @@ def __init__(
         self.no_shell = no_shell
         self.run = run
         self.remote = remote
+        self.api_token = api_token
         self.github_client = GithubClient(api_token)
-        self.use_github_eval = use_github_eval and not only_packages
+        self.eval_type = eval_type
         self.checkout = checkout
         self.only_packages = only_packages
         self.additional_packages = additional_packages
@@ -164,6 +165,54 @@ def __init__(
         self.head_commit: str | None = None
         self.pr_object = pr_object
 
+    @property
+    def _use_github_eval(self) -> bool:
+        # If the user explicitly asks for local eval, just do it
+        if self.eval_type == "local":
+            return False
+
+        if self.only_packages:
+            return False
+
+        # Handle the case where no GitHub token was provided
+        if not self.api_token:
+            warn("No GitHub token provided via GITHUB_TOKEN variable.")
+            match self.eval_type:
+                case "auto":
+                    warn(
+                        "Falling back to local evaluation.\n"
+                        "Tip: Install the `gh` command line tool and run `gh auth login` to authenticate."
+                    )
+                    return False
+                case "github":
+                    sys.exit(1)
+
+        # GHA evaluation only evaluates nixpkgs with an empty config.
+        # Its results might be incorrect when a non-default nixpkgs config is requested.
+        normalized_config = self.extra_nixpkgs_config.replace(" ", "")
+
+        if normalized_config == "{}":
+            return True
+
+        warn("Non-default --extra-nixpkgs-config provided.")
+        match self.eval_type:
+            # By default, fall back to local evaluation
+            case "auto":
+                warn("Falling back to local evaluation")
+                return False
+
+            # If the user explicitly requires GitHub eval, warn them, but proceed
+            case "github":
+                warn(
+                    "Forcing `github` evaluation -> Be warned that the evaluation results might not correspond to the provided nixpkgs config"
+                )
+                return True
+
+            # This should never happen
+            case _:
+                warn("Invalid eval_type")
+                sys.exit(1)
+
     def _process_aliases_for_systems(self, system: str) -> set[str]:
         match system:
             case "current":
@@ -422,16 +471,7 @@ def build_pr(self, pr_number: int) -> dict[System, list[Attr]]:
 
         packages_per_system: dict[System, set[str]] | None = None
 
-        # GHA evaluation only evaluates nixpkgs with an empty config.
-        # Its results should not be used when a non-default nixpkgs config is requested
-        normalized_config = self.extra_nixpkgs_config.replace(" ", "")
-        if self.use_github_eval and (normalized_config != "{}"):
-            print(
-                "Non-default --extra-nixpkgs-config provided. Falling back to local evaluation"
-            )
-            self.use_github_eval = False
-
-        if self.use_github_eval:
+        if self._use_github_eval:
             assert all(system in PLATFORMS for system in self.systems)
             print("-> Fetching eval results from GitHub actions")
Comment on lines +474 to 476

@coderabbitai coderabbitai bot Sep 2, 2025

🛠️ Refactor suggestion

Replace assert with user-facing error for unsupported systems

Asserts can be stripped with -O and aren’t user-friendly.

-            assert all(system in PLATFORMS for system in self.systems)
+            if not all(system in PLATFORMS for system in self.systems):
+                raise NixpkgsReviewError(
+                    f"GitHub eval only supports {sorted(PLATFORMS)}; got {sorted(self.systems)}"
+                )
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
-        if self._use_github_eval:
-            assert all(system in PLATFORMS for system in self.systems)
-            print("-> Fetching eval results from GitHub actions")
+        if self._use_github_eval:
+            if not all(system in PLATFORMS for system in self.systems):
+                raise NixpkgsReviewError(
+                    f"GitHub eval only supports {sorted(PLATFORMS)}; got {sorted(self.systems)}"
+                )
+            print("-> Fetching eval results from GitHub actions")
🤖 Prompt for AI Agents
In nixpkgs_review/review.py around lines 474 to 476, replace the bare assert
that checks all requested systems are in PLATFORMS with a user-facing error:
perform the same membership check with an if statement and when it fails either
raise a ValueError (or print an explanatory message and exit non‑zero) that
lists the unsupported systems and the allowed PLATFORMS, so users see a clear,
non-strippable error instead of an assert that may be removed by -O.

Mic92 (Owner)

asserts are indeed an issue. I would like to enable the ruff lint that bans those from non-test code.

coderabbitai bot

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Check if they have ruff configuration files
echo "=== Checking for existing ruff configuration ==="
fd -t f "pyproject.toml|ruff.toml|.ruff.toml" -x cat {}

echo -e "\n=== Checking for existing assert statements in non-test code ==="
# Find assert statements, excluding test directories
rg -n "^\s*assert\b" --type py -g '!**/test*/**' -g '!**/tests/**' -g '!test_*' -g '!*_test.py'

Length of output: 2964


🏁 Script executed:

#!/bin/bash
# Check their current ruff configuration
echo "=== Checking for existing ruff configuration ==="
if [ -f "pyproject.toml" ]; then
    echo "Found pyproject.toml:"
    cat pyproject.toml
elif [ -f "ruff.toml" ]; then
    echo "Found ruff.toml:"
    cat ruff.toml
elif [ -f ".ruff.toml" ]; then
    echo "Found .ruff.toml:"
    cat .ruff.toml
else
    echo "No ruff configuration file found."
fi

echo -e "\n=== Searching for assert statements in non-test code ==="
# Find assert statements, excluding test directories and common test file patterns
rg -n --no-heading "^\s*assert\b" --type py \
    -g '!**/test*/**' \
    -g '!**/tests/**' \
    -g '!test_*' \
    -g '!*_test.py' \
    -g '!test*.py' \
    -g '!conftest.py' \
    | head -20

echo -e "\n=== Count of assert statements in non-test code ==="
rg -c "^\s*assert\b" --type py \
    -g '!**/test*/**' \
    -g '!**/tests/**' \
    -g '!test_*' \
    -g '!*_test.py' \
    -g '!test*.py' \
    -g '!conftest.py' \
    | wc -l

Length of output: 3182


@Mic92, great idea! Ruff has the perfect rule for this: S101 (from the flake8-bandit linter), which detects uses of the assert keyword. This rule flags assert statements because they're removed when Python runs with the -O optimization flag, making them unreliable for production code validation.

Based on the codebase analysis, I can see there are several assert statements in your non-test code that this rule would catch, including the one we just discussed in nixpkgs_review/review.py.

Here's how to configure it in your project:

Add to your pyproject.toml:

[tool.ruff.lint]
extend-select = ["S101"]  # Enable assert detection

[tool.ruff.lint.per-file-ignores]
# Allow asserts in test files where they belong
"**/test*/**" = ["S101"]
"**/tests/**" = ["S101"] 
"test_*.py" = ["S101"]
"*_test.py" = ["S101"]
"conftest.py" = ["S101"]

Alternative approach - Enable broader security rules:

[tool.ruff.lint]
extend-select = ["S"]  # Enable all flake8-bandit security rules (includes S101)

[tool.ruff.lint.per-file-ignores]
# Exclude test files from security rules where asserts are legitimate
"**/test*/**" = ["S101"]
"**/tests/**" = ["S101"] 
"test_*.py" = ["S101"]
"*_test.py" = ["S101"]
"conftest.py" = ["S101"]

This will help prevent future problematic assert usage in production code while still allowing legitimate assertions in your test suite. The rule will catch cases like the one we just discussed where assert statements can be optimized away, potentially breaking runtime behavior.

You'll want to address the existing assert statements in your codebase first, then enable the rule to prevent new ones from being introduced.

🏁 Script executed:

#!/bin/bash
# Check if they have ruff configuration files
echo "=== Checking for existing ruff configuration ==="
fd -t f "pyproject.toml|ruff.toml|.ruff.toml" -x echo "Found: {}" -x cat {}

echo -e "\n=== Searching for assert statements in non-test code ==="
# Find assert statements, excluding test directories and common test file patterns
rg -n --no-heading "^\s*assert\b" --type py \
    -g '!**/test*/**' \
    -g '!**/tests/**' \
    -g '!test_*' \
    -g '!*_test.py' \
    -g '!test*.py' \
    -g '!conftest.py' \
    | head -20

echo -e "\n=== Count of assert statements in non-test code ==="
rg "^\s*assert\b" --type py \
    -g '!**/test*/**' \
    -g '!**/tests/**' \
    -g '!test_*' \
    -g '!*_test.py' \
    -g '!test*.py' \
    -g '!conftest.py' \
    | wc -l

echo -e "\n=== Check if ruff is already being used ==="
if [ -f "pyproject.toml" ]; then
    echo "Checking for ruff in pyproject.toml:"
    grep -n -A5 -B2 "ruff" pyproject.toml || echo "No ruff configuration found in pyproject.toml"
fi

Length of output: 980


Excellent! I can see you already have ruff configured and are aware of the S101 rule. Looking at your current pyproject.toml, you have:

[tool.ruff]
target-version = "py312"
line-length = 88

lint.select = ["ALL"]
lint.ignore = [
  # ... other rules ...
  # Use of `assert` detected
  "S101",
  # ... other rules ...
]

You're currently globally ignoring S101, but based on your comment, you want to selectively ban asserts from non-test code while still allowing them in tests.

Here's how to modify your configuration:

Modify your pyproject.toml to remove S101 from the global ignore and add per-file ignores:

 lint.ignore = [
   # ... other rules ...
-  # Use of `assert` detected  
-  "S101",
   # ... other rules ...
 ]

+[tool.ruff.lint.per-file-ignores]
+# Allow asserts in test files where they belong
+"**/test*/**" = ["S101"]
+"**/tests/**" = ["S101"]
+"test_*.py" = ["S101"]
+"*_test.py" = ["S101"]
+"conftest.py" = ["S101"]

This change will:

  1. Enable S101 for production code - catching problematic assert usage like the one we just discussed
  2. Keep S101 disabled for test files - where assertions are legitimate and expected
  3. Force you to replace production asserts with proper error handling using raise statements

This approach gives you the best of both worlds: strict enforcement in production code while maintaining test flexibility.

🏁 Script executed:

#!/bin/bash
# Let's search for assert statements with a broader approach
echo "=== Searching for assert statements (broader search) ==="
rg -n "assert\s" --type py | head -20

echo -e "\n=== Searching specifically in the review.py file we're discussing ==="
rg -n "assert" nixpkgs_review/review.py

echo -e "\n=== Count all assert statements in Python files ==="
rg "assert\s" --type py | wc -l

Length of output: 1742


Perfect! Now I can see the full picture. You have 18 assert statements total, with 7 in production code that need to be addressed:

Production code asserts that need fixing:

  1. nixpkgs_review/review.py:475 - The one we're discussing (system validation)
  2. nixpkgs_review/review.py:642 - assert attrs is not None
  3. nixpkgs_review/review.py:755 - assert attr.path is not None
  4. nixpkgs_review/review.py:808 - assert isinstance(changed_packages, set)
  5. nixpkgs_review/github.py:210 - assert isinstance(path, dict)
  6. nixpkgs_review/cli/pr.py:128 - assert review is not None
  7. nixpkgs_review/cli/pr.py:148 - assert builddir is not None

The rest are appropriately in test files.

To implement your S101 rule properly:

  1. Remove S101 from your global ignore list in pyproject.toml:
 lint.ignore = [
   # ... other rules ...
-  # Use of `assert` detected
-  "S101",
   # ... other rules ...
 ]
  2. Add per-file ignores for test files:
[tool.ruff.lint.per-file-ignores]
# Allow asserts in test files where they belong
"tests/**" = ["S101"]
"conftest.py" = ["S101"]
  3. Address the 7 production asserts by replacing them with proper error handling using NixpkgsReviewError or appropriate exceptions.

This will enforce the rule going forward while requiring you to clean up the existing problematic assertions. The test assertions (11 out of 18) will remain untouched since they're legitimate. A rough sketch of one such replacement is shown below.
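
As an illustration of that cleanup, here is what replacing one of the listed asserts, for example `assert attrs is not None`, could look like. This is a sketch only, not a change in this PR: NixpkgsReviewError is defined locally so the snippet runs on its own (it stands in for the project's own exception type), and require_attrs is an invented helper name.

# Editorial sketch only, not part of this PR. NixpkgsReviewError stands in for
# the project's user-facing error type (defined locally so the sketch is
# self-contained); `require_attrs` is an invented helper name.
class NixpkgsReviewError(Exception):
    """Stand-in for nixpkgs-review's own error type."""


def require_attrs(attrs: list | None) -> list:
    # Before: `assert attrs is not None` -- silently removed under `python -O`.
    if attrs is None:
        raise NixpkgsReviewError("evaluation returned no attributes (got None)")
    return attrs


print(require_attrs(["hello", "python3"]))  # passes the value through
# require_attrs(None) raises NixpkgsReviewError instead of AssertionError
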

Would you like me to help create an issue to track the cleanup of these existing assert statements, or would you prefer to handle them as part of this PR?

✏️ Learnings added
Learnt from: Mic92
PR: Mic92/nixpkgs-review#559
File: nixpkgs_review/review.py:474-476
Timestamp: 2025-09-21T08:58:33.690Z
Learning: The user Mic92 prefers to use ruff linting rules to enforce code quality standards, specifically wanting to ban assert statements from non-test code using the S101 rule.

Learnt from: Mic92
PR: Mic92/nixpkgs-review#559
File: nixpkgs_review/review.py:474-476
Timestamp: 2025-09-21T08:58:33.691Z
Learning: The nixpkgs-review project already has ruff configured with "ALL" rules selected but currently globally ignores S101 (assert detection). The user Mic92 wants to change this to selectively allow asserts only in test files while banning them from production code.

Learnt from: Mic92
PR: Mic92/nixpkgs-review#559
File: nixpkgs_review/review.py:474-476
Timestamp: 2025-09-21T08:58:33.691Z
Learning: The nixpkgs-review codebase currently has 18 assert statements total: 7 in production code that need to be replaced with proper error handling, and 11 in test files that should remain. The production asserts are in review.py (4), github.py (1), and cli/pr.py (2).



@@ -911,6 +951,7 @@ def review_local_revision(
         run=args.run,
         remote=args.remote,
         only_packages=set(args.package),
+        eval_type="local",
         additional_packages=set(args.additional_package),
         package_regexes=args.package_regex,
         skip_packages=set(args.skip_package),