Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Bug Fixes

* **pytest:** report `xfailed` tests separately instead of counting them as failures ([#672](https://github.com/rtk-ai/rtk/issues/672))
* **curl:** skip JSON schema replacement when schema is larger than original payload ([#297](https://github.com/rtk-ai/rtk/issues/297))
* **toml-dsl:** fix regex overmatch on `tofu-plan/init/validate/fmt` and `mix-format/compile` — add `(\s|$)` word boundary to prevent matching subcommands (e.g. `tofu planet`, `mix formats`) ([#349](https://github.com/rtk-ai/rtk/issues/349))
* **toml-dsl:** remove 3 dead built-in filters (`docker-inspect`, `docker-compose-ps`, `pnpm-build`) — Clap routes these commands before `run_fallback`, so the TOML filters never fire ([#351](https://github.com/rtk-ai/rtk/issues/351))
Expand Down
94 changes: 78 additions & 16 deletions src/pytest_cmd.rs
Original file line number Diff line number Diff line change
Expand Up @@ -108,9 +108,7 @@ fn filter_pytest_output(output: &str) -> String {
current_failure.clear();
}
continue;
} else if trimmed.starts_with("===")
&& (trimmed.contains("passed") || trimmed.contains("failed"))
{
} else if is_pytest_summary_line(trimmed) {
summary_line = trimmed.to_string();
continue;
}
Expand Down Expand Up @@ -163,14 +161,23 @@ fn filter_pytest_output(output: &str) -> String {
}

fn build_pytest_summary(summary: &str, _test_files: &[String], failures: &[String]) -> String {
// Parse summary line
let (passed, failed, skipped) = parse_summary_line(summary);
let (passed, failed, skipped, xfailed) = parse_summary_line(summary);

if failed == 0 && passed > 0 {
return format!("Pytest: {} passed", passed);
if failed == 0 && (passed > 0 || skipped > 0 || xfailed > 0) {
let mut parts = Vec::new();
if passed > 0 {
parts.push(format!("{} passed", passed));
}
if xfailed > 0 {
parts.push(format!("{} xfailed", xfailed));
}
if skipped > 0 {
parts.push(format!("{} skipped", skipped));
}
return format!("Pytest: {}", parts.join(", "));
}

if passed == 0 && failed == 0 {
if passed == 0 && failed == 0 && xfailed == 0 {
return "Pytest: No tests collected".to_string();
}

Expand All @@ -179,6 +186,9 @@ fn build_pytest_summary(summary: &str, _test_files: &[String], failures: &[Strin
if skipped > 0 {
result.push_str(&format!(", {} skipped", skipped));
}
if xfailed > 0 {
result.push_str(&format!(", {} xfailed", xfailed));
}
result.push('\n');
result.push_str("═══════════════════════════════════════\n");

Expand Down Expand Up @@ -241,10 +251,21 @@ fn build_pytest_summary(summary: &str, _test_files: &[String], failures: &[Strin
result.trim().to_string()
}

fn parse_summary_line(summary: &str) -> (usize, usize, usize) {
/// Returns `true` when `line` is pytest's final summary line, e.g.
/// `=== 4 passed, 1 failed in 0.50s ===` or `=== no tests ran in 0.01s ===`.
///
/// A summary line must mention a duration (the `" in "` separator before
/// the elapsed time) plus at least one outcome keyword. Decorative `=`
/// padding around the text is stripped before matching.
fn is_pytest_summary_line(line: &str) -> bool {
    let core = line.trim_matches('=').trim();
    if !core.contains(" in ") {
        return false;
    }
    // NOTE: "xfailed" is subsumed by the "failed" substring check, but is
    // kept explicit to document the full set of recognized outcomes.
    ["passed", "failed", "skipped", "xfailed", "no tests ran"]
        .iter()
        .any(|kw| core.contains(kw))
}

fn parse_summary_line(summary: &str) -> (usize, usize, usize, usize) {
let mut passed = 0;
let mut failed = 0;
let mut skipped = 0;
let mut xfailed = 0;

// Parse lines like "=== 4 passed, 1 failed in 0.50s ==="
let parts: Vec<&str> = summary.split(',').collect();
Expand All @@ -253,24 +274,32 @@ fn parse_summary_line(summary: &str) -> (usize, usize, usize) {
let words: Vec<&str> = part.split_whitespace().collect();
for (i, word) in words.iter().enumerate() {
if i > 0 {
if word.contains("passed") {
let normalized = word
.trim_matches(|c: char| !c.is_ascii_alphanumeric())
.to_ascii_lowercase();

if normalized == "passed" {
if let Ok(n) = words[i - 1].parse::<usize>() {
passed = n;
}
} else if word.contains("failed") {
} else if normalized == "failed" {
if let Ok(n) = words[i - 1].parse::<usize>() {
failed = n;
}
} else if word.contains("skipped") {
} else if normalized == "skipped" {
if let Ok(n) = words[i - 1].parse::<usize>() {
skipped = n;
}
} else if normalized == "xfailed" {
if let Ok(n) = words[i - 1].parse::<usize>() {
xfailed = n;
}
}
}
}
}

(passed, failed, skipped)
(passed, failed, skipped, xfailed)
}

#[cfg(test)]
Expand All @@ -292,6 +321,32 @@ tests/test_foo.py ..... [100%]
assert!(result.contains("5 passed"));
}

#[test]
fn test_filter_pytest_with_xfailed() {
    // A mix of passing and expected-failure (`x`) outcomes must collapse
    // to a one-line success summary instead of being reported as failures.
    let raw = r#"=== test session starts ===
collected 2 items

tests/test_foo.py .x [100%]

1 passed, 1 xfailed in 0.50s"#;

    assert_eq!(filter_pytest_output(raw), "Pytest: 1 passed, 1 xfailed");
}

#[test]
fn test_filter_pytest_xfailed_only() {
    // A run consisting solely of xfailed tests is still a success:
    // the summary should report the xfailed count, not a failure block.
    let raw = r#"=== test session starts ===
collected 1 item

tests/test_foo.py x [100%]

1 xfailed in 0.50s"#;

    assert_eq!(filter_pytest_output(raw), "Pytest: 1 xfailed");
}

#[test]
fn test_filter_pytest_with_failures() {
let output = r#"=== test session starts ===
Expand Down Expand Up @@ -358,14 +413,21 @@ collected 0 items

#[test]
fn test_parse_summary_line() {
    // Table-driven: each case pairs a raw pytest summary line with the
    // expected (passed, failed, skipped, xfailed) counts.
    let cases = [
        ("=== 5 passed in 0.50s ===", (5, 0, 0, 0)),
        ("=== 4 passed, 1 failed in 0.50s ===", (4, 1, 0, 0)),
        ("=== 3 passed, 1 failed, 2 skipped in 1.0s ===", (3, 1, 2, 0)),
        ("=== 3 passed, 2 xfailed, 1 skipped in 1.0s ===", (3, 0, 1, 2)),
    ];
    for (line, expected) in cases {
        assert_eq!(parse_summary_line(line), expected, "input: {}", line);
    }
}
}