Compare commits

..

3 Commits

Author SHA1 Message Date
Jozef Izso
12c7abe9ab Add conclusion output column to integration test summary table
- Added job outputs to expose conclusion from each test scenario
- Added new "Conclusion" column to summary table with colored badges
- Shows actual conclusion output (🟢 success / 🔴 failure / ⚫ N/A)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-14 15:12:30 +01:00
Jozef Izso
3b5ad0231b Update scenario 4 to be a regression test for issue #217
The bug has been fixed - conclusion output now correctly reflects
test failures independent of fail-on-error setting. Updated comments
and summary to indicate this is now a regression test.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-14 15:12:15 +01:00
Jozef Izso
c89704a410 Add integration tests for fail-on-error and fail-on-empty scenarios (#217)
Add workflow and fixtures to test the behavior of fail-on-error and
fail-on-empty parameters across different scenarios:

- Passing tests with fail-on-error true/false
- Failing tests with fail-on-error true/false
- Empty results with fail-on-empty true/false

Scenario 4 (failing tests + fail-on-error=false) is expected to fail
until issue #217 is fixed, documenting the bug where check conclusion
shows 'success' even when tests fail.

The workflow outputs a GitHub Actions summary with a markdown table
showing all test results.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-14 15:11:36 +01:00
6 changed files with 353 additions and 6 deletions

View File

@@ -0,0 +1,320 @@
# Integration tests for the fail-on-error / fail-on-empty inputs.
# Six scenario jobs exercise the action against fixture JUnit files, then a
# summary job renders a results table. Scenario 4 is a regression test for
# issue #217 (conclusion must reflect test failures independent of
# fail-on-error).
name: Integration Tests (#217) - fail-on-error/fail-on-empty

on:
  workflow_dispatch:
  push:
  # NOTE(review): in the flattened source the `paths` filter followed
  # `pull_request:`; it is attached to pull_request only — confirm whether
  # `push` should carry the same filter.
  pull_request:
    paths:
      - 'src/**'
      - 'dist/**'
      - 'action.yml'
      - '.github/workflows/integration-tests.yml'
      - '__tests__/fixtures/integration/**'

jobs:
  # ============================================
  # Scenario 1: Passing tests, fail-on-error=true
  # Expected: Step passes, conclusion=success
  # ============================================
  test-passing-fail-on-error-true:
    name: "Passing tests | fail-on-error=true"
    runs-on: ubuntu-slim
    outputs:
      conclusion: ${{ steps.report.outputs.conclusion }}
    steps:
      - uses: actions/checkout@v6
      - name: Run test reporter
        id: report
        uses: ./
        with:
          name: 'Integration Test - Passing (fail-on-error=true)'
          path: '__tests__/fixtures/integration/passing-tests.xml'
          reporter: java-junit
          fail-on-error: 'true'
          fail-on-empty: 'true'
      - name: Validate results
        run: |
          echo "=== Test Results ==="
          echo "Step outcome: success (would have failed otherwise)"
          echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
          echo "Passed: ${{ steps.report.outputs.passed }}"
          echo "Failed: ${{ steps.report.outputs.failed }}"
          if [ "${{ steps.report.outputs.conclusion }}" != "success" ]; then
            echo "FAIL: Expected conclusion 'success' but got '${{ steps.report.outputs.conclusion }}'"
            exit 1
          fi
          echo "PASS: All validations passed"

  # ============================================
  # Scenario 2: Passing tests, fail-on-error=false
  # Expected: Step passes, conclusion=success
  # ============================================
  test-passing-fail-on-error-false:
    name: "Passing tests | fail-on-error=false"
    runs-on: ubuntu-slim
    outputs:
      conclusion: ${{ steps.report.outputs.conclusion }}
    steps:
      - uses: actions/checkout@v6
      - name: Run test reporter
        id: report
        uses: ./
        with:
          name: 'Integration Test - Passing (fail-on-error=false)'
          path: '__tests__/fixtures/integration/passing-tests.xml'
          reporter: java-junit
          fail-on-error: 'false'
          fail-on-empty: 'true'
      - name: Validate results
        run: |
          echo "=== Test Results ==="
          echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
          if [ "${{ steps.report.outputs.conclusion }}" != "success" ]; then
            echo "FAIL: Expected conclusion 'success' but got '${{ steps.report.outputs.conclusion }}'"
            exit 1
          fi
          echo "PASS: All validations passed"

  # ============================================
  # Scenario 3: Failing tests, fail-on-error=true
  # Expected: Step FAILS, conclusion=failure
  # ============================================
  test-failing-fail-on-error-true:
    name: "Failing tests | fail-on-error=true"
    runs-on: ubuntu-slim
    outputs:
      conclusion: ${{ steps.report.outputs.conclusion }}
    steps:
      - uses: actions/checkout@v6
      - name: Run test reporter
        id: report
        # continue-on-error lets the job proceed to the validation step even
        # though this step is expected to fail.
        continue-on-error: true
        uses: ./
        with:
          name: 'Integration Test - Failing (fail-on-error=true)'
          path: '__tests__/fixtures/integration/failing-tests.xml'
          reporter: java-junit
          fail-on-error: 'true'
          fail-on-empty: 'true'
      - name: Validate results
        run: |
          echo "=== Test Results ==="
          echo "Step outcome: ${{ steps.report.outcome }}"
          echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
          echo "Failed count: ${{ steps.report.outputs.failed }}"
          # Step should fail
          if [ "${{ steps.report.outcome }}" != "failure" ]; then
            echo "FAIL: Expected step to fail but got '${{ steps.report.outcome }}'"
            exit 1
          fi
          # Conclusion should be failure
          if [ "${{ steps.report.outputs.conclusion }}" != "failure" ]; then
            echo "FAIL: Expected conclusion 'failure' but got '${{ steps.report.outputs.conclusion }}'"
            exit 1
          fi
          echo "PASS: All validations passed"

  # ============================================
  # Scenario 4: Failing tests, fail-on-error=false
  # Expected: Step passes, conclusion=failure
  # Regression test for issue #217
  # ============================================
  test-failing-fail-on-error-false:
    name: "Failing tests | fail-on-error=false [#217]"
    runs-on: ubuntu-slim
    outputs:
      conclusion: ${{ steps.report.outputs.conclusion }}
    steps:
      - uses: actions/checkout@v6
      - name: Run test reporter
        id: report
        continue-on-error: true
        uses: ./
        with:
          name: 'Integration Test - Failing (fail-on-error=false)'
          path: '__tests__/fixtures/integration/failing-tests.xml'
          reporter: java-junit
          fail-on-error: 'false'
          fail-on-empty: 'true'
      - name: Validate results
        run: |
          echo "=== Test Results ==="
          echo "Step outcome: ${{ steps.report.outcome }}"
          echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
          echo "Failed count: ${{ steps.report.outputs.failed }}"
          # Step should pass (fail-on-error is false)
          if [ "${{ steps.report.outcome }}" != "success" ]; then
            echo "FAIL: Expected step to pass but got '${{ steps.report.outcome }}'"
            exit 1
          fi
          # Conclusion SHOULD be 'failure' because tests failed
          # Regression test for issue #217
          if [ "${{ steps.report.outputs.conclusion }}" != "failure" ]; then
            echo "========================================"
            echo "REGRESSION DETECTED (Issue #217)"
            echo "========================================"
            echo "Expected conclusion 'failure' but got '${{ steps.report.outputs.conclusion }}'"
            echo "The check conclusion should reflect test results,"
            echo "independent of the fail-on-error setting."
            echo "========================================"
            exit 1
          fi
          echo "PASS: All validations passed"

  # ============================================
  # Scenario 5: Empty results, fail-on-empty=true
  # Expected: Step FAILS
  # ============================================
  test-empty-fail-on-empty-true:
    name: "Empty results | fail-on-empty=true"
    runs-on: ubuntu-slim
    outputs:
      # The step fails before producing outputs, so default to N/A for the
      # summary table.
      conclusion: ${{ steps.report.outputs.conclusion || 'N/A' }}
    steps:
      - uses: actions/checkout@v6
      - name: Run test reporter
        id: report
        continue-on-error: true
        uses: ./
        with:
          name: 'Integration Test - Empty (fail-on-empty=true)'
          path: '__tests__/fixtures/integration/nonexistent-*.xml'
          reporter: java-junit
          fail-on-error: 'true'
          fail-on-empty: 'true'
      - name: Validate results
        run: |
          echo "=== Test Results ==="
          echo "Step outcome: ${{ steps.report.outcome }}"
          # Step should fail (no files found)
          if [ "${{ steps.report.outcome }}" != "failure" ]; then
            echo "FAIL: Expected step to fail but got '${{ steps.report.outcome }}'"
            exit 1
          fi
          echo "PASS: Step correctly failed on empty results"

  # ============================================
  # Scenario 6: Empty results, fail-on-empty=false
  # Expected: Step passes, conclusion=success
  # ============================================
  test-empty-fail-on-empty-false:
    name: "Empty results | fail-on-empty=false"
    runs-on: ubuntu-slim
    outputs:
      conclusion: ${{ steps.report.outputs.conclusion || 'N/A' }}
    steps:
      - uses: actions/checkout@v6
      - name: Run test reporter
        id: report
        continue-on-error: true
        uses: ./
        with:
          name: 'Integration Test - Empty (fail-on-empty=false)'
          path: '__tests__/fixtures/integration/nonexistent-*.xml'
          reporter: java-junit
          fail-on-error: 'true'
          fail-on-empty: 'false'
      - name: Validate results
        run: |
          echo "=== Test Results ==="
          echo "Step outcome: ${{ steps.report.outcome }}"
          # Step should pass (fail-on-empty is false)
          if [ "${{ steps.report.outcome }}" != "success" ]; then
            echo "FAIL: Expected step to pass but got '${{ steps.report.outcome }}'"
            exit 1
          fi
          echo "PASS: Step correctly passed with empty results"

  # ============================================
  # Summary job to report overall status
  # ============================================
  summary:
    name: "Test Summary"
    needs:
      - test-passing-fail-on-error-true
      - test-passing-fail-on-error-false
      - test-failing-fail-on-error-true
      - test-failing-fail-on-error-false
      - test-empty-fail-on-empty-true
      - test-empty-fail-on-empty-false
    runs-on: ubuntu-slim
    # Run even when scenario jobs fail so the table is always produced.
    if: always()
    steps:
      - name: Generate summary
        run: |
          # Helper function to convert result to emoji
          result_to_emoji() {
            case "$1" in
              success) echo "✅ Pass" ;;
              failure) echo "❌ Fail" ;;
              cancelled) echo "⚪ Cancelled" ;;
              skipped) echo "⏭️ Skipped" ;;
              *) echo "❓ Unknown" ;;
            esac
          }
          # Helper function to format conclusion
          conclusion_to_badge() {
            case "$1" in
              success) echo "🟢 success" ;;
              failure) echo "🔴 failure" ;;
              N/A) echo "⚫ N/A" ;;
              *) echo "⚪ $1" ;;
            esac
          }
          # Generate markdown summary (heredoc content is left-aligned with the
          # `cat` so no leading whitespace leaks into the markdown)
          cat >> $GITHUB_STEP_SUMMARY << 'EOF'
          # Integration Test Results
          ## fail-on-error / fail-on-empty Scenarios
          | Scenario | Test Results | fail-on-error | fail-on-empty | Expected | Conclusion | Result |
          |----------|--------------|---------------|---------------|----------|------------|--------|
          EOF
          echo "| 1 | All pass | \`true\` | \`true\` | Step: pass, Check: success | $(conclusion_to_badge "${{ needs.test-passing-fail-on-error-true.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-passing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
          echo "| 2 | All pass | \`false\` | \`true\` | Step: pass, Check: success | $(conclusion_to_badge "${{ needs.test-passing-fail-on-error-false.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-passing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
          echo "| 3 | Some fail | \`true\` | \`true\` | Step: fail, Check: failure | $(conclusion_to_badge "${{ needs.test-failing-fail-on-error-true.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-failing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
          echo "| 4 | Some fail | \`false\` | \`true\` | Step: pass, Check: failure | $(conclusion_to_badge "${{ needs.test-failing-fail-on-error-false.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-failing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
          echo "| 5 | Empty | \`true\` | \`true\` | Step: fail | $(conclusion_to_badge "${{ needs.test-empty-fail-on-empty-true.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-true.result }}") |" >> $GITHUB_STEP_SUMMARY
          echo "| 6 | Empty | \`true\` | \`false\` | Step: pass | $(conclusion_to_badge "${{ needs.test-empty-fail-on-empty-false.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-false.result }}") |" >> $GITHUB_STEP_SUMMARY
          cat >> $GITHUB_STEP_SUMMARY << 'EOF'
          ---
          > **Scenario 4** is a regression test for [issue #217](https://github.com/dorny/test-reporter/issues/217).
          > It verifies that `conclusion` output correctly reflects test failures, independent of `fail-on-error` setting.
          > When `fail-on-error=false`, the step should pass but `conclusion` should still be `failure` if tests failed.
          EOF
          # Also print to console
          echo "=== Integration Test Summary ==="
          echo "Scenario 1 (pass, fail-on-error=true): ${{ needs.test-passing-fail-on-error-true.result }}"
          echo "Scenario 2 (pass, fail-on-error=false): ${{ needs.test-passing-fail-on-error-false.result }}"
          echo "Scenario 3 (fail, fail-on-error=true): ${{ needs.test-failing-fail-on-error-true.result }}"
          echo "Scenario 4 (fail, fail-on-error=false): ${{ needs.test-failing-fail-on-error-false.result }} (regression test for #217)"
          echo "Scenario 5 (empty, fail-on-empty=true): ${{ needs.test-empty-fail-on-empty-true.result }}"
          echo "Scenario 6 (empty, fail-on-empty=false): ${{ needs.test-empty-fail-on-empty-false.result }}"
      - name: Check overall status
        # Because this job runs with `if: always()`, it would otherwise succeed
        # even when scenario jobs fail — making the workflow look green to any
        # required-check rule gating on "Test Summary" alone. Fail the job
        # explicitly if any dependency did not succeed.
        if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
        run: |
          echo "One or more integration test scenarios did not succeed."
          exit 1

View File

@@ -0,0 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Fixture: JUnit report containing a suite with zero test cases.
     NOTE(review): the workflow's empty-result scenarios glob
     'nonexistent-*.xml' (no files matched) rather than this file —
     confirm where this zero-test fixture is consumed. -->
<testsuites name="EmptySuite" tests="0" failures="0" errors="0" time="0">
<testsuite name="EmptySuite" tests="0" failures="0" errors="0" time="0">
</testsuite>
</testsuites>

View File

@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Fixture: JUnit report with 3 tests, 1 failure (AssertionError in test 2).
     Used by the failing-test scenarios (3 and 4) of the integration workflow
     via path '__tests__/fixtures/integration/failing-tests.xml'. -->
<testsuites name="FailingSuite" tests="3" failures="1" errors="0" time="0.5">
<testsuite name="FailingSuite" tests="3" failures="1" errors="0" time="0.5">
<testcase name="should pass test 1" classname="FailingSuite" time="0.1"/>
<testcase name="should fail test 2" classname="FailingSuite" time="0.2">
<failure message="Assertion failed" type="AssertionError">
Expected: true
Received: false
at Object.test (/test/example.test.js:10:5)
</failure>
</testcase>
<testcase name="should pass test 3" classname="FailingSuite" time="0.2"/>
</testsuite>
</testsuites>

View File

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Fixture: JUnit report with 3 passing tests and no failures/errors.
     Used by the passing-test scenarios (1 and 2) of the integration workflow
     via path '__tests__/fixtures/integration/passing-tests.xml'. -->
<testsuites name="PassingSuite" tests="3" failures="0" errors="0" time="0.5">
<testsuite name="PassingSuite" tests="3" failures="0" errors="0" time="0.5">
<testcase name="should pass test 1" classname="PassingSuite" time="0.1"/>
<testcase name="should pass test 2" classname="PassingSuite" time="0.2"/>
<testcase name="should pass test 3" classname="PassingSuite" time="0.2"/>
</testsuite>
</testsuites>

6
dist/index.js generated vendored
View File

@@ -94,15 +94,15 @@ class ArtifactProvider {
}
async load() {
const result = {};
const allArtifacts = await this.octokit.paginate(this.octokit.rest.actions.listWorkflowRunArtifacts, {
const resp = await this.octokit.rest.actions.listWorkflowRunArtifacts({
...github.context.repo,
run_id: this.runId
});
if (allArtifacts.length === 0) {
if (resp.data.artifacts.length === 0) {
core.warning(`No artifacts found in run ${this.runId}`);
return {};
}
const artifacts = allArtifacts.filter(a => this.artifactNameMatch(a.name));
const artifacts = resp.data.artifacts.filter(a => this.artifactNameMatch(a.name));
if (artifacts.length === 0) {
core.warning(`No artifact matches ${this.artifact}`);
return {};

View File

@@ -50,17 +50,17 @@ export class ArtifactProvider implements InputProvider {
async load(): Promise<ReportInput> {
const result: ReportInput = {}
const allArtifacts = await this.octokit.paginate(this.octokit.rest.actions.listWorkflowRunArtifacts, {
const resp = await this.octokit.rest.actions.listWorkflowRunArtifacts({
...github.context.repo,
run_id: this.runId
})
if (allArtifacts.length === 0) {
if (resp.data.artifacts.length === 0) {
core.warning(`No artifacts found in run ${this.runId}`)
return {}
}
const artifacts = allArtifacts.filter(a => this.artifactNameMatch(a.name))
const artifacts = resp.data.artifacts.filter(a => this.artifactNameMatch(a.name))
if (artifacts.length === 0) {
core.warning(`No artifact matches ${this.artifact}`)
return {}