mirror of https://github.com/dorny/test-reporter.git (synced 2026-02-01 19:05:23 -08:00)

Compare commits: fix/comple...feature/21 (10 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 12c7abe9ab |  |
|  | 3b5ad0231b |  |
|  | c89704a410 |  |
|  | ee446707ff |  |
|  | fe45e95373 |  |
|  | e40a1da745 |  |
|  | 3445860437 |  |
|  | 9ef5c136b2 |  |
|  | 83e20c1534 |  |
|  | 79ea6a9d0e |  |
.github/workflows/integration-tests-issue-217.yml (new file, 320 lines, vendored)
@@ -0,0 +1,320 @@
name: Integration Tests (#217) - fail-on-error/fail-on-empty

on:
  workflow_dispatch:
  push:
  pull_request:
    paths:
      - 'src/**'
      - 'dist/**'
      - 'action.yml'
      - '.github/workflows/integration-tests-issue-217.yml'
      - '__tests__/fixtures/integration/**'

jobs:
  # ============================================
  # Scenario 1: Passing tests, fail-on-error=true
  # Expected: Step passes, conclusion=success
  # ============================================
  test-passing-fail-on-error-true:
    name: "Passing tests | fail-on-error=true"
    runs-on: ubuntu-slim
    outputs:
      conclusion: ${{ steps.report.outputs.conclusion }}
    steps:
      - uses: actions/checkout@v6

      - name: Run test reporter
        id: report
        uses: ./
        with:
          name: 'Integration Test - Passing (fail-on-error=true)'
          path: '__tests__/fixtures/integration/passing-tests.xml'
          reporter: java-junit
          fail-on-error: 'true'
          fail-on-empty: 'true'

      - name: Validate results
        run: |
          echo "=== Test Results ==="
          echo "Step outcome: success (would have failed otherwise)"
          echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
          echo "Passed: ${{ steps.report.outputs.passed }}"
          echo "Failed: ${{ steps.report.outputs.failed }}"

          if [ "${{ steps.report.outputs.conclusion }}" != "success" ]; then
            echo "FAIL: Expected conclusion 'success' but got '${{ steps.report.outputs.conclusion }}'"
            exit 1
          fi
          echo "PASS: All validations passed"

  # ============================================
  # Scenario 2: Passing tests, fail-on-error=false
  # Expected: Step passes, conclusion=success
  # ============================================
  test-passing-fail-on-error-false:
    name: "Passing tests | fail-on-error=false"
    runs-on: ubuntu-slim
    outputs:
      conclusion: ${{ steps.report.outputs.conclusion }}
    steps:
      - uses: actions/checkout@v6

      - name: Run test reporter
        id: report
        uses: ./
        with:
          name: 'Integration Test - Passing (fail-on-error=false)'
          path: '__tests__/fixtures/integration/passing-tests.xml'
          reporter: java-junit
          fail-on-error: 'false'
          fail-on-empty: 'true'

      - name: Validate results
        run: |
          echo "=== Test Results ==="
          echo "Conclusion: ${{ steps.report.outputs.conclusion }}"

          if [ "${{ steps.report.outputs.conclusion }}" != "success" ]; then
            echo "FAIL: Expected conclusion 'success' but got '${{ steps.report.outputs.conclusion }}'"
            exit 1
          fi
          echo "PASS: All validations passed"

  # ============================================
  # Scenario 3: Failing tests, fail-on-error=true
  # Expected: Step FAILS, conclusion=failure
  # ============================================
  test-failing-fail-on-error-true:
    name: "Failing tests | fail-on-error=true"
    runs-on: ubuntu-slim
    outputs:
      conclusion: ${{ steps.report.outputs.conclusion }}
    steps:
      - uses: actions/checkout@v6

      - name: Run test reporter
        id: report
        continue-on-error: true
        uses: ./
        with:
          name: 'Integration Test - Failing (fail-on-error=true)'
          path: '__tests__/fixtures/integration/failing-tests.xml'
          reporter: java-junit
          fail-on-error: 'true'
          fail-on-empty: 'true'

      - name: Validate results
        run: |
          echo "=== Test Results ==="
          echo "Step outcome: ${{ steps.report.outcome }}"
          echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
          echo "Failed count: ${{ steps.report.outputs.failed }}"

          # Step should fail
          if [ "${{ steps.report.outcome }}" != "failure" ]; then
            echo "FAIL: Expected step to fail but got '${{ steps.report.outcome }}'"
            exit 1
          fi

          # Conclusion should be failure
          if [ "${{ steps.report.outputs.conclusion }}" != "failure" ]; then
            echo "FAIL: Expected conclusion 'failure' but got '${{ steps.report.outputs.conclusion }}'"
            exit 1
          fi
          echo "PASS: All validations passed"

  # ============================================
  # Scenario 4: Failing tests, fail-on-error=false
  # Expected: Step passes, conclusion=failure
  # Regression test for issue #217
  # ============================================
  test-failing-fail-on-error-false:
    name: "Failing tests | fail-on-error=false [#217]"
    runs-on: ubuntu-slim
    outputs:
      conclusion: ${{ steps.report.outputs.conclusion }}
    steps:
      - uses: actions/checkout@v6

      - name: Run test reporter
        id: report
        continue-on-error: true
        uses: ./
        with:
          name: 'Integration Test - Failing (fail-on-error=false)'
          path: '__tests__/fixtures/integration/failing-tests.xml'
          reporter: java-junit
          fail-on-error: 'false'
          fail-on-empty: 'true'

      - name: Validate results
        run: |
          echo "=== Test Results ==="
          echo "Step outcome: ${{ steps.report.outcome }}"
          echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
          echo "Failed count: ${{ steps.report.outputs.failed }}"

          # Step should pass (fail-on-error is false)
          if [ "${{ steps.report.outcome }}" != "success" ]; then
            echo "FAIL: Expected step to pass but got '${{ steps.report.outcome }}'"
            exit 1
          fi

          # Conclusion SHOULD be 'failure' because tests failed
          # Regression test for issue #217
          if [ "${{ steps.report.outputs.conclusion }}" != "failure" ]; then
            echo "========================================"
            echo "REGRESSION DETECTED (Issue #217)"
            echo "========================================"
            echo "Expected conclusion 'failure' but got '${{ steps.report.outputs.conclusion }}'"
            echo "The check conclusion should reflect test results,"
            echo "independent of the fail-on-error setting."
            echo "========================================"
            exit 1
          fi
          echo "PASS: All validations passed"

  # ============================================
  # Scenario 5: Empty results, fail-on-empty=true
  # Expected: Step FAILS
  # ============================================
  test-empty-fail-on-empty-true:
    name: "Empty results | fail-on-empty=true"
    runs-on: ubuntu-slim
    outputs:
      conclusion: ${{ steps.report.outputs.conclusion || 'N/A' }}
    steps:
      - uses: actions/checkout@v6

      - name: Run test reporter
        id: report
        continue-on-error: true
        uses: ./
        with:
          name: 'Integration Test - Empty (fail-on-empty=true)'
          path: '__tests__/fixtures/integration/nonexistent-*.xml'
          reporter: java-junit
          fail-on-error: 'true'
          fail-on-empty: 'true'

      - name: Validate results
        run: |
          echo "=== Test Results ==="
          echo "Step outcome: ${{ steps.report.outcome }}"

          # Step should fail (no files found)
          if [ "${{ steps.report.outcome }}" != "failure" ]; then
            echo "FAIL: Expected step to fail but got '${{ steps.report.outcome }}'"
            exit 1
          fi
          echo "PASS: Step correctly failed on empty results"

  # ============================================
  # Scenario 6: Empty results, fail-on-empty=false
  # Expected: Step passes, conclusion=success
  # ============================================
  test-empty-fail-on-empty-false:
    name: "Empty results | fail-on-empty=false"
    runs-on: ubuntu-slim
    outputs:
      conclusion: ${{ steps.report.outputs.conclusion || 'N/A' }}
    steps:
      - uses: actions/checkout@v6

      - name: Run test reporter
        id: report
        continue-on-error: true
        uses: ./
        with:
          name: 'Integration Test - Empty (fail-on-empty=false)'
          path: '__tests__/fixtures/integration/nonexistent-*.xml'
          reporter: java-junit
          fail-on-error: 'true'
          fail-on-empty: 'false'

      - name: Validate results
        run: |
          echo "=== Test Results ==="
          echo "Step outcome: ${{ steps.report.outcome }}"

          # Step should pass (fail-on-empty is false)
          if [ "${{ steps.report.outcome }}" != "success" ]; then
            echo "FAIL: Expected step to pass but got '${{ steps.report.outcome }}'"
            exit 1
          fi
          echo "PASS: Step correctly passed with empty results"

  # ============================================
  # Summary job to report overall status
  # ============================================
  summary:
    name: "Test Summary"
    needs:
      - test-passing-fail-on-error-true
      - test-passing-fail-on-error-false
      - test-failing-fail-on-error-true
      - test-failing-fail-on-error-false
      - test-empty-fail-on-empty-true
      - test-empty-fail-on-empty-false
    runs-on: ubuntu-slim
    if: always()
    steps:
      - name: Generate summary
        run: |
          # Helper function to convert result to emoji
          result_to_emoji() {
            case "$1" in
              success) echo "✅ Pass" ;;
              failure) echo "❌ Fail" ;;
              cancelled) echo "⚪ Cancelled" ;;
              skipped) echo "⏭️ Skipped" ;;
              *) echo "❓ Unknown" ;;
            esac
          }

          # Helper function to format conclusion
          conclusion_to_badge() {
            case "$1" in
              success) echo "🟢 success" ;;
              failure) echo "🔴 failure" ;;
              N/A) echo "⚫ N/A" ;;
              *) echo "⚪ $1" ;;
            esac
          }

          # Generate markdown summary
          cat >> $GITHUB_STEP_SUMMARY << 'EOF'
          # Integration Test Results

          ## fail-on-error / fail-on-empty Scenarios

          | Scenario | Test Results | fail-on-error | fail-on-empty | Expected | Conclusion | Result |
          |----------|--------------|---------------|---------------|----------|------------|--------|
          EOF

          echo "| 1 | All pass | \`true\` | \`true\` | Step: pass, Check: success | $(conclusion_to_badge "${{ needs.test-passing-fail-on-error-true.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-passing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
          echo "| 2 | All pass | \`false\` | \`true\` | Step: pass, Check: success | $(conclusion_to_badge "${{ needs.test-passing-fail-on-error-false.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-passing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
          echo "| 3 | Some fail | \`true\` | \`true\` | Step: fail, Check: failure | $(conclusion_to_badge "${{ needs.test-failing-fail-on-error-true.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-failing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
          echo "| 4 | Some fail | \`false\` | \`true\` | Step: pass, Check: failure | $(conclusion_to_badge "${{ needs.test-failing-fail-on-error-false.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-failing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
          echo "| 5 | Empty | \`true\` | \`true\` | Step: fail | $(conclusion_to_badge "${{ needs.test-empty-fail-on-empty-true.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-true.result }}") |" >> $GITHUB_STEP_SUMMARY
          echo "| 6 | Empty | \`true\` | \`false\` | Step: pass | $(conclusion_to_badge "${{ needs.test-empty-fail-on-empty-false.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-false.result }}") |" >> $GITHUB_STEP_SUMMARY

          cat >> $GITHUB_STEP_SUMMARY << 'EOF'

          ---

          > **Scenario 4** is a regression test for [issue #217](https://github.com/dorny/test-reporter/issues/217).
          > It verifies that the `conclusion` output correctly reflects test failures, independent of the `fail-on-error` setting.
          > When `fail-on-error=false`, the step should pass but `conclusion` should still be `failure` if tests failed.

          EOF

          # Also print to console
          echo "=== Integration Test Summary ==="
          echo "Scenario 1 (pass, fail-on-error=true): ${{ needs.test-passing-fail-on-error-true.result }}"
          echo "Scenario 2 (pass, fail-on-error=false): ${{ needs.test-passing-fail-on-error-false.result }}"
          echo "Scenario 3 (fail, fail-on-error=true): ${{ needs.test-failing-fail-on-error-true.result }}"
          echo "Scenario 4 (fail, fail-on-error=false): ${{ needs.test-failing-fail-on-error-false.result }} (regression test for #217)"
          echo "Scenario 5 (empty, fail-on-empty=true): ${{ needs.test-empty-fail-on-empty-true.result }}"
          echo "Scenario 6 (empty, fail-on-empty=false): ${{ needs.test-empty-fail-on-empty-false.result }}"
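A usage note on the Scenario 4 pattern: because `fail-on-error: 'false'` keeps the reporting step green, a downstream job that should run only when tests actually passed has to gate on the `conclusion` output rather than on the job result. A minimal sketch of that consumer side, assuming a hypothetical `deploy` job and report path (illustrative, not part of this diff):

```yaml
jobs:
  report:
    runs-on: ubuntu-latest
    outputs:
      conclusion: ${{ steps.report.outputs.conclusion }}
    steps:
      - uses: actions/checkout@v4
      - name: Publish test report
        id: report
        uses: dorny/test-reporter@v2
        with:
          name: 'Unit tests'
          path: 'build/test-results/**/*.xml'   # assumed location of JUnit XML
          reporter: java-junit
          fail-on-error: 'false'                # step stays green even if tests fail

  deploy:
    needs: report
    # Gate on the check conclusion, not the job result; per issue #217 this
    # reflects real test outcomes even with fail-on-error disabled.
    if: needs.report.outputs.conclusion == 'success'
    runs-on: ubuntu-latest
    steps:
      - run: echo "Deploying: all tests passed"
```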
CHANGELOG.md
@@ -1,5 +1,12 @@
 # Changelog
 
+## 2.3.0
+* Feature: Add Python support with `python-xunit` reporter (pytest) https://github.com/dorny/test-reporter/pull/643
+* Feature: Add pytest traceback parsing and `directory-mapping` option https://github.com/dorny/test-reporter/pull/238
+* Performance: Update sax.js to fix large XML file parsing https://github.com/dorny/test-reporter/pull/681
+* Documentation: Complete documentation for all supported reporters https://github.com/dorny/test-reporter/pull/691
+* Security: Bump js-yaml and mocha in /reports/mocha (fixes prototype pollution) https://github.com/dorny/test-reporter/pull/682
+
 ## 2.2.0
 * Feature: Add collapsed option to control report summary visibility https://github.com/dorny/test-reporter/pull/664
 * Fix badge encoding for values including underscores and hyphens https://github.com/dorny/test-reporter/pull/672
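The `python-xunit` entry in 2.3.0 plugs into the same action inputs exercised by the workflow above; only the `reporter` value and the XML source change. A minimal sketch for a pytest project (the install/run steps and report path are assumptions, not taken from this diff):

```yaml
steps:
  - uses: actions/checkout@v4
  - run: pip install pytest
  - name: Run pytest with JUnit-style XML output
    run: pytest --junitxml=reports/pytest.xml
    continue-on-error: true   # let the reporter publish results even when tests fail
  - uses: dorny/test-reporter@v2
    with:
      name: 'pytest results'
      path: 'reports/pytest.xml'
      reporter: python-xunit
```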
__tests__/fixtures/integration/empty-tests.xml (new file, 5 lines)
@@ -0,0 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<testsuites name="EmptySuite" tests="0" failures="0" errors="0" time="0">
  <testsuite name="EmptySuite" tests="0" failures="0" errors="0" time="0">
  </testsuite>
</testsuites>
__tests__/fixtures/integration/failing-tests.xml (new file, 14 lines)
@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<testsuites name="FailingSuite" tests="3" failures="1" errors="0" time="0.5">
  <testsuite name="FailingSuite" tests="3" failures="1" errors="0" time="0.5">
    <testcase name="should pass test 1" classname="FailingSuite" time="0.1"/>
    <testcase name="should fail test 2" classname="FailingSuite" time="0.2">
      <failure message="Assertion failed" type="AssertionError">
        Expected: true
        Received: false
        at Object.test (/test/example.test.js:10:5)
      </failure>
    </testcase>
    <testcase name="should pass test 3" classname="FailingSuite" time="0.2"/>
  </testsuite>
</testsuites>
__tests__/fixtures/integration/passing-tests.xml (new file, 8 lines)
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<testsuites name="PassingSuite" tests="3" failures="0" errors="0" time="0.5">
  <testsuite name="PassingSuite" tests="3" failures="0" errors="0" time="0.5">
    <testcase name="should pass test 1" classname="PassingSuite" time="0.1"/>
    <testcase name="should pass test 2" classname="PassingSuite" time="0.2"/>
    <testcase name="should pass test 3" classname="PassingSuite" time="0.2"/>
  </testsuite>
</testsuites>
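Note that the two empty-results scenarios in the workflow match no files at all (`nonexistent-*.xml`), so `empty-tests.xml` covers the distinct "file present, zero tests" case. A single glob can also feed all three fixtures to one reporter run; a sketch using the same inputs the workflow already uses (not part of this diff):

```yaml
- uses: ./
  with:
    name: 'All integration fixtures'
    path: '__tests__/fixtures/integration/*-tests.xml'
    reporter: java-junit
    fail-on-error: 'false'   # failing-tests.xml would otherwise fail the step
```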
action.yml
@@ -1,6 +1,5 @@
 name: Test Reporter
-description: |
-  Shows test results in GitHub UI: .NET (xUnit, NUnit, MSTest), Dart, Flutter, Go, Java (JUnit), JavaScript (JEST, Mocha), Python (pytest, unittest), Ruby (RSpec), Swift
+description: Displays test results from popular testing frameworks directly in GitHub
 author: Michal Dorner <dorner.michal@gmail.com>
 inputs:
   artifact:
package-lock.json (generated, 4 changed lines)
@@ -1,12 +1,12 @@
 {
   "name": "test-reporter",
-  "version": "2.2.0",
+  "version": "2.3.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "test-reporter",
-      "version": "2.2.0",
+      "version": "2.3.0",
       "license": "MIT",
       "dependencies": {
         "@actions/core": "^1.11.1",
package.json
@@ -1,6 +1,6 @@
 {
   "name": "test-reporter",
-  "version": "2.2.0",
+  "version": "2.3.0",
   "private": true,
   "description": "Presents test results from popular testing frameworks as Github check run",
   "main": "lib/main.js",
reports/jest/package-lock.json (generated, 4826 changed lines)
File diff suppressed because it is too large

reports/mocha/package-lock.json (generated, 1453 changed lines)
File diff suppressed because it is too large
reports/mocha/package.json
@@ -9,6 +9,6 @@
   "author": "Michal Dorner <dorner.michal@gmail.com>",
   "license": "MIT",
   "devDependencies": {
-    "mocha": "^8.3.0"
+    "mocha": "^11.7.5"
   }
 }