# SQLite-Style Quality Testing workflow
# (introduced in release v6.25.0 - Scientific Benchmarking Command, PR #214)
name: SQLite-Style Quality Testing

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]

env:
  CARGO_TERM_COLOR: always
  # Quoted: env values are strings; an unquoted 1 is an int to generic YAML tools.
  RUST_BACKTRACE: "1"
jobs:
  # ============================================================================
  # Unit Tests - MANDATORY GATE
  # ============================================================================
  unit-tests:
    name: Unit Tests (1000+ tests, 100% pass rate)
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt, clippy
      - name: Cache Cargo dependencies
        # cache@v4: v3 runs on a deprecated artifact backend; v4 is a drop-in upgrade.
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-
      - name: Run all unit tests
        run: cargo test --lib --verbose
      # Name is quoted because a plain scalar may not contain ": " — the unquoted
      # form was the "error in your yaml syntax on line 47" GitHub reported.
      - name: "Check test count (target: 1000+)"
        run: |
          TEST_COUNT=$(cargo test --lib -- --list | grep -c ": test$" || true)
          echo "Total unit tests: $TEST_COUNT"
          if [ "$TEST_COUNT" -lt 1000 ]; then
            echo "⚠️ WARNING: Only $TEST_COUNT tests (target: 1000+)"
            # Don't fail yet, but warn
          else
            echo "✅ Test count exceeds target: $TEST_COUNT"
          fi
      - name: Verify 100% pass rate
        run: |
          cargo test --lib 2>&1 | tee test_output.txt
          if grep -q "test result: FAILED" test_output.txt; then
            echo "❌ Some tests failed!"
            exit 1
          fi
          echo "✅ All tests passed"
# ============================================================================
# Property-Based Tests - MANDATORY GATE
# ============================================================================
property-tests:
name: Property Tests (10K+ generated cases)
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Cache Cargo dependencies
uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Run property-based tests
run: |
cargo test --test parser_properties --release --verbose
env:
PROPTEST_CASES: 1000
- name: Verify test case generation
run: |
echo "✅ Property tests completed"
echo "Note: proptest generates 1000+ cases per property"
# ============================================================================
# Integration Tests - MANDATORY GATE
# ============================================================================
integration-tests:
name: Integration Tests (Real-world workflows)
runs-on: ubuntu-latest
timeout-minutes: 20
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Cache Cargo dependencies
uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Run integration tests
run: cargo test --test makefile_parsing --verbose
- name: Verify integration scenarios
run: |
echo "✅ All integration tests passed"
# ============================================================================
# Performance Benchmarks - MANDATORY GATE
# ============================================================================
performance-benchmarks:
name: Performance Benchmarks (Regression detection)
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Cache Cargo dependencies
uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Run performance benchmarks
run: cargo test --test parse_performance --release -- --nocapture
- name: Generate performance report
run: |
cargo test --test parse_performance --release -- --ignored --nocapture \
| tee performance_report.txt || true
- name: Upload performance report
uses: actions/upload-artifact@v4
with:
name: performance-report-${{ github.sha }}
path: performance_report.txt
# ============================================================================
# Code Coverage - QUALITY METRIC
# ============================================================================
coverage:
name: Code Coverage (target: >85%)
runs-on: ubuntu-latest
timeout-minutes: 25
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
components: llvm-tools-preview
- name: Cache Cargo dependencies
uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-coverage-${{ hashFiles('**/Cargo.lock') }}
- name: Install cargo-llvm-cov
run: cargo install cargo-llvm-cov || true
- name: Generate coverage report
run: |
cargo llvm-cov --lib --html --output-dir target/coverage
cargo llvm-cov --lib --json --output-path target/coverage.json
- name: Check coverage threshold
run: |
COVERAGE=$(cargo llvm-cov --lib --json | jq '.data[0].totals.lines.percent')
echo "Code coverage: $COVERAGE%"
if (( $(echo "$COVERAGE < 85.0" | bc -l) )); then
echo "⚠️ WARNING: Coverage $COVERAGE% is below target (85%)"
# Don't fail yet, but warn
else
echo "✅ Coverage exceeds target: $COVERAGE%"
fi
- name: Upload coverage report
uses: actions/upload-artifact@v4
with:
name: coverage-report-${{ github.sha }}
path: target/coverage/
# ============================================================================
# Mutation Testing - QUALITY METRIC (Weekly only)
# ============================================================================
mutation-tests:
name: Mutation Testing (target: >90% kill rate)
runs-on: ubuntu-latest
timeout-minutes: 120
# Only run on main branch to save CI time
if: github.ref == 'refs/heads/main'
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Cache Cargo dependencies
uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-mutants-${{ hashFiles('**/Cargo.lock') }}
- name: Install cargo-mutants
run: cargo install cargo-mutants || true
- name: Run mutation tests on parser
run: |
cargo mutants --file rash/src/make_parser/parser.rs -- --lib \
2>&1 | tee mutants_parser.txt || true
- name: Run mutation tests on AST
run: |
cargo mutants --file rash/src/ast/restricted.rs -- --lib \
2>&1 | tee mutants_ast.txt || true
- name: Check mutation score
run: |
echo "=== Mutation Testing Results ==="
if [ -f mutants.out/caught.txt ]; then
CAUGHT=$(wc -l < mutants.out/caught.txt)
TOTAL=$(cat mutants.out/*.txt 2>/dev/null | wc -l)
if [ "$TOTAL" -gt 0 ]; then
KILL_RATE=$(echo "scale=2; $CAUGHT * 100 / $TOTAL" | bc)
echo "Kill rate: $KILL_RATE% ($CAUGHT/$TOTAL)"
if (( $(echo "$KILL_RATE < 90.0" | bc -l) )); then
echo "⚠️ WARNING: Mutation score $KILL_RATE% (target: >90%)"
else
echo "✅ Mutation score exceeds target: $KILL_RATE%"
fi
fi
fi
- name: Upload mutation report
uses: actions/upload-artifact@v4
with:
name: mutation-report-${{ github.sha }}
path: |
mutants.out/
mutants_*.txt
# ============================================================================
# Clippy and Rustfmt - CODE QUALITY
# ============================================================================
code-quality:
name: Code Quality (clippy + rustfmt)
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt, clippy
- name: Check formatting
run: cargo fmt -- --check
- name: Run Clippy
run: cargo clippy --all-targets --all-features -- -D warnings
# ============================================================================
# Documentation Tests
# ============================================================================
doc-tests:
name: Documentation Tests
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Run doc tests
run: cargo test --doc
- name: Build documentation
run: cargo doc --no-deps --document-private-items
# ============================================================================
# Quality Report - SUMMARY
# ============================================================================
quality-summary:
name: Quality Summary Report
runs-on: ubuntu-latest
needs: [unit-tests, property-tests, integration-tests, performance-benchmarks, code-quality]
if: always()
steps:
- name: Generate quality summary
run: |
echo "# SQLite-Style Quality Testing Summary" | tee summary.md
echo "" | tee -a summary.md
echo "## Test Results" | tee -a summary.md
echo "" | tee -a summary.md
echo "- Unit Tests: ${{ needs.unit-tests.result }}" | tee -a summary.md
echo "- Property Tests: ${{ needs.property-tests.result }}" | tee -a summary.md
echo "- Integration Tests: ${{ needs.integration-tests.result }}" | tee -a summary.md
echo "- Performance Tests: ${{ needs.performance-benchmarks.result }}" | tee -a summary.md
echo "- Code Quality: ${{ needs.code-quality.result }}" | tee -a summary.md
echo "" | tee -a summary.md
echo "## Status" | tee -a summary.md
if [ "${{ needs.unit-tests.result }}" == "success" ] && \
[ "${{ needs.property-tests.result }}" == "success" ] && \
[ "${{ needs.integration-tests.result }}" == "success" ] && \
[ "${{ needs.performance-benchmarks.result }}" == "success" ] && \
[ "${{ needs.code-quality.result }}" == "success" ]; then
echo "✅ **ALL QUALITY GATES PASSED**" | tee -a summary.md
else
echo "❌ **QUALITY GATES FAILED**" | tee -a summary.md
exit 1
fi
- name: Upload summary
uses: actions/upload-artifact@v4
with:
name: quality-summary-${{ github.sha }}
path: summary.md