diff --git a/.github/workflows/performance-tests.yml b/.github/workflows/performance-tests.yml
index c249446fe..d515f77e0 100644
--- a/.github/workflows/performance-tests.yml
+++ b/.github/workflows/performance-tests.yml
@@ -15,6 +15,14 @@ jobs:
     # Make sure 1. this is a PR, not an issue 2. it contains "/run performance test" anywhere in the body
     if: github.event.issue.pull_request && contains(github.event.comment.body, '/run performance test')
     runs-on: ubuntu-latest
+    outputs:
+      request_count: ${{ steps.output.outputs.request_count }}
+      failure_count: ${{ steps.output.outputs.failure_count }}
+      med_time: ${{ steps.output.outputs.med_time }}
+      avg_time: ${{ steps.output.outputs.avg_time }}
+      min_time: ${{ steps.output.outputs.min_time }}
+      max_time: ${{ steps.output.outputs.max_time }}
+      requests_per_sec: ${{ steps.output.outputs.requests_per_sec }}
     steps:
       - name: Set up WireGuard
         uses: egor-tensin/setup-wireguard@v1.2.0
@@ -69,6 +77,30 @@ jobs:
           touch output_failures.csv && chmod 666 output_failures.csv
           touch output_exceptions.csv && chmod 666 output_exceptions.csv
           docker run --net=host -v $PWD:/mnt/locust -e CSV="/mnt/locust/${CSV}" locust -f /mnt/locust/v4.py --host http://127.0.0.1:10080/ --users 10 --spawn-rate 1 --headless -i "$(cat ${CSV} | wc -l)" --csv=/mnt/locust/output
+      - name: Produce output for summary
+        id: output
+        uses: jannekem/run-python-script-action@v1
+        with:
+          script: |
+            import os
+
+            def write_string(name, value):
+              with open(os.environ['GITHUB_OUTPUT'], 'a') as fh:
+                print(f'{name}={value}', file=fh)
+
+            def write_float(name, value):
+              write_string(name, "{:.2f}".format(float(value)))
+
+            with open("delphi-admin/load-testing/locust/output_stats.csv", "r", encoding="utf-8", errors="ignore") as scraped:
+              final_line = scraped.readlines()[-1].split(",")
+              write_string('request_count', final_line[2])
+              write_string('failure_count', final_line[3])
+              write_float('med_time', final_line[4])
+              write_float('avg_time', final_line[5])
+              write_float('min_time', final_line[6])
+              write_float('max_time', final_line[7])
+              write_float('requests_per_sec', final_line[9])
+
       - name: Archive results as artifacts
         uses: actions/upload-artifact@v3
         with:
@@ -76,13 +108,40 @@ jobs:
           path: |
             delphi-admin/load-testing/locust/output_*.csv
 
-  comment-output:
+  comment-success:
+    runs-on: ubuntu-latest
+    if: success()
+    needs: run-perftests
+    steps:
+      - name: Comment run results
+        env:
+          GITHUB_WORKFLOW_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
+        uses: actions/github-script@v5
+        with:
+          github-token: ${{secrets.GITHUB_TOKEN}}
+          script: |
+            github.rest.issues.createComment({
+              issue_number: context.issue.number,
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              body: `✅ Performance tests complete!
+              Result summary:
+              - Total requests: **${{ needs.run-perftests.outputs.request_count }}**
+              - Total failures: **${{ needs.run-perftests.outputs.failure_count }}**
+              - Min response time: **${{ needs.run-perftests.outputs.min_time }} ms**
+              - Max response time: **${{ needs.run-perftests.outputs.max_time }} ms**
+              - Average response time: **${{ needs.run-perftests.outputs.avg_time }} ms**
+              - Median response time: **${{ needs.run-perftests.outputs.med_time }} ms**
+              - Requests per second: **${{ needs.run-perftests.outputs.requests_per_sec }}**
+
+              Click here to view full results: ${{ env.GITHUB_WORKFLOW_URL }}.`
+            })
+
+  comment-failure:
     runs-on: ubuntu-latest
-    if: success() || failure() # but not if skipped
+    if: failure()
     needs: run-perftests
     steps:
       - name: Comment run results
-        # URL that links to run results
         env:
           GITHUB_WORKFLOW_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
         uses: actions/github-script@v5
@@ -93,5 +152,5 @@ jobs:
               issue_number: context.issue.number,
               owner: context.repo.owner,
               repo: context.repo.repo,
-              body: '✅ Performance tests complete! Click here to view results: ${{ env.GITHUB_WORKFLOW_URL }}'
+              body: `❌ Performance tests failed! Click here to view full results: ${{ env.GITHUB_WORKFLOW_URL }}.`
             })