Forbid the use of pborman/uuid and remove uses
#16630
Workflow file for this run
name: All Tests
run-name: ${{ inputs.test_name }}${{ inputs.test_dbs }}
on:
  pull_request:
  push:
    branches:
      - main
      - cloud/*
      - feature/*
      - release/*
  workflow_dispatch:
    inputs:
      commit:
        description: "Commit SHA"
        required: true
      run_single_functional_test:
        description: "Run a single functional test"
        type: boolean
        default: false
      run_single_unit_test:
        description: "Run a single unit test (INSTEAD of functional test)"
        type: boolean
        default: false
      unit_test_directory:
        description: "[Unit Test Only] Directory to run unit tests in"
        type: string
        default: "./temporal"
      n_runs:
        description: "Number of times to repeat the test per database type (except for unit tests; start with n=25 or lower to avoid OOMKill)"
        type: number
        default: 1
      test_name:
        description: "Name of the test to run (e.g. 'TestAcquireShard_DeadlineExceededErrorSuite' or 'TestFunctionalSuite/TestUpdateWorkflow')"
        type: string
      timeout_minutes:
        description: "Test timeout in minutes"
        type: number
        default: 120
      test_runner:
        description: "Which runner to use. Choose higher RAM if your n_runs is high."
        type: choice
        default: "16GB RAM (ubuntu-latest)"
        options:
          - "16GB RAM (ubuntu-latest)"
          - "64GB RAM (ubuntu-latest-16-cores)"
      test_dbs:
        description: 'List of DBs to test on (e.g. ["sqlite", "cassandra", "mysql8", "postgres12"])'
        type: string
        default: '["sqlite"]'
concurrency: # Auto-cancel existing runs in the PR when a new commit is pushed
  group: run-tests-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
env:
  # For workflow_dispatch: use the given commit.
  # For pull_request: use the head of the PR branch (not the merge branch which is the default!)
  # For push: use the pushed commit.
  COMMIT: ${{ github.event.inputs.commit || github.event.pull_request.head.sha || github.sha }}
  PR_BASE_COMMIT: ${{ github.event.pull_request.base.sha }}
  DOCKER_COMPOSE_FILE: ./develop/github/docker-compose.yml
  TEMPORAL_VERSION_CHECK_DISABLED: 1
  MAX_TEST_ATTEMPTS: 3
jobs:
  set-up-single-test:
    name: Set up single test
    runs-on: ubuntu-latest
    outputs:
      shard_indices: ${{ steps.generate_output.outputs.shard_indices }}
      total_shards: ${{ steps.generate_output.outputs.shards }}
      github_timeout: ${{ steps.generate_output.outputs.github_timeout }}
      test_timeout: ${{ steps.generate_output.outputs.test_timeout }}
      single_test_args: ${{ steps.generate_output.outputs.single_test_args }}
      runs_on: ${{ steps.generate_output.outputs.runs_on }}
      dbs: ${{ inputs.test_dbs }}
      modified_unit_test_suites: ${{ env.modified_unit_test_suites }}
      modified_integration_test_suites: ${{ env.modified_integration_test_suites }}
      modified_functional_test_suites: ${{ env.modified_functional_test_suites }}
      modified_functional_ndc_test_suites: ${{ env.modified_functional_ndc_test_suites }}
      modified_functional_xdc_test_suites: ${{ env.modified_functional_xdc_test_suites }}
    steps:
      - id: generate_output
        run: |
          shards=3
          timeout=35 # update this to TEST_TIMEOUT if you update the Makefile
          runs_on='["ubuntu-latest"]'
          if [[ "${{ inputs.run_single_functional_test }}" == "true" || "${{ inputs.run_single_unit_test }}" == "true" ]]; then
            shards=1
            timeout=${{ inputs.timeout_minutes }}
            single_test_args="-run ${{ inputs.test_name }} -count ${{ inputs.n_runs }}"
            if [[ "${{ inputs.test_runner }}" == "64GB RAM (ubuntu-latest-16-cores)" ]]; then
              runs_on='[ "ubuntu-latest-16-cores" ]'
            fi
          fi
          {
            echo "shard_indices=[ $(seq -s, 0 $((shards-1))) ]"
            echo "shards=$shards"
            echo "github_timeout=$((timeout+5))"
            echo "test_timeout=${timeout}m"
            echo "single_test_args=$single_test_args"
            echo "runs_on=$runs_on"
          } >> "$GITHUB_OUTPUT"
      - id: cat_output
        run: |
          cat "$GITHUB_OUTPUT"
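      # For reference, with the defaults above (shards=3, timeout=35, no single-test inputs)
      # the generate_output step writes roughly the following to $GITHUB_OUTPUT:
      #
      #   shard_indices=[ 0,1,2 ]
      #   shards=3
      #   github_timeout=40
      #   test_timeout=35m
      #   single_test_args=
      #   runs_on=["ubuntu-latest"]
      #
      # In single-test mode there is one shard and single_test_args becomes e.g.
      # "-run TestFunctionalSuite/TestUpdateWorkflow -count 1", depending on the inputs.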
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          ref: ${{ env.COMMIT }}
          fetch-depth: 0
      - name: Fetch base branch
        run: git fetch origin ${{ github.event.pull_request.base.ref }}:${{ github.event.pull_request.base.ref }}
      - name: Compute merge base
        if: ${{ github.event_name == 'pull_request' }}
        run: |
          MERGE_BASE="$(git merge-base "${{ env.COMMIT }}" "${{ github.event.pull_request.base.ref }}")"
          echo "MERGE_BASE=${MERGE_BASE}" >> "$GITHUB_ENV"
          set -exuo pipefail
          go run ./cmd/tools/test/find_altered_tests.go \
            -c unit \
            -c integration \
            -c functional \
            -c functional_ndc \
            -c functional_xdc \
            -s "${MERGE_BASE}" \
            -t "${COMMIT}" | tee -a "$GITHUB_ENV"
        shell: bash
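      # The exact output format of find_altered_tests.go is not shown in this file; the
      # assumption is that it prints one KEY=value line per category, for example something
      # like "modified_functional_test_suites=TestFooSuite|TestBarSuite" (placeholder names),
      # so piping it through `tee -a "$GITHUB_ENV"` both logs those lines and turns them into
      # env vars, which this job then re-exports as the modified_*_test_suites outputs above.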
  pre-build:
    name: Pre-build for cache
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          ref: ${{ env.COMMIT }}
      - uses: actions/setup-go@v5
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
        with:
          go-version-file: "go.mod"
          cache: false # do our own caching
      - name: Restore dependencies
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
        id: restore-deps
        uses: actions/cache/restore@v4
        with:
          path: ~/go/pkg/mod
          key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ hashFiles('go.sum') }}
      - run: make pre-build-functional-test-coverage
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
      - name: Save dependencies
        uses: actions/cache/save@v4
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test && steps.restore-deps.outputs.cache-hit != 'true' }}
        with:
          path: ~/go/pkg/mod
          key: ${{ steps.restore-deps.outputs.cache-primary-key }}
      - name: Save build outputs
        uses: actions/cache/save@v4
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
        with:
          path: ~/.cache/go-build
          key: go-${{ runner.os }}${{ runner.arch }}-build-${{ env.COMMIT }}
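  # Cache layout (illustrative): the module cache key only changes when go.mod/go.sum change,
  # expanding to something like
  #   go-LinuxX64-<hash(go.mod)>-deps-<hash(go.sum)>
  # while the build cache is keyed by the exact commit under test:
  #   go-LinuxX64-build-${COMMIT}
  # The <hash(...)> placeholders stand in for the hashFiles() digests. The jobs below restore
  # these same keys instead of letting setup-go manage caching (cache: false above).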
  misc-checks:
    name: Misc checks
    needs: pre-build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          ref: ${{ env.COMMIT }}
          # buf-breaking tries to compare HEAD against merge base so we need to be able to find it
          fetch-depth: 100
      - uses: actions/setup-go@v5
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
        with:
          go-version-file: "go.mod"
          cache: false # do our own caching
      - name: Restore dependencies
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
        uses: actions/cache/restore@v4
        with:
          path: ~/go/pkg/mod
          key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ hashFiles('go.sum') }}
      - name: Restore build outputs
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
        uses: actions/cache/restore@v4
        with:
          path: ~/.cache/go-build
          key: go-${{ runner.os }}${{ runner.arch }}-build-${{ env.COMMIT }}
      - uses: arduino/setup-protoc@v3
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - run: GOOS=windows GOARCH=amd64 make clean-bins bins
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
      - run: GOOS=darwin GOARCH=arm64 make clean-bins bins
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
      - run: make clean-bins ci-build-misc
        if: ${{ !inputs.run_single_functional_test && !inputs.run_single_unit_test }}
  unit-test:
    if: ${{ inputs.run_single_functional_test != true }}
    name: Unit test
    needs: [pre-build, set-up-single-test]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          ref: ${{ env.COMMIT }}
      - uses: actions/setup-go@v5
        with:
          go-version-file: "go.mod"
          cache: false # do our own caching
      - name: Restore dependencies
        uses: actions/cache/restore@v4
        with:
          path: ~/go/pkg/mod
          key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ hashFiles('go.sum') }}
      - name: Restore build outputs
        uses: actions/cache/restore@v4
        with:
          path: ~/.cache/go-build
          key: go-${{ runner.os }}${{ runner.arch }}-build-${{ env.COMMIT }}
      - name: Run unit tests
        timeout-minutes: 20
        run: make unit-test-coverage
        env:
          UNIT_TEST_DIRS: ${{ inputs.unit_test_directory }}
          TEST_ARGS: ${{ needs.set-up-single-test.outputs.single_test_args }}
          TEST_TIMEOUT: ${{ needs.set-up-single-test.outputs.test_timeout }}
      - name: Generate crash report
        if: failure()
        run: | # if the tests failed, we would expect one JUnit XML report per attempt; otherwise it must have crashed
          [ "$(find .testoutput -maxdepth 1 -name 'junit.*.xml' | wc -l)" -lt "$MAX_TEST_ATTEMPTS" ] &&
            CRASH_REPORT_NAME="$GITHUB_JOB" make report-test-crash
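      # Crash heuristic, spelled out: the test target is expected to leave one junit.*.xml per
      # attempt in .testoutput, with up to $MAX_TEST_ATTEMPTS attempts. If the step failed but
      # there are fewer reports than attempts, the process most likely died (panic/OOM) before
      # writing a report, so e.g.
      #   find .testoutput -maxdepth 1 -name 'junit.*.xml' | wc -l   # -> 1 while MAX_TEST_ATTEMPTS=3
      # triggers `make report-test-crash`. (The retry-per-attempt behavior is inferred from
      # MAX_TEST_ATTEMPTS and the Makefile targets, not defined in this file.)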
      - name: Generate test summary
        uses: mikepenz/[email protected]
        if: failure()
        with:
          report_paths: ./.testoutput/junit.*.xml
          detailed_summary: true
          check_annotations: false
          annotate_only: true
          skip_annotations: true
      - name: Upload code coverage to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          directory: ./.testoutput
          flags: unit-test
      - name: Upload test results to Codecov
        if: ${{ !cancelled() }}
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          directory: ./.testoutput
          flags: unit-test
      - name: Upload test results to GitHub
        # Can't pin to major because the action linter doesn't recognize the include-hidden-files flag.
        uses: actions/[email protected]
        if: ${{ !cancelled() && !inputs.run_single_unit_test }}
        with:
          name: junit-xml--${{github.run_id}}--${{github.run_attempt}}--unit-test
          path: ./.testoutput/junit.*.xml
          include-hidden-files: true
          retention-days: 28
      # Ensure this doesn't contribute to the junit output.
      - name: Flaky Unit Test Detection
        if: ${{ !cancelled() && !inputs.run_single_unit_test && env.MODIFIED_TEST_SUITES != '' }}
        timeout-minutes: 30
        run: |
          echo "Detecting flaky unit tests: ${{ needs.set-up-single-test.outputs.modified_unit_test_suites }}"
          make unit-test
        env:
          FAILED_TEST_RETRIES: "0" # intentionally not retrying failed tests here, since we're trying to detect flakes
          TEST_ARGS: "-run=${{ needs.set-up-single-test.outputs.modified_unit_test_suites }} -count=10"
          TEST_TIMEOUT: 35
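      # Assuming the Makefile forwards TEST_ARGS/TEST_TIMEOUT to `go test` (not verified here),
      # the flake-detection run above amounts to something like:
      #   go test ./... -run 'TestFooSuite|TestBarSuite' -count=10
      # with FAILED_TEST_RETRIES=0 so any intermittent failure surfaces instead of being retried
      # away. TestFooSuite/TestBarSuite are placeholders for the suites reported by
      # find_altered_tests.go in the set-up-single-test job.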
  integration-test:
    if: ${{ inputs.run_single_functional_test != true && inputs.run_single_unit_test != true }}
    name: Integration test
    needs: [pre-build, set-up-single-test]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          ref: ${{ env.COMMIT }}
      - name: Start containerized dependencies
        uses: hoverkraft-tech/[email protected]
        with:
          compose-file: ${{ env.DOCKER_COMPOSE_FILE }}
          services: |
            cassandra
            mysql
            postgresql
          down-flags: -v
      - uses: actions/setup-go@v5
        with:
          go-version-file: "go.mod"
          cache: false # do our own caching
      - name: Restore dependencies
        uses: actions/cache/restore@v4
        with:
          path: ~/go/pkg/mod
          key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ hashFiles('go.sum') }}
      - name: Restore build outputs
        uses: actions/cache/restore@v4
        with:
          path: ~/.cache/go-build
          key: go-${{ runner.os }}${{ runner.arch }}-build-${{ env.COMMIT }}
      - name: Run integration test
        timeout-minutes: 15
        run: make integration-test-coverage
      - name: Generate crash report
        if: failure()
        run: | # if the tests failed, we would expect one JUnit XML report per attempt; otherwise it must have crashed
          [ "$(find .testoutput -maxdepth 1 -name 'junit.*.xml' | wc -l)" -lt "$MAX_TEST_ATTEMPTS" ] &&
            CRASH_REPORT_NAME="$GITHUB_JOB" make report-test-crash
      - name: Generate test summary
        uses: mikepenz/[email protected]
        if: failure()
        with:
          report_paths: ./.testoutput/junit.*.xml
          detailed_summary: true
          check_annotations: false
          annotate_only: true
          skip_annotations: true
      - name: Upload code coverage to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          directory: ./.testoutput
          flags: integration-test
      - name: Upload test results to Codecov
        if: ${{ !cancelled() }}
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          directory: ./.testoutput
          flags: integration-test
      - name: Upload test results to GitHub
        # Can't pin to major because the action linter doesn't recognize the include-hidden-files flag.
        uses: actions/[email protected]
        if: ${{ !cancelled() }}
        with:
          name: junit-xml--${{github.run_id}}--${{github.run_attempt}}--integration-test
          path: ./.testoutput/junit.*.xml
          include-hidden-files: true
          retention-days: 28
      # Ensure this doesn't contribute to the junit output.
      - name: Flaky Integration Test Detection
        if: ${{ env.MODIFIED_TEST_SUITES != '' }}
        timeout-minutes: 30
        run: |
          echo "Detecting flaky integration tests: ${{ needs.set-up-single-test.outputs.modified_integration_test_suites }}"
          make integration-test-coverage
        env:
          FAILED_TEST_RETRIES: "0" # intentionally not retrying failed tests here, since we're trying to detect flakes
          TEST_ARGS: "-run=${{ needs.set-up-single-test.outputs.modified_integration_test_suites }} -count=5"
      - name: Tear down docker compose
        if: ${{ always() }}
        run: |
          docker compose -f ${{ env.DOCKER_COMPOSE_FILE }} down -v
  # Root job name includes matrix details so it is unique per shard.
  # This MUST stay in sync with the `job_name` passed to the job-id action below.
  functional-test:
    if: ${{ inputs.run_single_unit_test != true }}
    # Display name shown in the UI. The job-id lookup uses this exact value.
    name: Functional test (${{ matrix.name }}, shard ${{ matrix.shard_index }})
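    # With the matrix below this renders to display names like
    #   Functional test (cass_es, shard 0) ... Functional test (postgres12_pgx, shard 2)
    # The get-job-id step later in this job passes the same template, so the two must not drift apart.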
    needs: [pre-build, set-up-single-test]
    strategy:
      fail-fast: false
      matrix:
        name:
          - cass_es
          - cass_es8
          - cass_os2
          - sqlite
          - mysql8
          - postgres12
          - postgres12_pgx
        shard_index: ${{ fromJson(needs.set-up-single-test.outputs.shard_indices) }}
        runs-on: ${{ fromJson(needs.set-up-single-test.outputs.runs_on) }}
        include:
          - name: cass_es
            persistence_type: nosql
            persistence_driver: cassandra
            containers: [cassandra, elasticsearch]
            es_version: v7
            # Cassandra tests need a larger instance
            runs_on_override: ubuntu-latest-8-cores
          - name: cass_es8
            persistence_type: nosql
            persistence_driver: cassandra
            containers: [cassandra, elasticsearch8]
            es_version: v8
            # Cassandra tests need a larger instance
            runs_on_override: ubuntu-latest-8-cores
          - name: cass_os2
            persistence_type: nosql
            persistence_driver: cassandra
            containers: [cassandra, opensearch2]
          - name: sqlite
            persistence_type: sql
            persistence_driver: sqlite
            containers: []
          - name: mysql8
            persistence_type: sql
            persistence_driver: mysql8
            containers: [mysql]
          - name: postgres12
            persistence_type: sql
            persistence_driver: postgres12
            containers: [postgresql]
          - name: postgres12_pgx
            persistence_type: sql
            persistence_driver: postgres12_pgx
            containers: [postgresql]
    runs-on: ${{ matrix.runs_on_override || matrix.runs-on }}
    env:
      TEST_TOTAL_SHARDS: ${{ needs.set-up-single-test.outputs.total_shards }}
      TEST_SHARD_INDEX: ${{ matrix.shard_index }}
      PERSISTENCE_TYPE: ${{ matrix.persistence_type }}
      PERSISTENCE_DRIVER: ${{ matrix.persistence_driver }}
      TEST_TIMEOUT: ${{ needs.set-up-single-test.outputs.test_timeout }}
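    # How TEST_TOTAL_SHARDS / TEST_SHARD_INDEX are consumed is up to the test harness invoked by
    # the Makefile; the assumption is that each suite is deterministically assigned to exactly one
    # shard, e.g. something along the lines of
    #   hash(suiteName) % TEST_TOTAL_SHARDS == TEST_SHARD_INDEX
    # so the three shard jobs together cover every suite exactly once.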
    steps:
      - uses: ScribeMD/[email protected]
        if: ${{ inputs.run_single_functional_test != true || (inputs.run_single_functional_test == true && contains(fromJSON(needs.set-up-single-test.outputs.dbs), env.PERSISTENCE_DRIVER)) }}
        with:
          key: docker-${{ runner.os }}${{ runner.arch }}-${{ hashFiles(env.DOCKER_COMPOSE_FILE) }}
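      # The long `if:` guard repeated on the steps in this job implements the single-test DB filter:
      # in run_single_functional_test mode a matrix variant only does real work when its
      # PERSISTENCE_DRIVER appears in the requested test_dbs list. For example, with the default
      # test_dbs of '["sqlite"]':
      #   contains(fromJSON('["sqlite"]'), 'mysql8')   # -> false, the mysql8 variant's steps are skipped
      #   contains(fromJSON('["sqlite"]'), 'sqlite')   # -> true,  the sqlite variant runs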
      - uses: actions/checkout@v4
        if: ${{ inputs.run_single_functional_test != true || (inputs.run_single_functional_test == true && contains(fromJSON(needs.set-up-single-test.outputs.dbs), env.PERSISTENCE_DRIVER)) }}
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          ref: ${{ env.COMMIT }}
      - name: Start containerized dependencies
        if: ${{ toJson(matrix.containers) != '[]' && (inputs.run_single_functional_test != true || (inputs.run_single_functional_test == true && contains(fromJSON(needs.set-up-single-test.outputs.dbs), env.PERSISTENCE_DRIVER))) }}
        uses: hoverkraft-tech/[email protected]
        with:
          compose-file: ${{ env.DOCKER_COMPOSE_FILE }}
          services: "${{ join(matrix.containers, '\n') }}"
          down-flags: -v
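      # join() turns the matrix container list into the newline-separated `services` input, e.g.
      # [cassandra, elasticsearch] becomes "cassandra\nelasticsearch". For sqlite the list is empty,
      # and the toJson(matrix.containers) != '[]' guard above skips the step entirely.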
      - uses: actions/setup-go@v5
        if: ${{ inputs.run_single_functional_test != true || (inputs.run_single_functional_test == true && contains(fromJSON(needs.set-up-single-test.outputs.dbs), env.PERSISTENCE_DRIVER)) }}
        with:
          go-version-file: "go.mod"
          cache: false # do our own caching
      - name: Restore dependencies
        uses: actions/cache/restore@v4
        with:
          path: ~/go/pkg/mod
          key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ hashFiles('go.sum') }}
      - name: Restore build outputs
        uses: actions/cache/restore@v4
        with:
          path: ~/.cache/go-build
          key: go-${{ runner.os }}${{ runner.arch }}-build-${{ env.COMMIT }}
      - name: Get job ID
        if: ${{ inputs.run_single_functional_test != true || (inputs.run_single_functional_test == true && contains(fromJSON(needs.set-up-single-test.outputs.dbs), env.PERSISTENCE_DRIVER)) }}
        id: get_job_id
        uses: ./.github/actions/get-job-id
        with:
          job_name: Functional test (${{ matrix.name }}, shard ${{ matrix.shard_index }})
          run_id: ${{ github.run_id }}
      - name: Run functional test
        if: ${{ inputs.run_single_functional_test != true || (inputs.run_single_functional_test == true && contains(fromJSON(needs.set-up-single-test.outputs.dbs), env.PERSISTENCE_DRIVER)) }}
        timeout-minutes: ${{ fromJSON(needs.set-up-single-test.outputs.github_timeout) }} # make sure this is larger than the test timeout in the Makefile
        run: make functional-test-coverage
        env:
          TEST_ARGS: ${{ needs.set-up-single-test.outputs.single_test_args }}
          TEMPORAL_TEST_OTEL_OUTPUT: ${{ github.workspace }}/.testoutput
          TEMPORAL_OTEL_DEBUG: true
      - name: Generate crash report
        if: failure()
        run: | # if the tests failed, we would expect one JUnit XML report per attempt; otherwise it must have crashed
          [ "$(find .testoutput -maxdepth 1 -name 'junit.*.xml' | wc -l)" -lt "$MAX_TEST_ATTEMPTS" ] &&
            CRASH_REPORT_NAME="$GITHUB_JOB" make report-test-crash
      - name: Generate test summary
        uses: mikepenz/[email protected]
        if: failure()
        with:
          report_paths: ./.testoutput/junit.*.xml
          detailed_summary: true
          check_annotations: false
          annotate_only: true
          skip_annotations: true
      - name: Upload code coverage to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          directory: ./.testoutput
          flags: functional-test
      - name: Upload test results to Codecov
        if: ${{ !cancelled() }}
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          directory: ./.testoutput
          flags: functional-test
      - name: Upload test results to GitHub
        # Can't pin to major because the action linter doesn't recognize the include-hidden-files flag.
        uses: actions/[email protected]
        if: ${{ !cancelled() && !inputs.run_single_functional_test }}
        with:
          name: junit-xml--${{github.run_id}}--${{ steps.get_job_id.outputs.job_id }}--${{github.run_attempt}}--${{matrix.name}}--${{matrix.shard_index}}--functional-test
          path: ./.testoutput/junit.*.xml
          include-hidden-files: true
          retention-days: 28
      # Ensure this doesn't contribute to the junit output.
      - name: Flaky Functional Test Detection
        if: ${{ (env.MODIFIED_TEST_SUITES != '') && (inputs.run_single_functional_test != true || (inputs.run_single_functional_test == true && contains(fromJSON(needs.set-up-single-test.outputs.dbs), env.PERSISTENCE_DRIVER))) }}
        timeout-minutes: ${{ fromJSON(needs.set-up-single-test.outputs.github_timeout) }} # make sure this is larger than the test timeout in the Makefile
        run: |
          echo "Detecting flaky functional tests: ${{ needs.set-up-single-test.outputs.modified_functional_test_suites }}"
          make functional-test-coverage
        env:
          FAILED_TEST_RETRIES: "0" # intentionally not retrying failed tests here, since we're trying to detect flakes
          TEST_ARGS: "-run=${{ needs.set-up-single-test.outputs.modified_functional_test_suites }} -count=5"
      # Upload OpenTelemetry traces.
      - name: Upload OpenTelemetry traces
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: traces-json--${{github.run_id}}--${{ steps.get_job_id.outputs.job_id }}--${{matrix.shard_index}}--${{github.run_attempt}}--${{matrix.name}}--functional-test
          path: ./.testoutput/traces.*.json
          if-no-files-found: ignore
          include-hidden-files: true
          retention-days: 28
  # XDC matrix job. Include `${{ matrix.name }}` in the display name so each
  # matrix variant has a unique name for the job-id lookup.
  functional-test-xdc:
    if: ${{ inputs.run_single_functional_test != true && inputs.run_single_unit_test != true }}
    # Display name shown in the UI. The job-id lookup uses this exact value.
    name: Functional test xdc (${{ matrix.name }})
    needs: [pre-build, set-up-single-test]
    strategy:
      fail-fast: false
      matrix:
        name: [cass_es, cass_es8, cass_os2, mysql8, postgres12, postgres12_pgx]
        include:
          - name: cass_es
            persistence_type: nosql
            persistence_driver: elasticsearch
            parallel_flags: ""
            containers: [cassandra, elasticsearch]
            # Cassandra tests need a larger instance
            runs_on_override: ubuntu-latest-8-cores
          - name: cass_es8
            persistence_type: nosql
            persistence_driver: elasticsearch
            parallel_flags: ""
            containers: [cassandra, elasticsearch8]
            # Cassandra tests need a larger instance
            runs_on_override: ubuntu-latest-8-cores
          - name: cass_os2
            persistence_type: nosql
            persistence_driver: cassandra
            containers: [cassandra, opensearch2]
          - name: mysql8
            persistence_type: sql
            persistence_driver: mysql8
            parallel_flags: ""
            containers: [mysql]
          - name: postgres12
            persistence_type: sql
            persistence_driver: postgres12
            parallel_flags: "-parallel=2" # reduce parallelism for postgres
            containers: [postgresql]
          - name: postgres12_pgx
            persistence_type: sql
            persistence_driver: postgres12_pgx
            parallel_flags: "-parallel=2" # reduce parallelism for postgres
            containers: [postgresql]
    runs-on: ${{ matrix.runs_on_override || 'ubuntu-latest' }}
    env:
      PERSISTENCE_TYPE: ${{ matrix.persistence_type }}
      PERSISTENCE_DRIVER: ${{ matrix.persistence_driver }}
      TEST_PARALLEL_FLAGS: ${{ matrix.parallel_flags }}
    steps:
      - uses: actions/checkout@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          ref: ${{ env.COMMIT }}
      # Resolve the numeric job ID for this job instance.
      # IMPORTANT: `job_name` must exactly match the job's display name above.
      - name: Get job ID
        id: get_job_id
        uses: ./.github/actions/get-job-id
        with:
          job_name: Functional test xdc (${{ matrix.name }})
          run_id: ${{ github.run_id }}
      - name: Start containerized dependencies
        uses: hoverkraft-tech/[email protected]
        with:
          compose-file: ${{ env.DOCKER_COMPOSE_FILE }}
          services: "${{ join(matrix.containers, '\n') }}"
          down-flags: -v
      - uses: actions/setup-go@v5
        with:
          go-version-file: "go.mod"
          cache: false # do our own caching
      - name: Restore dependencies
        uses: actions/cache/restore@v4
        with:
          path: ~/go/pkg/mod
          key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ hashFiles('go.sum') }}
      - name: Restore build outputs
        uses: actions/cache/restore@v4
        with:
          path: ~/.cache/go-build
          key: go-${{ runner.os }}${{ runner.arch }}-build-${{ env.COMMIT }}
      - name: Run functional test xdc
        timeout-minutes: 35 # update this to TEST_TIMEOUT+5 if you update the Makefile
        run: make functional-test-xdc-coverage
        env:
          TEMPORAL_TEST_OTEL_OUTPUT: ${{ github.workspace }}/.testoutput
          TEMPORAL_OTEL_DEBUG: true
      - name: Generate crash report
        if: failure()
        run: | # if the tests failed, we would expect one JUnit XML report per attempt; otherwise it must have crashed
          [ "$(find .testoutput -maxdepth 1 -name 'junit.*.xml' | wc -l)" -lt "$MAX_TEST_ATTEMPTS" ] &&
            CRASH_REPORT_NAME="$GITHUB_JOB" make report-test-crash
      - name: Generate test summary
        uses: mikepenz/[email protected]
        if: failure()
        with:
          report_paths: ./.testoutput/junit.*.xml
          detailed_summary: true
          check_annotations: false
          annotate_only: true
          skip_annotations: true
      - name: Upload code coverage to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          directory: ./.testoutput
          flags: functional-test-xdc
      - name: Upload test results to Codecov
        if: ${{ !cancelled() }}
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          directory: ./.testoutput
          flags: functional-test-xdc
      - name: Upload test results to GitHub
        # Can't pin to major because the action linter doesn't recognize the include-hidden-files flag.
        uses: actions/[email protected]
        if: ${{ !cancelled() }}
        with:
          name: junit-xml--${{github.run_id}}--${{ steps.get_job_id.outputs.job_id }}--${{github.run_attempt}}--${{matrix.name}}--functional-test-xdc
          path: ./.testoutput
          include-hidden-files: true
          retention-days: 28
      # Ensure this doesn't contribute to the junit output.
      - name: Flaky Functional XDC Test Detection
        if: ${{ env.MODIFIED_TEST_SUITES != '' }}
        timeout-minutes: 60
        run: |
          echo "Detecting flaky functional xdc tests: ${{ needs.set-up-single-test.outputs.modified_functional_xdc_test_suites }}"
          make functional-test-xdc-coverage
        env:
          FAILED_TEST_RETRIES: "0" # intentionally not retrying failed tests here, since we're trying to detect flakes
          TEST_ARGS: "-run=${{ needs.set-up-single-test.outputs.modified_functional_xdc_test_suites }} -count=5"
      # Upload OpenTelemetry traces.
      - name: Upload OpenTelemetry traces
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: traces-json--${{github.run_id}}--${{ steps.get_job_id.outputs.job_id }}--${{github.run_attempt}}--${{matrix.name}}--functional-test-xdc
          path: ./.testoutput/traces.*.json
          if-no-files-found: ignore
          retention-days: 28
  # NDC matrix job. Include `${{ matrix.name }}` in the display name so each
  # matrix variant has a unique name for the job-id lookup.
  functional-test-ndc:
    if: ${{ inputs.run_single_functional_test != true && inputs.run_single_unit_test != true }}
    # Display name shown in the UI. The job-id lookup uses this exact value.
    name: Functional test ndc (${{ matrix.name }})
    needs: [pre-build, set-up-single-test]
    strategy:
      fail-fast: false
      matrix:
        name:
          - cass_es
          - cass_es8
          - cass_os2
          - mysql8
          - postgres12
          - postgres12_pgx
        include:
          - name: cass_es
            persistence_type: nosql
            persistence_driver: elasticsearch
            containers: [cassandra, elasticsearch]
            es_version: v7
            # Cassandra tests need a larger instance
            runs_on_override: ubuntu-latest-8-cores
          - name: cass_es8
            persistence_type: nosql
            persistence_driver: elasticsearch
            containers: [cassandra, elasticsearch8]
            es_version: v8
            # Cassandra tests need a larger instance
            runs_on_override: ubuntu-latest-8-cores
          - name: cass_os2
            persistence_type: nosql
            persistence_driver: cassandra
            containers: [cassandra, opensearch2]
          - name: mysql8
            persistence_type: sql
            persistence_driver: mysql8
            containers: [mysql]
          - name: postgres12
            persistence_type: sql
            persistence_driver: postgres12
            containers: [postgresql]
          - name: postgres12_pgx
            persistence_type: sql
            persistence_driver: postgres12_pgx
            containers: [postgresql]
    runs-on: ${{ matrix.runs_on_override || 'ubuntu-latest' }}
    env:
      PERSISTENCE_TYPE: ${{ matrix.persistence_type }}
      PERSISTENCE_DRIVER: ${{ matrix.persistence_driver }}
      ES_VERSION: ${{ matrix.es_version }}
    steps:
      - uses: actions/checkout@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          ref: ${{ env.COMMIT }}
      # Resolve the numeric job ID for this job instance.
      # IMPORTANT: `job_name` must exactly match the job's display name above.
      - name: Get job ID
        id: get_job_id
        uses: ./.github/actions/get-job-id
        with:
          job_name: Functional test ndc (${{ matrix.name }})
          run_id: ${{ github.run_id }}
      - name: Start containerized dependencies
        uses: hoverkraft-tech/[email protected]
        with:
          compose-file: ${{ env.DOCKER_COMPOSE_FILE }}
          services: "${{ join(matrix.containers, '\n') }}"
          down-flags: -v
      - uses: actions/setup-go@v5
        with:
          go-version-file: "go.mod"
          cache: false # do our own caching
      - name: Restore dependencies
        uses: actions/cache/restore@v4
        with:
          path: ~/go/pkg/mod
          key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ hashFiles('go.sum') }}
      - name: Restore build outputs
        uses: actions/cache/restore@v4
        with:
          path: ~/.cache/go-build
          key: go-${{ runner.os }}${{ runner.arch }}-build-${{ env.COMMIT }}
      - name: Run functional test ndc
        timeout-minutes: 15
        run: make functional-test-ndc-coverage
        env:
          TEMPORAL_TEST_OTEL_OUTPUT: ${{ github.workspace }}/.testoutput
          TEMPORAL_OTEL_DEBUG: true
      - name: Generate crash report
        if: failure()
        run: | # if the tests failed, we would expect one JUnit XML report per attempt; otherwise it must have crashed
          [ "$(find .testoutput -maxdepth 1 -name 'junit.*.xml' | wc -l)" -lt "$MAX_TEST_ATTEMPTS" ] &&
            CRASH_REPORT_NAME="$GITHUB_JOB" make report-test-crash
      - name: Upload code coverage to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          directory: ./.testoutput
          flags: functional-test-ndc
      - name: Upload test results to Codecov
        if: ${{ !cancelled() }}
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          directory: ./.testoutput
          flags: functional-test-ndc
      - name: Upload test results to GitHub
        # Can't pin to major because the action linter doesn't recognize the include-hidden-files flag.
        uses: actions/[email protected]
        if: ${{ !cancelled() }}
        with:
          name: junit-xml--${{github.run_id}}--${{ steps.get_job_id.outputs.job_id }}--${{github.run_attempt}}--${{matrix.name}}--functional-test-ndc
          path: ./.testoutput/junit.*.xml
          include-hidden-files: true
          retention-days: 28
      # Ensure this doesn't contribute to the junit output.
      - name: Flaky Functional NDC Test Detection
        if: ${{ env.MODIFIED_TEST_SUITES != '' }}
        timeout-minutes: 30
        run: |
          echo "Detecting flaky functional ndc tests: ${{ needs.set-up-single-test.outputs.modified_functional_ndc_test_suites }}"
          make functional-test-ndc-coverage
        env:
          FAILED_TEST_RETRIES: "0" # intentionally not retrying failed tests here, since we're trying to detect flakes
          TEST_ARGS: "-run=${{ needs.set-up-single-test.outputs.modified_functional_ndc_test_suites }} -count=5"
      # Upload OpenTelemetry traces.
      - name: Upload OpenTelemetry traces
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: traces-json--${{github.run_id}}--${{ steps.get_job_id.outputs.job_id }}--${{github.run_attempt}}--${{matrix.name}}--functional-test-ndc
          path: ./.testoutput/traces.*.json
          if-no-files-found: ignore
          retention-days: 28
  test-status:
    if: always()
    name: Test Status
    needs:
      - misc-checks
      - unit-test
      - integration-test
      - functional-test
      - functional-test-xdc
      - functional-test-ndc
    runs-on: ubuntu-latest
    env:
      RESULTS: ${{ toJSON(needs.*.result) }}
    steps:
      - name: Check results
        run: |
          if [[ "${{ inputs.run_single_functional_test }}" != "true" ]]; then
            # if in all-tests mode, all statuses must be success
            if [[ -n $(echo "$RESULTS" | jq '.[] | select (. != "success")') ]]; then
              exit 1
            fi
          else
            # if in single-test mode, skipped status is ok, failure is failure
            if [[ -n $(echo "$RESULTS" | jq '.[] | select (. == "failure")') ]]; then
              exit 1
            fi
          fi
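      # Worked example of the check above, with an illustrative (not real) result set:
      #   RESULTS='["success","skipped","success","success","skipped","skipped"]'
      #   echo "$RESULTS" | jq '.[] | select (. != "success")'   # -> "skipped" ... non-empty, so all-tests mode fails
      #   echo "$RESULTS" | jq '.[] | select (. == "failure")'   # -> empty, so single-test mode passes
      # i.e. in all-tests mode every needed job must succeed, while in single-test mode the
      # skipped jobs (DB variants filtered out by test_dbs) are acceptable.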