|
1 | | -name: Validate Azure OpenAI response |
| 1 | +name: Validate Azure OpenAI response |
2 | 2 |
|
3 | 3 | on: |
4 | | - workflow_dispatch: # run on demand from the Actions tab |
| 4 | + workflow_dispatch: |
5 | 5 |
|
6 | 6 | jobs: |
7 | 7 | run-validation: |
8 | 8 | runs-on: ubuntu-latest |
9 | | - environment: responses # 🔑 unlocks the environment‑scoped secrets |
| 9 | +      environment: responses
| | +      permissions:
| | +        contents: write   # needed so the auto-commit step can push with GITHUB_TOKEN
10 | 10 |
|
11 | | - # Expose the environment secrets as real process env‑vars |
12 | 11 | env: |
13 | 12 | AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }} |
14 | 13 | AZURE_OPENAI_V1_API_ENDPOINT: ${{ secrets.AZURE_OPENAI_V1_API_ENDPOINT }} |
15 | 14 | AZURE_OPENAI_API_MODEL: ${{ secrets.AZURE_OPENAI_API_MODEL }} |
16 | 15 |
|
17 | 16 | steps: |
18 | | - # 1 – check out the repo so the script is available |
19 | | - - uses: actions/checkout@v4 |
| 17 | + # 1 – check out the repo so the script is available |
| 18 | + - name: Checkout repository |
| 19 | + uses: actions/checkout@v4 |
20 | 20 |
|
21 | | - # 2 – set up Python |
22 | | - - uses: actions/setup-python@v5 |
23 | | - with: |
24 | | - python-version: "3.11" |
| 21 | + # 2 – set up Python |
| 22 | + - name: Setup Python 3.11 |
| 23 | + uses: actions/setup-python@v5 |
| 24 | + with: |
| 25 | + python-version: '3.11' |
25 | 26 |
|
26 | | - # 3 – install the script’s two lightweight deps |
27 | | - - name: Install requirements |
28 | | - run: | |
29 | | - python -m pip install --upgrade pip |
30 | | - pip install openai python-dotenv |
| 27 | + # 3 – install the script’s two lightweight deps |
| 28 | + - name: Install requirements |
| 29 | + run: | |
| 30 | + python -m pip install --upgrade pip |
| 31 | +          pip install openai python-dotenv
31 | 32 |
|
32 | | - # 4 – run the script, grade the result, assemble a report |
33 | | - - name: Execute script and capture outcome |
34 | | - id: test |
35 | | - shell: bash |
36 | | - run: | |
37 | | - set +e # we want to handle failures ourselves |
38 | | - TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ") |
| 33 | + # 4 – run the script, grade the result, assemble a report |
| 34 | + - name: Execute script and capture outcome |
| 35 | + id: test |
| 36 | + shell: bash |
| 37 | + run: | |
| 38 | + set +e |
| 39 | + TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ") |
39 | 40 |
|
40 | | - # === run the user script === |
41 | | - python responses-basic-aoai-v1.py > out.txt 2>&1 |
42 | | - EXIT_CODE=$? |
| 41 | + python responses-basic-aoai-v1.py > out.txt 2>&1 |
| 42 | + EXIT_CODE=$? |
43 | 43 |
|
44 | | - # === decide pass / fail === |
45 | | - if [[ $EXIT_CODE -eq 0 && -s out.txt ]]; then |
46 | | - PASS_FAIL="PASS" |
47 | | - else |
48 | | - PASS_FAIL="FAIL" |
49 | | - fi |
| 44 | + if [[ $EXIT_CODE -eq 0 && -s out.txt ]]; then |
| 45 | + PASS_FAIL="PASS" |
| 46 | + else |
| 47 | + PASS_FAIL="FAIL" |
| 48 | + fi |
50 | 49 |
|
51 | | - # === build JSON report === |
52 | | - jq -n \ |
53 | | - --arg date "$TIMESTAMP" \ |
54 | | - --arg output "$(cat out.txt | tr -d '\r')" \ |
55 | | - --arg pass_fail "$PASS_FAIL" \ |
56 | | - --argjson code "$EXIT_CODE" \ |
57 | | - '{test_run_date: $date, |
58 | | - output: $output, |
59 | | - pass_fail: $pass_fail, |
60 | | - error_code: $code}' > aoai-test-result.json |
| 50 | + jq -n \ |
| 51 | + --arg date "$TIMESTAMP" \ |
| 52 | + --arg output "$(cat out.txt | tr -d '\r')" \ |
| 53 | + --arg pass_fail "$PASS_FAIL" \ |
| 54 | + --argjson code "$EXIT_CODE" \ |
| 55 | + '{test_run_date: $date, |
| 56 | + output: $output, |
| 57 | + pass_fail: $pass_fail, |
| 58 | + error_code: $code}' \ |
| 59 | + > aoai-test-result.json |
61 | 60 |
|
62 | | - # 5 – make the report downloadable from the run summary |
63 | | - - name: Upload result artifact |
64 | | - uses: actions/upload-artifact@v4 |
65 | | - with: |
66 | | - name: aoai-response-test # folder name visible in the UI |
67 | | - path: aoai-test-result.json |
| 61 | + # 5 – make the report downloadable from the run summary |
| 62 | + - name: Upload result artifact |
| 63 | + uses: actions/upload-artifact@v4 |
| 64 | + with: |
| 65 | + name: aoai-response-test |
| 66 | + path: aoai-test-result.json |
| 67 | + |
| 68 | + # 6 – parse JSON and inject into README |
| 69 | + - name: Parse results and update README |
| 70 | + shell: bash |
| 71 | + env: |
| 72 | + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} |
| 73 | + run: | |
| 74 | + PASS=$(jq -r .pass_fail aoai-test-result.json) |
| 75 | + CODE=$(jq -r .error_code aoai-test-result.json) |
| 76 | + DATE=$(jq -r .test_run_date aoai-test-result.json) |
| 77 | +
|
| 78 | + read -r -d '' SNIPPET << EOF || true
| 79 | +<!-- AOAI-RESULTS-START --> |
| 80 | +## ⚙️ Last Azure OpenAI Test |
| 81 | +- **Date:** $DATE |
| 82 | +- **Result:** $PASS |
| 83 | +- **Exit code:** $CODE |
| 84 | +<!-- AOAI-RESULTS-END --> |
| 85 | +EOF |
| 86 | + |
| 87 | + awk -v new="$SNIPPET" ' |
| 88 | + /<!-- AOAI-RESULTS-START -->/ { print new; skip=1; next } |
| 89 | + /<!-- AOAI-RESULTS-END -->/ { skip=0; next }
| 90 | + skip { next } |
| 91 | + { print } |
| 92 | + ' README.md > README.tmp && mv README.tmp README.md |
| 93 | +
|
| 94 | + # 7 – commit & push the updated README back |
| 95 | + - name: Commit updated README |
| 96 | + uses: stefanzweifel/git-auto-commit-action@v4 |
| 97 | + with: |
| 98 | + commit_message: chore: update README with latest AOAI test results |
| 99 | + file_pattern: README.md |
| 100 | + commit_user_name: GitHub Actions
| 101 | + |
| 102 | + branch: ${{ github.ref_name }} |
0 commit comments