Smoke Claude #2616
Workflow file for this run
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| # ___ _ _ | |
| # / _ \ | | (_) | |
| # | |_| | __ _ ___ _ __ | |_ _ ___ | |
| # | _ |/ _` |/ _ \ '_ \| __| |/ __| | |
| # | | | | (_| | __/ | | | |_| | (__ | |
| # \_| |_/\__, |\___|_| |_|\__|_|\___| | |
| # __/ | | |
| # _ _ |___/ | |
| # | | | | / _| | | |
| # | | | | ___ _ __ _ __| |_| | _____ ____ | |
| # | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___| | |
| # \ /\ / (_) | | | | ( | | | | (_) \ V V /\__ \ | |
| # \/ \/ \___/|_| |_|\_\|_| |_|\___/ \_/\_/ |___/ | |
| # | |
| # This file was automatically generated by gh-aw. DO NOT EDIT. | |
| # | |
| # To update this file, edit the corresponding .md file and run: | |
| # gh aw compile | |
| # Not all edits will cause changes to this file. | |
| # | |
| # For more information: https://github.github.com/gh-aw/introduction/overview/ | |
| # | |
| # Smoke test workflow that validates Claude engine functionality by reviewing recent PRs twice daily | |
| # | |
| # Resolved workflow manifest: | |
| # Imports: | |
| # - shared/apm.md | |
| # - shared/gh.md | |
| # - shared/github-mcp-app.md | |
| # - shared/github-queries-mcp-script.md | |
| # - shared/go-make.md | |
| # - shared/mcp-pagination.md | |
| # - shared/mcp/serena-go.md | |
| # - shared/mcp/serena.md | |
| # - shared/mcp/tavily.md | |
| # - shared/reporting.md | |
| # | |
| # inlined-imports: true | |
| # | |
| # gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"8b4570801394464240788e4c538a220f7ae35f8abc29b9def1b603989de7841e","agent_id":"claude"} | |
| name: "Smoke Claude" | |
| "on": | |
| pull_request: | |
| # names: # Label filtering applied via job conditions | |
| # - smoke # Label filtering applied via job conditions | |
| types: | |
| - labeled | |
| schedule: | |
| - cron: "10 */12 * * *" | |
| workflow_dispatch: | |
| inputs: | |
| aw_context: | |
| default: "" | |
| description: Agent caller context (used internally by Agentic Workflows). | |
| required: false | |
| type: string | |
| permissions: {} | |
| concurrency: | |
| group: "gh-aw-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref || github.run_id }}" | |
| cancel-in-progress: true | |
| run-name: "Smoke Claude" | |
| jobs: | |
| activation: | |
| needs: pre_activation | |
| if: > | |
| needs.pre_activation.outputs.activated == 'true' && ((github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id) && | |
| (github.event_name != 'pull_request' || github.event.action != 'labeled' || github.event.label.name == 'smoke')) | |
| runs-on: ubuntu-slim | |
| permissions: | |
| contents: read | |
| discussions: write | |
| issues: write | |
| pull-requests: write | |
| outputs: | |
| body: ${{ steps.sanitized.outputs.body }} | |
| comment_id: ${{ steps.add-comment.outputs.comment-id }} | |
| comment_repo: ${{ steps.add-comment.outputs.comment-repo }} | |
| comment_url: ${{ steps.add-comment.outputs.comment-url }} | |
| lockdown_check_failed: ${{ steps.generate_aw_info.outputs.lockdown_check_failed == 'true' }} | |
| model: ${{ steps.generate_aw_info.outputs.model }} | |
| secret_verification_result: ${{ steps.validate-secret.outputs.verification_result }} | |
| text: ${{ steps.sanitized.outputs.text }} | |
| title: ${{ steps.sanitized.outputs.title }} | |
| steps: | |
| - name: Checkout actions folder | |
| uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 | |
| with: | |
| repository: github/gh-aw | |
| sparse-checkout: | | |
| actions | |
| persist-credentials: false | |
| - name: Setup Scripts | |
| uses: ./actions/setup | |
| with: | |
| destination: ${{ runner.temp }}/gh-aw/actions | |
| - name: Generate agentic run info | |
| id: generate_aw_info | |
| env: | |
| GH_AW_INFO_ENGINE_ID: "claude" | |
| GH_AW_INFO_ENGINE_NAME: "Claude Code" | |
| GH_AW_INFO_MODEL: ${{ vars.GH_AW_MODEL_AGENT_CLAUDE || 'auto' }} | |
| GH_AW_INFO_VERSION: "latest" | |
| GH_AW_INFO_AGENT_VERSION: "latest" | |
| GH_AW_INFO_WORKFLOW_NAME: "Smoke Claude" | |
| GH_AW_INFO_EXPERIMENTAL: "false" | |
| GH_AW_INFO_SUPPORTS_TOOLS_ALLOWLIST: "true" | |
| GH_AW_INFO_STAGED: "false" | |
| GH_AW_INFO_ALLOWED_DOMAINS: '["defaults","github","go","playwright"]' | |
| GH_AW_INFO_FIREWALL_ENABLED: "true" | |
| GH_AW_INFO_AWF_VERSION: "v0.25.10" | |
| GH_AW_INFO_AWMG_VERSION: "" | |
| GH_AW_INFO_FIREWALL_TYPE: "squid" | |
| GH_AW_COMPILED_STRICT: "false" | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 | |
| with: | |
| script: | | |
| const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs'); | |
| setupGlobals(core, github, context, exec, io); | |
| const { main } = require('${{ runner.temp }}/gh-aw/actions/generate_aw_info.cjs'); | |
| await main(core, context); | |
| - name: Add heart reaction for immediate feedback | |
| id: react | |
| if: github.event_name == 'issues' || github.event_name == 'issue_comment' || github.event_name == 'pull_request_review_comment' || github.event_name == 'discussion' || github.event_name == 'discussion_comment' || github.event_name == 'pull_request' && github.event.pull_request.head.repo.id == github.repository_id | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 | |
| env: | |
| GH_AW_REACTION: "heart" | |
| with: | |
| github-token: ${{ secrets.GITHUB_TOKEN }} | |
| script: | | |
| const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs'); | |
| setupGlobals(core, github, context, exec, io); | |
| const { main } = require('${{ runner.temp }}/gh-aw/actions/add_reaction.cjs'); | |
| await main(); | |
| - name: Validate ANTHROPIC_API_KEY secret | |
| id: validate-secret | |
| run: ${RUNNER_TEMP}/gh-aw/actions/validate_multi_secret.sh ANTHROPIC_API_KEY 'Claude Code' https://github.github.com/gh-aw/reference/engines/#anthropic-claude-code | |
| env: | |
| ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} | |
| - name: Checkout .github and .agents folders | |
| uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 | |
| with: | |
| persist-credentials: false | |
| sparse-checkout: | | |
| .github | |
| .agents | |
| actions/setup | |
| sparse-checkout-cone-mode: true | |
| fetch-depth: 1 | |
| - name: Check workflow file timestamps | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 | |
| env: | |
| GH_AW_WORKFLOW_FILE: "smoke-claude.lock.yml" | |
| with: | |
| script: | | |
| const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs'); | |
| setupGlobals(core, github, context, exec, io); | |
| const { main } = require('${{ runner.temp }}/gh-aw/actions/check_workflow_timestamp_api.cjs'); | |
| await main(); | |
| - name: Compute current body text | |
| id: sanitized | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 | |
| with: | |
| script: | | |
| const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs'); | |
| setupGlobals(core, github, context, exec, io); | |
| const { main } = require('${{ runner.temp }}/gh-aw/actions/compute_text.cjs'); | |
| await main(); | |
| - name: Add comment with workflow run link | |
| id: add-comment | |
| if: github.event_name == 'issues' || github.event_name == 'issue_comment' || github.event_name == 'pull_request_review_comment' || github.event_name == 'discussion' || github.event_name == 'discussion_comment' || github.event_name == 'pull_request' && github.event.pull_request.head.repo.id == github.repository_id | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 | |
| env: | |
| GH_AW_WORKFLOW_NAME: "Smoke Claude" | |
| GH_AW_SAFE_OUTPUT_MESSAGES: "{\"footer\":\"\\u003e 💥 *[THE END] — Illustrated by [{workflow_name}]({run_url})*{history_link}\",\"runStarted\":\"💥 **WHOOSH!** [{workflow_name}]({run_url}) springs into action on this {event_type}! *[Panel 1 begins...]*\",\"runSuccess\":\"🎬 **THE END** — [{workflow_name}]({run_url}) **MISSION: ACCOMPLISHED!** The hero saves the day! ✨\",\"runFailure\":\"💫 **TO BE CONTINUED...** [{workflow_name}]({run_url}) {status}! Our hero faces unexpected challenges...\"}" | |
| with: | |
| script: | | |
| const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs'); | |
| setupGlobals(core, github, context, exec, io); | |
| const { main } = require('${{ runner.temp }}/gh-aw/actions/add_workflow_run_comment.cjs'); | |
| await main(); | |
| - name: Create prompt with built-in context | |
| env: | |
| GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt | |
| GH_AW_SAFE_OUTPUTS: ${{ runner.temp }}/gh-aw/safeoutputs/outputs.jsonl | |
| GH_AW_GITHUB_ACTOR: ${{ github.actor }} | |
| GH_AW_GITHUB_EVENT_COMMENT_ID: ${{ github.event.comment.id }} | |
| GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: ${{ github.event.discussion.number }} | |
| GH_AW_GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number }} | |
| GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }} | |
| GH_AW_GITHUB_REPOSITORY: ${{ github.repository }} | |
| GH_AW_GITHUB_RUN_ID: ${{ github.run_id }} | |
| GH_AW_GITHUB_SERVER_URL: ${{ github.server_url }} | |
| GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }} | |
| # poutine:ignore untrusted_checkout_exec | |
| run: | | |
| bash ${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh | |
| { | |
| cat << 'GH_AW_PROMPT_b727304f9785a29f_EOF' | |
| <system> | |
| GH_AW_PROMPT_b727304f9785a29f_EOF | |
| cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md" | |
| cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md" | |
| cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md" | |
| cat "${RUNNER_TEMP}/gh-aw/prompts/playwright_prompt.md" | |
| cat "${RUNNER_TEMP}/gh-aw/prompts/agentic_workflows_guide.md" | |
| cat "${RUNNER_TEMP}/gh-aw/prompts/cache_memory_prompt.md" | |
| cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md" | |
| cat << 'GH_AW_PROMPT_b727304f9785a29f_EOF' | |
| <safe-output-tools> | |
| Tools: add_comment(max:2), create_issue, close_pull_request, update_pull_request, create_pull_request_review_comment(max:5), submit_pull_request_review, resolve_pull_request_review_thread(max:5), add_labels, add_reviewer(max:2), push_to_pull_request_branch, missing_tool, missing_data, noop, post_slack_message | |
| GH_AW_PROMPT_b727304f9785a29f_EOF | |
| cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_push_to_pr_branch.md" | |
| cat << 'GH_AW_PROMPT_b727304f9785a29f_EOF' | |
| </safe-output-tools> | |
| <github-context> | |
| The following GitHub context information is available for this workflow: | |
| {{#if __GH_AW_GITHUB_ACTOR__ }} | |
| - **actor**: __GH_AW_GITHUB_ACTOR__ | |
| {{/if}} | |
| {{#if __GH_AW_GITHUB_REPOSITORY__ }} | |
| - **repository**: __GH_AW_GITHUB_REPOSITORY__ | |
| {{/if}} | |
| {{#if __GH_AW_GITHUB_WORKSPACE__ }} | |
| - **workspace**: __GH_AW_GITHUB_WORKSPACE__ | |
| {{/if}} | |
| {{#if __GH_AW_GITHUB_EVENT_ISSUE_NUMBER__ }} | |
| - **issue-number**: #__GH_AW_GITHUB_EVENT_ISSUE_NUMBER__ | |
| {{/if}} | |
| {{#if __GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER__ }} | |
| - **discussion-number**: #__GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER__ | |
| {{/if}} | |
| {{#if __GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER__ }} | |
| - **pull-request-number**: #__GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER__ | |
| {{/if}} | |
| {{#if __GH_AW_GITHUB_EVENT_COMMENT_ID__ }} | |
| - **comment-id**: __GH_AW_GITHUB_EVENT_COMMENT_ID__ | |
| {{/if}} | |
| {{#if __GH_AW_GITHUB_RUN_ID__ }} | |
| - **workflow-run-id**: __GH_AW_GITHUB_RUN_ID__ | |
| {{/if}} | |
| </github-context> | |
| GH_AW_PROMPT_b727304f9785a29f_EOF | |
| cat "${RUNNER_TEMP}/gh-aw/prompts/github_mcp_tools_with_safeoutputs_prompt.md" | |
| cat << 'GH_AW_PROMPT_b727304f9785a29f_EOF' | |
| </system> | |
| ## Serena Code Analysis | |
| The Serena MCP server is configured for **["go"]** analysis in this workspace: | |
| - **Workspace**: `__GH_AW_GITHUB_WORKSPACE__` | |
| - **Memory**: `/tmp/gh-aw/cache-memory/serena/` | |
| ### Project Activation | |
| Before analyzing code, activate the Serena project: | |
| ``` | |
| Tool: activate_project | |
| Args: { "path": "__GH_AW_GITHUB_WORKSPACE__" } | |
| ``` | |
| ### Available Capabilities | |
| Serena provides IDE-grade Language Server Protocol (LSP) tools including: | |
| - **Symbol search**: `find_symbol` — locate functions, types, interfaces by name | |
| - **Navigation**: `find_referencing_symbols` — find all callers/usages of a symbol | |
| - **Type info**: `get_symbol_documentation` — hover-level type and doc information | |
| - **Code editing**: `replace_symbol_body`, `insert_after_symbol` — symbol-level edits | |
| - **Diagnostics**: `get_diagnostics` — compiler errors and linter warnings | |
| ### Analysis Guidelines | |
| 1. **Use semantic tools over text search** — prefer Serena's LSP tools over `grep` | |
| 2. **Activate project first** — always call `activate_project` before other tools | |
| 3. **Cross-reference findings** — validate with multiple tools for accuracy | |
| 4. **Focus on the relevant language files** — ignore unrelated file types | |
| ## MCP Response Size Limits | |
| MCP tool responses have a **25,000 token limit**. When GitHub API responses exceed this limit, workflows must retry with pagination parameters, wasting turns and tokens. | |
| ### Common Scenarios | |
| **Problem**: Fetching large result sets without pagination | |
| - `list_pull_requests` with many PRs (75,897 tokens in one case) | |
| - `pull_request_read` with large diff/comments (31,675 tokens observed) | |
| - `search_issues`, `search_code` with many results | |
| **Solution**: Use proactive pagination to stay under token limits | |
| ### Pagination Best Practices | |
| #### 1. Use `perPage` Parameter | |
| Limit results per request to prevent oversized responses: | |
| ```bash | |
| # Good: Fetch PRs in small batches | |
| list_pull_requests --perPage 10 | |
| # Good: Get issue with limited comments | |
| issue_read --method get_comments --perPage 20 | |
| # Bad: Default pagination may return too much data | |
| list_pull_requests # May exceed 25k tokens | |
| ``` | |
| #### 2. Common `perPage` Values | |
| - **10-20**: For detailed items (PRs with diffs, issues with comments) | |
| - **50-100**: For simpler list operations (commits, branches, labels) | |
| - **1-5**: For exploratory queries or schema discovery | |
| #### 3. Handle Pagination Loops | |
| When you need all results: | |
| ```bash | |
| # Step 1: Fetch first page | |
| result=$(list_pull_requests --perPage 20 --page 1) | |
| # Step 2: Check if more pages exist | |
| # Most list operations return metadata about total count or next page | |
| # Step 3: Fetch subsequent pages if needed | |
| result=$(list_pull_requests --perPage 20 --page 2) | |
| ``` | |
| ### Tool-Specific Guidance | |
| #### Pull Requests | |
| ```bash | |
| # Fetch recent PRs in small batches | |
| list_pull_requests --state all --perPage 10 --sort updated --direction desc | |
| # Get PR details without full diff/comments | |
| pull_request_read --method get --pullNumber 123 | |
| # Get PR files separately if needed | |
| pull_request_read --method get_files --pullNumber 123 --perPage 30 | |
| ``` | |
| #### Issues | |
| ```bash | |
| # List issues with pagination | |
| list_issues --perPage 20 --page 1 | |
| # Get issue comments in batches | |
| issue_read --method get_comments --issue_number 123 --perPage 20 | |
| ``` | |
| #### Code Search | |
| ```bash | |
| # Search with limited results | |
| search_code --query "function language:go" --perPage 10 | |
| ``` | |
| ### Error Messages to Watch For | |
| If you see these errors, add pagination: | |
| - `MCP tool "list_pull_requests" response (75897 tokens) exceeds maximum allowed tokens (25000)` | |
| - `MCP tool "pull_request_read" response (31675 tokens) exceeds maximum allowed tokens (25000)` | |
| - `Response too large for tool [tool_name]` | |
| ### Performance Tips | |
| 1. **Start small**: Use `perPage: 10` initially, increase if needed | |
| 2. **Fetch incrementally**: Get overview first, then details for specific items | |
| 3. **Avoid wildcards**: Don't fetch all data when you need specific items | |
| 4. **Use filters**: Combine `perPage` with state/label/date filters to reduce results | |
| ### Example Workflow Pattern | |
| ```markdown | |
| # Analyze Recent Pull Requests | |
| 1. Fetch 10 most recent PRs (stay under token limit) | |
| 2. For each PR, get summary without full diff | |
| 3. If detailed analysis needed, fetch files for specific PR separately | |
| 4. Process results incrementally rather than loading everything at once | |
| ``` | |
| This proactive approach eliminates retry loops and reduces token consumption. | |
| **IMPORTANT**: Always use the `mcpscripts-gh` tool for GitHub CLI commands instead of running `gh` directly via bash. The `mcpscripts-gh` tool has proper authentication configured with `GITHUB_TOKEN`, while bash commands do not have GitHub CLI authentication by default. | |
| **Correct**: | |
| ``` | |
| Use the mcpscripts-gh tool with args: "pr list --limit 5" | |
| Use the mcpscripts-gh tool with args: "issue view 123" | |
| ``` | |
| **Incorrect**: | |
| ``` | |
| Use the gh mcp-script tool with args: "pr list --limit 5" ❌ (Wrong tool name - use mcpscripts-gh) | |
| Run: gh pr list --limit 5 ❌ (No authentication in bash) | |
| Execute bash: gh issue view 123 ❌ (No authentication in bash) | |
| ``` | |
| ## Report Structure Guidelines | |
| ### 1. Header Levels | |
| **Use h3 (###) or lower for all headers in your issue report to maintain proper document hierarchy.** | |
| When creating GitHub issues or discussions: | |
| - Use `###` (h3) for main sections (e.g., "### Test Summary") | |
| - Use `####` (h4) for subsections (e.g., "#### Device-Specific Results") | |
| - Never use `##` (h2) or `#` (h1) in reports - these are reserved for titles | |
| ### 2. Progressive Disclosure | |
| **Wrap detailed test results in `<details><summary><b>Section Name</b></summary>` tags to improve readability and reduce scrolling.** | |
| Use collapsible sections for: | |
| - Verbose details (full test logs, raw data) | |
| - Secondary information (minor warnings, extra context) | |
| - Per-item breakdowns when there are many items | |
| Always keep critical information visible (summary, critical issues, key metrics). | |
| ### 3. Report Structure Pattern | |
| 1. **Overview**: 1-2 paragraphs summarizing key findings | |
| 2. **Critical Information**: Show immediately (summary stats, critical issues) | |
| 3. **Details**: Use `<details><summary><b>Section Name</b></summary>` for expanded content | |
| 4. **Context**: Add helpful metadata (workflow run, date, trigger) | |
| ### Design Principles (Airbnb-Inspired) | |
| Reports should: | |
| - **Build trust through clarity**: Most important info immediately visible | |
| - **Exceed expectations**: Add helpful context like trends, comparisons | |
| - **Create delight**: Use progressive disclosure to reduce overwhelm | |
| - **Maintain consistency**: Follow patterns across all reports | |
| ### Example Report Structure | |
| ```markdown | |
| ### Summary | |
| - Key metric 1: value | |
| - Key metric 2: value | |
| - Status: ✅/⚠️/❌ | |
| ### Critical Issues | |
| [Always visible - these are important] | |
| <details> | |
| <summary><b>View Detailed Results</b></summary> | |
| [Comprehensive details, logs, traces] | |
| </details> | |
| <details> | |
| <summary><b>View All Warnings</b></summary> | |
| [Minor issues and potential problems] | |
| </details> | |
| ### Recommendations | |
| [Actionable next steps - keep visible] | |
| ``` | |
| ## Workflow Run References | |
| - Format run IDs as links: `[§12345](https://github.com/owner/repo/actions/runs/12345)` | |
| - Include up to 3 most relevant run URLs at end under `**References:**` | |
| - Do NOT add footer attribution (system adds automatically) | |
| **IMPORTANT**: Always use the `mcpscripts-go` and `mcpscripts-make` tools for Go and Make commands instead of running them directly via bash. These mcp-script tools provide consistent execution and proper logging. | |
| **Correct**: | |
| ``` | |
| Use the mcpscripts-go tool with args: "test ./..." | |
| Use the mcpscripts-make tool with args: "build" | |
| Use the mcpscripts-make tool with args: "lint" | |
| Use the mcpscripts-make tool with args: "test-unit" | |
| ``` | |
| **Incorrect**: | |
| ``` | |
| Use the go mcp-script tool with args: "test ./..." ❌ (Wrong tool name - use mcpscripts-go) | |
| Run: go test ./... ❌ (Use mcpscripts-go instead) | |
| Execute bash: make build ❌ (Use mcpscripts-make instead) | |
| ``` | |
| ## Serena Go Code Analysis | |
| The Serena MCP server is configured for Go code analysis in this workspace: | |
| - **Workspace**: `__GH_AW_GITHUB_WORKSPACE__` | |
| - **Memory**: `/tmp/gh-aw/cache-memory/serena/` | |
| ### Project Activation | |
| Before analyzing code, activate the Serena project: | |
| ``` | |
| Tool: activate_project | |
| Args: { "path": "__GH_AW_GITHUB_WORKSPACE__" } | |
| ``` | |
| ### Analysis Constraints | |
| 1. **Only analyze `.go` files** — Ignore all other file types | |
| 2. **Skip test files** — Never analyze files ending in `_test.go` | |
| 3. **Focus on `pkg/` directory** — Primary analysis area | |
| 4. **Use Serena for semantic analysis** — Leverage LSP capabilities for deeper insights | |
| # Smoke Test: Claude Engine Validation. | |
| **IMPORTANT: Keep all outputs extremely short and concise. Use single-line responses where possible. No verbose explanations.** | |
| ## Test Requirements | |
| 1. **GitHub MCP Testing**: Review the last 2 merged pull requests in __GH_AW_GITHUB_REPOSITORY__ | |
| 2. **MCP Scripts GH CLI Testing**: Use the `mcpscripts-gh` tool to query 2 pull requests from __GH_AW_GITHUB_REPOSITORY__ (use args: "pr list --repo __GH_AW_GITHUB_REPOSITORY__ --limit 2 --json number,title,author") | |
| 3. **Serena MCP Testing**: | |
| - Use the Serena MCP server tool `activate_project` to initialize the workspace at `__GH_AW_GITHUB_WORKSPACE__` and verify it succeeds (do NOT use bash to run go commands - use Serena's MCP tools or the mcpscripts-go/mcpscripts-make tools from the go-make shared workflow) | |
| - After initialization, use the `find_symbol` tool to search for symbols (find which tool to call) and verify that at least 3 symbols are found in the results | |
| 4. **Make Build Testing**: Use the `mcpscripts-make` tool to build the project (use args: "build") and verify it succeeds | |
| 5. **Playwright Testing**: Use the playwright tools to navigate to https://github.com and verify the page title contains "GitHub" (do NOT try to install playwright - use the provided MCP tools) | |
| 6. **Tavily Web Search Testing**: Use the Tavily MCP server to perform a web search for "GitHub Agentic Workflows" and verify that results are returned with at least one item | |
| 7. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-claude-__GH_AW_GITHUB_RUN_ID__.txt` with content "Smoke test passed for Claude at $(date)" (create the directory if it doesn't exist) | |
| 8. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) | |
| 9. **Discussion Interaction Testing**: | |
| - Use the `github-discussion-query` mcp-script tool with params: `limit=1, jq=".[0]"` to get the latest discussion from __GH_AW_GITHUB_REPOSITORY__ | |
| - Extract the discussion number from the result (e.g., if the result is `{"number": 123, "title": "...", ...}`, extract 123) | |
| - Use the `add_comment` tool with `discussion_number: <extracted_number>` to add a fun, comic-book style comment stating that the smoke test agent was here | |
| 10. **Agentic Workflows MCP Testing**: | |
| - Call the `agentic-workflows` MCP tool using the `status` method with workflow name `smoke-claude` to query workflow status | |
| - If the tool returns an error or no results, mark this test as ❌ and note "Tool unavailable or workflow not found" but continue to the Output section | |
| - If the tool succeeds, extract key information from the response: total runs, success/failure counts, last run timestamp | |
| - Write a summary of the results to `/tmp/gh-aw/agent/smoke-claude-status-__GH_AW_GITHUB_RUN_ID__.txt` (create directory if needed) | |
| - Use bash to verify the file was created and display its contents | |
| 11. **Slack Script Safe Output Testing**: Use the `post_slack_message` safe-output tool to post a fictitious Slack message: | |
| - Use `channel: "#smoke-tests"` and `message: "💥 Smoke test __GH_AW_GITHUB_RUN_ID__ passed — Claude engine nominal!"` | |
| - Verify the tool call succeeds | |
| ## PR Review Safe Outputs Testing | |
| **IMPORTANT**: The following tests require an open pull request. First, use the GitHub MCP tool to find an open PR in __GH_AW_GITHUB_REPOSITORY__ (or use the triggering PR if this is a pull_request event). Store the PR number for use in subsequent tests. | |
| 12. **Update PR Testing**: Use the `update_pull_request` tool to update the PR's body by appending a test message: "✨ PR Review Safe Output Test - Run __GH_AW_GITHUB_RUN_ID__" | |
| - Use `pr_number: <pr_number>` to target the open PR | |
| - Use `operation: "append"` and `body: "\n\n---\n✨ PR Review Safe Output Test - Run __GH_AW_GITHUB_RUN_ID__"` | |
| - Verify the tool call succeeds | |
| 13. **PR Review Comment Testing**: Use the `create_pull_request_review_comment` tool to add review comments on the PR | |
| - Find a file in the PR's diff (use GitHub MCP to get PR files) | |
| - Add at least 2 review comments on different lines with constructive feedback | |
| - Use `pr_number: <pr_number>`, `path: "<file_path>"`, `line: <line_number>`, and `body: "<comment_text>"` | |
| - Verify the tool calls succeed | |
| 14. **Submit PR Review Testing**: Use the `submit_pull_request_review` tool to submit a consolidated review | |
| - Use `pr_number: <pr_number>`, `event: "COMMENT"`, and `body: "💥 Automated smoke test review - all systems nominal!"` | |
| - Verify the review is submitted successfully | |
| - Note: This will bundle all review comments from test #13 | |
| 15. **Resolve Review Thread Testing**: | |
| - Use the GitHub MCP tool to list review threads on the PR | |
| - If any threads exist, use the `resolve_pull_request_review_thread` tool to resolve one thread | |
| - Use `thread_id: "<thread_id>"` from an existing thread | |
| - If no threads exist, mark this test as ⚠️ (skipped - no threads to resolve) | |
| 16. **Add Reviewer Testing**: Use the `add_reviewer` tool to add a reviewer to the PR | |
| - Use `pr_number: <pr_number>` and `reviewers: ["copilot"]` (or another valid reviewer) | |
| - Verify the tool call succeeds | |
| - Note: May fail if reviewer is already assigned or doesn't have access | |
| 17. **Push to PR Branch Testing**: | |
| - Create a test file at `/tmp/test-pr-push-__GH_AW_GITHUB_RUN_ID__.txt` with content "Test file for PR push" | |
| - Use git commands to check if we're on the PR branch | |
| - Use the `push_to_pull_request_branch` tool to push this change | |
| - Use `pr_number: <pr_number>` and `commit_message: "test: Add smoke test file"` | |
| - Verify the push succeeds | |
| - Note: This test may be skipped if not on a PR branch or if the PR is from a fork | |
| 18. **Close PR Testing** (CONDITIONAL - only if a test PR exists): | |
| - If you can identify a test/bot PR that can be safely closed, use the `close_pull_request` tool | |
| - Use `pr_number: <test_pr_number>` and `comment: "Closing as part of smoke test - Run __GH_AW_GITHUB_RUN_ID__"` | |
| - If no suitable test PR exists, mark this test as ⚠️ (skipped - no safe PR to close) | |
| - **DO NOT close the triggering PR or any important PRs** | |
| ## Output | |
| **CRITICAL: You MUST create an issue regardless of test results - this is a required safe output.** | |
| 1. **ALWAYS create an issue** with a summary of the smoke test run: | |
| - Title: "Smoke Test: Claude - __GH_AW_GITHUB_RUN_ID__" | |
| - Body should include: | |
| - Test results (✅ for pass, ❌ for fail, ⚠️ for skipped) for each test (including PR review tests #12-18) | |
| - Overall status: PASS (all passed), PARTIAL (some skipped), or FAIL (any failed) | |
| - Run URL: __GH_AW_GITHUB_SERVER_URL__/__GH_AW_GITHUB_REPOSITORY__/actions/runs/__GH_AW_GITHUB_RUN_ID__ | |
| - Timestamp | |
| - Note which PR was used for PR review testing (if applicable) | |
| - If ANY test fails, include error details in the issue body | |
| - This issue MUST be created before any other safe output operations | |
| 2. **Only if this workflow was triggered by a pull_request event**: Use the `add_comment` tool to add a **very brief** comment (max 5-10 lines) to the triggering pull request (omit the `item_number` parameter to auto-target the triggering PR) with: | |
| - Test results for core tests #1-11 (✅ or ❌) | |
| - Test results for PR review tests #12-18 (✅, ❌, or ⚠️) | |
| - Overall status: PASS, PARTIAL, or FAIL | |
| 3. Use the `add_comment` tool with `item_number` set to the discussion number you extracted in step 9 to add a **fun comic-book style comment** to that discussion - be playful and use comic-book language like "💥 WHOOSH!" | |
| - If step 9 failed to extract a discussion number, skip this step | |
| If all non-skipped tests pass, use the `add_labels` tool to add the label `smoke-claude` to the pull request (omit the `item_number` parameter to auto-target the triggering PR if this workflow was triggered by a pull_request event). | |
| **Important**: If no action is needed after completing your analysis, you **MUST** call the `noop` safe-output tool with a brief explanation. Failing to call any safe-output tool is the most common cause of safe-output workflow failures. | |
| ```json | |
| {"noop": {"message": "No action needed: [brief explanation of what was analyzed and why]"}} | |
| ``` | |
| GH_AW_PROMPT_b727304f9785a29f_EOF | |
| } > "$GH_AW_PROMPT" | |
| - name: Interpolate variables and render templates | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 | |
| env: | |
| GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt | |
| GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }} | |
| GH_AW_GITHUB_REPOSITORY: ${{ github.repository }} | |
| GH_AW_GITHUB_RUN_ID: ${{ github.run_id }} | |
| GH_AW_GITHUB_SERVER_URL: ${{ github.server_url }} | |
| with: | |
| script: | | |
| const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs'); | |
| setupGlobals(core, github, context, exec, io); | |
| const { main } = require('${{ runner.temp }}/gh-aw/actions/interpolate_prompt.cjs'); | |
| await main(); | |
| - name: Substitute placeholders | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 | |
| env: | |
| GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt | |
| GH_AW_ALLOWED_EXTENSIONS: '' | |
| GH_AW_CACHE_DESCRIPTION: '' | |
| GH_AW_CACHE_DIR: '/tmp/gh-aw/cache-memory/' | |
| GH_AW_GITHUB_ACTOR: ${{ github.actor }} | |
| GH_AW_GITHUB_EVENT_COMMENT_ID: ${{ github.event.comment.id }} | |
| GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: ${{ github.event.discussion.number }} | |
| GH_AW_GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number }} | |
| GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }} | |
| GH_AW_GITHUB_REPOSITORY: ${{ github.repository }} | |
| GH_AW_GITHUB_RUN_ID: ${{ github.run_id }} | |
| GH_AW_GITHUB_SERVER_URL: ${{ github.server_url }} | |
| GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }} | |
| GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ACTIVATED: ${{ needs.pre_activation.outputs.activated }} | |
| with: | |
| script: | | |
| const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs'); | |
| setupGlobals(core, github, context, exec, io); | |
| const substitutePlaceholders = require('${{ runner.temp }}/gh-aw/actions/substitute_placeholders.cjs'); | |
| // Call the substitution function | |
| return await substitutePlaceholders({ | |
| file: process.env.GH_AW_PROMPT, | |
| substitutions: { | |
| GH_AW_ALLOWED_EXTENSIONS: process.env.GH_AW_ALLOWED_EXTENSIONS, | |
| GH_AW_CACHE_DESCRIPTION: process.env.GH_AW_CACHE_DESCRIPTION, | |
| GH_AW_CACHE_DIR: process.env.GH_AW_CACHE_DIR, | |
| GH_AW_GITHUB_ACTOR: process.env.GH_AW_GITHUB_ACTOR, | |
| GH_AW_GITHUB_EVENT_COMMENT_ID: process.env.GH_AW_GITHUB_EVENT_COMMENT_ID, | |
| GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: process.env.GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER, | |
| GH_AW_GITHUB_EVENT_ISSUE_NUMBER: process.env.GH_AW_GITHUB_EVENT_ISSUE_NUMBER, | |
| GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: process.env.GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER, | |
| GH_AW_GITHUB_REPOSITORY: process.env.GH_AW_GITHUB_REPOSITORY, | |
| GH_AW_GITHUB_RUN_ID: process.env.GH_AW_GITHUB_RUN_ID, | |
| GH_AW_GITHUB_SERVER_URL: process.env.GH_AW_GITHUB_SERVER_URL, | |
| GH_AW_GITHUB_WORKSPACE: process.env.GH_AW_GITHUB_WORKSPACE, | |
| GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ACTIVATED: process.env.GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ACTIVATED | |
| } | |
| }); | |
| - name: Validate prompt placeholders | |
| env: | |
| GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt | |
| # poutine:ignore untrusted_checkout_exec | |
| run: bash ${RUNNER_TEMP}/gh-aw/actions/validate_prompt_placeholders.sh | |
| - name: Print prompt | |
| env: | |
| GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt | |
| # poutine:ignore untrusted_checkout_exec | |
| run: bash ${RUNNER_TEMP}/gh-aw/actions/print_prompt_summary.sh | |
| - name: Upload activation artifact | |
| if: success() | |
| uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7 | |
| with: | |
| name: activation | |
| path: | | |
| /tmp/gh-aw/aw_info.json | |
| /tmp/gh-aw/aw-prompts/prompt.txt | |
| retention-days: 1 | |
| agent: | |
| needs: | |
| - activation | |
| - apm | |
| runs-on: ubuntu-latest | |
| permissions: | |
| actions: read | |
| contents: read | |
| discussions: read | |
| issues: read | |
| pull-requests: read | |
| env: | |
| DEFAULT_BRANCH: ${{ github.event.repository.default_branch }} | |
| GH_AW_ASSETS_ALLOWED_EXTS: "" | |
| GH_AW_ASSETS_BRANCH: "" | |
| GH_AW_ASSETS_MAX_SIZE_KB: 0 | |
| GH_AW_MCP_LOG_DIR: /tmp/gh-aw/mcp-logs/safeoutputs | |
| GH_AW_WORKFLOW_ID_SANITIZED: smokeclaude | |
| outputs: | |
| checkout_pr_success: ${{ steps.checkout-pr.outputs.checkout_pr_success || 'true' }} | |
| has_patch: ${{ steps.collect_output.outputs.has_patch }} | |
| model: ${{ needs.activation.outputs.model }} | |
| output: ${{ steps.collect_output.outputs.output }} | |
| output_types: ${{ steps.collect_output.outputs.output_types }} | |
| steps: | |
| - name: Checkout actions folder | |
| uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 | |
| with: | |
| repository: github/gh-aw | |
| sparse-checkout: | | |
| actions | |
| persist-credentials: false | |
| - name: Setup Scripts | |
| uses: ./actions/setup | |
| with: | |
| destination: ${{ runner.temp }}/gh-aw/actions | |
| - name: Set runtime paths | |
| id: set-runtime-paths | |
| run: | | |
| echo "GH_AW_SAFE_OUTPUTS=${RUNNER_TEMP}/gh-aw/safeoutputs/outputs.jsonl" >> "$GITHUB_OUTPUT" | |
| echo "GH_AW_SAFE_OUTPUTS_CONFIG_PATH=${RUNNER_TEMP}/gh-aw/safeoutputs/config.json" >> "$GITHUB_OUTPUT" | |
| echo "GH_AW_SAFE_OUTPUTS_TOOLS_PATH=${RUNNER_TEMP}/gh-aw/safeoutputs/tools.json" >> "$GITHUB_OUTPUT" | |
| - name: Checkout repository | |
| uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 | |
| with: | |
| persist-credentials: false | |
| - name: Setup Go for CLI build | |
| uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0 | |
| with: | |
| go-version-file: go.mod | |
| cache: true | |
| - name: Build gh-aw CLI | |
| run: | | |
| echo "Building gh-aw CLI for linux/amd64..." | |
| mkdir -p dist | |
| VERSION=$(git describe --tags --always --dirty) | |
| CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \ | |
| -ldflags "-s -w -X main.version=${VERSION}" \ | |
| -o dist/gh-aw-linux-amd64 \ | |
| ./cmd/gh-aw | |
| # Copy binary to root for direct execution in user-defined steps | |
| cp dist/gh-aw-linux-amd64 ./gh-aw | |
| chmod +x ./gh-aw | |
| echo "✓ Built gh-aw CLI successfully" | |
| - name: Setup Docker Buildx | |
| uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4 | |
| - name: Build gh-aw Docker image | |
| uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7 | |
| with: | |
| context: . | |
| platforms: linux/amd64 | |
| push: false | |
| load: true | |
| tags: localhost/gh-aw:dev | |
| build-args: | | |
| BINARY=dist/gh-aw-linux-amd64 | |
| - name: Setup Go | |
| uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0 | |
| with: | |
| go-version: '1.25' | |
| cache: false | |
| - name: Capture GOROOT for AWF chroot mode | |
| run: echo "GOROOT=$(go env GOROOT)" >> "$GITHUB_ENV" | |
| - name: Create gh-aw temp directory | |
| run: bash ${RUNNER_TEMP}/gh-aw/actions/create_gh_aw_tmp_dir.sh | |
| - name: Configure gh CLI for GitHub Enterprise | |
| run: bash ${RUNNER_TEMP}/gh-aw/actions/configure_gh_for_ghe.sh | |
| env: | |
| GH_TOKEN: ${{ github.token }} | |
| - name: Download APM bundle artifact | |
| uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 | |
| with: | |
| name: ${{ needs.activation.outputs.artifact_prefix }}apm | |
| path: /tmp/gh-aw/apm-bundle | |
| - id: apm_bundle | |
| name: Find APM bundle path | |
| run: echo "path=$(ls /tmp/gh-aw/apm-bundle/*.tar.gz | head -1)" >> "$GITHUB_OUTPUT" | |
| - name: Restore APM packages | |
| uses: microsoft/apm-action@a190b0b1a91031057144dc136acf9757a59c9e4d # v1.4.1 | |
| with: | |
| bundle: ${{ steps.apm_bundle.outputs.path }} | |
| # Cache memory file share configuration from frontmatter processed below | |
| - name: Create cache-memory directory | |
| run: bash ${RUNNER_TEMP}/gh-aw/actions/create_cache_memory_dir.sh | |
| - name: Restore cache-memory file share data | |
| uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 | |
| with: | |
| key: memory-none-nopolicy-${{ env.GH_AW_WORKFLOW_ID_SANITIZED }}-${{ github.run_id }} | |
| path: /tmp/gh-aw/cache-memory | |
| restore-keys: | | |
| memory-none-nopolicy-${{ env.GH_AW_WORKFLOW_ID_SANITIZED }}- | |
| - name: Setup cache-memory git repository | |
| env: | |
| GH_AW_CACHE_DIR: /tmp/gh-aw/cache-memory | |
| GH_AW_MIN_INTEGRITY: none | |
| run: bash ${RUNNER_TEMP}/gh-aw/actions/setup_cache_memory_git.sh | |
| - name: Configure Git credentials | |
| env: | |
| REPO_NAME: ${{ github.repository }} | |
| SERVER_URL: ${{ github.server_url }} | |
| run: | | |
| git config --global user.email "github-actions[bot]@users.noreply.github.com" | |
| git config --global user.name "github-actions[bot]" | |
| git config --global am.keepcr true | |
| # Re-authenticate git with GitHub token | |
| SERVER_URL_STRIPPED="${SERVER_URL#https://}" | |
| git remote set-url origin "https://x-access-token:${{ github.token }}@${SERVER_URL_STRIPPED}/${REPO_NAME}.git" | |
| echo "Git configured with standard GitHub Actions identity" | |
| - name: Checkout PR branch | |
| id: checkout-pr | |
| if: | | |
| github.event.pull_request || github.event.issue.pull_request | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 | |
| env: | |
| GH_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} | |
| with: | |
| github-token: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} | |
| script: | | |
| const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs'); | |
| setupGlobals(core, github, context, exec, io); | |
| const { main } = require('${{ runner.temp }}/gh-aw/actions/checkout_pr_branch.cjs'); | |
| await main(); | |
| - name: Setup Node.js | |
| uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0 | |
| with: | |
| node-version: '24' | |
| package-manager-cache: false | |
| - name: Install AWF binary | |
| run: bash ${RUNNER_TEMP}/gh-aw/actions/install_awf_binary.sh v0.25.10 | |
| - name: Install Claude Code CLI | |
| run: npm install -g @anthropic-ai/claude-code@latest | |
| - name: Determine automatic lockdown mode for GitHub MCP Server | |
| id: determine-automatic-lockdown | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 | |
| env: | |
| GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }} | |
| GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }} | |
| with: | |
| script: | | |
| const determineAutomaticLockdown = require('${{ runner.temp }}/gh-aw/actions/determine_automatic_lockdown.cjs'); | |
| await determineAutomaticLockdown(github, context, core); | |
| - name: Download container images | |
| run: bash ${RUNNER_TEMP}/gh-aw/actions/download_docker_images.sh ghcr.io/github/gh-aw-firewall/agent:0.25.10 ghcr.io/github/gh-aw-firewall/api-proxy:0.25.10 ghcr.io/github/gh-aw-firewall/squid:0.25.10 ghcr.io/github/gh-aw-mcpg:v0.2.11 ghcr.io/github/github-mcp-server:v0.32.0 ghcr.io/github/serena-mcp-server:latest mcr.microsoft.com/playwright/mcp node:lts-alpine | |
| - name: Install gh-aw extension | |
| env: | |
| GH_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} | |
| run: | | |
| # Check if gh-aw extension is already installed | |
| if gh extension list | grep -q "github/gh-aw"; then | |
| echo "gh-aw extension already installed, upgrading..." | |
| gh extension upgrade gh-aw || true | |
| else | |
| echo "Installing gh-aw extension..." | |
| gh extension install github/gh-aw | |
| fi | |
| gh aw --version | |
| # Copy the gh-aw binary to ${RUNNER_TEMP}/gh-aw for MCP server containerization | |
| mkdir -p ${RUNNER_TEMP}/gh-aw | |
| GH_AW_BIN=$(which gh-aw 2>/dev/null || find ~/.local/share/gh/extensions/gh-aw -name 'gh-aw' -type f 2>/dev/null | head -1) | |
| if [ -n "$GH_AW_BIN" ] && [ -f "$GH_AW_BIN" ]; then | |
| cp "$GH_AW_BIN" ${RUNNER_TEMP}/gh-aw/gh-aw | |
| chmod +x ${RUNNER_TEMP}/gh-aw/gh-aw | |
| echo "Copied gh-aw binary to ${RUNNER_TEMP}/gh-aw/gh-aw" | |
| else | |
| echo "::error::Failed to find gh-aw binary for MCP server" | |
| exit 1 | |
| fi | |
| - name: Write Safe Outputs Config | |
| run: | | |
| mkdir -p ${RUNNER_TEMP}/gh-aw/safeoutputs | |
| mkdir -p /tmp/gh-aw/safeoutputs | |
| mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs | |
| cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_982f8532560c93f1_EOF' | |
| {"add_comment":{"hide_older_comments":true,"max":2},"add_labels":{"allowed":["smoke-claude"]},"add_reviewer":{"max":2,"target":"*"},"close_pull_request":{"max":1,"staged":true},"create_issue":{"close_older_issues":true,"close_older_key":"smoke-claude","expires":2,"group":true,"labels":["automation","testing"],"max":1},"create_pull_request_review_comment":{"max":5,"side":"RIGHT","target":"*"},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"},"post_slack_message":{"description":"Post a message to a fictitious Slack channel (smoke test only — no real Slack integration)","inputs":{"channel":{"default":"#general","description":"Slack channel name to post to","required":false,"type":"string"},"message":{"description":"Message text to post","required":false,"type":"string"}}},"push_to_pull_request_branch":{"allowed_files":[".github/smoke-claude-push-test.md"],"if_no_changes":"warn","max_patch_size":1024,"protected_files":["package.json","bun.lockb","bunfig.toml","deno.json","deno.jsonc","deno.lock","global.json","NuGet.Config","Directory.Packages.props","mix.exs","mix.lock","go.mod","go.sum","stack.yaml","stack.yaml.lock","pom.xml","build.gradle","build.gradle.kts","settings.gradle","settings.gradle.kts","gradle.properties","package-lock.json","yarn.lock","pnpm-lock.yaml","npm-shrinkwrap.json","requirements.txt","Pipfile","Pipfile.lock","pyproject.toml","setup.py","setup.cfg","Gemfile","Gemfile.lock","uv.lock","CODEOWNERS"],"protected_path_prefixes":[".github/",".agents/"],"staged":true,"target":"*"},"resolve_pull_request_review_thread":{"max":5},"submit_pull_request_review":{"footer":"always","max":1},"update_pull_request":{"allow_body":true,"allow_title":true,"max":1,"target":"*"}} | |
| GH_AW_SAFE_OUTPUTS_CONFIG_982f8532560c93f1_EOF | |
| - name: Write Safe Outputs Tools | |
| run: | | |
| cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_5968965ac36dd63e_EOF' | |
| { | |
| "description_suffixes": { | |
| "add_comment": " CONSTRAINTS: Maximum 2 comment(s) can be added.", | |
| "add_labels": " CONSTRAINTS: Only these labels are allowed: [\"smoke-claude\"].", | |
| "add_reviewer": " CONSTRAINTS: Maximum 2 reviewer(s) can be added.", | |
| "close_pull_request": " CONSTRAINTS: Maximum 1 pull request(s) can be closed.", | |
| "create_issue": " CONSTRAINTS: Maximum 1 issue(s) can be created. Labels [\"automation\" \"testing\"] will be automatically added.", | |
| "create_pull_request_review_comment": " CONSTRAINTS: Maximum 5 review comment(s) can be created. Comments will be on the RIGHT side of the diff.", | |
| "resolve_pull_request_review_thread": " CONSTRAINTS: Maximum 5 review thread(s) can be resolved.", | |
| "submit_pull_request_review": " CONSTRAINTS: Maximum 1 review(s) can be submitted.", | |
| "update_pull_request": " CONSTRAINTS: Maximum 1 pull request(s) can be updated. Target: *." | |
| }, | |
| "repo_params": {}, | |
| "dynamic_tools": [ | |
| { | |
| "description": "Post a message to a fictitious Slack channel (smoke test only — no real Slack integration)", | |
| "inputSchema": { | |
| "additionalProperties": false, | |
| "properties": { | |
| "channel": { | |
| "default": "#general", | |
| "description": "Slack channel name to post to", | |
| "type": "string" | |
| }, | |
| "message": { | |
| "default": "", | |
| "description": "Message text to post", | |
| "type": "string" | |
| } | |
| }, | |
| "type": "object" | |
| }, | |
| "name": "post_slack_message" | |
| } | |
| ] | |
| } | |
| GH_AW_SAFE_OUTPUTS_TOOLS_META_5968965ac36dd63e_EOF | |
| cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_26f66c1d235bd7aa_EOF' | |
| { | |
| "add_comment": { | |
| "defaultMax": 1, | |
| "fields": { | |
| "body": { | |
| "required": true, | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 65000 | |
| }, | |
| "item_number": { | |
| "issueOrPRNumber": true | |
| }, | |
| "repo": { | |
| "type": "string", | |
| "maxLength": 256 | |
| } | |
| } | |
| }, | |
| "add_labels": { | |
| "defaultMax": 5, | |
| "fields": { | |
| "item_number": { | |
| "issueNumberOrTemporaryId": true | |
| }, | |
| "labels": { | |
| "required": true, | |
| "type": "array", | |
| "itemType": "string", | |
| "itemSanitize": true, | |
| "itemMaxLength": 128 | |
| }, | |
| "repo": { | |
| "type": "string", | |
| "maxLength": 256 | |
| } | |
| } | |
| }, | |
| "add_reviewer": { | |
| "defaultMax": 3, | |
| "fields": { | |
| "pull_request_number": { | |
| "issueOrPRNumber": true | |
| }, | |
| "repo": { | |
| "type": "string", | |
| "maxLength": 256 | |
| }, | |
| "reviewers": { | |
| "required": true, | |
| "type": "array", | |
| "itemType": "string", | |
| "itemSanitize": true, | |
| "itemMaxLength": 39 | |
| } | |
| } | |
| }, | |
| "close_pull_request": { | |
| "defaultMax": 1, | |
| "fields": { | |
| "body": { | |
| "required": true, | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 65000 | |
| }, | |
| "pull_request_number": { | |
| "optionalPositiveInteger": true | |
| }, | |
| "repo": { | |
| "type": "string", | |
| "maxLength": 256 | |
| } | |
| } | |
| }, | |
| "create_issue": { | |
| "defaultMax": 1, | |
| "fields": { | |
| "body": { | |
| "required": true, | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 65000 | |
| }, | |
| "labels": { | |
| "type": "array", | |
| "itemType": "string", | |
| "itemSanitize": true, | |
| "itemMaxLength": 128 | |
| }, | |
| "parent": { | |
| "issueOrPRNumber": true | |
| }, | |
| "repo": { | |
| "type": "string", | |
| "maxLength": 256 | |
| }, | |
| "temporary_id": { | |
| "type": "string" | |
| }, | |
| "title": { | |
| "required": true, | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 128 | |
| } | |
| } | |
| }, | |
| "create_pull_request_review_comment": { | |
| "defaultMax": 1, | |
| "fields": { | |
| "body": { | |
| "required": true, | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 65000 | |
| }, | |
| "line": { | |
| "required": true, | |
| "positiveInteger": true | |
| }, | |
| "path": { | |
| "required": true, | |
| "type": "string" | |
| }, | |
| "pull_request_number": { | |
| "optionalPositiveInteger": true | |
| }, | |
| "repo": { | |
| "type": "string", | |
| "maxLength": 256 | |
| }, | |
| "side": { | |
| "type": "string", | |
| "enum": [ | |
| "LEFT", | |
| "RIGHT" | |
| ] | |
| }, | |
| "start_line": { | |
| "optionalPositiveInteger": true | |
| } | |
| }, | |
| "customValidation": "startLineLessOrEqualLine" | |
| }, | |
| "missing_data": { | |
| "defaultMax": 20, | |
| "fields": { | |
| "alternatives": { | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 256 | |
| }, | |
| "context": { | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 256 | |
| }, | |
| "data_type": { | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 128 | |
| }, | |
| "reason": { | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 256 | |
| } | |
| } | |
| }, | |
| "missing_tool": { | |
| "defaultMax": 20, | |
| "fields": { | |
| "alternatives": { | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 512 | |
| }, | |
| "reason": { | |
| "required": true, | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 256 | |
| }, | |
| "tool": { | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 128 | |
| } | |
| } | |
| }, | |
| "noop": { | |
| "defaultMax": 1, | |
| "fields": { | |
| "message": { | |
| "required": true, | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 65000 | |
| } | |
| } | |
| }, | |
| "push_to_pull_request_branch": { | |
| "defaultMax": 1, | |
| "fields": { | |
| "branch": { | |
| "required": true, | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 256 | |
| }, | |
| "message": { | |
| "required": true, | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 65000 | |
| }, | |
| "pull_request_number": { | |
| "issueOrPRNumber": true | |
| } | |
| } | |
| }, | |
| "resolve_pull_request_review_thread": { | |
| "defaultMax": 10, | |
| "fields": { | |
| "thread_id": { | |
| "required": true, | |
| "type": "string" | |
| } | |
| } | |
| }, | |
| "submit_pull_request_review": { | |
| "defaultMax": 1, | |
| "fields": { | |
| "body": { | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 65000 | |
| }, | |
| "event": { | |
| "type": "string", | |
| "enum": [ | |
| "APPROVE", | |
| "REQUEST_CHANGES", | |
| "COMMENT" | |
| ] | |
| } | |
| } | |
| }, | |
| "update_pull_request": { | |
| "defaultMax": 1, | |
| "fields": { | |
| "body": { | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 65000 | |
| }, | |
| "draft": { | |
| "type": "boolean" | |
| }, | |
| "operation": { | |
| "type": "string", | |
| "enum": [ | |
| "replace", | |
| "append", | |
| "prepend" | |
| ] | |
| }, | |
| "pull_request_number": { | |
| "issueOrPRNumber": true | |
| }, | |
| "repo": { | |
| "type": "string", | |
| "maxLength": 256 | |
| }, | |
| "title": { | |
| "type": "string", | |
| "sanitize": true, | |
| "maxLength": 256 | |
| } | |
| }, | |
| "customValidation": "requiresOneOf:title,body" | |
| } | |
| } | |
| GH_AW_SAFE_OUTPUTS_VALIDATION_26f66c1d235bd7aa_EOF | |
| node ${RUNNER_TEMP}/gh-aw/actions/generate_safe_outputs_tools.cjs | |
| - name: Generate Safe Outputs MCP Server Config | |
| id: safe-outputs-config | |
| run: | | |
| # Generate a secure random API key (360 bits of entropy, 40+ chars) | |
| # Mask immediately to prevent timing vulnerabilities | |
| API_KEY=$(openssl rand -base64 45 | tr -d '/+=') | |
| echo "::add-mask::${API_KEY}" | |
| PORT=3001 | |
| # Set outputs for next steps | |
| { | |
| echo "safe_outputs_api_key=${API_KEY}" | |
| echo "safe_outputs_port=${PORT}" | |
| } >> "$GITHUB_OUTPUT" | |
| echo "Safe Outputs MCP server will run on port ${PORT}" | |
| - name: Start Safe Outputs MCP HTTP Server | |
| id: safe-outputs-start | |
| env: | |
| DEBUG: '*' | |
| GH_AW_SAFE_OUTPUTS_PORT: ${{ steps.safe-outputs-config.outputs.safe_outputs_port }} | |
| GH_AW_SAFE_OUTPUTS_API_KEY: ${{ steps.safe-outputs-config.outputs.safe_outputs_api_key }} | |
| GH_AW_SAFE_OUTPUTS_TOOLS_PATH: ${{ runner.temp }}/gh-aw/safeoutputs/tools.json | |
| GH_AW_SAFE_OUTPUTS_CONFIG_PATH: ${{ runner.temp }}/gh-aw/safeoutputs/config.json | |
| GH_AW_MCP_LOG_DIR: /tmp/gh-aw/mcp-logs/safeoutputs | |
| run: | | |
| # Environment variables are set above to prevent template injection | |
| export DEBUG | |
| export GH_AW_SAFE_OUTPUTS_PORT | |
| export GH_AW_SAFE_OUTPUTS_API_KEY | |
| export GH_AW_SAFE_OUTPUTS_TOOLS_PATH | |
| export GH_AW_SAFE_OUTPUTS_CONFIG_PATH | |
| export GH_AW_MCP_LOG_DIR | |
| bash ${RUNNER_TEMP}/gh-aw/actions/start_safe_outputs_server.sh | |
| - name: Setup MCP Scripts Config | |
| run: | | |
| mkdir -p ${RUNNER_TEMP}/gh-aw/mcp-scripts/logs | |
| cat > ${RUNNER_TEMP}/gh-aw/mcp-scripts/tools.json << 'GH_AW_MCP_SCRIPTS_TOOLS_1c659e76e3e02313_EOF' | |
| { | |
| "serverName": "mcpscripts", | |
| "version": "1.0.0", | |
| "logDir": "${RUNNER_TEMP}/gh-aw/mcp-scripts/logs", | |
| "tools": [ | |
| { | |
| "name": "gh", | |
| "description": "Execute any gh CLI command. This tool is accessible as 'mcpscripts-gh'. Provide the full command after 'gh' (e.g., args: 'pr list --limit 5'). The tool will run: gh \u003cargs\u003e. Use single quotes ' for complex args to avoid shell interpretation issues.", | |
| "inputSchema": { | |
| "properties": { | |
| "args": { | |
| "description": "Arguments to pass to gh CLI (without the 'gh' prefix). Examples: 'pr list --limit 5', 'issue view 123', 'api repos/{owner}/{repo}'", | |
| "type": "string" | |
| } | |
| }, | |
| "required": [ | |
| "args" | |
| ], | |
| "type": "object" | |
| }, | |
| "handler": "gh.sh", | |
| "env": { | |
| "GH_AW_GH_TOKEN": "GH_AW_GH_TOKEN", | |
| "GH_DEBUG": "GH_DEBUG" | |
| }, | |
| "timeout": 60 | |
| }, | |
| { | |
| "name": "github-discussion-query", | |
| "description": "Query GitHub discussions with jq filtering support. Without --jq, returns schema and data size info. Use --jq '.' to get all data, or specific jq expressions to filter.", | |
| "inputSchema": { | |
| "properties": { | |
| "jq": { | |
| "description": "jq filter expression to apply to output. If not provided, returns schema info instead of full data.", | |
| "type": "string" | |
| }, | |
| "limit": { | |
| "description": "Maximum number of discussions to fetch (default: 30)", | |
| "type": "number" | |
| }, | |
| "repo": { | |
| "description": "Repository in owner/repo format (defaults to current repository)", | |
| "type": "string" | |
| } | |
| }, | |
| "type": "object" | |
| }, | |
| "handler": "github-discussion-query.sh", | |
| "env": { | |
| "GH_TOKEN": "GH_TOKEN" | |
| }, | |
| "timeout": 60 | |
| }, | |
| { | |
| "name": "github-issue-query", | |
| "description": "Query GitHub issues with jq filtering support. Without --jq, returns schema and data size info. Use --jq '.' to get all data, or specific jq expressions to filter.", | |
| "inputSchema": { | |
| "properties": { | |
| "jq": { | |
| "description": "jq filter expression to apply to output. If not provided, returns schema info instead of full data.", | |
| "type": "string" | |
| }, | |
| "limit": { | |
| "description": "Maximum number of issues to fetch (default: 30)", | |
| "type": "number" | |
| }, | |
| "repo": { | |
| "description": "Repository in owner/repo format (defaults to current repository)", | |
| "type": "string" | |
| }, | |
| "state": { | |
| "description": "Issue state: open, closed, all (default: open)", | |
| "type": "string" | |
| } | |
| }, | |
| "type": "object" | |
| }, | |
| "handler": "github-issue-query.sh", | |
| "env": { | |
| "GH_TOKEN": "GH_TOKEN" | |
| }, | |
| "timeout": 60 | |
| }, | |
| { | |
| "name": "github-pr-query", | |
| "description": "Query GitHub pull requests with jq filtering support. Without --jq, returns schema and data size info. Use --jq '.' to get all data, or specific jq expressions to filter.", | |
| "inputSchema": { | |
| "properties": { | |
| "jq": { | |
| "description": "jq filter expression to apply to output. If not provided, returns schema info instead of full data.", | |
| "type": "string" | |
| }, | |
| "limit": { | |
| "description": "Maximum number of PRs to fetch (default: 30)", | |
| "type": "number" | |
| }, | |
| "repo": { | |
| "description": "Repository in owner/repo format (defaults to current repository)", | |
| "type": "string" | |
| }, | |
| "state": { | |
| "description": "PR state: open, closed, merged, all (default: open)", | |
| "type": "string" | |
| } | |
| }, | |
| "type": "object" | |
| }, | |
| "handler": "github-pr-query.sh", | |
| "env": { | |
| "GH_TOKEN": "GH_TOKEN" | |
| }, | |
| "timeout": 60 | |
| }, | |
| { | |
| "name": "go", | |
| "description": "Execute any Go command. This tool is accessible as 'mcpscripts-go'. Provide the full command after 'go' (e.g., args: 'test ./...'). The tool will run: go \u003cargs\u003e. Use single quotes ' for complex args to avoid shell interpretation issues.", | |
| "inputSchema": { | |
| "properties": { | |
| "args": { | |
| "description": "Arguments to pass to go CLI (without the 'go' prefix). Examples: 'test ./...', 'build ./cmd/gh-aw', 'mod tidy', 'fmt ./...', 'vet ./...'", | |
| "type": "string" | |
| } | |
| }, | |
| "required": [ | |
| "args" | |
| ], | |
| "type": "object" | |
| }, | |
| "handler": "go.sh", | |
| "timeout": 60 | |
| }, | |
| { | |
| "name": "make", | |
| "description": "Execute any Make target. This tool is accessible as 'mcpscripts-make'. Provide the target name(s) (e.g., args: 'build'). The tool will run: make \u003cargs\u003e. Use single quotes ' for complex args to avoid shell interpretation issues.", | |
| "inputSchema": { | |
| "properties": { | |
| "args": { | |
| "description": "Arguments to pass to make (target names and options). Examples: 'build', 'test-unit', 'lint', 'recompile', 'agent-finish', 'fmt build test-unit'", | |
| "type": "string" | |
| } | |
| }, | |
| "required": [ | |
| "args" | |
| ], | |
| "type": "object" | |
| }, | |
| "handler": "make.sh", | |
| "timeout": 60 | |
| } | |
| ] | |
| } | |
| GH_AW_MCP_SCRIPTS_TOOLS_1c659e76e3e02313_EOF | |
| cat > ${RUNNER_TEMP}/gh-aw/mcp-scripts/mcp-server.cjs << 'GH_AW_MCP_SCRIPTS_SERVER_eb49b7785c8b0fc8_EOF' | |
const path = require("path");
const { startHttpServer } = require("./mcp_scripts_mcp_server_http.cjs");

// Tool definitions (tools.json) live next to this bootstrap script.
const configPath = path.join(__dirname, "tools.json");

// Port for the stateless HTTP MCP server (default 3000).
const port = parseInt(process.env.GH_AW_MCP_SCRIPTS_PORT || "3000", 10);

// NOTE(review): apiKey is read but never passed to startHttpServer —
// presumably the server reads GH_AW_MCP_SCRIPTS_API_KEY from the environment
// itself; confirm against mcp_scripts_mcp_server_http.cjs.
const apiKey = process.env.GH_AW_MCP_SCRIPTS_API_KEY || "";

startHttpServer(configPath, {
  port: port,
  stateless: true,
  // Fix: the original embedded the literal string
  // "${RUNNER_TEMP}/gh-aw/mcp-scripts/logs" — the surrounding heredoc is
  // quoted, so the shell never expanded it, and JavaScript does not expand
  // shell variables either. This script is written into
  // ${RUNNER_TEMP}/gh-aw/mcp-scripts/, so deriving the log directory from
  // __dirname yields the intended path unconditionally.
  logDir: path.join(__dirname, "logs"),
}).catch((error) => {
  console.error("Failed to start mcp-scripts HTTP server:", error);
  process.exit(1);
});
| GH_AW_MCP_SCRIPTS_SERVER_eb49b7785c8b0fc8_EOF | |
| chmod +x ${RUNNER_TEMP}/gh-aw/mcp-scripts/mcp-server.cjs | |
| - name: Setup MCP Scripts Tool Files | |
| run: | | |
| cat > ${RUNNER_TEMP}/gh-aw/mcp-scripts/gh.sh << 'GH_AW_MCP_SCRIPTS_SH_GH_765ed1799a8562a4_EOF' | |
#!/bin/bash
# Auto-generated mcp-script tool: gh
# Execute any gh CLI command. This tool is accessible as 'mcpscripts-gh'. Provide the full command after 'gh' (e.g., args: 'pr list --limit 5'). The tool will run: gh <args>. Use single quotes ' for complex args to avoid shell interpretation issues.
set -euo pipefail
# Echo the command being run so it appears in the tool's log output.
echo "gh $INPUT_ARGS"
# Fix: never print any portion of the token — even a 6-character prefix leaks
# secret material, and Actions log masking does not redact substrings of a
# secret. Report presence only.
echo "  token: ${GH_AW_GH_TOKEN:+present}"
# Word splitting of INPUT_ARGS is intentional: the tool receives its arguments
# as a single string that must be split into individual CLI arguments.
# shellcheck disable=SC2086
GH_TOKEN="$GH_AW_GH_TOKEN" gh $INPUT_ARGS
| GH_AW_MCP_SCRIPTS_SH_GH_765ed1799a8562a4_EOF | |
| chmod +x ${RUNNER_TEMP}/gh-aw/mcp-scripts/gh.sh | |
| cat > ${RUNNER_TEMP}/gh-aw/mcp-scripts/github-discussion-query.sh << 'GH_AW_MCP_SCRIPTS_SH_GITHUB-DISCUSSION-QUERY_ed0e1045bd79da4c_EOF' | |
#!/bin/bash
# Auto-generated mcp-script tool: github-discussion-query
# Query GitHub discussions with jq filtering support. Without --jq, returns schema and data size info. Use --jq '.' to get all data, or specific jq expressions to filter.
# Fix: dropped the redundant `set -e` that followed `set -euo pipefail`
# (errexit was already enabled).
set -euo pipefail
# Default values (INPUT_* variables are supplied by the MCP scripts server).
REPO="${INPUT_REPO:-}"
LIMIT="${INPUT_LIMIT:-30}"
JQ_FILTER="${INPUT_JQ:-}"
# Fix: LIMIT is interpolated unquoted into the GraphQL document below; reject
# non-numeric values so malformed input cannot break (or inject into) the query.
if ! [[ "$LIMIT" =~ ^[0-9]+$ ]]; then
  LIMIT=30
fi
# Parse repository owner and name; fall back to the GitHub Actions context
# when no repo input was given.
if [[ -n "$REPO" ]]; then
  OWNER=$(echo "$REPO" | cut -d'/' -f1)
  NAME=$(echo "$REPO" | cut -d'/' -f2)
else
  OWNER="${GITHUB_REPOSITORY_OWNER:-}"
  NAME=$(echo "${GITHUB_REPOSITORY:-}" | cut -d'/' -f2)
fi
# Both parts are required to address the repository in GraphQL.
if [[ -z "$OWNER" || -z "$NAME" ]]; then
  echo "Error: Could not determine repository owner and name" >&2
  exit 1
fi
# Build the GraphQL query for discussions. The heredoc is intentionally
# unquoted so $OWNER/$NAME/$LIMIT expand here (all three are validated above).
GRAPHQL_QUERY=$(cat <<QUERY
{
  repository(owner: "$OWNER", name: "$NAME") {
    discussions(first: $LIMIT, orderBy: {field: CREATED_AT, direction: DESC}) {
      nodes {
        number
        title
        author {
          login
        }
        createdAt
        updatedAt
        body
        category {
          name
        }
        labels(first: 10) {
          nodes {
            name
          }
        }
        comments {
          totalCount
        }
        answer {
          id
        }
        url
      }
    }
  }
}
QUERY
)
# Execute the GraphQL query via gh api.
GRAPHQL_OUTPUT=$(gh api graphql -f query="$GRAPHQL_QUERY")
# Flatten the GraphQL response to match the shape of `gh discussion list`
# style output (an array of discussion objects).
OUTPUT=$(echo "$GRAPHQL_OUTPUT" | jq '[.data.repository.discussions.nodes[] | {
  number: .number,
  title: .title,
  author: .author,
  createdAt: .createdAt,
  updatedAt: .updatedAt,
  body: .body,
  category: .category,
  labels: .labels.nodes,
  comments: .comments,
  answer: .answer,
  url: .url
}]')
# With a jq filter: apply it and emit the result. Without one: emit schema and
# size metadata instead of the (potentially large) full payload.
if [[ -n "$JQ_FILTER" ]]; then
  jq "$JQ_FILTER" <<< "$OUTPUT"
else
  ITEM_COUNT=$(jq 'length' <<< "$OUTPUT")
  DATA_SIZE=${#OUTPUT}
  # Defensive: the values are interpolated into JSON below, so coerce anything
  # non-numeric to 0 to keep the emitted document valid.
  if ! [[ "$ITEM_COUNT" =~ ^[0-9]+$ ]]; then
    ITEM_COUNT=0
  fi
  if ! [[ "$DATA_SIZE" =~ ^[0-9]+$ ]]; then
    DATA_SIZE=0
  fi
  cat << EOF
{
  "message": "No --jq filter provided. Use --jq to filter and retrieve data.",
  "item_count": $ITEM_COUNT,
  "data_size_bytes": $DATA_SIZE,
  "schema": {
    "type": "array",
    "description": "Array of discussion objects",
    "item_fields": {
      "number": "integer - Discussion number",
      "title": "string - Discussion title",
      "author": "object - Author info with login field",
      "createdAt": "string - ISO timestamp of creation",
      "updatedAt": "string - ISO timestamp of last update",
      "body": "string - Discussion body content",
      "category": "object - Category info with name field",
      "labels": "array - Array of label objects with name field",
      "comments": "object - Comments info with totalCount field",
      "answer": "object|null - Accepted answer if exists",
      "url": "string - Discussion URL"
    }
  },
  "suggested_queries": [
    {"description": "Get all data", "query": "."},
    {"description": "Get discussion numbers and titles", "query": ".[] | {number, title}"},
    {"description": "Get discussions by author", "query": ".[] | select(.author.login == \"USERNAME\")"},
    {"description": "Get discussions in category", "query": ".[] | select(.category.name == \"Ideas\")"},
    {"description": "Get answered discussions", "query": ".[] | select(.answer != null)"},
    {"description": "Get unanswered discussions", "query": ".[] | select(.answer == null) | {number, title, category: .category.name}"},
    {"description": "Count by category", "query": "group_by(.category.name) | map({category: .[0].category.name, count: length})"}
  ]
}
EOF
fi
| GH_AW_MCP_SCRIPTS_SH_GITHUB-DISCUSSION-QUERY_ed0e1045bd79da4c_EOF | |
| chmod +x ${RUNNER_TEMP}/gh-aw/mcp-scripts/github-discussion-query.sh | |
| cat > ${RUNNER_TEMP}/gh-aw/mcp-scripts/github-issue-query.sh << 'GH_AW_MCP_SCRIPTS_SH_GITHUB-ISSUE-QUERY_98e0363d863bd74f_EOF' | |
| #!/bin/bash | |
| # Auto-generated mcp-script tool: github-issue-query | |
| # Query GitHub issues with jq filtering support. Without --jq, returns schema and data size info. Use --jq '.' to get all data, or specific jq expressions to filter. | |
| # Fail fast: exit on errors, unset variables, and failed pipeline stages. | |
| # (A duplicate 'set -e' was removed; '-e' is already included below.) | |
| set -euo pipefail | |
| # Default values (tool inputs arrive as INPUT_* environment variables) | |
| REPO="${INPUT_REPO:-}" | |
| STATE="${INPUT_STATE:-open}" | |
| LIMIT="${INPUT_LIMIT:-30}" | |
| JQ_FILTER="${INPUT_JQ:-}" | |
| # JSON fields to fetch | |
| JSON_FIELDS="number,title,state,author,createdAt,updatedAt,closedAt,body,labels,assignees,comments,milestone,url" | |
| # Build and execute gh command; --repo is only passed when provided so gh | |
| # falls back to the repository inferred from the current directory. | |
| if [[ -n "$REPO" ]]; then | |
| OUTPUT=$(gh issue list --state "$STATE" --limit "$LIMIT" --json "$JSON_FIELDS" --repo "$REPO") | |
| else | |
| OUTPUT=$(gh issue list --state "$STATE" --limit "$LIMIT" --json "$JSON_FIELDS") | |
| fi | |
| # Apply jq filter if specified | |
| if [[ -n "$JQ_FILTER" ]]; then | |
| jq "$JQ_FILTER" <<< "$OUTPUT" | |
| else | |
| # Return schema and size instead of full data | |
| ITEM_COUNT=$(jq 'length' <<< "$OUTPUT") | |
| DATA_SIZE=${#OUTPUT} | |
| # Validate values are numeric before interpolating them into raw JSON below | |
| if ! [[ "$ITEM_COUNT" =~ ^[0-9]+$ ]]; then | |
| ITEM_COUNT=0 | |
| fi | |
| if ! [[ "$DATA_SIZE" =~ ^[0-9]+$ ]]; then | |
| DATA_SIZE=0 | |
| fi | |
| # Unquoted EOF delimiter so $ITEM_COUNT and $DATA_SIZE expand in the payload | |
| cat << EOF | |
| { | |
| "message": "No --jq filter provided. Use --jq to filter and retrieve data.", | |
| "item_count": $ITEM_COUNT, | |
| "data_size_bytes": $DATA_SIZE, | |
| "schema": { | |
| "type": "array", | |
| "description": "Array of issue objects", | |
| "item_fields": { | |
| "number": "integer - Issue number", | |
| "title": "string - Issue title", | |
| "state": "string - Issue state (OPEN, CLOSED)", | |
| "author": "object - Author info with login field", | |
| "createdAt": "string - ISO timestamp of creation", | |
| "updatedAt": "string - ISO timestamp of last update", | |
| "closedAt": "string|null - ISO timestamp of close", | |
| "body": "string - Issue body content", | |
| "labels": "array - Array of label objects with name field", | |
| "assignees": "array - Array of assignee objects with login field", | |
| "comments": "object - Comments info with totalCount field", | |
| "milestone": "object|null - Milestone info with title field", | |
| "url": "string - Issue URL" | |
| } | |
| }, | |
| "suggested_queries": [ | |
| {"description": "Get all data", "query": "."}, | |
| {"description": "Get issue numbers and titles", "query": ".[] | {number, title}"}, | |
| {"description": "Get open issues only", "query": ".[] | select(.state == \"OPEN\")"}, | |
| {"description": "Get issues by author", "query": ".[] | select(.author.login == \"USERNAME\")"}, | |
| {"description": "Get issues with label", "query": ".[] | select(.labels | map(.name) | index(\"bug\"))"}, | |
| {"description": "Get issues with many comments", "query": ".[] | select(.comments.totalCount > 5) | {number, title, comments: .comments.totalCount}"}, | |
| {"description": "Count by state", "query": "group_by(.state) | map({state: .[0].state, count: length})"} | |
| ] | |
| } | |
| EOF | |
| fi | |
| GH_AW_MCP_SCRIPTS_SH_GITHUB-ISSUE-QUERY_98e0363d863bd74f_EOF | |
| chmod +x ${RUNNER_TEMP}/gh-aw/mcp-scripts/github-issue-query.sh | |
| cat > ${RUNNER_TEMP}/gh-aw/mcp-scripts/github-pr-query.sh << 'GH_AW_MCP_SCRIPTS_SH_GITHUB-PR-QUERY_7e48ca8e8ddc5d29_EOF' | |
| #!/bin/bash | |
| # Auto-generated mcp-script tool: github-pr-query | |
| # Query GitHub pull requests with jq filtering support. Without --jq, returns schema and data size info. Use --jq '.' to get all data, or specific jq expressions to filter. | |
| # Fail fast: exit on errors, unset variables, and failed pipeline stages. | |
| # (A duplicate 'set -e' was removed; '-e' is already included below.) | |
| set -euo pipefail | |
| # Default values (tool inputs arrive as INPUT_* environment variables) | |
| REPO="${INPUT_REPO:-}" | |
| STATE="${INPUT_STATE:-open}" | |
| LIMIT="${INPUT_LIMIT:-30}" | |
| JQ_FILTER="${INPUT_JQ:-}" | |
| # JSON fields to fetch | |
| JSON_FIELDS="number,title,state,author,createdAt,updatedAt,mergedAt,closedAt,headRefName,baseRefName,isDraft,reviewDecision,additions,deletions,changedFiles,labels,assignees,reviewRequests,url" | |
| # Build and execute gh command; --repo is only passed when provided so gh | |
| # falls back to the repository inferred from the current directory. | |
| if [[ -n "$REPO" ]]; then | |
| OUTPUT=$(gh pr list --state "$STATE" --limit "$LIMIT" --json "$JSON_FIELDS" --repo "$REPO") | |
| else | |
| OUTPUT=$(gh pr list --state "$STATE" --limit "$LIMIT" --json "$JSON_FIELDS") | |
| fi | |
| # Apply jq filter if specified | |
| if [[ -n "$JQ_FILTER" ]]; then | |
| jq "$JQ_FILTER" <<< "$OUTPUT" | |
| else | |
| # Return schema and size instead of full data | |
| ITEM_COUNT=$(jq 'length' <<< "$OUTPUT") | |
| DATA_SIZE=${#OUTPUT} | |
| # Validate values are numeric before interpolating them into raw JSON below | |
| if ! [[ "$ITEM_COUNT" =~ ^[0-9]+$ ]]; then | |
| ITEM_COUNT=0 | |
| fi | |
| if ! [[ "$DATA_SIZE" =~ ^[0-9]+$ ]]; then | |
| DATA_SIZE=0 | |
| fi | |
| # Unquoted EOF delimiter so $ITEM_COUNT and $DATA_SIZE expand in the payload | |
| cat << EOF | |
| { | |
| "message": "No --jq filter provided. Use --jq to filter and retrieve data.", | |
| "item_count": $ITEM_COUNT, | |
| "data_size_bytes": $DATA_SIZE, | |
| "schema": { | |
| "type": "array", | |
| "description": "Array of pull request objects", | |
| "item_fields": { | |
| "number": "integer - PR number", | |
| "title": "string - PR title", | |
| "state": "string - PR state (OPEN, CLOSED, MERGED)", | |
| "author": "object - Author info with login field", | |
| "createdAt": "string - ISO timestamp of creation", | |
| "updatedAt": "string - ISO timestamp of last update", | |
| "mergedAt": "string|null - ISO timestamp of merge", | |
| "closedAt": "string|null - ISO timestamp of close", | |
| "headRefName": "string - Source branch name", | |
| "baseRefName": "string - Target branch name", | |
| "isDraft": "boolean - Whether PR is a draft", | |
| "reviewDecision": "string|null - Review decision (APPROVED, CHANGES_REQUESTED, REVIEW_REQUIRED)", | |
| "additions": "integer - Lines added", | |
| "deletions": "integer - Lines deleted", | |
| "changedFiles": "integer - Number of files changed", | |
| "labels": "array - Array of label objects with name field", | |
| "assignees": "array - Array of assignee objects with login field", | |
| "reviewRequests": "array - Array of review request objects", | |
| "url": "string - PR URL" | |
| } | |
| }, | |
| "suggested_queries": [ | |
| {"description": "Get all data", "query": "."}, | |
| {"description": "Get PR numbers and titles", "query": ".[] | {number, title}"}, | |
| {"description": "Get open PRs only", "query": ".[] | select(.state == \"OPEN\")"}, | |
| {"description": "Get merged PRs", "query": ".[] | select(.mergedAt != null)"}, | |
| {"description": "Get PRs by author", "query": ".[] | select(.author.login == \"USERNAME\")"}, | |
| {"description": "Get large PRs", "query": ".[] | select(.changedFiles > 10) | {number, title, changedFiles}"}, | |
| {"description": "Count by state", "query": "group_by(.state) | map({state: .[0].state, count: length})"} | |
| ] | |
| } | |
| EOF | |
| fi | |
| GH_AW_MCP_SCRIPTS_SH_GITHUB-PR-QUERY_7e48ca8e8ddc5d29_EOF | |
| chmod +x ${RUNNER_TEMP}/gh-aw/mcp-scripts/github-pr-query.sh | |
| cat > ${RUNNER_TEMP}/gh-aw/mcp-scripts/go.sh << 'GH_AW_MCP_SCRIPTS_SH_GO_c5c33f025b10604b_EOF' | |
| #!/bin/bash | |
| # Auto-generated mcp-script tool: go | |
| # Execute any Go command. This tool is accessible as 'mcpscripts-go'. Provide the full command after 'go' (e.g., args: 'test ./...'). The tool will run: go <args>. Use single quotes ' for complex args to avoid shell interpretation issues. | |
| set -euo pipefail | |
| # Echo the command first so it is visible in the tool's captured output. | |
| echo "go $INPUT_ARGS" | |
| # NOTE: $INPUT_ARGS is intentionally unquoted (shellcheck SC2086) so the | |
| # argument string word-splits into separate arguments for the go binary. | |
| go $INPUT_ARGS | |
| GH_AW_MCP_SCRIPTS_SH_GO_c5c33f025b10604b_EOF | |
| chmod +x ${RUNNER_TEMP}/gh-aw/mcp-scripts/go.sh | |
| cat > ${RUNNER_TEMP}/gh-aw/mcp-scripts/make.sh << 'GH_AW_MCP_SCRIPTS_SH_MAKE_e1b91b440c074c66_EOF' | |
| #!/bin/bash | |
| # Auto-generated mcp-script tool: make | |
| # Execute any Make target. This tool is accessible as 'mcpscripts-make'. Provide the target name(s) (e.g., args: 'build'). The tool will run: make <args>. Use single quotes ' for complex args to avoid shell interpretation issues. | |
| set -euo pipefail | |
| # Echo the command first so it is visible in the tool's captured output. | |
| echo "make $INPUT_ARGS" | |
| # NOTE: $INPUT_ARGS is intentionally unquoted (shellcheck SC2086) so multiple | |
| # targets/flags in the argument string word-split into separate arguments. | |
| make $INPUT_ARGS | |
| GH_AW_MCP_SCRIPTS_SH_MAKE_e1b91b440c074c66_EOF | |
| chmod +x ${RUNNER_TEMP}/gh-aw/mcp-scripts/make.sh | |
| - name: Generate MCP Scripts Server Config | |
| id: mcp-scripts-config | |
| run: | | |
| # Generate a secure random API key (360 bits of entropy, 40+ chars) | |
| # Mask immediately to prevent timing vulnerabilities | |
| # (tr strips base64 '/', '+', '=' so the key is safe to embed unescaped) | |
| API_KEY=$(openssl rand -base64 45 | tr -d '/+=') | |
| # Mask BEFORE the key is written to any output so it never appears in logs | |
| echo "::add-mask::${API_KEY}" | |
| PORT=3000 | |
| # Set outputs for next steps | |
| # Grouped into one block so both lines share a single $GITHUB_OUTPUT redirect | |
| { | |
| echo "mcp_scripts_api_key=${API_KEY}" | |
| echo "mcp_scripts_port=${PORT}" | |
| } >> "$GITHUB_OUTPUT" | |
| echo "MCP Scripts server will run on port ${PORT}" | |
| - name: Start MCP Scripts HTTP Server | |
| id: mcp-scripts-start | |
| env: | |
| DEBUG: '*' | |
| GH_AW_MCP_SCRIPTS_PORT: ${{ steps.mcp-scripts-config.outputs.mcp_scripts_port }} | |
| GH_AW_MCP_SCRIPTS_API_KEY: ${{ steps.mcp-scripts-config.outputs.mcp_scripts_api_key }} | |
| GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| GH_DEBUG: 1 | |
| GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| run: | | |
| # Environment variables are set above to prevent template injection | |
| export DEBUG | |
| export GH_AW_MCP_SCRIPTS_PORT | |
| export GH_AW_MCP_SCRIPTS_API_KEY | |
| bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_scripts_server.sh | |
| - name: Start MCP Gateway | |
| id: start-mcp-gateway | |
| env: | |
| GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| GH_AW_MCP_SCRIPTS_API_KEY: ${{ steps.mcp-scripts-start.outputs.api_key }} | |
| GH_AW_MCP_SCRIPTS_PORT: ${{ steps.mcp-scripts-start.outputs.port }} | |
| GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }} | |
| GH_AW_SAFE_OUTPUTS_API_KEY: ${{ steps.safe-outputs-start.outputs.api_key }} | |
| GH_AW_SAFE_OUTPUTS_PORT: ${{ steps.safe-outputs-start.outputs.port }} | |
| GH_DEBUG: 1 | |
| GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| GITHUB_MCP_GUARD_MIN_INTEGRITY: ${{ steps.determine-automatic-lockdown.outputs.min_integrity }} | |
| GITHUB_MCP_GUARD_REPOS: ${{ steps.determine-automatic-lockdown.outputs.repos }} | |
| GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} | |
| GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| TAVILY_API_KEY: ${{ secrets.TAVILY_API_KEY }} | |
| run: | | |
| set -eo pipefail | |
| mkdir -p /tmp/gh-aw/mcp-config | |
| mkdir -p /tmp/gh-aw/mcp-logs/playwright | |
| # Export gateway environment variables for MCP config and gateway script | |
| export MCP_GATEWAY_PORT="80" | |
| export MCP_GATEWAY_DOMAIN="host.docker.internal" | |
| MCP_GATEWAY_API_KEY=$(openssl rand -base64 45 | tr -d '/+=') | |
| echo "::add-mask::${MCP_GATEWAY_API_KEY}" | |
| export MCP_GATEWAY_API_KEY | |
| export MCP_GATEWAY_PAYLOAD_DIR="/tmp/gh-aw/mcp-payloads" | |
| mkdir -p "${MCP_GATEWAY_PAYLOAD_DIR}" | |
| export MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD="524288" | |
| export DEBUG="*" | |
| export GH_AW_ENGINE="claude" | |
| export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_MCP_SCRIPTS_PORT -e GH_AW_MCP_SCRIPTS_API_KEY -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -e GH_AW_GH_TOKEN -e GH_DEBUG -e GH_TOKEN -e TAVILY_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.2.11' | |
| cat << GH_AW_MCP_CONFIG_5b332f9ee71a220d_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh | |
| { | |
| "mcpServers": { | |
| "agenticworkflows": { | |
| "container": "localhost/gh-aw:dev", | |
| "mounts": ["\${GITHUB_WORKSPACE}:\${GITHUB_WORKSPACE}:rw", "/tmp/gh-aw:/tmp/gh-aw:rw"], | |
| "args": ["--network", "host", "-w", "\${GITHUB_WORKSPACE}"], | |
| "env": { | |
| "DEBUG": "*", | |
| "GITHUB_TOKEN": "$GITHUB_TOKEN", | |
| "GITHUB_ACTOR": "$GITHUB_ACTOR", | |
| "GITHUB_REPOSITORY": "$GITHUB_REPOSITORY" | |
| }, | |
| "guard-policies": { | |
| "write-sink": { | |
| "accept": [ | |
| "*" | |
| ] | |
| } | |
| } | |
| }, | |
| "github": { | |
| "container": "ghcr.io/github/github-mcp-server:v0.32.0", | |
| "env": { | |
| "GITHUB_HOST": "$GITHUB_SERVER_URL", | |
| "GITHUB_PERSONAL_ACCESS_TOKEN": "$GITHUB_MCP_SERVER_TOKEN", | |
| "GITHUB_READ_ONLY": "1", | |
| "GITHUB_TOOLSETS": "repos,pull_requests" | |
| }, | |
| "guard-policies": { | |
| "allow-only": { | |
| "min-integrity": "$GITHUB_MCP_GUARD_MIN_INTEGRITY", | |
| "repos": "$GITHUB_MCP_GUARD_REPOS" | |
| } | |
| } | |
| }, | |
| "mcpscripts": { | |
| "type": "http", | |
| "url": "http://host.docker.internal:$GH_AW_MCP_SCRIPTS_PORT", | |
| "headers": { | |
| "Authorization": "$GH_AW_MCP_SCRIPTS_API_KEY" | |
| }, | |
| "guard-policies": { | |
| "write-sink": { | |
| "accept": [ | |
| "*" | |
| ] | |
| } | |
| } | |
| }, | |
| "playwright": { | |
| "container": "mcr.microsoft.com/playwright/mcp", | |
| "args": [ | |
| "--init", | |
| "--network", | |
| "host", | |
| "--security-opt", | |
| "seccomp=unconfined", | |
| "--ipc=host" | |
| ], | |
| "entrypointArgs": [ | |
| "--output-dir", | |
| "/tmp/gh-aw/mcp-logs/playwright", | |
| "--no-sandbox" | |
| ], | |
| "mounts": ["/tmp/gh-aw/mcp-logs:/tmp/gh-aw/mcp-logs:rw"], | |
| "guard-policies": { | |
| "write-sink": { | |
| "accept": [ | |
| "*" | |
| ] | |
| } | |
| } | |
| }, | |
| "safeoutputs": { | |
| "type": "http", | |
| "url": "http://host.docker.internal:$GH_AW_SAFE_OUTPUTS_PORT", | |
| "headers": { | |
| "Authorization": "$GH_AW_SAFE_OUTPUTS_API_KEY" | |
| }, | |
| "guard-policies": { | |
| "write-sink": { | |
| "accept": [ | |
| "*" | |
| ] | |
| } | |
| } | |
| }, | |
| "serena": { | |
| "type": "stdio", | |
| "container": "ghcr.io/github/serena-mcp-server:latest", | |
| "entrypoint": "serena", | |
| "entrypointArgs": [ | |
| "start-mcp-server", | |
| "--context", | |
| "codex", | |
| "--project", | |
| "\${GITHUB_WORKSPACE}" | |
| ], | |
| "mounts": [ | |
| "\${GITHUB_WORKSPACE}:\${GITHUB_WORKSPACE}:rw" | |
| ], | |
| "args": [ | |
| "--network", | |
| "host" | |
| ], | |
| "guard-policies": { | |
| "write-sink": { | |
| "accept": [ | |
| "*" | |
| ] | |
| } | |
| } | |
| }, | |
| "tavily": { | |
| "type": "http", | |
| "url": "https://mcp.tavily.com/mcp/", | |
| "headers": { | |
| "Authorization": "Bearer ${{ secrets.TAVILY_API_KEY }}" | |
| }, | |
| "tools": [ | |
| "*" | |
| ], | |
| "guard-policies": { | |
| "write-sink": { | |
| "accept": [ | |
| "*" | |
| ] | |
| } | |
| } | |
| } | |
| }, | |
| "gateway": { | |
| "port": $MCP_GATEWAY_PORT, | |
| "domain": "${MCP_GATEWAY_DOMAIN}", | |
| "apiKey": "${MCP_GATEWAY_API_KEY}", | |
| "payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}" | |
| } | |
| } | |
| GH_AW_MCP_CONFIG_5b332f9ee71a220d_EOF | |
| - name: Download activation artifact | |
| uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 | |
| with: | |
| name: activation | |
| path: /tmp/gh-aw | |
| - name: Clean git credentials | |
| continue-on-error: true | |
| run: bash ${RUNNER_TEMP}/gh-aw/actions/clean_git_credentials.sh | |
| - name: Execute Claude Code CLI | |
| id: agentic_execution | |
| # Allowed tools (sorted): | |
| # - Bash | |
| # - BashOutput | |
| # - Edit | |
| # - Edit(/tmp/gh-aw/cache-memory/*) | |
| # - ExitPlanMode | |
| # - Glob | |
| # - Grep | |
| # - KillBash | |
| # - LS | |
| # - MultiEdit | |
| # - MultiEdit(/tmp/gh-aw/cache-memory/*) | |
| # - NotebookEdit | |
| # - NotebookRead | |
| # - Read | |
| # - Read(/tmp/gh-aw/cache-memory/*) | |
| # - Task | |
| # - TodoWrite | |
| # - Write | |
| # - Write(/tmp/gh-aw/cache-memory/*) | |
| # - mcp__github__download_workflow_run_artifact | |
| # - mcp__github__get_code_scanning_alert | |
| # - mcp__github__get_commit | |
| # - mcp__github__get_dependabot_alert | |
| # - mcp__github__get_discussion | |
| # - mcp__github__get_discussion_comments | |
| # - mcp__github__get_file_contents | |
| # - mcp__github__get_job_logs | |
| # - mcp__github__get_label | |
| # - mcp__github__get_latest_release | |
| # - mcp__github__get_me | |
| # - mcp__github__get_notification_details | |
| # - mcp__github__get_pull_request | |
| # - mcp__github__get_pull_request_comments | |
| # - mcp__github__get_pull_request_diff | |
| # - mcp__github__get_pull_request_files | |
| # - mcp__github__get_pull_request_review_comments | |
| # - mcp__github__get_pull_request_reviews | |
| # - mcp__github__get_pull_request_status | |
| # - mcp__github__get_release_by_tag | |
| # - mcp__github__get_secret_scanning_alert | |
| # - mcp__github__get_tag | |
| # - mcp__github__get_workflow_run | |
| # - mcp__github__get_workflow_run_logs | |
| # - mcp__github__get_workflow_run_usage | |
| # - mcp__github__issue_read | |
| # - mcp__github__list_branches | |
| # - mcp__github__list_code_scanning_alerts | |
| # - mcp__github__list_commits | |
| # - mcp__github__list_dependabot_alerts | |
| # - mcp__github__list_discussion_categories | |
| # - mcp__github__list_discussions | |
| # - mcp__github__list_issue_types | |
| # - mcp__github__list_issues | |
| # - mcp__github__list_label | |
| # - mcp__github__list_notifications | |
| # - mcp__github__list_pull_requests | |
| # - mcp__github__list_releases | |
| # - mcp__github__list_secret_scanning_alerts | |
| # - mcp__github__list_starred_repositories | |
| # - mcp__github__list_tags | |
| # - mcp__github__list_workflow_jobs | |
| # - mcp__github__list_workflow_run_artifacts | |
| # - mcp__github__list_workflow_runs | |
| # - mcp__github__list_workflows | |
| # - mcp__github__pull_request_read | |
| # - mcp__github__search_code | |
| # - mcp__github__search_issues | |
| # - mcp__github__search_orgs | |
| # - mcp__github__search_pull_requests | |
| # - mcp__github__search_repositories | |
| # - mcp__github__search_users | |
| # - mcp__playwright__browser_click | |
| # - mcp__playwright__browser_close | |
| # - mcp__playwright__browser_console_messages | |
| # - mcp__playwright__browser_drag | |
| # - mcp__playwright__browser_evaluate | |
| # - mcp__playwright__browser_file_upload | |
| # - mcp__playwright__browser_fill_form | |
| # - mcp__playwright__browser_handle_dialog | |
| # - mcp__playwright__browser_hover | |
| # - mcp__playwright__browser_install | |
| # - mcp__playwright__browser_navigate | |
| # - mcp__playwright__browser_navigate_back | |
| # - mcp__playwright__browser_network_requests | |
| # - mcp__playwright__browser_press_key | |
| # - mcp__playwright__browser_resize | |
| # - mcp__playwright__browser_select_option | |
| # - mcp__playwright__browser_snapshot | |
| # - mcp__playwright__browser_tabs | |
| # - mcp__playwright__browser_take_screenshot | |
| # - mcp__playwright__browser_type | |
| # - mcp__playwright__browser_wait_for | |
| # - mcp__tavily | |
| timeout-minutes: 10 | |
| run: | | |
| set -o pipefail | |
| touch /tmp/gh-aw/agent-step-summary.md | |
| # shellcheck disable=SC1003 | |
| sudo -E awf --container-workdir "${GITHUB_WORKSPACE}" --mount "${RUNNER_TEMP}/gh-aw:${RUNNER_TEMP}/gh-aw:ro" --mount "${RUNNER_TEMP}/gh-aw:/host${RUNNER_TEMP}/gh-aw:ro" --tty --env-all --exclude-env ANTHROPIC_API_KEY --exclude-env GH_AW_GH_TOKEN --exclude-env GH_TOKEN --exclude-env GITHUB_MCP_SERVER_TOKEN --exclude-env MCP_GATEWAY_API_KEY --exclude-env TAVILY_API_KEY --allow-domains '*.githubusercontent.com,anthropic.com,api.anthropic.com,api.github.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,cdn.playwright.dev,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,docs.github.com,files.pythonhosted.org,ghcr.io,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.blog,github.com,github.githubassets.com,go.dev,golang.org,goproxy.io,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,mcp.tavily.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,pkg.go.dev,playwright.download.prss.microsoft.com,ppa.launchpad.net,proxy.golang.org,pypi.org,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,sentry.io,statsig.anthropic.com,storage.googleapis.com,sum.golang.org,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com,www.googleapis.com' --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --audit-dir /tmp/gh-aw/sandbox/firewall/audit --enable-host-access --image-tag 0.25.10 --skip-pull --enable-api-proxy \ | |
| -- /bin/bash -c 'export PATH="$(find /opt/hostedtoolcache -maxdepth 4 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && claude --print --disable-slash-commands --no-chrome --max-turns 100 --mcp-config /tmp/gh-aw/mcp-config/mcp-servers.json --allowed-tools '\''Bash,BashOutput,Edit,Edit(/tmp/gh-aw/cache-memory/*),ExitPlanMode,Glob,Grep,KillBash,LS,MultiEdit,MultiEdit(/tmp/gh-aw/cache-memory/*),NotebookEdit,NotebookRead,Read,Read(/tmp/gh-aw/cache-memory/*),Task,TodoWrite,Write,Write(/tmp/gh-aw/cache-memory/*),mcp__github__download_workflow_run_artifact,mcp__github__get_code_scanning_alert,mcp__github__get_commit,mcp__github__get_dependabot_alert,mcp__github__get_discussion,mcp__github__get_discussion_comments,mcp__github__get_file_contents,mcp__github__get_job_logs,mcp__github__get_label,mcp__github__get_latest_release,mcp__github__get_me,mcp__github__get_notification_details,mcp__github__get_pull_request,mcp__github__get_pull_request_comments,mcp__github__get_pull_request_diff,mcp__github__get_pull_request_files,mcp__github__get_pull_request_review_comments,mcp__github__get_pull_request_reviews,mcp__github__get_pull_request_status,mcp__github__get_release_by_tag,mcp__github__get_secret_scanning_alert,mcp__github__get_tag,mcp__github__get_workflow_run,mcp__github__get_workflow_run_logs,mcp__github__get_workflow_run_usage,mcp__github__issue_read,mcp__github__list_branches,mcp__github__list_code_scanning_alerts,mcp__github__list_commits,mcp__github__list_dependabot_alerts,mcp__github__list_discussion_categories,mcp__github__list_discussions,mcp__github__list_issue_types,mcp__github__list_issues,mcp__github__list_label,mcp__github__list_notifications,mcp__github__list_pull_requests,mcp__github__list_releases,mcp__github__list_secret_scanning_alerts,mcp__github__list_starred_repositories,mcp__github__list_tags,mcp__github__list_workflow_jobs,mcp__github__list_workflow_run_artifacts,mcp__github__
list_workflow_runs,mcp__github__list_workflows,mcp__github__pull_request_read,mcp__github__search_code,mcp__github__search_issues,mcp__github__search_orgs,mcp__github__search_pull_requests,mcp__github__search_repositories,mcp__github__search_users,mcp__playwright__browser_click,mcp__playwright__browser_close,mcp__playwright__browser_console_messages,mcp__playwright__browser_drag,mcp__playwright__browser_evaluate,mcp__playwright__browser_file_upload,mcp__playwright__browser_fill_form,mcp__playwright__browser_handle_dialog,mcp__playwright__browser_hover,mcp__playwright__browser_install,mcp__playwright__browser_navigate,mcp__playwright__browser_navigate_back,mcp__playwright__browser_network_requests,mcp__playwright__browser_press_key,mcp__playwright__browser_resize,mcp__playwright__browser_select_option,mcp__playwright__browser_snapshot,mcp__playwright__browser_tabs,mcp__playwright__browser_take_screenshot,mcp__playwright__browser_type,mcp__playwright__browser_wait_for,mcp__tavily'\'' --debug-file /tmp/gh-aw/agent-stdio.log --verbose --permission-mode bypassPermissions --output-format stream-json "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_CLAUDE:+ --model "$GH_AW_MODEL_AGENT_CLAUDE"}' 2>&1 | tee -a /tmp/gh-aw/agent-stdio.log | |
| env: | |
| ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} | |
| BASH_DEFAULT_TIMEOUT_MS: 60000 | |
| BASH_MAX_TIMEOUT_MS: 60000 | |
| DISABLE_BUG_COMMAND: 1 | |
| DISABLE_ERROR_REPORTING: 1 | |
| DISABLE_TELEMETRY: 1 | |
| GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| GH_AW_MAX_TURNS: 100 | |
| GH_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json | |
| GH_AW_MODEL_AGENT_CLAUDE: ${{ vars.GH_AW_MODEL_AGENT_CLAUDE || '' }} | |
| GH_AW_PHASE: agent | |
| GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt | |
| GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }} | |
| GH_AW_VERSION: dev | |
| GH_DEBUG: 1 | |
| GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| GITHUB_AW: true | |
| GITHUB_STEP_SUMMARY: /tmp/gh-aw/agent-step-summary.md | |
| GITHUB_WORKSPACE: ${{ github.workspace }} | |
| GIT_AUTHOR_EMAIL: github-actions[bot]@users.noreply.github.com | |
| GIT_AUTHOR_NAME: github-actions[bot] | |
| GIT_COMMITTER_EMAIL: github-actions[bot]@users.noreply.github.com | |
| GIT_COMMITTER_NAME: github-actions[bot] | |
| MCP_TIMEOUT: 120000 | |
| MCP_TOOL_TIMEOUT: 60000 | |
| - name: Configure Git credentials | |
| env: | |
| REPO_NAME: ${{ github.repository }} | |
| SERVER_URL: ${{ github.server_url }} | |
| GH_AW_GIT_TOKEN: ${{ github.token }} | |
| run: | | |
| # Configure the standard GitHub Actions bot identity for any commits. | |
| git config --global user.email "github-actions[bot]@users.noreply.github.com" | |
| git config --global user.name "github-actions[bot]" | |
| git config --global am.keepcr true | |
| # Re-authenticate git with GitHub token. The token is passed via env | |
| # (consistent with this workflow's other steps) instead of being | |
| # interpolated into the script body with ${{ }}. | |
| SERVER_URL_STRIPPED="${SERVER_URL#https://}" | |
| git remote set-url origin "https://x-access-token:${GH_AW_GIT_TOKEN}@${SERVER_URL_STRIPPED}/${REPO_NAME}.git" | |
| echo "Git configured with standard GitHub Actions identity" | |
| - name: Stop MCP Gateway | |
| if: always() | |
| continue-on-error: true | |
| env: | |
| MCP_GATEWAY_PORT: ${{ steps.start-mcp-gateway.outputs.gateway-port }} | |
| MCP_GATEWAY_API_KEY: ${{ steps.start-mcp-gateway.outputs.gateway-api-key }} | |
| GATEWAY_PID: ${{ steps.start-mcp-gateway.outputs.gateway-pid }} | |
| run: | | |
| # Best-effort shutdown: runs even when earlier steps failed (if: always) | |
| # and never fails the job (continue-on-error) so the gateway process does | |
| # not outlive the run. | |
| bash ${RUNNER_TEMP}/gh-aw/actions/stop_mcp_gateway.sh "$GATEWAY_PID" | |
| - name: Redact secrets in logs | |
| if: always() | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 | |
| with: | |
| script: | | |
| const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs'); | |
| setupGlobals(core, github, context, exec, io); | |
| const { main } = require('${{ runner.temp }}/gh-aw/actions/redact_secrets.cjs'); | |
| await main(); | |
| env: | |
| GH_AW_SECRET_NAMES: 'ANTHROPIC_API_KEY,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN,TAVILY_API_KEY' | |
| SECRET_ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} | |
| SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }} | |
| SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }} | |
| SECRET_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| SECRET_TAVILY_API_KEY: ${{ secrets.TAVILY_API_KEY }} | |
| - name: Append agent step summary | |
| if: always() | |
| run: bash ${RUNNER_TEMP}/gh-aw/actions/append_agent_step_summary.sh | |
| - name: Copy Safe Outputs | |
| if: always() | |
| env: | |
| GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }} | |
| run: | | |
| mkdir -p /tmp/gh-aw | |
| # Best-effort copy: the safe-outputs file may not exist (e.g. the agent | |
| # emitted nothing), so cp errors are deliberately suppressed. | |
| cp "$GH_AW_SAFE_OUTPUTS" /tmp/gh-aw/safeoutputs.jsonl 2>/dev/null || true | |
| - name: Ingest agent output | |
| id: collect_output | |
| if: always() | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 | |
| env: | |
| GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }} | |
| GH_AW_ALLOWED_DOMAINS: "*.githubusercontent.com,127.0.0.1,::1,anthropic.com,api.anthropic.com,api.github.com,api.snapcraft.io,app.renovatebot.com,appveyor.com,archive.ubuntu.com,azure.archive.ubuntu.com,badgen.net,cdn.playwright.dev,circleci.com,codacy.com,codeclimate.com,codecov.io,codeload.github.com,coveralls.io,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,deepsource.io,docs.github.com,drone.io,files.pythonhosted.org,ghcr.io,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.blog,github.com,github.githubassets.com,go.dev,golang.org,goproxy.io,host.docker.internal,img.shields.io,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,localhost,mcp.tavily.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,pkg.go.dev,playwright.download.prss.microsoft.com,ppa.launchpad.net,proxy.golang.org,pypi.org,raw.githubusercontent.com,readthedocs.io,readthedocs.org,registry.npmjs.org,renovatebot.com,s.symcb.com,s.symcd.com,security.ubuntu.com,semaphoreci.com,sentry.io,shields.io,snyk.io,sonarcloud.io,sonarqube.com,statsig.anthropic.com,storage.googleapis.com,sum.golang.org,travis-ci.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com,www.googleapis.com" | |
| GITHUB_SERVER_URL: ${{ github.server_url }} | |
| GITHUB_API_URL: ${{ github.api_url }} | |
| with: | |
| script: | | |
| const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs'); | |
| setupGlobals(core, github, context, exec, io); | |
| const { main } = require('${{ runner.temp }}/gh-aw/actions/collect_ndjson_output.cjs'); | |
| await main(); | |
      # --- Post-execution reporting for the agent job ---
      # Every parser below runs under `if: always()` so diagnostics reach the
      # step summary even when the agent step itself failed.
      - name: Parse agent logs for step summary
        if: always()
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          # Combined stdout/stderr captured from the Claude agent run.
          GH_AW_AGENT_OUTPUT: /tmp/gh-aw/agent-stdio.log
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/parse_claude_log.cjs');
            await main();
      # Summarize logs produced by MCP script servers (/tmp/gh-aw/mcp-scripts/logs/).
      - name: Parse MCP Scripts logs for step summary
        if: always()
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/parse_mcp_scripts_logs.cjs');
            await main();
      # Summarize MCP gateway traffic logs.
      - name: Parse MCP Gateway logs for step summary
        if: always()
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/parse_mcp_gateway_log.cjs');
            await main();
      - name: Print firewall logs
        if: always()
        continue-on-error: true
        env:
          AWF_LOGS_DIR: /tmp/gh-aw/sandbox/firewall/logs
        run: |
          # Fix permissions on firewall logs so they can be uploaded as artifacts
          # AWF runs with sudo, creating files owned by root
          sudo chmod -R a+r /tmp/gh-aw/sandbox/firewall/logs 2>/dev/null || true
          # Only run awf logs summary if awf command exists (it may not be installed if workflow failed before install step)
          if command -v awf &> /dev/null; then
            awf logs summary | tee -a "$GITHUB_STEP_SUMMARY"
          else
            echo 'AWF binary not installed, skipping firewall log summary'
          fi
      - name: Parse token usage for step summary
        if: always()
        continue-on-error: true
        run: bash ${RUNNER_TEMP}/gh-aw/actions/parse_token_usage.sh
      # Guarantee agent_output.json exists so downstream jobs that download the
      # "agent" artifact (detection, safe_outputs, conclusion) always find it.
      - name: Write agent output placeholder if missing
        if: always()
        run: |
          if [ ! -f /tmp/gh-aw/agent_output.json ]; then
            echo '{"items":[]}' > /tmp/gh-aw/agent_output.json
          fi
      - name: Commit cache-memory changes
        if: always()
        env:
          GH_AW_CACHE_DIR: /tmp/gh-aw/cache-memory
        run: bash ${RUNNER_TEMP}/gh-aw/actions/commit_cache_memory_git.sh
      # This artifact is re-downloaded by the update_cache_memory job, which
      # persists it into the actions cache.
      - name: Upload cache-memory data as artifact
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
        if: always()
        with:
          name: cache-memory
          path: /tmp/gh-aw/cache-memory
      # Bundle prompt, logs, safe outputs, and patch/bundle files into the
      # "agent" artifact consumed by the detection/safe_outputs/conclusion jobs.
      - name: Upload agent artifacts
        if: always()
        continue-on-error: true
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
        with:
          name: agent
          path: |
            /tmp/gh-aw/aw-prompts/prompt.txt
            /tmp/gh-aw/mcp-logs/
            /tmp/gh-aw/mcp-scripts/logs/
            /tmp/gh-aw/agent-stdio.log
            /tmp/gh-aw/agent/
            /tmp/gh-aw/safeoutputs.jsonl
            /tmp/gh-aw/agent_output.json
            /tmp/gh-aw/aw-*.patch
            /tmp/gh-aw/aw-*.bundle
          if-no-files-found: ignore
      - name: Upload firewall audit logs
        if: always()
        continue-on-error: true
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
        with:
          name: firewall-audit-logs
          path: |
            /tmp/gh-aw/sandbox/firewall/logs/
            /tmp/gh-aw/sandbox/firewall/audit/
          if-no-files-found: ignore
  # Packs the APM packages listed in AW_APM_PACKAGES and publishes the bundle
  # as a short-lived (1-day) artifact prefixed with the activation job's
  # artifact_prefix output.
  apm:
    needs: activation
    runs-on: ubuntu-slim
    permissions:
      {}  # no GITHUB_TOKEN permissions required; the pack step uses explicit secrets
    steps:
      - name: Configure GH_HOST for enterprise compatibility
        id: ghes-host-config
        shell: bash
        run: |
          # Derive GH_HOST from GITHUB_SERVER_URL so the gh CLI targets the correct
          # GitHub instance (GHES/GHEC). On github.com this is a harmless no-op.
          GH_HOST="${GITHUB_SERVER_URL#https://}"
          GH_HOST="${GH_HOST#http://}"
          echo "GH_HOST=${GH_HOST}" >> "$GITHUB_ENV"
      # Convert the JSON package array into a bullet list and expose it as a
      # multi-line step output via a heredoc delimiter (APMDEPS).
      - name: Prepare APM package list
        id: apm_prep
        run: |
          DEPS=$(echo "$AW_APM_PACKAGES" | jq -r '.[] | "- " + .')
          {
            echo "deps<<APMDEPS"
            printf '%s\n' "$DEPS"
            echo "APMDEPS"
          } >> "$GITHUB_OUTPUT"
        env:
          AW_APM_PACKAGES: "[\"microsoft/apm-sample-package\"]"
      - name: Pack APM packages
        id: apm_pack
        uses: microsoft/apm-action@a190b0b1a91031057144dc136acf9757a59c9e4d # v1.4.1
        env:
          # Token fallback chain: plugins token, then workflow token, then default.
          GITHUB_TOKEN: ${{ secrets.GH_AW_PLUGINS_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
        with:
          archive: "true"
          dependencies: ${{ steps.apm_prep.outputs.deps }}
          isolated: "true"
          pack: "true"
          target: all
          working-directory: /tmp/gh-aw/apm-workspace
      # Only uploaded when packing succeeded (no `always()` here on purpose).
      - name: Upload APM bundle artifact
        if: success()
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
        with:
          name: ${{ needs.activation.outputs.artifact_prefix }}apm
          path: ${{ steps.apm_pack.outputs.bundle-path }}
          retention-days: "1"
  # Final status job: runs after every other job to process no-op messages,
  # record missing tools, report agent failures, and update the activation
  # reaction comment with the run's outcome.
  conclusion:
    needs:
      - activation
      - agent
      - apm
      - detection
      - safe_outputs
      - update_cache_memory
    # Run whenever the agent actually ran, or when the lockdown check failed.
    if: always() && (needs.agent.result != 'skipped' || needs.activation.outputs.lockdown_check_failed == 'true')
    runs-on: ubuntu-slim
    permissions:
      contents: read
      discussions: write
      issues: write
      pull-requests: write
    # Serialize conclusion runs across workflow runs; queued, never cancelled.
    concurrency:
      group: "gh-aw-conclusion-smoke-claude"
      cancel-in-progress: false
    outputs:
      noop_message: ${{ steps.noop.outputs.noop_message }}
      tools_reported: ${{ steps.missing_tool.outputs.tools_reported }}
      total_count: ${{ steps.missing_tool.outputs.total_count }}
    steps:
      - name: Checkout actions folder
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          repository: github/gh-aw
          sparse-checkout: |
            actions
          persist-credentials: false
      - name: Setup Scripts
        uses: ./actions/setup
        with:
          destination: ${{ runner.temp }}/gh-aw/actions
      # Best-effort download: the agent job may have failed before uploading.
      - name: Download agent output artifact
        id: download-agent-output
        continue-on-error: true
        uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
        with:
          name: agent
          path: /tmp/gh-aw/
      # Only set GH_AW_AGENT_OUTPUT when the artifact was actually downloaded;
      # later steps then receive an empty value if it was not.
      - name: Setup agent output environment variable
        id: setup-agent-output-env
        if: steps.download-agent-output.outcome == 'success'
        run: |
          mkdir -p /tmp/gh-aw/
          find "/tmp/gh-aw/" -type f -print
          echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/agent_output.json" >> "$GITHUB_OUTPUT"
      # Extract any no-op message from the agent output (at most 1, per GH_AW_NOOP_MAX).
      - name: Process No-Op Messages
        id: noop
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
          GH_AW_NOOP_MAX: "1"
          GH_AW_WORKFLOW_NAME: "Smoke Claude"
        with:
          github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/noop.cjs');
            await main();
      # Record tools the agent reported missing; creates a tracking issue
      # (GH_AW_MISSING_TOOL_CREATE_ISSUE=true).
      - name: Record Missing Tool
        id: missing_tool
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
          GH_AW_MISSING_TOOL_CREATE_ISSUE: "true"
          GH_AW_WORKFLOW_NAME: "Smoke Claude"
        with:
          github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/missing_tool.cjs');
            await main();
      # Reports agent failures; files a report issue (GH_AW_FAILURE_REPORT_AS_ISSUE=true)
      # using the agent/activation/safe_outputs results passed via env.
      - name: Handle Agent Failure
        id: handle_agent_failure
        if: always()
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
          GH_AW_WORKFLOW_NAME: "Smoke Claude"
          GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
          GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
          GH_AW_WORKFLOW_ID: "smoke-claude"
          GH_AW_ENGINE_ID: "claude"
          GH_AW_SECRET_VERIFICATION_RESULT: ${{ needs.activation.outputs.secret_verification_result }}
          GH_AW_CHECKOUT_PR_SUCCESS: ${{ needs.agent.outputs.checkout_pr_success }}
          GH_AW_CODE_PUSH_FAILURE_ERRORS: ${{ needs.safe_outputs.outputs.code_push_failure_errors }}
          GH_AW_CODE_PUSH_FAILURE_COUNT: ${{ needs.safe_outputs.outputs.code_push_failure_count }}
          GH_AW_LOCKDOWN_CHECK_FAILED: ${{ needs.activation.outputs.lockdown_check_failed }}
          GH_AW_SAFE_OUTPUT_MESSAGES: "{\"footer\":\"\\u003e 💥 *[THE END] — Illustrated by [{workflow_name}]({run_url})*{history_link}\",\"runStarted\":\"💥 **WHOOSH!** [{workflow_name}]({run_url}) springs into action on this {event_type}! *[Panel 1 begins...]*\",\"runSuccess\":\"🎬 **THE END** — [{workflow_name}]({run_url}) **MISSION: ACCOMPLISHED!** The hero saves the day! ✨\",\"runFailure\":\"💫 **TO BE CONTINUED...** [{workflow_name}]({run_url}) {status}! Our hero faces unexpected challenges...\"}"
          GH_AW_GROUP_REPORTS: "false"
          GH_AW_FAILURE_REPORT_AS_ISSUE: "true"
          GH_AW_TIMEOUT_MINUTES: "10"
        with:
          github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/handle_agent_failure.cjs');
            await main();
      # Publishes the no-op message extracted above, as an issue
      # (GH_AW_NOOP_REPORT_AS_ISSUE=true).
      - name: Handle No-Op Message
        id: handle_noop_message
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
          GH_AW_WORKFLOW_NAME: "Smoke Claude"
          GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
          GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
          GH_AW_NOOP_MESSAGE: ${{ steps.noop.outputs.noop_message }}
          GH_AW_NOOP_REPORT_AS_ISSUE: "true"
        with:
          github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/handle_noop_message.cjs');
            await main();
      # Edit the reaction comment created at activation (comment_id/comment_repo
      # from the activation job) to reflect the final agent/detection results.
      - name: Update reaction comment with completion status
        id: conclusion
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
          GH_AW_COMMENT_ID: ${{ needs.activation.outputs.comment_id }}
          GH_AW_COMMENT_REPO: ${{ needs.activation.outputs.comment_repo }}
          GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
          GH_AW_WORKFLOW_NAME: "Smoke Claude"
          GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
          GH_AW_DETECTION_CONCLUSION: ${{ needs.detection.outputs.detection_conclusion }}
          GH_AW_SAFE_OUTPUT_MESSAGES: "{\"footer\":\"\\u003e 💥 *[THE END] — Illustrated by [{workflow_name}]({run_url})*{history_link}\",\"runStarted\":\"💥 **WHOOSH!** [{workflow_name}]({run_url}) springs into action on this {event_type}! *[Panel 1 begins...]*\",\"runSuccess\":\"🎬 **THE END** — [{workflow_name}]({run_url}) **MISSION: ACCOMPLISHED!** The hero saves the day! ✨\",\"runFailure\":\"💫 **TO BE CONTINUED...** [{workflow_name}]({run_url}) {status}! Our hero faces unexpected challenges...\"}"
        with:
          github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/notify_comment_error.cjs');
            await main();
  # Threat detection: re-runs a restricted Claude agent over the primary
  # agent's outputs and patches to screen them before safe_outputs applies them.
  # Only runs when the agent produced output types or a patch.
  detection:
    needs: agent
    if: >
      always() && needs.agent.result != 'skipped' && (needs.agent.outputs.output_types != '' || needs.agent.outputs.has_patch == 'true')
    runs-on: ubuntu-latest
    permissions:
      contents: read
    outputs:
      detection_conclusion: ${{ steps.detection_conclusion.outputs.conclusion }}
      detection_success: ${{ steps.detection_conclusion.outputs.success }}
    steps:
      - name: Checkout actions folder
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          repository: github/gh-aw
          sparse-checkout: |
            actions
          persist-credentials: false
      - name: Setup Scripts
        uses: ./actions/setup
        with:
          destination: ${{ runner.temp }}/gh-aw/actions
      # Pull in the primary agent's outputs for analysis (best-effort).
      - name: Download agent output artifact
        id: download-agent-output
        continue-on-error: true
        uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
        with:
          name: agent
          path: /tmp/gh-aw/
      - name: Setup agent output environment variable
        id: setup-agent-output-env
        if: steps.download-agent-output.outcome == 'success'
        run: |
          mkdir -p /tmp/gh-aw/
          find "/tmp/gh-aw/" -type f -print
          echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/agent_output.json" >> "$GITHUB_OUTPUT"
      # Patch analysis needs the repository tree for context.
      - name: Checkout repository for patch context
        if: needs.agent.outputs.has_patch == 'true'
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          persist-credentials: false
      # --- Threat Detection ---
      - name: Download container images
        run: bash ${RUNNER_TEMP}/gh-aw/actions/download_docker_images.sh ghcr.io/github/gh-aw-firewall/agent:0.25.10 ghcr.io/github/gh-aw-firewall/api-proxy:0.25.10 ghcr.io/github/gh-aw-firewall/squid:0.25.10
      # Recompute whether there is anything to analyze; gates the steps below
      # via steps.detection_guard.outputs.run_detection.
      - name: Check if detection needed
        id: detection_guard
        if: always()
        env:
          OUTPUT_TYPES: ${{ needs.agent.outputs.output_types }}
          HAS_PATCH: ${{ needs.agent.outputs.has_patch }}
        run: |
          if [[ -n "$OUTPUT_TYPES" || "$HAS_PATCH" == "true" ]]; then
            echo "run_detection=true" >> "$GITHUB_OUTPUT"
            echo "Detection will run: output_types=$OUTPUT_TYPES, has_patch=$HAS_PATCH"
          else
            echo "run_detection=false" >> "$GITHUB_OUTPUT"
            echo "Detection skipped: no agent outputs or patches to analyze"
          fi
      # Remove every MCP server config so the detection agent has no MCP tools.
      - name: Clear MCP configuration for detection
        if: always() && steps.detection_guard.outputs.run_detection == 'true'
        run: |
          rm -f /tmp/gh-aw/mcp-config/mcp-servers.json
          rm -f /home/runner/.copilot/mcp-config.json
          rm -f "$GITHUB_WORKSPACE/.gemini/settings.json"
      # Copy prompt, agent output, and patch/bundle files into a dedicated
      # directory; every copy is best-effort (files may be absent).
      - name: Prepare threat detection files
        if: always() && steps.detection_guard.outputs.run_detection == 'true'
        run: |
          mkdir -p /tmp/gh-aw/threat-detection/aw-prompts
          cp /tmp/gh-aw/aw-prompts/prompt.txt /tmp/gh-aw/threat-detection/aw-prompts/prompt.txt 2>/dev/null || true
          cp /tmp/gh-aw/agent_output.json /tmp/gh-aw/threat-detection/agent_output.json 2>/dev/null || true
          for f in /tmp/gh-aw/aw-*.patch; do
            [ -f "$f" ] && cp "$f" /tmp/gh-aw/threat-detection/ 2>/dev/null || true
          done
          for f in /tmp/gh-aw/aw-*.bundle; do
            [ -f "$f" ] && cp "$f" /tmp/gh-aw/threat-detection/ 2>/dev/null || true
          done
          echo "Prepared threat detection files:"
          ls -la /tmp/gh-aw/threat-detection/ 2>/dev/null || true
      - name: Setup threat detection
        if: always() && steps.detection_guard.outputs.run_detection == 'true'
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          WORKFLOW_NAME: "Smoke Claude"
          WORKFLOW_DESCRIPTION: "Smoke test workflow that validates Claude engine functionality by reviewing recent PRs twice daily"
          HAS_PATCH: ${{ needs.agent.outputs.has_patch }}
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/setup_threat_detection.cjs');
            await main();
      - name: Ensure threat-detection directory and log
        if: always() && steps.detection_guard.outputs.run_detection == 'true'
        run: |
          mkdir -p /tmp/gh-aw/threat-detection
          touch /tmp/gh-aw/threat-detection/detection.log
      # NOTE(review): the three install steps below are not gated on
      # detection_guard, so they run even when detection is skipped.
      - name: Setup Node.js
        uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
        with:
          node-version: '24'
          package-manager-cache: false
      - name: Install AWF binary
        run: bash ${RUNNER_TEMP}/gh-aw/actions/install_awf_binary.sh v0.25.10
      - name: Install Claude Code CLI
        run: npm install -g @anthropic-ai/claude-code@latest
      # Run the detection agent inside the AWF firewall sandbox with a fixed
      # tool allow-list and a narrower egress-domain allow-list; ANTHROPIC_API_KEY
      # is excluded from the sandbox env (served via the API proxy instead).
      - name: Execute Claude Code CLI
        if: always() && steps.detection_guard.outputs.run_detection == 'true'
        id: detection_agentic_execution
        # Allowed tools (sorted):
        # - Bash
        # - BashOutput
        # - ExitPlanMode
        # - Glob
        # - Grep
        # - KillBash
        # - LS
        # - NotebookRead
        # - Read
        # - Task
        # - TodoWrite
        timeout-minutes: 20
        run: |
          set -o pipefail
          touch /tmp/gh-aw/agent-step-summary.md
          # shellcheck disable=SC1003
          sudo -E awf --container-workdir "${GITHUB_WORKSPACE}" --mount "${RUNNER_TEMP}/gh-aw:${RUNNER_TEMP}/gh-aw:ro" --mount "${RUNNER_TEMP}/gh-aw:/host${RUNNER_TEMP}/gh-aw:ro" --tty --env-all --exclude-env ANTHROPIC_API_KEY --allow-domains '*.githubusercontent.com,anthropic.com,api.anthropic.com,api.github.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,cdn.playwright.dev,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,files.pythonhosted.org,ghcr.io,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,playwright.download.prss.microsoft.com,ppa.launchpad.net,pypi.org,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,sentry.io,statsig.anthropic.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com' --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --audit-dir /tmp/gh-aw/sandbox/firewall/audit --enable-host-access --image-tag 0.25.10 --skip-pull --enable-api-proxy \
            -- /bin/bash -c 'export PATH="$(find /opt/hostedtoolcache -maxdepth 4 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && claude --print --disable-slash-commands --no-chrome --allowed-tools Bash,BashOutput,ExitPlanMode,Glob,Grep,KillBash,LS,NotebookRead,Read,Task,TodoWrite --debug-file /tmp/gh-aw/threat-detection/detection.log --verbose --permission-mode bypassPermissions --output-format stream-json "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_DETECTION_CLAUDE:+ --model "$GH_AW_MODEL_DETECTION_CLAUDE"}' 2>&1 | tee -a /tmp/gh-aw/threat-detection/detection.log
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          BASH_DEFAULT_TIMEOUT_MS: 60000
          BASH_MAX_TIMEOUT_MS: 60000
          DISABLE_BUG_COMMAND: 1
          DISABLE_ERROR_REPORTING: 1
          DISABLE_TELEMETRY: 1
          # Optional model override, configured via repository variable.
          GH_AW_MODEL_DETECTION_CLAUDE: ${{ vars.GH_AW_MODEL_DETECTION_CLAUDE || '' }}
          GH_AW_PHASE: detection
          GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
          GH_AW_VERSION: dev
          GITHUB_AW: true
          GITHUB_STEP_SUMMARY: /tmp/gh-aw/agent-step-summary.md
          GITHUB_WORKSPACE: ${{ github.workspace }}
          GIT_AUTHOR_EMAIL: github-actions[bot]@users.noreply.github.com
          GIT_AUTHOR_NAME: github-actions[bot]
          GIT_COMMITTER_EMAIL: github-actions[bot]@users.noreply.github.com
          GIT_COMMITTER_NAME: github-actions[bot]
          MCP_TIMEOUT: 120000
          MCP_TOOL_TIMEOUT: 60000
      - name: Upload threat detection log
        if: always() && steps.detection_guard.outputs.run_detection == 'true'
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
        with:
          name: detection
          path: /tmp/gh-aw/threat-detection/detection.log
          if-no-files-found: ignore
      # Produces the detection_conclusion/detection_success job outputs.
      - name: Parse and conclude threat detection
        id: detection_conclusion
        if: always()
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          RUN_DETECTION: ${{ steps.detection_guard.outputs.run_detection }}
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/parse_threat_detection_results.cjs');
            await main();
  # Activation gate: only same-repository pull requests, and for `labeled`
  # events only the 'smoke' label, proceed; then the triggering actor must
  # hold one of the roles in GH_AW_REQUIRED_ROLES.
  pre_activation:
    if: >
      (github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id) &&
      (github.event_name != 'pull_request' || github.event.action != 'labeled' || github.event.label.name == 'smoke')
    runs-on: ubuntu-slim
    permissions:
      contents: read
    outputs:
      # True only when the membership check below passes.
      activated: ${{ steps.check_membership.outputs.is_team_member == 'true' }}
      # No command-trigger for this workflow; always empty.
      matched_command: ''
    steps:
      - name: Checkout actions folder
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          repository: github/gh-aw
          sparse-checkout: |
            actions
          persist-credentials: false
      - name: Setup Scripts
        uses: ./actions/setup
        with:
          destination: ${{ runner.temp }}/gh-aw/actions
      - name: Check team membership for workflow
        id: check_membership
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          GH_AW_REQUIRED_ROLES: "admin,maintainer,write"
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/check_membership.cjs');
            await main();
  # Applies the agent's validated "safe outputs" (comments, labels, issues,
  # reviews, pushes, ...) per the handler configuration below. Runs only after
  # the detection job succeeded and the agent was not skipped/cancelled.
  safe_outputs:
    needs:
      - agent
      - detection
    if: (!cancelled()) && needs.agent.result != 'skipped' && needs.detection.result == 'success'
    runs-on: ubuntu-slim
    permissions:
      contents: read
      discussions: write
      issues: write
      pull-requests: write
    timeout-minutes: 15
    env:
      GH_AW_CALLER_WORKFLOW_ID: "${{ github.repository }}/smoke-claude"
      GH_AW_ENGINE_ID: "claude"
      GH_AW_ENGINE_MODEL: ${{ needs.agent.outputs.model }}
      GH_AW_SAFE_OUTPUT_MESSAGES: "{\"footer\":\"\\u003e 💥 *[THE END] — Illustrated by [{workflow_name}]({run_url})*{history_link}\",\"runStarted\":\"💥 **WHOOSH!** [{workflow_name}]({run_url}) springs into action on this {event_type}! *[Panel 1 begins...]*\",\"runSuccess\":\"🎬 **THE END** — [{workflow_name}]({run_url}) **MISSION: ACCOMPLISHED!** The hero saves the day! ✨\",\"runFailure\":\"💫 **TO BE CONTINUED...** [{workflow_name}]({run_url}) {status}! Our hero faces unexpected challenges...\"}"
      GH_AW_WORKFLOW_ID: "smoke-claude"
      GH_AW_WORKFLOW_NAME: "Smoke Claude"
    # All outputs come from the single process_safe_outputs step; the
    # conclusion job consumes the code_push_failure_* values.
    outputs:
      add_reviewer_reviewers_added: ${{ steps.process_safe_outputs.outputs.reviewers_added }}
      code_push_failure_count: ${{ steps.process_safe_outputs.outputs.code_push_failure_count }}
      code_push_failure_errors: ${{ steps.process_safe_outputs.outputs.code_push_failure_errors }}
      comment_id: ${{ steps.process_safe_outputs.outputs.comment_id }}
      comment_url: ${{ steps.process_safe_outputs.outputs.comment_url }}
      create_discussion_error_count: ${{ steps.process_safe_outputs.outputs.create_discussion_error_count }}
      create_discussion_errors: ${{ steps.process_safe_outputs.outputs.create_discussion_errors }}
      created_issue_number: ${{ steps.process_safe_outputs.outputs.created_issue_number }}
      created_issue_url: ${{ steps.process_safe_outputs.outputs.created_issue_url }}
      process_safe_outputs_processed_count: ${{ steps.process_safe_outputs.outputs.processed_count }}
      process_safe_outputs_temporary_id_map: ${{ steps.process_safe_outputs.outputs.temporary_id_map }}
      push_commit_sha: ${{ steps.process_safe_outputs.outputs.push_commit_sha }}
      push_commit_url: ${{ steps.process_safe_outputs.outputs.push_commit_url }}
    steps:
      - name: Checkout actions folder
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          repository: github/gh-aw
          sparse-checkout: |
            actions
          persist-credentials: false
      - name: Setup Scripts
        uses: ./actions/setup
        with:
          destination: ${{ runner.temp }}/gh-aw/actions
      # Best-effort download of the agent's outputs.
      - name: Download agent output artifact
        id: download-agent-output
        continue-on-error: true
        uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
        with:
          name: agent
          path: /tmp/gh-aw/
      - name: Setup agent output environment variable
        id: setup-agent-output-env
        if: steps.download-agent-output.outcome == 'success'
        run: |
          mkdir -p /tmp/gh-aw/
          find "/tmp/gh-aw/" -type f -print
          echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/agent_output.json" >> "$GITHUB_OUTPUT"
      - name: Configure GH_HOST for enterprise compatibility
        id: ghes-host-config
        shell: bash
        run: |
          # Derive GH_HOST from GITHUB_SERVER_URL so the gh CLI targets the correct
          # GitHub instance (GHES/GHEC). On github.com this is a harmless no-op.
          GH_HOST="${GITHUB_SERVER_URL#https://}"
          GH_HOST="${GH_HOST#http://}"
          echo "GH_HOST=${GH_HOST}" >> "$GITHUB_ENV"
      # Materialize the custom post_slack_message handler referenced by
      # GH_AW_SAFE_OUTPUT_SCRIPTS below. The heredoc body is generated content
      # and must stay verbatim.
      - name: Setup Safe Outputs Custom Scripts
        run: |
          cat > ${RUNNER_TEMP}/gh-aw/actions/safe_output_script_post_slack_message.cjs << 'GH_AW_SAFE_OUTPUT_SCRIPT_POST_SLACK_MESSAGE_9e49b541f5e2b0bc_EOF'
          // @ts-check
          /// <reference types="./safe-output-script" />
          // Auto-generated safe-output script handler: post-slack-message
          const { sanitizeContent } = require("./sanitize_content.cjs");
          /** @type {import('./types/safe-output-script').SafeOutputScriptMain} */
          async function main(config = {}) {
            const { channel, message } = config;
            return async function handlePostSlackMessage(item, resolvedTemporaryIds, temporaryIdMap) {
              const targetChannel = item.channel || "#general";
              const text = item.message || "(no message)";
              core.info(`[FICTITIOUS SLACK] → ${targetChannel}: ${text}`);
              return { success: true, channel: targetChannel, message: text };
            };
          }
          module.exports = { main };
          GH_AW_SAFE_OUTPUT_SCRIPT_POST_SLACK_MESSAGE_9e49b541f5e2b0bc_EOF
      # Dispatch each agent output item to its handler; per-type limits and
      # allow-lists come from GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG.
      - name: Process Safe Outputs
        id: process_safe_outputs
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
          GH_AW_ALLOWED_DOMAINS: "*.githubusercontent.com,127.0.0.1,::1,anthropic.com,api.anthropic.com,api.github.com,api.snapcraft.io,app.renovatebot.com,appveyor.com,archive.ubuntu.com,azure.archive.ubuntu.com,badgen.net,cdn.playwright.dev,circleci.com,codacy.com,codeclimate.com,codecov.io,codeload.github.com,coveralls.io,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,deepsource.io,docs.github.com,drone.io,files.pythonhosted.org,ghcr.io,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.blog,github.com,github.githubassets.com,go.dev,golang.org,goproxy.io,host.docker.internal,img.shields.io,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,localhost,mcp.tavily.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,pkg.go.dev,playwright.download.prss.microsoft.com,ppa.launchpad.net,proxy.golang.org,pypi.org,raw.githubusercontent.com,readthedocs.io,readthedocs.org,registry.npmjs.org,renovatebot.com,s.symcb.com,s.symcd.com,security.ubuntu.com,semaphoreci.com,sentry.io,shields.io,snyk.io,sonarcloud.io,sonarqube.com,statsig.anthropic.com,storage.googleapis.com,sum.golang.org,travis-ci.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com,www.googleapis.com"
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_API_URL: ${{ github.api_url }}
          GH_AW_SAFE_OUTPUT_SCRIPTS: "{\"post_slack_message\":\"safe_output_script_post_slack_message.cjs\"}"
          GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"add_comment\":{\"hide_older_comments\":true,\"max\":2},\"add_labels\":{\"allowed\":[\"smoke-claude\"]},\"add_reviewer\":{\"max\":2,\"target\":\"*\"},\"close_pull_request\":{\"max\":1,\"staged\":true},\"create_issue\":{\"close_older_issues\":true,\"close_older_key\":\"smoke-claude\",\"expires\":2,\"group\":true,\"labels\":[\"automation\",\"testing\"],\"max\":1},\"create_pull_request_review_comment\":{\"max\":5,\"side\":\"RIGHT\",\"target\":\"*\"},\"missing_data\":{},\"missing_tool\":{},\"noop\":{\"max\":1,\"report-as-issue\":\"true\"},\"push_to_pull_request_branch\":{\"allowed_files\":[\".github/smoke-claude-push-test.md\"],\"if_no_changes\":\"warn\",\"max_patch_size\":1024,\"protected_files\":[\"package.json\",\"bun.lockb\",\"bunfig.toml\",\"deno.json\",\"deno.jsonc\",\"deno.lock\",\"global.json\",\"NuGet.Config\",\"Directory.Packages.props\",\"mix.exs\",\"mix.lock\",\"go.mod\",\"go.sum\",\"stack.yaml\",\"stack.yaml.lock\",\"pom.xml\",\"build.gradle\",\"build.gradle.kts\",\"settings.gradle\",\"settings.gradle.kts\",\"gradle.properties\",\"package-lock.json\",\"yarn.lock\",\"pnpm-lock.yaml\",\"npm-shrinkwrap.json\",\"requirements.txt\",\"Pipfile\",\"Pipfile.lock\",\"pyproject.toml\",\"setup.py\",\"setup.cfg\",\"Gemfile\",\"Gemfile.lock\",\"uv.lock\",\"CODEOWNERS\",\"CLAUDE.md\"],\"protected_path_prefixes\":[\".github/\",\".agents/\",\".claude/\"],\"staged\":true,\"target\":\"*\"},\"resolve_pull_request_review_thread\":{\"max\":5},\"submit_pull_request_review\":{\"footer\":\"always\",\"max\":1},\"update_pull_request\":{\"allow_body\":true,\"allow_title\":true,\"max\":1,\"target\":\"*\"}}"
        with:
          github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/safe_output_handler_manager.cjs');
            await main();
      - name: Upload Safe Output Items
        if: always()
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
        with:
          name: safe-output-items
          path: /tmp/gh-aw/safe-output-items.jsonl
          if-no-files-found: ignore
  # Persist cache-memory: re-downloads the cache-memory artifact uploaded by
  # the agent job and, when it is non-empty, saves it into the actions cache
  # under a run-scoped key.
  update_cache_memory:
    needs:
      - agent
      - detection
    # Run on a successful agent as long as detection did not fail.
    if: >
      always() && (needs.detection.result == 'success' || needs.detection.result == 'skipped') &&
      needs.agent.result == 'success'
    runs-on: ubuntu-slim
    permissions:
      contents: read
    env:
      # Workflow id with non-alphanumerics stripped; used in the cache key.
      GH_AW_WORKFLOW_ID_SANITIZED: smokeclaude
    steps:
      - name: Checkout actions folder
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          repository: github/gh-aw
          sparse-checkout: |
            actions
          persist-credentials: false
      - name: Setup Scripts
        uses: ./actions/setup
        with:
          destination: ${{ runner.temp }}/gh-aw/actions
      # Best-effort: the artifact may be missing if the agent uploaded nothing.
      - name: Download cache-memory artifact (default)
        id: download_cache_default
        uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
        continue-on-error: true
        with:
          name: cache-memory
          path: /tmp/gh-aw/cache-memory
      # Skip the cache save entirely when the folder is absent or empty.
      - name: Check if cache-memory folder has content (default)
        id: check_cache_default
        shell: bash
        run: |
          if [ -d "/tmp/gh-aw/cache-memory" ] && [ "$(ls -A /tmp/gh-aw/cache-memory 2>/dev/null)" ]; then
            echo "has_content=true" >> "$GITHUB_OUTPUT"
          else
            echo "has_content=false" >> "$GITHUB_OUTPUT"
          fi
      # Run-id-suffixed key makes every save unique; restores elsewhere rely on
      # prefix matching.
      - name: Save cache-memory to cache (default)
        if: steps.check_cache_default.outputs.has_content == 'true'
        uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
        with:
          key: memory-none-nopolicy-${{ env.GH_AW_WORKFLOW_ID_SANITIZED }}-${{ github.run_id }}
          path: /tmp/gh-aw/cache-memory