Upstream Sync #42
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
name: Upstream Sync

# Checks nightly for new upstream OpenClaw release tags.
# When found, scans each commit with ClawGuard, then creates a PR to merge into yourclaw branch.

on:
  schedule:
    # Run at 02:00 UTC daily
    - cron: "0 2 * * *"
  workflow_dispatch:
    inputs:
      force_tag:
        description: "Force sync to a specific upstream tag (e.g., v2026.2.27)"
        required: false
        type: string

# Only one sync at a time; never cancel an in-flight sync (a half-pushed
# sync branch is worse than a late one).
concurrency:
  group: upstream-sync
  cancel-in-progress: false

env:
  # Quoted: URL contains YAML-special characters; keep it an explicit string.
  UPSTREAM_REPO: "https://github.com/openclaw/openclaw.git"
  FORK_BRANCH: yourclaw
jobs:
  # Detects whether upstream has published a release tag newer than the one
  # recorded in yourclaw-patches/LAST_SYNCED_TAG, and exposes the result as
  # job outputs for the downstream scan/PR jobs.
  check-upstream:
    name: Check for new upstream release
    runs-on: ubuntu-latest
    outputs:
      has_new_release: ${{ steps.check.outputs.has_new_release }}
      new_tag: ${{ steps.check.outputs.new_tag }}
      last_synced_tag: ${{ steps.check.outputs.last_synced_tag }}
      commit_count: ${{ steps.check.outputs.commit_count }}
    steps:
      - name: Checkout fork
        uses: actions/checkout@v4
        with:
          ref: ${{ env.FORK_BRANCH }}
          # Full history is required for merge-base / rev-list range math.
          fetch-depth: 0

      - name: Add upstream remote
        # UPSTREAM_REPO comes from workflow-level env; reference it as a shell
        # variable rather than a ${{ }} expression so the script stays literal.
        run: git remote add upstream "${UPSTREAM_REPO}"

      - name: Fetch upstream tags
        run: git fetch upstream --tags --force

      - name: Check for new release tag
        id: check
        env:
          # SECURITY: pass the workflow_dispatch input via env instead of
          # interpolating ${{ inputs.force_tag }} directly into the script.
          # Inline interpolation lets a crafted input inject arbitrary shell.
          FORCE_TAG: ${{ inputs.force_tag }}
        run: |
          set -euo pipefail
          # Read last synced tag (missing file on first run -> empty string)
          LAST_SYNCED=$(cat yourclaw-patches/LAST_SYNCED_TAG 2>/dev/null | tr -d '[:space:]' || echo "")
          echo "last_synced_tag=${LAST_SYNCED}" >> "$GITHUB_OUTPUT"
          # Use forced tag if provided
          if [ -n "${FORCE_TAG}" ]; then
            NEW_TAG="${FORCE_TAG}"
            echo "Forced sync to tag: ${NEW_TAG}"
          else
            # Find latest stable upstream tag (only match exact vYYYY.M.D format).
            # `|| true` keeps `set -o pipefail` from killing the script when no
            # tag matches; the explicit check below gives a clear error instead.
            NEW_TAG=$(git tag -l 'v*' --sort=-version:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | head -1 || true)
          fi
          if [ -z "${NEW_TAG}" ]; then
            echo "::error::No stable upstream release tag found (pattern vX.Y.Z)"
            exit 1
          fi
          echo "Latest upstream stable tag: ${NEW_TAG}"
          echo "Last synced tag: ${LAST_SYNCED}"
          if [ "${NEW_TAG}" = "${LAST_SYNCED}" ] && [ -z "${FORCE_TAG}" ]; then
            echo "Already up to date with ${NEW_TAG}"
            echo "has_new_release=false" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          # Count commits between the actual fork branch and the new tag.
          # We use fork-branch..tag (not last-synced-tag..tag) because LAST_SYNCED_TAG
          # can be stale or manually bumped without the content actually being synced.
          # This gives the true commit count that will appear in the PR.
          COMMIT_COUNT=$(git rev-list --count "origin/${FORK_BRANCH}..${NEW_TAG}" 2>/dev/null || echo "unknown")
          echo "new_tag=${NEW_TAG}" >> "$GITHUB_OUTPUT"
          echo "has_new_release=true" >> "$GITHUB_OUTPUT"
          echo "commit_count=${COMMIT_COUNT}" >> "$GITHUB_OUTPUT"
          echo "New release found: ${NEW_TAG} (${COMMIT_COUNT} commits vs ${FORK_BRANCH}, tag delta since ${LAST_SYNCED})"
| clawguard-scan: | |
| name: ClawGuard security scan | |
| needs: check-upstream | |
| if: needs.check-upstream.outputs.has_new_release == 'true' | |
| runs-on: ubuntu-latest | |
| outputs: | |
| scan_passed: ${{ steps.scan.outputs.passed }} | |
| scan_summary: ${{ steps.scan.outputs.summary }} | |
| steps: | |
| - name: Checkout fork | |
| uses: actions/checkout@v4 | |
| with: | |
| ref: ${{ env.FORK_BRANCH }} | |
| fetch-depth: 0 | |
| - name: Add upstream remote & fetch | |
| run: | | |
| git remote add upstream ${{ env.UPSTREAM_REPO }} | |
| git fetch upstream --tags --force | |
| - name: Setup Node.js | |
| uses: actions/setup-node@v4 | |
| with: | |
| node-version: "22" | |
| - name: Install ClawGuard scanner and security tools | |
| id: install-tools | |
| run: | | |
| # Install ClawGuard scanner from npm | |
| mkdir -p /tmp/clawguard-runner && cd /tmp/clawguard-runner | |
| npm init -y > /dev/null 2>&1 | |
| npm install @yourclaw/clawguard-scanner@latest | |
| SCANNER_VERSION=$(node -e "console.log(require('@yourclaw/clawguard-scanner/package.json').version)" 2>/dev/null || echo "unknown") | |
| echo "✅ @yourclaw/clawguard-scanner@${SCANNER_VERSION} installed" | |
| echo "scanner_version=${SCANNER_VERSION}" >> "$GITHUB_OUTPUT" | |
| # Install semgrep (SAST scanner) | |
| pip install semgrep > /dev/null 2>&1 || echo "⚠ semgrep install failed — will skip SAST scan" | |
| SEMGREP_VERSION=$(semgrep --version 2>/dev/null || echo "not installed") | |
| echo "semgrep_version=${SEMGREP_VERSION}" >> "$GITHUB_OUTPUT" | |
| [ "${SEMGREP_VERSION}" != "not installed" ] && echo "✅ semgrep@${SEMGREP_VERSION} installed" || true | |
| # Install gitleaks (secret detection) | |
| GITLEAKS_VERSION="8.24.0" | |
| curl -sSfL "https://github.com/gitleaks/gitleaks/releases/download/v${GITLEAKS_VERSION}/gitleaks_${GITLEAKS_VERSION}_linux_x64.tar.gz" \ | |
| | tar xz -C /usr/local/bin gitleaks 2>/dev/null || echo "⚠ gitleaks install failed — will skip secret scan" | |
| GITLEAKS_ACTUAL=$(gitleaks version 2>/dev/null || echo "not installed") | |
| echo "gitleaks_version=${GITLEAKS_ACTUAL}" >> "$GITHUB_OUTPUT" | |
| [ "${GITLEAKS_ACTUAL}" != "not installed" ] && echo "✅ gitleaks@${GITLEAKS_ACTUAL} installed" || true | |
| - name: Scan commits between tags | |
| id: scan | |
| env: | |
| LAST_TAG: ${{ needs.check-upstream.outputs.last_synced_tag }} | |
| NEW_TAG: ${{ needs.check-upstream.outputs.new_tag }} | |
| run: | | |
| set -euo pipefail | |
| SCAN_REPORT="" | |
| SCAN_PASSED=true | |
| ISSUES_FOUND=0 | |
| # Get the diff range for scanning. | |
| # Use the merge-base between the fork branch and the new tag to find | |
| # what's actually new. This is more reliable than LAST_SYNCED_TAG which | |
| # can be stale or manually bumped without actual content sync. | |
| MERGE_BASE=$(git merge-base "origin/${FORK_BRANCH}" "${NEW_TAG}" 2>/dev/null || echo "") | |
| if [ -n "${MERGE_BASE}" ]; then | |
| DIFF_RANGE="${MERGE_BASE}..${NEW_TAG}" | |
| elif [ -n "${LAST_TAG}" ]; then | |
| # Fallback to tag-based range | |
| DIFF_RANGE="${LAST_TAG}..${NEW_TAG}" | |
| else | |
| # First sync — scan last 50 commits | |
| DIFF_RANGE="$(git rev-list --max-count=50 ${NEW_TAG} | tail -1)..${NEW_TAG}" | |
| fi | |
| echo "Scanning commits in range: ${DIFF_RANGE}" | |
| echo "" | |
| # Write diff to temp file to avoid SIGPIPE issues with large diffs. | |
| # (588+ commits = huge diff; piping through head/grep triggers SIGPIPE | |
| # under set -o pipefail, causing exit 141) | |
| DIFF_FILE=$(mktemp) | |
| git diff "${DIFF_RANGE}" -- '*.ts' '*.js' '*.json' '*.yml' '*.yaml' > "${DIFF_FILE}" 2>/dev/null || true | |
| echo "Diff size: $(wc -c < "${DIFF_FILE}") bytes" | |
| # ─── Part 1: Diff-based pattern matching (fast, always runs) ─── | |
| # Pattern 1: Hardcoded credentials or API keys | |
| CRED_MATCHES=$(grep -iE '(api[_-]?key|secret[_-]?key|password|token)\s*[:=]\s*["\x27][A-Za-z0-9+/=]{20,}' "${DIFF_FILE}" 2>/dev/null | head -5 || true) | |
| if [ -n "${CRED_MATCHES}" ]; then | |
| SCAN_REPORT="${SCAN_REPORT}\n⚠️ CREDENTIAL LEAK: Possible hardcoded credentials detected" | |
| SCAN_REPORT="${SCAN_REPORT}\n\`\`\`\n${CRED_MATCHES}\n\`\`\`" | |
| ISSUES_FOUND=$((ISSUES_FOUND + 1)) | |
| echo "::warning::Possible hardcoded credentials detected in diff" | |
| fi | |
| # Pattern 2: eval() or Function() constructor usage | |
| EVAL_COUNT=$(grep -c '^\+.*\beval\s*(' "${DIFF_FILE}" 2>/dev/null || echo "0") | |
| FUNC_COUNT=$(grep -c '^\+.*\bnew\s*Function\s*(' "${DIFF_FILE}" 2>/dev/null || echo "0") | |
| if [ "$EVAL_COUNT" -gt 0 ] 2>/dev/null || [ "$FUNC_COUNT" -gt 0 ] 2>/dev/null; then | |
| EVAL_SAMPLES=$(grep -E '^\+.*(eval\s*\(|new\s*Function\s*\()' "${DIFF_FILE}" | head -5 || true) | |
| SCAN_REPORT="${SCAN_REPORT}\n⚠️ CODE INJECTION: ${EVAL_COUNT} eval() and ${FUNC_COUNT} new Function() calls added" | |
| SCAN_REPORT="${SCAN_REPORT}\n\`\`\`\n${EVAL_SAMPLES}\n\`\`\`" | |
| ISSUES_FOUND=$((ISSUES_FOUND + 1)) | |
| echo "::warning::${EVAL_COUNT} eval() and ${FUNC_COUNT} new Function() calls found" | |
| fi | |
| # Pattern 3: Suspicious network calls (data exfiltration) | |
| NETWORK_MATCHES=$(grep -E '^\+.*fetch\s*\(\s*["\x27]https?://' "${DIFF_FILE}" | grep -iv 'localhost\|127\.0\.0\.1\|github\.com\|npmjs\|googleapis' | head -5 || true) | |
| if [ -n "${NETWORK_MATCHES}" ]; then | |
| SCAN_REPORT="${SCAN_REPORT}\n⚠️ NETWORK: New external HTTP calls detected — review for data exfiltration" | |
| SCAN_REPORT="${SCAN_REPORT}\n\`\`\`\n${NETWORK_MATCHES}\n\`\`\`" | |
| ISSUES_FOUND=$((ISSUES_FOUND + 1)) | |
| echo "::warning::External HTTP calls detected in diff" | |
| fi | |
| # Pattern 4: Dependency changes (supply chain risk) | |
| DEP_DIFF_FILE=$(mktemp) | |
| git diff "${DIFF_RANGE}" -- 'package.json' 'pnpm-lock.yaml' > "${DEP_DIFF_FILE}" 2>/dev/null || true | |
| if grep -q '^\+' "${DEP_DIFF_FILE}" 2>/dev/null; then | |
| DEPS_ADDED=$(grep '^\+.*"[^"]*":' "${DEP_DIFF_FILE}" | grep -v '^\+\+\+' | wc -l || echo "0") | |
| DEPS_SAMPLES=$(grep '^\+.*"[^"]*":' "${DEP_DIFF_FILE}" | grep -v '^\+\+\+' | head -10 || true) | |
| SCAN_REPORT="${SCAN_REPORT}\nℹ️ SUPPLY CHAIN: ${DEPS_ADDED} dependency changes detected — manual review recommended" | |
| SCAN_REPORT="${SCAN_REPORT}\n\`\`\`\n${DEPS_SAMPLES}\n\`\`\`" | |
| fi | |
| rm -f "${DEP_DIFF_FILE}" | |
| # Pattern 5: Shell command injection vectors | |
| EXEC_MATCHES=$(grep -E '^\+.*(child_process|execSync|spawn\()' "${DIFF_FILE}" | head -5 || true) | |
| if [ -n "${EXEC_MATCHES}" ]; then | |
| EXEC_COUNT=$(echo "${EXEC_MATCHES}" | wc -l | tr -d ' ') | |
| SCAN_REPORT="${SCAN_REPORT}\n⚠️ COMMAND EXEC: ${EXEC_COUNT} new shell execution calls added" | |
| SCAN_REPORT="${SCAN_REPORT}\n\`\`\`\n${EXEC_MATCHES}\n\`\`\`" | |
| ISSUES_FOUND=$((ISSUES_FOUND + 1)) | |
| echo "::warning::${EXEC_COUNT} shell execution calls found" | |
| fi | |
| # Pattern 6: Filesystem access outside workspace | |
| FS_MATCHES=$(grep -E '^\+.*(\/etc\/|\/root\/|\/home\/|process\.env\.HOME)' "${DIFF_FILE}" | head -5 || true) | |
| if [ -n "${FS_MATCHES}" ]; then | |
| SCAN_REPORT="${SCAN_REPORT}\n⚠️ FS ACCESS: Possible filesystem access outside workspace boundaries" | |
| SCAN_REPORT="${SCAN_REPORT}\n\`\`\`\n${FS_MATCHES}\n\`\`\`" | |
| ISSUES_FOUND=$((ISSUES_FOUND + 1)) | |
| echo "::warning::Filesystem access outside workspace detected" | |
| fi | |
| rm -f "${DIFF_FILE}" | |
| echo "" | |
| echo "=== Diff-based scan complete: ${ISSUES_FOUND} issues ===" | |
| echo "" | |
| # ─── Part 2: ClawGuard scanner (deep analysis on changed .ts/.js files) ─── | |
| # Load scan suppressions (false positives we've already reviewed) | |
| IGNORE_FILE="yourclaw-patches/scan-ignore.json" | |
| if [ -f "${IGNORE_FILE}" ]; then | |
| SUPPRESSIONS=$(cat "${IGNORE_FILE}" | jq -c '.suppressions // []') | |
| SUPPRESSION_COUNT=$(echo "${SUPPRESSIONS}" | jq 'length') | |
| echo "Loaded ${SUPPRESSION_COUNT} scan suppressions from ${IGNORE_FILE}" | |
| else | |
| SUPPRESSIONS="[]" | |
| echo "No scan-ignore.json found" | |
| fi | |
| echo "=== Running ClawGuard scanner on changed files ===" | |
| CHANGED_FILES_FILE=$(mktemp) | |
| git diff --name-only "${DIFF_RANGE}" -- '*.ts' '*.js' > "${CHANGED_FILES_FILE}" 2>/dev/null || true | |
| # Limit to first 50 files | |
| CHANGED_FILES=$(head -50 "${CHANGED_FILES_FILE}") | |
| TOTAL_CHANGED=$(wc -l < "${CHANGED_FILES_FILE}" | tr -d ' ') | |
| rm -f "${CHANGED_FILES_FILE}" | |
| echo "Files to scan: ${TOTAL_CHANGED} (scanning first 50)" | |
| CG_CRITICAL=0 | |
| CG_HIGH=0 | |
| CG_MEDIUM=0 | |
| CG_REPORT="" | |
| for f in ${CHANGED_FILES}; do | |
| [ -f "$f" ] || continue | |
| echo " Scanning: $f" | |
| # Run full ClawGuard scanner (semgrep + gitleaks + prompt injection + npm audit) | |
| # Filters out suppressed findings from scan-ignore.json | |
| RESULT=$(NODE_PATH=/tmp/clawguard-runner/node_modules node -e " | |
| const { scanSkill } = require('@yourclaw/clawguard-scanner'); | |
| (async () => { | |
| try { | |
| const filePath = process.argv[1]; | |
| const suppressions = JSON.parse(process.argv[2] || '[]'); | |
| const report = await scanSkill(filePath, { builtinOnly: false, skipAI: true, scannerTimeout: 30000 }); | |
| // Filter out suppressed findings | |
| const relPath = filePath.replace(process.cwd() + '/', ''); | |
| let suppressed = 0; | |
| const findings = report.findings.filter(f => { | |
| const isSuppressed = suppressions.some(s => | |
| (s.rule === f.name || s.rule === f.id) && | |
| relPath.includes(s.file) | |
| ); | |
| if (isSuppressed) { suppressed++; return false; } | |
| return true; | |
| }); | |
| // Recalculate severity counts from filtered findings | |
| const counts = { critical: 0, high: 0, medium: 0, low: 0, info: 0 }; | |
| findings.forEach(f => { if (counts[f.severity] !== undefined) counts[f.severity]++; }); | |
| console.log(JSON.stringify({ | |
| status: findings.some(f => f.severity === 'critical' || f.severity === 'high') ? 'failed' : (findings.length > 0 ? 'warning' : 'passed'), | |
| ...counts, | |
| suppressed, | |
| scanners: report.scanners.map(s => ({ | |
| name: s.scanner, | |
| status: s.status, | |
| findingCount: s.findings.length, | |
| message: s.message || null | |
| })), | |
| findings: findings.map(f => ({ | |
| severity: f.severity, | |
| name: f.name, | |
| message: f.message, | |
| scanner: f.scanner, | |
| filePath: f.filePath || filePath | |
| })) | |
| })); | |
| } catch (e) { | |
| console.log(JSON.stringify({ status: 'error', error: e.message })); | |
| } | |
| })(); | |
| " "$(pwd)/${f}" "${SUPPRESSIONS}" 2>/dev/null || echo '{"status":"error","error":"scanner crashed"}') | |
| STATUS=$(echo "${RESULT}" | jq -r '.status // "error"' 2>/dev/null || echo "error") | |
| if [ "${STATUS}" = "error" ]; then | |
| ERROR_MSG=$(echo "${RESULT}" | jq -r '.error // "unknown"' 2>/dev/null || echo "unknown") | |
| echo " ⚠ Scanner error: ${ERROR_MSG}" | |
| continue | |
| fi | |
| CRIT=$(echo "${RESULT}" | jq -r '.critical // 0') | |
| HIGH=$(echo "${RESULT}" | jq -r '.high // 0') | |
| MED=$(echo "${RESULT}" | jq -r '.medium // 0') | |
| CG_CRITICAL=$((CG_CRITICAL + CRIT)) | |
| CG_HIGH=$((CG_HIGH + HIGH)) | |
| CG_MEDIUM=$((CG_MEDIUM + MED)) | |
| SUPPRESSED=$(echo "${RESULT}" | jq -r '.suppressed // 0' 2>/dev/null || echo "0") | |
| # Show which scanners ran | |
| echo "${RESULT}" | jq -r '.scanners[]? | " [\(.status)] \(.name): \(.findingCount) findings\(if .message then " — " + .message else "" end)"' 2>/dev/null || true | |
| if [ "$SUPPRESSED" -gt 0 ]; then | |
| echo " (${SUPPRESSED} finding(s) suppressed via scan-ignore.json)" | |
| fi | |
| # Show findings inline | |
| FINDINGS_COUNT=$(echo "${RESULT}" | jq '.findings | length' 2>/dev/null || echo "0") | |
| if [ "${FINDINGS_COUNT}" -gt 0 ]; then | |
| echo " Found ${FINDINGS_COUNT} issues (critical:${CRIT} high:${HIGH} medium:${MED})" | |
| echo "${RESULT}" | jq -r '.findings[] | " [\(.severity)] (\(.scanner)) \(.name): \(.message)"' 2>/dev/null || true | |
| FINDING_LINES=$(echo "${RESULT}" | jq -r '.findings[] | "- **[\(.severity)]** \(.name): \(.message) (\(.filePath))"' 2>/dev/null || true) | |
| CG_REPORT="${CG_REPORT}\n${FINDING_LINES}" | |
| else | |
| echo " ✅ Clean" | |
| fi | |
| done | |
| echo "" | |
| echo "=== ClawGuard scan complete: critical=${CG_CRITICAL} high=${CG_HIGH} medium=${CG_MEDIUM} ===" | |
| if [ "${CG_CRITICAL}" -gt 0 ] || [ "${CG_HIGH}" -gt 0 ]; then | |
| SCAN_REPORT="${SCAN_REPORT}\n\n🔴 **ClawGuard Scanner:** ${CG_CRITICAL} critical, ${CG_HIGH} high, ${CG_MEDIUM} medium findings" | |
| SCAN_REPORT="${SCAN_REPORT}\n${CG_REPORT}" | |
| ISSUES_FOUND=$((ISSUES_FOUND + CG_CRITICAL + CG_HIGH)) | |
| echo "::error::ClawGuard found ${CG_CRITICAL} critical and ${CG_HIGH} high severity issues" | |
| elif [ "${CG_MEDIUM}" -gt 0 ]; then | |
| SCAN_REPORT="${SCAN_REPORT}\n\n🟡 **ClawGuard Scanner:** ${CG_MEDIUM} medium findings (review recommended)" | |
| SCAN_REPORT="${SCAN_REPORT}\n${CG_REPORT}" | |
| else | |
| SCAN_REPORT="${SCAN_REPORT}\n\n✅ **ClawGuard Scanner:** No significant findings" | |
| fi | |
| # Summary | |
| if [ "${ISSUES_FOUND}" -gt 0 ]; then | |
| SCAN_PASSED=false | |
| SUMMARY="❌ ${ISSUES_FOUND} security issues found — manual review required" | |
| else | |
| SUMMARY="✅ No security issues detected" | |
| fi | |
| if [ -z "${SCAN_REPORT}" ]; then | |
| SCAN_REPORT="No issues found in scanned commits." | |
| fi | |
| echo "passed=${SCAN_PASSED}" >> "$GITHUB_OUTPUT" | |
| # Write summary to file (multiline output) | |
| { | |
| echo "summary<<CLAWGUARD_EOF" | |
| echo -e "## ClawGuard Security Scan Report\n" | |
| echo "**Range:** \`${DIFF_RANGE}\`" | |
| echo "**Result:** ${SUMMARY}" | |
| echo "" | |
| echo -e "${SCAN_REPORT}" | |
| echo "" | |
| echo "<details><summary>🔧 Scanner versions</summary>" | |
| echo "" | |
| echo "| Tool | Version |" | |
| echo "| ---- | ------- |" | |
| echo "| @yourclaw/clawguard-scanner | ${{ steps.install-tools.outputs.scanner_version }} |" | |
| echo "| semgrep | ${{ steps.install-tools.outputs.semgrep_version }} |" | |
| echo "| gitleaks | ${{ steps.install-tools.outputs.gitleaks_version }} |" | |
| echo "" | |
| echo "</details>" | |
| echo "CLAWGUARD_EOF" | |
| } >> "$GITHUB_OUTPUT" | |
| create-sync-pr: | |
| name: Create sync PR | |
| needs: [check-upstream, clawguard-scan] | |
| if: needs.check-upstream.outputs.has_new_release == 'true' | |
| runs-on: ubuntu-latest | |
| # Note: We use SYNC_PAT (a PAT with repo + workflow scopes) instead of GITHUB_TOKEN. | |
| # Reason 1: The yourclaw org restricts GITHUB_TOKEN to read-only (org-level policy). | |
| # Reason 2: GITHUB_TOKEN cannot push commits that modify .github/workflows/ files. | |
| # A fine-grained PAT or classic PAT with repo+workflow scopes solves both. | |
| steps: | |
| - name: Checkout fork | |
| uses: actions/checkout@v4 | |
| with: | |
| ref: ${{ env.FORK_BRANCH }} | |
| fetch-depth: 0 | |
| token: ${{ secrets.SYNC_PAT }} | |
| - name: Configure git | |
| run: | | |
| git config user.name "yourclaw-bot" | |
| git config user.email "[email protected]" | |
| - name: Add upstream & fetch | |
| run: | | |
| git remote add upstream ${{ env.UPSTREAM_REPO }} | |
| git fetch upstream --tags --force | |
| - name: Create sync branch and merge upstream tag | |
| id: merge | |
| env: | |
| NEW_TAG: ${{ needs.check-upstream.outputs.new_tag }} | |
| run: | | |
| set -euo pipefail | |
| SYNC_BRANCH="upstream-sync/${NEW_TAG}" | |
| echo "sync_branch=${SYNC_BRANCH}" >> "$GITHUB_OUTPUT" | |
| # Create sync branch from yourclaw | |
| git checkout -b "${SYNC_BRANCH}" | |
| # Attempt merge | |
| if git merge "${NEW_TAG}" --no-edit -m "sync: merge upstream ${NEW_TAG} into yourclaw"; then | |
| echo "has_conflicts=false" >> "$GITHUB_OUTPUT" | |
| else | |
| echo "Merge has conflicts — attempting auto-resolution..." | |
| # Layer 1: YourClaw-owned paths — keep ours (our patches take precedence) | |
| git checkout --ours yourclaw-patches/ Dockerfile.chainguard extensions/clawguard-registry/ 2>/dev/null || true | |
| git add yourclaw-patches/ Dockerfile.chainguard extensions/clawguard-registry/ 2>/dev/null || true | |
| # Layer 2: .github/workflows/ — accept upstream version temporarily | |
| # (the post-merge cleanup step deletes non-yourclaw workflows anyway) | |
| # Note: modify/delete conflicts need explicit "git add" after checkout --theirs | |
| # because git leaves them as "deleted by us" even after restoring the file. | |
| CONFLICTED_WORKFLOWS=$(git diff --name-only --diff-filter=U -- '.github/workflows/' 2>/dev/null || true) | |
| if [ -n "${CONFLICTED_WORKFLOWS}" ]; then | |
| echo "Auto-resolving workflow conflicts (accept upstream, will delete later):" | |
| while IFS= read -r wf; do | |
| [ -z "$wf" ] && continue | |
| echo " → ${wf}" | |
| git checkout --theirs "$wf" 2>/dev/null || true | |
| git add "$wf" 2>/dev/null || true | |
| done <<< "${CONFLICTED_WORKFLOWS}" | |
| fi | |
| # Layer 3: Content files we patch — accept upstream, re-apply patches later | |
| CONFLICTED_PATCHED=$(git diff --name-only --diff-filter=U -- \ | |
| docker-compose.yml Dockerfile Dockerfile.chainguard \ | |
| SECURITY.md .env.example .claude/settings.json 2>/dev/null || true) | |
| if [ -n "${CONFLICTED_PATCHED}" ]; then | |
| echo "Auto-resolving patched file conflicts (accept upstream, re-apply patches later):" | |
| while IFS= read -r pf; do | |
| [ -z "$pf" ] && continue | |
| echo " → ${pf}" | |
| git checkout --theirs "$pf" 2>/dev/null || true | |
| git add "$pf" 2>/dev/null || true | |
| done <<< "${CONFLICTED_PATCHED}" | |
| fi | |
| # Check for any remaining unresolved conflicts | |
| REMAINING=$(git diff --name-only --diff-filter=U 2>/dev/null || true) | |
| if [ -n "${REMAINING}" ]; then | |
| echo "" | |
| echo "::error::Unresolved merge conflicts that require manual intervention:" | |
| while IFS= read -r f; do | |
| [ -z "$f" ] && continue | |
| echo " ✗ ${f}" | |
| done <<< "${REMAINING}" | |
| echo "has_conflicts=true" >> "$GITHUB_OUTPUT" | |
| # Fail loudly — do NOT silently push an empty branch | |
| git merge --abort | |
| exit 1 | |
| else | |
| git commit --no-edit -m "sync: merge upstream ${NEW_TAG} (auto-resolved conflicts)" | |
| echo "has_conflicts=false" >> "$GITHUB_OUTPUT" | |
| fi | |
| fi | |
| # Apply YourClaw patches (root-level hardening + security subdir). | |
| # Root-level patches cover deployment hardening (docker-compose, | |
| # Dockerfile, SECURITY.md, .claude/settings.json, etc). | |
| # Security patches fix real vulnerabilities found in upstream code. | |
| # Patches that already match the working tree (already merged) are | |
| # silently skipped via --check. | |
| apply_patches() { | |
| local dir="$1" label="$2" | |
| if [ -d "${dir}" ] && ls "${dir}"/*.patch 1>/dev/null 2>&1; then | |
| echo "Applying ${label} patches from ${dir}/..." | |
| local applied=0 skipped=0 | |
| for patch in "${dir}"/*.patch; do | |
| local name="$(basename "$patch")" | |
| if git apply --check "$patch" 2>/dev/null; then | |
| git apply "$patch" | |
| echo " ✅ Applied: ${name}" | |
| applied=$((applied + 1)) | |
| else | |
| echo " ⚠️ Skipped: ${name} (already applied or does not apply cleanly)" | |
| skipped=$((skipped + 1)) | |
| fi | |
| done | |
| if [ "${applied}" -gt 0 ]; then | |
| git add -A | |
| git commit -m "chore: apply ${applied} YourClaw ${label} patches" || true | |
| fi | |
| echo "${label} patches: ${applied} applied, ${skipped} skipped" | |
| else | |
| echo "No ${label} patches to apply" | |
| fi | |
| } | |
| apply_patches "yourclaw-patches" "hardening" | |
| apply_patches "yourclaw-patches/security" "security" | |
| # Remove upstream workflow files after merge. | |
| # | |
| # Why this is necessary (not just a preference): | |
| # GitHub's GITHUB_TOKEN CANNOT push commits that modify .github/workflows/ files. | |
| # This is a hard security restriction — not a configuration issue. The only | |
| # alternative is a PAT with the `workflows` scope (set as SYNC_PAT secret). | |
| # | |
| # Why upstream's workflows don't help us anyway: | |
| # They're configured for openclaw/openclaw (their Docker registry, npm package, | |
| # Blacksmith CI runners). They'd fail on our fork. Our upstream-sync.yml and | |
| # release.yml are purpose-built for the YourClaw fork pipeline. | |
| OURS_WORKFLOWS="upstream-sync.yml release.yml" | |
| for f in .github/workflows/*.yml .github/workflows/*.yaml; do | |
| [ -f "$f" ] || continue | |
| BASENAME="$(basename "$f")" | |
| echo "${OURS_WORKFLOWS}" | grep -qw "${BASENAME}" && continue | |
| git rm -f "$f" | |
| done | |
| git commit -m "chore: remove upstream workflows (incompatible with fork)" --allow-empty || true | |
| # Update LAST_SYNCED_TAG | |
| echo "${NEW_TAG}" > yourclaw-patches/LAST_SYNCED_TAG | |
| git add yourclaw-patches/LAST_SYNCED_TAG | |
| git commit -m "chore: update LAST_SYNCED_TAG to ${NEW_TAG}" || true | |
| # Push sync branch | |
| git push origin "${SYNC_BRANCH}" --force | |
| - name: Create Pull Request | |
| env: | |
| GH_TOKEN: ${{ secrets.SYNC_PAT }} | |
| NEW_TAG: ${{ needs.check-upstream.outputs.new_tag }} | |
| LAST_TAG: ${{ needs.check-upstream.outputs.last_synced_tag }} | |
| COMMIT_COUNT: ${{ needs.check-upstream.outputs.commit_count }} | |
| SCAN_PASSED: ${{ needs.clawguard-scan.outputs.scan_passed }} | |
| SCAN_SUMMARY: ${{ needs.clawguard-scan.outputs.scan_summary }} | |
| HAS_CONFLICTS: ${{ steps.merge.outputs.has_conflicts }} | |
| SYNC_BRANCH: ${{ steps.merge.outputs.sync_branch }} | |
| run: | | |
| set -euo pipefail | |
| # Guard: if the sync branch has no content diff vs yourclaw (already up to date), | |
| # skip PR creation — GitHub rejects PRs with zero content diff between branches. | |
| # Note: git rev-list --count can be non-zero (merge commits) even when content | |
| # is identical, so we check git diff --stat as the authoritative test. | |
| REPO="${GITHUB_REPOSITORY}" | |
| CONTENT_DIFF=$(git diff --stat "${FORK_BRANCH}..${SYNC_BRANCH}" 2>/dev/null || echo "") | |
| if [ -z "${CONTENT_DIFF}" ]; then | |
| echo "✅ yourclaw already contains all changes from ${NEW_TAG} — nothing to PR" | |
| echo "${NEW_TAG}" > yourclaw-patches/LAST_SYNCED_TAG | |
| git add yourclaw-patches/LAST_SYNCED_TAG | |
| git commit -m "chore: update LAST_SYNCED_TAG to ${NEW_TAG} (already synced)" || true | |
| git push origin "${FORK_BRANCH}" | |
| # Clean up the empty sync branch | |
| git push origin --delete "${SYNC_BRANCH}" 2>/dev/null || true | |
| exit 0 | |
| fi | |
| if [ "${SCAN_PASSED}" = "true" ] && [ "${HAS_CONFLICTS}" = "false" ]; then | |
| LABEL="auto-merge" | |
| TITLE="sync: upstream ${NEW_TAG} (auto-merge ready)" | |
| else | |
| LABEL="needs-review" | |
| TITLE="sync: upstream ${NEW_TAG} (manual review required)" | |
| fi | |
| # Use branch-to-branch for accurate commit count (tag file can be stale) | |
| ACTUAL_COMMITS=$(git rev-list --count "${FORK_BRANCH}..${SYNC_BRANCH}" 2>/dev/null || echo "${COMMIT_COUNT}") | |
| BODY=$(cat <<PREOF | |
| ## Upstream Sync: ${LAST_TAG} → ${NEW_TAG} | |
| **Commits:** ${ACTUAL_COMMITS} (upstream tag has ${COMMIT_COUNT} commits vs ${FORK_BRANCH}) | |
| **Conflicts:** ${HAS_CONFLICTS} | |
| **Security Scan:** $([ "${SCAN_PASSED}" = "true" ] && echo "✅ Passed" || echo "❌ Issues found") | |
| ${SCAN_SUMMARY} | |
| ### Upstream Changes (last 30) | |
| \`\`\` | |
| $(git log --oneline "${FORK_BRANCH}..${SYNC_BRANCH}" 2>/dev/null | head -30 || echo "Unable to list commits") | |
| \`\`\` | |
| --- | |
| 🤖 Generated by [YourClaw Upstream Sync](https://github.com/yourclaw/openclaw/actions/workflows/upstream-sync.yml) | |
| PREOF | |
| ) | |
| # Check if PR already exists | |
| EXISTING_PR=$(gh pr list --repo "${REPO}" --head "${SYNC_BRANCH}" --json number --jq '.[0].number' 2>/dev/null || echo "") | |
| if [ -n "${EXISTING_PR}" ]; then | |
| echo "PR #${EXISTING_PR} already exists for ${SYNC_BRANCH} — updating" | |
| gh pr edit --repo "${REPO}" "${EXISTING_PR}" --title "${TITLE}" --body "${BODY}" | |
| else | |
| gh pr create \ | |
| --repo "${REPO}" \ | |
| --base "${FORK_BRANCH}" \ | |
| --head "${SYNC_BRANCH}" \ | |
| --title "${TITLE}" \ | |
| --body "${BODY}" || exit 1 | |
| # Add label via API (gh pr edit --add-label can fail silently with token issues) | |
| PR_NUMBER=$(gh pr list --repo "${REPO}" --head "${SYNC_BRANCH}" --json number --jq '.[0].number' 2>/dev/null || echo "") | |
| if [ -n "${PR_NUMBER}" ]; then | |
| gh api "repos/${REPO}/issues/${PR_NUMBER}/labels" \ | |
| --method POST --field "labels[]=${LABEL}" > /dev/null 2>&1 \ | |
| || echo "⚠ Could not add label '${LABEL}' (token may lack write permissions)" | |
| fi | |
| fi | |
| - name: Auto-merge if safe | |
| if: needs.clawguard-scan.outputs.scan_passed == 'true' && steps.merge.outputs.has_conflicts == 'false' | |
| env: | |
| GH_TOKEN: ${{ secrets.SYNC_PAT }} | |
| SYNC_BRANCH: ${{ steps.merge.outputs.sync_branch }} | |
| run: | | |
| REPO="${GITHUB_REPOSITORY}" | |
| PR_NUMBER=$(gh pr list --repo "${REPO}" --head "${SYNC_BRANCH}" --json number --jq '.[0].number') | |
| if [ -n "${PR_NUMBER}" ]; then | |
| if gh pr merge --repo "${REPO}" "${PR_NUMBER}" --squash --auto --delete-branch 2>&1; then | |
| echo "Auto-merge enabled for PR #${PR_NUMBER}" | |
| else | |
| echo "⚠ Auto-merge not available (enable in repo Settings → General → Allow auto-merge)" | |
| echo "PR #${PR_NUMBER} is ready for manual merge" | |
| fi | |
| fi |