# bench #72714
# NOTE: the original paste carried GitHub's "This file contains hidden or
# bidirectional Unicode text" viewer notice here; review this file in an
# editor that reveals hidden Unicode characters before trusting it.
# Runs benchmarks.
#
# The reth-bench job replays real blocks via the Engine API against a reth node
# backed by a local snapshot managed with schelk.
#
# It runs the baseline binary and the feature (candidate) binary on the
# same block range (snapshot recovered between runs) to compare performance.
on:
  # Triggered either by a PR comment (gated in the job `if:` below) or manually.
  issue_comment:
    types: [created]
  workflow_dispatch:
    inputs:
      blocks:
        description: "Number of blocks to benchmark (or 'big' for big blocks mode)"
        required: false
        default: "500"
        type: string
      warmup:
        description: "Number of warmup blocks"
        required: false
        default: "100"
        type: string
      baseline:
        description: "Baseline git ref (default: merge-base)"
        required: false
        default: ""
        type: string
      feature:
        description: "Feature git ref (default: branch head)"
        required: false
        default: ""
        type: string
      samply:
        description: "Enable samply profiling"
        required: false
        default: "false"
        type: boolean
      cores:
        description: "Limit reth to N CPU cores (0 = all available)"
        required: false
        default: "0"
        type: string
      reth_newPayload:
        description: "Use reth_newPayload RPC (server-side timing)"
        required: false
        default: "true"
        type: boolean
      wait_time:
        description: "Fixed wait time between blocks (e.g. 500ms, 1s)"
        required: false
        default: ""
        type: string
      baseline_args:
        description: "Extra CLI args for the baseline reth node"
        required: false
        default: ""
        type: string
      feature_args:
        description: "Extra CLI args for the feature reth node"
        required: false
        default: ""
        type: string
env:
  CARGO_TERM_COLOR: always
  BASELINE: base
  SEED: reth
  RUSTC_WRAPPER: "sccache"
  # Number of self-hosted bench runners; used only for queue-position estimates.
  BENCH_RUNNERS: 2
name: bench
permissions:
  contents: read
  pull-requests: write
jobs:
  # Parses and validates the bench request, posts/updates a status comment,
  # and exposes all parameters as outputs for the reth-bench job.
  reth-bench-ack:
    # Gate: PR comments starting with a bench trigger phrase, or manual dispatch.
    if: |
      (github.event_name == 'issue_comment' && github.event.issue.pull_request && (startsWith(github.event.comment.body, '@decofe bench') || startsWith(github.event.comment.body, 'derek bench'))) ||
      github.event_name == 'workflow_dispatch'
    name: reth-bench-ack
    runs-on: ubuntu-latest
    outputs:
      pr: ${{ steps.args.outputs.pr }}
      actor: ${{ steps.args.outputs.actor }}
      blocks: ${{ steps.args.outputs.blocks }}
      warmup: ${{ steps.args.outputs.warmup }}
      baseline: ${{ steps.args.outputs.baseline }}
      feature: ${{ steps.args.outputs.feature }}
      baseline-name: ${{ steps.args.outputs.baseline-name }}
      feature-name: ${{ steps.args.outputs.feature-name }}
      samply: ${{ steps.args.outputs.samply }}
      cores: ${{ steps.args.outputs.cores }}
      big-blocks: ${{ steps.args.outputs.big-blocks }}
      reth-new-payload: ${{ steps.args.outputs.reth-new-payload }}
      wait-time: ${{ steps.args.outputs.wait-time }}
      baseline-args: ${{ steps.args.outputs.baseline-args }}
      feature-args: ${{ steps.args.outputs.feature-args }}
      comment-id: ${{ steps.ack.outputs.comment-id }}
| steps: | |
| - name: Check org membership | |
| if: github.event_name == 'issue_comment' | |
| uses: actions/github-script@v8 | |
| with: | |
| github-token: ${{ secrets.GITHUB_TOKEN }} | |
| script: | | |
| const user = context.payload.comment.user.login; | |
| try { | |
| const { status } = await github.rest.orgs.checkMembershipForUser({ | |
| org: 'paradigmxyz', | |
| username: user, | |
| }); | |
| if (status !== 204 && status !== 302) { | |
| core.setFailed(`@${user} is not a member of paradigmxyz`); | |
| } | |
| } catch (e) { | |
| core.setFailed(`@${user} is not a member of paradigmxyz`); | |
| } | |
| - name: Parse arguments | |
| id: args | |
| uses: actions/github-script@v8 | |
| with: | |
| github-token: ${{ secrets.DEREK_PAT }} | |
| script: | | |
| let pr, actor, blocks, warmup, baseline, feature, samply, cores, bigBlocks; | |
| if (context.eventName === 'workflow_dispatch') { | |
| actor = '${{ github.actor }}'; | |
| blocks = '${{ github.event.inputs.blocks }}' || '500'; | |
| warmup = '${{ github.event.inputs.warmup }}' || '100'; | |
| baseline = '${{ github.event.inputs.baseline }}'; | |
| feature = '${{ github.event.inputs.feature }}'; | |
| samply = '${{ github.event.inputs.samply }}' === 'true' ? 'true' : 'false'; | |
| cores = '${{ github.event.inputs.cores }}' || '0'; | |
| bigBlocks = blocks === 'big' ? 'true' : 'false'; | |
| var rethNewPayload = '${{ github.event.inputs.reth_newPayload }}' !== 'false' ? 'true' : 'false'; | |
| var waitTime = '${{ github.event.inputs.wait_time }}' || ''; | |
| var baselineNodeArgs = '${{ github.event.inputs.baseline_args }}' || ''; | |
| var featureNodeArgs = '${{ github.event.inputs.feature_args }}' || ''; | |
| // Find PR for the selected branch | |
| const branch = '${{ github.ref_name }}'; | |
| const { data: prs } = await github.rest.pulls.list({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| head: `${context.repo.owner}:${branch}`, | |
| state: 'open', | |
| per_page: 1, | |
| }); | |
| pr = prs.length ? String(prs[0].number) : ''; | |
| if (!pr) { | |
| core.info(`No open PR found for branch '${branch}', results will be in job summary`); | |
| } | |
| } else { | |
| pr = String(context.issue.number); | |
| actor = context.payload.comment.user.login; | |
| const body = context.payload.comment.body.trim(); | |
| const intArgs = new Set(['warmup', 'cores']); | |
| const intOrKeywordArgs = new Map([['blocks', new Set(['big'])]]); | |
| const refArgs = new Set(['baseline', 'feature']); | |
| const boolArgs = new Set(['samply']); | |
| const boolDefaultTrue = new Set(['reth_newPayload']); | |
| const durationArgs = new Set(['wait-time']); | |
| const stringArgs = new Set(['baseline-args', 'feature-args']); | |
| const defaults = { blocks: '500', warmup: '100', baseline: '', feature: '', samply: 'false', cores: '0', reth_newPayload: 'true', 'wait-time': '', 'baseline-args': '', 'feature-args': '' }; | |
| const unknown = []; | |
| const invalid = []; | |
| const args = body.replace(/^(?:@decofe|derek) bench\s*/, ''); | |
| // Parse args, handling quoted values like key="value with spaces" | |
| const parts = []; | |
| const argRegex = /(\S+?="[^"]*"|\S+?='[^']*'|\S+)/g; | |
| let m; | |
| while ((m = argRegex.exec(args)) !== null) parts.push(m[1]); | |
| for (const part of parts) { | |
| const eq = part.indexOf('='); | |
| if (eq === -1) { | |
| if (boolArgs.has(part)) { | |
| defaults[part] = 'true'; | |
| } else if (boolDefaultTrue.has(part)) { | |
| defaults[part] = 'true'; | |
| } else { | |
| unknown.push(part); | |
| } | |
| continue; | |
| } | |
| const key = part.slice(0, eq); | |
| let value = part.slice(eq + 1); | |
| // Strip surrounding quotes | |
| if ((value.startsWith('"') && value.endsWith('"')) || (value.startsWith("'") && value.endsWith("'"))) { | |
| value = value.slice(1, -1); | |
| } | |
| if (boolDefaultTrue.has(key)) { | |
| if (value === 'true' || value === 'false') { | |
| defaults[key] = value; | |
| } else { | |
| invalid.push(`\`${key}=${value}\` (must be true or false)`); | |
| } | |
| } else if (durationArgs.has(key)) { | |
| if (/^\d+(ms|s|m)$/.test(value)) { | |
| defaults[key] = value; | |
| } else { | |
| invalid.push(`\`${key}=${value}\` (must be a duration like 500ms, 1s, 2m)`); | |
| } | |
| } else if (intArgs.has(key)) { | |
| if (!/^\d+$/.test(value)) { | |
| invalid.push(`\`${key}=${value}\` (must be a positive integer)`); | |
| } else { | |
| defaults[key] = value; | |
| } | |
| } else if (intOrKeywordArgs.has(key)) { | |
| const keywords = intOrKeywordArgs.get(key); | |
| if (keywords.has(value)) { | |
| defaults[key] = value; | |
| } else if (/^\d+$/.test(value)) { | |
| defaults[key] = value; | |
| } else { | |
| invalid.push(`\`${key}=${value}\` (must be a positive integer or one of: ${[...keywords].join(', ')})`); | |
| } | |
| } else if (refArgs.has(key)) { | |
| if (!value) { | |
| invalid.push(`\`${key}=\` (must be a git ref)`); | |
| } else { | |
| defaults[key] = value; | |
| } | |
| } else if (stringArgs.has(key)) { | |
| defaults[key] = value; | |
| } else { | |
| unknown.push(key); | |
| } | |
| } | |
| const errors = []; | |
| if (unknown.length) errors.push(`Unknown argument(s): \`${unknown.join('`, `')}\``); | |
| if (invalid.length) errors.push(`Invalid value(s): ${invalid.join(', ')}`); | |
| if (errors.length) { | |
| const msg = `❌ **Invalid bench command**\n\n${errors.join('\n')}\n\n**Usage:** \`@decofe bench [blocks=N|big] [warmup=N] [baseline=REF] [feature=REF] [samply] [cores=N] [reth_newPayload=true|false] [wait-time=DURATION] [baseline-args="..."] [feature-args="..."]\``; | |
| await github.rest.issues.createComment({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| issue_number: context.issue.number, | |
| body: msg, | |
| }); | |
| core.setFailed(msg); | |
| return; | |
| } | |
| blocks = defaults.blocks; | |
| warmup = defaults.warmup; | |
| baseline = defaults.baseline; | |
| feature = defaults.feature; | |
| samply = defaults.samply; | |
| cores = defaults.cores; | |
| bigBlocks = blocks === 'big' ? 'true' : 'false'; | |
| var rethNewPayload = defaults.reth_newPayload; | |
| var waitTime = defaults['wait-time']; | |
| var baselineNodeArgs = defaults['baseline-args']; | |
| var featureNodeArgs = defaults['feature-args']; | |
| } | |
| // Resolve display names for baseline/feature | |
| let baselineName = baseline || 'main'; | |
| let featureName = feature; | |
| if (!featureName) { | |
| if (pr) { | |
| const { data: prData } = await github.rest.pulls.get({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| pull_number: parseInt(pr), | |
| }); | |
| featureName = prData.head.ref; | |
| } else { | |
| featureName = '${{ github.ref_name }}'; | |
| } | |
| } | |
| core.setOutput('pr', pr || ''); | |
| core.setOutput('actor', actor); | |
| core.setOutput('blocks', blocks); | |
| core.setOutput('warmup', warmup); | |
| core.setOutput('baseline', baseline); | |
| core.setOutput('feature', feature); | |
| core.setOutput('baseline-name', baselineName); | |
| core.setOutput('feature-name', featureName); | |
| core.setOutput('samply', samply); | |
| core.setOutput('cores', cores); | |
| core.setOutput('big-blocks', bigBlocks); | |
| core.setOutput('reth-new-payload', rethNewPayload); | |
| core.setOutput('wait-time', waitTime); | |
| core.setOutput('baseline-args', baselineNodeArgs); | |
| core.setOutput('feature-args', featureNodeArgs); | |
| - name: Acknowledge request | |
| id: ack | |
| uses: actions/github-script@v8 | |
| with: | |
| github-token: ${{ secrets.DEREK_PAT }} | |
| script: | | |
| if (context.eventName === 'issue_comment') { | |
| await github.rest.reactions.createForIssueComment({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| comment_id: context.payload.comment.id, | |
| content: 'eyes', | |
| }); | |
| } | |
| const pr = '${{ steps.args.outputs.pr }}'; | |
| if (!pr) return; | |
| const runUrl = `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`; | |
| // Count queued/waiting bench runs ahead of this one. | |
| // BENCH_RUNNERS is the number of self-hosted runners available. | |
| let queueMsg = ''; | |
| let ahead = 0; | |
| const numRunners = parseInt(process.env.BENCH_RUNNERS) || 1; | |
| try { | |
| const statuses = ['queued', 'in_progress', 'waiting', 'requested', 'pending']; | |
| const allRuns = []; | |
| for (const status of statuses) { | |
| const { data: { workflow_runs: r } } = await github.rest.actions.listWorkflowRuns({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| workflow_id: 'bench.yml', | |
| status, | |
| per_page: 100, | |
| }); | |
| allRuns.push(...r); | |
| } | |
| const benchRuns = allRuns.filter(r => r.event === 'issue_comment' || r.event === 'workflow_dispatch'); | |
| const thisRun = benchRuns.find(r => r.id === context.runId); | |
| const thisCreatedAt = thisRun ? new Date(thisRun.created_at) : new Date(); | |
| const totalAhead = benchRuns.filter(r => r.id !== context.runId && new Date(r.created_at) <= thisCreatedAt).length; | |
| ahead = Math.max(0, totalAhead - numRunners + 1); | |
| if (ahead > 0) { | |
| queueMsg = `\n🔢 **Queue position:** ${ahead} job(s) ahead (${numRunners} runner(s))`; | |
| } | |
| } catch (e) { | |
| core.info(`Skipping queue tracking: ${e.message}`); | |
| } | |
| const actor = '${{ steps.args.outputs.actor }}'; | |
| const blocks = '${{ steps.args.outputs.blocks }}'; | |
| const warmup = '${{ steps.args.outputs.warmup }}'; | |
| const baseline = '${{ steps.args.outputs.baseline-name }}'; | |
| const feature = '${{ steps.args.outputs.feature-name }}'; | |
| const samply = '${{ steps.args.outputs.samply }}' === 'true'; | |
| const bigBlocks = '${{ steps.args.outputs.big-blocks }}' === 'true'; | |
| const samplyNote = samply ? ', samply: `enabled`' : ''; | |
| const cores = '${{ steps.args.outputs.cores }}'; | |
| const coresNote = cores && cores !== '0' ? `, cores: \`${cores}\`` : ''; | |
| const rethNP = '${{ steps.args.outputs.reth-new-payload }}' !== 'false'; | |
| const rethNPNote = !rethNP ? ', reth_newPayload: `disabled`' : ''; | |
| const waitTimeVal = '${{ steps.args.outputs.wait-time }}'; | |
| const waitTimeNote = waitTimeVal ? `, wait-time: \`${waitTimeVal}\`` : ''; | |
| const baselineArgsVal = '${{ steps.args.outputs.baseline-args }}'; | |
| const baselineArgsNote = baselineArgsVal ? `, baseline-args: \`${baselineArgsVal}\`` : ''; | |
| const featureArgsVal = '${{ steps.args.outputs.feature-args }}'; | |
| const featureArgsNote = featureArgsVal ? `, feature-args: \`${featureArgsVal}\`` : ''; | |
| const blocksDesc = bigBlocks ? 'blocks: `big`' : `${blocks} blocks, ${warmup} warmup blocks`; | |
| const config = `**Config:** ${blocksDesc}, baseline: \`${baseline}\`, feature: \`${feature}\`${samplyNote}${coresNote}${rethNPNote}${waitTimeNote}${baselineArgsNote}${featureArgsNote}`; | |
| const { data: comment } = await github.rest.issues.createComment({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| issue_number: parseInt(pr), | |
| body: `cc @${actor}\n\n🚀 Benchmark queued! [View run](${runUrl})\n\n⏳ **Status:** Waiting for runner...${queueMsg}\n\n${config}`, | |
| }); | |
| core.setOutput('comment-id', String(comment.id)); | |
| core.setOutput('queue-position', String(ahead || 0)); | |
| - name: Poll queue position | |
| if: steps.ack.outputs.comment-id && steps.ack.outputs.queue-position != '0' | |
| uses: actions/github-script@v8 | |
| with: | |
| github-token: ${{ secrets.DEREK_PAT }} | |
| script: | | |
| const pr = '${{ steps.args.outputs.pr }}'; | |
| const commentId = parseInt('${{ steps.ack.outputs.comment-id }}'); | |
| const actor = '${{ steps.args.outputs.actor }}'; | |
| const blocks = '${{ steps.args.outputs.blocks }}'; | |
| const warmup = '${{ steps.args.outputs.warmup }}'; | |
| const baseline = '${{ steps.args.outputs.baseline-name }}'; | |
| const feature = '${{ steps.args.outputs.feature-name }}'; | |
| const samply = '${{ steps.args.outputs.samply }}' === 'true'; | |
| const bigBlocks = '${{ steps.args.outputs.big-blocks }}' === 'true'; | |
| const samplyNote = samply ? ', samply: `enabled`' : ''; | |
| const cores = '${{ steps.args.outputs.cores }}'; | |
| const coresNote = cores && cores !== '0' ? `, cores: \`${cores}\`` : ''; | |
| const rethNP = '${{ steps.args.outputs.reth-new-payload }}' !== 'false'; | |
| const rethNPNote = !rethNP ? ', reth_newPayload: `disabled`' : ''; | |
| const waitTimeVal = '${{ steps.args.outputs.wait-time }}'; | |
| const waitTimeNote = waitTimeVal ? `, wait-time: \`${waitTimeVal}\`` : ''; | |
| const baselineArgsVal = '${{ steps.args.outputs.baseline-args }}'; | |
| const baselineArgsNote = baselineArgsVal ? `, baseline-args: \`${baselineArgsVal}\`` : ''; | |
| const featureArgsVal = '${{ steps.args.outputs.feature-args }}'; | |
| const featureArgsNote = featureArgsVal ? `, feature-args: \`${featureArgsVal}\`` : ''; | |
| const blocksDesc = bigBlocks ? 'blocks: `big`' : `${blocks} blocks, ${warmup} warmup blocks`; | |
| const config = `**Config:** ${blocksDesc}, baseline: \`${baseline}\`, feature: \`${feature}\`${samplyNote}${coresNote}${rethNPNote}${waitTimeNote}${baselineArgsNote}${featureArgsNote}`; | |
| const runUrl = `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`; | |
| const numRunners = parseInt(process.env.BENCH_RUNNERS) || 1; | |
| async function getQueuePosition() { | |
| const statuses = ['queued', 'in_progress', 'waiting', 'requested', 'pending']; | |
| const allRuns = []; | |
| for (const status of statuses) { | |
| const { data: { workflow_runs: r } } = await github.rest.actions.listWorkflowRuns({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| workflow_id: 'bench.yml', | |
| status, | |
| per_page: 100, | |
| }); | |
| allRuns.push(...r); | |
| } | |
| const benchRuns = allRuns.filter(r => r.event === 'issue_comment' || r.event === 'workflow_dispatch'); | |
| const thisRun = benchRuns.find(r => r.id === context.runId); | |
| const thisCreatedAt = thisRun ? new Date(thisRun.created_at) : new Date(); | |
| const totalAhead = benchRuns.filter(r => r.id !== context.runId && new Date(r.created_at) <= thisCreatedAt).length; | |
| return { ahead: Math.max(0, totalAhead - numRunners + 1), numRunners }; | |
| } | |
| let lastPosition = parseInt('${{ steps.ack.outputs.queue-position }}'); | |
| const sleep = ms => new Promise(r => setTimeout(r, ms)); | |
| while (true) { | |
| await sleep(10_000); | |
| try { | |
| const { ahead, numRunners } = await getQueuePosition(); | |
| if (ahead !== lastPosition) { | |
| lastPosition = ahead; | |
| const queueMsg = ahead > 0 | |
| ? `\n🔢 **Queue position:** ${ahead} job(s) ahead (${numRunners} runner(s))` | |
| : ''; | |
| await github.rest.issues.updateComment({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| comment_id: commentId, | |
| body: `cc @${actor}\n\n🚀 Benchmark queued! [View run](${runUrl})\n\n⏳ **Status:** Waiting for runner...${queueMsg}\n\n${config}`, | |
| }); | |
| } | |
| if (ahead === 0) break; | |
| } catch (e) { | |
| core.info(`Queue poll error: ${e.message}`); | |
| } | |
| } | |
| reth-bench: | |
| needs: reth-bench-ack | |
| name: reth-bench | |
| runs-on: [self-hosted, Linux, X64] | |
| timeout-minutes: 120 | |
| env: | |
| BENCH_RPC_URL: https://ethereum.reth.rs/rpc | |
| SCHELK_MOUNT: /reth-bench | |
| BENCH_WORK_DIR: ${{ github.workspace }}/bench-work | |
| BENCH_PR: ${{ needs.reth-bench-ack.outputs.pr }} | |
| BENCH_ACTOR: ${{ needs.reth-bench-ack.outputs.actor }} | |
| BENCH_BLOCKS: ${{ needs.reth-bench-ack.outputs.blocks }} | |
| BENCH_WARMUP_BLOCKS: ${{ needs.reth-bench-ack.outputs.warmup }} | |
| BENCH_SAMPLY: ${{ needs.reth-bench-ack.outputs.samply }} | |
| BENCH_CORES: ${{ needs.reth-bench-ack.outputs.cores }} | |
| BENCH_BIG_BLOCKS: ${{ needs.reth-bench-ack.outputs.big-blocks }} | |
| BENCH_RETH_NEW_PAYLOAD: ${{ needs.reth-bench-ack.outputs.reth-new-payload }} | |
| BENCH_WAIT_TIME: ${{ needs.reth-bench-ack.outputs.wait-time }} | |
| BENCH_BASELINE_ARGS: ${{ needs.reth-bench-ack.outputs.baseline-args }} | |
| BENCH_FEATURE_ARGS: ${{ needs.reth-bench-ack.outputs.feature-args }} | |
| BENCH_COMMENT_ID: ${{ needs.reth-bench-ack.outputs.comment-id }} | |
| BENCH_METRICS_ADDR: "127.0.0.1:9100" | |
| steps: | |
| - name: Clean up previous bench-work | |
| run: sudo rm -rf "$BENCH_WORK_DIR" 2>/dev/null || true | |
| - name: Resolve checkout ref | |
| id: checkout-ref | |
| uses: actions/github-script@v8 | |
| with: | |
| script: | | |
| if (!process.env.BENCH_PR) { | |
| core.setOutput('ref', '${{ github.ref }}'); | |
| return; | |
| } | |
| const { data: pr } = await github.rest.pulls.get({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| pull_number: parseInt(process.env.BENCH_PR), | |
| }); | |
| // Always use head SHA — the merge ref (refs/pull/N/merge) may not | |
| // exist if the PR has conflicts, was force-pushed, or was | |
| // merged/closed between this step and checkout. | |
| core.info(`PR #${process.env.BENCH_PR} (${pr.state}), using head SHA ${pr.head.sha}`); | |
| core.setOutput('ref', pr.head.sha); | |
| - uses: actions/checkout@v6 | |
| with: | |
| submodules: true | |
| fetch-depth: 0 | |
| ref: ${{ steps.checkout-ref.outputs.ref }} | |
| - name: Resolve job URL and update status | |
| if: env.BENCH_COMMENT_ID | |
| uses: actions/github-script@v8 | |
| with: | |
| github-token: ${{ secrets.DEREK_PAT }} | |
| script: | | |
| const { data: jobs } = await github.rest.actions.listJobsForWorkflowRun({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| run_id: context.runId, | |
| }); | |
| const job = jobs.jobs.find(j => j.name === 'reth-bench'); | |
| const jobUrl = job ? job.html_url : `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`; | |
| core.exportVariable('BENCH_JOB_URL', jobUrl); | |
| const blocks = process.env.BENCH_BLOCKS; | |
| const warmup = process.env.BENCH_WARMUP_BLOCKS; | |
| const baseline = '${{ needs.reth-bench-ack.outputs.baseline-name }}'; | |
| const feature = '${{ needs.reth-bench-ack.outputs.feature-name }}'; | |
| const samply = process.env.BENCH_SAMPLY === 'true'; | |
| const bigBlocks = process.env.BENCH_BIG_BLOCKS === 'true'; | |
| const samplyNote = samply ? ', samply: `enabled`' : ''; | |
| const cores = process.env.BENCH_CORES || '0'; | |
| const coresNote = cores && cores !== '0' ? `, cores: \`${cores}\`` : ''; | |
| const rethNP = (process.env.BENCH_RETH_NEW_PAYLOAD || 'true') !== 'false'; | |
| const rethNPNote = !rethNP ? ', reth_newPayload: `disabled`' : ''; | |
| const waitTimeVal = process.env.BENCH_WAIT_TIME || ''; | |
| const waitTimeNote = waitTimeVal ? `, wait-time: \`${waitTimeVal}\`` : ''; | |
| const baselineArgsVal = process.env.BENCH_BASELINE_ARGS || ''; | |
| const baselineArgsNote = baselineArgsVal ? `, baseline-args: \`${baselineArgsVal}\`` : ''; | |
| const featureArgsVal = process.env.BENCH_FEATURE_ARGS || ''; | |
| const featureArgsNote = featureArgsVal ? `, feature-args: \`${featureArgsVal}\`` : ''; | |
| const blocksDesc = bigBlocks ? 'blocks: `big`' : `${blocks} blocks, ${warmup} warmup blocks`; | |
| core.exportVariable('BENCH_CONFIG', `**Config:** ${blocksDesc}, baseline: \`${baseline}\`, feature: \`${feature}\`${samplyNote}${coresNote}${rethNPNote}${waitTimeNote}${baselineArgsNote}${featureArgsNote}`); | |
| const { buildBody } = require('./.github/scripts/bench-update-status.js'); | |
| await github.rest.issues.updateComment({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| comment_id: parseInt(process.env.BENCH_COMMENT_ID), | |
| body: buildBody('Building binaries...'), | |
| }); | |
| - uses: dtolnay/rust-toolchain@stable | |
| - uses: mozilla-actions/sccache-action@v0.0.9 | |
| continue-on-error: true | |
| - name: Install dependencies | |
| env: | |
| DEREK_TOKEN: ${{ secrets.DEREK_TOKEN }} | |
| run: | | |
| mkdir -p "$HOME/.local/bin" | |
| # apt packages | |
| sudo apt-get update -qq | |
| sudo apt-get install -y --no-install-recommends \ | |
| python3 make jq zstd curl dmsetup \ | |
| linux-tools-"$(uname -r)" || \ | |
| sudo apt-get install -y --no-install-recommends linux-tools-generic | |
| # mc (MinIO client) | |
| if ! command -v mc &>/dev/null; then | |
| curl -sSfL https://dl.min.io/client/mc/release/linux-amd64/mc -o "$HOME/.local/bin/mc" | |
| chmod +x "$HOME/.local/bin/mc" | |
| fi | |
| # uv (Python package manager) | |
| if ! command -v uv &>/dev/null; then | |
| curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="$HOME/.local/bin" sh | |
| fi | |
| # Configure git auth for private repos | |
| git config --global url."https://x-access-token:${DEREK_TOKEN}@github.com/".insteadOf "https://github.com/" | |
| # thin-provisioning-tools (era_invalidate, required by schelk) | |
| if ! command -v era_invalidate &>/dev/null; then | |
| git clone --depth 1 https://github.com/jthornber/thin-provisioning-tools /tmp/tpt | |
| sudo make -C /tmp/tpt install | |
| rm -rf /tmp/tpt | |
| fi | |
| # schelk (snapshot rollback tool, invoked via sudo) | |
| if ! sudo sh -c 'command -v schelk' &>/dev/null; then | |
| cargo install --git https://github.com/tempoxyz/schelk --locked | |
| sudo install "$HOME/.cargo/bin/schelk" /usr/local/bin/ | |
| fi | |
| # samply (optional CPU profiler, invoked via sudo) | |
| if [ "${BENCH_SAMPLY:-false}" = "true" ] && ! sudo sh -c 'command -v samply' &>/dev/null; then | |
| cargo install samply --git https://github.com/DaniPopes/samply --branch edge --locked | |
| sudo install "$HOME/.cargo/bin/samply" /usr/local/bin/ | |
| fi | |
| # Verify all required tools are available | |
| - name: Check dependencies | |
| run: | | |
| export PATH="$HOME/.local/bin:$HOME/.cargo/bin:$PATH" | |
| echo "$HOME/.local/bin" >> "$GITHUB_PATH" | |
| echo "$HOME/.cargo/bin" >> "$GITHUB_PATH" | |
| missing=() | |
| for cmd in mc schelk cpupower taskset stdbuf python3 curl make uv pzstd jq; do | |
| command -v "$cmd" &>/dev/null || missing+=("$cmd") | |
| done | |
| if [ ${#missing[@]} -gt 0 ]; then | |
| echo "::error::Missing required tools: ${missing[*]}" | |
| exit 1 | |
| fi | |
| echo "All dependencies found" | |
| # Build binaries | |
| - name: Resolve PR head branch | |
| id: pr-info | |
| uses: actions/github-script@v8 | |
| with: | |
| script: | | |
| if (process.env.BENCH_PR) { | |
| const { data: pr } = await github.rest.pulls.get({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| pull_number: parseInt(process.env.BENCH_PR), | |
| }); | |
| core.setOutput('head-ref', pr.head.ref); | |
| core.setOutput('head-sha', pr.head.sha); | |
| } else { | |
| core.setOutput('head-ref', '${{ github.ref_name }}'); | |
| core.setOutput('head-sha', '${{ github.sha }}'); | |
| } | |
| - name: Resolve refs | |
| id: refs | |
| uses: actions/github-script@v8 | |
| with: | |
| script: | | |
| const { execSync } = require('child_process'); | |
| const run = (cmd) => execSync(cmd, { encoding: 'utf8' }).trim(); | |
| const baselineArg = '${{ needs.reth-bench-ack.outputs.baseline }}'; | |
| const featureArg = '${{ needs.reth-bench-ack.outputs.feature }}'; | |
| let baselineRef, baselineName, featureRef, featureName; | |
| if (baselineArg) { | |
| try { run(`git fetch origin "${baselineArg}" --quiet`); } catch {} | |
| try { | |
| baselineRef = run(`git rev-parse "${baselineArg}"`); | |
| } catch { | |
| baselineRef = run(`git rev-parse "origin/${baselineArg}"`); | |
| } | |
| baselineName = baselineArg; | |
| } else { | |
| try { | |
| baselineRef = run('git merge-base HEAD origin/main'); | |
| } catch { | |
| baselineRef = '${{ github.sha }}'; | |
| } | |
| baselineName = 'main'; | |
| } | |
| if (featureArg) { | |
| try { run(`git fetch origin "${featureArg}" --quiet`); } catch {} | |
| try { | |
| featureRef = run(`git rev-parse "${featureArg}"`); | |
| } catch { | |
| featureRef = run(`git rev-parse "origin/${featureArg}"`); | |
| } | |
| featureName = featureArg; | |
| } else { | |
| featureRef = '${{ steps.pr-info.outputs.head-sha }}'; | |
| featureName = '${{ steps.pr-info.outputs.head-ref }}'; | |
| } | |
| core.setOutput('baseline-ref', baselineRef); | |
| core.setOutput('baseline-name', baselineName); | |
| core.setOutput('feature-ref', featureRef); | |
| core.setOutput('feature-name', featureName); | |
| - name: Check if snapshot needs update | |
| id: snapshot-check | |
| run: | | |
| if .github/scripts/bench-reth-snapshot.sh --check; then | |
| echo "needed=false" >> "$GITHUB_OUTPUT" | |
| else | |
| echo "needed=true" >> "$GITHUB_OUTPUT" | |
| fi | |
| - name: Update status (snapshot needed) | |
| if: env.BENCH_COMMENT_ID && steps.snapshot-check.outputs.needed == 'true' | |
| uses: actions/github-script@v8 | |
| with: | |
| github-token: ${{ secrets.DEREK_PAT }} | |
| script: | | |
| const s = require('./.github/scripts/bench-update-status.js'); | |
| await s({github, context, status: 'Building binaries & downloading snapshot...'}); | |
| - name: Prepare source dirs | |
| run: | | |
| BASELINE_REF="${{ steps.refs.outputs.baseline-ref }}" | |
| if [ -d ../reth-baseline ]; then | |
| git -C ../reth-baseline fetch origin "$BASELINE_REF" | |
| else | |
| git clone . ../reth-baseline | |
| fi | |
| git -C ../reth-baseline checkout "$BASELINE_REF" | |
| FEATURE_REF="${{ steps.refs.outputs.feature-ref }}" | |
| if [ -d ../reth-feature ]; then | |
| git -C ../reth-feature fetch origin "$FEATURE_REF" | |
| else | |
| git clone . ../reth-feature | |
| fi | |
| git -C ../reth-feature checkout "$FEATURE_REF" | |
| - name: Build binaries and download snapshot in parallel | |
| id: build | |
| env: | |
| GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| BENCH_REPO: ${{ github.repository }} | |
| SNAPSHOT_NEEDED: ${{ steps.snapshot-check.outputs.needed }} | |
| run: | | |
| BASELINE_DIR="$(cd ../reth-baseline && pwd)" | |
| FEATURE_DIR="$(cd ../reth-feature && pwd)" | |
| .github/scripts/bench-reth-build.sh baseline "${BASELINE_DIR}" "${{ steps.refs.outputs.baseline-ref }}" & | |
| PID_BASELINE=$! | |
| .github/scripts/bench-reth-build.sh feature "${FEATURE_DIR}" "${{ steps.refs.outputs.feature-ref }}" & | |
| PID_FEATURE=$! | |
| PID_SNAPSHOT= | |
| if [ "$SNAPSHOT_NEEDED" = "true" ]; then | |
| .github/scripts/bench-reth-snapshot.sh & | |
| PID_SNAPSHOT=$! | |
| fi | |
| FAIL=0 | |
| wait $PID_BASELINE || FAIL=1 | |
| wait $PID_FEATURE || FAIL=1 | |
| [ -n "$PID_SNAPSHOT" ] && { wait $PID_SNAPSHOT || FAIL=1; } | |
| if [ $FAIL -ne 0 ]; then | |
| echo "::error::One or more parallel tasks failed (builds / snapshot download)" | |
| exit 1 | |
| fi | |
      # System tuning for reproducible benchmarks
      - name: System setup
        run: |
          # Pin the CPU frequency governor so clock speed does not drift mid-run.
          sudo cpupower frequency-set -g performance || true
          # Disable turbo boost (Intel and AMD paths)
          echo 1 | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo 2>/dev/null || true
          echo 0 | sudo tee /sys/devices/system/cpu/cpufreq/boost 2>/dev/null || true
          # No swapping during runs; fixed memory layout (ASLR off) for stable perf.
          sudo swapoff -a || true
          echo 0 | sudo tee /proc/sys/kernel/randomize_va_space || true
          # Disable SMT (hyperthreading)
          # Offline every CPU that is not the first thread of its core. Each
          # cpu's siblings file is read before any sibling is offlined, so the
          # glob snapshot taken at loop start stays valid.
          for cpu in /sys/devices/system/cpu/cpu*/topology/thread_siblings_list; do
            first=$(cut -d, -f1 < "$cpu" | cut -d- -f1)
            current=$(echo "$cpu" | grep -o 'cpu[0-9]*' | grep -o '[0-9]*')
            if [ "$current" != "$first" ]; then
              echo 0 | sudo tee "/sys/devices/system/cpu/cpu${current}/online" || true
            fi
          done
          echo "Online CPUs: $(nproc)"
          # Disable transparent huge pages (compaction causes latency spikes)
          # Two candidate sysfs paths; the first that exists wins ("|| true"
          # covers the case where neither exists).
          for p in /sys/kernel/mm/transparent_hugepage /sys/kernel/mm/transparent_hugepages; do
            [ -d "$p" ] && echo never | sudo tee "$p/enabled" && echo never | sudo tee "$p/defrag" && break
          done || true
          # Prevent deep C-states (avoids wake-up latency jitter)
          # Writing 0 to /dev/cpu_dma_latency requests max latency 0; the request
          # holds only while the fd is open, so a background sleep keeps it alive.
          sudo sh -c 'exec 3<>/dev/cpu_dma_latency; echo -ne "\x00\x00\x00\x00" >&3; sleep infinity' &
          # Move all IRQs to core 0 (housekeeping core)
          for irq in /proc/irq/*/smp_affinity_list; do
            echo 0 | sudo tee "$irq" 2>/dev/null || true
          done
          # Stop noisy background services
          sudo systemctl stop irqbalance cron atd unattended-upgrades snapd 2>/dev/null || true
          # Log environment for reproducibility
          echo "=== Benchmark environment ==="
          uname -r
          lscpu | grep -E 'Model name|CPU\(s\)|MHz|NUMA'
          cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
          cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq
          cat /sys/kernel/mm/transparent_hugepage/enabled 2>/dev/null || cat /sys/kernel/mm/transparent_hugepages/enabled 2>/dev/null || echo "THP: unknown"
          free -h
      # Clean up any leftover state
      - name: Pre-flight cleanup
        run: |
          # Kill any reth process left over from a crashed or cancelled run.
          sudo pkill -9 reth || true
          sleep 1
          # If the snapshot volume is still mounted, lazily unmount it and let
          # schelk roll the snapshot back to a clean state.
          if mountpoint -q "$SCHELK_MOUNT"; then
            sudo umount -l "$SCHELK_MOUNT" || true
            sudo schelk recover -y || true
          fi
          # Fresh scratch directory for this run's outputs.
          rm -rf "$BENCH_WORK_DIR"
          mkdir -p "$BENCH_WORK_DIR"
      - name: Download big blocks
        if: env.BENCH_BIG_BLOCKS == 'true'
        run: |
          set -euo pipefail
          # Fetch the pre-built "big blocks" payload archive from MinIO and
          # unpack it into the work dir (streamed: mc -> pzstd -> tar).
          MC="mc --config-dir /home/ubuntu/.mc"
          BUCKET="minio/reth-snapshots/reth-1-minimal-nightly-previous-big-blocks.tar.zst"
          BIG_BLOCKS_DIR="${BENCH_WORK_DIR}/big-blocks"
          rm -rf "$BIG_BLOCKS_DIR"; mkdir -p "$BIG_BLOCKS_DIR"
          echo "Downloading big blocks from $BUCKET..."
          # $MC is intentionally unquoted so the embedded --config-dir flag word-splits.
          $MC cat "$BUCKET" | pzstd -d -p 6 | tar -xf - -C "$BIG_BLOCKS_DIR"
          echo "Big blocks downloaded to $BIG_BLOCKS_DIR"
          # Verify expected directory structure
          if [ ! -d "$BIG_BLOCKS_DIR/gas-ramp-dir" ] || [ ! -d "$BIG_BLOCKS_DIR/payloads" ]; then
            echo "::error::Big blocks archive missing expected gas-ramp-dir/ or payloads/ directories"
            ls -laR "$BIG_BLOCKS_DIR"
            exit 1
          fi
          echo "Payload files: $(find "$BIG_BLOCKS_DIR/payloads" -name '*.json' | wc -l)"
      - name: Start metrics proxy
        run: |
          # Unique benchmark id + common time origin shared by all four runs;
          # exported via GITHUB_ENV so later steps (and the Grafana URL) see them.
          BENCH_ID="ci-${{ github.run_id }}"
          BENCH_REFERENCE_EPOCH=$(date +%s)
          echo "BENCH_ID=${BENCH_ID}" >> "$GITHUB_ENV"
          echo "BENCH_REFERENCE_EPOCH=${BENCH_REFERENCE_EPOCH}" >> "$GITHUB_ENV"
          # The proxy tags forwarded metrics with the labels in this file; each
          # benchmark run step rewrites it to identify the run in flight.
          LABELS_FILE="/tmp/bench-metrics-labels.json"
          echo '{}' > "$LABELS_FILE"
          echo "BENCH_LABELS_FILE=${LABELS_FILE}" >> "$GITHUB_ENV"
          # Backgrounded; PID is exported so the teardown step can kill it.
          python3 .github/scripts/bench-metrics-proxy.py \
            --labels "$LABELS_FILE" \
            --upstream "http://${BENCH_METRICS_ADDR}/" \
            --subnet 10.10.0.0/24 \
            --port 9090 &
          PROXY_PID=$!
          echo "BENCH_METRICS_PROXY_PID=${PROXY_PID}" >> "$GITHUB_ENV"
          echo "Metrics proxy started (PID $PROXY_PID)"
      # Edit the PR acknowledgement comment (if one exists) to show progress.
      - name: Update status (running benchmarks)
        if: success() && env.BENCH_COMMENT_ID
        uses: actions/github-script@v8
        with:
          github-token: ${{ secrets.DEREK_PAT }}
          script: |
            const s = require('./.github/scripts/bench-update-status.js');
            await s({github, context, status: 'Running benchmarks...'});
      # Interleaved run order (B-F-F-B) to reduce systematic bias from
      # thermal drift and cache warming.
      # Each run step first rewrites $BENCH_LABELS_FILE so the metrics proxy
      # tags scraped samples with this run's identity, then pins the bench
      # driver to core 0 (the housekeeping core) via taskset so it does not
      # compete with the node under test.
      - name: "Run benchmark: baseline (1/2)"
        id: run-baseline-1
        env:
          BASELINE_REF: ${{ steps.refs.outputs.baseline-ref }}
        run: |
          cat > "$BENCH_LABELS_FILE" <<LABELS
          {"benchmark_run":"baseline-1","run_type":"baseline","git_ref":"${BASELINE_REF}","bench_sha":"${BASELINE_REF}","benchmark_id":"${BENCH_ID}","run_start_epoch":"$(date +%s)","reference_epoch":"${BENCH_REFERENCE_EPOCH}"}
          LABELS
          taskset -c 0 .github/scripts/bench-reth-run.sh baseline ../reth-baseline/target/profiling/reth "$BENCH_WORK_DIR/baseline-1"
      - name: "Run benchmark: feature (1/2)"
        id: run-feature-1
        env:
          FEATURE_REF: ${{ steps.refs.outputs.feature-ref }}
        run: |
          cat > "$BENCH_LABELS_FILE" <<LABELS
          {"benchmark_run":"feature-1","run_type":"feature","git_ref":"${FEATURE_REF}","bench_sha":"${FEATURE_REF}","benchmark_id":"${BENCH_ID}","run_start_epoch":"$(date +%s)","reference_epoch":"${BENCH_REFERENCE_EPOCH}"}
          LABELS
          taskset -c 0 .github/scripts/bench-reth-run.sh feature ../reth-feature/target/profiling/reth "$BENCH_WORK_DIR/feature-1"
      - name: "Run benchmark: feature (2/2)"
        id: run-feature-2
        env:
          FEATURE_REF: ${{ steps.refs.outputs.feature-ref }}
        run: |
          cat > "$BENCH_LABELS_FILE" <<LABELS
          {"benchmark_run":"feature-2","run_type":"feature","git_ref":"${FEATURE_REF}","bench_sha":"${FEATURE_REF}","benchmark_id":"${BENCH_ID}","run_start_epoch":"$(date +%s)","reference_epoch":"${BENCH_REFERENCE_EPOCH}"}
          LABELS
          taskset -c 0 .github/scripts/bench-reth-run.sh feature ../reth-feature/target/profiling/reth "$BENCH_WORK_DIR/feature-2"
      - name: "Run benchmark: baseline (2/2)"
        id: run-baseline-2
        env:
          BASELINE_REF: ${{ steps.refs.outputs.baseline-ref }}
        run: |
          # Record the start of the final run; the metrics teardown step uses
          # it to size the Grafana time window.
          LAST_RUN_START=$(date +%s)
          echo "BENCH_LAST_RUN_START=${LAST_RUN_START}" >> "$GITHUB_ENV"
          cat > "$BENCH_LABELS_FILE" <<LABELS
          {"benchmark_run":"baseline-2","run_type":"baseline","git_ref":"${BASELINE_REF}","bench_sha":"${BASELINE_REF}","benchmark_id":"${BENCH_ID}","run_start_epoch":"${LAST_RUN_START}","reference_epoch":"${BENCH_REFERENCE_EPOCH}"}
          LABELS
          taskset -c 0 .github/scripts/bench-reth-run.sh baseline ../reth-baseline/target/profiling/reth "$BENCH_WORK_DIR/baseline-2"
| - name: Stop metrics proxy & generate Grafana URL | |
| id: metrics | |
| if: "!cancelled()" | |
| run: | | |
| kill "$BENCH_METRICS_PROXY_PID" 2>/dev/null || true | |
| LAST_RUN_DURATION=$(( $(date +%s) - BENCH_LAST_RUN_START )) | |
| FROM_MS=$(( BENCH_REFERENCE_EPOCH * 1000 )) | |
| TO_MS=$(( (BENCH_REFERENCE_EPOCH + LAST_RUN_DURATION) * 1000 )) | |
| GRAFANA_URL="https://tempoxyz.grafana.net/d/reth-bench-ghr/reth-bench-ghr?orgId=1&from=${FROM_MS}&to=${TO_MS}&timezone=browser&var-datasource=ef57fux92e9z4e&var-job=reth-bench&var-benchmark_id=${BENCH_ID}&var-benchmark_run=\$__all" | |
| echo "grafana-url=${GRAFANA_URL}" >> "$GITHUB_OUTPUT" | |
| echo "Grafana URL: ${GRAFANA_URL}" | |
      - name: Scan logs for errors
        if: "!cancelled()"
        run: |
          # Collect panic and ERROR lines from each node log into a markdown
          # fragment (errors.md) that the commenting steps append to the PR
          # comment / failure status.
          ERRORS_FILE="$BENCH_WORK_DIR/errors.md"
          found=false
          for run_dir in baseline-1 feature-1 feature-2 baseline-2; do
            LOG="$BENCH_WORK_DIR/$run_dir/node.log"
            if [ ! -f "$LOG" ]; then continue; fi
            # grep -c prints "0" but exits 1 on zero matches; "|| true" keeps
            # the default `bash -e` shell from killing the step.
            panics=$(grep -c -E 'panicked at' "$LOG" || true)
            errors=$(grep -c ' ERROR ' "$LOG" || true)
            if [ "$panics" -gt 0 ] || [ "$errors" -gt 0 ]; then
              # Emit the section header only once, before the first bad run.
              if [ "$found" = false ]; then
                printf '### ⚠️ Node Errors\n\n' >> "$ERRORS_FILE"
                found=true
              fi
              printf '<details><summary><b>%s</b>: %d panic(s), %d error(s)</summary>\n\n' "$run_dir" "$panics" "$errors" >> "$ERRORS_FILE"
              if [ "$panics" -gt 0 ]; then
                printf '**Panics:**\n```\n' >> "$ERRORS_FILE"
                grep -E 'panicked at' "$LOG" | head -10 >> "$ERRORS_FILE"
                printf '```\n' >> "$ERRORS_FILE"
              fi
              if [ "$errors" -gt 0 ]; then
                printf '**Errors (first 20):**\n```\n' >> "$ERRORS_FILE"
                grep ' ERROR ' "$LOG" | head -20 >> "$ERRORS_FILE"
                printf '```\n' >> "$ERRORS_FILE"
              fi
              printf '\n</details>\n\n' >> "$ERRORS_FILE"
            fi
          done
| - name: Upload samply profiles | |
| if: success() && env.BENCH_SAMPLY == 'true' | |
| run: | | |
| PROFILER_API="https://api.profiler.firefox.com" | |
| PROFILER_ACCEPT="Accept: application/vnd.firefox-profiler+json;version=1.0" | |
| for run_dir in baseline-1 baseline-2 feature-1 feature-2; do | |
| PROFILE="$BENCH_WORK_DIR/$run_dir/samply-profile.json.gz" | |
| if [ ! -f "$PROFILE" ]; then continue; fi | |
| PROFILE_SIZE=$(du -h "$PROFILE" | cut -f1) | |
| echo "Uploading $run_dir samply profile (${PROFILE_SIZE}) to Firefox Profiler..." | |
| # Upload compressed profile and get JWT back | |
| JWT=$(curl -sf -X POST \ | |
| -H "Content-Type: application/octet-stream" \ | |
| -H "$PROFILER_ACCEPT" \ | |
| --data-binary "@$PROFILE" \ | |
| "$PROFILER_API/compressed-store") || { | |
| echo "::warning::Failed to upload $run_dir profile to Firefox Profiler" | |
| continue | |
| } | |
| # Extract profileToken from JWT payload (header.payload.signature) | |
| PAYLOAD=$(echo "$JWT" | cut -d. -f2) | |
| # Fix base64 padding | |
| case $(( ${#PAYLOAD} % 4 )) in | |
| 2) PAYLOAD="${PAYLOAD}==" ;; | |
| 3) PAYLOAD="${PAYLOAD}=" ;; | |
| esac | |
| PROFILE_TOKEN=$(echo "$PAYLOAD" | base64 -d 2>/dev/null | python3 -c "import sys,json; print(json.load(sys.stdin)['profileToken'])") | |
| PROFILE_URL="https://profiler.firefox.com/public/${PROFILE_TOKEN}" | |
| echo "Profile uploaded: $PROFILE_URL" | |
| # Shorten the URL | |
| SHORT_URL=$(curl -sf -X POST \ | |
| -H "Content-Type: application/json" \ | |
| -H "$PROFILER_ACCEPT" \ | |
| -d "{\"longUrl\":\"$PROFILE_URL\"}" \ | |
| "$PROFILER_API/shorten" | python3 -c "import sys,json; print(json.load(sys.stdin)['shortUrl'])" 2>/dev/null) || SHORT_URL="$PROFILE_URL" | |
| echo "$SHORT_URL" > "$BENCH_WORK_DIR/$run_dir/samply-profile-url.txt" | |
| echo "Short profile URL for $run_dir: $SHORT_URL" | |
| done | |
      # Results & charts
      - name: Parse results
        id: results
        if: success()
        env:
          BASELINE_REF: ${{ steps.refs.outputs.baseline-ref }}
          BASELINE_NAME: ${{ steps.refs.outputs.baseline-name }}
          FEATURE_NAME: ${{ steps.refs.outputs.feature-name }}
          FEATURE_REF: ${{ steps.refs.outputs.feature-ref }}
        run: |
          # Count how many commits the benchmarked baseline ref is behind the
          # baseline branch tip, so a stale merge-base is visible in the summary.
          git fetch origin "${BASELINE_NAME}" --quiet 2>/dev/null || true
          BASELINE_HEAD=$(git rev-parse "origin/${BASELINE_NAME}" 2>/dev/null || echo "")
          BEHIND_BASELINE=0
          if [ -n "$BASELINE_HEAD" ] && [ "$BASELINE_REF" != "$BASELINE_HEAD" ]; then
            BEHIND_BASELINE=$(git rev-list --count "${BASELINE_REF}..${BASELINE_HEAD}" 2>/dev/null || echo "0")
          fi
          # Assemble the summary-script argument list as one string; it is
          # expanded unquoted below (SC2086) so it word-splits into args.
          SUMMARY_ARGS="--output-summary $BENCH_WORK_DIR/summary.json"
          SUMMARY_ARGS="$SUMMARY_ARGS --output-markdown $BENCH_WORK_DIR/comment.md"
          SUMMARY_ARGS="$SUMMARY_ARGS --repo ${{ github.repository }}"
          SUMMARY_ARGS="$SUMMARY_ARGS --baseline-ref ${BASELINE_REF}"
          SUMMARY_ARGS="$SUMMARY_ARGS --baseline-name ${BASELINE_NAME}"
          SUMMARY_ARGS="$SUMMARY_ARGS --feature-name ${FEATURE_NAME}"
          SUMMARY_ARGS="$SUMMARY_ARGS --feature-ref ${FEATURE_REF}"
          SUMMARY_ARGS="$SUMMARY_ARGS --baseline-csv $BENCH_WORK_DIR/baseline-1/combined_latency.csv $BENCH_WORK_DIR/baseline-2/combined_latency.csv"
          SUMMARY_ARGS="$SUMMARY_ARGS --feature-csv $BENCH_WORK_DIR/feature-1/combined_latency.csv $BENCH_WORK_DIR/feature-2/combined_latency.csv"
          SUMMARY_ARGS="$SUMMARY_ARGS --gas-csv $BENCH_WORK_DIR/feature-1/total_gas.csv"
          if [ "$BEHIND_BASELINE" -gt 0 ]; then
            SUMMARY_ARGS="$SUMMARY_ARGS --behind-baseline $BEHIND_BASELINE"
          fi
          if [ "${BENCH_BIG_BLOCKS:-false}" = "true" ]; then
            SUMMARY_ARGS="$SUMMARY_ARGS --big-blocks"
            # Read gas ramp blocks count from first baseline run (same for all runs)
            GAS_RAMP_FILE="$BENCH_WORK_DIR/baseline-1/gas_ramp_blocks.txt"
            if [ -f "$GAS_RAMP_FILE" ]; then
              SUMMARY_ARGS="$SUMMARY_ARGS --gas-ramp-blocks $(cat "$GAS_RAMP_FILE" | tr -d '[:space:]')"
            fi
          fi
          # Single quotes keep the URL literal ($__all etc. must not expand here).
          GRAFANA_URL='${{ steps.metrics.outputs.grafana-url }}'
          if [ -n "$GRAFANA_URL" ]; then
            SUMMARY_ARGS="$SUMMARY_ARGS --grafana-url $GRAFANA_URL"
          fi
          # shellcheck disable=SC2086
          python3 .github/scripts/bench-reth-summary.py $SUMMARY_ARGS
      - name: Generate charts
        if: success()
        env:
          BASELINE_NAME: ${{ steps.refs.outputs.baseline-name }}
          FEATURE_NAME: ${{ steps.refs.outputs.feature-name }}
        run: |
          # Render comparison PNGs from both runs of each side; written to
          # $BENCH_WORK_DIR/charts for the push-charts step.
          CHART_ARGS="--output-dir $BENCH_WORK_DIR/charts"
          CHART_ARGS="$CHART_ARGS --feature $BENCH_WORK_DIR/feature-1/combined_latency.csv $BENCH_WORK_DIR/feature-2/combined_latency.csv"
          CHART_ARGS="$CHART_ARGS --baseline $BENCH_WORK_DIR/baseline-1/combined_latency.csv $BENCH_WORK_DIR/baseline-2/combined_latency.csv"
          CHART_ARGS="$CHART_ARGS --baseline-name ${BASELINE_NAME}"
          CHART_ARGS="$CHART_ARGS --feature-name ${FEATURE_NAME}"
          # Unquoted on purpose so the arg string word-splits.
          # shellcheck disable=SC2086
          uv run --with matplotlib python3 .github/scripts/bench-reth-charts.py $CHART_ARGS
      # Archive all raw outputs (logs, CSVs, charts, errors.md) even on
      # failure, so runs can be inspected after the fact.
      - name: Upload results
        if: "!cancelled()"
        uses: actions/upload-artifact@v7
        with:
          name: bench-reth-results
          path: ${{ env.BENCH_WORK_DIR }}
| - name: Push charts | |
| id: push-charts | |
| if: success() | |
| run: | | |
| PR_NUMBER="${BENCH_PR:-0}" | |
| RUN_ID=${{ github.run_id }} | |
| CHART_DIR="pr/${PR_NUMBER}/${RUN_ID}" | |
| CHARTS_REPO="https://x-access-token:${{ secrets.DEREK_TOKEN }}@github.com/decofe/reth-bench-charts.git" | |
| TMP_DIR=$(mktemp -d) | |
| if git clone --depth 1 "${CHARTS_REPO}" "${TMP_DIR}" 2>/dev/null; then | |
| true | |
| else | |
| git init "${TMP_DIR}" | |
| git -C "${TMP_DIR}" remote add origin "${CHARTS_REPO}" | |
| fi | |
| mkdir -p "${TMP_DIR}/${CHART_DIR}" | |
| cp "$BENCH_WORK_DIR"/charts/*.png "${TMP_DIR}/${CHART_DIR}/" | |
| git -C "${TMP_DIR}" add "${CHART_DIR}" | |
| git -C "${TMP_DIR}" -c user.name="github-actions" -c user.email="github-actions@github.com" \ | |
| commit -m "bench charts for PR #${PR_NUMBER} run ${RUN_ID}" | |
| git -C "${TMP_DIR}" push origin HEAD:main | |
| echo "sha=$(git -C "${TMP_DIR}" rev-parse HEAD)" >> "$GITHUB_OUTPUT" | |
| rm -rf "${TMP_DIR}" | |
| - name: Compare & comment | |
| if: success() | |
| uses: actions/github-script@v8 | |
| with: | |
| github-token: ${{ secrets.DEREK_PAT }} | |
| script: | | |
| const fs = require('fs'); | |
| let comment = ''; | |
| try { | |
| comment = fs.readFileSync(process.env.BENCH_WORK_DIR + '/comment.md', 'utf8'); | |
| } catch (e) { | |
| comment = '⚠️ Engine benchmark completed but failed to generate comparison.'; | |
| } | |
| const sha = '${{ steps.push-charts.outputs.sha }}'; | |
| const prNumber = process.env.BENCH_PR || '0'; | |
| const runId = '${{ github.run_id }}'; | |
| if (sha) { | |
| const baseUrl = `https://raw.githubusercontent.com/decofe/reth-bench-charts/${sha}/pr/${prNumber}/${runId}`; | |
| const charts = [ | |
| { file: 'latency_throughput.png', label: 'Latency, Throughput & Diff' }, | |
| { file: 'wait_breakdown.png', label: 'Wait Time Breakdown' }, | |
| { file: 'gas_vs_latency.png', label: 'Gas vs Latency' }, | |
| ]; | |
| let chartMarkdown = '\n\n### Charts\n\n'; | |
| for (const chart of charts) { | |
| chartMarkdown += `<details><summary>${chart.label}</summary>\n\n`; | |
| chartMarkdown += `\n\n`; | |
| chartMarkdown += `</details>\n\n`; | |
| } | |
| comment += chartMarkdown; | |
| } | |
| // Samply profile links (URLs point directly to Firefox Profiler) | |
| if (process.env.BENCH_SAMPLY === 'true') { | |
| const runs = ['baseline-1', 'feature-1', 'feature-2', 'baseline-2']; | |
| const links = []; | |
| for (const run of runs) { | |
| try { | |
| const url = fs.readFileSync(`${process.env.BENCH_WORK_DIR}/${run}/samply-profile-url.txt`, 'utf8').trim(); | |
| if (url) { | |
| links.push(`- **${run}**: [Firefox Profiler](${url})`); | |
| } | |
| } catch (e) {} | |
| } | |
| if (links.length > 0) { | |
| comment += `\n\n### Samply Profiles\n\n${links.join('\n')}\n`; | |
| } | |
| } | |
| // Grafana dashboard link | |
| const grafanaUrl = '${{ steps.metrics.outputs.grafana-url }}'; | |
| if (grafanaUrl) { | |
| comment += `\n\n### Grafana Dashboard\n\n[View real-time metrics](${grafanaUrl})\n`; | |
| } | |
| // Node errors (panics / ERROR logs) | |
| try { | |
| const errors = fs.readFileSync(process.env.BENCH_WORK_DIR + '/errors.md', 'utf8'); | |
| if (errors.trim()) { | |
| comment += '\n\n' + errors; | |
| } | |
| } catch (e) {} | |
| const jobUrl = process.env.BENCH_JOB_URL || `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`; | |
| const body = `cc @${process.env.BENCH_ACTOR}\n\n✅ Benchmark complete! [View job](${jobUrl})\n\n${comment}`; | |
| const ackCommentId = process.env.BENCH_COMMENT_ID; | |
| if (ackCommentId) { | |
| await github.rest.issues.updateComment({ | |
| owner: context.repo.owner, | |
| repo: context.repo.repo, | |
| comment_id: parseInt(ackCommentId), | |
| body, | |
| }); | |
| } else { | |
| // No PR — write results to job summary | |
| await core.summary.addRaw(body).write(); | |
| } | |
      - name: Send Slack notification (success)
        if: success()
        uses: actions/github-script@v8
        env:
          SLACK_BENCH_BOT_TOKEN: ${{ secrets.SLACK_BENCH_BOT_TOKEN }}
          SLACK_BENCH_CHANNEL: ${{ secrets.SLACK_BENCH_CHANNEL }}
        with:
          script: |
            // Shared Slack helper; reads bot token / channel from the env above.
            const notify = require('./.github/scripts/bench-slack-notify.js');
            await notify.success({ core, context });
      # On failure, edit the PR acknowledgement comment with which step broke
      # and any collected node errors.
      - name: Update status (failed)
        if: failure() && env.BENCH_COMMENT_ID
        uses: actions/github-script@v8
        with:
          github-token: ${{ secrets.DEREK_PAT }}
          script: |
            // Step outcomes are baked in at workflow-expression time; the first
            // 'failure' entry names the step to report. Keep this list in sync
            // with the Slack failure step below.
            const steps_status = [
              ['building binaries${{ steps.snapshot-check.outputs.needed == 'true' && ' & downloading snapshot' || '' }}', '${{ steps.build.outcome }}'],
              ['running baseline benchmark (1/2)', '${{ steps.run-baseline-1.outcome }}'],
              ['running feature benchmark (1/2)', '${{ steps.run-feature-1.outcome }}'],
              ['running feature benchmark (2/2)', '${{ steps.run-feature-2.outcome }}'],
              ['running baseline benchmark (2/2)', '${{ steps.run-baseline-2.outcome }}'],
            ];
            const failed = steps_status.find(([, o]) => o === 'failure');
            const failedStep = failed ? failed[0] : 'unknown step';
            const fs = require('fs');
            let errorDetails = '';
            try {
              const errors = fs.readFileSync(process.env.BENCH_WORK_DIR + '/errors.md', 'utf8');
              if (errors.trim()) {
                errorDetails = '\n\n' + errors;
              }
            } catch (e) {} // errors.md only exists when problems were found
            const jobUrl = process.env.BENCH_JOB_URL || `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
            await github.rest.issues.updateComment({
              owner: context.repo.owner, repo: context.repo.repo,
              comment_id: parseInt(process.env.BENCH_COMMENT_ID),
              body: `cc @${process.env.BENCH_ACTOR}\n\n❌ Benchmark failed while ${failedStep}. [View logs](${jobUrl})${errorDetails}`,
            });
      - name: Send Slack notification (failure)
        if: failure()
        uses: actions/github-script@v8
        env:
          SLACK_BENCH_BOT_TOKEN: ${{ secrets.SLACK_BENCH_BOT_TOKEN }}
          SLACK_BENCH_CHANNEL: ${{ secrets.SLACK_BENCH_CHANNEL }}
        with:
          script: |
            // Same failed-step detection as "Update status (failed)" above;
            // duplicated because workflow steps cannot share script locals.
            const steps_status = [
              ['building binaries${{ steps.snapshot-check.outputs.needed == 'true' && ' & downloading snapshot' || '' }}', '${{ steps.build.outcome }}'],
              ['running baseline benchmark (1/2)', '${{ steps.run-baseline-1.outcome }}'],
              ['running feature benchmark (1/2)', '${{ steps.run-feature-1.outcome }}'],
              ['running feature benchmark (2/2)', '${{ steps.run-feature-2.outcome }}'],
              ['running baseline benchmark (2/2)', '${{ steps.run-baseline-2.outcome }}'],
            ];
            const failed = steps_status.find(([, o]) => o === 'failure');
            const failedStep = failed ? failed[0] : 'unknown step';
            const notify = require('./.github/scripts/bench-slack-notify.js');
            await notify.failure({ core, context, failedStep });
      # On cancellation, leave a terminal status on the PR comment so it does
      # not appear stuck at "Running benchmarks...".
      - name: Update status (cancelled)
        if: cancelled() && env.BENCH_COMMENT_ID
        uses: actions/github-script@v8
        with:
          github-token: ${{ secrets.DEREK_PAT }}
          script: |
            const jobUrl = process.env.BENCH_JOB_URL || `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
            await github.rest.issues.updateComment({
              owner: context.repo.owner, repo: context.repo.repo,
              comment_id: parseInt(process.env.BENCH_COMMENT_ID),
              body: `cc @${process.env.BENCH_ACTOR}\n\n⚠️ Benchmark cancelled. [View logs](${jobUrl})`,
            });
| - name: Restore system settings | |
| if: always() | |
| run: | | |
| sudo systemctl start irqbalance cron atd 2>/dev/null || true |