From a02c83d77f105125ffe7b8f84c5d8a030ab400fb Mon Sep 17 00:00:00 2001 From: alexghr Date: Tue, 10 Mar 2026 19:54:43 +0000 Subject: [PATCH 01/41] chore: fix proving logs script --- spartan/scripts/extract_proving_metrics.ts | 43 ++++++++++++++-------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/spartan/scripts/extract_proving_metrics.ts b/spartan/scripts/extract_proving_metrics.ts index 46d1d01ae966..4216145bfb17 100755 --- a/spartan/scripts/extract_proving_metrics.ts +++ b/spartan/scripts/extract_proving_metrics.ts @@ -79,6 +79,7 @@ const config = parseArgs(process.argv); interface LogEntry { timestamp: string; + trace?: string; jsonPayload?: { message?: string; [key: string]: any; @@ -88,7 +89,7 @@ interface LogEntry { function buildFilter( textFilter: string, - opts?: { module?: string; pod?: string }, + opts?: { module?: string; pod?: string; trace?: string }, ): string { const pod = opts?.pod ?? config.pod; let filter = @@ -101,13 +102,16 @@ function buildFilter( if (opts?.module) { filter += ` AND jsonPayload.module="${opts.module}"`; } + if (opts?.trace) { + filter += ` AND trace="${opts.trace}"`; + } return filter; } async function queryLogs( name: string, textFilter: string, - opts?: { module?: string; pod?: string }, + opts?: { module?: string; pod?: string; trace?: string }, ): Promise { const filter = buildFilter(textFilter, opts); const cmd = [ @@ -134,7 +138,7 @@ async function queryLogs( // ── Epoch auto-detection ───────────────────────────────────────────────────── -async function scanForEpoch(): Promise<{ start: string; end: string }> { +async function scanForEpoch(): Promise<{ start: string; end: string; trace?: string }> { process.stderr.write( `Scanning for epoch in ${config.start} to ${config.end}...\n\n`, ); @@ -151,6 +155,7 @@ async function scanForEpoch(): Promise<{ start: string; end: string }> { epoch: number; txCount: number; timestamp: string; + trace?: string; }[] = []; for (const entry of epochStarts) { const m = 
msg(entry); @@ -163,6 +168,7 @@ async function scanForEpoch(): Promise<{ start: string; end: string }> { epoch: parseInt(epochMatch[1]), txCount: p.epochSizeTxs ?? 0, timestamp: entry.timestamp, + trace: entry.trace, }); } } @@ -178,7 +184,7 @@ async function scanForEpoch(): Promise<{ start: string; end: string }> { process.stderr.write( `Warning: epoch ${config.epoch} not found in scan window. Using full window.\n`, ); - return { start: config.start, end: config.end }; + return { start: config.start, end: config.end, trace: undefined }; } } else { target = starts.find((s) => s.txCount >= 1); @@ -186,12 +192,12 @@ async function scanForEpoch(): Promise<{ start: string; end: string }> { process.stderr.write( `Warning: no epoch with >=1 tx found in scan window. Using full window.\n`, ); - return { start: config.start, end: config.end }; + return { start: config.start, end: config.end, trace: undefined }; } } process.stderr.write( - `Found epoch ${target.epoch} (${target.txCount} txs) at ${target.timestamp}\n`, + `Found epoch ${target.epoch} (${target.txCount} txs) at ${target.timestamp}${target.trace ? 
` trace=${target.trace}` : ""}\n`, ); // Find matching finalized entry @@ -226,7 +232,7 @@ async function scanForEpoch(): Promise<{ start: string; end: string }> { `Narrowed window: ${narrowedStart} to ${narrowedEnd}\n\n`, ); - return { start: narrowedStart, end: narrowedEnd }; + return { start: narrowedStart, end: narrowedEnd, trace: target.trace }; } // ── Pipeline order for proving job types ───────────────────────────────────── @@ -249,11 +255,15 @@ const PIPELINE_ORDER = [ // ── Query definitions ──────────────────────────────────────────────────────── -async function fetchAllData() { +async function fetchAllData(trace?: string) { process.stderr.write( `Fetching logs for ${config.pod} in ${config.namespace}\n`, ); - process.stderr.write(`Time range: ${config.start} to ${config.end}\n\n`); + process.stderr.write(`Time range: ${config.start} to ${config.end}\n`); + if (trace) { + process.stderr.write(`Trace filter: ${trace}\n`); + } + process.stderr.write("\n"); const brokerPod = `${config.namespace}-prover-broker-0`; @@ -268,15 +278,16 @@ async function fetchAllData() { brokerNewJobs, brokerCompleteJobs, ] = await Promise.all([ - queryLogs("epoch-start", "Starting epoch.*proving job"), - queryLogs("blob-fields", "Blob fields per checkpoint"), - queryLogs("blob-batching", "Final blob batching"), + queryLogs("epoch-start", "Starting epoch.*proving job", { trace }), + queryLogs("blob-fields", "Blob fields per checkpoint", { trace }), + queryLogs("blob-batching", "Final blob batching", { trace }), queryLogs("starting-block", "Starting block", { module: "prover-client:orchestrator", + trace, }), - queryLogs("processed-txs", "Processed.*successful txs"), - queryLogs("adding-txs", "Adding.*transactions to block"), - queryLogs("epoch-finalized", "Finalized proof for epoch"), + queryLogs("processed-txs", "Processed.*successful txs", { trace }), + queryLogs("adding-txs", "Adding.*transactions to block", { trace }), + queryLogs("epoch-finalized", "Finalized proof for 
epoch", { trace }), queryLogs("broker-new-jobs", "New proving job", { pod: brokerPod }), queryLogs("broker-complete-jobs", "Proving job complete", { pod: brokerPod, @@ -841,7 +852,7 @@ async function main() { config.start = scanResult.start; config.end = scanResult.end; - const data = await fetchAllData(); + const data = await fetchAllData(scanResult.trace); const output = formatOutput(data); console.log(output); } From 5eedb7dcb3f2ae724bd8bd33d323098b18f4e1ec Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Mon, 16 Mar 2026 10:57:26 -0300 Subject: [PATCH 02/41] fix(validator): process block proposals from own validator keys in HA setups Co-Authored-By: Claude Opus 4.6 (1M context) --- .../aztec-node/src/aztec-node/server.ts | 3 + .../src/e2e_epochs/epochs_ha_sync.test.ts | 203 ++++++++++++++++++ .../end-to-end/src/e2e_epochs/epochs_test.ts | 15 +- .../src/sequencer/checkpoint_proposal_job.ts | 85 ++++---- yarn-project/validator-client/src/factory.ts | 3 + .../validator-client/src/validator.test.ts | 20 +- .../validator-client/src/validator.ts | 22 +- .../validator-ha-signer/src/factory.ts | 32 +++ 8 files changed, 337 insertions(+), 46 deletions(-) create mode 100644 yarn-project/end-to-end/src/e2e_epochs/epochs_ha_sync.test.ts diff --git a/yarn-project/aztec-node/src/aztec-node/server.ts b/yarn-project/aztec-node/src/aztec-node/server.ts index e6c2108e985d..b5ffef135e46 100644 --- a/yarn-project/aztec-node/src/aztec-node/server.ts +++ b/yarn-project/aztec-node/src/aztec-node/server.ts @@ -111,6 +111,7 @@ import { createBlockProposalHandler, createValidatorClient, } from '@aztec/validator-client'; +import type { SlashingProtectionDatabase } from '@aztec/validator-ha-signer/types'; import { createWorldStateSynchronizer } from '@aztec/world-state'; import { createPublicClient } from 'viem'; @@ -195,6 +196,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { dateProvider?: DateProvider; p2pClientDeps?: P2PClientDeps; 
proverNodeDeps?: Partial; + slashingProtectionDb?: SlashingProtectionDatabase; } = {}, options: { prefilledPublicData?: PublicDataTreeLeaf[]; @@ -377,6 +379,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { l1ToL2MessageSource: archiver, keyStoreManager, blobClient, + slashingProtectionDb: deps.slashingProtectionDb, }); // If we have a validator client, register it as a source of offenses for the slasher, diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_ha_sync.test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_ha_sync.test.ts new file mode 100644 index 000000000000..fa8b97f02252 --- /dev/null +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_ha_sync.test.ts @@ -0,0 +1,203 @@ +import type { Archiver } from '@aztec/archiver'; +import type { AztecNodeService } from '@aztec/aztec-node'; +import { AztecAddress, EthAddress } from '@aztec/aztec.js/addresses'; +import { NO_WAIT } from '@aztec/aztec.js/contracts'; +import { Fr } from '@aztec/aztec.js/fields'; +import type { Logger } from '@aztec/aztec.js/log'; +import { RollupContract } from '@aztec/ethereum/contracts'; +import type { Operator } from '@aztec/ethereum/deploy-aztec-l1-contracts'; +import { BlockNumber, SlotNumber } from '@aztec/foundation/branded-types'; +import { times, timesAsync } from '@aztec/foundation/collection'; +import { SecretValue } from '@aztec/foundation/config'; +import { retryUntil } from '@aztec/foundation/retry'; +import { bufferToHex } from '@aztec/foundation/string'; +import { TestContract } from '@aztec/noir-test-contracts.js/Test'; +import { getTimestampForSlot } from '@aztec/stdlib/epoch-helpers'; +import { createSharedSlashingProtectionDb } from '@aztec/validator-ha-signer/factory'; + +import { jest } from '@jest/globals'; +import { privateKeyToAccount } from 'viem/accounts'; + +import { type EndToEndContext, getPrivateKeyFromIndex } from '../fixtures/utils.js'; +import { TestWallet } from '../test-wallet/test_wallet.js'; +import { 
proveInteraction } from '../test-wallet/utils.js'; +import { EpochsTestContext } from './epochs_test.js'; + +jest.setTimeout(1000 * 60 * 20); + +const VALIDATOR_COUNT = 4; +const TX_COUNT = 6; + +/** + * E2E test for HA (High Availability) proposed chain sync. + * Verifies that nodes sharing validator keys with the proposer still process + * block proposals and sync to the proposed chain, rather than ignoring them. + */ +describe('e2e_epochs/epochs_ha_sync', () => { + let context: EndToEndContext; + let logger: Logger; + let rollup: RollupContract; + + let test: EpochsTestContext; + let validators: (Operator & { privateKey: `0x${string}` })[]; + let nodes: AztecNodeService[]; + let contract: TestContract; + let wallet: TestWallet; + let from: AztecAddress; + + async function setupTest() { + validators = times(VALIDATOR_COUNT, i => { + const privateKey = bufferToHex(getPrivateKeyFromIndex(i + 3)!); + const attester = EthAddress.fromString(privateKeyToAccount(privateKey).address); + return { attester, withdrawer: attester, privateKey, bn254SecretKey: new SecretValue(Fr.random().toBigInt()) }; + }); + + // Do NOT set skipPublishingCheckpointsPercent here: the initial sequencer needs to + // publish checkpoints during setup (account deployment). We disable it per-validator-node below. + test = await EpochsTestContext.setup({ + numberOfAccounts: 1, + initialValidators: validators, + mockGossipSubNetwork: true, + disableAnvilTestWatcher: true, + aztecEpochDuration: 4, + enforceTimeTable: true, + ethereumSlotDuration: 4, + aztecSlotDuration: 36, + blockDurationMs: 8000, + l1PublishingTime: 2, + attestationPropagationTime: 0.5, + aztecTargetCommitteeSize: VALIDATOR_COUNT, + minTxsPerBlock: 1, + maxTxsPerBlock: 2, + pxeOpts: { syncChainTip: 'proposed' }, + }); + + ({ context, logger, rollup } = test); + wallet = context.wallet; + from = context.accounts[0]; + + // Stop the initial non-validator sequencer. 
+ logger.warn(`Stopping sequencer in initial aztec node.`); + await context.sequencer!.stop(); + + // Create 4 nodes in 2 HA pairs: each pair shares the same two validator keys. + const pk1 = validators[0].privateKey; + const pk2 = validators[1].privateKey; + const pk3 = validators[2].privateKey; + const pk4 = validators[3].privateKey; + + // Disable checkpoint publishing on validator nodes so we can assert proposed chain sync + // strictly before any checkpoint is published by the validators. + // Use different coinbase addresses per node so HA peers would build different blocks + // if the proposer's block isn't correctly propagated to its HA peer. + // Each HA pair shares a slashing protection DB so only one peer can sign per duty. + const baseOpts = { dontStartSequencer: true, skipPublishingCheckpointsPercent: 100 } as const; + const sharedDb1 = await createSharedSlashingProtectionDb(context.dateProvider); + const sharedDb2 = await createSharedSlashingProtectionDb(context.dateProvider); + + logger.warn(`Creating 4 validator nodes in 2 HA pairs.`); + nodes = [ + await test.createValidatorNode([pk1, pk2], { + ...baseOpts, + coinbase: EthAddress.fromNumber(1), + slashingProtectionDb: sharedDb1, + }), + await test.createValidatorNode([pk1, pk2], { + ...baseOpts, + coinbase: EthAddress.fromNumber(2), + slashingProtectionDb: sharedDb1, + }), + await test.createValidatorNode([pk3, pk4], { + ...baseOpts, + coinbase: EthAddress.fromNumber(3), + slashingProtectionDb: sharedDb2, + }), + await test.createValidatorNode([pk3, pk4], { + ...baseOpts, + coinbase: EthAddress.fromNumber(4), + slashingProtectionDb: sharedDb2, + }), + ]; + logger.warn(`Created 4 validator nodes.`); + + // Point the wallet at a validator node so it tracks proposed blocks. + wallet.updateNode(nodes[0]); + + // Register contract for sending txs. 
+ contract = await test.registerTestContract(wallet); + logger.warn(`Test setup completed.`); + } + + afterEach(async () => { + jest.restoreAllMocks(); + await test?.teardown(); + }); + + it('HA peers sync to proposed chain from proposals signed by their own validator keys', async () => { + await setupTest(); + + // Record the checkpoint state after setup. Validators must produce proposed blocks + // beyond this point for the test to be meaningful. + const allArchivers = nodes.map(n => n.getBlockSource() as Archiver); + const initialCheckpointNumber = await rollup.getCheckpointNumber(); + const initialCheckpointedBlock = (await allArchivers[0].getL2Tips()).checkpointed.block.number; + logger.warn(`Initial state: checkpoint ${initialCheckpointNumber}, checkpointed block ${initialCheckpointedBlock}`); + + // Pre-prove and send transactions. + const txs = await timesAsync(TX_COUNT, i => + proveInteraction(context.wallet, contract.methods.emit_nullifier(new Fr(i + 1)), { from }), + ); + const txHashes = await Promise.all(txs.map(tx => tx.send({ wait: NO_WAIT }))); + logger.warn(`Sent ${txHashes.length} transactions.`); + + // Warp to 1 L1 slot before the start of the next L2 slot, so sequencers start cleanly. + const currentSlot = await rollup.getSlotNumber(); + const nextSlot = SlotNumber(currentSlot + 1); + const nextSlotTimestamp = getTimestampForSlot(nextSlot, test.constants); + await context.cheatCodes.eth.warp(Number(nextSlotTimestamp) - test.L1_BLOCK_TIME_IN_S, { + resetBlockInterval: true, + }); + logger.warn(`Warped to 1 L1 slot before L2 slot ${nextSlot}.`); + + // Start the sequencers on all nodes. + await Promise.all(nodes.map(n => n.getSequencer()!.start())); + logger.warn(`Started all sequencers.`); + + // Wait until all nodes have proposed blocks strictly beyond the checkpointed tip. + // This ensures we're checking blocks produced by validators via P2P proposals, + // not blocks synced from L1 checkpoints during setup. 
+ await retryUntil( + async () => { + const tips = await Promise.all(allArchivers.map(a => a.getL2Tips())); + return tips.every( + t => t.proposed.number > initialCheckpointedBlock && t.proposed.number > t.checkpointed.block.number, + ); + }, + 'all nodes to sync proposed blocks beyond checkpointed tip', + test.L2_SLOT_DURATION_IN_S * 5, + 0.5, + ); + + logger.warn(`All nodes synced proposed blocks beyond checkpointed tip`); + + // Take the smallest proposed tip across all nodes and verify the block hash matches on all of them. + // This block is strictly proposed (not checkpointed), so it must have arrived via P2P. + const tips = await Promise.all(allArchivers.map(a => a.getL2Tips())); + const proposedNumbers = tips.map(t => t.proposed.number); + const minProposed = BlockNumber(Math.min(...proposedNumbers)); + expect(minProposed).toBeGreaterThan(initialCheckpointedBlock); + logger.warn(`Verifying block hashes at proposed block ${minProposed}.`, { proposedNumbers }); + + const headers = await Promise.all(allArchivers.map(a => a.getBlockHeader(minProposed))); + const hashes = await Promise.all(headers.map(h => h!.hash())); + for (let i = 1; i < hashes.length; i++) { + expect(hashes[i].toString()).toBe(hashes[0].toString()); + } + logger.warn(`All 4 nodes agree on block hash at proposed block ${minProposed}.`); + + // Verify that no new checkpoints have been published by validators (we disabled checkpoint publishing). 
+ const currentCheckpointNumber = await rollup.getCheckpointNumber(); + expect(currentCheckpointNumber).toBe(initialCheckpointNumber); + logger.warn(`Verified no new checkpoints were published.`); + }); +}); diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_test.ts index 0068706853bd..1a38395c9df3 100644 --- a/yarn-project/end-to-end/src/e2e_epochs/epochs_test.ts +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_test.ts @@ -28,6 +28,7 @@ import { type SequencerClient, type SequencerEvents, SequencerState } from '@azt import { type BlockParameter, EthAddress } from '@aztec/stdlib/block'; import { type L1RollupConstants, getProofSubmissionDeadlineTimestamp } from '@aztec/stdlib/epoch-helpers'; import { tryStop } from '@aztec/stdlib/interfaces/server'; +import type { SlashingProtectionDatabase } from '@aztec/validator-ha-signer/types'; import { join } from 'path'; import type { Hex } from 'viem'; @@ -238,13 +239,21 @@ export class EpochsTestContext { public createValidatorNode( privateKeys: `0x${string}`[], - opts: Partial & { dontStartSequencer?: boolean } = {}, + opts: Partial & { + dontStartSequencer?: boolean; + slashingProtectionDb?: SlashingProtectionDatabase; + } = {}, ) { this.logger.warn('Creating and syncing a validator node...'); return this.createNode({ ...opts, disableValidator: false, validatorPrivateKeys: new SecretValue(privateKeys) }); } - private async createNode(opts: Partial & { dontStartSequencer?: boolean } = {}) { + private async createNode( + opts: Partial & { + dontStartSequencer?: boolean; + slashingProtectionDb?: SlashingProtectionDatabase; + } = {}, + ) { const nodeIndex = this.nodes.length + 1; const actorPrefix = opts.disableValidator ? 
'node' : 'validator'; const { mockGossipSubNetwork } = this.context; @@ -257,6 +266,7 @@ export class EpochsTestContext { ...resolvedConfig, dataDirectory: join(this.context.config.dataDirectory!, randomBytes(8).toString('hex')), validatorPrivateKeys: opts.validatorPrivateKeys ?? new SecretValue([]), + nodeId: resolvedConfig.nodeId || `${actorPrefix}-${nodeIndex}`, p2pEnabled, p2pIp, }, @@ -265,6 +275,7 @@ export class EpochsTestContext { p2pClientDeps: { p2pServiceFactory: mockGossipSubNetwork ? getMockPubSubP2PServiceFactory(mockGossipSubNetwork) : undefined, }, + slashingProtectionDb: opts.slashingProtectionDb, }, { prefilledPublicData: this.context.prefilledPublicData, diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts index e3a50a720d1b..97cbdd8887ab 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts @@ -40,7 +40,12 @@ import { type WorldStateSynchronizer, } from '@aztec/stdlib/interfaces/server'; import { type L1ToL2MessageSource, computeInHashFromL1ToL2Messages } from '@aztec/stdlib/messaging'; -import type { BlockProposalOptions, CheckpointProposal, CheckpointProposalOptions } from '@aztec/stdlib/p2p'; +import type { + BlockProposal, + BlockProposalOptions, + CheckpointProposal, + CheckpointProposalOptions, +} from '@aztec/stdlib/p2p'; import { orderAttestations, trimAttestations } from '@aztec/stdlib/p2p'; import type { L2BlockBuiltStats } from '@aztec/stdlib/stats'; import { type FailedTx, Tx } from '@aztec/stdlib/tx'; @@ -402,6 +407,7 @@ export class CheckpointProposalJob implements Traceable { const blocksInCheckpoint: L2Block[] = []; const txHashesAlreadyIncluded = new Set(); const initialBlockNumber = BlockNumber(this.syncedToBlockNumber + 1); + const slot = this.slot; // Last block in the checkpoint will usually be flagged as pending 
broadcast, so we send it along with the checkpoint proposal let blockPendingBroadcast: { block: L2Block; txs: Tx[] } | undefined = undefined; @@ -415,11 +421,7 @@ export class CheckpointProposalJob implements Traceable { const timingInfo = this.timetable.canStartNextBlock(secondsIntoSlot); if (!timingInfo.canStart) { - this.log.debug(`Not enough time left in slot to start another block`, { - slot: this.slot, - blocksBuilt, - secondsIntoSlot, - }); + this.log.debug(`Not enough time left in slot to start another block`, { slot, blocksBuilt, secondsIntoSlot }); break; } @@ -451,50 +453,37 @@ export class CheckpointProposalJob implements Traceable { } else if ('error' in buildResult) { // If there was an error building the block, just exit the loop and give up the rest of the slot if (!(buildResult.error instanceof SequencerInterruptedError)) { - this.log.warn(`Halting block building for slot ${this.slot}`, { - slot: this.slot, - blocksBuilt, - error: buildResult.error, - }); + this.log.warn(`Halting block building for slot ${slot}`, { slot, blocksBuilt, error: buildResult.error }); } break; } const { block, usedTxs } = buildResult; blocksInCheckpoint.push(block); - - // Sync the proposed block to the archiver to make it available - // We wait for the sync to succeed, as this helps catch consistency errors, even if it means we lose some time for block-building - // If this throws, we abort the entire checkpoint - await this.syncProposedBlockToArchiver(block); - usedTxs.forEach(tx => txHashesAlreadyIncluded.add(tx.txHash.toString())); - // If this is the last block, exit the loop now so we start collecting attestations + // If this is the last block, send the proposed block to the archiver, + // and exit the loop now so we can build the checkpoint and start collecting attestations. 
if (timingInfo.isLastBlock) { - this.log.verbose(`Completed final block ${blockNumber} for slot ${this.slot}`, { - slot: this.slot, - blockNumber, - blocksBuilt, - }); + await this.syncProposedBlockToArchiver(block); + this.log.verbose(`Completed final block ${blockNumber} for slot ${slot}`, { slot, blockNumber, blocksBuilt }); blockPendingBroadcast = { block, txs: usedTxs }; break; } - // For non-last blocks, broadcast the block proposal (unless we're in fisherman mode) - // If the block is the last one, we'll broadcast it along with the checkpoint at the end of the loop - if (!this.config.fishermanMode) { - const proposal = await this.validatorClient.createBlockProposal( - block.header, - block.indexWithinCheckpoint, - inHash, - block.archive.root, - usedTxs, - this.proposer, - blockProposalOptions, - ); - await this.p2pClient.broadcastProposal(proposal); - } + // Broadcast the block proposal (unless we're in fisherman mode) unless the block is the last one, + // in which case we'll broadcast it along with the checkpoint at the end of the loop. + // Note that we only send the block to the archiver if we manage to create the proposal, so if there's + // a HA error we don't pollute our archiver with a block that won't make it to the chain. + const proposal = await this.createBlockProposal(block, inHash, usedTxs, blockProposalOptions); + + // Sync the proposed block to the archiver to make it available, only after we've managed to sign the proposal. + // We wait for the sync to succeed, as this helps catch consistency errors, even if it means we lose some time for block-building. + // If this throws, we abort the entire checkpoint. + await this.syncProposedBlockToArchiver(block); + + // Once we have a signed proposal and the archiver agreed with our proposed block, then we broadcast it. 
+ proposal && (await this.p2pClient.broadcastProposal(proposal)); // Wait until the next block's start time await this.waitUntilNextSubslot(timingInfo.deadline); @@ -508,6 +497,28 @@ export class CheckpointProposalJob implements Traceable { return { blocksInCheckpoint, blockPendingBroadcast }; } + /** Creates a block proposal for a given block via the validator client (unless in fisherman mode) */ + private createBlockProposal( + block: L2Block, + inHash: Fr, + usedTxs: Tx[], + blockProposalOptions: BlockProposalOptions, + ): Promise { + if (this.config.fishermanMode) { + this.log.info(`Skipping block proposal for block ${block.number} in fisherman mode`); + return Promise.resolve(undefined); + } + return this.validatorClient.createBlockProposal( + block.header, + block.indexWithinCheckpoint, + inHash, + block.archive.root, + usedTxs, + this.proposer, + blockProposalOptions, + ); + } + /** Sleeps until it is time to produce the next block in the slot */ @trackSpan('CheckpointProposalJob.waitUntilNextSubslot') private async waitUntilNextSubslot(nextSubslotStart: number) { diff --git a/yarn-project/validator-client/src/factory.ts b/yarn-project/validator-client/src/factory.ts index b7645d48c485..3d92e338716e 100644 --- a/yarn-project/validator-client/src/factory.ts +++ b/yarn-project/validator-client/src/factory.ts @@ -7,6 +7,7 @@ import type { L2BlockSink, L2BlockSource } from '@aztec/stdlib/block'; import type { ValidatorClientFullConfig, WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server'; import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging'; import type { TelemetryClient } from '@aztec/telemetry-client'; +import type { SlashingProtectionDatabase } from '@aztec/validator-ha-signer/types'; import { BlockProposalHandler } from './block_proposal_handler.js'; import type { FullNodeCheckpointsBuilder } from './checkpoint_builder.js'; @@ -59,6 +60,7 @@ export function createValidatorClient( epochCache: EpochCache; keyStoreManager: 
KeystoreManager | undefined; blobClient: BlobClientInterface; + slashingProtectionDb?: SlashingProtectionDatabase; }, ) { if (config.disableValidator || !deps.keyStoreManager) { @@ -79,5 +81,6 @@ export function createValidatorClient( deps.blobClient, deps.dateProvider, deps.telemetry, + deps.slashingProtectionDb, ); } diff --git a/yarn-project/validator-client/src/validator.test.ts b/yarn-project/validator-client/src/validator.test.ts index ab751b9b33ba..804869df8474 100644 --- a/yarn-project/validator-client/src/validator.test.ts +++ b/yarn-project/validator-client/src/validator.test.ts @@ -92,6 +92,7 @@ describe('ValidatorClient', () => { let checkpointsBuilder: MockProxy; let worldState: MockProxy; let validatorAccounts: PrivateKeyAccount[]; + let validatorPrivateKeys: `0x${string}`[]; let dateProvider: TestDateProvider; let txProvider: MockProxy; let keyStoreManager: KeystoreManager; @@ -135,7 +136,7 @@ describe('ValidatorClient', () => { haKeyStore.start.mockImplementation(() => Promise.resolve()); haKeyStore.stop.mockImplementation(() => Promise.resolve()); - const validatorPrivateKeys = [generatePrivateKey(), generatePrivateKey()]; + validatorPrivateKeys = [generatePrivateKey(), generatePrivateKey()]; validatorAccounts = validatorPrivateKeys.map(privateKey => privateKeyToAccount(privateKey)); haKeyStore.getAddresses.mockReturnValue(validatorAccounts.map(account => EthAddress.fromString(account.address))); @@ -387,6 +388,23 @@ describe('ValidatorClient', () => { expect(isValid).toBe(true); }); + it('should process block proposal from own validator key (HA peer)', async () => { + const selfSigner = new Secp256k1Signer(Buffer32.fromString(validatorPrivateKeys[0])); + const emptyInHash = computeInHashFromL1ToL2Messages([]); + const selfProposal = await makeBlockProposal({ + blockHeader: proposal.blockHeader, + inHash: emptyInHash, + signer: selfSigner, + }); + + epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(selfSigner.address); + + const 
handleSpy = jest.spyOn(validatorClient.getBlockProposalHandler(), 'handleBlockProposal'); + const isValid = await validatorClient.validateBlockProposal(selfProposal, sender); + expect(isValid).toBe(true); + expect(handleSpy).toHaveBeenCalled(); + }); + it('should return early when escape hatch is open', async () => { epochCache.isEscapeHatchOpenAtSlot.mockResolvedValueOnce(true); diff --git a/yarn-project/validator-client/src/validator.ts b/yarn-project/validator-client/src/validator.ts index 60ee9c8ea2d2..2691c7a749e7 100644 --- a/yarn-project/validator-client/src/validator.ts +++ b/yarn-project/validator-client/src/validator.ts @@ -46,8 +46,12 @@ import type { CheckpointHeader } from '@aztec/stdlib/rollup'; import type { BlockHeader, CheckpointGlobalVariables, Tx } from '@aztec/stdlib/tx'; import { AttestationTimeoutError } from '@aztec/stdlib/validators'; import { type TelemetryClient, type Tracer, getTelemetryClient } from '@aztec/telemetry-client'; -import { createHASigner, createLocalSignerWithProtection } from '@aztec/validator-ha-signer/factory'; -import { DutyType, type SigningContext } from '@aztec/validator-ha-signer/types'; +import { + createHASigner, + createLocalSignerWithProtection, + createSignerFromSharedDb, +} from '@aztec/validator-ha-signer/factory'; +import { DutyType, type SigningContext, type SlashingProtectionDatabase } from '@aztec/validator-ha-signer/types'; import type { ValidatorHASigner } from '@aztec/validator-ha-signer/validator-ha-signer'; import { EventEmitter } from 'events'; @@ -197,6 +201,7 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) blobClient: BlobClientInterface, dateProvider: DateProvider = new DateProvider(), telemetry: TelemetryClient = getTelemetryClient(), + slashingProtectionDb?: SlashingProtectionDatabase, ) { const metrics = new ValidatorMetrics(telemetry); const blockProposalValidator = new BlockProposalValidator(epochCache, { @@ -219,7 +224,13 @@ export class ValidatorClient 
extends (EventEmitter as new () => WatcherEmitter) const nodeKeystoreAdapter = NodeKeystoreAdapter.fromKeyStoreManager(keyStoreManager); let slashingProtectionSigner: ValidatorHASigner; - if (config.haSigningEnabled) { + if (slashingProtectionDb) { + // Shared database mode: use a pre-existing database (e.g. for testing HA setups). + ({ signer: slashingProtectionSigner } = createSignerFromSharedDb(slashingProtectionDb, config, { + telemetryClient: telemetry, + dateProvider, + })); + } else if (config.haSigningEnabled) { // Multi-node HA mode: use PostgreSQL-backed distributed locking. // If maxStuckDutiesAgeMs is not explicitly set, compute it from Aztec slot duration const haConfig = { @@ -378,13 +389,12 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) return false; } - // Ignore proposals from ourselves (may happen in HA setups) + // Log self-proposals from HA peers (same validator key on different nodes) if (this.getValidatorAddresses().some(addr => addr.equals(proposer))) { - this.log.debug(`Ignoring block proposal from self for slot ${slotNumber}`, { + this.log.verbose(`Processing block proposal from HA peer for slot ${slotNumber}`, { proposer: proposer.toString(), slotNumber, }); - return false; } // Check if we're in the committee (for metrics purposes) diff --git a/yarn-project/validator-ha-signer/src/factory.ts b/yarn-project/validator-ha-signer/src/factory.ts index 3f9e09e69f00..11540567f38b 100644 --- a/yarn-project/validator-ha-signer/src/factory.ts +++ b/yarn-project/validator-ha-signer/src/factory.ts @@ -137,3 +137,35 @@ export async function createLocalSignerWithProtection( return { signer, db }; } + +/** + * Create an in-memory LMDB-backed SlashingProtectionDatabase that can be shared across + * multiple validator nodes in the same process. Used for testing HA setups. 
+ */
+export async function createSharedSlashingProtectionDb(
+  dateProvider: DateProvider = new DateProvider(),
+): Promise {
+  const kvStore = await createStore('shared-signing-protection', LmdbSlashingProtectionDatabase.SCHEMA_VERSION, {
+    dataStoreMapSizeKb: 1024 * 1024,
+  });
+  return new LmdbSlashingProtectionDatabase(kvStore, dateProvider);
+}
+
+/**
+ * Create a ValidatorHASigner backed by a pre-existing SlashingProtectionDatabase.
+ * Used for testing HA setups where multiple nodes share the same protection database.
+ */
+export function createSignerFromSharedDb(
+  db: SlashingProtectionDatabase,
+  config: Pick<
+    ValidatorHASignerConfig,
+    'nodeId' | 'pollingIntervalMs' | 'signingTimeoutMs' | 'maxStuckDutiesAgeMs' | 'l1Contracts'
+  >,
+  deps?: CreateLocalSignerWithProtectionDeps,
+): { signer: ValidatorHASigner; db: SlashingProtectionDatabase } {
+  const telemetryClient = deps?.telemetryClient ?? getTelemetryClient();
+  const dateProvider = deps?.dateProvider ?? new DateProvider();
+  const metrics = new HASignerMetrics(telemetryClient, config.nodeId, 'SharedSigningProtectionMetrics');
+  const signer = new ValidatorHASigner(db, config, { metrics, dateProvider });
+  return { signer, db };
+}

From 0daebe9924cacf20c1330846e26b03637094b195 Mon Sep 17 00:00:00 2001
From: danielntmd
Date: Mon, 16 Mar 2026 17:38:51 +0000
Subject: [PATCH 03/41] fix: tx collector bench test

Wire peerFailedBanTimeMs as new env and set tx collector test ban time to
5 minutes -> 5 seconds.

The test would flake due to timeout and aggregation of peers took 1 full
minute on attempting to get peers per subtest despite never obtaining all
peers. This is because the peer dial is serialized and limited to 5 for
this test and peers may dial repeatedly without success then get banned
for 5 minutes, never being able to reconnect within the 1 minute wait.

This should allow all peers to connect in time and lower the 1 minute
timeout, resulting in fewer timeouts overall for the test.
--- yarn-project/foundation/src/config/env_var.ts | 1 + .../p2p_client.proposal_tx_collector.bench.test.ts | 1 + yarn-project/p2p/src/config.ts | 8 ++++++++ .../p2p/src/services/peer-manager/peer_manager.ts | 5 +++-- 4 files changed, 13 insertions(+), 2 deletions(-) diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index 6cd085cb9e16..f692b1a4e0dd 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -131,6 +131,7 @@ export type EnvVar = | 'P2P_L2_QUEUE_SIZE' | 'P2P_MAX_PEERS' | 'P2P_PEER_CHECK_INTERVAL_MS' + | 'P2P_PEER_FAILED_BAN_TIME_MS' | 'P2P_PEER_PENALTY_VALUES' | 'P2P_QUERY_FOR_IP' | 'P2P_REQRESP_INDIVIDUAL_REQUEST_TIMEOUT_MS' diff --git a/yarn-project/p2p/src/client/test/tx_proposal_collector/p2p_client.proposal_tx_collector.bench.test.ts b/yarn-project/p2p/src/client/test/tx_proposal_collector/p2p_client.proposal_tx_collector.bench.test.ts index fd3b8457f75e..606fe511e047 100644 --- a/yarn-project/p2p/src/client/test/tx_proposal_collector/p2p_client.proposal_tx_collector.bench.test.ts +++ b/yarn-project/p2p/src/client/test/tx_proposal_collector/p2p_client.proposal_tx_collector.bench.test.ts @@ -95,6 +95,7 @@ describe('ProposalTxCollector Benchmarks', () => { bootstrapNodesAsFullPeers: true, maxPeerCount: PEERS_PER_RUN + 1, peerCheckIntervalMS: 1000, + peerFailedBanTimeMs: 5_000, dialTimeoutMs: 10_000, individualRequestTimeoutMs: 30_000, }; diff --git a/yarn-project/p2p/src/config.ts b/yarn-project/p2p/src/config.ts index 74f0562addad..a2df35643f91 100644 --- a/yarn-project/p2p/src/config.ts +++ b/yarn-project/p2p/src/config.ts @@ -67,6 +67,9 @@ export interface P2PConfig /** The frequency in which to check for new peers. */ peerCheckIntervalMS: number; + /** How long to ban a peer after it fails MAX_DIAL_ATTEMPTS dials. */ + peerFailedBanTimeMs: number; + /** Size of queue of L2 blocks to store. 
*/ l2QueueSize: number; @@ -254,6 +257,11 @@ export const p2pConfigMappings: ConfigMappingsType = { description: 'The frequency in which to check for new peers.', ...numberConfigHelper(30_000), }, + peerFailedBanTimeMs: { + env: 'P2P_PEER_FAILED_BAN_TIME_MS', + description: 'How long to ban a peer after it fails maximum dial attempts.', + ...numberConfigHelper(5 * 60 * 1000), + }, l2QueueSize: { env: 'P2P_L2_QUEUE_SIZE', description: 'Size of queue of L2 blocks to store.', diff --git a/yarn-project/p2p/src/services/peer-manager/peer_manager.ts b/yarn-project/p2p/src/services/peer-manager/peer_manager.ts index 669f0e149a9c..0754438c7a1f 100644 --- a/yarn-project/p2p/src/services/peer-manager/peer_manager.ts +++ b/yarn-project/p2p/src/services/peer-manager/peer_manager.ts @@ -32,7 +32,7 @@ import { PeerScoreState, type PeerScoring } from './peer_scoring.js'; const MAX_DIAL_ATTEMPTS = 3; const MAX_CACHED_PEERS = 100; const MAX_CACHED_PEER_AGE_MS = 5 * 60 * 1000; // 5 minutes -const FAILED_PEER_BAN_TIME_MS = 5 * 60 * 1000; // 5 minutes timeout after failing MAX_DIAL_ATTEMPTS +const DEFAULT_FAILED_PEER_BAN_TIME_MS = 5 * 60 * 1000; // 5 minutes timeout after failing MAX_DIAL_ATTEMPTS const GOODBYE_DIAL_TIMEOUT_MS = 1000; const FAILED_AUTH_HANDSHAKE_EXPIRY_MS = 60 * 60 * 1000; // 1 hour @@ -776,7 +776,8 @@ export class PeerManager implements PeerManagerInterface { // Add to timed out peers this.timedOutPeers.set(id, { peerId: id, - timeoutUntilMs: this.dateProvider.now() + FAILED_PEER_BAN_TIME_MS, + timeoutUntilMs: + this.dateProvider.now() + (this.config.peerFailedBanTimeMs ?? 
DEFAULT_FAILED_PEER_BAN_TIME_MS), }); } } From f6c26a81be3dd77a092dfe5ad1aba8e45d6e5c37 Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Mon, 16 Mar 2026 15:14:18 -0300 Subject: [PATCH 04/41] fix(p2p): fall back to maxTxsPerCheckpoint for per-block tx validation (#21605) ## Motivation When `VALIDATOR_MAX_TX_PER_BLOCK` is not set but `VALIDATOR_MAX_TX_PER_CHECKPOINT` is, the gossip-level proposal validator enforces no per-block transaction limit at all. A single block can't have more transactions than the entire checkpoint allows, so the checkpoint limit is a valid upper bound for per-block validation. ## Approach Use `validateMaxTxsPerCheckpoint` as a fallback when `validateMaxTxsPerBlock` is not set in the proposal validator construction. This applies at both construction sites: the P2P libp2p service (gossip validation) and the validator-client factory (block proposal handler). ## Changes - **p2p**: Added `validateMaxTxsPerCheckpoint` to `P2PConfig` interface and config mappings (reads from `VALIDATOR_MAX_TX_PER_CHECKPOINT` env var) - **p2p (libp2p_service)**: Use `validateMaxTxsPerBlock ?? validateMaxTxsPerCheckpoint` when constructing proposal validators - **validator-client (factory)**: Same fallback when constructing the `BlockProposalValidator` Co-authored-by: Claude Opus 4.6 (1M context) --- yarn-project/p2p/src/config.ts | 9 +++++++++ yarn-project/p2p/src/services/libp2p/libp2p_service.ts | 2 +- yarn-project/validator-client/src/factory.ts | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/yarn-project/p2p/src/config.ts b/yarn-project/p2p/src/config.ts index 74f0562addad..23b96dfabc00 100644 --- a/yarn-project/p2p/src/config.ts +++ b/yarn-project/p2p/src/config.ts @@ -43,6 +43,9 @@ export interface P2PConfig /** Maximum transactions per block for validation. Overrides maxTxsPerBlock for gossip validation when set. */ validateMaxTxsPerBlock?: number; + /** Maximum transactions per checkpoint for validation. 
Used as fallback for maxTxsPerBlock when that is not set. */ + validateMaxTxsPerCheckpoint?: number; + /** Maximum L2 gas per block for validation. When set, txs exceeding this limit are rejected. */ validateMaxL2BlockGas?: number; @@ -214,6 +217,12 @@ export const p2pConfigMappings: ConfigMappingsType = { 'Maximum transactions per block for validation. Overrides maxTxsPerBlock for gossip validation when set.', parseEnv: (val: string) => (val ? parseInt(val, 10) : undefined), }, + validateMaxTxsPerCheckpoint: { + env: 'VALIDATOR_MAX_TX_PER_CHECKPOINT', + description: + 'Maximum transactions per checkpoint for validation. Used as fallback for maxTxsPerBlock when that is not set.', + parseEnv: (val: string) => (val ? parseInt(val, 10) : undefined), + }, validateMaxL2BlockGas: { env: 'VALIDATOR_MAX_L2_BLOCK_GAS', description: 'Maximum L2 gas per block for validation. When set, txs exceeding this limit are rejected.', diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts index e59fbaa99b4c..6f89b66a5cf3 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts @@ -226,7 +226,7 @@ export class LibP2PService extends WithTracer implements P2PService { const proposalValidatorOpts = { txsPermitted: !config.disableTransactions, - maxTxsPerBlock: config.validateMaxTxsPerBlock, + maxTxsPerBlock: config.validateMaxTxsPerBlock ?? 
config.validateMaxTxsPerCheckpoint, }; this.blockProposalValidator = new BlockProposalValidator(epochCache, proposalValidatorOpts); this.checkpointProposalValidator = new CheckpointProposalValidator(epochCache, proposalValidatorOpts); diff --git a/yarn-project/validator-client/src/factory.ts b/yarn-project/validator-client/src/factory.ts index b7645d48c485..6c706c5dc855 100644 --- a/yarn-project/validator-client/src/factory.ts +++ b/yarn-project/validator-client/src/factory.ts @@ -29,7 +29,7 @@ export function createBlockProposalHandler( const metrics = new ValidatorMetrics(deps.telemetry); const blockProposalValidator = new BlockProposalValidator(deps.epochCache, { txsPermitted: !config.disableTransactions, - maxTxsPerBlock: config.validateMaxTxsPerBlock, + maxTxsPerBlock: config.validateMaxTxsPerBlock ?? config.validateMaxTxsPerCheckpoint, }); return new BlockProposalHandler( deps.checkpointsBuilder, From 6845b723d2998b7870f7324637ef93638ef441e2 Mon Sep 17 00:00:00 2001 From: Michal Rzeszutko Date: Mon, 16 Mar 2026 20:37:08 +0100 Subject: [PATCH 05/41] chore: fixing M3 devcontainer builds (#21611) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Fix: ARM64 Mac (M3) Devcontainer Build Failures ## Problem Building inside a devcontainer on Mac with Apple M3 chip fails in multiple ways: 1. **SIGILL crashes** — The `bb-sol` build step crashes when running `honk_solidity_key_gen`, and E2E tests fail with `Illegal instruction` errors. 2. **Rust compilation failures** — The `noir` build fails with `can't find crate for serde` and similar errors when noir and avm-transpiler build in parallel, racing on the shared `CARGO_HOME`. ## Root Cause ### SVE instructions from zig `-target native` 1. CI runs on **AWS Graviton** (ARM64 with SVE vector extensions) 2. The zig compiler wrapper uses `-target native-linux-gnu.2.35`, which on Graviton enables **SVE instructions** 3. 
Mac M3 devcontainer (ARM64 **without SVE**) downloads the same cached binaries 4. Binaries contain SVE opcodes (e.g. `0x04be4000`) that Apple Silicon can't execute → **SIGILL** Cache keys already include architecture via `cache_content_hash` (which appends `$OSTYPE-$(uname -m)`), so amd64 vs arm64 caches never collide. The problem is specifically that two ARM64 machines (Graviton with SVE vs Apple Silicon without SVE) share the same architecture tag but have different CPU feature sets. The fix is to stop emitting CPU-specific instructions in the first place. ### Parallel Rust build race condition The top-level bootstrap runs `noir` and `avm-transpiler` builds in parallel. Both invoke `cargo build`, and both share the same `CARGO_HOME` (`~/.cargo`) which contains the crate registry and download cache. When both cargo processes run concurrently, they race on shared registry state, causing downstream crates (e.g. `serde-big-array`, `ecdsa`) to fail with `can't find crate` errors during compilation. This does not happen on CI where builds are cached, only on local fresh builds (e.g. `NO_CACHE=1`). ## Fixes ### 1. Zig compiler wrappers: explicit ARM64 target **Files:** `barretenberg/cpp/scripts/zig-cc.sh`, `barretenberg/cpp/scripts/zig-c++.sh` Changed `-target native-linux-gnu.2.35` to use explicit `aarch64-linux-gnu.2.35` on ARM64 Linux. This produces generic ARM64 code without CPU-specific extensions (SVE, etc.), ensuring binaries work on all ARM64 machines — Graviton, Apple Silicon, Ampere, etc. x86_64 behavior is unchanged (still uses `native`). ### 2. Extract native_cache_key variable in barretenberg bootstrap **File:** `barretenberg/cpp/bootstrap.sh` Extracted the repeated cache key pattern `barretenberg-$native_preset-$hash` into a single `native_cache_key` variable, used by `build_native_objects`, `build_native`, and related functions. Pure refactor, no change in cache key values. ### 3. 
Better error handling in init_honk.sh **File:** `barretenberg/sol/scripts/init_honk.sh` Added `set -eu` so the script fails immediately on error instead of silently continuing after SIGILL. Added an existence check for the `honk_solidity_key_gen` binary with a clear error message. ### 4. Serialize parallel cargo builds with flock **Files:** `noir/bootstrap.sh`, `avm-transpiler/bootstrap.sh` Both scripts wrap their `cargo build` invocations with `flock -x 200` on a shared lock file (`/tmp/rustup.lock`): ```bash ( flock -x 200 cd noir-repo && cargo build --locked --release --target-dir target ) 200>/tmp/rustup.lock ``` This acquires an exclusive file lock before running cargo, so if both `noir` and `avm-transpiler` builds run in parallel, one waits for the other to finish. The lock is automatically released when the subshell exits. This eliminates the `CARGO_HOME` race condition without requiring changes to the top-level parallelism. ## Notes ### E2E Tests The E2E test failures (SIGKILL from invalid instructions) have the same root cause as the SIGILL crashes — the `bb` binary used by tests was from the SVE-contaminated cache. After rebuilding with these fixes, E2E tests work. 
--------- Co-authored-by: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Co-authored-by: ludamad --- barretenberg/cpp/CMakePresets.json | 1 - barretenberg/cpp/bootstrap.sh | 1 - barretenberg/cpp/cmake/arch.cmake | 14 +++++++++++++- barretenberg/cpp/scripts/zig-c++.sh | 2 ++ barretenberg/cpp/scripts/zig-cc.sh | 2 ++ barretenberg/sol/scripts/init_honk.sh | 16 ++++++++++++---- noir/bootstrap.sh | 7 ++++++- 7 files changed, 35 insertions(+), 8 deletions(-) diff --git a/barretenberg/cpp/CMakePresets.json b/barretenberg/cpp/CMakePresets.json index 420fb003d42e..62b4f4851719 100644 --- a/barretenberg/cpp/CMakePresets.json +++ b/barretenberg/cpp/CMakePresets.json @@ -19,7 +19,6 @@ }, "cacheVariables": { "CMAKE_BUILD_TYPE": "Release", - "TARGET_ARCH": "skylake", "ENABLE_PIC": "ON" } }, diff --git a/barretenberg/cpp/bootstrap.sh b/barretenberg/cpp/bootstrap.sh index a4c70bba834c..7a38e0bd4f80 100755 --- a/barretenberg/cpp/bootstrap.sh +++ b/barretenberg/cpp/bootstrap.sh @@ -7,7 +7,6 @@ else export native_preset=${NATIVE_PRESET:-clang20-no-avm} fi export hash=$(hash_str $(../../avm-transpiler/bootstrap.sh hash) $(cache_content_hash .rebuild_patterns)) -export native_build_dir=$(scripts/preset-build-dir) # Injects version number into a given bb binary. # Means we don't actually need to rebuild bb to release a new version if code hasn't changed. diff --git a/barretenberg/cpp/cmake/arch.cmake b/barretenberg/cpp/cmake/arch.cmake index 71b6dbb599ff..fe6488cbce3f 100644 --- a/barretenberg/cpp/cmake/arch.cmake +++ b/barretenberg/cpp/cmake/arch.cmake @@ -5,7 +5,19 @@ if(WASM) add_compile_options(-fno-exceptions -fno-slp-vectorize) endif() -if(NOT WASM AND NOT ARM AND TARGET_ARCH) +# Auto-detect TARGET_ARCH if not explicitly set. +# Use 'skylake' on x86_64 (matches our cross-compile presets) and 'generic' on ARM +# to avoid emitting CPU-specific instructions (e.g. SVE on Graviton) that break on +# other ARM machines like Apple Silicon. 
+if(NOT WASM AND NOT TARGET_ARCH) + if(ARM) + set(TARGET_ARCH "generic") + else() + set(TARGET_ARCH "skylake") + endif() +endif() + +if(NOT WASM AND TARGET_ARCH) message(STATUS "Target architecture: ${TARGET_ARCH}") add_compile_options(-march=${TARGET_ARCH}) endif() diff --git a/barretenberg/cpp/scripts/zig-c++.sh b/barretenberg/cpp/scripts/zig-c++.sh index 3c1a69cb9ad6..dc7e822e03a4 100755 --- a/barretenberg/cpp/scripts/zig-c++.sh +++ b/barretenberg/cpp/scripts/zig-c++.sh @@ -1,6 +1,8 @@ #!/bin/bash # Wrapper for zig c++ that pins glibc 2.35 on Linux (Ubuntu 22.04+ compat) # and uses native target on macOS. +# Note: arch.cmake handles -march selection (skylake on x86, generic on ARM) +# which overrides zig's native CPU detection, preventing CPU-specific instructions. if [[ "$(uname -s)" == "Linux" ]]; then exec zig c++ -target native-linux-gnu.2.35 "$@" else diff --git a/barretenberg/cpp/scripts/zig-cc.sh b/barretenberg/cpp/scripts/zig-cc.sh index 6f1444434676..bc39365b6c09 100755 --- a/barretenberg/cpp/scripts/zig-cc.sh +++ b/barretenberg/cpp/scripts/zig-cc.sh @@ -1,6 +1,8 @@ #!/bin/bash # Wrapper for zig cc that pins glibc 2.35 on Linux (Ubuntu 22.04+ compat) # and uses native target on macOS. +# Note: arch.cmake handles -march selection (skylake on x86, generic on ARM) +# which overrides zig's native CPU detection, preventing CPU-specific instructions. if [[ "$(uname -s)" == "Linux" ]]; then exec zig cc -target native-linux-gnu.2.35 "$@" else diff --git a/barretenberg/sol/scripts/init_honk.sh b/barretenberg/sol/scripts/init_honk.sh index cf24ef092aba..5711269ce03c 100755 --- a/barretenberg/sol/scripts/init_honk.sh +++ b/barretenberg/sol/scripts/init_honk.sh @@ -1,14 +1,22 @@ #!/usr/bin/env bash +set -eu # the verification key is the same for ultra and ultra zk SRS_PATH="$HOME/.bb-crs" OUTPUT_PATH="./src/honk" +KEYGEN="../cpp/build/bin/honk_solidity_key_gen" + +if [ ! 
-x "$KEYGEN" ]; then + echo "Error: honk_solidity_key_gen binary not found at $KEYGEN" >&2 + echo "Run barretenberg/cpp bootstrap first." >&2 + exit 1 +fi mkdir -p './src/honk/keys' -../cpp/build/bin/honk_solidity_key_gen add2 $OUTPUT_PATH $SRS_PATH -../cpp/build/bin/honk_solidity_key_gen blake $OUTPUT_PATH $SRS_PATH -../cpp/build/bin/honk_solidity_key_gen ecdsa $OUTPUT_PATH $SRS_PATH -../cpp/build/bin/honk_solidity_key_gen recursive $OUTPUT_PATH $SRS_PATH +$KEYGEN add2 $OUTPUT_PATH $SRS_PATH +$KEYGEN blake $OUTPUT_PATH $SRS_PATH +$KEYGEN ecdsa $OUTPUT_PATH $SRS_PATH +$KEYGEN recursive $OUTPUT_PATH $SRS_PATH echo "" echo "✓ VK generation complete" diff --git a/noir/bootstrap.sh b/noir/bootstrap.sh index 4fd19781f16a..1fcb30d95250 100755 --- a/noir/bootstrap.sh +++ b/noir/bootstrap.sh @@ -25,7 +25,12 @@ function build_native { set -euo pipefail if ! cache_download noir-$hash.tar.gz; then - (cd noir-repo && cargo build --locked --release --target-dir target) + # Serialize cargo operations to avoid race conditions with avm-transpiler + # which may run in parallel and share the same CARGO_HOME. + ( + flock -x 200 + cd noir-repo && cargo build --locked --release --target-dir target + ) 200>/tmp/rustup.lock cache_upload noir-$hash.tar.gz noir-repo/target/release/{nargo,acvm,noir-profiler} fi } From aa97afc6587b3c4a6a392367c17c2e014070bd1f Mon Sep 17 00:00:00 2001 From: AztecBot Date: Tue, 17 Mar 2026 01:27:11 +0000 Subject: [PATCH 06/41] fix: clamp finalized block to oldest available in world-state PR #21597 increased the finalized block lookback from epochDuration*2 to epochDuration*2*4. This caused the finalized block number to jump backwards past blocks that had already been pruned from world-state, causing advance_finalized_block to fail with 'Failed to read block data'. Two fixes: 1. TypeScript: clamp blockNumber to oldestHistoricalBlock before calling setFinalized, so we never request a pruned block. 2. 
C++: reorder checks in advance_finalized_block to check the no-op condition (already finalized past this block) before attempting to read block data. This makes the native layer resilient to receiving a stale finalized block number. --- .../cached_content_addressed_tree_store.hpp | 8 ++++---- .../server_world_state_synchronizer.ts | 14 ++++++++++++++ 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index 2f08ef61ef2b..49a4dc1110cd 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -867,6 +867,10 @@ void ContentAddressedCachedTreeStore::advance_finalized_block(con ReadTransactionPtr readTx = create_read_transaction(); get_meta(uncommittedMeta); get_meta(committedMeta, *readTx, false); + // do nothing if the block is already finalized + if (committedMeta.finalizedBlockHeight >= blockNumber) { + return; + } if (!dataStore_->read_block_data(blockNumber, blockPayload, *readTx)) { throw std::runtime_error(format("Unable to advance finalized block: ", blockNumber, @@ -874,10 +878,6 @@ void ContentAddressedCachedTreeStore::advance_finalized_block(con forkConstantData_.name_)); } } - // do nothing if the block is already finalized - if (committedMeta.finalizedBlockHeight >= blockNumber) { - return; - } // can currently only finalize up to the unfinalized block height if (committedMeta.finalizedBlockHeight > committedMeta.unfinalizedBlockHeight) { diff --git a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts index 511579f3d2e4..5bd76e38a48b 100644 --- 
a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts +++ b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts @@ -388,6 +388,20 @@ export class ServerWorldStateSynchronizer private async handleChainFinalized(blockNumber: BlockNumber) { this.log.verbose(`Finalized chain is now at block ${blockNumber}`); + // Clamp to the oldest block still available in world state. The finalized block number can + // jump backwards (e.g. when the finalization heuristic changes) and try to read block data + // that has already been pruned, which causes the native world state to throw. + const currentSummary = await this.merkleTreeDb.getStatusSummary(); + if (blockNumber < currentSummary.oldestHistoricalBlock) { + this.log.warn( + `Finalized block ${blockNumber} is older than the oldest available block ${currentSummary.oldestHistoricalBlock}. ` + + `Clamping to oldest available block.`, + ); + blockNumber = currentSummary.oldestHistoricalBlock; + } + if (blockNumber < 1) { + return; + } const summary = await this.merkleTreeDb.setFinalized(blockNumber); if (this.historyToKeep === undefined) { return; From 1ad718db90b5aaf63a256c3addbb3b2bd8594d0d Mon Sep 17 00:00:00 2001 From: AztecBot Date: Tue, 17 Mar 2026 01:49:55 +0000 Subject: [PATCH 07/41] test: add integration test for finalized block backwards jump past pruned blocks Tests that handleBlockStreamEvent with chain-finalized for a block older than the oldest available block does not throw, validating the clamping fix in handleChainFinalized. 
--- .../world-state/src/test/integration.test.ts | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/yarn-project/world-state/src/test/integration.test.ts b/yarn-project/world-state/src/test/integration.test.ts index 8ceb875d4669..893a50c8746d 100644 --- a/yarn-project/world-state/src/test/integration.test.ts +++ b/yarn-project/world-state/src/test/integration.test.ts @@ -252,6 +252,44 @@ describe('world-state integration', () => { await awaitSync(5, 4); await expectSynchedToBlock(5, 4); }); + + it('does not throw when finalized block jumps backwards past pruned blocks', async () => { + // Create 20 blocks and sync them all + await archiver.createBlocks(MAX_CHECKPOINT_COUNT); + await synchronizer.start(); + await awaitSync(MAX_CHECKPOINT_COUNT); + await expectSynchedToBlock(MAX_CHECKPOINT_COUNT); + + // Manually finalize to block 15 and prune historical blocks up to block 10 + // to simulate world-state having pruned old data. + await db.setFinalized(BlockNumber(15)); + await db.removeHistoricalBlocks(BlockNumber(10)); + + const summary = await db.getStatusSummary(); + log.info( + `After manual finalize+prune: oldest=${summary.oldestHistoricalBlock}, finalized=${summary.finalizedBlockNumber}`, + ); + expect(summary.oldestHistoricalBlock).toBe(10); + expect(summary.finalizedBlockNumber).toBe(15); + + // Now simulate the scenario from PR #21597: finalized block jumps backwards + // to a block M that is older than oldestHistoricalBlock. + // This should NOT throw — the clamping logic should handle it. 
+ const backwardsFinalized = BlockNumber(5); + log.info( + `Sending chain-finalized for block ${backwardsFinalized} (below oldest ${summary.oldestHistoricalBlock})`, + ); + await expect( + synchronizer.handleBlockStreamEvent({ + type: 'chain-finalized', + block: { number: backwardsFinalized, hash: '' }, + }), + ).resolves.not.toThrow(); + + // Finalized block should remain at 15 (unchanged by the backwards event) + const afterSummary = await db.getStatusSummary(); + expect(afterSummary.finalizedBlockNumber).toBe(15); + }); }); }); From 92f87f86ba9247b1a53adc8526eca74d8345b30b Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Mon, 16 Mar 2026 23:06:28 -0300 Subject: [PATCH 08/41] fix: avoid `Array.from` with untrusted sizes Calling `Array.from({length})` allocates length immediately. We were calling this method in the context of deserialization with untrusted input. This PR changes it so we use `new Array(size)` for untrusted input. A bit less efficient, but more secure. --- yarn-project/blob-lib/src/sponge_blob.ts | 4 +-- .../foundation/src/serialize/buffer_reader.ts | 35 +++++++++++++++---- .../src/trees/membership_witness.ts | 4 +-- .../stdlib/src/avm/avm_accumulated_data.ts | 8 ++--- .../src/avm/avm_circuit_public_inputs.ts | 4 +-- .../stdlib/src/kernel/claimed_length_array.ts | 2 +- .../hints/private_kernel_reset_hints.ts | 7 ++-- .../src/kernel/hints/read_request_hints.ts | 6 ++-- .../stdlib/src/kernel/padded_side_effects.ts | 6 ++-- .../kernel/private_to_avm_accumulated_data.ts | 6 ++-- .../private_to_public_accumulated_data.ts | 12 +++---- .../private_to_rollup_accumulated_data.ts | 10 +++--- yarn-project/stdlib/src/logs/private_log.ts | 2 +- yarn-project/stdlib/src/logs/public_log.ts | 12 ++++++- .../src/parity/parity_base_private_inputs.ts | 2 +- yarn-project/stdlib/src/proofs/chonk_proof.ts | 10 +++++- .../stdlib/src/rollup/base_rollup_hints.ts | 4 +-- .../block_root_rollup_private_inputs.ts | 16 ++++----- 
.../rollup/checkpoint_rollup_public_inputs.ts | 4 +-- .../checkpoint_root_rollup_private_inputs.ts | 8 ++--- .../src/rollup/root_rollup_public_inputs.ts | 4 +-- .../src/rollup/tree_snapshot_diff_hints.ts | 10 +++--- .../stdlib/src/tx/protocol_contracts.ts | 2 +- yarn-project/stdlib/src/vks/vk_data.ts | 2 +- 24 files changed, 112 insertions(+), 68 deletions(-) diff --git a/yarn-project/blob-lib/src/sponge_blob.ts b/yarn-project/blob-lib/src/sponge_blob.ts index 50a049bd829f..4578d30dabe7 100644 --- a/yarn-project/blob-lib/src/sponge_blob.ts +++ b/yarn-project/blob-lib/src/sponge_blob.ts @@ -91,8 +91,8 @@ export class Poseidon2Sponge { static fromBuffer(buffer: Buffer | BufferReader): Poseidon2Sponge { const reader = BufferReader.asReader(buffer); return new Poseidon2Sponge( - reader.readArray(3, Fr), - reader.readArray(4, Fr), + reader.readTuple(3, Fr), + reader.readTuple(4, Fr), reader.readNumber(), reader.readBoolean(), ); diff --git a/yarn-project/foundation/src/serialize/buffer_reader.ts b/yarn-project/foundation/src/serialize/buffer_reader.ts index 8cc8071db595..45a88457f668 100644 --- a/yarn-project/foundation/src/serialize/buffer_reader.ts +++ b/yarn-project/foundation/src/serialize/buffer_reader.ts @@ -286,16 +286,39 @@ export class BufferReader { } /** - * Read an array of a fixed size with elements of type T from the buffer. - * The 'itemDeserializer' object should have a 'fromBuffer' method that takes a BufferReader instance as input, - * and returns an instance of the desired deserialized data type T. - * This method will call the 'fromBuffer' method for each element in the array and return the resulting array. + * Read an array from the buffer using lazy allocation (new Array + loop). + * Safe for use with untrusted sizes — does not pre-allocate memory proportional to size. * - * @param size - The fixed number of elements in the array. + * @param size - The number of elements to read. 
* @param itemDeserializer - An object with a 'fromBuffer' method to deserialize individual elements of type T. * @returns An array of instances of type T. */ - public readArray( + public readArray( + size: number, + itemDeserializer: { + /** + * A function for deserializing data from a BufferReader instance. + */ + fromBuffer: (reader: BufferReader) => T; + }, + ): T[] { + const result = new Array(size); + for (let i = 0; i < size; i++) { + result[i] = itemDeserializer.fromBuffer(this); + } + return result; + } + + /** + * Read a fixed-size tuple from the buffer using dense allocation (Array.from). + * Only use with compile-time constant sizes — the size parameter MUST NOT come from untrusted input + * as Array.from pre-allocates memory proportional to size. + * + * @param size - The fixed number of elements (must be a compile-time constant). + * @param itemDeserializer - An object with a 'fromBuffer' method to deserialize individual elements of type T. + * @returns A densely-allocated tuple of instances of type T. 
+ */ + public readTuple( size: N, itemDeserializer: { /** diff --git a/yarn-project/foundation/src/trees/membership_witness.ts b/yarn-project/foundation/src/trees/membership_witness.ts index bbc46f886b1e..c4500ad710d4 100644 --- a/yarn-project/foundation/src/trees/membership_witness.ts +++ b/yarn-project/foundation/src/trees/membership_witness.ts @@ -94,7 +94,7 @@ export class MembershipWitness { static fromBuffer(buffer: Buffer | BufferReader, size: N): MembershipWitness { const reader = BufferReader.asReader(buffer); const leafIndex = toBigIntBE(reader.readBytes(32)); - const siblingPath = reader.readArray(size, Fr); + const siblingPath = reader.readArray(size, Fr) as Tuple; return new MembershipWitness(size, leafIndex, siblingPath); } @@ -108,7 +108,7 @@ export class MembershipWitness { fromBuffer: (buffer: Buffer | BufferReader) => { const reader = BufferReader.asReader(buffer); const leafIndex = toBigIntBE(reader.readBytes(32)); - const siblingPath = reader.readArray(size, Fr); + const siblingPath = reader.readArray(size, Fr) as Tuple; return new MembershipWitness(size, leafIndex, siblingPath); }, }; diff --git a/yarn-project/stdlib/src/avm/avm_accumulated_data.ts b/yarn-project/stdlib/src/avm/avm_accumulated_data.ts index f072f5a00393..3e39ccb53948 100644 --- a/yarn-project/stdlib/src/avm/avm_accumulated_data.ts +++ b/yarn-project/stdlib/src/avm/avm_accumulated_data.ts @@ -88,11 +88,11 @@ export class AvmAccumulatedData { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); return new this( - reader.readArray(MAX_NOTE_HASHES_PER_TX, Fr), - reader.readArray(MAX_NULLIFIERS_PER_TX, Fr), - reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readTuple(MAX_NOTE_HASHES_PER_TX, Fr), + reader.readTuple(MAX_NULLIFIERS_PER_TX, Fr), + reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), reader.readObject(FlatPublicLogs), - reader.readArray(MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, 
PublicDataWrite), + reader.readTuple(MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, PublicDataWrite), ); } diff --git a/yarn-project/stdlib/src/avm/avm_circuit_public_inputs.ts b/yarn-project/stdlib/src/avm/avm_circuit_public_inputs.ts index 4dbeca81752c..eb6563aba31e 100644 --- a/yarn-project/stdlib/src/avm/avm_circuit_public_inputs.ts +++ b/yarn-project/stdlib/src/avm/avm_circuit_public_inputs.ts @@ -143,8 +143,8 @@ export class AvmCircuitPublicInputs { reader.readObject(AztecAddress), reader.readObject(Fr), reader.readObject(PublicCallRequestArrayLengths), - reader.readArray(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), - reader.readArray(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), + reader.readTuple(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), + reader.readTuple(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), reader.readObject(PublicCallRequest), reader.readObject(PrivateToAvmAccumulatedDataArrayLengths), reader.readObject(PrivateToAvmAccumulatedDataArrayLengths), diff --git a/yarn-project/stdlib/src/kernel/claimed_length_array.ts b/yarn-project/stdlib/src/kernel/claimed_length_array.ts index 109f7463137e..ef0c3ee67716 100644 --- a/yarn-project/stdlib/src/kernel/claimed_length_array.ts +++ b/yarn-project/stdlib/src/kernel/claimed_length_array.ts @@ -25,7 +25,7 @@ export class ClaimedLengthArray { arrayLength: N, ): ClaimedLengthArray { const reader = BufferReader.asReader(buffer); - const array = reader.readArray(arrayLength, deserializer); + const array = reader.readArray(arrayLength, deserializer) as Tuple; const claimedLength = reader.readNumber(); return new ClaimedLengthArray(array, claimedLength); } diff --git a/yarn-project/stdlib/src/kernel/hints/private_kernel_reset_hints.ts b/yarn-project/stdlib/src/kernel/hints/private_kernel_reset_hints.ts index 72c53ecb5a77..8fa1175f14b1 100644 --- a/yarn-project/stdlib/src/kernel/hints/private_kernel_reset_hints.ts +++ b/yarn-project/stdlib/src/kernel/hints/private_kernel_reset_hints.ts @@ -105,8 +105,11 @@ 
export class PrivateKernelResetHints< fromBuffer: buf => nullifierReadRequestHintsFromBuffer(buf, numNullifierReadRequestPending, numNullifierReadRequestSettled), }), - reader.readArray(numKeyValidationHints, KeyValidationHint), - reader.readArray(numTransientDataSquashingHints, TransientDataSquashingHint), + reader.readArray(numKeyValidationHints, KeyValidationHint) as Tuple, + reader.readArray(numTransientDataSquashingHints, TransientDataSquashingHint) as Tuple< + TransientDataSquashingHint, + TRANSIENT_DATA_HINTS_LEN + >, ); } } diff --git a/yarn-project/stdlib/src/kernel/hints/read_request_hints.ts b/yarn-project/stdlib/src/kernel/hints/read_request_hints.ts index 39b6663aa953..4f57bf2e327b 100644 --- a/yarn-project/stdlib/src/kernel/hints/read_request_hints.ts +++ b/yarn-project/stdlib/src/kernel/hints/read_request_hints.ts @@ -158,11 +158,11 @@ export class ReadRequestResetHints< > { const reader = BufferReader.asReader(buffer); return new ReadRequestResetHints( - reader.readArray(readRequestLen, ReadRequestAction), - reader.readArray(numPendingReads, PendingReadHint), + reader.readArray(readRequestLen, ReadRequestAction) as Tuple, + reader.readArray(numPendingReads, PendingReadHint) as Tuple, reader.readArray(numSettledReads, { fromBuffer: r => SettledReadHint.fromBuffer(r, treeHeight, leafPreimageFromBuffer), - }), + }) as Tuple, SETTLED_READ_HINTS_LEN>, ); } diff --git a/yarn-project/stdlib/src/kernel/padded_side_effects.ts b/yarn-project/stdlib/src/kernel/padded_side_effects.ts index f42207255465..48d2c2a2442d 100644 --- a/yarn-project/stdlib/src/kernel/padded_side_effects.ts +++ b/yarn-project/stdlib/src/kernel/padded_side_effects.ts @@ -19,9 +19,9 @@ export class PaddedSideEffects { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); return new PaddedSideEffects( - reader.readArray(MAX_NOTE_HASHES_PER_TX, Fr), - reader.readArray(MAX_NULLIFIERS_PER_TX, Fr), - reader.readArray(MAX_PRIVATE_LOGS_PER_TX, 
PrivateLog), + reader.readTuple(MAX_NOTE_HASHES_PER_TX, Fr), + reader.readTuple(MAX_NULLIFIERS_PER_TX, Fr), + reader.readTuple(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), ); } diff --git a/yarn-project/stdlib/src/kernel/private_to_avm_accumulated_data.ts b/yarn-project/stdlib/src/kernel/private_to_avm_accumulated_data.ts index 9442439a039a..d64dbdad345c 100644 --- a/yarn-project/stdlib/src/kernel/private_to_avm_accumulated_data.ts +++ b/yarn-project/stdlib/src/kernel/private_to_avm_accumulated_data.ts @@ -94,9 +94,9 @@ export class PrivateToAvmAccumulatedData { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); return new PrivateToAvmAccumulatedData( - reader.readArray(MAX_NOTE_HASHES_PER_TX, Fr), - reader.readArray(MAX_NULLIFIERS_PER_TX, Fr), - reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readTuple(MAX_NOTE_HASHES_PER_TX, Fr), + reader.readTuple(MAX_NULLIFIERS_PER_TX, Fr), + reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), ); } diff --git a/yarn-project/stdlib/src/kernel/private_to_public_accumulated_data.ts b/yarn-project/stdlib/src/kernel/private_to_public_accumulated_data.ts index ab1b79190672..c35b96c5656d 100644 --- a/yarn-project/stdlib/src/kernel/private_to_public_accumulated_data.ts +++ b/yarn-project/stdlib/src/kernel/private_to_public_accumulated_data.ts @@ -76,12 +76,12 @@ export class PrivateToPublicAccumulatedData { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); return new PrivateToPublicAccumulatedData( - reader.readArray(MAX_NOTE_HASHES_PER_TX, Fr), - reader.readArray(MAX_NULLIFIERS_PER_TX, Fr), - reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), - reader.readArray(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), - reader.readArray(MAX_CONTRACT_CLASS_LOGS_PER_TX, ScopedLogHash), - reader.readArray(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), + reader.readTuple(MAX_NOTE_HASHES_PER_TX, Fr), + 
reader.readTuple(MAX_NULLIFIERS_PER_TX, Fr), + reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readTuple(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), + reader.readTuple(MAX_CONTRACT_CLASS_LOGS_PER_TX, ScopedLogHash), + reader.readTuple(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), ); } diff --git a/yarn-project/stdlib/src/kernel/private_to_rollup_accumulated_data.ts b/yarn-project/stdlib/src/kernel/private_to_rollup_accumulated_data.ts index 7807f6ad9b97..74d1aeed52cd 100644 --- a/yarn-project/stdlib/src/kernel/private_to_rollup_accumulated_data.ts +++ b/yarn-project/stdlib/src/kernel/private_to_rollup_accumulated_data.ts @@ -95,11 +95,11 @@ export class PrivateToRollupAccumulatedData { static fromBuffer(buffer: Buffer | BufferReader): PrivateToRollupAccumulatedData { const reader = BufferReader.asReader(buffer); return new PrivateToRollupAccumulatedData( - reader.readArray(MAX_NOTE_HASHES_PER_TX, Fr), - reader.readArray(MAX_NULLIFIERS_PER_TX, Fr), - reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), - reader.readArray(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), - reader.readArray(MAX_CONTRACT_CLASS_LOGS_PER_TX, ScopedLogHash), + reader.readTuple(MAX_NOTE_HASHES_PER_TX, Fr), + reader.readTuple(MAX_NULLIFIERS_PER_TX, Fr), + reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readTuple(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), + reader.readTuple(MAX_CONTRACT_CLASS_LOGS_PER_TX, ScopedLogHash), ); } diff --git a/yarn-project/stdlib/src/logs/private_log.ts b/yarn-project/stdlib/src/logs/private_log.ts index 10f915a1e6bd..be11dda2b0bf 100644 --- a/yarn-project/stdlib/src/logs/private_log.ts +++ b/yarn-project/stdlib/src/logs/private_log.ts @@ -73,7 +73,7 @@ export class PrivateLog { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); - return new PrivateLog(reader.readArray(PRIVATE_LOG_SIZE_IN_FIELDS, Fr), reader.readNumber()); + return new 
PrivateLog(reader.readTuple(PRIVATE_LOG_SIZE_IN_FIELDS, Fr), reader.readNumber()); } static random(tag = Fr.random()) { diff --git a/yarn-project/stdlib/src/logs/public_log.ts b/yarn-project/stdlib/src/logs/public_log.ts index 490e39a0bcae..19c9daaf5b32 100644 --- a/yarn-project/stdlib/src/logs/public_log.ts +++ b/yarn-project/stdlib/src/logs/public_log.ts @@ -1,4 +1,8 @@ -import { FLAT_PUBLIC_LOGS_PAYLOAD_LENGTH, PUBLIC_LOG_HEADER_LENGTH } from '@aztec/constants'; +import { + FLAT_PUBLIC_LOGS_PAYLOAD_LENGTH, + MAX_PUBLIC_LOG_SIZE_IN_FIELDS, + PUBLIC_LOG_HEADER_LENGTH, +} from '@aztec/constants'; import type { FieldsOf } from '@aztec/foundation/array'; import { Fr } from '@aztec/foundation/curves/bn254'; import { type ZodFor, schemas } from '@aztec/foundation/schemas'; @@ -76,6 +80,9 @@ export class FlatPublicLogs { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); const length = reader.readNumber(); + if (length > FLAT_PUBLIC_LOGS_PAYLOAD_LENGTH) { + throw new Error(`FlatPublicLogs length ${length} exceeds maximum ${FLAT_PUBLIC_LOGS_PAYLOAD_LENGTH}`); + } return this.fromUnpaddedPayload(reader.readArray(length, Fr)); } @@ -171,6 +178,9 @@ export class PublicLog { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); const fieldsLength = reader.readNumber(); + if (fieldsLength > MAX_PUBLIC_LOG_SIZE_IN_FIELDS) { + throw new Error(`PublicLog fields length ${fieldsLength} exceeds maximum ${MAX_PUBLIC_LOG_SIZE_IN_FIELDS}`); + } return new PublicLog(reader.readObject(AztecAddress), reader.readArray(fieldsLength, Fr)); } diff --git a/yarn-project/stdlib/src/parity/parity_base_private_inputs.ts b/yarn-project/stdlib/src/parity/parity_base_private_inputs.ts index a8a4dd68fd77..eece01c59c37 100644 --- a/yarn-project/stdlib/src/parity/parity_base_private_inputs.ts +++ b/yarn-project/stdlib/src/parity/parity_base_private_inputs.ts @@ -41,7 +41,7 @@ export class ParityBasePrivateInputs { 
*/ static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); - return new ParityBasePrivateInputs(reader.readArray(NUM_MSGS_PER_BASE_PARITY, Fr), Fr.fromBuffer(reader)); + return new ParityBasePrivateInputs(reader.readTuple(NUM_MSGS_PER_BASE_PARITY, Fr), Fr.fromBuffer(reader)); } /** diff --git a/yarn-project/stdlib/src/proofs/chonk_proof.ts b/yarn-project/stdlib/src/proofs/chonk_proof.ts index bb22a6187fa0..6afe0a67c026 100644 --- a/yarn-project/stdlib/src/proofs/chonk_proof.ts +++ b/yarn-project/stdlib/src/proofs/chonk_proof.ts @@ -56,7 +56,10 @@ export class ChonkProof { static fromBuffer(buffer: Buffer | BufferReader): ChonkProof { const reader = BufferReader.asReader(buffer); const proofLength = reader.readNumber(); - const proof = reader.readArray(proofLength, Fr); + if (proofLength !== CHONK_PROOF_LENGTH) { + throw new Error(`Invalid ChonkProof length from buffer: ${proofLength}, expected ${CHONK_PROOF_LENGTH}`); + } + const proof = reader.readArray(CHONK_PROOF_LENGTH, Fr); return new ChonkProof(proof); } @@ -106,6 +109,11 @@ export class ChonkProofWithPublicInputs { static fromBuffer(buffer: Buffer | BufferReader): ChonkProofWithPublicInputs { const reader = BufferReader.asReader(buffer); const proofLength = reader.readNumber(); + if (proofLength < CHONK_PROOF_LENGTH) { + throw new Error( + `Invalid ChonkProofWithPublicInputs length from buffer: ${proofLength}, expected at least ${CHONK_PROOF_LENGTH}`, + ); + } const proof = reader.readArray(proofLength, Fr); return new ChonkProofWithPublicInputs(proof); } diff --git a/yarn-project/stdlib/src/rollup/base_rollup_hints.ts b/yarn-project/stdlib/src/rollup/base_rollup_hints.ts index c92989ea7846..d846f9627e80 100644 --- a/yarn-project/stdlib/src/rollup/base_rollup_hints.ts +++ b/yarn-project/stdlib/src/rollup/base_rollup_hints.ts @@ -86,7 +86,7 @@ export class PrivateBaseRollupHints { reader.readObject(SpongeBlob), reader.readObject(TreeSnapshotDiffHints), 
reader.readObject(PublicDataTreeLeafPreimage), - reader.readArray(ARCHIVE_HEIGHT, Fr), + reader.readTuple(ARCHIVE_HEIGHT, Fr), makeTuple(MAX_CONTRACT_CLASS_LOGS_PER_TX, () => reader.readObject(ContractClassLogFields)), reader.readObject(BlockConstantData), ); @@ -163,7 +163,7 @@ export class PublicBaseRollupHints { return new PublicBaseRollupHints( reader.readObject(SpongeBlob), reader.readObject(AppendOnlyTreeSnapshot), - reader.readArray(ARCHIVE_HEIGHT, Fr), + reader.readTuple(ARCHIVE_HEIGHT, Fr), makeTuple(MAX_CONTRACT_CLASS_LOGS_PER_TX, () => reader.readObject(ContractClassLogFields)), ); } diff --git a/yarn-project/stdlib/src/rollup/block_root_rollup_private_inputs.ts b/yarn-project/stdlib/src/rollup/block_root_rollup_private_inputs.ts index 19a99769dac9..8a9e94eb48cf 100644 --- a/yarn-project/stdlib/src/rollup/block_root_rollup_private_inputs.ts +++ b/yarn-project/stdlib/src/rollup/block_root_rollup_private_inputs.ts @@ -60,8 +60,8 @@ export class BlockRootFirstRollupPrivateInputs { ProofData.fromBuffer(reader, ParityPublicInputs), [ProofData.fromBuffer(reader, TxRollupPublicInputs), ProofData.fromBuffer(reader, TxRollupPublicInputs)], AppendOnlyTreeSnapshot.fromBuffer(reader), - reader.readArray(L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), - reader.readArray(ARCHIVE_HEIGHT, Fr), + reader.readTuple(L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), + reader.readTuple(ARCHIVE_HEIGHT, Fr), ); } @@ -124,8 +124,8 @@ export class BlockRootSingleTxFirstRollupPrivateInputs { ProofData.fromBuffer(reader, ParityPublicInputs), ProofData.fromBuffer(reader, TxRollupPublicInputs), AppendOnlyTreeSnapshot.fromBuffer(reader), - reader.readArray(L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), - reader.readArray(ARCHIVE_HEIGHT, Fr), + reader.readTuple(L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), + reader.readTuple(ARCHIVE_HEIGHT, Fr), ); } @@ -206,8 +206,8 @@ export class BlockRootEmptyTxFirstRollupPrivateInputs { StateReference.fromBuffer(reader), 
CheckpointConstantData.fromBuffer(reader), reader.readUInt64(), - reader.readArray(L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), - reader.readArray(ARCHIVE_HEIGHT, Fr), + reader.readTuple(L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), + reader.readTuple(ARCHIVE_HEIGHT, Fr), ); } @@ -248,7 +248,7 @@ export class BlockRootRollupPrivateInputs { const reader = BufferReader.asReader(buffer); return new BlockRootRollupPrivateInputs( [ProofData.fromBuffer(reader, TxRollupPublicInputs), ProofData.fromBuffer(reader, TxRollupPublicInputs)], - reader.readArray(ARCHIVE_HEIGHT, Fr), + reader.readTuple(ARCHIVE_HEIGHT, Fr), ); } @@ -289,7 +289,7 @@ export class BlockRootSingleTxRollupPrivateInputs { const reader = BufferReader.asReader(buffer); return new BlockRootSingleTxRollupPrivateInputs( ProofData.fromBuffer(reader, TxRollupPublicInputs), - reader.readArray(ARCHIVE_HEIGHT, Fr), + reader.readTuple(ARCHIVE_HEIGHT, Fr), ); } diff --git a/yarn-project/stdlib/src/rollup/checkpoint_rollup_public_inputs.ts b/yarn-project/stdlib/src/rollup/checkpoint_rollup_public_inputs.ts index fe80522432ab..6626279ec6ce 100644 --- a/yarn-project/stdlib/src/rollup/checkpoint_rollup_public_inputs.ts +++ b/yarn-project/stdlib/src/rollup/checkpoint_rollup_public_inputs.ts @@ -65,8 +65,8 @@ export class CheckpointRollupPublicInputs { reader.readObject(AppendOnlyTreeSnapshot), reader.readObject(AppendOnlyTreeSnapshot), reader.readObject(AppendOnlyTreeSnapshot), - reader.readArray(MAX_CHECKPOINTS_PER_EPOCH, Fr), - reader.readArray(MAX_CHECKPOINTS_PER_EPOCH, FeeRecipient), + reader.readTuple(MAX_CHECKPOINTS_PER_EPOCH, Fr), + reader.readTuple(MAX_CHECKPOINTS_PER_EPOCH, FeeRecipient), reader.readObject(BlobAccumulator), reader.readObject(BlobAccumulator), reader.readObject(FinalBlobBatchingChallenges), diff --git a/yarn-project/stdlib/src/rollup/checkpoint_root_rollup_private_inputs.ts b/yarn-project/stdlib/src/rollup/checkpoint_root_rollup_private_inputs.ts index bb15e9110fb0..58cd518b44e0 
100644 --- a/yarn-project/stdlib/src/rollup/checkpoint_root_rollup_private_inputs.ts +++ b/yarn-project/stdlib/src/rollup/checkpoint_root_rollup_private_inputs.ts @@ -81,15 +81,15 @@ export class CheckpointRootRollupHints { const reader = BufferReader.asReader(buffer); return new CheckpointRootRollupHints( BlockHeader.fromBuffer(reader), - reader.readArray(ARCHIVE_HEIGHT, Fr), + reader.readTuple(ARCHIVE_HEIGHT, Fr), reader.readObject(AppendOnlyTreeSnapshot), - reader.readArray(OUT_HASH_TREE_HEIGHT, Fr), + reader.readTuple(OUT_HASH_TREE_HEIGHT, Fr), reader.readObject(BlobAccumulator), reader.readObject(FinalBlobBatchingChallenges), // Below line gives error 'Type instantiation is excessively deep and possibly infinite. ts(2589)' - // reader.readArray(FIELDS_PER_BLOB, Fr), + // reader.readTuple(FIELDS_PER_BLOB, Fr), Array.from({ length: FIELDS_PER_BLOB * BLOBS_PER_CHECKPOINT }, () => Fr.fromBuffer(reader)), - reader.readArray(BLOBS_PER_CHECKPOINT, BLS12Point), + reader.readTuple(BLOBS_PER_CHECKPOINT, BLS12Point), Fr.fromBuffer(reader), ); } diff --git a/yarn-project/stdlib/src/rollup/root_rollup_public_inputs.ts b/yarn-project/stdlib/src/rollup/root_rollup_public_inputs.ts index 9c362ccd9e59..73063f3180e9 100644 --- a/yarn-project/stdlib/src/rollup/root_rollup_public_inputs.ts +++ b/yarn-project/stdlib/src/rollup/root_rollup_public_inputs.ts @@ -68,8 +68,8 @@ export class RootRollupPublicInputs { Fr.fromBuffer(reader), Fr.fromBuffer(reader), Fr.fromBuffer(reader), - reader.readArray(MAX_CHECKPOINTS_PER_EPOCH, Fr), - reader.readArray(MAX_CHECKPOINTS_PER_EPOCH, FeeRecipient), + reader.readTuple(MAX_CHECKPOINTS_PER_EPOCH, Fr), + reader.readTuple(MAX_CHECKPOINTS_PER_EPOCH, FeeRecipient), EpochConstantData.fromBuffer(reader), reader.readObject(FinalBlobAccumulator), ); diff --git a/yarn-project/stdlib/src/rollup/tree_snapshot_diff_hints.ts b/yarn-project/stdlib/src/rollup/tree_snapshot_diff_hints.ts index a07bcdaec5cb..02cbb148fbfb 100644 --- 
a/yarn-project/stdlib/src/rollup/tree_snapshot_diff_hints.ts +++ b/yarn-project/stdlib/src/rollup/tree_snapshot_diff_hints.ts @@ -85,14 +85,14 @@ export class TreeSnapshotDiffHints { static fromBuffer(buffer: Buffer | BufferReader): TreeSnapshotDiffHints { const reader = BufferReader.asReader(buffer); return new TreeSnapshotDiffHints( - reader.readArray(NOTE_HASH_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), - reader.readArray(MAX_NULLIFIERS_PER_TX, NullifierLeafPreimage), - reader.readArray(MAX_NULLIFIERS_PER_TX, { + reader.readTuple(NOTE_HASH_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), + reader.readTuple(MAX_NULLIFIERS_PER_TX, NullifierLeafPreimage), + reader.readTuple(MAX_NULLIFIERS_PER_TX, { fromBuffer: buffer => MembershipWitness.fromBuffer(buffer, NULLIFIER_TREE_HEIGHT), }), - reader.readArray(MAX_NULLIFIERS_PER_TX, Fr), + reader.readTuple(MAX_NULLIFIERS_PER_TX, Fr), reader.readNumbers(MAX_NULLIFIERS_PER_TX), - reader.readArray(NULLIFIER_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), + reader.readTuple(NULLIFIER_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), MembershipWitness.fromBuffer(reader, PUBLIC_DATA_TREE_HEIGHT), ); } diff --git a/yarn-project/stdlib/src/tx/protocol_contracts.ts b/yarn-project/stdlib/src/tx/protocol_contracts.ts index 2abdbffe87a2..85ba8cb78142 100644 --- a/yarn-project/stdlib/src/tx/protocol_contracts.ts +++ b/yarn-project/stdlib/src/tx/protocol_contracts.ts @@ -39,7 +39,7 @@ export class ProtocolContracts { static fromBuffer(buffer: Buffer | BufferReader): ProtocolContracts { const reader = BufferReader.asReader(buffer); - return new ProtocolContracts(reader.readArray(MAX_PROTOCOL_CONTRACTS, AztecAddress)); + return new ProtocolContracts(reader.readTuple(MAX_PROTOCOL_CONTRACTS, AztecAddress)); } toBuffer() { diff --git a/yarn-project/stdlib/src/vks/vk_data.ts b/yarn-project/stdlib/src/vks/vk_data.ts index 8f5adce80099..d2a7b60825c1 100644 --- a/yarn-project/stdlib/src/vks/vk_data.ts +++ b/yarn-project/stdlib/src/vks/vk_data.ts @@ -29,7 +29,7 @@ export class 
VkData { return new VkData( reader.readObject(VerificationKeyData), reader.readNumber(), - reader.readArray(VK_TREE_HEIGHT, Fr), + reader.readTuple(VK_TREE_HEIGHT, Fr), ); } From 2154ecf4cf87ad3059a3728a0ef4d3f6d59a5fe2 Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Mon, 16 Mar 2026 23:14:21 -0300 Subject: [PATCH 09/41] fix: same change but for field reader --- .../foundation/src/serialize/field_reader.ts | 26 ++++++++++++++++--- .../stdlib/src/avm/avm_accumulated_data.ts | 4 +-- .../src/avm/avm_circuit_public_inputs.ts | 4 +-- .../stdlib/src/kernel/claimed_length_array.ts | 2 +- .../kernel/private_to_avm_accumulated_data.ts | 2 +- .../private_to_public_accumulated_data.ts | 8 +++--- .../stdlib/src/tx/protocol_contracts.ts | 2 +- 7 files changed, 33 insertions(+), 15 deletions(-) diff --git a/yarn-project/foundation/src/serialize/field_reader.ts b/yarn-project/foundation/src/serialize/field_reader.ts index 3ffb1395a04b..d150a913a48a 100644 --- a/yarn-project/foundation/src/serialize/field_reader.ts +++ b/yarn-project/foundation/src/serialize/field_reader.ts @@ -169,12 +169,30 @@ export class FieldReader { * @param itemDeserializer - An object with a 'fromFields' method to deserialize individual elements of type T. * @returns An array of instances of type T. */ - public readArray( + /** + * Read an array from the field array using lazy allocation (new Array + loop). + * Safe for use with untrusted sizes. + */ + public readArray( + size: number, + itemDeserializer: { + fromFields: (reader: FieldReader) => T; + }, + ): T[] { + const result = new Array(size); + for (let i = 0; i < size; i++) { + result[i] = itemDeserializer.fromFields(this); + } + return result; + } + + /** + * Read a fixed-size tuple from the field array using dense allocation (Array.from). + * Only use with compile-time constant sizes — the size parameter MUST NOT come from untrusted input. 
+ */ + public readTuple( size: N, itemDeserializer: { - /** - * A function for deserializing data from a FieldReader instance. - */ fromFields: (reader: FieldReader) => T; }, ): Tuple { diff --git a/yarn-project/stdlib/src/avm/avm_accumulated_data.ts b/yarn-project/stdlib/src/avm/avm_accumulated_data.ts index 3e39ccb53948..f7c9b3ce730f 100644 --- a/yarn-project/stdlib/src/avm/avm_accumulated_data.ts +++ b/yarn-project/stdlib/src/avm/avm_accumulated_data.ts @@ -115,9 +115,9 @@ export class AvmAccumulatedData { return new this( reader.readFieldArray(MAX_NOTE_HASHES_PER_TX), reader.readFieldArray(MAX_NULLIFIERS_PER_TX), - reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), reader.readObject(FlatPublicLogs), - reader.readArray(MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, PublicDataWrite), + reader.readTuple(MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, PublicDataWrite), ); } diff --git a/yarn-project/stdlib/src/avm/avm_circuit_public_inputs.ts b/yarn-project/stdlib/src/avm/avm_circuit_public_inputs.ts index eb6563aba31e..82b7a195fc2f 100644 --- a/yarn-project/stdlib/src/avm/avm_circuit_public_inputs.ts +++ b/yarn-project/stdlib/src/avm/avm_circuit_public_inputs.ts @@ -206,8 +206,8 @@ export class AvmCircuitPublicInputs { AztecAddress.fromFields(reader), reader.readField(), PublicCallRequestArrayLengths.fromFields(reader), - reader.readArray(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), - reader.readArray(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), + reader.readTuple(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), + reader.readTuple(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), PublicCallRequest.fromFields(reader), PrivateToAvmAccumulatedDataArrayLengths.fromFields(reader), PrivateToAvmAccumulatedDataArrayLengths.fromFields(reader), diff --git a/yarn-project/stdlib/src/kernel/claimed_length_array.ts b/yarn-project/stdlib/src/kernel/claimed_length_array.ts index 
ef0c3ee67716..66fab34851f9 100644 --- a/yarn-project/stdlib/src/kernel/claimed_length_array.ts +++ b/yarn-project/stdlib/src/kernel/claimed_length_array.ts @@ -42,7 +42,7 @@ export class ClaimedLengthArray { arrayLength: N, ): ClaimedLengthArray { const reader = FieldReader.asReader(fields); - const array = reader.readArray(arrayLength, deserializer); + const array = reader.readTuple(arrayLength, deserializer); const claimedLength = reader.readU32(); return new ClaimedLengthArray(array, claimedLength); } diff --git a/yarn-project/stdlib/src/kernel/private_to_avm_accumulated_data.ts b/yarn-project/stdlib/src/kernel/private_to_avm_accumulated_data.ts index d64dbdad345c..c7dfa601d38d 100644 --- a/yarn-project/stdlib/src/kernel/private_to_avm_accumulated_data.ts +++ b/yarn-project/stdlib/src/kernel/private_to_avm_accumulated_data.ts @@ -73,7 +73,7 @@ export class PrivateToAvmAccumulatedData { return new this( reader.readFieldArray(MAX_NOTE_HASHES_PER_TX), reader.readFieldArray(MAX_NULLIFIERS_PER_TX), - reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), ); } diff --git a/yarn-project/stdlib/src/kernel/private_to_public_accumulated_data.ts b/yarn-project/stdlib/src/kernel/private_to_public_accumulated_data.ts index c35b96c5656d..1f8cab457db6 100644 --- a/yarn-project/stdlib/src/kernel/private_to_public_accumulated_data.ts +++ b/yarn-project/stdlib/src/kernel/private_to_public_accumulated_data.ts @@ -62,10 +62,10 @@ export class PrivateToPublicAccumulatedData { return new this( reader.readFieldArray(MAX_NOTE_HASHES_PER_TX), reader.readFieldArray(MAX_NULLIFIERS_PER_TX), - reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), - reader.readArray(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), - reader.readArray(MAX_CONTRACT_CLASS_LOGS_PER_TX, ScopedLogHash), - reader.readArray(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), + reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + 
reader.readTuple(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), + reader.readTuple(MAX_CONTRACT_CLASS_LOGS_PER_TX, ScopedLogHash), + reader.readTuple(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), ); } diff --git a/yarn-project/stdlib/src/tx/protocol_contracts.ts b/yarn-project/stdlib/src/tx/protocol_contracts.ts index 85ba8cb78142..5b64ff1d7893 100644 --- a/yarn-project/stdlib/src/tx/protocol_contracts.ts +++ b/yarn-project/stdlib/src/tx/protocol_contracts.ts @@ -30,7 +30,7 @@ export class ProtocolContracts { static fromFields(fields: Fr[] | FieldReader): ProtocolContracts { const reader = FieldReader.asReader(fields); - return new ProtocolContracts(reader.readArray(MAX_PROTOCOL_CONTRACTS, AztecAddress)); + return new ProtocolContracts(reader.readTuple(MAX_PROTOCOL_CONTRACTS, AztecAddress)); } toFields(): Fr[] { From 7cdaafcf0d15fda5b3226d265cc1dc27b1edaa45 Mon Sep 17 00:00:00 2001 From: AztecBot Date: Tue, 17 Mar 2026 09:36:28 +0000 Subject: [PATCH 10/41] fix: skip handleChainFinalized when block is behind oldest available When the finalized block jumps backwards past pruned state, return early instead of clamping and continuing into the pruning logic. The previous clamping fix avoided the setFinalized error but then removeHistoricalBlocks would fail trying to prune to a block that is already the oldest. Also guard removeHistoricalBlocks against being called with a block number that is not newer than the current oldest available block. 
--- .../server_world_state_synchronizer.ts | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts index 5bd76e38a48b..1ed4fd7717a6 100644 --- a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts +++ b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts @@ -388,18 +388,16 @@ export class ServerWorldStateSynchronizer private async handleChainFinalized(blockNumber: BlockNumber) { this.log.verbose(`Finalized chain is now at block ${blockNumber}`); - // Clamp to the oldest block still available in world state. The finalized block number can - // jump backwards (e.g. when the finalization heuristic changes) and try to read block data - // that has already been pruned, which causes the native world state to throw. + // If the finalized block number is older than the oldest available block in world state, + // skip entirely. The finalized block number can jump backwards (e.g. when the finalization + // heuristic changes) and try to read block data that has already been pruned. When this + // happens, there is nothing useful to do — the native world state is already finalized + // past this point and pruning has already happened. const currentSummary = await this.merkleTreeDb.getStatusSummary(); - if (blockNumber < currentSummary.oldestHistoricalBlock) { + if (blockNumber < currentSummary.oldestHistoricalBlock || blockNumber < 1) { this.log.warn( - `Finalized block ${blockNumber} is older than the oldest available block ${currentSummary.oldestHistoricalBlock}. ` + - `Clamping to oldest available block.`, + `Finalized block ${blockNumber} is older than the oldest available block ${currentSummary.oldestHistoricalBlock}. 
Skipping.`, ); - blockNumber = currentSummary.oldestHistoricalBlock; - } - if (blockNumber < 1) { return; } const summary = await this.merkleTreeDb.setFinalized(blockNumber); @@ -435,6 +433,12 @@ export class ServerWorldStateSynchronizer } // Find the block at the start of the checkpoint and remove blocks up to this one const newHistoricBlock = historicCheckpoint.checkpoint.blocks[0]; + if (newHistoricBlock.number <= currentSummary.oldestHistoricalBlock) { + this.log.debug( + `Historic block ${newHistoricBlock.number} is not newer than oldest available ${currentSummary.oldestHistoricalBlock}. Skipping prune.`, + ); + return; + } this.log.verbose(`Pruning historic blocks to ${newHistoricBlock.number}`); const status = await this.merkleTreeDb.removeHistoricalBlocks(BlockNumber(newHistoricBlock.number)); this.log.debug(`World state summary `, status.summary); From e31df376560655fb0e84cb35a8760b729e9366bb Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Tue, 17 Mar 2026 06:43:02 -0400 Subject: [PATCH 11/41] chore: demote finalized block skip log to trace (#21661) ## Summary Demotes the "Finalized block X is older than oldest available block Y. Skipping." log from `warn` to `trace`. This message fires on every block stream tick while the finalized block is behind the oldest available, filling up operator logs on deployed networks. 
ClaudeBox log: https://claudebox.work/s/8e97449f22ba9343?run=6 --- .../src/synchronizer/server_world_state_synchronizer.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts index 1ed4fd7717a6..37d55fce1eb4 100644 --- a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts +++ b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts @@ -395,7 +395,7 @@ export class ServerWorldStateSynchronizer // past this point and pruning has already happened. const currentSummary = await this.merkleTreeDb.getStatusSummary(); if (blockNumber < currentSummary.oldestHistoricalBlock || blockNumber < 1) { - this.log.warn( + this.log.trace( `Finalized block ${blockNumber} is older than the oldest available block ${currentSummary.oldestHistoricalBlock}. Skipping.`, ); return; From 08b84e85d1009491d774aad42fcab29f465852e0 Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Tue, 17 Mar 2026 07:54:48 -0400 Subject: [PATCH 12/41] fix: skip -march auto-detection for cross-compilation presets (#21356) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Fixes CI failure on merge-train/spartan caused by `-march=skylake` being injected into aarch64 cross-compilation builds (arm64-android, arm64-ios, arm64-macos). **Root cause:** The `arch.cmake` auto-detection added in #21611 defaults `TARGET_ARCH` to `skylake` when `ARM` is not detected. Cross-compile presets (ios, android) don't set `CMAKE_SYSTEM_PROCESSOR`, so ARM detection fails and `-march=skylake` gets passed to aarch64 Zig builds — which errors with `unknown CPU: 'skylake'`. For arm64-macos, `-march=generic` overrides Zig's `-mcpu=apple_a14`, breaking libdeflate. **Fix:** Gate auto-detection on `NOT CMAKE_CROSSCOMPILING`. 
Cross-compile toolchains handle architecture targeting via their own flags (e.g. Zig `-mcpu`). Presets that explicitly set `TARGET_ARCH` (amd64-linux, arm64-linux) are unaffected. Also restores `native_build_dir` variable dropped in the build infrastructure refactor. ## Test plan - Verified all cross-compile presets (arm64-android, arm64-ios, arm64-ios-sim, arm64-macos, x86_64-android) configure with zero `-march` flags - Verified native presets (default, amd64-linux, arm64-linux) still get correct `-march` values --- barretenberg/cpp/bootstrap.sh | 1 + barretenberg/cpp/cmake/arch.cmake | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/barretenberg/cpp/bootstrap.sh b/barretenberg/cpp/bootstrap.sh index 7a38e0bd4f80..3c533971fed9 100755 --- a/barretenberg/cpp/bootstrap.sh +++ b/barretenberg/cpp/bootstrap.sh @@ -7,6 +7,7 @@ else export native_preset=${NATIVE_PRESET:-clang20-no-avm} fi export hash=$(hash_str $(../../avm-transpiler/bootstrap.sh hash) $(cache_content_hash .rebuild_patterns)) +export native_build_dir=$(scripts/preset-build-dir $native_preset) # Injects version number into a given bb binary. # Means we don't actually need to rebuild bb to release a new version if code hasn't changed. diff --git a/barretenberg/cpp/cmake/arch.cmake b/barretenberg/cpp/cmake/arch.cmake index fe6488cbce3f..ed185b1fa970 100644 --- a/barretenberg/cpp/cmake/arch.cmake +++ b/barretenberg/cpp/cmake/arch.cmake @@ -5,11 +5,13 @@ if(WASM) add_compile_options(-fno-exceptions -fno-slp-vectorize) endif() -# Auto-detect TARGET_ARCH if not explicitly set. +# Auto-detect TARGET_ARCH if not explicitly set (native builds only). # Use 'skylake' on x86_64 (matches our cross-compile presets) and 'generic' on ARM # to avoid emitting CPU-specific instructions (e.g. SVE on Graviton) that break on # other ARM machines like Apple Silicon. -if(NOT WASM AND NOT TARGET_ARCH) +# Skip auto-detection when cross-compiling — the toolchain (e.g. 
Zig -mcpu) handles +# architecture targeting, and injecting -march here conflicts with it. +if(NOT WASM AND NOT TARGET_ARCH AND NOT CMAKE_CROSSCOMPILING) if(ARM) set(TARGET_ARCH "generic") else() From 169ca7ced1a0bd1fceeeb9922f6725ce86c15614 Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Tue, 17 Mar 2026 10:41:39 -0300 Subject: [PATCH 13/41] fix(p2p): penalize peer on tx rejected by pool The pool should never reject a tx that passed validation. However, in case it does, we now add a warning and penalize the peer that sent us the invalid tx. --- yarn-project/p2p/src/services/libp2p/libp2p_service.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts index 6f89b66a5cf3..3f13fa0c5049 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts @@ -976,6 +976,11 @@ export class LibP2PService extends WithTracer implements P2PService { } else if (wasIgnored) { return { result: TopicValidatorResult.Ignore, obj: tx }; } else { + this.logger.warn(`Gossiped tx ${txHash.toString()} unexpectedly rejected by pool`, { + source: source.toString(), + txHash: txHash.toString(), + }); + this.peerManager.penalizePeer(source, PeerErrorSeverity.HighToleranceError); return { result: TopicValidatorResult.Reject }; } }; From e4dcdee9a22f32ccefe586ab0e78e49b3e3dcc1a Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Tue, 17 Mar 2026 10:42:59 -0300 Subject: [PATCH 14/41] chore: revert "add bounds when allocating arrays in deserialization" (#21622) (#21666) It was a red herring. We were not using `Array.from({ length })` but `Array.from({ length }, () => deserializer)`, and the deserializer would throw when reaching the end of the buffer, preventing the full allocation of the array. 
--- yarn-project/blob-lib/src/sponge_blob.ts | 4 +-- .../foundation/src/serialize/buffer_reader.ts | 35 ++++--------------- .../foundation/src/serialize/field_reader.ts | 26 +++----------- .../src/trees/membership_witness.ts | 4 +-- .../stdlib/src/avm/avm_accumulated_data.ts | 12 +++---- .../src/avm/avm_circuit_public_inputs.ts | 8 ++--- .../stdlib/src/kernel/claimed_length_array.ts | 4 +-- .../hints/private_kernel_reset_hints.ts | 7 ++-- .../src/kernel/hints/read_request_hints.ts | 6 ++-- .../stdlib/src/kernel/padded_side_effects.ts | 6 ++-- .../kernel/private_to_avm_accumulated_data.ts | 8 ++--- .../private_to_public_accumulated_data.ts | 20 +++++------ .../private_to_rollup_accumulated_data.ts | 10 +++--- yarn-project/stdlib/src/logs/private_log.ts | 2 +- yarn-project/stdlib/src/logs/public_log.ts | 12 +------ .../src/parity/parity_base_private_inputs.ts | 2 +- yarn-project/stdlib/src/proofs/chonk_proof.ts | 10 +----- .../stdlib/src/rollup/base_rollup_hints.ts | 4 +-- .../block_root_rollup_private_inputs.ts | 16 ++++----- .../rollup/checkpoint_rollup_public_inputs.ts | 4 +-- .../checkpoint_root_rollup_private_inputs.ts | 8 ++--- .../src/rollup/root_rollup_public_inputs.ts | 4 +-- .../src/rollup/tree_snapshot_diff_hints.ts | 10 +++--- .../stdlib/src/tx/protocol_contracts.ts | 4 +-- yarn-project/stdlib/src/vks/vk_data.ts | 2 +- 25 files changed, 83 insertions(+), 145 deletions(-) diff --git a/yarn-project/blob-lib/src/sponge_blob.ts b/yarn-project/blob-lib/src/sponge_blob.ts index 4578d30dabe7..50a049bd829f 100644 --- a/yarn-project/blob-lib/src/sponge_blob.ts +++ b/yarn-project/blob-lib/src/sponge_blob.ts @@ -91,8 +91,8 @@ export class Poseidon2Sponge { static fromBuffer(buffer: Buffer | BufferReader): Poseidon2Sponge { const reader = BufferReader.asReader(buffer); return new Poseidon2Sponge( - reader.readTuple(3, Fr), - reader.readTuple(4, Fr), + reader.readArray(3, Fr), + reader.readArray(4, Fr), reader.readNumber(), reader.readBoolean(), ); diff --git 
a/yarn-project/foundation/src/serialize/buffer_reader.ts b/yarn-project/foundation/src/serialize/buffer_reader.ts index 45a88457f668..8cc8071db595 100644 --- a/yarn-project/foundation/src/serialize/buffer_reader.ts +++ b/yarn-project/foundation/src/serialize/buffer_reader.ts @@ -286,39 +286,16 @@ export class BufferReader { } /** - * Read an array from the buffer using lazy allocation (new Array + loop). - * Safe for use with untrusted sizes — does not pre-allocate memory proportional to size. + * Read an array of a fixed size with elements of type T from the buffer. + * The 'itemDeserializer' object should have a 'fromBuffer' method that takes a BufferReader instance as input, + * and returns an instance of the desired deserialized data type T. + * This method will call the 'fromBuffer' method for each element in the array and return the resulting array. * - * @param size - The number of elements to read. + * @param size - The fixed number of elements in the array. * @param itemDeserializer - An object with a 'fromBuffer' method to deserialize individual elements of type T. * @returns An array of instances of type T. */ - public readArray( - size: number, - itemDeserializer: { - /** - * A function for deserializing data from a BufferReader instance. - */ - fromBuffer: (reader: BufferReader) => T; - }, - ): T[] { - const result = new Array(size); - for (let i = 0; i < size; i++) { - result[i] = itemDeserializer.fromBuffer(this); - } - return result; - } - - /** - * Read a fixed-size tuple from the buffer using dense allocation (Array.from). - * Only use with compile-time constant sizes — the size parameter MUST NOT come from untrusted input - * as Array.from pre-allocates memory proportional to size. - * - * @param size - The fixed number of elements (must be a compile-time constant). - * @param itemDeserializer - An object with a 'fromBuffer' method to deserialize individual elements of type T. - * @returns A densely-allocated tuple of instances of type T. 
- */ - public readTuple( + public readArray( size: N, itemDeserializer: { /** diff --git a/yarn-project/foundation/src/serialize/field_reader.ts b/yarn-project/foundation/src/serialize/field_reader.ts index d150a913a48a..3ffb1395a04b 100644 --- a/yarn-project/foundation/src/serialize/field_reader.ts +++ b/yarn-project/foundation/src/serialize/field_reader.ts @@ -169,30 +169,12 @@ export class FieldReader { * @param itemDeserializer - An object with a 'fromFields' method to deserialize individual elements of type T. * @returns An array of instances of type T. */ - /** - * Read an array from the field array using lazy allocation (new Array + loop). - * Safe for use with untrusted sizes. - */ - public readArray( - size: number, - itemDeserializer: { - fromFields: (reader: FieldReader) => T; - }, - ): T[] { - const result = new Array(size); - for (let i = 0; i < size; i++) { - result[i] = itemDeserializer.fromFields(this); - } - return result; - } - - /** - * Read a fixed-size tuple from the field array using dense allocation (Array.from). - * Only use with compile-time constant sizes — the size parameter MUST NOT come from untrusted input. - */ - public readTuple( + public readArray( size: N, itemDeserializer: { + /** + * A function for deserializing data from a FieldReader instance. 
+ */ fromFields: (reader: FieldReader) => T; }, ): Tuple { diff --git a/yarn-project/foundation/src/trees/membership_witness.ts b/yarn-project/foundation/src/trees/membership_witness.ts index c4500ad710d4..bbc46f886b1e 100644 --- a/yarn-project/foundation/src/trees/membership_witness.ts +++ b/yarn-project/foundation/src/trees/membership_witness.ts @@ -94,7 +94,7 @@ export class MembershipWitness { static fromBuffer(buffer: Buffer | BufferReader, size: N): MembershipWitness { const reader = BufferReader.asReader(buffer); const leafIndex = toBigIntBE(reader.readBytes(32)); - const siblingPath = reader.readArray(size, Fr) as Tuple; + const siblingPath = reader.readArray(size, Fr); return new MembershipWitness(size, leafIndex, siblingPath); } @@ -108,7 +108,7 @@ export class MembershipWitness { fromBuffer: (buffer: Buffer | BufferReader) => { const reader = BufferReader.asReader(buffer); const leafIndex = toBigIntBE(reader.readBytes(32)); - const siblingPath = reader.readArray(size, Fr) as Tuple; + const siblingPath = reader.readArray(size, Fr); return new MembershipWitness(size, leafIndex, siblingPath); }, }; diff --git a/yarn-project/stdlib/src/avm/avm_accumulated_data.ts b/yarn-project/stdlib/src/avm/avm_accumulated_data.ts index f7c9b3ce730f..f072f5a00393 100644 --- a/yarn-project/stdlib/src/avm/avm_accumulated_data.ts +++ b/yarn-project/stdlib/src/avm/avm_accumulated_data.ts @@ -88,11 +88,11 @@ export class AvmAccumulatedData { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); return new this( - reader.readTuple(MAX_NOTE_HASHES_PER_TX, Fr), - reader.readTuple(MAX_NULLIFIERS_PER_TX, Fr), - reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readArray(MAX_NOTE_HASHES_PER_TX, Fr), + reader.readArray(MAX_NULLIFIERS_PER_TX, Fr), + reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), reader.readObject(FlatPublicLogs), - reader.readTuple(MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, 
PublicDataWrite), + reader.readArray(MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, PublicDataWrite), ); } @@ -115,9 +115,9 @@ export class AvmAccumulatedData { return new this( reader.readFieldArray(MAX_NOTE_HASHES_PER_TX), reader.readFieldArray(MAX_NULLIFIERS_PER_TX), - reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), reader.readObject(FlatPublicLogs), - reader.readTuple(MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, PublicDataWrite), + reader.readArray(MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, PublicDataWrite), ); } diff --git a/yarn-project/stdlib/src/avm/avm_circuit_public_inputs.ts b/yarn-project/stdlib/src/avm/avm_circuit_public_inputs.ts index 82b7a195fc2f..4dbeca81752c 100644 --- a/yarn-project/stdlib/src/avm/avm_circuit_public_inputs.ts +++ b/yarn-project/stdlib/src/avm/avm_circuit_public_inputs.ts @@ -143,8 +143,8 @@ export class AvmCircuitPublicInputs { reader.readObject(AztecAddress), reader.readObject(Fr), reader.readObject(PublicCallRequestArrayLengths), - reader.readTuple(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), - reader.readTuple(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), + reader.readArray(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), + reader.readArray(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), reader.readObject(PublicCallRequest), reader.readObject(PrivateToAvmAccumulatedDataArrayLengths), reader.readObject(PrivateToAvmAccumulatedDataArrayLengths), @@ -206,8 +206,8 @@ export class AvmCircuitPublicInputs { AztecAddress.fromFields(reader), reader.readField(), PublicCallRequestArrayLengths.fromFields(reader), - reader.readTuple(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), - reader.readTuple(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), + reader.readArray(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), + reader.readArray(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), PublicCallRequest.fromFields(reader), 
PrivateToAvmAccumulatedDataArrayLengths.fromFields(reader), PrivateToAvmAccumulatedDataArrayLengths.fromFields(reader), diff --git a/yarn-project/stdlib/src/kernel/claimed_length_array.ts b/yarn-project/stdlib/src/kernel/claimed_length_array.ts index 66fab34851f9..109f7463137e 100644 --- a/yarn-project/stdlib/src/kernel/claimed_length_array.ts +++ b/yarn-project/stdlib/src/kernel/claimed_length_array.ts @@ -25,7 +25,7 @@ export class ClaimedLengthArray { arrayLength: N, ): ClaimedLengthArray { const reader = BufferReader.asReader(buffer); - const array = reader.readArray(arrayLength, deserializer) as Tuple; + const array = reader.readArray(arrayLength, deserializer); const claimedLength = reader.readNumber(); return new ClaimedLengthArray(array, claimedLength); } @@ -42,7 +42,7 @@ export class ClaimedLengthArray { arrayLength: N, ): ClaimedLengthArray { const reader = FieldReader.asReader(fields); - const array = reader.readTuple(arrayLength, deserializer); + const array = reader.readArray(arrayLength, deserializer); const claimedLength = reader.readU32(); return new ClaimedLengthArray(array, claimedLength); } diff --git a/yarn-project/stdlib/src/kernel/hints/private_kernel_reset_hints.ts b/yarn-project/stdlib/src/kernel/hints/private_kernel_reset_hints.ts index 8fa1175f14b1..72c53ecb5a77 100644 --- a/yarn-project/stdlib/src/kernel/hints/private_kernel_reset_hints.ts +++ b/yarn-project/stdlib/src/kernel/hints/private_kernel_reset_hints.ts @@ -105,11 +105,8 @@ export class PrivateKernelResetHints< fromBuffer: buf => nullifierReadRequestHintsFromBuffer(buf, numNullifierReadRequestPending, numNullifierReadRequestSettled), }), - reader.readArray(numKeyValidationHints, KeyValidationHint) as Tuple, - reader.readArray(numTransientDataSquashingHints, TransientDataSquashingHint) as Tuple< - TransientDataSquashingHint, - TRANSIENT_DATA_HINTS_LEN - >, + reader.readArray(numKeyValidationHints, KeyValidationHint), + reader.readArray(numTransientDataSquashingHints, 
TransientDataSquashingHint), ); } } diff --git a/yarn-project/stdlib/src/kernel/hints/read_request_hints.ts b/yarn-project/stdlib/src/kernel/hints/read_request_hints.ts index 4f57bf2e327b..39b6663aa953 100644 --- a/yarn-project/stdlib/src/kernel/hints/read_request_hints.ts +++ b/yarn-project/stdlib/src/kernel/hints/read_request_hints.ts @@ -158,11 +158,11 @@ export class ReadRequestResetHints< > { const reader = BufferReader.asReader(buffer); return new ReadRequestResetHints( - reader.readArray(readRequestLen, ReadRequestAction) as Tuple, - reader.readArray(numPendingReads, PendingReadHint) as Tuple, + reader.readArray(readRequestLen, ReadRequestAction), + reader.readArray(numPendingReads, PendingReadHint), reader.readArray(numSettledReads, { fromBuffer: r => SettledReadHint.fromBuffer(r, treeHeight, leafPreimageFromBuffer), - }) as Tuple, SETTLED_READ_HINTS_LEN>, + }), ); } diff --git a/yarn-project/stdlib/src/kernel/padded_side_effects.ts b/yarn-project/stdlib/src/kernel/padded_side_effects.ts index 48d2c2a2442d..f42207255465 100644 --- a/yarn-project/stdlib/src/kernel/padded_side_effects.ts +++ b/yarn-project/stdlib/src/kernel/padded_side_effects.ts @@ -19,9 +19,9 @@ export class PaddedSideEffects { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); return new PaddedSideEffects( - reader.readTuple(MAX_NOTE_HASHES_PER_TX, Fr), - reader.readTuple(MAX_NULLIFIERS_PER_TX, Fr), - reader.readTuple(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), + reader.readArray(MAX_NOTE_HASHES_PER_TX, Fr), + reader.readArray(MAX_NULLIFIERS_PER_TX, Fr), + reader.readArray(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), ); } diff --git a/yarn-project/stdlib/src/kernel/private_to_avm_accumulated_data.ts b/yarn-project/stdlib/src/kernel/private_to_avm_accumulated_data.ts index c7dfa601d38d..9442439a039a 100644 --- a/yarn-project/stdlib/src/kernel/private_to_avm_accumulated_data.ts +++ b/yarn-project/stdlib/src/kernel/private_to_avm_accumulated_data.ts @@ -73,7 
+73,7 @@ export class PrivateToAvmAccumulatedData { return new this( reader.readFieldArray(MAX_NOTE_HASHES_PER_TX), reader.readFieldArray(MAX_NULLIFIERS_PER_TX), - reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), ); } @@ -94,9 +94,9 @@ export class PrivateToAvmAccumulatedData { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); return new PrivateToAvmAccumulatedData( - reader.readTuple(MAX_NOTE_HASHES_PER_TX, Fr), - reader.readTuple(MAX_NULLIFIERS_PER_TX, Fr), - reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readArray(MAX_NOTE_HASHES_PER_TX, Fr), + reader.readArray(MAX_NULLIFIERS_PER_TX, Fr), + reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), ); } diff --git a/yarn-project/stdlib/src/kernel/private_to_public_accumulated_data.ts b/yarn-project/stdlib/src/kernel/private_to_public_accumulated_data.ts index 1f8cab457db6..ab1b79190672 100644 --- a/yarn-project/stdlib/src/kernel/private_to_public_accumulated_data.ts +++ b/yarn-project/stdlib/src/kernel/private_to_public_accumulated_data.ts @@ -62,10 +62,10 @@ export class PrivateToPublicAccumulatedData { return new this( reader.readFieldArray(MAX_NOTE_HASHES_PER_TX), reader.readFieldArray(MAX_NULLIFIERS_PER_TX), - reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), - reader.readTuple(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), - reader.readTuple(MAX_CONTRACT_CLASS_LOGS_PER_TX, ScopedLogHash), - reader.readTuple(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), + reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readArray(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), + reader.readArray(MAX_CONTRACT_CLASS_LOGS_PER_TX, ScopedLogHash), + reader.readArray(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), ); } @@ -76,12 +76,12 @@ export class PrivateToPublicAccumulatedData { static fromBuffer(buffer: Buffer | BufferReader) { const reader = 
BufferReader.asReader(buffer); return new PrivateToPublicAccumulatedData( - reader.readTuple(MAX_NOTE_HASHES_PER_TX, Fr), - reader.readTuple(MAX_NULLIFIERS_PER_TX, Fr), - reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), - reader.readTuple(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), - reader.readTuple(MAX_CONTRACT_CLASS_LOGS_PER_TX, ScopedLogHash), - reader.readTuple(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), + reader.readArray(MAX_NOTE_HASHES_PER_TX, Fr), + reader.readArray(MAX_NULLIFIERS_PER_TX, Fr), + reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readArray(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), + reader.readArray(MAX_CONTRACT_CLASS_LOGS_PER_TX, ScopedLogHash), + reader.readArray(MAX_ENQUEUED_CALLS_PER_TX, PublicCallRequest), ); } diff --git a/yarn-project/stdlib/src/kernel/private_to_rollup_accumulated_data.ts b/yarn-project/stdlib/src/kernel/private_to_rollup_accumulated_data.ts index 74d1aeed52cd..7807f6ad9b97 100644 --- a/yarn-project/stdlib/src/kernel/private_to_rollup_accumulated_data.ts +++ b/yarn-project/stdlib/src/kernel/private_to_rollup_accumulated_data.ts @@ -95,11 +95,11 @@ export class PrivateToRollupAccumulatedData { static fromBuffer(buffer: Buffer | BufferReader): PrivateToRollupAccumulatedData { const reader = BufferReader.asReader(buffer); return new PrivateToRollupAccumulatedData( - reader.readTuple(MAX_NOTE_HASHES_PER_TX, Fr), - reader.readTuple(MAX_NULLIFIERS_PER_TX, Fr), - reader.readTuple(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), - reader.readTuple(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), - reader.readTuple(MAX_CONTRACT_CLASS_LOGS_PER_TX, ScopedLogHash), + reader.readArray(MAX_NOTE_HASHES_PER_TX, Fr), + reader.readArray(MAX_NULLIFIERS_PER_TX, Fr), + reader.readArray(MAX_L2_TO_L1_MSGS_PER_TX, ScopedL2ToL1Message), + reader.readArray(MAX_PRIVATE_LOGS_PER_TX, PrivateLog), + reader.readArray(MAX_CONTRACT_CLASS_LOGS_PER_TX, ScopedLogHash), ); } diff --git a/yarn-project/stdlib/src/logs/private_log.ts 
b/yarn-project/stdlib/src/logs/private_log.ts index be11dda2b0bf..10f915a1e6bd 100644 --- a/yarn-project/stdlib/src/logs/private_log.ts +++ b/yarn-project/stdlib/src/logs/private_log.ts @@ -73,7 +73,7 @@ export class PrivateLog { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); - return new PrivateLog(reader.readTuple(PRIVATE_LOG_SIZE_IN_FIELDS, Fr), reader.readNumber()); + return new PrivateLog(reader.readArray(PRIVATE_LOG_SIZE_IN_FIELDS, Fr), reader.readNumber()); } static random(tag = Fr.random()) { diff --git a/yarn-project/stdlib/src/logs/public_log.ts b/yarn-project/stdlib/src/logs/public_log.ts index 19c9daaf5b32..490e39a0bcae 100644 --- a/yarn-project/stdlib/src/logs/public_log.ts +++ b/yarn-project/stdlib/src/logs/public_log.ts @@ -1,8 +1,4 @@ -import { - FLAT_PUBLIC_LOGS_PAYLOAD_LENGTH, - MAX_PUBLIC_LOG_SIZE_IN_FIELDS, - PUBLIC_LOG_HEADER_LENGTH, -} from '@aztec/constants'; +import { FLAT_PUBLIC_LOGS_PAYLOAD_LENGTH, PUBLIC_LOG_HEADER_LENGTH } from '@aztec/constants'; import type { FieldsOf } from '@aztec/foundation/array'; import { Fr } from '@aztec/foundation/curves/bn254'; import { type ZodFor, schemas } from '@aztec/foundation/schemas'; @@ -80,9 +76,6 @@ export class FlatPublicLogs { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); const length = reader.readNumber(); - if (length > FLAT_PUBLIC_LOGS_PAYLOAD_LENGTH) { - throw new Error(`FlatPublicLogs length ${length} exceeds maximum ${FLAT_PUBLIC_LOGS_PAYLOAD_LENGTH}`); - } return this.fromUnpaddedPayload(reader.readArray(length, Fr)); } @@ -178,9 +171,6 @@ export class PublicLog { static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); const fieldsLength = reader.readNumber(); - if (fieldsLength > MAX_PUBLIC_LOG_SIZE_IN_FIELDS) { - throw new Error(`PublicLog fields length ${fieldsLength} exceeds maximum ${MAX_PUBLIC_LOG_SIZE_IN_FIELDS}`); - } return new 
PublicLog(reader.readObject(AztecAddress), reader.readArray(fieldsLength, Fr)); } diff --git a/yarn-project/stdlib/src/parity/parity_base_private_inputs.ts b/yarn-project/stdlib/src/parity/parity_base_private_inputs.ts index eece01c59c37..a8a4dd68fd77 100644 --- a/yarn-project/stdlib/src/parity/parity_base_private_inputs.ts +++ b/yarn-project/stdlib/src/parity/parity_base_private_inputs.ts @@ -41,7 +41,7 @@ export class ParityBasePrivateInputs { */ static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); - return new ParityBasePrivateInputs(reader.readTuple(NUM_MSGS_PER_BASE_PARITY, Fr), Fr.fromBuffer(reader)); + return new ParityBasePrivateInputs(reader.readArray(NUM_MSGS_PER_BASE_PARITY, Fr), Fr.fromBuffer(reader)); } /** diff --git a/yarn-project/stdlib/src/proofs/chonk_proof.ts b/yarn-project/stdlib/src/proofs/chonk_proof.ts index 6afe0a67c026..bb22a6187fa0 100644 --- a/yarn-project/stdlib/src/proofs/chonk_proof.ts +++ b/yarn-project/stdlib/src/proofs/chonk_proof.ts @@ -56,10 +56,7 @@ export class ChonkProof { static fromBuffer(buffer: Buffer | BufferReader): ChonkProof { const reader = BufferReader.asReader(buffer); const proofLength = reader.readNumber(); - if (proofLength !== CHONK_PROOF_LENGTH) { - throw new Error(`Invalid ChonkProof length from buffer: ${proofLength}, expected ${CHONK_PROOF_LENGTH}`); - } - const proof = reader.readArray(CHONK_PROOF_LENGTH, Fr); + const proof = reader.readArray(proofLength, Fr); return new ChonkProof(proof); } @@ -109,11 +106,6 @@ export class ChonkProofWithPublicInputs { static fromBuffer(buffer: Buffer | BufferReader): ChonkProofWithPublicInputs { const reader = BufferReader.asReader(buffer); const proofLength = reader.readNumber(); - if (proofLength < CHONK_PROOF_LENGTH) { - throw new Error( - `Invalid ChonkProofWithPublicInputs length from buffer: ${proofLength}, expected at least ${CHONK_PROOF_LENGTH}`, - ); - } const proof = reader.readArray(proofLength, Fr); return new 
ChonkProofWithPublicInputs(proof); } diff --git a/yarn-project/stdlib/src/rollup/base_rollup_hints.ts b/yarn-project/stdlib/src/rollup/base_rollup_hints.ts index d846f9627e80..c92989ea7846 100644 --- a/yarn-project/stdlib/src/rollup/base_rollup_hints.ts +++ b/yarn-project/stdlib/src/rollup/base_rollup_hints.ts @@ -86,7 +86,7 @@ export class PrivateBaseRollupHints { reader.readObject(SpongeBlob), reader.readObject(TreeSnapshotDiffHints), reader.readObject(PublicDataTreeLeafPreimage), - reader.readTuple(ARCHIVE_HEIGHT, Fr), + reader.readArray(ARCHIVE_HEIGHT, Fr), makeTuple(MAX_CONTRACT_CLASS_LOGS_PER_TX, () => reader.readObject(ContractClassLogFields)), reader.readObject(BlockConstantData), ); @@ -163,7 +163,7 @@ export class PublicBaseRollupHints { return new PublicBaseRollupHints( reader.readObject(SpongeBlob), reader.readObject(AppendOnlyTreeSnapshot), - reader.readTuple(ARCHIVE_HEIGHT, Fr), + reader.readArray(ARCHIVE_HEIGHT, Fr), makeTuple(MAX_CONTRACT_CLASS_LOGS_PER_TX, () => reader.readObject(ContractClassLogFields)), ); } diff --git a/yarn-project/stdlib/src/rollup/block_root_rollup_private_inputs.ts b/yarn-project/stdlib/src/rollup/block_root_rollup_private_inputs.ts index 8a9e94eb48cf..19a99769dac9 100644 --- a/yarn-project/stdlib/src/rollup/block_root_rollup_private_inputs.ts +++ b/yarn-project/stdlib/src/rollup/block_root_rollup_private_inputs.ts @@ -60,8 +60,8 @@ export class BlockRootFirstRollupPrivateInputs { ProofData.fromBuffer(reader, ParityPublicInputs), [ProofData.fromBuffer(reader, TxRollupPublicInputs), ProofData.fromBuffer(reader, TxRollupPublicInputs)], AppendOnlyTreeSnapshot.fromBuffer(reader), - reader.readTuple(L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), - reader.readTuple(ARCHIVE_HEIGHT, Fr), + reader.readArray(L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), + reader.readArray(ARCHIVE_HEIGHT, Fr), ); } @@ -124,8 +124,8 @@ export class BlockRootSingleTxFirstRollupPrivateInputs { ProofData.fromBuffer(reader, ParityPublicInputs), 
ProofData.fromBuffer(reader, TxRollupPublicInputs), AppendOnlyTreeSnapshot.fromBuffer(reader), - reader.readTuple(L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), - reader.readTuple(ARCHIVE_HEIGHT, Fr), + reader.readArray(L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), + reader.readArray(ARCHIVE_HEIGHT, Fr), ); } @@ -206,8 +206,8 @@ export class BlockRootEmptyTxFirstRollupPrivateInputs { StateReference.fromBuffer(reader), CheckpointConstantData.fromBuffer(reader), reader.readUInt64(), - reader.readTuple(L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), - reader.readTuple(ARCHIVE_HEIGHT, Fr), + reader.readArray(L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), + reader.readArray(ARCHIVE_HEIGHT, Fr), ); } @@ -248,7 +248,7 @@ export class BlockRootRollupPrivateInputs { const reader = BufferReader.asReader(buffer); return new BlockRootRollupPrivateInputs( [ProofData.fromBuffer(reader, TxRollupPublicInputs), ProofData.fromBuffer(reader, TxRollupPublicInputs)], - reader.readTuple(ARCHIVE_HEIGHT, Fr), + reader.readArray(ARCHIVE_HEIGHT, Fr), ); } @@ -289,7 +289,7 @@ export class BlockRootSingleTxRollupPrivateInputs { const reader = BufferReader.asReader(buffer); return new BlockRootSingleTxRollupPrivateInputs( ProofData.fromBuffer(reader, TxRollupPublicInputs), - reader.readTuple(ARCHIVE_HEIGHT, Fr), + reader.readArray(ARCHIVE_HEIGHT, Fr), ); } diff --git a/yarn-project/stdlib/src/rollup/checkpoint_rollup_public_inputs.ts b/yarn-project/stdlib/src/rollup/checkpoint_rollup_public_inputs.ts index 6626279ec6ce..fe80522432ab 100644 --- a/yarn-project/stdlib/src/rollup/checkpoint_rollup_public_inputs.ts +++ b/yarn-project/stdlib/src/rollup/checkpoint_rollup_public_inputs.ts @@ -65,8 +65,8 @@ export class CheckpointRollupPublicInputs { reader.readObject(AppendOnlyTreeSnapshot), reader.readObject(AppendOnlyTreeSnapshot), reader.readObject(AppendOnlyTreeSnapshot), - reader.readTuple(MAX_CHECKPOINTS_PER_EPOCH, Fr), - reader.readTuple(MAX_CHECKPOINTS_PER_EPOCH, 
FeeRecipient), + reader.readArray(MAX_CHECKPOINTS_PER_EPOCH, Fr), + reader.readArray(MAX_CHECKPOINTS_PER_EPOCH, FeeRecipient), reader.readObject(BlobAccumulator), reader.readObject(BlobAccumulator), reader.readObject(FinalBlobBatchingChallenges), diff --git a/yarn-project/stdlib/src/rollup/checkpoint_root_rollup_private_inputs.ts b/yarn-project/stdlib/src/rollup/checkpoint_root_rollup_private_inputs.ts index 58cd518b44e0..bb15e9110fb0 100644 --- a/yarn-project/stdlib/src/rollup/checkpoint_root_rollup_private_inputs.ts +++ b/yarn-project/stdlib/src/rollup/checkpoint_root_rollup_private_inputs.ts @@ -81,15 +81,15 @@ export class CheckpointRootRollupHints { const reader = BufferReader.asReader(buffer); return new CheckpointRootRollupHints( BlockHeader.fromBuffer(reader), - reader.readTuple(ARCHIVE_HEIGHT, Fr), + reader.readArray(ARCHIVE_HEIGHT, Fr), reader.readObject(AppendOnlyTreeSnapshot), - reader.readTuple(OUT_HASH_TREE_HEIGHT, Fr), + reader.readArray(OUT_HASH_TREE_HEIGHT, Fr), reader.readObject(BlobAccumulator), reader.readObject(FinalBlobBatchingChallenges), // Below line gives error 'Type instantiation is excessively deep and possibly infinite. 
ts(2589)' - // reader.readTuple(FIELDS_PER_BLOB, Fr), + // reader.readArray(FIELDS_PER_BLOB, Fr), Array.from({ length: FIELDS_PER_BLOB * BLOBS_PER_CHECKPOINT }, () => Fr.fromBuffer(reader)), - reader.readTuple(BLOBS_PER_CHECKPOINT, BLS12Point), + reader.readArray(BLOBS_PER_CHECKPOINT, BLS12Point), Fr.fromBuffer(reader), ); } diff --git a/yarn-project/stdlib/src/rollup/root_rollup_public_inputs.ts b/yarn-project/stdlib/src/rollup/root_rollup_public_inputs.ts index 73063f3180e9..9c362ccd9e59 100644 --- a/yarn-project/stdlib/src/rollup/root_rollup_public_inputs.ts +++ b/yarn-project/stdlib/src/rollup/root_rollup_public_inputs.ts @@ -68,8 +68,8 @@ export class RootRollupPublicInputs { Fr.fromBuffer(reader), Fr.fromBuffer(reader), Fr.fromBuffer(reader), - reader.readTuple(MAX_CHECKPOINTS_PER_EPOCH, Fr), - reader.readTuple(MAX_CHECKPOINTS_PER_EPOCH, FeeRecipient), + reader.readArray(MAX_CHECKPOINTS_PER_EPOCH, Fr), + reader.readArray(MAX_CHECKPOINTS_PER_EPOCH, FeeRecipient), EpochConstantData.fromBuffer(reader), reader.readObject(FinalBlobAccumulator), ); diff --git a/yarn-project/stdlib/src/rollup/tree_snapshot_diff_hints.ts b/yarn-project/stdlib/src/rollup/tree_snapshot_diff_hints.ts index 02cbb148fbfb..a07bcdaec5cb 100644 --- a/yarn-project/stdlib/src/rollup/tree_snapshot_diff_hints.ts +++ b/yarn-project/stdlib/src/rollup/tree_snapshot_diff_hints.ts @@ -85,14 +85,14 @@ export class TreeSnapshotDiffHints { static fromBuffer(buffer: Buffer | BufferReader): TreeSnapshotDiffHints { const reader = BufferReader.asReader(buffer); return new TreeSnapshotDiffHints( - reader.readTuple(NOTE_HASH_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), - reader.readTuple(MAX_NULLIFIERS_PER_TX, NullifierLeafPreimage), - reader.readTuple(MAX_NULLIFIERS_PER_TX, { + reader.readArray(NOTE_HASH_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), + reader.readArray(MAX_NULLIFIERS_PER_TX, NullifierLeafPreimage), + reader.readArray(MAX_NULLIFIERS_PER_TX, { fromBuffer: buffer => MembershipWitness.fromBuffer(buffer, 
NULLIFIER_TREE_HEIGHT), }), - reader.readTuple(MAX_NULLIFIERS_PER_TX, Fr), + reader.readArray(MAX_NULLIFIERS_PER_TX, Fr), reader.readNumbers(MAX_NULLIFIERS_PER_TX), - reader.readTuple(NULLIFIER_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), + reader.readArray(NULLIFIER_SUBTREE_ROOT_SIBLING_PATH_LENGTH, Fr), MembershipWitness.fromBuffer(reader, PUBLIC_DATA_TREE_HEIGHT), ); } diff --git a/yarn-project/stdlib/src/tx/protocol_contracts.ts b/yarn-project/stdlib/src/tx/protocol_contracts.ts index 5b64ff1d7893..2abdbffe87a2 100644 --- a/yarn-project/stdlib/src/tx/protocol_contracts.ts +++ b/yarn-project/stdlib/src/tx/protocol_contracts.ts @@ -30,7 +30,7 @@ export class ProtocolContracts { static fromFields(fields: Fr[] | FieldReader): ProtocolContracts { const reader = FieldReader.asReader(fields); - return new ProtocolContracts(reader.readTuple(MAX_PROTOCOL_CONTRACTS, AztecAddress)); + return new ProtocolContracts(reader.readArray(MAX_PROTOCOL_CONTRACTS, AztecAddress)); } toFields(): Fr[] { @@ -39,7 +39,7 @@ export class ProtocolContracts { static fromBuffer(buffer: Buffer | BufferReader): ProtocolContracts { const reader = BufferReader.asReader(buffer); - return new ProtocolContracts(reader.readTuple(MAX_PROTOCOL_CONTRACTS, AztecAddress)); + return new ProtocolContracts(reader.readArray(MAX_PROTOCOL_CONTRACTS, AztecAddress)); } toBuffer() { diff --git a/yarn-project/stdlib/src/vks/vk_data.ts b/yarn-project/stdlib/src/vks/vk_data.ts index d2a7b60825c1..8f5adce80099 100644 --- a/yarn-project/stdlib/src/vks/vk_data.ts +++ b/yarn-project/stdlib/src/vks/vk_data.ts @@ -29,7 +29,7 @@ export class VkData { return new VkData( reader.readObject(VerificationKeyData), reader.readNumber(), - reader.readTuple(VK_TREE_HEIGHT, Fr), + reader.readArray(VK_TREE_HEIGHT, Fr), ); } From 938dc3cf248faf3939120dbc5d5824f590fd7dad Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Tue, 17 Mar 2026 10:43:33 -0300 Subject: [PATCH 15/41] fix: capture txs not available error reason in proposal handler 
(#21670) We were reporting txs not available as an unknown error. --- yarn-project/validator-client/src/block_proposal_handler.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/yarn-project/validator-client/src/block_proposal_handler.ts b/yarn-project/validator-client/src/block_proposal_handler.ts index 43c890bdafa8..1582c74b334c 100644 --- a/yarn-project/validator-client/src/block_proposal_handler.ts +++ b/yarn-project/validator-client/src/block_proposal_handler.ts @@ -487,7 +487,9 @@ export class BlockProposalHandler { } private getReexecuteFailureReason(err: any): BlockProposalValidationFailureReason { - if (err instanceof ReExInitialStateMismatchError) { + if (err instanceof TransactionsNotAvailableError) { + return 'txs_not_available'; + } else if (err instanceof ReExInitialStateMismatchError) { return 'initial_state_mismatch'; } else if (err instanceof ReExStateMismatchError) { return 'state_mismatch'; From 510f15c04f017c3aa496cadabc379ac7d562ac7e Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Tue, 17 Mar 2026 11:01:35 -0300 Subject: [PATCH 16/41] chore(p2p): remove unused method Attestation validation is handled in `validateAndStoreCheckpointAttestation`. --- .../p2p/src/services/libp2p/libp2p_service.ts | 26 ------------------- 1 file changed, 26 deletions(-) diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts index 6f89b66a5cf3..9314043e840a 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts @@ -18,7 +18,6 @@ import { type CheckpointProposalCore, type Gossipable, P2PMessage, - type ValidationResult as P2PValidationResult, PeerErrorSeverity, PeerErrorSeverityByHarshness, TopicType, @@ -1742,31 +1741,6 @@ export class LibP2PService extends WithTracer implements P2PService { return PeerErrorSeverity.HighToleranceError; } - /** - * Validate a checkpoint attestation. 
- * - * @param attestation - The checkpoint attestation to validate. - * @returns True if the checkpoint attestation is valid, false otherwise. - */ - @trackSpan('Libp2pService.validateCheckpointAttestation', async (_, attestation) => ({ - [Attributes.SLOT_NUMBER]: attestation.payload.header.slotNumber, - [Attributes.BLOCK_ARCHIVE]: attestation.archive.toString(), - [Attributes.P2P_ID]: await attestation.p2pMessageLoggingIdentifier().then(i => i.toString()), - })) - public async validateCheckpointAttestation( - peerId: PeerId, - attestation: CheckpointAttestation, - ): Promise { - const result = await this.checkpointAttestationValidator.validate(attestation); - - if (result.result === 'reject') { - this.logger.warn(`Penalizing peer ${peerId} for checkpoint attestation validation failure`); - this.peerManager.penalizePeer(peerId, result.severity); - } - - return result; - } - public getPeerScore(peerId: PeerId): number { return this.node.services.pubsub.score.score(peerId.toString()); } From 4943a4f8480812c4896d98e9b9ffc254998c3c3f Mon Sep 17 00:00:00 2001 From: Alex Gherghisan Date: Tue, 17 Mar 2026 14:48:17 +0000 Subject: [PATCH 17/41] fix: estimate gas in bot and make BatchCall.simulate() return SimulationResult (#21676) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary - Modifies the bot factory to estimate gas for all transactions during setup (deploy, mint, add liquidity, etc.) instead of using default gas settings. - Makes `BatchCall.simulate()` always return `SimulationResult` (consistent with `ContractFunctionInteraction` and `DeployMethod`), instead of returning different shapes depending on whether gas estimation was requested. 
## Test plan - [x] `yarn build` passes (no new type errors) - [x] `yarn workspace @aztec/aztec.js test src/contract/batch_call.test.ts` — all 7 tests pass - [ ] Spartan network deployment with bot enabled 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-authored-by: Claude Opus 4.6 (1M context) --- boxes/boxes/vanilla/app/main.ts | 2 +- .../aztec.js/src/contract/batch_call.test.ts | 12 +-- .../aztec.js/src/contract/batch_call.ts | 30 +++++-- yarn-project/bot/src/factory.ts | 89 ++++++++++++++----- .../end-to-end/src/e2e_state_vars.test.ts | 4 +- .../src/simulators/token_simulator.ts | 2 +- 6 files changed, 98 insertions(+), 41 deletions(-) diff --git a/boxes/boxes/vanilla/app/main.ts b/boxes/boxes/vanilla/app/main.ts index ddce11af435c..4c35c1045b97 100644 --- a/boxes/boxes/vanilla/app/main.ts +++ b/boxes/boxes/vanilla/app/main.ts @@ -198,7 +198,7 @@ async function updateVoteTally(wallet: Wallet, from: AztecAddress) { ) ); - const batchResult = await new BatchCall(wallet, payloads).simulate({ from }); + const { result: batchResult } = await new BatchCall(wallet, payloads).simulate({ from }); batchResult.forEach(({ result: value }, i) => { results[i + 1] = value; diff --git a/yarn-project/aztec.js/src/contract/batch_call.test.ts b/yarn-project/aztec.js/src/contract/batch_call.test.ts index 88b6e4427d60..cecd701b27ec 100644 --- a/yarn-project/aztec.js/src/contract/batch_call.test.ts +++ b/yarn-project/aztec.js/src/contract/batch_call.test.ts @@ -146,7 +146,7 @@ describe('BatchCall', () => { { name: 'simulateTx', result: txSimResult }, ] as any); - const results = await batchCall.simulate({ from: await AztecAddress.random() }); + const { result: results } = await batchCall.simulate({ from: await AztecAddress.random() }); // Verify wallet.batch was called once with both utility calls AND simulateTx expect(wallet.batch).toHaveBeenCalledTimes(1); @@ -212,7 +212,7 @@ describe('BatchCall', () => { { name: 'executeUtility', result: utilityResult2 }, ] as 
any); - const results = await batchCall.simulate({ from: await AztecAddress.random() }); + const { result: results } = await batchCall.simulate({ from: await AztecAddress.random() }); expect(wallet.batch).toHaveBeenCalledTimes(1); expect(wallet.batch).toHaveBeenCalledWith([ @@ -247,7 +247,7 @@ describe('BatchCall', () => { const utilityResult = UtilityExecutionResult.random(); wallet.batch.mockResolvedValue([{ name: 'executeUtility', result: utilityResult }] as any); - const results = await batchCall.simulate({ from: await AztecAddress.random() }); + const { result: results } = await batchCall.simulate({ from: await AztecAddress.random() }); expect(results).toHaveLength(1); expect(results[0].offchainEffects).toEqual([]); @@ -307,7 +307,7 @@ describe('BatchCall', () => { { name: 'simulateTx', result: txSimResult }, ] as any); - const results = await batchCall.simulate({ from: await AztecAddress.random() }); + const { result: results } = await batchCall.simulate({ from: await AztecAddress.random() }); expect(results).toHaveLength(3); expect(results[0].offchainMessages).toEqual([ @@ -349,7 +349,7 @@ describe('BatchCall', () => { wallet.batch.mockResolvedValue([{ name: 'simulateTx', result: txSimResult }] as any); - const results = await batchCall.simulate({ from: await AztecAddress.random() }); + const { result: results } = await batchCall.simulate({ from: await AztecAddress.random() }); expect(wallet.batch).toHaveBeenCalledTimes(1); expect(wallet.batch).toHaveBeenCalledWith([ @@ -376,7 +376,7 @@ describe('BatchCall', () => { it('should handle empty batch', async () => { batchCall = new BatchCall(wallet, []); - const results = await batchCall.simulate({ from: await AztecAddress.random() }); + const { result: results } = await batchCall.simulate({ from: await AztecAddress.random() }); expect(wallet.batch).not.toHaveBeenCalled(); expect(results).toEqual([]); diff --git a/yarn-project/aztec.js/src/contract/batch_call.ts b/yarn-project/aztec.js/src/contract/batch_call.ts 
index 3012c2dd8f01..ecf21515d616 100644 --- a/yarn-project/aztec.js/src/contract/batch_call.ts +++ b/yarn-project/aztec.js/src/contract/batch_call.ts @@ -3,9 +3,11 @@ import { ExecutionPayload, TxSimulationResult, UtilityExecutionResult, mergeExec import type { BatchedMethod, Wallet } from '../wallet/wallet.js'; import { BaseContractInteraction } from './base_contract_interaction.js'; +import { getGasLimits } from './get_gas_limits.js'; import { type RequestInteractionOptions, type SimulateInteractionOptions, + type SimulationResult, extractOffchainOutput, toSimulateOptions, } from './interaction_options.js'; @@ -45,7 +47,7 @@ export class BatchCall extends BaseContractInteraction { * @param options - An optional object containing additional configuration for the interaction. * @returns The results of all the interactions that make up the batch */ - public async simulate(options: SimulateInteractionOptions): Promise { + public async simulate(options: SimulateInteractionOptions): Promise { const { indexedExecutionPayloads, utility } = (await this.getExecutionPayloads()).reduce<{ /** Keep track of the number of private calls to retrieve the return values */ privateIndex: 0; @@ -119,10 +121,11 @@ export class BatchCall extends BaseContractInteraction { } // Process tx simulation result (it comes last if present) + let simulatedTx: TxSimulationResult | undefined; if (indexedExecutionPayloads.length > 0) { const txResultWrapper = batchResults[utility.length]; if (txResultWrapper.name === 'simulateTx') { - const simulatedTx = txResultWrapper.result as TxSimulationResult; + simulatedTx = txResultWrapper.result as TxSimulationResult; indexedExecutionPayloads.forEach(([request, callIndex, resultIndex]) => { const call = request.calls[0]; // As account entrypoints are private, for private functions we retrieve the return values from the first nested call @@ -130,21 +133,34 @@ export class BatchCall extends BaseContractInteraction { // For public functions we retrieve the 
first values directly from the public output. const rawReturnValues = call.type == FunctionType.PRIVATE - ? simulatedTx.getPrivateReturnValues()?.nested?.[resultIndex].values - : simulatedTx.getPublicReturnValues()?.[resultIndex].values; + ? simulatedTx!.getPrivateReturnValues()?.nested?.[resultIndex].values + : simulatedTx!.getPublicReturnValues()?.[resultIndex].values; results[callIndex] = { result: rawReturnValues ? decodeFromAbi(call.returnTypes, rawReturnValues) : [], ...extractOffchainOutput( - simulatedTx.offchainEffects, - simulatedTx.publicInputs.constants.anchorBlockHeader.globalVariables.timestamp, + simulatedTx!.offchainEffects, + simulatedTx!.publicInputs.constants.anchorBlockHeader.globalVariables.timestamp, ), }; }); } } - return results; + if ((options.includeMetadata || options.fee?.estimateGas) && simulatedTx) { + const { gasLimits, teardownGasLimits } = getGasLimits(simulatedTx, options.fee?.estimatedGasPadding); + this.log.verbose( + `Estimated gas limits for batch tx: DA=${gasLimits.daGas} L2=${gasLimits.l2Gas} teardownDA=${teardownGasLimits.daGas} teardownL2=${teardownGasLimits.l2Gas}`, + ); + return { + result: results, + estimatedGas: { gasLimits, teardownGasLimits }, + offchainEffects: [], + offchainMessages: [], + }; + } + + return { result: results, offchainEffects: [], offchainMessages: [] }; } protected async getExecutionPayloads(): Promise { diff --git a/yarn-project/bot/src/factory.ts b/yarn-project/bot/src/factory.ts index 99995c4bf8a6..c62e3b78a85b 100644 --- a/yarn-project/bot/src/factory.ts +++ b/yarn-project/bot/src/factory.ts @@ -27,7 +27,7 @@ import { PrivateTokenContract } from '@aztec/noir-contracts.js/PrivateToken'; import { TokenContract } from '@aztec/noir-contracts.js/Token'; import { TestContract } from '@aztec/noir-test-contracts.js/Test'; import type { ContractInstanceWithAddress } from '@aztec/stdlib/contract'; -import { GasSettings } from '@aztec/stdlib/gas'; +import { GasFees, GasSettings } from '@aztec/stdlib/gas'; 
import type { AztecNode, AztecNodeAdmin } from '@aztec/stdlib/interfaces/client'; import { deriveSigningKey } from '@aztec/stdlib/keys'; import { EmbeddedWallet } from '@aztec/wallets/embedded'; @@ -223,7 +223,12 @@ export class BotFactory { const paymentMethod = new FeeJuicePaymentMethodWithClaim(accountManager.address, claim); const deployMethod = await accountManager.getDeployMethod(); const maxFeesPerGas = (await this.aztecNode.getCurrentMinFees()).mul(1 + this.config.minFeePadding); - const gasSettings = GasSettings.default({ maxFeesPerGas }); + + const { estimatedGas } = await deployMethod.simulate({ + from: AztecAddress.ZERO, + fee: { estimateGas: true, paymentMethod }, + }); + const gasSettings = GasSettings.from({ ...estimatedGas!, maxFeesPerGas, maxPriorityFeesPerGas: GasFees.empty() }); await this.withNoMinTxsPerBlock(async () => { const { txHash } = await deployMethod.send({ @@ -231,7 +236,7 @@ export class BotFactory { fee: { gasSettings, paymentMethod }, wait: NO_WAIT, }); - this.log.info(`Sent tx for account deployment with hash ${txHash.toString()}`); + this.log.info(`Sent tx for account deployment with hash ${txHash.toString()}`, { gasSettings }); return waitForTx(this.aztecNode, txHash, { timeout: this.config.txMinedWaitSeconds }); }); this.log.info(`Account deployed at ${address}`); @@ -297,8 +302,9 @@ export class BotFactory { await deploy.register(); } else { this.log.info(`Deploying token contract at ${address.toString()}`); - const { txHash } = await deploy.send({ ...deployOpts, wait: NO_WAIT }); - this.log.info(`Sent tx for token setup with hash ${txHash.toString()}`); + const { estimatedGas } = await deploy.simulate({ ...deployOpts, fee: { estimateGas: true } }); + const { txHash } = await deploy.send({ ...deployOpts, fee: { gasSettings: estimatedGas }, wait: NO_WAIT }); + this.log.info(`Sent tx for token setup with hash ${txHash.toString()}`, { estimatedGas }); await this.withNoMinTxsPerBlock(async () => { await waitForTx(this.aztecNode, 
txHash, { timeout: this.config.txMinedWaitSeconds }); return token; @@ -338,10 +344,19 @@ export class BotFactory { const amm = AMMContract.at(instance.address, this.wallet); this.log.info(`AMM deployed at ${amm.address}`); - const { receipt: minterReceipt } = await lpToken.methods - .set_minter(amm.address, true) - .send({ from: deployer, wait: { timeout: this.config.txMinedWaitSeconds } }); - this.log.info(`Set LP token minter to AMM txHash=${minterReceipt.txHash.toString()}`); + const setMinterInteraction = lpToken.methods.set_minter(amm.address, true); + const { estimatedGas: setMinterGas } = await setMinterInteraction.simulate({ + from: deployer, + fee: { estimateGas: true }, + }); + const { receipt: minterReceipt } = await setMinterInteraction.send({ + from: deployer, + fee: { gasSettings: setMinterGas }, + wait: { timeout: this.config.txMinedWaitSeconds }, + }); + this.log.info(`Set LP token minter to AMM txHash=${minterReceipt.txHash.toString()}`, { + estimatedGas: setMinterGas, + }); this.log.info(`Liquidity token initialized`); return amm; @@ -409,22 +424,44 @@ export class BotFactory { .getFunctionCall(), }); - const { receipt: mintReceipt } = await new BatchCall(this.wallet, [ + const mintBatch = new BatchCall(this.wallet, [ token0.methods.mint_to_private(liquidityProvider, MINT_BALANCE), token1.methods.mint_to_private(liquidityProvider, MINT_BALANCE), - ]).send({ from: liquidityProvider, wait: { timeout: this.config.txMinedWaitSeconds } }); + ]); + const { estimatedGas: mintGas } = await mintBatch.simulate({ + from: liquidityProvider, + fee: { estimateGas: true }, + }); + const { receipt: mintReceipt } = await mintBatch.send({ + from: liquidityProvider, + fee: { gasSettings: mintGas }, + wait: { timeout: this.config.txMinedWaitSeconds }, + }); - this.log.info(`Sent mint tx: ${mintReceipt.txHash.toString()}`); + this.log.info(`Sent mint tx: ${mintReceipt.txHash.toString()}`, { estimatedGas: mintGas }); - const { receipt: addLiquidityReceipt } = await 
amm.methods - .add_liquidity(amount0Max, amount1Max, amount0Min, amount1Min, authwitNonce) - .send({ - from: liquidityProvider, - authWitnesses: [token0Authwit, token1Authwit], - wait: { timeout: this.config.txMinedWaitSeconds }, - }); + const addLiquidityInteraction = amm.methods.add_liquidity( + amount0Max, + amount1Max, + amount0Min, + amount1Min, + authwitNonce, + ); + const { estimatedGas: addLiquidityGas } = await addLiquidityInteraction.simulate({ + from: liquidityProvider, + fee: { estimateGas: true }, + authWitnesses: [token0Authwit, token1Authwit], + }); + const { receipt: addLiquidityReceipt } = await addLiquidityInteraction.send({ + from: liquidityProvider, + fee: { gasSettings: addLiquidityGas }, + authWitnesses: [token0Authwit, token1Authwit], + wait: { timeout: this.config.txMinedWaitSeconds }, + }); - this.log.info(`Sent tx to add liquidity to the AMM: ${addLiquidityReceipt.txHash.toString()}`); + this.log.info(`Sent tx to add liquidity to the AMM: ${addLiquidityReceipt.txHash.toString()}`, { + estimatedGas: addLiquidityGas, + }); this.log.info(`Liquidity added`); const [newT0Bal, newT1Bal, newLPBal] = await getPrivateBalances(); @@ -445,9 +482,10 @@ export class BotFactory { this.log.info(`Contract ${name} at ${address.toString()} already deployed`); await deploy.register(); } else { - this.log.info(`Deploying contract ${name} at ${address.toString()}`); + const { estimatedGas } = await deploy.simulate({ ...deployOpts, fee: { estimateGas: true } }); + this.log.info(`Deploying contract ${name} at ${address.toString()}`, { estimatedGas }); await this.withNoMinTxsPerBlock(async () => { - const { txHash } = await deploy.send({ ...deployOpts, wait: NO_WAIT }); + const { txHash } = await deploy.send({ ...deployOpts, fee: { gasSettings: estimatedGas }, wait: NO_WAIT }); this.log.info(`Sent contract ${name} setup tx with hash ${txHash.toString()}`); return waitForTx(this.aztecNode, txHash, { timeout: this.config.txMinedWaitSeconds }); }); @@ -491,13 
+529,16 @@ export class BotFactory { // PrivateToken's mint accesses contract-level private storage vars (admin, total_supply). const additionalScopes = isStandardToken ? undefined : [token.address]; + const mintBatch = new BatchCall(token.wallet, calls); + const { estimatedGas } = await mintBatch.simulate({ from: minter, fee: { estimateGas: true }, additionalScopes }); await this.withNoMinTxsPerBlock(async () => { - const { txHash } = await new BatchCall(token.wallet, calls).send({ + const { txHash } = await mintBatch.send({ from: minter, additionalScopes, + fee: { gasSettings: estimatedGas }, wait: NO_WAIT, }); - this.log.info(`Sent token mint tx with hash ${txHash.toString()}`); + this.log.info(`Sent token mint tx with hash ${txHash.toString()}`, { estimatedGas }); return waitForTx(this.aztecNode, txHash, { timeout: this.config.txMinedWaitSeconds }); }); } diff --git a/yarn-project/end-to-end/src/e2e_state_vars.test.ts b/yarn-project/end-to-end/src/e2e_state_vars.test.ts index 6d2f58a7f37d..29435110942e 100644 --- a/yarn-project/end-to-end/src/e2e_state_vars.test.ts +++ b/yarn-project/end-to-end/src/e2e_state_vars.test.ts @@ -68,7 +68,7 @@ describe('e2e_state_vars', () => { contract.methods.get_public_immutable_constrained_private_indirect(), contract.methods.get_public_immutable(), ]).simulate({ from: defaultAccountAddress }) - ).map((r: any) => r.result); + ).result.map((r: any) => r.result); expect(a).toEqual(c); expect(b).toEqual({ account: c.account, value: c.value + 1n }); @@ -87,7 +87,7 @@ describe('e2e_state_vars', () => { contract.methods.get_public_immutable_constrained_public_indirect(), contract.methods.get_public_immutable(), ]).simulate({ from: defaultAccountAddress }) - ).map((r: any) => r.result); + ).result.map((r: any) => r.result); expect(a).toEqual(c); expect(b).toEqual({ account: c.account, value: c.value + 1n }); diff --git a/yarn-project/end-to-end/src/simulators/token_simulator.ts 
b/yarn-project/end-to-end/src/simulators/token_simulator.ts index a2065beb4426..a10bc0c7de17 100644 --- a/yarn-project/end-to-end/src/simulators/token_simulator.ts +++ b/yarn-project/end-to-end/src/simulators/token_simulator.ts @@ -110,7 +110,7 @@ export class TokenSimulator { chunk(calls, 5).map(batch => new BatchCall(this.defaultWallet, batch).simulate({ from: this.defaultAddress })), ) ) - .flat() + .flatMap(r => r.result) .map(r => r.result); expect(results[0]).toEqual(this.totalSupply); From 5fd6fd7b6b9bfd443254a61445d919fe3e1c3a20 Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Tue, 17 Mar 2026 11:15:03 -0400 Subject: [PATCH 18/41] fix: prevent HA peer proposals from blocking equivocation in duplicate proposal test (#21673) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary When PR #21603 changed the validator to process (not ignore) block proposals from HA peers (same validator key), the `duplicate_proposal_slash` test broke. The second malicious node now processes the first node's proposal, adds the block to its archiver via `blockSource.addBlock()`, and the sequencer sees "slot was taken" — preventing it from ever building its own conflicting proposal. **Root cause**: `validateBlockProposal` no longer returns `false` for self-proposals (changed to process them for HA support). The block_proposal_handler re-executes the proposal and pushes it to the archiver. The sequencer then skips the slot. **Fix**: Set `skipPushProposedBlocksToArchiver=true` on the malicious nodes. This allows: 1. Node 1 builds and broadcasts its proposal 2. Node 2 receives it, re-executes (as HA peer), but does NOT add to archiver 3. Node 2's sequencer doesn't see "slot taken" → builds its own block with different coinbase 4. Node 2 broadcasts (allowed by `broadcastEquivocatedProposals=true`) 5. 
Honest nodes see both proposals → detect duplicate → offense recorded ## Test plan - The `duplicate_proposal_slash` e2e test should now pass consistently - Other slashing tests should be unaffected (only malicious nodes in this test are changed) ClaudeBox log: https://claudebox.work/s/ced449aa0eabbcb4?run=1 --- .../end-to-end/src/e2e_p2p/duplicate_proposal_slash.test.ts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/yarn-project/end-to-end/src/e2e_p2p/duplicate_proposal_slash.test.ts b/yarn-project/end-to-end/src/e2e_p2p/duplicate_proposal_slash.test.ts index cc2868010c00..fc01b6f7fee1 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/duplicate_proposal_slash.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/duplicate_proposal_slash.test.ts @@ -132,6 +132,9 @@ describe('e2e_p2p_duplicate_proposal_slash', () => { coinbase: coinbase1, broadcastEquivocatedProposals: true, dontStartSequencer: true, + // Prevent HA peer proposals from being added to the archiver, so both + // malicious nodes build their own blocks instead of one yielding to the other. + skipPushProposedBlocksToArchiver: true, }, t.ctx.dateProvider, BOOT_NODE_UDP_PORT + 1, @@ -150,6 +153,9 @@ describe('e2e_p2p_duplicate_proposal_slash', () => { coinbase: coinbase2, broadcastEquivocatedProposals: true, dontStartSequencer: true, + // Prevent HA peer proposals from being added to the archiver, so both + // malicious nodes build their own blocks instead of one yielding to the other. + skipPushProposedBlocksToArchiver: true, }, t.ctx.dateProvider, BOOT_NODE_UDP_PORT + 2, From 34ee6a4ae9faeea334b715643f633a9d267aa971 Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Tue, 17 Mar 2026 11:33:20 -0300 Subject: [PATCH 19/41] fix(p2p): penalize peers for errors during response reading Errors in readMessage (invalid status bytes, oversized snappy responses, corrupt data) were caught and silently converted to UNKNOWN status returns. 
Since sendRequestToPeer only calls handleResponseError in its own catch block, none of these errors resulted in peer penalties. The request was simply retried with another peer, allowing a malicious peer to waste bandwidth indefinitely. Co-Authored-By: Claude Opus 4.6 (1M context) --- yarn-project/p2p/src/services/encoding.ts | 10 ++++++- .../p2p/src/services/reqresp/reqresp.ts | 28 ++++++++++++------- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/yarn-project/p2p/src/services/encoding.ts b/yarn-project/p2p/src/services/encoding.ts index 0aea0032d158..d21fbe695597 100644 --- a/yarn-project/p2p/src/services/encoding.ts +++ b/yarn-project/p2p/src/services/encoding.ts @@ -9,6 +9,14 @@ import { webcrypto } from 'node:crypto'; import { compressSync, uncompressSync } from 'snappy'; import xxhashFactory from 'xxhash-wasm'; +/** Thrown when a Snappy-compressed response exceeds the allowed decompressed size. */ +export class OversizedSnappyResponseError extends Error { + constructor(decompressedSize: number, maxSizeKb: number) { + super(`Decompressed size ${decompressedSize} exceeds maximum allowed size of ${maxSizeKb}kb`); + this.name = 'OversizedSnappyResponseError'; + } +} + // Load WASM const xxhash = await xxhashFactory(); @@ -86,7 +94,7 @@ export class SnappyTransform implements DataTransform { const { decompressedSize } = readSnappyPreamble(data); if (decompressedSize > maxSizeKb * 1024) { this.logger.warn(`Decompressed size ${decompressedSize} exceeds maximum allowed size of ${maxSizeKb}kb`); - throw new Error(`Decompressed size ${decompressedSize} exceeds maximum allowed size of ${maxSizeKb}kb`); + throw new OversizedSnappyResponseError(decompressedSize, maxSizeKb); } return Buffer.from(uncompressSync(data, { asBuffer: true })); diff --git a/yarn-project/p2p/src/services/reqresp/reqresp.ts b/yarn-project/p2p/src/services/reqresp/reqresp.ts index bd15d5ff6c4e..38354e1dd67f 100644 --- a/yarn-project/p2p/src/services/reqresp/reqresp.ts +++ 
b/yarn-project/p2p/src/services/reqresp/reqresp.ts @@ -16,7 +16,7 @@ import { IndividualReqRespTimeoutError, InvalidResponseError, } from '../../errors/reqresp.error.js'; -import { SnappyTransform } from '../encoding.js'; +import { OversizedSnappyResponseError, SnappyTransform } from '../encoding.js'; import type { PeerScoring } from '../peer-manager/peer_scoring.js'; import { DEFAULT_INDIVIDUAL_REQUEST_TIMEOUT_MS, @@ -553,16 +553,10 @@ export class ReqResp implements ReqRespInterface { data: message, }; } catch (e: any) { + // All errors (invalid status bytes, oversized snappy responses, corrupt data, etc.) + // are re-thrown so the caller can penalize the peer via handleResponseError. this.logger.debug(`Reading message failed: ${e.message}`); - - let status = ReqRespStatus.UNKNOWN; - if (e instanceof ReqRespStatusError) { - status = e.status; - } - - return { - status, - }; + throw e; } } @@ -780,6 +774,20 @@ export class ReqResp implements ReqRespInterface { return undefined; } + // Invalid status byte: the peer sent a status byte that doesn't match any known status code. + // This is a protocol violation, penalize harshly. + if (e instanceof ReqRespStatusError) { + this.logger.warn(`Invalid status byte from peer ${peerId.toString()} in ${subProtocol}: ${e.message}`, logTags); + return PeerErrorSeverity.LowToleranceError; + } + + // Oversized snappy response: the peer is sending data that exceeds the allowed size. + // This is a protocol violation that wastes bandwidth, so penalize harshly. 
+ if (e instanceof OversizedSnappyResponseError) { + this.logger.warn(`Oversized response from peer ${peerId.toString()} in ${subProtocol}: ${e.message}`, logTags); + return PeerErrorSeverity.LowToleranceError; + } + return this.categorizeConnectionErrors(e, peerId, subProtocol); } From c3e214e863adf893c3eea55a633767270d94b623 Mon Sep 17 00:00:00 2001 From: Phil Windle Date: Tue, 17 Mar 2026 12:17:53 +0000 Subject: [PATCH 20/41] fix: batch checkpoint unwinding in handleEpochPrune (A-690) Replace the unbounded Promise.all in handleEpochPrune with batched retrieval (batch size 10), preventing memory pressure when proving significantly lags behind the local pending checkpoint. Co-Authored-By: Claude Opus 4.6 (1M context) --- .../archiver/src/modules/l1_synchronizer.ts | 33 ++++++++++++------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/yarn-project/archiver/src/modules/l1_synchronizer.ts b/yarn-project/archiver/src/modules/l1_synchronizer.ts index 092df4b083f8..2ac1bd15c9e1 100644 --- a/yarn-project/archiver/src/modules/l1_synchronizer.ts +++ b/yarn-project/archiver/src/modules/l1_synchronizer.ts @@ -333,17 +333,28 @@ export class ArchiverL1Synchronizer implements Traceable { const checkpointsToUnwind = localPendingCheckpointNumber - provenCheckpointNumber; - const checkpointPromises = Array.from({ length: checkpointsToUnwind }) - .fill(0) - .map((_, i) => this.store.getCheckpointData(CheckpointNumber(i + pruneFrom))); - const checkpoints = await Promise.all(checkpointPromises); - - const blockPromises = await Promise.all( - checkpoints - .filter(isDefined) - .map(cp => this.store.getBlocksForCheckpoint(CheckpointNumber(cp.checkpointNumber))), - ); - const newBlocks = blockPromises.filter(isDefined).flat(); + // Fetch checkpoints and blocks in bounded batches to avoid unbounded concurrent + // promises when the gap between local pending and proven checkpoint numbers is large. 
+ const BATCH_SIZE = 10; + const newBlocks = []; + for (let offset = 0; offset < checkpointsToUnwind; offset += BATCH_SIZE) { + const batchSize = Math.min(BATCH_SIZE, checkpointsToUnwind - offset); + const checkpoints = ( + await Promise.all( + Array.from({ length: batchSize }, (_, i) => + this.store.getCheckpointData(CheckpointNumber(offset + i + pruneFrom)), + ), + ) + ).filter(isDefined); + + const batchBlocks = ( + await Promise.all( + checkpoints.map(cp => this.store.getBlocksForCheckpoint(CheckpointNumber(cp.checkpointNumber))), + ) + ).filter(isDefined); + + newBlocks.push(...batchBlocks.flat()); + } // Emit an event for listening services to react to the chain prune this.events.emit(L2BlockSourceEvents.L2PruneUnproven, { From 9f90527df51e7dd817bd2235385defc2ca6ed307 Mon Sep 17 00:00:00 2001 From: Maddiaa <47148561+Maddiaa0@users.noreply.github.com> Date: Tue, 17 Mar 2026 16:33:43 +0000 Subject: [PATCH 21/41] feat(sequencer): add build-ahead config and metrics (#20779) Baseline stuff for buildahead - adds enable bool - adds some metrics that will be required --- yarn-project/archiver/src/config.ts | 9 +++++- yarn-project/epoch-cache/src/config.ts | 12 +++++-- yarn-project/foundation/src/config/env_var.ts | 1 + .../src/client/sequencer-client.ts | 1 + yarn-project/sequencer-client/src/config.ts | 4 +++ .../sequencer-client/src/sequencer/metrics.ts | 14 +++++++++ yarn-project/stdlib/src/config/index.ts | 1 + .../stdlib/src/config/pipelining-config.ts | 31 +++++++++++++++++++ yarn-project/telemetry-client/src/metrics.ts | 10 ++++++ 9 files changed, 79 insertions(+), 4 deletions(-) create mode 100644 yarn-project/stdlib/src/config/pipelining-config.ts diff --git a/yarn-project/archiver/src/config.ts b/yarn-project/archiver/src/config.ts index e3ed77eb71dc..a2eb30464302 100644 --- a/yarn-project/archiver/src/config.ts +++ b/yarn-project/archiver/src/config.ts @@ -8,7 +8,12 @@ import { getConfigFromMappings, numberConfigHelper, } from '@aztec/foundation/config'; 
-import { type ChainConfig, chainConfigMappings } from '@aztec/stdlib/config'; +import { + type ChainConfig, + type PipelineConfig, + chainConfigMappings, + pipelineConfigMappings, +} from '@aztec/stdlib/config'; import type { ArchiverSpecificConfig } from '@aztec/stdlib/interfaces/server'; /** @@ -21,11 +26,13 @@ import type { ArchiverSpecificConfig } from '@aztec/stdlib/interfaces/server'; export type ArchiverConfig = ArchiverSpecificConfig & L1ReaderConfig & L1ContractsConfig & + PipelineConfig & // required to pass through to epoch cache BlobClientConfig & ChainConfig; export const archiverConfigMappings: ConfigMappingsType = { ...blobClientConfigMapping, + ...pipelineConfigMappings, archiverPollingIntervalMS: { env: 'ARCHIVER_POLLING_INTERVAL_MS', description: 'The polling interval in ms for retrieving new L2 blocks and encrypted logs.', diff --git a/yarn-project/epoch-cache/src/config.ts b/yarn-project/epoch-cache/src/config.ts index bd9b76c1cd58..bf6e92f13046 100644 --- a/yarn-project/epoch-cache/src/config.ts +++ b/yarn-project/epoch-cache/src/config.ts @@ -1,11 +1,17 @@ import { type L1ContractsConfig, getL1ContractsConfigEnvVars } from '@aztec/ethereum/config'; import { type L1ReaderConfig, getL1ReaderConfigFromEnv } from '@aztec/ethereum/l1-reader'; +import { type PipelineConfig, getPipelineConfigEnvVars } from '@aztec/stdlib/config'; export type EpochCacheConfig = Pick< - L1ReaderConfig & L1ContractsConfig, - 'l1RpcUrls' | 'l1ChainId' | 'viemPollingIntervalMS' | 'l1HttpTimeoutMS' | 'ethereumSlotDuration' + L1ReaderConfig & L1ContractsConfig & PipelineConfig, + | 'l1RpcUrls' + | 'l1ChainId' + | 'viemPollingIntervalMS' + | 'ethereumSlotDuration' + | 'l1HttpTimeoutMS' + | 'enableProposerPipelining' >; export function getEpochCacheConfigEnvVars(): EpochCacheConfig { - return { ...getL1ReaderConfigFromEnv(), ...getL1ContractsConfigEnvVars() }; + return { ...getL1ReaderConfigFromEnv(), ...getL1ContractsConfigEnvVars(), ...getPipelineConfigEnvVars() }; } diff 
--git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index f692b1a4e0dd..159eba8cfb4a 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -219,6 +219,7 @@ export type EnvVar = | 'SEQ_PUBLISHER_ALLOW_INVALID_STATES' | 'SEQ_PUBLISHER_FORWARDER_ADDRESS' | 'SEQ_POLLING_INTERVAL_MS' + | 'SEQ_ENABLE_PROPOSER_PIPELINING' | 'SEQ_ENFORCE_TIME_TABLE' | 'SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT' | 'SEQ_ATTESTATION_PROPAGATION_TIME' diff --git a/yarn-project/sequencer-client/src/client/sequencer-client.ts b/yarn-project/sequencer-client/src/client/sequencer-client.ts index 0efeafb01f10..239c332e57f6 100644 --- a/yarn-project/sequencer-client/src/client/sequencer-client.ts +++ b/yarn-project/sequencer-client/src/client/sequencer-client.ts @@ -118,6 +118,7 @@ export class SequencerClient { l1ChainId: chainId, viemPollingIntervalMS: config.viemPollingIntervalMS, ethereumSlotDuration: config.ethereumSlotDuration, + enableProposerPipelining: config.enableProposerPipelining, }, { dateProvider: deps.dateProvider }, )); diff --git a/yarn-project/sequencer-client/src/config.ts b/yarn-project/sequencer-client/src/config.ts index e0ce28583791..577275cc0c09 100644 --- a/yarn-project/sequencer-client/src/config.ts +++ b/yarn-project/sequencer-client/src/config.ts @@ -13,8 +13,10 @@ import { type P2PConfig, p2pConfigMappings } from '@aztec/p2p/config'; import { AztecAddress } from '@aztec/stdlib/aztec-address'; import { type ChainConfig, + type PipelineConfig, type SequencerConfig, chainConfigMappings, + pipelineConfigMappings, sharedSequencerConfigMappings, } from '@aztec/stdlib/config'; import type { ResolvedSequencerConfig } from '@aztec/stdlib/interfaces/server'; @@ -68,6 +70,7 @@ export type SequencerClientConfig = SequencerPublisherConfig & SequencerConfig & L1ReaderConfig & ChainConfig & + PipelineConfig & Pick & Pick; @@ -244,6 +247,7 @@ export const 
sequencerClientConfigMappings: ConfigMappingsType = { + enableProposerPipelining: { + env: 'SEQ_ENABLE_PROPOSER_PIPELINING', + description: 'Whether to enable build-ahead proposer pipelining.', + ...booleanConfigHelper(false), + }, +}; + +export const PipelineConfigSchema = zodFor()( + z.object({ + enableProposerPipelining: z.boolean(), + }), +); + +export function getPipelineConfigEnvVars(): PipelineConfig { + return getConfigFromMappings(pipelineConfigMappings); +} diff --git a/yarn-project/telemetry-client/src/metrics.ts b/yarn-project/telemetry-client/src/metrics.ts index f6678089bcf9..6bd63208404b 100644 --- a/yarn-project/telemetry-client/src/metrics.ts +++ b/yarn-project/telemetry-client/src/metrics.ts @@ -504,6 +504,16 @@ export const SEQUENCER_CHECKPOINT_SUCCESS_COUNT: MetricDefinition = { description: 'The number of times checkpoint publishing succeeded', valueType: ValueType.INT, }; +export const SEQUENCER_PIPELINE_DEPTH: MetricDefinition = { + name: 'aztec.sequencer.pipeline.depth', + description: 'Current pipeline depth when builder pipelining is enabled', + valueType: ValueType.INT, +}; +export const SEQUENCER_PIPELINE_DISCARDS_COUNT: MetricDefinition = { + name: 'aztec.sequencer.pipeline.discards_count', + description: 'The number of times a pipeline was discarded', + valueType: ValueType.INT, +}; // Fisherman fee analysis metrics export const FISHERMAN_FEE_ANALYSIS_WOULD_BE_INCLUDED: MetricDefinition = { From 09a1e9b8e77a24ab8186259db2207e071505099f Mon Sep 17 00:00:00 2001 From: Michal Rzeszutko Date: Tue, 17 Mar 2026 17:38:45 +0100 Subject: [PATCH 22/41] chore: fixing build on mac (#21685) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # fix: ARM64 devcontainer builds — skip `-march` on ARM and use explicit zig aarch64 target ## Summary Fixes SIGILL (Illegal Instruction) crashes and build failures on ARM64 Mac (M3/Apple Silicon) devcontainers caused by incorrect `-march` handling introduced in #21611. 
## Problem PR #21611 originally fixed ARM64 devcontainer builds by using explicit `aarch64-linux-gnu.2.35` zig targets. During the merge, that approach was replaced with cmake-based auto-detection that sets `TARGET_ARCH=generic` on ARM and passes `-march=generic` to the compiler. This caused two distinct failures: ### 1. SIGILL crashes (`Illegal instruction`) The zig compiler wrappers still used `-target native-linux-gnu.2.35`, which auto-detects the host CPU. On CI (AWS Graviton with SVE extensions), this produces binaries containing SVE instructions. These cached binaries are then downloaded on Apple Silicon devcontainers (ARM64 without SVE), causing SIGILL when executed — e.g. `honk_solidity_key_gen` crashing during `barretenberg/sol` bootstrap. The `-march=generic` flag was supposed to override this, but `-march=generic` is **not a valid value on aarch64**. It's an x86 concept. LLVM/zig silently ignored it, so the native CPU detection still produced SVE instructions. ### 2. Build failures (`unknown CPU: 'armv8'`) Even attempting `-march=armv8-a` (a valid GCC/Clang aarch64 value) fails because zig uses its own CPU naming scheme (e.g. `generic`, `cortex_a72`, `apple_m3`), not GCC-style architecture strings. Zig interprets `-march=armv8-a` as CPU name `armv8`, which doesn't exist → `error: unknown CPU: 'armv8'`. **Bottom line:** The `-march` cmake approach fundamentally doesn't work with zig on ARM. Zig has its own architecture targeting via `-target`, which is the correct mechanism. ## What this PR changes ### 1. `arch.cmake` — Skip `-march` auto-detection on ARM Removed the ARM branch from the auto-detection. On x86_64, we still auto-detect `TARGET_ARCH=skylake`. On ARM, we don't set `TARGET_ARCH` at all, so no `-march` flag is passed — the zig wrappers handle architecture targeting instead. ### 2. `zig-cc.sh` / `zig-c++.sh` — Explicit aarch64 target on ARM Linux Restored the original fix from #21611 that was dropped during merge. 
On ARM64 Linux, the wrappers now use `-target aarch64-linux-gnu.2.35` instead of `-target native-linux-gnu.2.35`. This produces generic ARM64 code without CPU-specific extensions (SVE, etc.), ensuring cached binaries work on all ARM64 machines — Graviton, Apple Silicon, Ampere, etc. x86_64 behavior is unchanged (still uses `-target native`). ## Context: what happened after #21611 After #21611 merged with the cmake auto-detection approach, it triggered a cascade of follow-up PRs trying to fix the fallout: | PR | Status | Issue | |----|--------|-------| | #21621 | Merged | Introduced the auto-detect approach (replaced zig wrapper fix with cmake `-march`) | | #21356 | Merged | Added `NOT CMAKE_CROSSCOMPILING` guard for cross-compile failures | | #21637 | Open | Attempting to fix cross-compiles + restore `native_build_dir` | | #21660 | Open | Attempting to fix cross-compile targets | | #21632 | Open | Attempting to fix cross-compile targets | | #21662 | Open | Adding `CMAKE_SYSTEM_PROCESSOR` to ARM64 cross-compile presets | | #21653 | Open | Attempting to skip auto-detection when cross-compiling | | #21655 | Open | Attempting to skip auto-detection for cross-compilation targets | This PR supersedes the still-open PRs above by addressing the root cause: `-march` via cmake doesn't work with zig on ARM. The zig `-target` mechanism is the correct approach. --- barretenberg/cpp/cmake/arch.cmake | 15 +++++---------- barretenberg/cpp/scripts/zig-c++.sh | 11 ++++++++--- barretenberg/cpp/scripts/zig-cc.sh | 11 ++++++++--- 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/barretenberg/cpp/cmake/arch.cmake b/barretenberg/cpp/cmake/arch.cmake index ed185b1fa970..e1e7d0978fd7 100644 --- a/barretenberg/cpp/cmake/arch.cmake +++ b/barretenberg/cpp/cmake/arch.cmake @@ -5,18 +5,13 @@ if(WASM) add_compile_options(-fno-exceptions -fno-slp-vectorize) endif() -# Auto-detect TARGET_ARCH if not explicitly set (native builds only). 
-# Use 'skylake' on x86_64 (matches our cross-compile presets) and 'generic' on ARM -# to avoid emitting CPU-specific instructions (e.g. SVE on Graviton) that break on -# other ARM machines like Apple Silicon. +# Auto-detect TARGET_ARCH on x86_64 if not explicitly set (native builds only). +# On ARM, we skip -march entirely — the zig wrappers use an explicit aarch64 target +# to produce generic ARM64 code without CPU-specific extensions (e.g. SVE). # Skip auto-detection when cross-compiling — the toolchain (e.g. Zig -mcpu) handles # architecture targeting, and injecting -march here conflicts with it. -if(NOT WASM AND NOT TARGET_ARCH AND NOT CMAKE_CROSSCOMPILING) - if(ARM) - set(TARGET_ARCH "generic") - else() - set(TARGET_ARCH "skylake") - endif() +if(NOT WASM AND NOT TARGET_ARCH AND NOT ARM AND NOT CMAKE_CROSSCOMPILING) + set(TARGET_ARCH "skylake") endif() if(NOT WASM AND TARGET_ARCH) diff --git a/barretenberg/cpp/scripts/zig-c++.sh b/barretenberg/cpp/scripts/zig-c++.sh index dc7e822e03a4..1afa0f82f8fa 100755 --- a/barretenberg/cpp/scripts/zig-c++.sh +++ b/barretenberg/cpp/scripts/zig-c++.sh @@ -1,10 +1,15 @@ #!/bin/bash # Wrapper for zig c++ that pins glibc 2.35 on Linux (Ubuntu 22.04+ compat) # and uses native target on macOS. -# Note: arch.cmake handles -march selection (skylake on x86, generic on ARM) -# which overrides zig's native CPU detection, preventing CPU-specific instructions. +# On ARM64 Linux, use an explicit aarch64 target instead of 'native' to produce +# generic ARM64 code. This prevents CPU-specific instructions (e.g. SVE on Graviton) +# from being emitted, ensuring binaries work across all ARM64 machines including +# Apple Silicon in devcontainers. 
if [[ "$(uname -s)" == "Linux" ]]; then - exec zig c++ -target native-linux-gnu.2.35 "$@" + case "$(uname -m)" in + aarch64|arm64) exec zig c++ -target aarch64-linux-gnu.2.35 "$@" ;; + *) exec zig c++ -target native-linux-gnu.2.35 "$@" ;; + esac else exec zig c++ "$@" fi diff --git a/barretenberg/cpp/scripts/zig-cc.sh b/barretenberg/cpp/scripts/zig-cc.sh index bc39365b6c09..34dd6263c6ce 100755 --- a/barretenberg/cpp/scripts/zig-cc.sh +++ b/barretenberg/cpp/scripts/zig-cc.sh @@ -1,10 +1,15 @@ #!/bin/bash # Wrapper for zig cc that pins glibc 2.35 on Linux (Ubuntu 22.04+ compat) # and uses native target on macOS. -# Note: arch.cmake handles -march selection (skylake on x86, generic on ARM) -# which overrides zig's native CPU detection, preventing CPU-specific instructions. +# On ARM64 Linux, use an explicit aarch64 target instead of 'native' to produce +# generic ARM64 code. This prevents CPU-specific instructions (e.g. SVE on Graviton) +# from being emitted, ensuring binaries work across all ARM64 machines including +# Apple Silicon in devcontainers. 
if [[ "$(uname -s)" == "Linux" ]]; then - exec zig cc -target native-linux-gnu.2.35 "$@" + case "$(uname -m)" in + aarch64|arm64) exec zig cc -target aarch64-linux-gnu.2.35 "$@" ;; + *) exec zig cc -target native-linux-gnu.2.35 "$@" ;; + esac else exec zig cc "$@" fi From f503d617c7b9f032bcba4b40e56fa5d033dd8541 Mon Sep 17 00:00:00 2001 From: spypsy Date: Tue, 17 Mar 2026 16:55:24 +0000 Subject: [PATCH 23/41] fix: HA deadlock for last block edge case (#21690) change ordering for `lastBlock` case when creating a checkpoint proposal, so that we first sign the last block and then the checkpoint --- .../stdlib/src/p2p/checkpoint_proposal.ts | 43 ++++++++++--------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/yarn-project/stdlib/src/p2p/checkpoint_proposal.ts b/yarn-project/stdlib/src/p2p/checkpoint_proposal.ts index fdf4d679510f..69d5d6d936d6 100644 --- a/yarn-project/stdlib/src/p2p/checkpoint_proposal.ts +++ b/yarn-project/stdlib/src/p2p/checkpoint_proposal.ts @@ -178,29 +178,32 @@ export class CheckpointProposal extends Gossipable { blockNumber: lastBlockInfo?.blockHeader?.globalVariables.blockNumber ?? 
BlockNumber(0), dutyType: DutyType.CHECKPOINT_PROPOSAL, }; - const checkpointSignature = await payloadSigner(checkpointHash, checkpointContext); - if (!lastBlockInfo) { - return new CheckpointProposal(checkpointHeader, archiveRoot, feeAssetPriceModifier, checkpointSignature); + if (lastBlockInfo) { + // Sign block proposal before signing checkpoint proposal to ensure HA protection + const lastBlockProposal = await BlockProposal.createProposalFromSigner( + lastBlockInfo.blockHeader, + lastBlockInfo.indexWithinCheckpoint, + checkpointHeader.inHash, + archiveRoot, + lastBlockInfo.txHashes, + lastBlockInfo.txs, + payloadSigner, + ); + + const checkpointSignature = await payloadSigner(checkpointHash, checkpointContext); + + return new CheckpointProposal(checkpointHeader, archiveRoot, feeAssetPriceModifier, checkpointSignature, { + blockHeader: lastBlockInfo.blockHeader, + indexWithinCheckpoint: lastBlockInfo.indexWithinCheckpoint, + txHashes: lastBlockInfo.txHashes, + signature: lastBlockProposal.signature, + signedTxs: lastBlockProposal.signedTxs, + }); } - const lastBlockProposal = await BlockProposal.createProposalFromSigner( - lastBlockInfo.blockHeader, - lastBlockInfo.indexWithinCheckpoint, - checkpointHeader.inHash, - archiveRoot, - lastBlockInfo.txHashes, - lastBlockInfo.txs, - payloadSigner, - ); - - return new CheckpointProposal(checkpointHeader, archiveRoot, feeAssetPriceModifier, checkpointSignature, { - blockHeader: lastBlockInfo.blockHeader, - indexWithinCheckpoint: lastBlockInfo.indexWithinCheckpoint, - txHashes: lastBlockInfo.txHashes, - signature: lastBlockProposal.signature, - signedTxs: lastBlockProposal.signedTxs, - }); + const checkpointSignature = await payloadSigner(checkpointHash, checkpointContext); + return new CheckpointProposal(checkpointHeader, archiveRoot, feeAssetPriceModifier, checkpointSignature); } /** From e84fa885c9a1170a90625910b4ec63aa9850fb9d Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Tue, 17 Mar 2026 14:56:08 -0300 
Subject: [PATCH 24/41] chore(p2p): lower attestation pool per-slot caps to 2 Equivocation detection fires at count > 1 (i.e., the 2nd distinct entry). Nothing in the codebase uses counts beyond 2, so entries 3+ are dead storage. A cap of 2 is sufficient to store the honest entry plus one conflicting entry for detection. Co-Authored-By: Claude Opus 4.6 (1M context) --- .../attestation_pool/attestation_pool.test.ts | 6 +++--- .../mem_pools/attestation_pool/attestation_pool.ts | 6 +++--- .../attestation_pool/attestation_pool_test_suite.ts | 12 ++++++------ .../msg_validators/attestation_validator/README.md | 2 +- .../src/msg_validators/proposal_validator/README.md | 6 +++--- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.test.ts b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.test.ts index cc7850c1e902..358366e54fcc 100644 --- a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.test.ts +++ b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.test.ts @@ -83,12 +83,12 @@ describe('Attestation Pool', () => { expect(result2.added).toBe(true); expect(result2.count).toBe(2); // This is the first duplicate - triggers slashing - // Third attestation from same signer (if we want to track more) + // Third attestation from same signer should be rejected (cap is 2) const archive3 = Fr.random(); const attestation3 = mockCheckpointAttestation(signer, slotNumber, archive3); const result3 = await attestationPool.tryAddCheckpointAttestation(attestation3); - expect(result3.added).toBe(true); - expect(result3.count).toBe(3); // Attestations from this signer + expect(result3.added).toBe(false); + expect(result3.count).toBe(2); // At cap, rejected }); it('should reject attestations when signer exceeds per-slot cap', async () => { diff --git a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.ts 
b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.ts index e61456ef5cbf..4f925245d443 100644 --- a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.ts +++ b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.ts @@ -26,10 +26,10 @@ export type TryAddResult = { count: number; }; -export const MAX_CHECKPOINT_PROPOSALS_PER_SLOT = 5; -export const MAX_BLOCK_PROPOSALS_PER_POSITION = 3; +export const MAX_CHECKPOINT_PROPOSALS_PER_SLOT = 2; +export const MAX_BLOCK_PROPOSALS_PER_POSITION = 2; /** Maximum attestations a single signer can make per slot before being rejected. */ -export const MAX_ATTESTATIONS_PER_SLOT_AND_SIGNER = 3; +export const MAX_ATTESTATIONS_PER_SLOT_AND_SIGNER = 2; /** Public API interface for attestation pools. Used for typing mocks and test implementations. */ export type AttestationPoolApi = Pick< diff --git a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts index 20a198da71a0..02b1f1357f27 100644 --- a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts +++ b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts @@ -446,12 +446,12 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo const result2 = await ap.tryAddBlockProposal(proposal2); expect(result2.count).toBe(2); - // Add a third proposal for same position + // Third proposal for same position should be rejected (cap is 2) const proposal3 = await mockBlockProposalWithIndex(signers[2], slotNumber, indexWithinCheckpoint); const result3 = await ap.tryAddBlockProposal(proposal3); - expect(result3.added).toBe(true); - expect(result3.count).toBe(3); + expect(result3.added).toBe(false); + expect(result3.count).toBe(2); }); it('should return added=false when exceeding capacity', async () => { @@ -666,12 +666,12 @@ export function 
describeAttestationPool(getAttestationPool: () => AttestationPoo const result2 = await ap.tryAddCheckpointProposal(proposal2); expect(result2.count).toBe(2); - // Add a third proposal for same slot + // Third proposal for same slot should be rejected (cap is 2) const proposal3 = await mockCheckpointProposalCoreForPool(signers[2], slotNumber); const result3 = await ap.tryAddCheckpointProposal(proposal3); - expect(result3.added).toBe(true); - expect(result3.count).toBe(3); + expect(result3.added).toBe(false); + expect(result3.count).toBe(2); }); it('should not count attestations as proposals for duplicate detection', async () => { diff --git a/yarn-project/p2p/src/msg_validators/attestation_validator/README.md b/yarn-project/p2p/src/msg_validators/attestation_validator/README.md index 18e8b1f0aa06..3f2df77a5fb3 100644 --- a/yarn-project/p2p/src/msg_validators/attestation_validator/README.md +++ b/yarn-project/p2p/src/msg_validators/attestation_validator/README.md @@ -24,7 +24,7 @@ This module validates `CheckpointAttestation` gossipsub messages. Attestations a |---|------|-------------| | 8 | Sender recoverable (pool-side) | Silent drop | | 9 | Not a duplicate (same slot + proposalId + signer) | IGNORE | -| 10 | Per-signer cap: `MAX_ATTESTATIONS_PER_SLOT_AND_SIGNER` = 3 | IGNORE | +| 10 | Per-signer cap: `MAX_ATTESTATIONS_PER_SLOT_AND_SIGNER` = 2 | IGNORE | Own attestations added via `addOwnCheckpointAttestations` bypass the per-signer cap. 
diff --git a/yarn-project/p2p/src/msg_validators/proposal_validator/README.md b/yarn-project/p2p/src/msg_validators/proposal_validator/README.md index fa7a9694dd63..e560f3babc89 100644 --- a/yarn-project/p2p/src/msg_validators/proposal_validator/README.md +++ b/yarn-project/p2p/src/msg_validators/proposal_validator/README.md @@ -28,7 +28,7 @@ Deserialization guards: `BlockProposal.fromBuffer` and `SignedTxs.fromBuffer` bo | # | Rule | Consequence | |---|------|-------------| | 9 | **Duplicate**: same archive root already stored | IGNORE (no penalty) | -| 10 | **Per-position cap**: max 3 proposals per (slot, indexWithinCheckpoint) | REJECT + HighToleranceError | +| 10 | **Per-position cap**: max 2 proposals per (slot, indexWithinCheckpoint) | REJECT + HighToleranceError | | 11 | **Equivocation**: >1 distinct proposal for same (slot, index) | ACCEPT (rebroadcast for detection). At count=2: `duplicateProposalCallback` fires -> slash event (`OffenseType.DUPLICATE_PROPOSAL`, configured via `slashDuplicateProposalPenalty`) | ### Stage 3: Validator-Client Processing (BlockProposalHandler) @@ -84,7 +84,7 @@ The checkpoint's embedded `lastBlock` is extracted via `getBlockProposal()` and | Rule | Consequence | File | |------|-------------|------| | Block proposal must pass `BlockProposalValidator.validate()` | If REJECT: entire checkpoint REJECTED | `libp2p_service.ts` | -| Block proposal must not exceed per-position cap (3) | Checkpoint REJECTED + HighToleranceError | same | +| Block proposal must not exceed per-position cap (2) | Checkpoint REJECTED + HighToleranceError | same | | Block equivocation detected (>1 proposals for same slot+index) | Checkpoint REJECTED (block itself is ACCEPT for re-broadcast) | same | ### Stage 3: Mempool (Attestation Pool) @@ -92,7 +92,7 @@ The checkpoint's embedded `lastBlock` is extracted via `getBlockProposal()` and | Rule | Consequence | File | |------|-------------|------| | Duplicate (same archive ID) | IGNORE (no penalty). 
Embedded block still processed if valid. | `attestation_pool.ts` | -| Per-slot cap: `MAX_CHECKPOINT_PROPOSALS_PER_SLOT` = 5 | REJECT + HighToleranceError. Embedded block still processed. | same | +| Per-slot cap: `MAX_CHECKPOINT_PROPOSALS_PER_SLOT` = 2 | REJECT + HighToleranceError. Embedded block still processed. | same | ### Stage 4: Equivocation Detection From 873839903417afdfe0498512abf03f4c07a804a4 Mon Sep 17 00:00:00 2001 From: PhilWindle <60546371+PhilWindle@users.noreply.github.com> Date: Tue, 17 Mar 2026 18:06:35 +0000 Subject: [PATCH 25/41] fix: process all contract classes in storeBroadcastedIndividualFunctions (A-683) (#21686) Remove early return in for...of loop that caused only the first contract class's functions to be stored when multiple classes had broadcasts in the same block. Fixes https://linear.app/aztec-labs/issue/A-683 --- yarn-project/archiver/src/modules/data_store_updater.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yarn-project/archiver/src/modules/data_store_updater.ts b/yarn-project/archiver/src/modules/data_store_updater.ts index ccb80e2b450c..32087d4e2b7a 100644 --- a/yarn-project/archiver/src/modules/data_store_updater.ts +++ b/yarn-project/archiver/src/modules/data_store_updater.ts @@ -457,7 +457,7 @@ export class ArchiverDataStoreUpdater { if (validFnCount > 0) { this.log.verbose(`Storing ${validFnCount} functions for contract class ${contractClassId.toString()}`); } - return await this.store.addFunctions(contractClassId, validPrivateFns, validUtilityFns); + await this.store.addFunctions(contractClassId, validPrivateFns, validUtilityFns); } return true; } From 4c2210e6532dd8bf78312cd1523db9d75aa46520 Mon Sep 17 00:00:00 2001 From: danielntmd Date: Tue, 17 Mar 2026 18:28:02 +0000 Subject: [PATCH 26/41] chore: add slack success post on nightly scenario --- spartan/bootstrap.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/spartan/bootstrap.sh b/spartan/bootstrap.sh index 
7d10efc6a3a9..b6a2399fc90c 100755 --- a/spartan/bootstrap.sh +++ b/spartan/bootstrap.sh @@ -120,14 +120,24 @@ function run_network_tests { fi } +function slack_notify_scenario_pass { + local label="$1" + if [[ "${REF_NAME:-}" == v* ]]; then + slack_notify "Scenario ${label} tests PASSED on *${REF_NAME}*" "#alerts-next-scenario" + fi +} + function network_tests_1 { run_network_tests "$1" "smoke.test.ts" "${NETWORK_TESTS_1[@]}" + slack_notify_scenario_pass "set-1" } function network_tests_2 { run_network_tests "$1" "smoke.test.ts" "${NETWORK_TESTS_2[@]}" + slack_notify_scenario_pass "set-2" } function network_tests { run_network_tests "$1" "smoke.test.ts" "${NETWORK_TESTS_1[@]}" "${NETWORK_TESTS_2[@]}" + slack_notify_scenario_pass "all" } function network_bench_cmds { From 33d776eb695a8250da581df24f57dcab021673e2 Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Tue, 17 Mar 2026 18:23:03 -0300 Subject: [PATCH 27/41] fix(test): workaround slow mock creation Brings down test time from +1000s to 2s. --- .../skills/unit-test-implementation/SKILL.md | 22 +++++++++++++++++++ .../src/checkpoint_builder.test.ts | 4 +++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/yarn-project/.claude/skills/unit-test-implementation/SKILL.md b/yarn-project/.claude/skills/unit-test-implementation/SKILL.md index 0bc59961fb87..da1c1eab66f7 100644 --- a/yarn-project/.claude/skills/unit-test-implementation/SKILL.md +++ b/yarn-project/.claude/skills/unit-test-implementation/SKILL.md @@ -23,6 +23,28 @@ beforeEach(() => { }); ``` +### NEVER Pass Complex Objects as mock() Props + +`jest-mock-extended`'s `mock(props)` deep-processes any objects passed as initial properties. When those objects contain class instances with internal state (like `Fr`, `EthAddress`, `AztecAddress`, `GasFees`, `Buffer`, etc.), this causes **O(2^n) exponential slowdown** across tests — each test doubles the time of the previous one. 
+ +```typescript +// ❌ NEVER: Passing complex domain objects as mock props +// This causes exponential test slowdown (1s → 2s → 4s → 8s → ...) +const constants = { chainId: new Fr(1), coinbase: EthAddress.random(), gasFees: GasFees.empty() }; +beforeEach(() => { + builder = mock({ checkpointNumber, constants }); +}); + +// ✅ GOOD: Create mock without props, then set properties directly +beforeEach(() => { + builder = mock(); + Object.defineProperty(builder, 'checkpointNumber', { value: checkpointNumber }); + Object.defineProperty(builder, 'constants', { value: constants }); +}); +``` + +Simple primitives (strings, numbers, booleans) and arrow functions are safe to pass as props. The issue is specifically with class instances that have complex prototypes. + ### When to Use Real Instances vs Mocks **Mock external dependencies** that are: diff --git a/yarn-project/validator-client/src/checkpoint_builder.test.ts b/yarn-project/validator-client/src/checkpoint_builder.test.ts index 9dab4a4778c9..58284ec6055e 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.test.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.test.ts @@ -101,7 +101,9 @@ describe('CheckpointBuilder', () => { } beforeEach(() => { - lightweightCheckpointBuilder = mock({ checkpointNumber, constants }); + lightweightCheckpointBuilder = mock(); + Object.defineProperty(lightweightCheckpointBuilder, 'checkpointNumber', { value: checkpointNumber }); + Object.defineProperty(lightweightCheckpointBuilder, 'constants', { value: constants }); lightweightCheckpointBuilder.getBlocks.mockReturnValue([]); fork = mock(); From b56ac49013ac8b20242756227d0eb76d94a27538 Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Tue, 17 Mar 2026 18:41:08 -0300 Subject: [PATCH 28/41] fix(builder): persist contractsDB across blocks within a checkpoint (#21520) ## Motivation When building multiple blocks within a single checkpoint, the `CheckpointBuilder` was creating a new `PublicContractsDB` instance 
for each block. This meant that contracts deployed in an earlier block within the same checkpoint were not visible to subsequent blocks, causing calls to newly deployed contracts to fail. ## Approach Move the `PublicContractsDB` instance to be a persistent field on `CheckpointBuilder`, initialized once in the constructor and shared across all blocks in the checkpoint. Wrap block building in checkpoint/commit/revert semantics on the contracts DB so that failed blocks don't leak state. ## Changes - **validator-client**: Promote `contractsDB` from a local variable in `makeBlockBuilderDeps` to a class field on `CheckpointBuilder`. Wrap `buildBlock` in `createCheckpoint`/`commitCheckpoint`/`revertCheckpoint` calls on the contracts DB. - **validator-client (tests)**: Add tests verifying that the contracts DB checkpoint lifecycle is correctly managed across successful and failed block builds. - **end-to-end (tests)**: Add e2e test that deploys a contract and calls it in separate blocks within the same slot, validating cross-block contract visibility within a checkpoint. Fixes A-658 --------- Co-authored-by: Claude Opus 4.6 --- .../skills/unit-test-implementation/SKILL.md | 22 +++++ .../e2e_epochs/epochs_mbps.parallel.test.ts | 91 +++++++++++++++++++ .../public_processor/public_processor.ts | 4 +- .../src/checkpoint_builder.test.ts | 65 +++++++++++-- .../src/checkpoint_builder.ts | 13 ++- 5 files changed, 187 insertions(+), 8 deletions(-) diff --git a/yarn-project/.claude/skills/unit-test-implementation/SKILL.md b/yarn-project/.claude/skills/unit-test-implementation/SKILL.md index 0bc59961fb87..da1c1eab66f7 100644 --- a/yarn-project/.claude/skills/unit-test-implementation/SKILL.md +++ b/yarn-project/.claude/skills/unit-test-implementation/SKILL.md @@ -23,6 +23,28 @@ beforeEach(() => { }); ``` +### NEVER Pass Complex Objects as mock() Props + +`jest-mock-extended`'s `mock(props)` deep-processes any objects passed as initial properties. 
When those objects contain class instances with internal state (like `Fr`, `EthAddress`, `AztecAddress`, `GasFees`, `Buffer`, etc.), this causes **O(2^n) exponential slowdown** across tests — each test doubles the time of the previous one. + +```typescript +// ❌ NEVER: Passing complex domain objects as mock props +// This causes exponential test slowdown (1s → 2s → 4s → 8s → ...) +const constants = { chainId: new Fr(1), coinbase: EthAddress.random(), gasFees: GasFees.empty() }; +beforeEach(() => { + builder = mock({ checkpointNumber, constants }); +}); + +// ✅ GOOD: Create mock without props, then set properties directly +beforeEach(() => { + builder = mock(); + Object.defineProperty(builder, 'checkpointNumber', { value: checkpointNumber }); + Object.defineProperty(builder, 'constants', { value: constants }); +}); +``` + +Simple primitives (strings, numbers, booleans) and arrow functions are safe to pass as props. The issue is specifically with class instances that have complex prototypes. 
+ ### When to Use Real Instances vs Mocks **Mock external dependencies** that are: diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.parallel.test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.parallel.test.ts index 4b729728b706..69aeef5670e0 100644 --- a/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.parallel.test.ts +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.parallel.test.ts @@ -9,6 +9,7 @@ import { isL1ToL2MessageReady } from '@aztec/aztec.js/messaging'; import { waitForTx } from '@aztec/aztec.js/node'; import { RollupContract } from '@aztec/ethereum/contracts'; import type { Operator } from '@aztec/ethereum/deploy-aztec-l1-contracts'; +import { waitUntilL1Timestamp } from '@aztec/ethereum/l1-tx-utils'; import { asyncMap } from '@aztec/foundation/async-map'; import { CheckpointNumber, SlotNumber } from '@aztec/foundation/branded-types'; import { times, timesAsync } from '@aztec/foundation/collection'; @@ -17,6 +18,8 @@ import { retryUntil } from '@aztec/foundation/retry'; import { bufferToHex } from '@aztec/foundation/string'; import { executeTimeout } from '@aztec/foundation/timer'; import { TestContract } from '@aztec/noir-test-contracts.js/Test'; +import { getSlotAtTimestamp, getTimestampForSlot } from '@aztec/stdlib/epoch-helpers'; +import { GasFees } from '@aztec/stdlib/gas'; import { TxStatus } from '@aztec/stdlib/tx'; import { jest } from '@jest/globals'; @@ -67,6 +70,7 @@ describe('e2e_epochs/epochs_mbps', () => { maxTxsPerBlock?: number; buildCheckpointIfEmpty?: boolean; deployCrossChainContract?: boolean; + skipPushProposedBlocksToArchiver?: boolean; }) { const { syncChainTip = 'checkpointed', deployCrossChainContract = false, ...setupOpts } = opts; @@ -493,4 +497,91 @@ describe('e2e_epochs/epochs_mbps', () => { const multiBlockCheckpoint = await assertMultipleBlocksPerSlot(2, logger); await waitForProvenCheckpoint(multiBlockCheckpoint); }); + + it('deploys a contract and calls it in separate blocks within a 
slot', async () => { + await setupTest({ + syncChainTip: 'checkpointed', + minTxsPerBlock: 1, + maxTxsPerBlock: 1, + }); + + // Prepare deploy tx for a new TestContract. Get the instance address so we can construct the call tx. + const highPriority = new GasFees(100, 100); + const lowPriority = new GasFees(1, 1); + + const deployMethod = TestContract.deploy(wallet); + const deployInstance = await deployMethod.getInstance(); + logger.warn(`Will deploy TestContract at ${deployInstance.address}`); + + // Register the contract on the PXE so we can prove the call interaction against it. + await wallet.registerContract(deployInstance, TestContract.artifact); + const deployedContract = TestContract.at(deployInstance.address, wallet); + + // Pre-prove both txs before starting sequencers. This ensures both arrive in the pool + // at the same time, so the sequencer can sort by priority fee for correct ordering. + logger.warn(`Pre-proving deploy tx (high priority) and call tx (low priority)`); + const deployTx = await proveInteraction(wallet, deployMethod, { + from, + fee: { gasSettings: { maxPriorityFeesPerGas: highPriority } }, + }); + const callTx = await proveInteraction(wallet, deployedContract.methods.emit_nullifier_public(new Fr(42)), { + from, + fee: { gasSettings: { maxPriorityFeesPerGas: lowPriority } }, + }); + logger.warn(`Pre-proved both txs`); + + // Start the sequencers + await Promise.all(nodes.map(n => n.getSequencer()!.start())); + logger.warn(`Started all sequencers`); + + // Wait until one L1 slot before the start of the next L2 slot. + // This ensures both txs land in the pending pool right before the proposer starts building. 
+ // REFACTOR: This should go into a shared "waitUntilNextSlotStartsBuilding" utility + const currentL1Block = await test.l1Client.getBlock({ blockTag: 'latest' }); + const currentTimestamp = currentL1Block.timestamp; + const currentSlot = getSlotAtTimestamp(currentTimestamp, test.constants); + const nextSlot = SlotNumber(currentSlot + 1); + const nextSlotTimestamp = getTimestampForSlot(nextSlot, test.constants); + const targetTimestamp = nextSlotTimestamp - BigInt(test.L1_BLOCK_TIME_IN_S); + logger.warn(`Waiting until L1 timestamp ${targetTimestamp} (one L1 slot before L2 slot ${nextSlot})`, { + currentTimestamp, + currentSlot, + nextSlot, + nextSlotTimestamp, + targetTimestamp, + }); + await waitUntilL1Timestamp(test.l1Client, targetTimestamp, undefined, test.L2_SLOT_DURATION_IN_S * 3); + + // Send both pre-proved txs simultaneously, waiting for them to be checkpointed. + const timeout = test.L2_SLOT_DURATION_IN_S * 5; + logger.warn(`Sending both txs and waiting for checkpointed receipts`); + const [deployReceipt, callReceipt] = await executeTimeout( + () => Promise.all([deployTx.send({ wait: { timeout } }), callTx.send({ wait: { timeout } })]), + timeout * 1000, + ); + logger.warn(`Both txs checkpointed`, { + deployBlock: deployReceipt.blockNumber, + callBlock: callReceipt.blockNumber, + }); + + // Both txs should succeed (send throws on revert). Deploy should be in an earlier block. + expect(deployReceipt.blockNumber).toBeLessThan(callReceipt.blockNumber!); + + // Verify both blocks belong to the same checkpoint. 
+ const deployCheckpointedBlock = await retryUntil( + async () => (await context.aztecNode.getCheckpointedBlocks(deployReceipt.blockNumber!, 1))[0], + 'deploy checkpointed block', + timeout, + ); + const callCheckpointedBlock = await retryUntil( + async () => (await context.aztecNode.getCheckpointedBlocks(callReceipt.blockNumber!, 1))[0], + 'call checkpointed block', + timeout, + ); + expect(deployCheckpointedBlock.checkpointNumber).toBe(callCheckpointedBlock.checkpointNumber); + logger.warn(`Both blocks in checkpoint ${deployCheckpointedBlock.checkpointNumber}`); + + // Wait for the checkpoint to be proven. + await waitForProvenCheckpoint(deployCheckpointedBlock.checkpointNumber); + }); }); diff --git a/yarn-project/simulator/src/public/public_processor/public_processor.ts b/yarn-project/simulator/src/public/public_processor/public_processor.ts index 20ce6fbaa3e4..f8b708d0a568 100644 --- a/yarn-project/simulator/src/public/public_processor/public_processor.ts +++ b/yarn-project/simulator/src/public/public_processor/public_processor.ts @@ -306,6 +306,9 @@ export class PublicProcessor implements Traceable { totalBlockGas = totalBlockGas.add(processedTx.gasUsed.totalGas); totalSizeInBytes += txSize; totalBlobFields += txBlobFields; + + // Commit the tx-level contracts checkpoint on success + this.contractsDB.commitCheckpoint(); } catch (err: any) { if (err?.name === 'PublicProcessorTimeoutError') { this.log.warn(`Stopping tx processing due to timeout.`); @@ -354,7 +357,6 @@ export class PublicProcessor implements Traceable { } finally { // Base case is we always commit the checkpoint. 
Using the ForkCheckpoint means this has no effect if the tx was previously reverted await checkpoint.commit(); - this.contractsDB.commitCheckpointOkIfNone(); } } diff --git a/yarn-project/validator-client/src/checkpoint_builder.test.ts b/yarn-project/validator-client/src/checkpoint_builder.test.ts index 9dab4a4778c9..f8b3ce509dc3 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.test.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.test.ts @@ -10,7 +10,7 @@ import { Fr } from '@aztec/foundation/curves/bn254'; import { EthAddress } from '@aztec/foundation/eth-address'; import { TestDateProvider } from '@aztec/foundation/timer'; import type { LightweightCheckpointBuilder } from '@aztec/prover-client/light'; -import type { PublicProcessor } from '@aztec/simulator/server'; +import type { PublicContractsDB, PublicProcessor } from '@aztec/simulator/server'; import { AztecAddress } from '@aztec/stdlib/aztec-address'; import { L2Block } from '@aztec/stdlib/block'; import type { ContractDataSource } from '@aztec/stdlib/contract'; @@ -22,17 +22,22 @@ import { type PublicProcessorLimits, type PublicProcessorValidator, } from '@aztec/stdlib/interfaces/server'; -import { TxHash } from '@aztec/stdlib/tx'; -import type { CheckpointGlobalVariables, GlobalVariables, ProcessedTx, Tx } from '@aztec/stdlib/tx'; +import { + type CheckpointGlobalVariables, + type GlobalVariables, + type ProcessedTx, + type Tx, + TxHash, +} from '@aztec/stdlib/tx'; import type { TelemetryClient } from '@aztec/telemetry-client'; -import { describe, expect, it } from '@jest/globals'; +import { describe, expect, it, jest } from '@jest/globals'; import { type MockProxy, mock } from 'jest-mock-extended'; import { CheckpointBuilder } from './checkpoint_builder.js'; describe('CheckpointBuilder', () => { - let checkpointBuilder: CheckpointBuilder; + let checkpointBuilder: TestCheckpointBuilder; let lightweightCheckpointBuilder: MockProxy; let fork: MockProxy; let config: 
FullNodeBlockBuilderConfig; @@ -57,6 +62,8 @@ describe('CheckpointBuilder', () => { }; class TestCheckpointBuilder extends CheckpointBuilder { + declare public contractsDB: PublicContractsDB; + public override makeBlockBuilderDeps(_globalVariables: GlobalVariables, _fork: MerkleTreeWriteOperations) { return Promise.resolve({ processor, validator }); } @@ -101,7 +108,9 @@ describe('CheckpointBuilder', () => { } beforeEach(() => { - lightweightCheckpointBuilder = mock({ checkpointNumber, constants }); + lightweightCheckpointBuilder = mock(); + Object.defineProperty(lightweightCheckpointBuilder, 'checkpointNumber', { value: checkpointNumber }); + Object.defineProperty(lightweightCheckpointBuilder, 'constants', { value: constants }); lightweightCheckpointBuilder.getBlocks.mockReturnValue([]); fork = mock(); @@ -117,6 +126,50 @@ describe('CheckpointBuilder', () => { setupBuilder(); }); + describe('contractsDB checkpointing', () => { + let createCheckpointSpy: jest.SpiedFunction<() => void>; + let commitCheckpointSpy: jest.SpiedFunction<() => void>; + let revertCheckpointSpy: jest.SpiedFunction<() => void>; + + beforeEach(() => { + const db = checkpointBuilder.contractsDB; + createCheckpointSpy = jest.spyOn(db, 'createCheckpoint'); + commitCheckpointSpy = jest.spyOn(db, 'commitCheckpoint'); + revertCheckpointSpy = jest.spyOn(db, 'revertCheckpoint'); + + lightweightCheckpointBuilder.getBlockCount.mockReturnValue(0); + }); + + async function mockSuccessfulBlock() { + const block = await L2Block.random(blockNumber); + lightweightCheckpointBuilder.addBlock.mockResolvedValue({ block, timings: {} }); + processor.process.mockResolvedValue([[{ hash: TxHash.random() } as ProcessedTx], [], [], [], []]); + return block; + } + + it('uses the same contractsDB across multiple block builds', async () => { + await mockSuccessfulBlock(); + await checkpointBuilder.buildBlock([], blockNumber, 1000n); + + await mockSuccessfulBlock(); + await checkpointBuilder.buildBlock([], 
BlockNumber(blockNumber + 1), 1001n); + + expect(createCheckpointSpy).toHaveBeenCalledTimes(2); + expect(commitCheckpointSpy).toHaveBeenCalledTimes(2); + expect(revertCheckpointSpy).not.toHaveBeenCalled(); + }); + + it('calls revertCheckpoint when public processor fails', async () => { + processor.process.mockRejectedValue(new Error('processor failure')); + + await expect(checkpointBuilder.buildBlock([], blockNumber, 1000n)).rejects.toThrow('processor failure'); + + expect(createCheckpointSpy).toHaveBeenCalledTimes(1); + expect(commitCheckpointSpy).not.toHaveBeenCalled(); + expect(revertCheckpointSpy).toHaveBeenCalledTimes(1); + }); + }); + describe('buildBlock', () => { it('builds a block successfully when transactions are processed', async () => { lightweightCheckpointBuilder.getBlockCount.mockReturnValue(0); diff --git a/yarn-project/validator-client/src/checkpoint_builder.ts b/yarn-project/validator-client/src/checkpoint_builder.ts index 906bcfe7da98..20a9625c8e71 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.ts @@ -46,6 +46,9 @@ export type { BuildBlockInCheckpointResult } from '@aztec/stdlib/interfaces/serv export class CheckpointBuilder implements ICheckpointBlockBuilder { private log: Logger; + /** Persistent contracts DB shared across all blocks in this checkpoint. 
*/ + protected contractsDB: PublicContractsDB; + constructor( private checkpointBuilder: LightweightCheckpointBuilder, private fork: MerkleTreeWriteOperations, @@ -60,6 +63,7 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { ...bindings, instanceId: `checkpoint-${checkpointBuilder.checkpointNumber}`, }); + this.contractsDB = new PublicContractsDB(this.contractDataSource, this.log.getBindings()); } getConstantData(): CheckpointGlobalVariables { @@ -104,6 +108,8 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { ...this.capLimitsByCheckpointBudgets(opts), }; + // Create a block-level checkpoint on the contracts DB so we can roll back on failure + this.contractsDB.createCheckpoint(); // We execute all merkle tree operations on a world state fork checkpoint // This enables us to discard all modifications in the event that we fail to successfully process sufficient transactions const forkCheckpoint = await ForkCheckpoint.new(this.fork); @@ -112,6 +118,7 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { const [publicProcessorDuration, [processedTxs, failedTxs, usedTxs]] = await elapsed(() => processor.process(pendingTxs, cappedOpts, validator), ); + // Throw before updating state if we don't have enough valid txs const minValidTxs = opts.minValidTxs ?? 
0; if (processedTxs.length < minValidTxs) { @@ -126,6 +133,8 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { expectedEndState: opts.expectedEndState, }); + this.contractsDB.commitCheckpoint(); + this.log.debug('Built block within checkpoint', { header: block.header.toInspect(), processedTxs: processedTxs.map(tx => tx.hash.toString()), @@ -140,6 +149,8 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { usedTxs, }; } catch (err) { + // Revert all changes to contracts db + this.contractsDB.revertCheckpoint(); // If we reached the point of committing the checkpoint, this does nothing // Otherwise it reverts any changes made to the fork for this failed block await forkCheckpoint.revert(); @@ -233,7 +244,7 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { ...(await getDefaultAllowedSetupFunctions()), ...(this.config.txPublicSetupAllowListExtend ?? []), ]; - const contractsDB = new PublicContractsDB(this.contractDataSource, this.log.getBindings()); + const contractsDB = this.contractsDB; const guardedFork = new GuardedMerkleTreeOperations(fork); const collectDebugLogs = this.debugLogStore.isEnabled; From 946ddb14a43651a65d6095645fabc1e6556ad0cc Mon Sep 17 00:00:00 2001 From: PhilWindle <60546371+PhilWindle@users.noreply.github.com> Date: Wed, 18 Mar 2026 02:06:20 +0000 Subject: [PATCH 29/41] fix: only delete logs from rolled-back blocks, not entire tag (A-686) (#21687) During reorg, deleteLogs was deleting the entire log entry for a tag instead of only removing logs from the rolled-back blocks. This caused logs from earlier blocks to be lost. 
Fixes https://linear.app/aztec-labs/issue/A-686 --------- Co-authored-by: Santiago Palladino --- .../src/store/kv_archiver_store.test.ts | 147 ++++++++++++++++-- yarn-project/archiver/src/store/log_store.ts | 53 +++++-- .../stdlib/src/logs/tx_scoped_l2_log.test.ts | 17 ++ .../stdlib/src/logs/tx_scoped_l2_log.ts | 16 ++ 4 files changed, 212 insertions(+), 21 deletions(-) create mode 100644 yarn-project/stdlib/src/logs/tx_scoped_l2_log.test.ts diff --git a/yarn-project/archiver/src/store/kv_archiver_store.test.ts b/yarn-project/archiver/src/store/kv_archiver_store.test.ts index 4e70ce10aa1c..92ae927f0ef3 100644 --- a/yarn-project/archiver/src/store/kv_archiver_store.test.ts +++ b/yarn-project/archiver/src/store/kv_archiver_store.test.ts @@ -1787,19 +1787,146 @@ describe('KVArchiverDataStore', () => { }); }); - it('deleteLogs', async () => { - const block = publishedCheckpoints[0].checkpoint.blocks[0]; - await store.addProposedBlock(block); - await expect(store.addLogs([block])).resolves.toEqual(true); + describe('deleteLogs', () => { + it('deletes public logs for a block', async () => { + const block = publishedCheckpoints[0].checkpoint.blocks[0]; + await store.addProposedBlock(block); + await expect(store.addLogs([block])).resolves.toEqual(true); + + expect((await store.getPublicLogs({ fromBlock: BlockNumber(1) })).logs.length).toEqual( + block.body.txEffects.map(txEffect => txEffect.publicLogs).flat().length, + ); + + await store.deleteLogs([block]); - expect((await store.getPublicLogs({ fromBlock: BlockNumber(1) })).logs.length).toEqual( - block.body.txEffects.map(txEffect => txEffect.publicLogs).flat().length, - ); + expect((await store.getPublicLogs({ fromBlock: BlockNumber(1) })).logs.length).toEqual(0); + }); - // This one is a pain for memory as we would never want to just delete memory in the middle. 
- await store.deleteLogs([block]); + it('deletes contract class logs for a block', async () => { + // Create a block that explicitly has contract class logs + const block = await L2Block.random(BlockNumber(1), { + txsPerBlock: 2, + txOptions: { numContractClassLogs: 1 }, + state: makeStateForBlock(1, 2), + }); + await store.addProposedBlock(block); + await store.addLogs([block]); + + const logsBefore = await store.getContractClassLogs({ fromBlock: BlockNumber(1) }); + expect(logsBefore.logs.length).toBeGreaterThan(0); + + await store.deleteLogs([block]); + + const logsAfter = await store.getContractClassLogs({ fromBlock: BlockNumber(1) }); + expect(logsAfter.logs.length).toEqual(0); + }); + + it('retains private logs from non-reorged block when same tag appears in reorged block', async () => { + const sharedTag = makePrivateLogTag(1, 0, 0); + + // Block 1 with a private log using sharedTag + const cp1 = await makeCheckpointWithLogs(1, { + numTxsPerBlock: 1, + privateLogs: { numLogsPerTx: 1 }, + }); + const block1 = cp1.checkpoint.blocks[0]; + + // Block 2 with a private log using the SAME tag + const cp2 = await makeCheckpointWithLogs(2, { + previousArchive: block1.archive, + numTxsPerBlock: 1, + privateLogs: { numLogsPerTx: 1 }, + }); + const block2 = cp2.checkpoint.blocks[0]; + // Override block2's private log tag to match block1's + block2.body.txEffects[0].privateLogs[0] = makePrivateLog(sharedTag); + + await addProposedBlocks(store, [block1, block2], { force: true }); + await store.addLogs([block1, block2]); + + // Both blocks' logs should be present + const logsBefore = await store.getPrivateLogsByTags([sharedTag]); + expect(logsBefore[0]).toHaveLength(2); + + // Reorg: delete block 2 + await store.deleteLogs([block2]); + + // Block 1's log should still be present + const logsAfter = await store.getPrivateLogsByTags([sharedTag]); + expect(logsAfter[0]).toHaveLength(1); + expect(logsAfter[0][0].blockNumber).toEqual(1); + }); - expect((await 
store.getPublicLogs({ fromBlock: BlockNumber(1) })).logs.length).toEqual(0); + it('retains public logs from non-reorged block when same tag appears in reorged block', async () => { + const contractAddress = AztecAddress.fromNumber(543254); + const sharedTag = makePublicLogTag(1, 0, 0); + + // Block 1 with a public log using sharedTag + const cp1 = await makeCheckpointWithLogs(1, { + numTxsPerBlock: 1, + publicLogs: { numLogsPerTx: 1, contractAddress }, + }); + const block1 = cp1.checkpoint.blocks[0]; + + // Block 2 with a public log using the SAME tag from the same contract + const cp2 = await makeCheckpointWithLogs(2, { + previousArchive: block1.archive, + numTxsPerBlock: 1, + publicLogs: { numLogsPerTx: 1, contractAddress }, + }); + const block2 = cp2.checkpoint.blocks[0]; + // Override block2's public log tag to match block1's + block2.body.txEffects[0].publicLogs[0] = makePublicLog(sharedTag, contractAddress); + + await addProposedBlocks(store, [block1, block2], { force: true }); + await store.addLogs([block1, block2]); + + // Both blocks' logs should be present + const logsBefore = await store.getPublicLogsByTagsFromContract(contractAddress, [sharedTag]); + expect(logsBefore[0]).toHaveLength(2); + + // Reorg: delete block 2 + await store.deleteLogs([block2]); + + // Block 1's log should still be present + const logsAfter = await store.getPublicLogsByTagsFromContract(contractAddress, [sharedTag]); + expect(logsAfter[0]).toHaveLength(1); + expect(logsAfter[0][0].blockNumber).toEqual(1); + }); + + it('deletes multiple blocks at once', async () => { + const cp1 = await makeCheckpointWithLogs(1, { + numTxsPerBlock: 2, + privateLogs: { numLogsPerTx: 1 }, + publicLogs: { numLogsPerTx: 1 }, + }); + const block1 = cp1.checkpoint.blocks[0]; + + const cp2 = await makeCheckpointWithLogs(2, { + previousArchive: block1.archive, + numTxsPerBlock: 2, + privateLogs: { numLogsPerTx: 1 }, + publicLogs: { numLogsPerTx: 1 }, + }); + const block2 = cp2.checkpoint.blocks[0]; + + 
await addProposedBlocks(store, [block1, block2], { force: true }); + await store.addLogs([block1, block2]); + + // Verify logs exist + expect((await store.getPublicLogs({ fromBlock: BlockNumber(1) })).logs.length).toBeGreaterThan(0); + + // Delete both blocks at once + await store.deleteLogs([block1, block2]); + + expect((await store.getPublicLogs({ fromBlock: BlockNumber(1) })).logs.length).toEqual(0); + }); + + it('is a no-op when deleting blocks with no logs', async () => { + const block = publishedCheckpoints[0].checkpoint.blocks[0]; + // Don't add logs, just try to delete + await expect(store.deleteLogs([block])).resolves.toEqual(true); + }); }); describe('getTxEffect', () => { diff --git a/yarn-project/archiver/src/store/log_store.ts b/yarn-project/archiver/src/store/log_store.ts index 5ef8656acc4d..271160cac351 100644 --- a/yarn-project/archiver/src/store/log_store.ts +++ b/yarn-project/archiver/src/store/log_store.ts @@ -1,6 +1,6 @@ import { INITIAL_L2_BLOCK_NUM } from '@aztec/constants'; import { BlockNumber } from '@aztec/foundation/branded-types'; -import { filterAsync } from '@aztec/foundation/collection'; +import { compactArray, filterAsync } from '@aztec/foundation/collection'; import { Fr } from '@aztec/foundation/curves/bn254'; import { createLogger } from '@aztec/foundation/log'; import { BufferReader, numToUInt32BE } from '@aztec/foundation/serialize'; @@ -313,18 +313,49 @@ export class LogStore { deleteLogs(blocks: L2Block[]): Promise { return this.db.transactionAsync(async () => { - await Promise.all( - blocks.map(async block => { - // Delete private logs - const privateKeys = (await this.#privateLogKeysByBlock.getAsync(block.number)) ?? []; - await Promise.all(privateKeys.map(tag => this.#privateLogsByTag.delete(tag))); - - // Delete public logs - const publicKeys = (await this.#publicLogKeysByBlock.getAsync(block.number)) ?? 
[]; - await Promise.all(publicKeys.map(key => this.#publicLogsByContractAndTag.delete(key))); - }), + const blockNumbers = new Set(blocks.map(block => block.number)); + const firstBlockToDelete = Math.min(...blockNumbers); + + // Collect all unique private tags across all blocks being deleted + const allPrivateTags = new Set( + compactArray(await Promise.all(blocks.map(block => this.#privateLogKeysByBlock.getAsync(block.number)))).flat(), + ); + + // Trim private logs: for each tag, delete all instances including and after the first block being deleted. + // This hinges on the invariant that logs for a given tag are always inserted in order of block number, which is enforced in #addPrivateLogs. + for (const tag of allPrivateTags) { + const existing = await this.#privateLogsByTag.getAsync(tag); + if (existing === undefined || existing.length === 0) { + continue; + } + const lastIndexToKeep = existing.findLastIndex( + buf => TxScopedL2Log.getBlockNumberFromBuffer(buf) < firstBlockToDelete, + ); + const remaining = existing.slice(0, lastIndexToKeep + 1); + await (remaining.length > 0 ? this.#privateLogsByTag.set(tag, remaining) : this.#privateLogsByTag.delete(tag)); + } + + // Collect all unique public keys across all blocks being deleted + const allPublicKeys = new Set( + compactArray(await Promise.all(blocks.map(block => this.#publicLogKeysByBlock.getAsync(block.number)))).flat(), ); + // And do the same as we did with private logs + for (const key of allPublicKeys) { + const existing = await this.#publicLogsByContractAndTag.getAsync(key); + if (existing === undefined || existing.length === 0) { + continue; + } + const lastIndexToKeep = existing.findLastIndex( + buf => TxScopedL2Log.getBlockNumberFromBuffer(buf) < firstBlockToDelete, + ); + const remaining = existing.slice(0, lastIndexToKeep + 1); + await (remaining.length > 0 + ? 
this.#publicLogsByContractAndTag.set(key, remaining) + : this.#publicLogsByContractAndTag.delete(key)); + } + + // After trimming the tagged logs, we can delete the block-level keys that track which tags are in which blocks. await Promise.all( blocks.map(block => Promise.all([ diff --git a/yarn-project/stdlib/src/logs/tx_scoped_l2_log.test.ts b/yarn-project/stdlib/src/logs/tx_scoped_l2_log.test.ts new file mode 100644 index 000000000000..d7b42331e3cf --- /dev/null +++ b/yarn-project/stdlib/src/logs/tx_scoped_l2_log.test.ts @@ -0,0 +1,17 @@ +import { TxScopedL2Log } from './tx_scoped_l2_log.js'; + +describe('TxScopedL2Log', () => { + it('should serialize and deserialize correctly', () => { + const log = TxScopedL2Log.random(); + const buffer = log.toBuffer(); + const deserializedLog = TxScopedL2Log.fromBuffer(buffer); + expect(deserializedLog.equals(log)).toBe(true); + }); + + it('should extract block number from buffer correctly', () => { + const log = TxScopedL2Log.random(); + const buffer = log.toBuffer(); + const blockNumber = TxScopedL2Log.getBlockNumberFromBuffer(buffer); + expect(blockNumber).toBe(log.blockNumber); + }); +}); diff --git a/yarn-project/stdlib/src/logs/tx_scoped_l2_log.ts b/yarn-project/stdlib/src/logs/tx_scoped_l2_log.ts index 31481f4ab49d..7e9264ac94de 100644 --- a/yarn-project/stdlib/src/logs/tx_scoped_l2_log.ts +++ b/yarn-project/stdlib/src/logs/tx_scoped_l2_log.ts @@ -1,4 +1,5 @@ import { BlockNumber, BlockNumberSchema } from '@aztec/foundation/branded-types'; +import { times } from '@aztec/foundation/collection'; import { Fr } from '@aztec/foundation/curves/bn254'; import { schemas as foundationSchemas } from '@aztec/foundation/schemas'; import { @@ -83,6 +84,21 @@ export class TxScopedL2Log { return new TxScopedL2Log(txHash, blockNumber, blockTimestamp, logData, noteHashes, firstNullifier); } + static getBlockNumberFromBuffer(buffer: Buffer) { + return BlockNumber(buffer.readUint32BE(Fr.SIZE_IN_BYTES)); + } + + static random() { + return 
new TxScopedL2Log( + TxHash.fromField(Fr.random()), + BlockNumber(Math.floor(Math.random() * 100000) + 1), + BigInt(Math.floor(Date.now() / 1000)), + times(3, Fr.random), + times(3, Fr.random), + Fr.random(), + ); + } + equals(other: TxScopedL2Log) { return ( this.txHash.equals(other.txHash) && From 2e79200365ceb382a070eed1d96d4dc4bd9bedaf Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Wed, 18 Mar 2026 09:04:33 -0300 Subject: [PATCH 30/41] fix(sequencer): fix checkpoint budget redistribution for multi-block slots (#21692) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Motivation Three bugs in how per-block gas/tx limits are computed and enforced during checkpoint building made the redistribution logic ineffective in multi-block-per-slot mode: 1. Config `maxBlocksPerCheckpoint` was not propagated to the checkpoint builder, so `remainingBlocks` always defaulted to 1 — making redistribution a no-op. 2. The static per-block limit computed in the sequencer-client at startup always equaled the first-block fair share, so redistribution could only tighten, never relax — later blocks couldn't use surplus budget from light early blocks. 3. Redistribution ran during validator re-execution with the proposer's multiplier logic, causing potential false rejections. ## Approach Delete the sequencer's `computeBlockLimits` — the checkpoint builder now derives per-block limits dynamically from checkpoint-level budgets. Move `maxBlocksPerCheckpoint` and `perBlockAllocationMultiplier` out of config into `BlockBuilderOptions` (passed from the sequencer's timetable at build time). Split behavior on `isBuildingProposal`: proposers get redistribution with multiplier; validators only cap by per-block limit + remaining checkpoint budget (no fair-share). 
Introduce `BlockBuilderOptions` as a discriminated union type: when `isBuildingProposal: true`, redistribution params (`maxBlocksPerCheckpoint`, `perBlockAllocationMultiplier`) are required; when `false`, they're absent. This makes it a compile-time error to forget redistribution params during proposal building or to accidentally include them during validation. ## Changes - **stdlib**: Split `PublicProcessorLimits` (processor-only fields) from `BlockBuilderOptions` (discriminated union with proposer/validator branches). Remove `maxBlocksPerCheckpoint` from `SequencerConfig`. Make `perBlockAllocationMultiplier` required on `ResolvedSequencerConfig`. - **sequencer-client**: Delete `computeBlockLimits`. Simplify `SequencerClient.new` to cap operator overrides at checkpoint limits. Pass `maxBlocksPerCheckpoint` and `perBlockAllocationMultiplier` via opts in `CheckpointProposalJob`. - **validator-client**: Rewrite `capLimitsByCheckpointBudgets` — first cap by remaining budget (always), then further cap by fair share only when proposing. Validator re-execution no longer applies redistribution. - **slasher**: Update `epoch_prune_watcher` buildBlock call to use new opts shape. - **validator-client (tests)**: Update tests to pass redistribution params via opts. Remove redundant tests. Add `validatorOpts`/`proposerOpts` helpers. - **end-to-end**: Add e2e test verifying redistribution allows late txs to fit in the last block, and a second test verifying validators accept blocks built with a larger proposer multiplier. - **validator-client (README)**: Update block building limits documentation. 
Co-authored-by: Claude Opus 4.6 (1M context) --- .../epochs_mbps_redistribution.test.ts | 372 ++++++++++++++++++ .../src/client/sequencer-client.test.ts | 116 ------ .../src/client/sequencer-client.ts | 113 ++---- yarn-project/sequencer-client/src/config.ts | 3 - .../checkpoint_proposal_job.timing.test.ts | 4 +- .../src/sequencer/checkpoint_proposal_job.ts | 13 +- .../src/test/mock_checkpoint_builder.ts | 6 +- .../src/watchers/epoch_prune_watcher.test.ts | 4 +- .../src/watchers/epoch_prune_watcher.ts | 5 +- .../stdlib/src/checkpoint/validate.ts | 4 +- .../stdlib/src/interfaces/block-builder.ts | 36 +- yarn-project/stdlib/src/interfaces/configs.ts | 7 +- yarn-project/validator-client/README.md | 20 +- .../src/block_proposal_handler.ts | 2 + .../src/checkpoint_builder.test.ts | 322 ++++++++++----- .../src/checkpoint_builder.ts | 58 ++- .../src/validator.integration.test.ts | 7 +- 17 files changed, 710 insertions(+), 382 deletions(-) create mode 100644 yarn-project/end-to-end/src/e2e_epochs/epochs_mbps_redistribution.test.ts delete mode 100644 yarn-project/sequencer-client/src/client/sequencer-client.test.ts diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps_redistribution.test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps_redistribution.test.ts new file mode 100644 index 000000000000..6a8d665eaa7c --- /dev/null +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps_redistribution.test.ts @@ -0,0 +1,372 @@ +import type { Archiver } from '@aztec/archiver'; +import type { AztecNodeConfig, AztecNodeService } from '@aztec/aztec-node'; +import { AztecAddress, EthAddress } from '@aztec/aztec.js/addresses'; +import { NO_WAIT } from '@aztec/aztec.js/contracts'; +import { Fr } from '@aztec/aztec.js/fields'; +import type { Logger } from '@aztec/aztec.js/log'; +import { waitForTx } from '@aztec/aztec.js/node'; +import { RollupContract } from '@aztec/ethereum/contracts'; +import type { Operator } from '@aztec/ethereum/deploy-aztec-l1-contracts'; +import { 
waitUntilL1Timestamp } from '@aztec/ethereum/l1-tx-utils'; +import { asyncMap } from '@aztec/foundation/async-map'; +import { CheckpointNumber, SlotNumber } from '@aztec/foundation/branded-types'; +import { times, timesAsync } from '@aztec/foundation/collection'; +import { SecretValue } from '@aztec/foundation/config'; +import { retryUntil } from '@aztec/foundation/retry'; +import { sleep } from '@aztec/foundation/sleep'; +import { executeTimeout } from '@aztec/foundation/timer'; +import { TestContract } from '@aztec/noir-test-contracts.js/Test'; +import { getTimestampForSlot } from '@aztec/stdlib/epoch-helpers'; + +import { jest } from '@jest/globals'; +import { privateKeyToAccount } from 'viem/accounts'; + +import { type EndToEndContext, getPrivateKeyFromIndex } from '../fixtures/utils.js'; +import { TestWallet } from '../test-wallet/test_wallet.js'; +import { proveInteraction } from '../test-wallet/utils.js'; +import { EpochsTestContext } from './epochs_test.js'; + +jest.setTimeout(1000 * 60 * 20); + +const NODE_COUNT = 4; + +/** + * Number of txs to feed one-by-one during early sub-slots. + * These are sent at the start of each sub-slot so each block picks up exactly one. + */ +const EARLY_TX_COUNT = 2; + +/** + * Number of txs to dump into the mempool right before the last sub-slot. + * With redistribution working, the last block should have enough budget to include all of them. + * Without redistribution, the per-block gas cap starves the last block. + */ +const LATE_TX_COUNT = 4; + +/** Total txs pre-proved before the test begins. */ +const TOTAL_TX_COUNT = EARLY_TX_COUNT + LATE_TX_COUNT; + +/** + * Verifies that checkpoint budget redistribution allows late transactions to fit in the last block + * when earlier blocks in the checkpoint were light. + * + * The test configures a tight per-checkpoint tx limit across multiple blocks per checkpoint. Early + * blocks each receive a single tx, leaving most of the budget unconsumed. 
All remaining txs are then + * submitted just before the last sub-slot. With redistribution working, the last block inherits the + * unused budget from earlier blocks and can include all late txs. Without redistribution, each block + * is capped at the static per-block limit and the late txs are left out. + * + * Success is verified by confirming that all late txs land in the same block. + */ +describe('e2e_epochs/epochs_mbps_redistribution', () => { + let context: EndToEndContext; + let logger: Logger; + let rollup: RollupContract; + let archiver: Archiver; + + let test: EpochsTestContext; + let validators: (Operator & { privateKey: `0x${string}` })[]; + let nodes: AztecNodeService[]; + let contract: TestContract; + let wallet: TestWallet; + let from: AztecAddress; + + /** + * Sets up validators and the test context with MBPS + redistribution config. + * Uses a tight `maxTxsPerCheckpoint` so that the redistribution logic is exercised. + */ + async function setupTest( + nodeConfigOverride?: (index: number) => Partial, + contextConfigOverride?: Record, + ) { + validators = times(NODE_COUNT, i => { + const privateKey = `0x${getPrivateKeyFromIndex(i + 3)!.toString('hex')}` as `0x${string}`; + const attester = EthAddress.fromString(privateKeyToAccount(privateKey).address); + return { attester, withdrawer: attester, privateKey, bn254SecretKey: new SecretValue(Fr.random().toBigInt()) }; + }); + + // Timing calculation for 3 blocks per checkpoint with 8s sub-slots: + // - initializationOffset = 0.5s (test mode, ethereumSlotDuration < 8) + // - 3 blocks x 8s = 24s + // - checkpointFinalization = 0.5s (assemble) + 0 (p2p in test) + 2s (L1 publish) = 2.5s + // - finalBlockDuration = 8s (re-execution) + // - Total: 0.5 + 24 + 8 + 2.5 = 35s => use 36s + test = await EpochsTestContext.setup({ + numberOfAccounts: 1, + initialValidators: validators, + mockGossipSubNetwork: true, + disableAnvilTestWatcher: true, + startProverNode: true, + aztecEpochDuration: 4, + enforceTimeTable: 
true, + ethereumSlotDuration: 4, + aztecSlotDuration: 36, + blockDurationMs: 8000, + l1PublishingTime: 2, + attestationPropagationTime: 0.5, + aztecTargetCommitteeSize: 3, + // Allow empty blocks so that early sub-slots without txs still produce blocks. + minTxsPerBlock: 0, + // Tight checkpoint-level tx limit: forces redistribution to matter. + // With 3 blocks and multiplier 1.2: maxTxsPerBlock = ceil(TOTAL_TX_COUNT/3*1.2). + // The redistribution should cap early blocks, preserving budget for the last block. + maxTxsPerCheckpoint: TOTAL_TX_COUNT, + // PXE syncs on checkpointed chain tip. + pxeOpts: { syncChainTip: 'checkpointed' }, + ...contextConfigOverride, + }); + + ({ context, logger, rollup } = test); + wallet = context.wallet; + from = context.accounts[0]; + + // Halt the default sequencer (it's not a validator). + logger.warn(`Stopping sequencer in initial aztec node.`); + await context.sequencer!.stop(); + + // Start validator nodes (sequencers not started yet). + logger.warn(`Starting ${NODE_COUNT} validator nodes.`); + nodes = await asyncMap(validators, ({ privateKey }, i) => + test.createValidatorNode([privateKey], { dontStartSequencer: true, ...nodeConfigOverride?.(i) }), + ); + logger.warn(`Started ${NODE_COUNT} validator nodes.`, { validators: validators.map(v => v.attester.toString()) }); + + // Point the wallet at a validator node. + wallet.updateNode(nodes[0]); + archiver = nodes[0].getBlockSource() as Archiver; + + // Register the test contract. + contract = await test.registerTestContract(wallet); + logger.warn(`Test setup completed.`); + } + + afterEach(async () => { + jest.restoreAllMocks(); + await test?.teardown(); + }); + + it('redistributes checkpoint budget so late txs fit in the last block', async () => { + await setupTest(); + + // Pre-prove all transactions up front. 
+ logger.warn(`Pre-proving ${TOTAL_TX_COUNT} transactions`); + const provenTxs = await timesAsync(TOTAL_TX_COUNT, i => + proveInteraction(wallet, contract.methods.emit_nullifier(new Fr(i + 1)), { from }), + ); + logger.warn(`Pre-proved ${provenTxs.length} transactions`); + + // Warp to just before the next L2 slot so sequencers start building promptly. + const currentSlot = await rollup.getSlotNumber(); + const nextSlot = SlotNumber(currentSlot + 1); + const slotStartTimestamp = getTimestampForSlot(nextSlot, test.constants); + // Warp to one L1 slot before the L2 slot starts (= the sequencer's build start). + const warpTo = slotStartTimestamp - BigInt(test.L1_BLOCK_TIME_IN_S); + logger.warn(`Warping to L1 timestamp ${warpTo} (one L1 slot before L2 slot ${nextSlot})`); + await waitUntilL1Timestamp(test.l1Client, warpTo, undefined, 60); + + // Start sequencers. + await Promise.all(nodes.map(n => n.getSequencer()!.start())); + logger.warn(`Started all sequencers`); + + // Feed one tx per sub-slot for the early blocks, waiting for each to be proposed before sending the next. + const earlyTxHashes = []; + for (let i = 0; i < EARLY_TX_COUNT; i++) { + logger.warn(`Sending early transaction ${i + 1}/${EARLY_TX_COUNT}`); + const txHash = await provenTxs[i].send({ wait: NO_WAIT }); + earlyTxHashes.push(txHash); + // Wait until the tx is proposed (mined) before sending the next one. + await retryUntil( + async () => (await Promise.all(nodes.map(n => n.getTxReceipt(txHash)))).some(receipt => receipt.isMined()), + 'tx proposed', + 30, + 0.5, + ); + logger.warn(`Early transaction ${i + 1}/${EARLY_TX_COUNT} confirmed proposed`); + } + logger.warn(`Sent ${earlyTxHashes.length} early transactions`); + + // Right before the last sub-slot, dump all remaining txs. + // With redistribution working, the last block's budget should be generous + // enough (early blocks consumed little), and all late txs should fit. 
+ logger.warn(`Sending ${LATE_TX_COUNT} late transactions before the last sub-slot`); + const lateTxHashes = await Promise.all(provenTxs.slice(EARLY_TX_COUNT).map(tx => tx.send({ wait: NO_WAIT }))); + logger.warn(`Sent ${lateTxHashes.length} late transactions`); + + // Wait for ALL txs to be mined. + const allTxHashes = [...earlyTxHashes, ...lateTxHashes]; + const timeout = test.L2_SLOT_DURATION_IN_S * 5; + logger.warn(`Waiting for all ${allTxHashes.length} transactions to be mined (timeout=${timeout}s)`); + await executeTimeout( + () => Promise.all(allTxHashes.map(txHash => waitForTx(nodes[0], txHash, { timeout }))), + timeout * 1000, + ); + logger.warn(`All transactions have been mined`); + + // Verify that all late txs landed in the same block. + // This confirms the last block received the redistributed budget and could fit them all. + const lateReceipts = await Promise.all(lateTxHashes.map(h => nodes[0].getTxReceipt(h))); + const lateBlockNumbers = lateReceipts.map(r => r.blockNumber); + logger.warn(`Late tx block numbers: ${lateBlockNumbers.join(', ')}`); + expect(new Set(lateBlockNumbers).size).toBe(1); + }); + + /** + * Verifies that validators do NOT apply the proposer's fair-share multiplier when re-executing blocks. + * + * Two of the four validator nodes are configured with a very large `perBlockAllocationMultiplier` (10), + * allowing their proposer to pack multiple txs into a single block. The other two keep the default + * multiplier (1.2), which limits them to 1 tx per block given the tight `maxTxsPerCheckpoint`. + * + * With `maxTxsPerCheckpoint = 2` and 3 blocks per checkpoint: + * - Normal multiplier (1.2): per-block limit = ceil(2/3 * 1.2) = ceil(0.8) = 1 tx + * - High multiplier (10): per-block limit = ceil(2/3 * 10) = ceil(6.67) = 7 txs (capped by remaining = 2) + * + * The test watches checkpoints and identifies the proposer for each slot via EpochCache. 
+ * It waits until it observes both: + * - A checkpoint by a high-multiplier proposer with at least one block having >1 tx + * - A checkpoint by a normal-multiplier proposer with all blocks having at most 1 tx + * + * If validators incorrectly applied their own multiplier during re-execution, checkpoints built by + * high-multiplier proposers would fail attestation and the chain would stall. + */ + it('validators accept blocks built with a larger proposer multiplier (no fair-share re-execution)', async () => { + const HIGH_MULTIPLIER = 10; + const MAX_TXS_PER_CHECKPOINT = 2; + + // Nodes 0 and 1 get a very large multiplier; nodes 2 and 3 keep the default (1.2). + await setupTest(i => (i < 2 ? { perBlockAllocationMultiplier: HIGH_MULTIPLIER } : {}), { + maxTxsPerCheckpoint: MAX_TXS_PER_CHECKPOINT, + }); + logger.warn( + `Set perBlockAllocationMultiplier=${HIGH_MULTIPLIER} on nodes 0,1; maxTxsPerCheckpoint=${MAX_TXS_PER_CHECKPOINT}`, + ); + + // Pre-prove an initial batch of transactions. + const INITIAL_TX_COUNT = 4; + let nullifierCounter = 200; + logger.warn(`Pre-proving ${INITIAL_TX_COUNT} initial transactions`); + const initialProvenTxs = await timesAsync(INITIAL_TX_COUNT, () => + proveInteraction(wallet, contract.methods.emit_nullifier(new Fr(nullifierCounter++)), { from }), + ); + logger.warn(`Pre-proved ${initialProvenTxs.length} transactions`); + + // Warp to just before the next L2 slot so sequencers start building promptly. + const currentSlot = await rollup.getSlotNumber(); + const nextSlot = SlotNumber(currentSlot + 1); + const slotStartTimestamp = getTimestampForSlot(nextSlot, test.constants); + const warpTo = slotStartTimestamp - BigInt(test.L1_BLOCK_TIME_IN_S); + logger.warn(`Warping to L1 timestamp ${warpTo} (one L1 slot before L2 slot ${nextSlot})`); + await waitUntilL1Timestamp(test.l1Client, warpTo, undefined, 60); + + // Start sequencers and send the initial batch. 
+ await Promise.all(nodes.map(n => n.getSequencer()!.start())); + logger.warn(`Started all sequencers`); + + logger.warn(`Sending ${initialProvenTxs.length} initial transactions`); + await Promise.all(initialProvenTxs.map(tx => tx.send({ wait: NO_WAIT }))); + logger.warn(`Sent initial transactions`); + + // Background loop: keep the mempool topped up so proposers always have txs to include. + let done = false; + const keepMempoolFull = async () => { + while (!done) { + try { + const pendingCount = await nodes[0].getPendingTxCount(); + if (pendingCount < 3) { + const tx = await proveInteraction(wallet, contract.methods.emit_nullifier(new Fr(nullifierCounter++)), { + from, + }); + await tx.send({ wait: NO_WAIT }); + logger.verbose(`Topped up mempool (was ${pendingCount}, nullifier=${nullifierCounter - 1})`); + } + } catch (err) { + logger.verbose(`Mempool top-up error (will retry): ${err}`); + } + await sleep(1000); + } + }; + void keepMempoolFull(); + + // Build a lookup from attester address to validator index for proposer identification. + const attesterToIndex = new Map(); + for (let i = 0; i < validators.length; i++) { + attesterToIndex.set(validators[i].attester.toString().toLowerCase(), i); + } + + // Watch checkpoints and identify the proposer via EpochCache (L1 committee selection). 
+ let seenHighMultiplier = false; + let seenNormalMultiplier = false; + let lastSeenCheckpoint = CheckpointNumber(0); + + const timeoutSeconds = test.L2_SLOT_DURATION_IN_S * 10; + logger.warn(`Watching checkpoints for up to ${timeoutSeconds}s until both proposer types are observed`); + + try { + await retryUntil( + async () => { + const checkpoints = await archiver.getCheckpoints(CheckpointNumber(1), 50); + for (const pc of checkpoints) { + if (pc.checkpoint.number <= lastSeenCheckpoint) { + continue; + } + lastSeenCheckpoint = pc.checkpoint.number; + + const blockTxCounts = pc.checkpoint.blocks.map(b => b.body.txEffects.length); + const totalTxs = blockTxCounts.reduce((a, b) => a + b, 0); + + // Skip empty checkpoints (no txs to analyze). + if (totalTxs === 0) { + logger.warn(`Checkpoint ${pc.checkpoint.number}: empty, skipping`); + continue; + } + + // Identify the proposer for this checkpoint's slot via EpochCache. + const slot = pc.checkpoint.header.slotNumber; + const proposer = await test.epochCache.getProposerAttesterAddressInSlot(slot); + if (!proposer) { + logger.warn(`Checkpoint ${pc.checkpoint.number}: could not determine proposer for slot ${slot}`); + continue; + } + const proposerIndex = attesterToIndex.get(proposer.toString().toLowerCase()); + const isHighMultiplier = proposerIndex !== undefined && proposerIndex < 2; + + logger.warn( + `Checkpoint ${pc.checkpoint.number} slot ${slot}: proposer=${proposer} (index=${proposerIndex}, ` + + `${isHighMultiplier ? 'HIGH' : 'NORMAL'} multiplier), blockTxCounts=[${blockTxCounts.join(',')}]`, + ); + + if (isHighMultiplier) { + // High-multiplier proposer: at least one block should have >1 tx. + const hasMultiTxBlock = blockTxCounts.some(count => count > 1); + if (hasMultiTxBlock) { + seenHighMultiplier = true; + logger.warn(`Observed high-multiplier checkpoint with multi-tx block`); + } + } else if (proposerIndex !== undefined) { + // Normal-multiplier proposer: each block should have at most 1 tx. 
+ for (const count of blockTxCounts) { + expect(count).toBeLessThanOrEqual(1); + } + seenNormalMultiplier = true; + logger.warn(`Observed normal-multiplier checkpoint with per-block tx counts <= 1`); + } + } + + return seenHighMultiplier && seenNormalMultiplier ? true : undefined; + }, + 'both proposer types observed', + timeoutSeconds, + 1, + ); + } finally { + done = true; + } + + logger.warn( + `Test passed: observed checkpoints from both high-multiplier and normal-multiplier proposers. ` + + `High-multiplier proposers packed >1 tx per block; normal proposers used at most 1 tx per block.`, + ); + expect(seenHighMultiplier).toBe(true); + expect(seenNormalMultiplier).toBe(true); + }); +}); diff --git a/yarn-project/sequencer-client/src/client/sequencer-client.test.ts b/yarn-project/sequencer-client/src/client/sequencer-client.test.ts deleted file mode 100644 index f2eb896e4746..000000000000 --- a/yarn-project/sequencer-client/src/client/sequencer-client.test.ts +++ /dev/null @@ -1,116 +0,0 @@ -import { MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT } from '@aztec/constants'; -import { createLogger } from '@aztec/foundation/log'; - -import type { SequencerClientConfig } from '../config.js'; -import { computeBlockLimits } from './sequencer-client.js'; - -describe('computeBlockLimits', () => { - const log = createLogger('test'); - - /** Builds a minimal config with only the fields needed by computeBlockLimits. 
*/ - function makeConfig(overrides: Partial = {}): SequencerClientConfig { - return { - ethereumSlotDuration: 12, - aztecSlotDuration: 72, - attestationPropagationTime: 3, - enforceTimeTable: true, - // No blockDurationMs -> single block mode -> maxNumberOfBlocks = 1 - ...overrides, - } as SequencerClientConfig; - } - - describe('L2 gas', () => { - it('derives maxL2BlockGas from rollupManaLimit when not explicitly set', () => { - const rollupManaLimit = 1_000_000; - // Single block mode (maxNumberOfBlocks=1), default multiplier=1.2: - // min(1_000_000, ceil(1_000_000 / 1 * 1.2)) = min(1_000_000, 1_200_000) = 1_000_000 - const result = computeBlockLimits(makeConfig(), rollupManaLimit, 12, log); - expect(result.maxL2BlockGas).toBe(rollupManaLimit); - }); - - it('uses explicit maxL2BlockGas when within rollupManaLimit', () => { - const result = computeBlockLimits(makeConfig({ maxL2BlockGas: 500_000 }), 1_000_000, 12, log); - expect(result.maxL2BlockGas).toBe(500_000); - }); - - it('caps explicit maxL2BlockGas at rollupManaLimit', () => { - const result = computeBlockLimits(makeConfig({ maxL2BlockGas: 2_000_000 }), 1_000_000, 12, log); - expect(result.maxL2BlockGas).toBe(1_000_000); - }); - }); - - describe('DA gas', () => { - const daLimit = MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT; - - it('derives maxDABlockGas from DA checkpoint limit when not explicitly set', () => { - // Single block mode (maxNumberOfBlocks=1), default multiplier=1.2: - // min(daLimit, ceil(daLimit / 1 * 1.2)) = min(daLimit, daLimit * 1.2) = daLimit - const result = computeBlockLimits(makeConfig(), 1_000_000, 12, log); - expect(result.maxDABlockGas).toBe(daLimit); - }); - - it('uses explicit maxDABlockGas when within DA checkpoint limit', () => { - const explicit = Math.floor(daLimit / 2); - const result = computeBlockLimits(makeConfig({ maxDABlockGas: explicit }), 1_000_000, 12, log); - expect(result.maxDABlockGas).toBe(explicit); - }); - - it('caps explicit maxDABlockGas at DA checkpoint limit', () 
=> { - const result = computeBlockLimits(makeConfig({ maxDABlockGas: daLimit + 100_000 }), 1_000_000, 12, log); - expect(result.maxDABlockGas).toBe(daLimit); - }); - }); - - describe('TX count', () => { - it('uses explicit maxTxsPerBlock when set', () => { - const result = computeBlockLimits(makeConfig({ maxTxsPerBlock: 10 }), 1_000_000, 12, log); - expect(result.maxTxsPerBlock).toBe(10); - }); - - it('caps maxTxsPerBlock at maxTxsPerCheckpoint', () => { - const result = computeBlockLimits( - makeConfig({ maxTxsPerBlock: 50, maxTxsPerCheckpoint: 30 }), - 1_000_000, - 12, - log, - ); - expect(result.maxTxsPerBlock).toBe(30); - }); - - it('derives maxTxsPerBlock from maxTxsPerCheckpoint when per-block not set', () => { - // Multi-block mode with maxNumberOfBlocks=5, multiplier=1.2: - // min(100, ceil(100 / 5 * 1.2)) = min(100, 24) = 24 - const config = makeConfig({ - maxTxsPerCheckpoint: 100, - blockDurationMs: 8000, - }); - const result = computeBlockLimits(config, 1_000_000, 12, log); - expect(result.maxTxsPerBlock).toBe(24); - }); - }); - - describe('multi-block mode', () => { - it('distributes budget across blocks in multi-block mode', () => { - // With blockDurationMs=8000, aztecSlotDuration=72, ethereumSlotDuration=12, - // attestationPropagationTime=3, l1PublishingTime=12: - // checkpointFinalizationTime = 1 + 3*2 + 12 = 19 - // timeReservedAtEnd = 8 + 19 = 27 - // timeAvailableForBlocks = 72 - 1 - 27 = 44 - // maxNumberOfBlocks = floor(44 / 8) = 5 - // With multiplier=1.2 and rollupManaLimit=1_000_000: - // maxL2BlockGas = min(1_000_000, ceil(1_000_000 / 5 * 1.2)) = min(1_000_000, 240_000) = 240_000 - const config = makeConfig({ blockDurationMs: 8000 }); - const result = computeBlockLimits(config, 1_000_000, 12, log); - expect(result.maxL2BlockGas).toBe(240_000); - - const daLimit = MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT; - expect(result.maxDABlockGas).toBe(Math.min(daLimit, Math.ceil((daLimit / 5) * 1.2))); - }); - - it('returns maxBlocksPerCheckpoint from 
timetable', () => { - const config = makeConfig({ blockDurationMs: 8000 }); - const result = computeBlockLimits(config, 1_000_000, 12, log); - expect(result.maxBlocksPerCheckpoint).toBe(5); - }); - }); -}); diff --git a/yarn-project/sequencer-client/src/client/sequencer-client.ts b/yarn-project/sequencer-client/src/client/sequencer-client.ts index 239c332e57f6..a2d322ff5838 100644 --- a/yarn-project/sequencer-client/src/client/sequencer-client.ts +++ b/yarn-project/sequencer-client/src/client/sequencer-client.ts @@ -19,15 +19,10 @@ import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging'; import { L1Metrics, type TelemetryClient } from '@aztec/telemetry-client'; import { FullNodeCheckpointsBuilder, NodeKeystoreAdapter, type ValidatorClient } from '@aztec/validator-client'; -import { - DefaultSequencerConfig, - type SequencerClientConfig, - getPublisherConfigFromSequencerConfig, -} from '../config.js'; +import { type SequencerClientConfig, getPublisherConfigFromSequencerConfig } from '../config.js'; import { GlobalVariableBuilder } from '../global_variable_builder/index.js'; import { SequencerPublisherFactory } from '../publisher/sequencer-publisher-factory.js'; import { Sequencer, type SequencerConfig } from '../sequencer/index.js'; -import { SequencerTimetable } from '../sequencer/timetable.js'; /** * Encapsulates the full sequencer and publisher. @@ -161,12 +156,7 @@ export class SequencerClient { const l1PublishingTimeBasedOnChain = isAnvilTestChain(config.l1ChainId) ? 1 : ethereumSlotDuration; const l1PublishingTime = config.l1PublishingTime ?? 
l1PublishingTimeBasedOnChain; - const { maxL2BlockGas, maxDABlockGas, maxTxsPerBlock, maxBlocksPerCheckpoint } = computeBlockLimits( - config, - rollupManaLimit, - l1PublishingTime, - log, - ); + const { maxL2BlockGas, maxDABlockGas, maxTxsPerBlock } = capPerBlockLimits(config, rollupManaLimit, log); const l1Constants = { l1GenesisTime, slotDuration: Number(slotDuration), ethereumSlotDuration, rollupManaLimit }; @@ -184,7 +174,7 @@ export class SequencerClient { deps.dateProvider, epochCache, rollupContract, - { ...config, l1PublishingTime, maxL2BlockGas, maxDABlockGas, maxTxsPerBlock, maxBlocksPerCheckpoint }, + { ...config, l1PublishingTime, maxL2BlockGas, maxDABlockGas, maxTxsPerBlock }, telemetryClient, log, ); @@ -249,88 +239,39 @@ export class SequencerClient { } /** - * Computes per-block L2 gas, DA gas, and TX count budgets based on the L1 rollup limits and the timetable. - * If the user explicitly set a limit, it is capped at the corresponding checkpoint limit. - * Otherwise, derives it as (checkpointLimit / maxBlocks) * multiplier, capped at the checkpoint limit. + * Caps operator-provided per-block limits at checkpoint-level limits. + * Returns undefined for any limit the operator didn't set — the checkpoint builder handles redistribution. */ -export function computeBlockLimits( +function capPerBlockLimits( config: SequencerClientConfig, rollupManaLimit: number, - l1PublishingTime: number, log: ReturnType, -): { maxL2BlockGas: number; maxDABlockGas: number; maxTxsPerBlock: number; maxBlocksPerCheckpoint: number } { - const maxNumberOfBlocks = new SequencerTimetable({ - ethereumSlotDuration: config.ethereumSlotDuration, - aztecSlotDuration: config.aztecSlotDuration, - l1PublishingTime, - p2pPropagationTime: config.attestationPropagationTime, - blockDurationMs: config.blockDurationMs, - enforce: config.enforceTimeTable ?? DefaultSequencerConfig.enforceTimeTable, - }).maxNumberOfBlocks; - - const multiplier = config.perBlockAllocationMultiplier ?? 
DefaultSequencerConfig.perBlockAllocationMultiplier; - - // Compute maxL2BlockGas - let maxL2BlockGas: number; - if (config.maxL2BlockGas !== undefined) { - if (config.maxL2BlockGas > rollupManaLimit) { - log.warn( - `Provided MAX_L2_BLOCK_GAS ${config.maxL2BlockGas} exceeds L1 rollup mana limit ${rollupManaLimit} (capping)`, - ); - maxL2BlockGas = rollupManaLimit; - } else { - maxL2BlockGas = config.maxL2BlockGas; - } - } else { - maxL2BlockGas = Math.min(rollupManaLimit, Math.ceil((rollupManaLimit / maxNumberOfBlocks) * multiplier)); +): { maxL2BlockGas: number | undefined; maxDABlockGas: number | undefined; maxTxsPerBlock: number | undefined } { + let maxL2BlockGas = config.maxL2BlockGas; + if (maxL2BlockGas !== undefined && maxL2BlockGas > rollupManaLimit) { + log.warn(`Provided MAX_L2_BLOCK_GAS ${maxL2BlockGas} exceeds rollup mana limit ${rollupManaLimit} (capping)`); + maxL2BlockGas = rollupManaLimit; } - // Compute maxDABlockGas - const daCheckpointLimit = MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT; - let maxDABlockGas: number; - if (config.maxDABlockGas !== undefined) { - if (config.maxDABlockGas > daCheckpointLimit) { - log.warn( - `Provided MAX_DA_BLOCK_GAS ${config.maxDABlockGas} exceeds DA checkpoint limit ${daCheckpointLimit} (capping)`, - ); - maxDABlockGas = daCheckpointLimit; - } else { - maxDABlockGas = config.maxDABlockGas; - } - } else { - maxDABlockGas = Math.min(daCheckpointLimit, Math.ceil((daCheckpointLimit / maxNumberOfBlocks) * multiplier)); + let maxDABlockGas = config.maxDABlockGas; + if (maxDABlockGas !== undefined && maxDABlockGas > MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT) { + log.warn( + `Provided MAX_DA_BLOCK_GAS ${maxDABlockGas} exceeds DA checkpoint limit ${MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT} (capping)`, + ); + maxDABlockGas = MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT; } - // Compute maxTxsPerBlock - const defaultMaxTxsPerBlock = 32; - let maxTxsPerBlock: number; - if (config.maxTxsPerBlock !== undefined) { - if (config.maxTxsPerCheckpoint 
!== undefined && config.maxTxsPerBlock > config.maxTxsPerCheckpoint) { - log.warn( - `Provided MAX_TX_PER_BLOCK ${config.maxTxsPerBlock} exceeds MAX_TX_PER_CHECKPOINT ${config.maxTxsPerCheckpoint} (capping)`, - ); - maxTxsPerBlock = config.maxTxsPerCheckpoint; - } else { - maxTxsPerBlock = config.maxTxsPerBlock; - } - } else if (config.maxTxsPerCheckpoint !== undefined) { - maxTxsPerBlock = Math.min( - config.maxTxsPerCheckpoint, - Math.ceil((config.maxTxsPerCheckpoint / maxNumberOfBlocks) * multiplier), + let maxTxsPerBlock = config.maxTxsPerBlock; + if ( + maxTxsPerBlock !== undefined && + config.maxTxsPerCheckpoint !== undefined && + maxTxsPerBlock > config.maxTxsPerCheckpoint + ) { + log.warn( + `Provided MAX_TX_PER_BLOCK ${maxTxsPerBlock} exceeds MAX_TX_PER_CHECKPOINT ${config.maxTxsPerCheckpoint} (capping)`, ); - } else { - maxTxsPerBlock = defaultMaxTxsPerBlock; + maxTxsPerBlock = config.maxTxsPerCheckpoint; } - log.info(`Computed block limits L2=${maxL2BlockGas} DA=${maxDABlockGas} maxTxs=${maxTxsPerBlock}`, { - maxL2BlockGas, - maxDABlockGas, - maxTxsPerBlock, - rollupManaLimit, - daCheckpointLimit, - maxNumberOfBlocks, - multiplier, - }); - - return { maxL2BlockGas, maxDABlockGas, maxTxsPerBlock, maxBlocksPerCheckpoint: maxNumberOfBlocks }; + return { maxL2BlockGas, maxDABlockGas, maxTxsPerBlock }; } diff --git a/yarn-project/sequencer-client/src/config.ts b/yarn-project/sequencer-client/src/config.ts index 577275cc0c09..68865991147d 100644 --- a/yarn-project/sequencer-client/src/config.ts +++ b/yarn-project/sequencer-client/src/config.ts @@ -122,9 +122,6 @@ export const sequencerConfigMappings: ConfigMappingsType = { 'Redistribute remaining checkpoint budget evenly across remaining blocks instead of allowing a single block to consume the entire remaining budget.', ...booleanConfigHelper(DefaultSequencerConfig.redistributeCheckpointBudget), }, - maxBlocksPerCheckpoint: { - description: 'Computed max number of blocks per checkpoint from timetable.', - }, 
coinbase: { env: 'COINBASE', parseEnv: (val: string) => (val ? EthAddress.fromString(val) : undefined), diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts index 69ba62b93dc1..bed1f5c8cca4 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts @@ -14,8 +14,8 @@ import type { L2Block, L2BlockSink, L2BlockSource } from '@aztec/stdlib/block'; import type { L1RollupConstants } from '@aztec/stdlib/epoch-helpers'; import { GasFees } from '@aztec/stdlib/gas'; import type { + BlockBuilderOptions, MerkleTreeWriteOperations, - PublicProcessorLimits, ResolvedSequencerConfig, WorldStateSynchronizer, } from '@aztec/stdlib/interfaces/server'; @@ -80,7 +80,7 @@ class TimingAwareMockCheckpointBuilder extends MockCheckpointBuilder { pendingTxs: Iterable | AsyncIterable, blockNumber: BlockNumber, timestamp: bigint, - opts: PublicProcessorLimits, + opts: BlockBuilderOptions, ): Promise { const startTime = this.getSecondsIntoSlot(); diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts index 1967bd75eed7..9e24324d937e 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts @@ -34,8 +34,8 @@ import { type Checkpoint, validateCheckpoint } from '@aztec/stdlib/checkpoint'; import { getSlotStartBuildTimestamp } from '@aztec/stdlib/epoch-helpers'; import { Gas } from '@aztec/stdlib/gas'; import { + type BlockBuilderOptions, InsufficientValidTxsError, - type PublicProcessorLimits, type ResolvedSequencerConfig, type WorldStateSynchronizer, } from '@aztec/stdlib/interfaces/server'; @@ -270,7 +270,8 @@ export class CheckpointProposalJob 
implements Traceable { this.setStateFn(SequencerState.ASSEMBLING_CHECKPOINT, this.slot); const checkpoint = await checkpointBuilder.completeCheckpoint(); - // Final validation round for the checkpoint before we propose it, just for safety + // Final validation: per-block limits are only checked if the operator set them explicitly. + // Otherwise, checkpoint-level budgets were already enforced by the redistribution logic. try { validateCheckpoint(checkpoint, { rollupManaLimit: this.l1Constants.rollupManaLimit, @@ -574,11 +575,11 @@ export class CheckpointProposalJob implements Traceable { ); this.setStateFn(SequencerState.CREATING_BLOCK, this.slot); - // Per-block limits derived at startup by computeBlockLimits(), further capped + // Per-block limits are operator overrides (from SEQ_MAX_L2_BLOCK_GAS etc.) further capped // by remaining checkpoint-level budgets inside CheckpointBuilder before each block is built. // minValidTxs is passed into the builder so it can reject the block *before* updating state. const minValidTxs = forceCreate ? 0 : (this.config.minValidTxsPerBlock ?? minTxs); - const blockBuilderOptions: PublicProcessorLimits & { minValidTxs?: number } = { + const blockBuilderOptions: BlockBuilderOptions = { maxTransactions: this.config.maxTxsPerBlock, maxBlockGas: this.config.maxL2BlockGas !== undefined || this.config.maxDABlockGas !== undefined @@ -587,6 +588,8 @@ export class CheckpointProposalJob implements Traceable { deadline: buildDeadline, isBuildingProposal: true, minValidTxs, + maxBlocksPerCheckpoint: this.timetable.maxNumberOfBlocks, + perBlockAllocationMultiplier: this.config.perBlockAllocationMultiplier, }; // Actually build the block by executing txs. 
The builder throws InsufficientValidTxsError @@ -657,7 +660,7 @@ export class CheckpointProposalJob implements Traceable { pendingTxs: AsyncIterable, blockNumber: BlockNumber, blockTimestamp: bigint, - blockBuilderOptions: PublicProcessorLimits & { minValidTxs?: number }, + blockBuilderOptions: BlockBuilderOptions, ) { try { const workTimer = new Timer(); diff --git a/yarn-project/sequencer-client/src/test/mock_checkpoint_builder.ts b/yarn-project/sequencer-client/src/test/mock_checkpoint_builder.ts index 27b6bf911b07..f0a6afca82cc 100644 --- a/yarn-project/sequencer-client/src/test/mock_checkpoint_builder.ts +++ b/yarn-project/sequencer-client/src/test/mock_checkpoint_builder.ts @@ -4,11 +4,11 @@ import { unfreeze } from '@aztec/foundation/types'; import { L2Block } from '@aztec/stdlib/block'; import { Checkpoint } from '@aztec/stdlib/checkpoint'; import type { + BlockBuilderOptions, FullNodeBlockBuilderConfig, ICheckpointBlockBuilder, ICheckpointsBuilder, MerkleTreeWriteOperations, - PublicProcessorLimits, } from '@aztec/stdlib/interfaces/server'; import { CheckpointHeader } from '@aztec/stdlib/rollup'; import { makeAppendOnlyTreeSnapshot } from '@aztec/stdlib/testing'; @@ -32,7 +32,7 @@ export class MockCheckpointBuilder implements ICheckpointBlockBuilder { public buildBlockCalls: Array<{ blockNumber: BlockNumber; timestamp: bigint; - opts: PublicProcessorLimits & { minValidTxs?: number }; + opts: BlockBuilderOptions; }> = []; /** Track all consumed transaction hashes across buildBlock calls */ public consumedTxHashes: Set = new Set(); @@ -74,7 +74,7 @@ export class MockCheckpointBuilder implements ICheckpointBlockBuilder { pendingTxs: Iterable | AsyncIterable, blockNumber: BlockNumber, timestamp: bigint, - opts: PublicProcessorLimits & { minValidTxs?: number }, + opts: BlockBuilderOptions, ): Promise { this.buildBlockCalls.push({ blockNumber, timestamp, opts }); diff --git a/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts 
b/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts index 7b8b7004db56..aa106b929d17 100644 --- a/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts +++ b/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts @@ -186,7 +186,7 @@ describe('EpochPruneWatcher', () => { [tx], block.header.globalVariables.blockNumber, block.header.globalVariables.timestamp, - {}, + { isBuildingProposal: false, minValidTxs: 0 }, ); }); @@ -246,7 +246,7 @@ describe('EpochPruneWatcher', () => { [tx], blockFromL1.header.globalVariables.blockNumber, blockFromL1.header.globalVariables.timestamp, - {}, + { isBuildingProposal: false, minValidTxs: 0 }, ); }); }); diff --git a/yarn-project/slasher/src/watchers/epoch_prune_watcher.ts b/yarn-project/slasher/src/watchers/epoch_prune_watcher.ts index 0de0f6b27f65..b0eeb2b80709 100644 --- a/yarn-project/slasher/src/watchers/epoch_prune_watcher.ts +++ b/yarn-project/slasher/src/watchers/epoch_prune_watcher.ts @@ -211,7 +211,10 @@ export class EpochPruneWatcher extends (EventEmitter as new () => WatcherEmitter } const gv = blockFromL1.header.globalVariables; - const { block, failedTxs, numTxs } = await checkpointBuilder.buildBlock(txs, gv.blockNumber, gv.timestamp, {}); + const { block, failedTxs, numTxs } = await checkpointBuilder.buildBlock(txs, gv.blockNumber, gv.timestamp, { + isBuildingProposal: false, + minValidTxs: 0, + }); if (numTxs !== txs.length) { // This should be detected by state mismatch, but this makes it easier to debug. 
diff --git a/yarn-project/stdlib/src/checkpoint/validate.ts b/yarn-project/stdlib/src/checkpoint/validate.ts index 1ceb9fa4c102..4bfceeaf3d59 100644 --- a/yarn-project/stdlib/src/checkpoint/validate.ts +++ b/yarn-project/stdlib/src/checkpoint/validate.ts @@ -36,7 +36,7 @@ export function validateCheckpoint( ): void { validateCheckpointStructure(checkpoint); validateCheckpointLimits(checkpoint, opts); - validateCheckpointBlocksGasLimits(checkpoint, opts); + validateCheckpointBlocksLimits(checkpoint, opts); } /** @@ -125,7 +125,7 @@ export function validateCheckpointStructure(checkpoint: Checkpoint): void { } /** Validates checkpoint blocks gas limits */ -function validateCheckpointBlocksGasLimits( +function validateCheckpointBlocksLimits( checkpoint: Checkpoint, opts: { maxL2BlockGas?: number; diff --git a/yarn-project/stdlib/src/interfaces/block-builder.ts b/yarn-project/stdlib/src/interfaces/block-builder.ts index aa2857072c92..6a2f49bb4209 100644 --- a/yarn-project/stdlib/src/interfaces/block-builder.ts +++ b/yarn-project/stdlib/src/interfaces/block-builder.ts @@ -35,7 +35,8 @@ export interface IBlockFactory extends ProcessedTxHandler { setBlockCompleted(expectedBlockHeader?: BlockHeader): Promise; } -export interface PublicProcessorLimits { +/** Limits passed to the public processor for tx processing within a block. */ +export type PublicProcessorLimits = { /** Maximum number of txs to process. */ maxTransactions?: number; /** L2 and DA gas limits. */ @@ -46,7 +47,30 @@ export interface PublicProcessorLimits { deadline?: Date; /** Whether this processor is building a proposal (as opposed to re-executing one). Skipping txs due to gas or blob limits is only done during proposal building. */ isBuildingProposal?: boolean; -} +}; + +/** Base fields shared by both proposer and validator block builder options. */ +type BlockBuilderOptionsBase = PublicProcessorLimits & { + /** Minimum number of successfully processed txs required. Block is rejected if fewer succeed. 
*/ + minValidTxs: number; +}; + +/** Proposer mode: redistribution params are required. */ +type ProposerBlockBuilderOptions = BlockBuilderOptionsBase & { + isBuildingProposal: true; + /** Maximum number of blocks per checkpoint, derived from the timetable. */ + maxBlocksPerCheckpoint: number; + /** Per-block gas budget multiplier. Budget = (remaining / remainingBlocks) * multiplier. */ + perBlockAllocationMultiplier: number; +}; + +/** Validator mode: no redistribution params needed. */ +type ValidatorBlockBuilderOptions = BlockBuilderOptionsBase & { + isBuildingProposal: false; +}; + +/** Options for building a block within a checkpoint. When proposing, redistribution params are required. */ +export type BlockBuilderOptions = ProposerBlockBuilderOptions | ValidatorBlockBuilderOptions; export interface PublicProcessorValidator { preprocessValidator?: TxValidator; @@ -64,9 +88,6 @@ export type FullNodeBlockBuilderConfig = Pick; export const FullNodeBlockBuilderConfigKeys: (keyof FullNodeBlockBuilderConfig)[] = [ @@ -82,9 +103,6 @@ export const FullNodeBlockBuilderConfigKeys: (keyof FullNodeBlockBuilderConfig)[ 'maxL2BlockGas', 'maxDABlockGas', 'rollupManaLimit', - 'redistributeCheckpointBudget', - 'perBlockAllocationMultiplier', - 'maxBlocksPerCheckpoint', ] as const; /** Thrown when the number of successfully processed transactions is below the required minimum. 
*/ @@ -115,7 +133,7 @@ export interface ICheckpointBlockBuilder { pendingTxs: Iterable | AsyncIterable, blockNumber: BlockNumber, timestamp: bigint, - opts: PublicProcessorLimits & { minValidTxs?: number }, + opts: BlockBuilderOptions, ): Promise; } diff --git a/yarn-project/stdlib/src/interfaces/configs.ts b/yarn-project/stdlib/src/interfaces/configs.ts index b986445a4c6a..55f9142aca33 100644 --- a/yarn-project/stdlib/src/interfaces/configs.ts +++ b/yarn-project/stdlib/src/interfaces/configs.ts @@ -29,8 +29,6 @@ export interface SequencerConfig { perBlockAllocationMultiplier?: number; /** Redistribute remaining checkpoint budget evenly across remaining blocks instead of allowing a single block to consume the entire remaining budget. */ redistributeCheckpointBudget?: boolean; - /** Computed max number of blocks per checkpoint from timetable. */ - maxBlocksPerCheckpoint?: number; /** Recipient of block reward. */ coinbase?: EthAddress; /** Address to receive fees. */ @@ -99,7 +97,6 @@ export const SequencerConfigSchema = zodFor()( maxDABlockGas: z.number().optional(), perBlockAllocationMultiplier: z.number().optional(), redistributeCheckpointBudget: z.boolean().optional(), - maxBlocksPerCheckpoint: z.number().optional(), coinbase: schemas.EthAddress.optional(), feeRecipient: schemas.AztecAddress.optional(), acvmWorkingDirectory: z.string().optional(), @@ -148,9 +145,7 @@ type SequencerConfigOptionalKeys = | 'maxTxsPerCheckpoint' | 'maxL2BlockGas' | 'maxDABlockGas' - | 'perBlockAllocationMultiplier' - | 'redistributeCheckpointBudget' - | 'maxBlocksPerCheckpoint'; + | 'redistributeCheckpointBudget'; export type ResolvedSequencerConfig = Prettify< Required> & Pick diff --git a/yarn-project/validator-client/README.md b/yarn-project/validator-client/README.md index 0974b95f94b1..5f1ca28dcf5e 100644 --- a/yarn-project/validator-client/README.md +++ b/yarn-project/validator-client/README.md @@ -237,13 +237,11 @@ L1 enforces gas and blob capacity per checkpoint. 
The node enforces these during ### Per-block budgets -Per-block budgets prevent one block from consuming the entire checkpoint budget. +Per-block budgets prevent one block from consuming the entire checkpoint budget. The checkpoint builder dynamically computes per-block limits before each block based on the remaining checkpoint budget and the number of remaining blocks. -**Proposer**: `computeBlockLimits()` derives budgets at startup as `min(checkpointLimit, ceil(checkpointLimit / maxBlocks * multiplier))`, where `maxBlocks` comes from the timetable and `multiplier` defaults to 1.2. The multiplier greater than 1 allows early blocks to use more than their even share of the checkpoint budget, since different blocks hit different limit dimensions (L2 gas, DA gas, blob fields) — a strict even split would waste capacity. Operators can override via `SEQ_MAX_L2_BLOCK_GAS` / `SEQ_MAX_DA_BLOCK_GAS` / `SEQ_MAX_TX_PER_BLOCK` (capped at checkpoint limits). Per-block TX limits follow the same derivation pattern when `SEQ_MAX_TX_PER_CHECKPOINT` is set. +**Proposer**: When building a proposal (`isBuildingProposal: true`), the `CheckpointProposalJob` passes `maxBlocksPerCheckpoint` (from the timetable) and `perBlockAllocationMultiplier` (default 1.2) via opts to `CheckpointBuilder.buildBlock`. The builder computes a fair share as `min(perBlockLimit, ceil(remainingBudget / remainingBlocks * multiplier), remainingBudget)`. The multiplier greater than 1 allows early blocks to use more than their even share, since different blocks hit different limit dimensions (L2 gas, DA gas, blob fields) — a strict even split would waste capacity. As prior blocks consume budget, later blocks see tightened limits. This applies to all four dimensions (L2 gas, DA gas, blob fields, transaction count). 
Operators can set hard per-block caps via `SEQ_MAX_L2_BLOCK_GAS` / `SEQ_MAX_DA_BLOCK_GAS` / `SEQ_MAX_TX_PER_BLOCK` (capped at checkpoint limits at startup); these act as additional upper bounds alongside the redistribution. -**Validator**: Optionally enforces per-block limits via `VALIDATOR_MAX_L2_BLOCK_GAS`, `VALIDATOR_MAX_DA_BLOCK_GAS`, and `VALIDATOR_MAX_TX_PER_BLOCK`. When set, these are passed to `buildBlock` during re-execution and to `validateCheckpoint` for final validation. When unset, no per-block limit is enforced for that dimension (checkpoint-level protocol limits still apply). These are independent of the `SEQ_` vars so operators can tune proposer and validation limits separately. - -**Checkpoint-level capping**: `CheckpointBuilder.capLimitsByCheckpointBudgets()` always runs before tx processing, capping per-block limits by the remaining checkpoint budget. When `SEQ_REDISTRIBUTE_CHECKPOINT_BUDGET` is enabled (default: true), the remaining budget is distributed evenly across remaining blocks with the multiplier applied: `min(perBlockLimit, ceil(remainingBudget / remainingBlocks * multiplier), remainingBudget)`. This prevents early blocks from consuming the entire checkpoint budget, producing smoother distribution. When disabled, each block can consume up to the full remaining budget, ie caps by `checkpointBudget - sum(used by prior blocks)`. This applies to all four dimensions (L2 gas, DA gas, blob fields, transaction count). Validators always cap by the total remaining. +**Validator**: When re-executing a proposal (`isBuildingProposal` unset), `capLimitsByCheckpointBudgets` only caps by the per-block limit and the total remaining checkpoint budget — no redistribution or multiplier is applied. This avoids false rejections due to differences between proposer and validator fair-share calculations. Validators can optionally set hard per-block limits via `VALIDATOR_MAX_L2_BLOCK_GAS`, `VALIDATOR_MAX_DA_BLOCK_GAS`, and `VALIDATOR_MAX_TX_PER_BLOCK`. 
When unset, no per-block limit is enforced (checkpoint-level protocol limits still apply). These are independent of the `SEQ_` vars so operators can tune proposer and validation limits separately. ### Per-transaction enforcement @@ -255,12 +253,12 @@ Per-block budgets prevent one block from consuming the entire checkpoint budget. | Variable | Default | Description | | --- | --- | --- | -| `SEQ_MAX_L2_BLOCK_GAS` | *auto* | Per-block L2 gas. Auto-derived from `rollupManaLimit / maxBlocks * multiplier`. | -| `SEQ_MAX_DA_BLOCK_GAS` | *auto* | Per-block DA gas. Auto-derived from checkpoint DA limit / maxBlocks * multiplier. | -| `SEQ_MAX_TX_PER_BLOCK` | *none* | Per-block tx count. If `SEQ_MAX_TX_PER_CHECKPOINT` is set and per-block is not, derived as `ceil(checkpointLimit / maxBlocks * multiplier)`. | -| `SEQ_MAX_TX_PER_CHECKPOINT` | *none* | Total txs across all blocks in a checkpoint. When set, per-block tx limit is derived from it (unless explicitly overridden) and checkpoint-level capping is enforced. | -| `SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER` | 1.2 | Multiplier for per-block budget computation. | -| `SEQ_REDISTRIBUTE_CHECKPOINT_BUDGET` | true | Redistribute remaining checkpoint budget evenly across remaining blocks instead of allowing one block to consume it all. | +| `SEQ_MAX_L2_BLOCK_GAS` | *none* | Hard per-block L2 gas cap. Capped at `rollupManaLimit` at startup. When unset, redistribution dynamically computes per-block limits. | +| `SEQ_MAX_DA_BLOCK_GAS` | *none* | Hard per-block DA gas cap. Capped at `MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT` at startup. When unset, redistribution handles it. | +| `SEQ_MAX_TX_PER_BLOCK` | *none* | Hard per-block tx count cap. Capped at `SEQ_MAX_TX_PER_CHECKPOINT` at startup (if set). | +| `SEQ_MAX_TX_PER_CHECKPOINT` | *none* | Total txs across all blocks in a checkpoint. When set, checkpoint-level capping and redistribution are enforced for tx count. 
| +| `SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER` | 1.2 | Multiplier for per-block budget redistribution. Passed via opts to the checkpoint builder during proposal building. | +| `SEQ_REDISTRIBUTE_CHECKPOINT_BUDGET` | true | Legacy flag; redistribution is now always active during proposal building and inactive during validation. | | `VALIDATOR_MAX_L2_BLOCK_GAS` | *none* | Per-block L2 gas limit for validation. Proposals exceeding this are rejected. | | `VALIDATOR_MAX_DA_BLOCK_GAS` | *none* | Per-block DA gas limit for validation. Proposals exceeding this are rejected. | | `VALIDATOR_MAX_TX_PER_BLOCK` | *none* | Per-block tx count limit for validation. Proposals exceeding this are rejected. | diff --git a/yarn-project/validator-client/src/block_proposal_handler.ts b/yarn-project/validator-client/src/block_proposal_handler.ts index 1582c74b334c..642ec9410144 100644 --- a/yarn-project/validator-client/src/block_proposal_handler.ts +++ b/yarn-project/validator-client/src/block_proposal_handler.ts @@ -569,6 +569,8 @@ export class BlockProposalHandler { ? new Gas(this.config.validateMaxDABlockGas ?? Infinity, this.config.validateMaxL2BlockGas ?? 
Infinity) : undefined; const result = await checkpointBuilder.buildBlock(txs, blockNumber, blockHeader.globalVariables.timestamp, { + isBuildingProposal: false, + minValidTxs: 0, deadline, expectedEndState: blockHeader.state, maxTransactions: this.config.validateMaxTxsPerBlock, diff --git a/yarn-project/validator-client/src/checkpoint_builder.test.ts b/yarn-project/validator-client/src/checkpoint_builder.test.ts index f8b3ce509dc3..0a096e3dadf3 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.test.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.test.ts @@ -16,6 +16,7 @@ import { L2Block } from '@aztec/stdlib/block'; import type { ContractDataSource } from '@aztec/stdlib/contract'; import { Gas, GasFees } from '@aztec/stdlib/gas'; import { + type BlockBuilderOptions, type FullNodeBlockBuilderConfig, InsufficientValidTxsError, type MerkleTreeWriteOperations, @@ -69,7 +70,7 @@ describe('CheckpointBuilder', () => { } /** Expose for testing */ - public testCapLimits(opts: PublicProcessorLimits) { + public testCapLimits(opts: BlockBuilderOptions) { return this.capLimitsByCheckpointBudgets(opts); } } @@ -93,7 +94,6 @@ describe('CheckpointBuilder', () => { l1ChainId: 1, rollupVersion: 1, rollupManaLimit: 200_000_000, - redistributeCheckpointBudget: false, ...overrideConfig, }; @@ -107,6 +107,28 @@ describe('CheckpointBuilder', () => { ); } + /** Default opts for validator-mode tests (no redistribution). */ + function validatorOpts(overrides?: Partial & { minValidTxs?: number }): BlockBuilderOptions { + return { ...overrides, isBuildingProposal: false, minValidTxs: overrides?.minValidTxs ?? 0 }; + } + + /** Default opts for proposer-mode tests (with redistribution). 
*/ + function proposerOpts( + overrides?: Partial & { + minValidTxs?: number; + maxBlocksPerCheckpoint?: number; + perBlockAllocationMultiplier?: number; + }, + ): BlockBuilderOptions { + return { + ...overrides, + isBuildingProposal: true, + maxBlocksPerCheckpoint: overrides?.maxBlocksPerCheckpoint ?? 5, + perBlockAllocationMultiplier: overrides?.perBlockAllocationMultiplier ?? 1.2, + minValidTxs: overrides?.minValidTxs ?? 0, + }; + } + beforeEach(() => { lightweightCheckpointBuilder = mock(); Object.defineProperty(lightweightCheckpointBuilder, 'checkpointNumber', { value: checkpointNumber }); @@ -185,7 +207,7 @@ describe('CheckpointBuilder', () => { [], // debugLogs ]); - const result = await checkpointBuilder.buildBlock([], blockNumber, 1000n); + const result = await checkpointBuilder.buildBlock([], blockNumber, 1000n, validatorOpts()); expect(result.block).toBe(expectedBlock); expect(result.numTxs).toBe(1); @@ -206,7 +228,7 @@ describe('CheckpointBuilder', () => { [], // debugLogs ]); - const result = await checkpointBuilder.buildBlock([], blockNumber, 1000n, { minValidTxs: 0 }); + const result = await checkpointBuilder.buildBlock([], blockNumber, 1000n, validatorOpts({ minValidTxs: 0 })); expect(result.block).toBe(expectedBlock); expect(result.numTxs).toBe(0); @@ -223,9 +245,9 @@ describe('CheckpointBuilder', () => { [], // debugLogs ]); - await expect(checkpointBuilder.buildBlock([], blockNumber, 1000n, { minValidTxs: 1 })).rejects.toThrow( - InsufficientValidTxsError, - ); + await expect( + checkpointBuilder.buildBlock([], blockNumber, 1000n, validatorOpts({ minValidTxs: 1 })), + ).rejects.toThrow(InsufficientValidTxsError); expect(lightweightCheckpointBuilder.addBlock).not.toHaveBeenCalled(); }); @@ -243,7 +265,7 @@ describe('CheckpointBuilder', () => { ]); const err = await checkpointBuilder - .buildBlock([], blockNumber, 1000n, { minValidTxs: 2 }) + .buildBlock([], blockNumber, 1000n, validatorOpts({ minValidTxs: 2 })) .catch((e: unknown) => e); 
expect(err).toBeInstanceOf(InsufficientValidTxsError); @@ -258,14 +280,14 @@ describe('CheckpointBuilder', () => { processor.process.mockResolvedValue([[], [], [], [], []]); - const result = await checkpointBuilder.buildBlock([], blockNumber, 1000n); + const result = await checkpointBuilder.buildBlock([], blockNumber, 1000n, validatorOpts()); expect(result.numTxs).toBe(0); expect(lightweightCheckpointBuilder.addBlock).toHaveBeenCalled(); }); }); - describe('capLimitsByCheckpointBudgets', () => { + describe('capLimitsByCheckpointBudgets (validator mode)', () => { const totalBlobCapacity = BLOBS_PER_CHECKPOINT * FIELDS_PER_BLOB - NUM_CHECKPOINT_END_MARKER_FIELDS; const firstBlockEndOverhead = getNumBlockEndBlobFields(true); const nonFirstBlockEndOverhead = getNumBlockEndBlobFields(false); @@ -279,8 +301,9 @@ describe('CheckpointBuilder', () => { createMockBlock({ manaUsed: priorManaUsed, txBlobFields: [10], blockBlobFieldCount: 20 }), ]); - const opts: PublicProcessorLimits = { maxBlockGas: new Gas(Infinity, 800_000) }; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits( + validatorOpts({ maxBlockGas: new Gas(Infinity, 800_000) }), + ); // Remaining mana = 1_000_000 - 600_000 = 400_000. Per-block = 800_000. Capped to 400_000. expect(capped.maxBlockGas!.l2Gas).toBe(400_000); @@ -295,8 +318,9 @@ describe('CheckpointBuilder', () => { createMockBlock({ manaUsed: priorManaUsed, txBlobFields: [10], blockBlobFieldCount: 20 }), ]); - const opts: PublicProcessorLimits = { maxBlockGas: new Gas(Infinity, 500_000) }; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits( + validatorOpts({ maxBlockGas: new Gas(Infinity, 500_000) }), + ); // Remaining mana = 800_000. Per-block = 500_000. Uses 500_000. 
expect(capped.maxBlockGas!.l2Gas).toBe(500_000); @@ -309,8 +333,9 @@ describe('CheckpointBuilder', () => { createMockBlock({ manaUsed: 100_000, txBlobFields: [10], blockBlobFieldCount: 20 }), ]); - const opts: PublicProcessorLimits = { maxBlockGas: new Gas(Infinity, 500_000) }; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits( + validatorOpts({ maxBlockGas: new Gas(Infinity, 500_000) }), + ); // Remaining mana = 200_000_000 - 100_000 >> 500_000, so per-block limit is used expect(capped.maxBlockGas!.l2Gas).toBe(500_000); @@ -327,8 +352,9 @@ describe('CheckpointBuilder', () => { ]); const perBlockDAGas = 500_000; - const opts: PublicProcessorLimits = { maxBlockGas: new Gas(perBlockDAGas, Infinity) }; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits( + validatorOpts({ maxBlockGas: new Gas(perBlockDAGas, Infinity) }), + ); // Remaining DA gas = MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT - priorDAGas const expectedRemainingDAGas = MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT - priorDAGas; @@ -344,8 +370,7 @@ describe('CheckpointBuilder', () => { createMockBlock({ manaUsed: priorManaUsed, txBlobFields: [100], blockBlobFieldCount: 110 }), ]); - const opts: PublicProcessorLimits = {}; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(validatorOpts()); expect(capped.maxBlockGas!.l2Gas).toBe(400_000); expect(capped.maxBlockGas!.daGas).toBe(MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT - 100 * DA_GAS_PER_FIELD); @@ -359,8 +384,9 @@ describe('CheckpointBuilder', () => { createMockBlock({ manaUsed: 0, txBlobFields: [], blockBlobFieldCount }), ]); - const opts: PublicProcessorLimits = { maxBlobFields: 99999 }; - const capped = (checkpointBuilder as 
TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits( + validatorOpts({ maxBlobFields: 99999 }), + ); // Second block: remaining = totalBlobCapacity - 100, minus non-first block end overhead const expectedMaxBlobFields = totalBlobCapacity - blockBlobFieldCount - nonFirstBlockEndOverhead; @@ -372,8 +398,7 @@ describe('CheckpointBuilder', () => { lightweightCheckpointBuilder.getBlocks.mockReturnValue([]); - const opts: PublicProcessorLimits = {}; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(validatorOpts()); // First block: full capacity minus first block end overhead const expectedMaxBlobFields = totalBlobCapacity - firstBlockEndOverhead; @@ -388,8 +413,9 @@ describe('CheckpointBuilder', () => { const block2 = createMockBlock({ manaUsed: 200_000, txBlobFields: [150], blockBlobFieldCount: 160 }); lightweightCheckpointBuilder.getBlocks.mockReturnValue([block1, block2]); - const opts: PublicProcessorLimits = { maxBlockGas: new Gas(Infinity, Infinity) }; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits( + validatorOpts({ maxBlockGas: new Gas(Infinity, Infinity) }), + ); // Remaining mana = 1_000_000 - 300_000 - 200_000 = 500_000 expect(capped.maxBlockGas!.l2Gas).toBe(500_000); @@ -413,7 +439,7 @@ describe('CheckpointBuilder', () => { const block1 = createMockBlock({ manaUsed: 0, txBlobFields: [], blockBlobFieldCount: block1BlobFieldCount }); lightweightCheckpointBuilder.getBlocks.mockReturnValue([block1]); - const afterOneBlock = (checkpointBuilder as TestCheckpointBuilder).testCapLimits({}); + const afterOneBlock = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(validatorOpts()); const expectedAfterOneBlock = totalBlobCapacity - block1BlobFieldCount - 
nonFirstBlockEndOverhead; expect(afterOneBlock.maxBlobFields).toBe(expectedAfterOneBlock); @@ -422,7 +448,7 @@ describe('CheckpointBuilder', () => { const block2 = createMockBlock({ manaUsed: 0, txBlobFields: [], blockBlobFieldCount: block2BlobFieldCount }); lightweightCheckpointBuilder.getBlocks.mockReturnValue([block1, block2]); - const afterTwoBlocks = (checkpointBuilder as TestCheckpointBuilder).testCapLimits({}); + const afterTwoBlocks = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(validatorOpts()); const expectedAfterTwoBlocks = totalBlobCapacity - block1BlobFieldCount - block2BlobFieldCount - nonFirstBlockEndOverhead; @@ -441,8 +467,7 @@ describe('CheckpointBuilder', () => { createMockBlock({ manaUsed: 0, txBlobFields: [10, 10, 10], blockBlobFieldCount: 40 }), ]); - const opts: PublicProcessorLimits = { maxTransactions: 15 }; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(validatorOpts({ maxTransactions: 15 })); // Remaining txs = 20 - 3 = 17. Per-block = 15. Capped to min(15, 17) = 15. expect(capped.maxTransactions).toBe(15); @@ -457,8 +482,7 @@ describe('CheckpointBuilder', () => { createMockBlock({ manaUsed: 0, txBlobFields: [10, 10, 10, 10], blockBlobFieldCount: 50 }), ]); - const opts: PublicProcessorLimits = { maxTransactions: 5 }; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(validatorOpts({ maxTransactions: 5 })); // Remaining txs = 10 - 8 = 2. Per-block = 5. Capped to min(5, 2) = 2. 
expect(capped.maxTransactions).toBe(2); @@ -472,8 +496,7 @@ describe('CheckpointBuilder', () => { createMockBlock({ manaUsed: 0, txBlobFields: [10, 10, 10, 10, 10], blockBlobFieldCount: 60 }), ]); - const opts: PublicProcessorLimits = {}; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(validatorOpts()); // Remaining txs = 15 - 5 = 10 expect(capped.maxTransactions).toBe(10); @@ -484,8 +507,7 @@ describe('CheckpointBuilder', () => { lightweightCheckpointBuilder.getBlocks.mockReturnValue([]); - const opts: PublicProcessorLimits = { maxTransactions: 99 }; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(validatorOpts({ maxTransactions: 99 })); // Passthrough: maxTransactions = 99 expect(capped.maxTransactions).toBe(99); @@ -496,50 +518,174 @@ describe('CheckpointBuilder', () => { lightweightCheckpointBuilder.getBlocks.mockReturnValue([]); - const opts: PublicProcessorLimits = {}; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(validatorOpts()); // Neither config nor caller sets it, so it remains undefined expect(capped.maxTransactions).toBeUndefined(); }); - }); - describe('redistributeCheckpointBudget', () => { - it('evenly splits budget with multiplier=1', () => { + it('does not apply redistribution multiplier in validator mode', () => { const rollupManaLimit = 1_000_000; - setupBuilder({ - redistributeCheckpointBudget: true, - perBlockAllocationMultiplier: 1, - maxBlocksPerCheckpoint: 5, - rollupManaLimit, - }); + setupBuilder({ rollupManaLimit }); - lightweightCheckpointBuilder.getBlocks.mockReturnValue([]); + lightweightCheckpointBuilder.getBlocks.mockReturnValue([ + createMockBlock({ manaUsed: 200_000, txBlobFields: [10], 
blockBlobFieldCount: 20 }), + ]); - const opts: PublicProcessorLimits = {}; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + // Validator mode should not redistribute — just remaining budget + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(validatorOpts()); - // Fair share = ceil(1_000_000 / 5 * 1) = 200_000 - expect(capped.maxBlockGas!.l2Gas).toBe(200_000); + // No fair share, just remaining budget = 800_000 + expect(capped.maxBlockGas!.l2Gas).toBe(800_000); }); + }); - it('computes fair share with multiplier=1.2, 5 max blocks, 2 existing', () => { - const rollupManaLimit = 1_000_000; - setupBuilder({ - redistributeCheckpointBudget: true, - perBlockAllocationMultiplier: 1.2, - maxBlocksPerCheckpoint: 5, - rollupManaLimit, + describe('multi-block gas redistribution through buildBlock', () => { + // This test exercises the production code path where: + // 1. CheckpointProposalJob passes maxBlocksPerCheckpoint and perBlockAllocationMultiplier via opts + // 2. 
CheckpointBuilder.capLimitsByCheckpointBudgets redistributes remaining budget across remaining blocks + + const rollupManaLimit = 1_000_000; + const maxBlocks = 5; + const multiplier = 1.2; + + // Opts that mimic what CheckpointProposalJob passes: operator per-block gas limit + redistribution params + const staticPerBlockL2Gas = Math.min(rollupManaLimit, Math.ceil((rollupManaLimit / maxBlocks) * multiplier)); + // = min(1_000_000, 240_000) = 240_000 + + const blockBuilderOpts: BlockBuilderOptions = proposerOpts({ + maxBlockGas: new Gas(Infinity, staticPerBlockL2Gas), + maxBlocksPerCheckpoint: maxBlocks, + perBlockAllocationMultiplier: multiplier, + }); + + it('tightens per-block L2 gas limit when prior blocks consumed more than their even share', async () => { + setupBuilder({ rollupManaLimit }); + + // Simulate: blocks 0 and 1 already built, each using 300k mana (above even share of 200k) + lightweightCheckpointBuilder.getBlocks.mockReturnValue([ + createMockBlock({ manaUsed: 300_000, txBlobFields: [10], blockBlobFieldCount: 20 }), + createMockBlock({ manaUsed: 300_000, txBlobFields: [10], blockBlobFieldCount: 20 }), + ]); + + const expectedBlock = await L2Block.random(blockNumber); + lightweightCheckpointBuilder.addBlock.mockResolvedValue({ block: expectedBlock, timings: {} }); + processor.process.mockResolvedValue([[{ hash: Fr.random() } as unknown as ProcessedTx], [], [], [], []]); + + // Build block 2 + await checkpointBuilder.buildBlock([], blockNumber, 1000n, blockBuilderOpts); + + // Remaining mana = 1M - 600k = 400k, with 3 blocks remaining (out of 5). 
+ // Expected fair share = ceil(400k / 3 * 1.2) = ceil(160_000) = 160_000 + // Expected cap = min(staticPerBlockL2Gas=240k, fairShare=160k, remaining=400k) = 160_000 + const processCall = processor.process.mock.calls[0]; + const limitsPassedToProcessor = processCall[1] as PublicProcessorLimits; + expect(limitsPassedToProcessor.maxBlockGas!.l2Gas).toBe(160_000); + }); + + it('progressively tightens limits across all blocks in checkpoint', async () => { + setupBuilder({ rollupManaLimit }); + + const expectedBlock = await L2Block.random(blockNumber); + lightweightCheckpointBuilder.addBlock.mockResolvedValue({ block: expectedBlock, timings: {} }); + processor.process.mockResolvedValue([[{ hash: Fr.random() } as unknown as ProcessedTx], [], [], [], []]); + + const capturedL2GasLimits: number[] = []; + + // Build 5 blocks. Each block uses 200k mana (its even share). + for (let i = 0; i < maxBlocks; i++) { + // Set up prior blocks (each used 200k mana) + const priorBlocks = Array.from({ length: i }, () => + createMockBlock({ manaUsed: 200_000, txBlobFields: [10], blockBlobFieldCount: 20 }), + ); + lightweightCheckpointBuilder.getBlocks.mockReturnValue(priorBlocks); + + await checkpointBuilder.buildBlock([], BlockNumber(blockNumber + i), 1000n, blockBuilderOpts); + + const processCall = processor.process.mock.calls[i]; + const limits = processCall[1] as PublicProcessorLimits; + capturedL2GasLimits.push(limits.maxBlockGas!.l2Gas); + } + + // With correct redistribution (5 blocks, each using 200k mana): + // Block 0: remaining=1M, remainingBlocks=5, fairShare=ceil(1M/5*1.2)=240k, cap=min(240k,240k,1M)=240k + // Block 1: remaining=800k, remainingBlocks=4, fairShare=ceil(800k/4*1.2)=240k, cap=min(240k,240k,800k)=240k + // Block 2: remaining=600k, remainingBlocks=3, fairShare=ceil(600k/3*1.2)=240k, cap=min(240k,240k,600k)=240k + // Block 3: remaining=400k, remainingBlocks=2, fairShare=ceil(400k/2*1.2)=240k, cap=min(240k,240k,400k)=240k + // Block 4: remaining=200k, 
remainingBlocks=1, fairShare=ceil(200k/1*1.2)=240k, cap=min(240k,240k,200k)=200k + expect(capturedL2GasLimits).toEqual([240_000, 240_000, 240_000, 240_000, 200_000]); + }); + + it('prevents block starvation when early blocks are heavy', async () => { + setupBuilder({ rollupManaLimit }); + + const expectedBlock = await L2Block.random(blockNumber); + lightweightCheckpointBuilder.addBlock.mockResolvedValue({ block: expectedBlock, timings: {} }); + processor.process.mockResolvedValue([[{ hash: Fr.random() } as unknown as ProcessedTx], [], [], [], []]); + + const capturedL2GasLimits: number[] = []; + + // Build 5 blocks. First 2 blocks use 300k each (heavy), rest use whatever they get. + const manaUsedPerBlock = [300_000, 300_000, 0, 0, 0]; // only first 2 are "used" as prior blocks + + for (let i = 0; i < maxBlocks; i++) { + const priorBlocks = Array.from({ length: i }, (_, j) => + createMockBlock({ manaUsed: manaUsedPerBlock[j], txBlobFields: [10], blockBlobFieldCount: 20 }), + ); + lightweightCheckpointBuilder.getBlocks.mockReturnValue(priorBlocks); + + await checkpointBuilder.buildBlock([], BlockNumber(blockNumber + i), 1000n, blockBuilderOpts); + + const processCall = processor.process.mock.calls[i]; + const limits = processCall[1] as PublicProcessorLimits; + capturedL2GasLimits.push(limits.maxBlockGas!.l2Gas); + } + + // With correct redistribution and heavy early blocks (300k each): + // Block 0: remaining=1M, remainingBlocks=5, fairShare=ceil(1M/5*1.2)=240k, cap=min(240k,240k,1M)=240k + // Block 1: remaining=700k, remainingBlocks=4, fairShare=ceil(700k/4*1.2)=210k, cap=min(240k,210k,700k)=210k + // Block 2: remaining=400k, remainingBlocks=3, fairShare=ceil(400k/3*1.2)=160k, cap=min(240k,160k,400k)=160k + // Block 3: remaining=400k, remainingBlocks=2, fairShare=ceil(400k/2*1.2)=240k, cap=min(240k,240k,400k)=240k + // Block 4: remaining=400k, remainingBlocks=1, fairShare=ceil(400k/1*1.2)=480k, cap=min(240k,480k,400k)=240k + 
expect(capturedL2GasLimits[0]).toBe(240_000); // Block 0: full fair share + expect(capturedL2GasLimits[1]).toBe(210_000); // Block 1: tightened by redistribution + expect(capturedL2GasLimits[2]).toBe(160_000); // Block 2: tightened further + expect(capturedL2GasLimits[3]).toBe(240_000); // Block 3: relaxed (blocks 2-3 used nothing) + expect(capturedL2GasLimits[4]).toBe(240_000); // Block 4: still has plenty of budget + }); + + it('explicit per-block limit wins over redistribution when tighter', async () => { + setupBuilder({ rollupManaLimit }); + + const expectedBlock = await L2Block.random(blockNumber); + lightweightCheckpointBuilder.addBlock.mockResolvedValue({ block: expectedBlock, timings: {} }); + processor.process.mockResolvedValue([[{ hash: Fr.random() } as unknown as ProcessedTx], [], [], [], []]); + + // Explicit per-block limit (100k) is TIGHTER than redistribution. + // No prior blocks: remaining=1M, 5 remaining, fairShare=ceil(1M/5*1.2)=240k. + // cap = min(100k, 240k, 1M) = 100k — explicit wins. 
+ lightweightCheckpointBuilder.getBlocks.mockReturnValue([]); + await checkpointBuilder.buildBlock([], blockNumber, 1000n, { + ...blockBuilderOpts, + maxBlockGas: new Gas(Infinity, 100_000), }); + expect((processor.process.mock.calls[0][1] as PublicProcessorLimits).maxBlockGas!.l2Gas).toBe(100_000); + }); + }); + + describe('proposer redistribution via opts', () => { + it('computes fair share with multiplier across remaining blocks', () => { + const rollupManaLimit = 1_000_000; + setupBuilder({ rollupManaLimit }); + // 2 existing blocks used 400_000 mana total lightweightCheckpointBuilder.getBlocks.mockReturnValue([ createMockBlock({ manaUsed: 200_000, txBlobFields: [10], blockBlobFieldCount: 20 }), createMockBlock({ manaUsed: 200_000, txBlobFields: [10], blockBlobFieldCount: 20 }), ]); - const opts: PublicProcessorLimits = {}; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(proposerOpts()); // remainingMana = 600_000, remainingBlocks = 3, multiplier = 1.2 // fairShare = ceil(600_000 / 3 * 1.2) = ceil(240_000) = 240_000 @@ -548,12 +694,7 @@ describe('CheckpointBuilder', () => { it('gives all remaining budget to last block (remainingBlocks=1)', () => { const rollupManaLimit = 1_000_000; - setupBuilder({ - redistributeCheckpointBudget: true, - perBlockAllocationMultiplier: 1.2, - maxBlocksPerCheckpoint: 3, - rollupManaLimit, - }); + setupBuilder({ rollupManaLimit }); // 2 existing blocks used 800_000 total lightweightCheckpointBuilder.getBlocks.mockReturnValue([ @@ -561,44 +702,23 @@ describe('CheckpointBuilder', () => { createMockBlock({ manaUsed: 400_000, txBlobFields: [10], blockBlobFieldCount: 20 }), ]); - const opts: PublicProcessorLimits = {}; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits( + proposerOpts({ maxBlocksPerCheckpoint: 3 }), + ); 
// remainingMana = 200_000, remainingBlocks = 1, multiplier = 1.2 // fairShare = ceil(200_000 / 1 * 1.2) = 240_000. min(200_000, 240_000, 200_000) = 200_000 expect(capped.maxBlockGas!.l2Gas).toBe(200_000); }); - it('uses old behavior when redistributeCheckpointBudget is false', () => { - const rollupManaLimit = 1_000_000; - setupBuilder({ - redistributeCheckpointBudget: false, - maxBlocksPerCheckpoint: 5, - rollupManaLimit, - }); - - lightweightCheckpointBuilder.getBlocks.mockReturnValue([ - createMockBlock({ manaUsed: 200_000, txBlobFields: [10], blockBlobFieldCount: 20 }), - ]); - - const opts: PublicProcessorLimits = {}; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); - - // Old behavior: no fair share, just remaining budget = 800_000 - expect(capped.maxBlockGas!.l2Gas).toBe(800_000); - }); - it('redistributes DA gas across remaining blocks', () => { - setupBuilder({ - redistributeCheckpointBudget: true, - perBlockAllocationMultiplier: 1, - maxBlocksPerCheckpoint: 4, - }); + setupBuilder(); lightweightCheckpointBuilder.getBlocks.mockReturnValue([]); - const opts: PublicProcessorLimits = {}; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits( + proposerOpts({ maxBlocksPerCheckpoint: 4, perBlockAllocationMultiplier: 1 }), + ); // fairShareDA = ceil(MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT / 4 * 1) const expectedDA = Math.ceil(MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT / 4); @@ -606,20 +726,16 @@ describe('CheckpointBuilder', () => { }); it('redistributes tx count across remaining blocks', () => { - setupBuilder({ - redistributeCheckpointBudget: true, - perBlockAllocationMultiplier: 1, - maxBlocksPerCheckpoint: 4, - maxTxsPerCheckpoint: 100, - }); + setupBuilder({ maxTxsPerCheckpoint: 100 }); // 1 existing block with 10 txs lightweightCheckpointBuilder.getBlocks.mockReturnValue([ createMockBlock({ manaUsed: 0, txBlobFields: new 
Array(10).fill(1), blockBlobFieldCount: 20 }), ]); - const opts: PublicProcessorLimits = {}; - const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits( + proposerOpts({ maxBlocksPerCheckpoint: 4, perBlockAllocationMultiplier: 1 }), + ); // remainingTxs = 90, remainingBlocks = 3, multiplier = 1 // fairShareTxs = ceil(90 / 3 * 1) = 30 diff --git a/yarn-project/validator-client/src/checkpoint_builder.ts b/yarn-project/validator-client/src/checkpoint_builder.ts index 20a9625c8e71..05489c21e809 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.ts @@ -20,6 +20,7 @@ import type { ContractDataSource } from '@aztec/stdlib/contract'; import type { L1RollupConstants } from '@aztec/stdlib/epoch-helpers'; import { Gas } from '@aztec/stdlib/gas'; import { + type BlockBuilderOptions, type BuildBlockInCheckpointResult, type FullNodeBlockBuilderConfig, FullNodeBlockBuilderConfigKeys, @@ -78,7 +79,7 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { pendingTxs: Iterable | AsyncIterable, blockNumber: BlockNumber, timestamp: bigint, - opts: PublicProcessorLimits & { expectedEndState?: StateReference; minValidTxs?: number } = {}, + opts: BlockBuilderOptions & { expectedEndState?: StateReference }, ): Promise { const slot = this.checkpointBuilder.constants.slotNumber; @@ -178,11 +179,12 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { /** * Caps per-block gas and blob field limits by remaining checkpoint-level budgets. - * Computes remaining L2 gas (mana), DA gas, and blob fields from blocks already added to the checkpoint, - * then returns opts with maxBlockGas and maxBlobFields capped accordingly. + * When building a proposal (isBuildingProposal=true), computes a fair share of remaining budget + * across remaining blocks scaled by the multiplier. 
When validating, only caps by per-block limit + * and remaining checkpoint budget (no redistribution or multiplier). */ protected capLimitsByCheckpointBudgets( - opts: PublicProcessorLimits, + opts: BlockBuilderOptions, ): Pick { const existingBlocks = this.checkpointBuilder.getBlocks(); @@ -203,39 +205,31 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { const blockEndOverhead = getNumBlockEndBlobFields(isFirstBlock); const maxBlobFieldsForTxs = totalBlobCapacity - usedBlobFields - blockEndOverhead; - // When redistributeCheckpointBudget is enabled (default), compute a fair share of remaining budget - // across remaining blocks scaled by the multiplier, instead of letting one block consume it all. - const redistribute = this.config.redistributeCheckpointBudget !== false; - const remainingBlocks = Math.max(1, (this.config.maxBlocksPerCheckpoint ?? 1) - existingBlocks.length); - const multiplier = this.config.perBlockAllocationMultiplier ?? 1.2; - - // Cap L2 gas by remaining checkpoint mana (with fair share when redistributing) - const fairShareL2 = redistribute ? Math.ceil((remainingMana / remainingBlocks) * multiplier) : Infinity; - const cappedL2Gas = Math.min(opts.maxBlockGas?.l2Gas ?? Infinity, fairShareL2, remainingMana); - - // Cap DA gas by remaining checkpoint DA gas budget (with fair share when redistributing) - const fairShareDA = redistribute ? Math.ceil((remainingDAGas / remainingBlocks) * multiplier) : Infinity; - const cappedDAGas = Math.min(opts.maxBlockGas?.daGas ?? remainingDAGas, fairShareDA, remainingDAGas); - - // Cap blob fields by remaining checkpoint blob capacity (with fair share when redistributing) - const fairShareBlobs = redistribute ? Math.ceil((maxBlobFieldsForTxs / remainingBlocks) * multiplier) : Infinity; - const cappedBlobFields = Math.min(opts.maxBlobFields ?? 
Infinity, fairShareBlobs, maxBlobFieldsForTxs); - - // Cap transaction count by remaining checkpoint tx budget (with fair share when redistributing) - let cappedMaxTransactions: number | undefined; - if (this.config.maxTxsPerCheckpoint !== undefined) { - const usedTxs = sum(existingBlocks.map(b => b.body.txEffects.length)); - const remainingTxs = Math.max(0, this.config.maxTxsPerCheckpoint - usedTxs); - const fairShareTxs = redistribute ? Math.ceil((remainingTxs / remainingBlocks) * multiplier) : Infinity; - cappedMaxTransactions = Math.min(opts.maxTransactions ?? Infinity, fairShareTxs, remainingTxs); - } else { - cappedMaxTransactions = opts.maxTransactions; + // Remaining txs + const usedTxs = sum(existingBlocks.map(b => b.body.txEffects.length)); + const remainingTxs = Math.max(0, (this.config.maxTxsPerCheckpoint ?? Infinity) - usedTxs); + + // Cap by per-block limit + remaining checkpoint budget + let cappedL2Gas = Math.min(opts.maxBlockGas?.l2Gas ?? Infinity, remainingMana); + let cappedDAGas = Math.min(opts.maxBlockGas?.daGas ?? Infinity, remainingDAGas); + let cappedBlobFields = Math.min(opts.maxBlobFields ?? Infinity, maxBlobFieldsForTxs); + let cappedMaxTransactions = Math.min(opts.maxTransactions ?? 
Infinity, remainingTxs); + + // Proposer mode: further cap by fair share of remaining budget across remaining blocks + if (opts.isBuildingProposal) { + const remainingBlocks = Math.max(1, opts.maxBlocksPerCheckpoint - existingBlocks.length); + const multiplier = opts.perBlockAllocationMultiplier; + + cappedL2Gas = Math.min(cappedL2Gas, Math.ceil((remainingMana / remainingBlocks) * multiplier)); + cappedDAGas = Math.min(cappedDAGas, Math.ceil((remainingDAGas / remainingBlocks) * multiplier)); + cappedBlobFields = Math.min(cappedBlobFields, Math.ceil((maxBlobFieldsForTxs / remainingBlocks) * multiplier)); + cappedMaxTransactions = Math.min(cappedMaxTransactions, Math.ceil((remainingTxs / remainingBlocks) * multiplier)); } return { maxBlockGas: new Gas(cappedDAGas, cappedL2Gas), maxBlobFields: cappedBlobFields, - maxTransactions: cappedMaxTransactions, + maxTransactions: Number.isFinite(cappedMaxTransactions) ? cappedMaxTransactions : undefined, }; } diff --git a/yarn-project/validator-client/src/validator.integration.test.ts b/yarn-project/validator-client/src/validator.integration.test.ts index 19ad2e1e40f6..5c3c16662f57 100644 --- a/yarn-project/validator-client/src/validator.integration.test.ts +++ b/yarn-project/validator-client/src/validator.integration.test.ts @@ -212,7 +212,12 @@ describe('ValidatorClient Integration', () => { l1ToL2Messages: Fr[] = [], ): Promise<{ block: L2Block; proposal: BlockProposal }> => { const inHash = computeInHashFromL1ToL2Messages(l1ToL2Messages); - const { block, usedTxs } = await checkpointBuilder.buildBlock(txs, blockNumber, timestamp, {}); + const { block, usedTxs } = await checkpointBuilder.buildBlock(txs, blockNumber, timestamp, { + isBuildingProposal: true, + maxBlocksPerCheckpoint: 1, + perBlockAllocationMultiplier: 1.2, + minValidTxs: 0, + }); const proposal = await proposer.validator.createBlockProposal( block.header, From 12c2e491ee8b59c081101e8bf0504d3286aa96a4 Mon Sep 17 00:00:00 2001 From: Phil Windle Date: Wed, 18 
Mar 2026 12:10:23 +0000 Subject: [PATCH 31/41] Review changes --- .../archiver/src/modules/l1_synchronizer.ts | 31 +++++++------------ 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/yarn-project/archiver/src/modules/l1_synchronizer.ts b/yarn-project/archiver/src/modules/l1_synchronizer.ts index 2ac1bd15c9e1..33afde395721 100644 --- a/yarn-project/archiver/src/modules/l1_synchronizer.ts +++ b/yarn-project/archiver/src/modules/l1_synchronizer.ts @@ -3,6 +3,7 @@ import { EpochCache } from '@aztec/epoch-cache'; import { InboxContract, RollupContract } from '@aztec/ethereum/contracts'; import type { L1BlockId } from '@aztec/ethereum/l1-types'; import type { ViemPublicClient, ViemPublicDebugClient } from '@aztec/ethereum/types'; +import { asyncPool } from '@aztec/foundation/async-pool'; import { maxBigint } from '@aztec/foundation/bigint'; import { BlockNumber, CheckpointNumber, EpochNumber } from '@aztec/foundation/branded-types'; import { Buffer32 } from '@aztec/foundation/buffer'; @@ -336,25 +337,17 @@ export class ArchiverL1Synchronizer implements Traceable { // Fetch checkpoints and blocks in bounded batches to avoid unbounded concurrent // promises when the gap between local pending and proven checkpoint numbers is large. 
const BATCH_SIZE = 10; - const newBlocks = []; - for (let offset = 0; offset < checkpointsToUnwind; offset += BATCH_SIZE) { - const batchSize = Math.min(BATCH_SIZE, checkpointsToUnwind - offset); - const checkpoints = ( - await Promise.all( - Array.from({ length: batchSize }, (_, i) => - this.store.getCheckpointData(CheckpointNumber(offset + i + pruneFrom)), - ), - ) - ).filter(isDefined); - - const batchBlocks = ( - await Promise.all( - checkpoints.map(cp => this.store.getBlocksForCheckpoint(CheckpointNumber(cp.checkpointNumber))), - ) - ).filter(isDefined); - - newBlocks.push(...batchBlocks.flat()); - } + const indices = Array.from({ length: checkpointsToUnwind }, (_, i) => CheckpointNumber(i + pruneFrom)); + const checkpoints = (await asyncPool(BATCH_SIZE, indices, idx => this.store.getCheckpointData(idx))).filter( + isDefined, + ); + const newBlocks = ( + await asyncPool(BATCH_SIZE, checkpoints, cp => + this.store.getBlocksForCheckpoint(CheckpointNumber(cp.checkpointNumber)), + ) + ) + .filter(isDefined) + .flat(); // Emit an event for listening services to react to the chain prune this.events.emit(L2BlockSourceEvents.L2PruneUnproven, { From 05692f394a4fef9029ffd6f15fe3d1ee020cc634 Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Wed, 18 Mar 2026 08:37:15 -0400 Subject: [PATCH 32/41] fix(sequencer): add missing opts arg to checkpoint_builder tests (#21733) ## Summary PR #21692 added a required 4th `opts: BlockBuilderOptions` parameter to `CheckpointBuilder.buildBlock()`, but three call sites in the test file were not updated, causing `TS2554: Expected 4 arguments, but got 3`. Adds `validatorOpts()` as the 4th argument to the three affected calls (lines 174, 177, 187). 
## Test plan - All 30 tests in `checkpoint_builder.test.ts` pass - `yarn tsgo -b --emitDeclarationOnly` passes with no errors ClaudeBox log: https://claudebox.work/s/2cad3714097b4ca5?run=1 --- .../validator-client/src/checkpoint_builder.test.ts | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/yarn-project/validator-client/src/checkpoint_builder.test.ts b/yarn-project/validator-client/src/checkpoint_builder.test.ts index 0a096e3dadf3..e5151c230e6e 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.test.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.test.ts @@ -171,10 +171,10 @@ describe('CheckpointBuilder', () => { it('uses the same contractsDB across multiple block builds', async () => { await mockSuccessfulBlock(); - await checkpointBuilder.buildBlock([], blockNumber, 1000n); + await checkpointBuilder.buildBlock([], blockNumber, 1000n, validatorOpts()); await mockSuccessfulBlock(); - await checkpointBuilder.buildBlock([], BlockNumber(blockNumber + 1), 1001n); + await checkpointBuilder.buildBlock([], BlockNumber(blockNumber + 1), 1001n, validatorOpts()); expect(createCheckpointSpy).toHaveBeenCalledTimes(2); expect(commitCheckpointSpy).toHaveBeenCalledTimes(2); @@ -184,7 +184,9 @@ describe('CheckpointBuilder', () => { it('calls revertCheckpoint when public processor fails', async () => { processor.process.mockRejectedValue(new Error('processor failure')); - await expect(checkpointBuilder.buildBlock([], blockNumber, 1000n)).rejects.toThrow('processor failure'); + await expect(checkpointBuilder.buildBlock([], blockNumber, 1000n, validatorOpts())).rejects.toThrow( + 'processor failure', + ); expect(createCheckpointSpy).toHaveBeenCalledTimes(1); expect(commitCheckpointSpy).not.toHaveBeenCalled(); From 4a893dcfb51204f648dde95f29750a89282f0d14 Mon Sep 17 00:00:00 2001 From: Nikita Meshcheriakov Date: Wed, 18 Mar 2026 09:44:37 -0300 Subject: [PATCH 33/41] fix: race condition in fast tx collection (#21496) Ref: A-513 - 
Replaces MissingTxsTracker with RequestTracker that unifies missing tx tracking, deadline management, and cancellation signaling into a single object - Ensures cancellation propagates from the deepest stack level upward: inner workers and node loops settle before collectFast returns (no orphaned promises) - Makes node loop inter-retry sleep interruptible by racing against cancellationToken --- .../p2p_client.integration_batch_txs.test.ts | 5 +- .../proposal_tx_collector_worker.ts | 8 +- .../reqresp/batch-tx-requester/README.md | 53 +++- .../batch_tx_requester.test.ts | 182 +++++--------- .../batch-tx-requester/batch_tx_requester.ts | 92 +++---- .../reqresp/batch-tx-requester/interface.ts | 1 - .../reqresp/batch-tx-requester/missing_txs.ts | 12 +- .../tx_collection/fast_tx_collection.ts | 140 +++++------ .../tx_collection/missing_txs_tracker.ts | 52 ---- .../tx_collection/proposal_tx_collector.ts | 21 +- .../services/tx_collection/request_tracker.ts | 127 ++++++++++ .../tx_collection/slow_tx_collection.ts | 2 +- .../tx_collection/tx_collection.test.ts | 226 +++++++++++++++++- .../services/tx_collection/tx_collection.ts | 8 +- .../testbench/p2p_client_testbench_worker.ts | 8 +- 15 files changed, 571 insertions(+), 366 deletions(-) delete mode 100644 yarn-project/p2p/src/services/tx_collection/missing_txs_tracker.ts create mode 100644 yarn-project/p2p/src/services/tx_collection/request_tracker.ts diff --git a/yarn-project/p2p/src/client/test/p2p_client.integration_batch_txs.test.ts b/yarn-project/p2p/src/client/test/p2p_client.integration_batch_txs.test.ts index 59503a89f336..f6e76dbef7c8 100644 --- a/yarn-project/p2p/src/client/test/p2p_client.integration_batch_txs.test.ts +++ b/yarn-project/p2p/src/client/test/p2p_client.integration_batch_txs.test.ts @@ -22,7 +22,7 @@ import { BatchTxRequester } from '../../services/reqresp/batch-tx-requester/batc import type { BatchTxRequesterLibP2PService } from '../../services/reqresp/batch-tx-requester/interface.js'; import type 
{ IBatchRequestTxValidator } from '../../services/reqresp/batch-tx-requester/tx_validator.js'; import type { ConnectionSampler } from '../../services/reqresp/connection-sampler/connection_sampler.js'; -import { MissingTxsTracker } from '../../services/tx_collection/missing_txs_tracker.js'; +import { RequestTracker } from '../../services/tx_collection/request_tracker.js'; import { generatePeerIdPrivateKeys } from '../../test-helpers/generate-peer-id-private-keys.js'; import { getPorts } from '../../test-helpers/get-ports.js'; import { makeEnrs } from '../../test-helpers/make-enrs.js'; @@ -231,10 +231,9 @@ describe('p2p client integration batch txs', () => { mockP2PService.reqResp = (client0 as any).p2pService.reqresp; const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missingTxHashes), + RequestTracker.create(missingTxHashes, new Date(Date.now() + 5_000)), blockProposal, undefined, // no pinned peer - 5_000, mockP2PService, logger, undefined, diff --git a/yarn-project/p2p/src/client/test/tx_proposal_collector/proposal_tx_collector_worker.ts b/yarn-project/p2p/src/client/test/tx_proposal_collector/proposal_tx_collector_worker.ts index a76672a1e1de..c756de610980 100644 --- a/yarn-project/p2p/src/client/test/tx_proposal_collector/proposal_tx_collector_worker.ts +++ b/yarn-project/p2p/src/client/test/tx_proposal_collector/proposal_tx_collector_worker.ts @@ -19,7 +19,7 @@ import type { P2PConfig } from '../../../config.js'; import { BatchTxRequesterCollector, SendBatchRequestCollector } from '../../../services/index.js'; import type { IBatchRequestTxValidator } from '../../../services/reqresp/batch-tx-requester/tx_validator.js'; import { RateLimitStatus } from '../../../services/reqresp/rate-limiter/rate_limiter.js'; -import { MissingTxsTracker } from '../../../services/tx_collection/missing_txs_tracker.js'; +import { RequestTracker } from '../../../services/tx_collection/request_tracker.js'; import { AlwaysTrueCircuitVerifier, BENCHMARK_CONSTANTS, @@ 
-213,10 +213,9 @@ async function runCollector(cmd: Extract collector.collectTxs( - MissingTxsTracker.fromArray(parsedTxHashes), + RequestTracker.create(parsedTxHashes, new Date(Date.now() + internalTimeoutMs)), parsedProposal, pinnedPeer, - internalTimeoutMs, ), timeoutMs, () => new Error(`Collector timed out after ${timeoutMs}ms`), @@ -231,10 +230,9 @@ async function runCollector(cmd: Extract collector.collectTxs( - MissingTxsTracker.fromArray(parsedTxHashes), + RequestTracker.create(parsedTxHashes, new Date(Date.now() + internalTimeoutMs)), parsedProposal, pinnedPeer, - internalTimeoutMs, ), timeoutMs, () => new Error(`Collector timed out after ${timeoutMs}ms`), diff --git a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/README.md b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/README.md index 3e28ca7f9e15..088c16b26c94 100644 --- a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/README.md +++ b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/README.md @@ -170,6 +170,37 @@ class BlockTxsResponse { The `BitVector` is a compact representation where each bit corresponds to a transaction index in the block proposal. This allows efficient capability advertisement without repeating full hashes. +## Cancellation + +All cancellation is managed by a single `RequestTracker` instance, shared across the entire collection +flow. The `RequestTracker` owns the deadline, tracks which txs are still missing, and exposes a +`cancellationToken` promise that resolves when the request should stop (deadline hit, all txs fetched, +or external `cancel()` call). 
+ +Cancellation propagates from the deepest stack level upward: + +``` +RequestTracker.finish() + ├── resolves cancellationToken promise + │ + ├── BatchTxRequester workers (deepest) + │ ├── shouldStop() checks requestTracker.cancelled → exit loop + │ ├── sleepClampedToDeadline races sleep vs cancellationToken → wakes + │ └── semaphore.acquire races vs cancellationToken → wakes + │ │ + │ ▼ workers settle → txQueue.end() → generator returns + │ + ├── Node collection loops + │ ├── notFinished() checks requestTracker.cancelled → exit loop + │ └── inter-retry sleep races vs cancellationToken → wakes + │ │ + │ ▼ all node loops settle + │ + └── collectFast (outermost) + awaits Promise.allSettled([reqresp, nodes]) → settles after inner tasks + finally: requestTracker.cancel() (idempotent), cleanup +``` + ## Key Files | File | Description | @@ -179,15 +210,16 @@ The `BitVector` is a compact representation where each bit corresponds to a tran | `peer_collection.ts` | Manages peer classification (dumb/smart/bad) and rate limiting | | `interface.ts` | Type definitions for dependencies | | `../protocols/block_txs/` | Wire protocol definitions (`BlockTxsRequest`, `BlockTxsResponse`, `BitVector`) | +| `../../tx_collection/request_tracker.ts` | Centralized deadline, missing tx tracking, and cancellation signal | ## Stopping Conditions -The `BatchTxRequester` stops when any of these conditions are met: +The `BatchTxRequester` stops when any of these conditions are met, all managed by the `RequestTracker`: -1. **All transactions fetched** - Success! -2. **Deadline exceeded** - Timeout configured by caller -3. **Abort signal** - External cancellation -4. **No transactions to fetch** - Nothing was missing +1. **All transactions fetched** - `markFetched()` removes the last missing tx, triggering `finish()` +2. **Deadline exceeded** - `setTimeout` in `RequestTracker` fires, triggering `finish()` +3. 
**External cancellation** - `RequestTracker.cancel()` called (e.g., from `stop()`, `stopCollectingForBlocksUpTo`) +4. **No transactions to fetch** - Empty hash set at construction, `RequestTracker` finishes immediately ## Configuration @@ -228,11 +260,15 @@ Request to peer fails ## Usage Example ```typescript +const requestTracker = RequestTracker.create( + missingTxHashes, // TxHash[] - what we need + new Date(Date.now() + 5_000), // deadline +); + const requester = new BatchTxRequester( - missingTxHashes, // TxHash[] - what we need + requestTracker, // IRequestTracker - tracks missing txs, deadline, and cancellation blockTxsSource, // BlockTxsSource - the proposal or block we need txs for pinnedPeer, // PeerId | undefined - peer expected to have the txs - timeoutMs, // number - how long to try p2pService, // BatchTxRequesterLibP2PService ); @@ -273,6 +309,8 @@ const txs = await BatchTxRequester.collectAllTxs(requester.run()); │ 1. Try RPC nodes first (fast) │ │ Periodic polling of RPC nodes │ │ 2. 
Fall back to BatchTxRequester │ │ and peers for missing txs │ │ │ │ │ +│ Creates RequestTracker per │ │ │ +│ request with deadline │ │ │ └───────────────────┬───────────────┘ └─────────────────────────────────────┘ │ │ For 'proposal' and 'block' requests @@ -281,6 +319,7 @@ const txs = await BatchTxRequester.collectAllTxs(requester.run()); │ BatchTxRequester │ │ │ │ Aggressive parallel fetching from multiple peers │ +│ Shares RequestTracker with FastTxCollection for unified cancellation │ │ Uses BLOCK_TXS sub-protocol for efficient batching │ └───────────────────┬─────────────────────────────────────────────────────────┘ │ diff --git a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.test.ts b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.test.ts index f68b6dcddc40..56e38ebdb07e 100644 --- a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.test.ts +++ b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.test.ts @@ -17,7 +17,7 @@ import type { PeerId } from '@libp2p/interface'; import { type MockProxy, mock } from 'jest-mock-extended'; import { createSecp256k1PeerId } from '../../../index.js'; -import { MissingTxsTracker } from '../../tx_collection/missing_txs_tracker.js'; +import { RequestTracker } from '../../tx_collection/request_tracker.js'; import type { ConnectionSampler } from '../connection-sampler/connection_sampler.js'; import type { ReqRespInterface } from '../interface.js'; import { BitVector, BlockTxsRequest, BlockTxsResponse } from '../protocols/index.js'; @@ -110,25 +110,17 @@ describe('BatchTxRequester', () => { const clock = new TestClock(); - const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), - blockProposal, - undefined, - deadline, - mockP2PService, - logger, - clock, - { - smartParallelWorkerCount: 0, - dumbParallelWorkerCount: 1, - txValidator, - }, - ); + const tracker = RequestTracker.create(missing, new 
Date(Date.now() + deadline)); + const requester = new BatchTxRequester(tracker, blockProposal, undefined, mockP2PService, logger, clock, { + smartParallelWorkerCount: 0, + dumbParallelWorkerCount: 1, + txValidator, + }); const runPromise = BatchTxRequester.collectAllTxs(requester.run()); await retryUntil(() => (requestCount() === rounds ? true : undefined), 'waitFor', 10, 0.01); - clock.advanceTo(deadline + 1); + tracker.cancel(); await runPromise; @@ -165,25 +157,17 @@ describe('BatchTxRequester', () => { const clock = new TestClock(); - const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), - blockProposal, - undefined, - deadline, - mockP2PService, - logger, - clock, - { - smartParallelWorkerCount: 0, - dumbParallelWorkerCount: 3, - txValidator, - }, - ); + const tracker = RequestTracker.create(missing, new Date(Date.now() + deadline)); + const requester = new BatchTxRequester(tracker, blockProposal, undefined, mockP2PService, logger, clock, { + smartParallelWorkerCount: 0, + dumbParallelWorkerCount: 3, + txValidator, + }); const runPromise = BatchTxRequester.collectAllTxs(requester.run()); await retryUntil(() => (requestCount() == numberOfRounds * peers.length ? 
true : undefined), 'waitFor', 10, 0.01); - clock.advanceTo(deadline + 1); + tracker.cancel(); await runPromise; @@ -294,10 +278,9 @@ describe('BatchTxRequester', () => { }); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, undefined, - deadline, mockP2PService, logger, new DateProvider(), @@ -348,10 +331,9 @@ describe('BatchTxRequester', () => { const semaphore = new TestSemaphore(new Semaphore(0)); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, undefined, - deadline, mockP2PService, logger, new DateProvider(), @@ -400,10 +382,9 @@ describe('BatchTxRequester', () => { const semaphore = new TestSemaphore(new Semaphore(0)); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, pinnedPeer, - deadline, mockP2PService, logger, dateProvider, @@ -473,10 +454,9 @@ describe('BatchTxRequester', () => { const semaphore = new TestSemaphore(new Semaphore(0)); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, pinnedPeer, - deadline, mockP2PService, logger, dateProvider, @@ -529,10 +509,9 @@ describe('BatchTxRequester', () => { const semaphore = new TestSemaphore(new Semaphore(0)); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, pinnedPeer, - deadline, mockP2PService, logger, dateProvider, @@ -585,10 +564,9 @@ describe('BatchTxRequester', () => { ); reqResp.sendRequestToPeer.mockImplementation(mockImplementation); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new 
Date(Date.now() + deadline)), blockProposal, pinnedPeer, - deadline, mockP2PService, logger, dateProvider, @@ -662,10 +640,9 @@ describe('BatchTxRequester', () => { }); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, pinnedPeer, - deadline, mockP2PService, logger, dateProvider, @@ -776,10 +753,9 @@ describe('BatchTxRequester', () => { }); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, pinnedPeer, - deadline, mockP2PService, logger, dateProvider, @@ -916,10 +892,9 @@ describe('BatchTxRequester', () => { }); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, pinnedPeer, - deadline, mockP2PService, logger, clock, @@ -990,26 +965,18 @@ describe('BatchTxRequester', () => { const clock = new TestClock(); - const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), - blockProposal, - undefined, - shortDeadline, - mockP2PService, - logger, - clock, - { - smartParallelWorkerCount: 1, - dumbParallelWorkerCount: 1, - txValidator, - }, - ); + const tracker = RequestTracker.create(missing, new Date(Date.now() + shortDeadline)); + const requester = new BatchTxRequester(tracker, blockProposal, undefined, mockP2PService, logger, clock, { + smartParallelWorkerCount: 1, + dumbParallelWorkerCount: 1, + txValidator, + }); const runPromise = BatchTxRequester.collectAllTxs(requester.run()); - // Wait for first request, then advance clock past deadline + // Wait for first request, then cancel the tracker await onFirstRequest; - clock.advanceTo(shortDeadline + 1); + tracker.cancel(); await runPromise; @@ -1033,15 +1000,15 @@ describe('BatchTxRequester', () => { const peers = await Promise.all([createSecp256k1PeerId(), 
createSecp256k1PeerId()]); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); - // Create abort controller and immediately abort - const abortController = new AbortController(); - abortController.abort(); + // Create tracker and immediately cancel + const tracker = RequestTracker.create(missing, new Date(Date.now() + deadline)); + tracker.cancel(); let requestsMade = 0; // eslint-disable-next-line require-await reqResp.sendRequestToPeer.mockImplementation(async () => { requestsMade++; - // This should never be called since we abort immediately + // This should never be called since we cancel immediately return { status: ReqRespStatus.SUCCESS, data: Buffer.alloc(0), @@ -1049,17 +1016,15 @@ describe('BatchTxRequester', () => { }); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + tracker, blockProposal, undefined, - deadline, mockP2PService, logger, new DateProvider(), { smartParallelWorkerCount: 2, dumbParallelWorkerCount: 2, - abortSignal: abortController.signal, txValidator, }, ); @@ -1090,12 +1055,12 @@ describe('BatchTxRequester', () => { [peers[2].toString(), Array.from({ length: 10 }, (_, i) => i + 20)], ]); - const abortController = new AbortController(); + const tracker = RequestTracker.create(missing, new Date(Date.now() + deadline)); let requestCount = 0; reqResp.sendRequestToPeer.mockImplementation(async (peerId: any) => { if (requestCount === 1) { - abortController.abort(); + tracker.cancel(); } // Return successful response with transactions @@ -1112,7 +1077,7 @@ describe('BatchTxRequester', () => { requestCount++; - // Allow event loop to process abort signal + // Allow event loop to process cancellation await sleep(50); return { status: ReqRespStatus.SUCCESS, @@ -1121,25 +1086,23 @@ describe('BatchTxRequester', () => { }); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + tracker, blockProposal, undefined, - deadline, mockP2PService, logger, new 
DateProvider(), { smartParallelWorkerCount: 0, dumbParallelWorkerCount: 2, - abortSignal: abortController.signal, txValidator, }, ); const result = await BatchTxRequester.collectAllTxs(requester.run()); - // Verify abort was actually triggered - expect(abortController.signal.aborted).toBe(true); + // Verify cancellation was actually triggered + expect(tracker.checkCancelled()).toBe(true); expect(result).toBeDefined(); expect(result!.length).toBeGreaterThan(0); @@ -1169,36 +1132,26 @@ describe('BatchTxRequester', () => { const { mockImplementation } = createRequestLogger(blockProposal, new Set(), peerTransactions, 100); reqResp.sendRequestToPeer.mockImplementation(mockImplementation); - const abortController = new AbortController(); + const tracker = RequestTracker.create(missing, new Date(Date.now() + deadline)); // Create semaphore that starts with 0 permits to block smart workers const semaphore = new TestSemaphore(new Semaphore(0)); - const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), - blockProposal, - undefined, - deadline, - mockP2PService, - logger, - clock, - { - semaphore, - smartParallelWorkerCount: 2, - dumbParallelWorkerCount: 2, - peerCollection, - abortSignal: abortController.signal, - txValidator, - }, - ); + const requester = new BatchTxRequester(tracker, blockProposal, undefined, mockP2PService, logger, clock, { + semaphore, + smartParallelWorkerCount: 2, + dumbParallelWorkerCount: 2, + peerCollection, + txValidator, + }); const runPromise = BatchTxRequester.collectAllTxs(requester.run()); await sleep(1000); // Allow some time for smart workers to start and block on semaphore - abortController.abort(); // Trigger abort while smart workers are blocked + tracker.cancel(); // Trigger cancellation while smart workers are blocked const result = await runPromise; - // Verify abort was triggered - expect(abortController.signal.aborted).toBe(true); + // Verify cancellation was triggered + 
expect(tracker.checkCancelled()).toBe(true); // Verify peer was promoted to smart expect(peerCollection.smartPeersMarked).toContain(peers[0].toString()); @@ -1250,10 +1203,9 @@ describe('BatchTxRequester', () => { reqResp.sendRequestToPeer.mockImplementation(mockImplementation); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, undefined, - deadline, mockP2PService, logger, new DateProvider(), @@ -1331,10 +1283,9 @@ describe('BatchTxRequester', () => { reqResp.sendRequestToPeer.mockImplementation(mockImplementation); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, undefined, - deadline, mockP2PService, logger, new DateProvider(), @@ -1402,10 +1353,9 @@ describe('BatchTxRequester', () => { reqResp.sendRequestToPeer.mockImplementation(mockImplementation); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, undefined, - deadline, mockP2PService, logger, new DateProvider(), @@ -1450,7 +1400,7 @@ describe('BatchTxRequester', () => { const peer = await createSecp256k1PeerId(); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer]); - const tracker = MissingTxsTracker.fromArray(missing); + const tracker = RequestTracker.create(missing, new Date(Date.now() + deadline)); // Peer has only first half of transactions const peerTransactions = new Map([[peer.toString(), Array.from({ length: TX_BATCH_SIZE }, (_, i) => i)]]); @@ -1462,7 +1412,6 @@ describe('BatchTxRequester', () => { tracker, blockProposal, undefined, - deadline, mockP2PService, logger, new DateProvider(), @@ -1519,10 +1468,9 @@ describe('BatchTxRequester', () => { reqResp.sendRequestToPeer.mockImplementation(mockImplementation); const requester = new 
BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, pinnedPeer, - deadline, mockP2PService, logger, new DateProvider(), @@ -1571,10 +1519,9 @@ describe('BatchTxRequester', () => { reqResp.sendRequestToPeer.mockImplementation(mockImplementation); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, pinnedPeer, - deadline, mockP2PService, logger, new DateProvider(), @@ -1653,10 +1600,9 @@ describe('BatchTxRequester', () => { }); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, pinnedPeer, - deadline, mockP2PService, logger, clock, @@ -1709,10 +1655,9 @@ describe('BatchTxRequester', () => { reqResp.sendRequestToPeer.mockImplementation(mockImplementation); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, pinnedPeer, - deadline, mockP2PService, logger, new DateProvider(), @@ -1765,10 +1710,9 @@ describe('BatchTxRequester', () => { reqResp.sendRequestToPeer.mockImplementation(mockImplementation); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + RequestTracker.create(missing, new Date(Date.now() + deadline)), blockProposal, pinnedPeer, - deadline, mockP2PService, logger, new DateProvider(), diff --git a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.ts b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.ts index 1f3fd2fb0886..98e34b1e9c2e 100644 --- a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.ts +++ b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.ts @@ -1,15 +1,14 @@ import { chunkWrapAround } from 
'@aztec/foundation/collection'; -import { TimeoutError } from '@aztec/foundation/error'; import { type Logger, createLogger } from '@aztec/foundation/log'; import { FifoMemoryQueue, type ISemaphore, Semaphore } from '@aztec/foundation/queue'; import { sleep } from '@aztec/foundation/sleep'; -import { DateProvider, executeTimeout } from '@aztec/foundation/timer'; +import { DateProvider } from '@aztec/foundation/timer'; import { PeerErrorSeverity } from '@aztec/stdlib/p2p'; import { Tx, TxArray, TxHash } from '@aztec/stdlib/tx'; import type { PeerId } from '@libp2p/interface'; -import type { IMissingTxsTracker } from '../../tx_collection/missing_txs_tracker.js'; +import type { IRequestTracker } from '../../tx_collection/request_tracker.js'; import { ReqRespSubProtocol } from '.././interface.js'; import { BlockTxsRequest, BlockTxsResponse, type BlockTxsSource } from '.././protocols/index.js'; import { ReqRespStatus } from '.././status.js'; @@ -42,16 +41,14 @@ import { BatchRequestTxValidator, type IBatchRequestTxValidator } from './tx_val * - Is the peer which was unable to send us successful response N times in a row * */ export class BatchTxRequester { + private readonly requestTracker: IRequestTracker; private readonly blockTxsSource: BlockTxsSource; private readonly pinnedPeer: PeerId | undefined; - private readonly timeoutMs: number; private readonly p2pService: BatchTxRequesterLibP2PService; private readonly logger: Logger; - private readonly dateProvider: DateProvider; private readonly opts: BatchTxRequesterOptions; private readonly peers: IPeerCollection; private readonly txsMetadata: ITxMetadataCollection; - private readonly deadline: number; private readonly smartRequesterSemaphore: ISemaphore; private readonly txQueue: FifoMemoryQueue; private readonly txValidator: IBatchRequestTxValidator; @@ -60,21 +57,19 @@ export class BatchTxRequester { private readonly txBatchSize: number; constructor( - missingTxsTracker: IMissingTxsTracker, + requestTracker: 
IRequestTracker, blockTxsSource: BlockTxsSource, pinnedPeer: PeerId | undefined, - timeoutMs: number, p2pService: BatchTxRequesterLibP2PService, logger?: Logger, dateProvider?: DateProvider, opts?: BatchTxRequesterOptions, ) { + this.requestTracker = requestTracker; this.blockTxsSource = blockTxsSource; this.pinnedPeer = pinnedPeer; - this.timeoutMs = timeoutMs; this.p2pService = p2pService; this.logger = logger ?? createLogger('p2p:reqresp_batch'); - this.dateProvider = dateProvider ?? new DateProvider(); this.opts = opts ?? {}; this.smartParallelWorkerCount = @@ -82,7 +77,6 @@ export class BatchTxRequester { this.dumbParallelWorkerCount = this.opts.dumbParallelWorkerCount ?? DEFAULT_BATCH_TX_REQUESTER_DUMB_PARALLEL_WORKER_COUNT; this.txBatchSize = this.opts.txBatchSize ?? DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE; - this.deadline = this.dateProvider.now() + this.timeoutMs; this.txQueue = new FifoMemoryQueue(this.logger); this.txValidator = this.opts.txValidator ?? new BatchRequestTxValidator(this.p2pService.txValidatorConfig); @@ -93,12 +87,12 @@ export class BatchTxRequester { this.peers = new PeerCollection( this.p2pService.connectionSampler, this.pinnedPeer, - this.dateProvider, + dateProvider ?? new DateProvider(), badPeerThreshold, this.p2pService.peerScoring, ); } - this.txsMetadata = new MissingTxMetadataCollection(missingTxsTracker, this.txBatchSize); + this.txsMetadata = new MissingTxMetadataCollection(requestTracker, this.txBatchSize); this.smartRequesterSemaphore = this.opts.semaphore ?? 
new Semaphore(0); } @@ -106,40 +100,30 @@ export class BatchTxRequester { * Fetches all missing transactions and yields them one by one * */ public async *run(): AsyncGenerator { - // Our timeout is represented in milliseconds but queue expects seconds - // We also want to make sure we wait at least 1 second in case of very low timeouts - const timeoutQueueAfter = Math.max(Math.ceil(this.timeoutMs / 1_000), 1); try { if (this.txsMetadata.getMissingTxHashes().size === 0) { return undefined; } - // Start workers in background - const workersPromise = executeTimeout( - () => Promise.allSettled([this.smartRequester(), this.dumbRequester(), this.pinnedPeerRequester()]), - this.timeoutMs, - ).finally(() => { + // Start workers in background. Workers stop themselves via requestTracker.checkCancelled(). + const workersPromise = Promise.allSettled([ + this.smartRequester(), + this.dumbRequester(), + this.pinnedPeerRequester(), + ]).finally(() => { this.txQueue.end(); }); + // Yield txs as workers put them on the queue. The queue's end() drains remaining items + // before returning null, so we don't lose any txs. 
while (true) { - const tx = await this.txQueue.get(timeoutQueueAfter); + const tx = await this.txQueue.get(); - // null indicates that the queue has ended if (tx === null) { break; } yield tx; - - if (this.shouldStop()) { - // Drain queue before ending - let remaining; - while ((remaining = this.txQueue.getImmediate()) !== undefined) { - yield remaining; - } - break; - } } this.unlockSmartRequesterSemaphores(); @@ -360,7 +344,10 @@ export class BatchTxRequester { ) { try { this.logger.trace(`Smart worker ${workerIndex} started`); - await executeTimeout((_: AbortSignal) => this.smartRequesterSemaphore.acquire(), this.timeoutMs); + await Promise.race([this.smartRequesterSemaphore.acquire(), this.requestTracker.cancellationToken]); + if (this.requestTracker.checkCancelled()) { + return; + } this.logger.trace(`Smart worker ${workerIndex} acquired semaphore`); while (!this.shouldStop()) { @@ -384,7 +371,10 @@ export class BatchTxRequester { // // When a dumb peer responds with valid txIndices, it gets // promoted to smart and releases the semaphore, waking this worker. 
- await executeTimeout((_: AbortSignal) => this.smartRequesterSemaphore.acquire(), this.timeoutMs); + await Promise.race([this.smartRequesterSemaphore.acquire(), this.requestTracker.cancellationToken]); + if (this.requestTracker.checkCancelled()) { + break; + } this.logger.debug(`Worker loop smart: acquired next smart peer`); continue; } @@ -411,11 +401,7 @@ export class BatchTxRequester { }); } } catch (err: any) { - if (err instanceof TimeoutError) { - this.logger.debug(`Smart worker ${workerIndex} timed out waiting for semaphore`); - } else { - this.logger.error(`Smart worker ${workerIndex} encountered an error: ${err}`); - } + this.logger.error(`Smart worker ${workerIndex} encountered an error: ${err}`); } finally { this.logger.debug(`Smart worker ${workerIndex} finished`); } @@ -651,27 +637,14 @@ export class BatchTxRequester { } /* - * @returns true if all missing txs have been fetched */ - private fetchedAllTxs() { - return this.txsMetadata.getMissingTxHashes().size == 0; - } - - /* - * Checks if the BatchTxRequester should stop fetching missing txs - * Conditions for stopping are: - * - There have been no missing transactions to start with - * - All transactions have been fetched - * - The deadline has been hit (no more time to fetch) - * - This process has been cancelled via abortSignal - * - * @returns true if BatchTxRequester should stop, otherwise false*/ + * Checks if the BatchTxRequester should stop fetching missing txs. + * Delegates to requestTracker which covers: deadline hit, all txs fetched, or external cancellation. */ private shouldStop() { - const aborted = this.opts.abortSignal?.aborted ?? false; - if (aborted) { + if (this.requestTracker.checkCancelled()) { this.unlockSmartRequesterSemaphores(); } - return aborted || this.fetchedAllTxs() || this.dateProvider.now() > this.deadline; + return this.requestTracker.checkCancelled(); } /* @@ -689,10 +662,9 @@ export class BatchTxRequester { * This ensures we don't sleep past the deadline. 
* */ private async sleepClampedToDeadline(durationMs: number) { - const remaining = this.deadline - this.dateProvider.now(); - const thereIsTimeRemaining = remaining > 0; - if (thereIsTimeRemaining) { - await sleep(Math.min(durationMs, remaining)); + if (this.requestTracker.checkCancelled()) { + return; } + await Promise.race([sleep(durationMs), this.requestTracker.cancellationToken]); } } diff --git a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/interface.ts b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/interface.ts index fad59924637a..2899ffec3f70 100644 --- a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/interface.ts +++ b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/interface.ts @@ -49,7 +49,6 @@ export interface BatchTxRequesterOptions { //Injectable for testing purposes semaphore?: ISemaphore; peerCollection?: IPeerCollection; - abortSignal?: AbortSignal; /** Optional tx validator for testing - if not provided, one is created from p2pService.txValidatorConfig */ txValidator?: IBatchRequestTxValidator; } diff --git a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/missing_txs.ts b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/missing_txs.ts index 06a09effe8be..e858b26b7d12 100644 --- a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/missing_txs.ts +++ b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/missing_txs.ts @@ -2,7 +2,7 @@ import { type Tx, TxHash } from '@aztec/stdlib/tx'; import type { PeerId } from '@libp2p/interface'; -import type { IMissingTxsTracker } from '../../tx_collection/missing_txs_tracker.js'; +import type { IRequestTracker } from '../../tx_collection/request_tracker.js'; import { DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE } from './config.js'; import type { ITxMetadataCollection } from './interface.js'; @@ -41,10 +41,10 @@ export class MissingTxMetadataCollection implements ITxMetadataCollection { private txMetadata = new Map(); constructor( - private 
missingTxsTracker: IMissingTxsTracker, + private requestTracker: IRequestTracker, private readonly txBatchSize: number = DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE, ) { - missingTxsTracker.missingTxHashes.forEach(hash => this.txMetadata.set(hash, new MissingTxMetadata(hash))); + requestTracker.missingTxHashes.forEach(hash => this.txMetadata.set(hash, new MissingTxMetadata(hash))); } public getPrioritizingNotInFlightAndLowerRequestCount(txs: string[]): MissingTxMetadata[] { @@ -65,7 +65,7 @@ export class MissingTxMetadataCollection implements ITxMetadataCollection { } public getMissingTxHashes(): Set { - return this.missingTxsTracker.missingTxHashes; + return this.requestTracker.missingTxHashes; } public getTxsPeerHas(peer: PeerId): Set { @@ -128,7 +128,7 @@ export class MissingTxMetadataCollection implements ITxMetadataCollection { } public alreadyFetched(txHash: TxHash): boolean { - return !this.missingTxsTracker.isMissing(txHash.toString()); + return !this.requestTracker.isMissing(txHash.toString()); } public markFetched(peerId: PeerId, tx: Tx): boolean { @@ -144,7 +144,7 @@ export class MissingTxMetadataCollection implements ITxMetadataCollection { } txMeta.peers.add(peerId.toString()); - return this.missingTxsTracker.markFetched(tx); + return this.requestTracker.markFetched(tx); } public markPeerHas(peerId: PeerId, txHash: TxHash[]) { diff --git a/yarn-project/p2p/src/services/tx_collection/fast_tx_collection.ts b/yarn-project/p2p/src/services/tx_collection/fast_tx_collection.ts index ebd7b628f57a..d9f8b9fb5cb9 100644 --- a/yarn-project/p2p/src/services/tx_collection/fast_tx_collection.ts +++ b/yarn-project/p2p/src/services/tx_collection/fast_tx_collection.ts @@ -1,12 +1,9 @@ import { BlockNumber } from '@aztec/foundation/branded-types'; import { times } from '@aztec/foundation/collection'; -import { AbortError, TimeoutError } from '@aztec/foundation/error'; import { type Logger, createLogger } from '@aztec/foundation/log'; -import { promiseWithResolvers } from 
'@aztec/foundation/promise'; import { sleep } from '@aztec/foundation/sleep'; import { DateProvider, elapsed } from '@aztec/foundation/timer'; import type { L2BlockInfo } from '@aztec/stdlib/block'; -import type { BlockProposal } from '@aztec/stdlib/p2p'; import { type Tx, TxHash } from '@aztec/stdlib/tx'; import type { PeerId } from '@libp2p/interface'; @@ -14,12 +11,12 @@ import type { PeerId } from '@libp2p/interface'; import type { BatchTxRequesterConfig } from '../reqresp/batch-tx-requester/config.js'; import type { BatchTxRequesterLibP2PService } from '../reqresp/batch-tx-requester/interface.js'; import type { TxCollectionConfig } from './config.js'; -import { MissingTxsTracker } from './missing_txs_tracker.js'; import { BatchTxRequesterCollector, type MissingTxsCollector, SendBatchRequestCollector, } from './proposal_tx_collector.js'; +import { RequestTracker } from './request_tracker.js'; import type { FastCollectionRequest, FastCollectionRequestInput } from './tx_collection.js'; import type { TxAddContext, TxCollectionSink } from './tx_collection_sink.js'; import type { TxSource } from './tx_source.js'; @@ -48,7 +45,9 @@ export class FastTxCollection { } public async stop() { - this.requests.forEach(request => request.promise.reject(new AbortError(`Stopped collection service`))); + this.requests.forEach(request => { + request.requestTracker.cancel(); + }); await Promise.resolve(); } @@ -75,81 +74,65 @@ export class FastTxCollection { ? { ...input.blockProposal.toBlockInfo(), blockNumber: input.blockNumber } : { ...input.block.toBlockInfo() }; - // This promise is used to await for the collection to finish during the main collectFast method. - // It gets resolved in `foundTxs` when all txs have been collected, or rejected if the request is aborted or hits the deadline. 
- const promise = promiseWithResolvers(); - const timeoutTimer = setTimeout(() => promise.reject(new TimeoutError(`Timed out while collecting txs`)), timeout); - const request: FastCollectionRequest = { ...input, blockInfo, - promise, - missingTxTracker: MissingTxsTracker.fromArray(txHashes), - deadline: opts.deadline, + requestTracker: RequestTracker.create(txHashes, opts.deadline, this.dateProvider), }; const [duration] = await elapsed(() => this.collectFast(request, { ...opts })); - clearTimeout(timeoutTimer); this.log.verbose( - `Collected ${request.missingTxTracker.collectedTxs.length} txs out of ${txHashes.length} for ${input.type} at slot ${blockInfo.slotNumber}`, + `Collected ${request.requestTracker.collectedTxs.length} txs out of ${txHashes.length} for ${input.type} at slot ${blockInfo.slotNumber}`, { ...blockInfo, duration, requestType: input.type, - missingTxs: [...request.missingTxTracker.missingTxHashes], + missingTxs: [...request.requestTracker.missingTxHashes], }, ); - return request.missingTxTracker.collectedTxs; + return request.requestTracker.collectedTxs; } - protected async collectFast( - request: FastCollectionRequest, - opts: { proposal?: BlockProposal; deadline: Date; pinnedPeer?: PeerId }, - ) { + protected async collectFast(request: FastCollectionRequest, opts: { pinnedPeer?: PeerId }) { this.requests.add(request); const { blockInfo } = request; this.log.debug( - `Starting fast collection of ${request.missingTxTracker.numberOfMissingTxs} txs for ${request.type} at slot ${blockInfo.slotNumber}`, - { ...blockInfo, requestType: request.type, deadline: opts.deadline }, + `Starting fast collection of ${request.requestTracker.numberOfMissingTxs} txs for ${request.type} at slot ${blockInfo.slotNumber}`, + { ...blockInfo, requestType: request.type, deadline: request.requestTracker.deadline }, ); try { // Start blasting all nodes for the txs. We give them a little time to respond before we start reqresp. 
- // And keep an eye on the request promise to ensure we don't wait longer than the deadline or return as soon - // as we have collected all txs, whatever the source. - const nodeCollectionPromise = this.collectFastFromNodes(request, opts); + // We race against the cancellation token to exit as soon as all txs are collected, the deadline expires, + // or the request is externally cancelled. + const nodeCollectionPromise = this.collectFastFromNodes(request); const waitBeforeReqResp = sleep(this.config.txCollectionFastNodesTimeoutBeforeReqRespMs); - await Promise.race([request.promise.promise, waitBeforeReqResp]); + await Promise.race([request.requestTracker.cancellationToken, waitBeforeReqResp]); - // If we have collected all txs, we can stop here - if (request.missingTxTracker.allFetched()) { - this.log.debug(`All txs collected for slot ${blockInfo.slotNumber} without reqresp`, blockInfo); + // If we have collected all txs or the request was cancelled, we can stop here. + // Wait for node collection to settle so inner tasks finish before we return. + if (request.requestTracker.checkCancelled()) { + if (request.requestTracker.allFetched()) { + this.log.debug(`All txs collected for slot ${blockInfo.slotNumber} without reqresp`, blockInfo); + } + await nodeCollectionPromise; return; } // Start blasting reqresp for the remaining txs. Note that node collection keeps running in parallel. // We stop when we have collected all txs, timed out, or both node collection and reqresp have given up. - const collectionPromise = Promise.allSettled([this.collectFastViaReqResp(request, opts), nodeCollectionPromise]); - await Promise.race([collectionPromise, request.promise.promise]); + // Inner tasks observe requestTracker.checkCancelled() and stop themselves, so this settles shortly after cancellation. 
+ await Promise.allSettled([this.collectFastViaReqResp(request, opts), nodeCollectionPromise]); } catch (err) { - // Log and swallow all errors - const logCtx = { + this.log.error(`Error collecting txs for ${request.type} for slot ${blockInfo.slotNumber}`, err, { ...blockInfo, - errorMessage: err instanceof Error ? err.message : undefined, - missingTxs: request.missingTxTracker.missingTxHashes.values().map(txHash => txHash.toString()), - }; - if (err instanceof Error && err.name === 'TimeoutError') { - this.log.warn(`Timed out collecting txs for ${request.type} at slot ${blockInfo.slotNumber}`, logCtx); - } else if (err instanceof Error && err.name === 'AbortError') { - this.log.warn(`Aborted collecting txs for ${request.type} at slot ${blockInfo.slotNumber}`, logCtx); - } else { - this.log.error(`Error collecting txs for ${request.type} for slot ${blockInfo.slotNumber}`, err, logCtx); - } + missingTxs: request.requestTracker.missingTxHashes.values().map(txHash => txHash.toString()), + }); } finally { // Ensure no unresolved promises and remove the request from the set - request.promise.resolve(); + request.requestTracker.cancel(); this.requests.delete(request); } } @@ -160,30 +143,28 @@ export class FastTxCollection { * the txs that have been requested less often whenever we need to send a new batch of requests. We ensure that no * tx is requested more than once at the same time to the same node. */ - private async collectFastFromNodes(request: FastCollectionRequest, opts: { deadline: Date }): Promise { + private async collectFastFromNodes(request: FastCollectionRequest): Promise { if (this.nodes.length === 0) { return; } // Keep a shared priority queue of all txs pending to be requested, sorted by the number of attempts made to collect them. 
- const attemptsPerTx = [...request.missingTxTracker.missingTxHashes].map(txHash => ({ + const attemptsPerTx = [...request.requestTracker.missingTxHashes].map(txHash => ({ txHash, attempts: 0, found: false, })); // Returns once we have finished all node loops. Each loop finishes when the deadline is hit, or all txs have been collected. - await Promise.allSettled(this.nodes.map(node => this.collectFastFromNode(request, node, attemptsPerTx, opts))); + await Promise.allSettled(this.nodes.map(node => this.collectFastFromNode(request, node, attemptsPerTx))); } private async collectFastFromNode( request: FastCollectionRequest, node: TxSource, attemptsPerTx: { txHash: string; attempts: number; found: boolean }[], - opts: { deadline: Date }, ) { - const notFinished = () => - this.dateProvider.now() <= +opts.deadline && !request.missingTxTracker.allFetched() && this.requests.has(request); + const notFinished = () => !request.requestTracker.checkCancelled(); const maxParallelRequests = this.config.txCollectionFastMaxParallelRequestsPerNode; const maxBatchSize = this.config.txCollectionNodeRpcMaxBatchSize; @@ -200,7 +181,7 @@ export class FastTxCollection { if (!txToRequest) { // No more txs to process break; - } else if (!request.missingTxTracker.isMissing(txToRequest.txHash)) { + } else if (!request.requestTracker.isMissing(txToRequest.txHash)) { // Mark as found if it was found somewhere else, we'll then remove it from the array. // We don't delete it now since 'array.splice' is pretty expensive, so we do it after sorting. 
txToRequest.found = true; @@ -235,7 +216,7 @@ export class FastTxCollection { async () => { const result = await node.getTxsByHash(txHashes.map(TxHash.fromString)); for (const tx of result.validTxs) { - request.missingTxTracker.markFetched(tx); + request.requestTracker.markFetched(tx); } return result; }, @@ -254,9 +235,12 @@ export class FastTxCollection { activeRequestsToThisNode.delete(requestedTx.txHash); } - // Sleep a bit until hitting the node again (or not, depending on config) + // Sleep a bit until hitting the node again, but wake up immediately on cancellation if (notFinished()) { - await sleep(this.config.txCollectionFastNodeIntervalMs); + await Promise.race([ + sleep(this.config.txCollectionFastNodeIntervalMs), + request.requestTracker.cancellationToken, + ]); } } }; @@ -266,21 +250,20 @@ export class FastTxCollection { } private async collectFastViaReqResp(request: FastCollectionRequest, opts: { pinnedPeer?: PeerId }) { - const timeoutMs = +request.deadline - this.dateProvider.now(); const pinnedPeer = opts.pinnedPeer; const blockInfo = request.blockInfo; const slotNumber = blockInfo.slotNumber; - if (timeoutMs < 100) { + if (request.requestTracker.timeoutMs < 100) { this.log.warn( `Not initiating fast reqresp for txs for ${request.type} at slot ${blockInfo.slotNumber} due to timeout`, - { timeoutMs, ...blockInfo }, + { timeoutMs: request.requestTracker.timeoutMs, ...blockInfo }, ); return; } this.log.debug( - `Starting fast reqresp for ${request.missingTxTracker.numberOfMissingTxs} txs for ${request.type} at slot ${blockInfo.slotNumber}`, - { ...blockInfo, timeoutMs, pinnedPeer }, + `Starting fast reqresp for ${request.requestTracker.numberOfMissingTxs} txs for ${request.type} at slot ${blockInfo.slotNumber}`, + { ...blockInfo, timeoutMs: request.requestTracker.timeoutMs, pinnedPeer }, ); try { @@ -289,34 +272,28 @@ export class FastTxCollection { let result: Tx[]; if (request.type === 'proposal') { result = await this.missingTxsCollector.collectTxs( 
- request.missingTxTracker, + request.requestTracker, request.blockProposal, pinnedPeer, - timeoutMs, ); } else if (request.type === 'block') { const blockTxsSource = { txHashes: request.block.body.txEffects.map(e => e.txHash), archive: request.block.archive.root, }; - result = await this.missingTxsCollector.collectTxs( - request.missingTxTracker, - blockTxsSource, - pinnedPeer, - timeoutMs, - ); + result = await this.missingTxsCollector.collectTxs(request.requestTracker, blockTxsSource, pinnedPeer); } else { throw new Error(`Unknown request type: ${(request as any).type}`); } return { validTxs: result, invalidTxHashes: [] }; }, - Array.from(request.missingTxTracker.missingTxHashes), + Array.from(request.requestTracker.missingTxHashes), { description: `reqresp for slot ${slotNumber}`, method: 'fast-req-resp', ...opts, ...request.blockInfo }, this.getAddContext(request), ); } catch (err) { this.log.error(`Error sending fast reqresp request for txs`, err, { - txs: [...request.missingTxTracker.missingTxHashes], + txs: [...request.requestTracker.missingTxHashes], ...blockInfo, }); } @@ -340,20 +317,19 @@ export class FastTxCollection { for (const tx of txs) { const txHash = tx.txHash.toString(); // Remove the tx hash from the missing set, and add it to the found set. 
- if (request.missingTxTracker.markFetched(tx)) { + if (request.requestTracker.markFetched(tx)) { this.log.trace(`Found tx ${txHash} for fast collection request`, { ...request.blockInfo, txHash: tx.txHash.toString(), type: request.type, }); - } - // If we found all txs for this request, we resolve the promise - if (request.missingTxTracker.allFetched()) { - this.log.trace(`All txs found for fast collection request`, { - ...request.blockInfo, - type: request.type, - }); - request.promise.resolve(); + if (request.requestTracker.allFetched()) { + this.log.trace(`All txs found for fast collection request`, { + ...request.blockInfo, + type: request.type, + }); + break; + } } } } @@ -366,8 +342,7 @@ export class FastTxCollection { public stopCollectingForBlocksUpTo(blockNumber: BlockNumber): void { for (const request of this.requests) { if (request.blockInfo.blockNumber <= blockNumber) { - request.promise.reject(new AbortError(`Stopped collecting txs up to block ${blockNumber}`)); - this.requests.delete(request); + request.requestTracker.cancel(); } } } @@ -379,8 +354,7 @@ export class FastTxCollection { public stopCollectingForBlocksAfter(blockNumber: BlockNumber): void { for (const request of this.requests) { if (request.blockInfo.blockNumber > blockNumber) { - request.promise.reject(new AbortError(`Stopped collecting txs after block ${blockNumber}`)); - this.requests.delete(request); + request.requestTracker.cancel(); } } } diff --git a/yarn-project/p2p/src/services/tx_collection/missing_txs_tracker.ts b/yarn-project/p2p/src/services/tx_collection/missing_txs_tracker.ts deleted file mode 100644 index d206a2fdccd9..000000000000 --- a/yarn-project/p2p/src/services/tx_collection/missing_txs_tracker.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { TxHash } from '@aztec/stdlib/tx'; -import type { Tx } from '@aztec/stdlib/tx'; - -/** - * Tracks which transactions are still missing and need to be fetched. 
- * Allows external code to mark transactions as fetched, enabling coordination - * between multiple fetching mechanisms (e.g., BatchTxRequester and Rpc Node requests). - */ -export interface IMissingTxsTracker { - /** Returns the set of transaction hashes that are still missing. */ - get missingTxHashes(): Set; - /** Size of this.missingTxHashes */ - get numberOfMissingTxs(): number; - /** Are all requested txs are fetched */ - allFetched(): boolean; - /** Checks that transaction is still missing */ - isMissing(txHash: string): boolean; - /** Marks a transaction as fetched. Returns true if it was previously missing. */ - markFetched(tx: Tx): boolean; - /** Get list of collected txs */ - get collectedTxs(): Tx[]; -} - -export class MissingTxsTracker implements IMissingTxsTracker { - public readonly collectedTxs: Tx[] = []; - - private constructor(public readonly missingTxHashes: Set) {} - - public static fromArray(hashes: TxHash[] | string[]) { - return new MissingTxsTracker(new Set(hashes.map(hash => hash.toString()))); - } - - markFetched(tx: Tx): boolean { - if (this.missingTxHashes.delete(tx.txHash.toString())) { - this.collectedTxs.push(tx); - return true; - } - return false; - } - - get numberOfMissingTxs(): number { - return this.missingTxHashes.size; - } - - allFetched(): boolean { - return this.numberOfMissingTxs === 0; - } - - isMissing(txHash: string): boolean { - return this.missingTxHashes.has(txHash.toString()); - } -} diff --git a/yarn-project/p2p/src/services/tx_collection/proposal_tx_collector.ts b/yarn-project/p2p/src/services/tx_collection/proposal_tx_collector.ts index 5955631c4247..6ba1d6166878 100644 --- a/yarn-project/p2p/src/services/tx_collection/proposal_tx_collector.ts +++ b/yarn-project/p2p/src/services/tx_collection/proposal_tx_collector.ts @@ -9,7 +9,7 @@ import type { BatchTxRequesterConfig } from '../reqresp/batch-tx-requester/confi import type { BatchTxRequesterLibP2PService } from '../reqresp/batch-tx-requester/interface.js'; 
import type { IBatchRequestTxValidator } from '../reqresp/batch-tx-requester/tx_validator.js'; import { type BlockTxsSource, ReqRespSubProtocol, chunkTxHashesRequest } from '../reqresp/index.js'; -import type { IMissingTxsTracker } from './missing_txs_tracker.js'; +import type { IRequestTracker } from './request_tracker.js'; /** * Strategy interface for collecting missing transactions for a block or proposal. @@ -18,17 +18,15 @@ import type { IMissingTxsTracker } from './missing_txs_tracker.js'; export interface MissingTxsCollector { /** * Collect missing transactions for a block or proposal. - * @param missingTxsTracker - The missing transactions tracker + * @param requestTracker - The missing transactions tracker * @param blockTxsSource - The block or proposal containing the transactions * @param pinnedPeer - Optional peer expected to have the transactions - * @param timeoutMs - Timeout in milliseconds * @returns The collected transactions */ collectTxs( - missingTxsTracker: IMissingTxsTracker, + requestTracker: IRequestTracker, blockTxsSource: BlockTxsSource, pinnedPeer: PeerId | undefined, - timeoutMs: number, ): Promise; } @@ -46,10 +44,9 @@ export class BatchTxRequesterCollector implements MissingTxsCollector { ) {} async collectTxs( - missingTxsTracker: IMissingTxsTracker, + requestTracker: IRequestTracker, blockTxsSource: BlockTxsSource, pinnedPeer: PeerId | undefined, - timeoutMs: number, ): Promise { const { batchTxRequesterSmartParallelWorkerCount: smartParallelWorkerCount, @@ -59,10 +56,9 @@ export class BatchTxRequesterCollector implements MissingTxsCollector { } = this.batchTxRequesterConfig ?? 
{}; const batchRequester = new BatchTxRequester( - missingTxsTracker, + requestTracker, blockTxsSource, pinnedPeer, - timeoutMs, this.p2pService, this.log, this.dateProvider, @@ -94,16 +90,15 @@ export class SendBatchRequestCollector implements MissingTxsCollector { ) {} async collectTxs( - missingTxsTracker: IMissingTxsTracker, + requestTracker: IRequestTracker, _blockTxsSource: BlockTxsSource, pinnedPeer: PeerId | undefined, - timeoutMs: number, ): Promise { const txs = await this.p2pService.reqResp.sendBatchRequest( ReqRespSubProtocol.TX, - chunkTxHashesRequest(Array.from(missingTxsTracker.missingTxHashes).map(TxHash.fromString)), + chunkTxHashesRequest(Array.from(requestTracker.missingTxHashes).map(TxHash.fromString)), pinnedPeer, - timeoutMs, + requestTracker.timeoutMs, this.maxPeers, this.maxRetryAttempts, ); diff --git a/yarn-project/p2p/src/services/tx_collection/request_tracker.ts b/yarn-project/p2p/src/services/tx_collection/request_tracker.ts new file mode 100644 index 000000000000..27982295b546 --- /dev/null +++ b/yarn-project/p2p/src/services/tx_collection/request_tracker.ts @@ -0,0 +1,127 @@ +import { type PromiseWithResolvers, promiseWithResolvers } from '@aztec/foundation/promise'; +import type { DateProvider } from '@aztec/foundation/timer'; +import { TxHash } from '@aztec/stdlib/tx'; +import type { Tx } from '@aztec/stdlib/tx'; + +/** + * Tracks which transactions are still missing and need to be fetched. + * Manages the request deadline and serves as the sole source of cancellation signal. + * The request is cancelled when all txs are fetched or the deadline expires. + */ +export interface IRequestTracker { + /** Returns the set of transaction hashes that are still missing. 
*/ + get missingTxHashes(): Set; + /** Size of this.missingTxHashes */ + get numberOfMissingTxs(): number; + /** Are all requested txs fetched */ + allFetched(): boolean; + /** Checks that transaction is still missing */ + isMissing(txHash: string): boolean; + /** Marks a transaction as fetched. Returns true if it was previously missing. */ + markFetched(tx: Tx): boolean; + /** Get list of collected txs */ + get collectedTxs(): Tx[]; + /** The deadline for this request. */ + get deadline(): Date; + /** Remaining time in milliseconds until deadline. Returns 0 if already past. */ + get timeoutMs(): number; + /** Checks whether the request is cancelled (deadline expired or all fetched). May trigger cancellation if deadline has passed. */ + checkCancelled(): boolean; + /** Resolves when deadline expires or all txs are fetched. */ + get cancellationToken(): Promise; + /** Externally cancel the request. */ + cancel(): void; +} + +export class RequestTracker implements IRequestTracker { + public readonly collectedTxs: Tx[] = []; + private done = false; + private readonly cancellationTokenPromise: PromiseWithResolvers; + private readonly deadlineTimer: ReturnType | undefined; + + private constructor( + public readonly missingTxHashes: Set, + public readonly deadline: Date, + private readonly dateProvider?: DateProvider, + ) { + this.cancellationTokenPromise = promiseWithResolvers(); + + if (missingTxHashes.size === 0) { + this.finish(); + return; + } + + const now = this.dateProvider?.now() ?? 
Date.now(); + const remaining = deadline.getTime() - now; + if (remaining <= 0) { + this.finish(); + } else { + this.deadlineTimer = setTimeout(() => this.finish(), remaining); + } + } + + public static create(hashes: TxHash[] | string[], deadline: Date, dateProvider?: DateProvider) { + return new RequestTracker(new Set(hashes.map(hash => hash.toString())), deadline, dateProvider); + } + + markFetched(tx: Tx): boolean { + if (this.missingTxHashes.delete(tx.txHash.toString())) { + this.collectedTxs.push(tx); + if (this.allFetched()) { + this.finish(); + } + return true; + } + return false; + } + + get numberOfMissingTxs(): number { + return this.missingTxHashes.size; + } + + allFetched(): boolean { + return this.numberOfMissingTxs === 0; + } + + isMissing(txHash: string): boolean { + return this.missingTxHashes.has(txHash.toString()); + } + + get timeoutMs(): number { + const now = this.dateProvider?.now() ?? Date.now(); + return Math.max(0, this.deadline.getTime() - now); + } + + checkCancelled(): boolean { + if (this.done) { + return true; + } + // Synchronous fallback: check deadline even if setTimeout hasn't fired yet. + // This prevents macrotask starvation in tight async loops from blocking cancellation. + const now = this.dateProvider?.now() ?? 
Date.now(); + if (now >= this.deadline.getTime()) { + this.finish(); + return true; + } + return false; + } + + get cancellationToken(): Promise { + return this.cancellationTokenPromise.promise; + } + + cancel(): void { + this.finish(); + } + + private finish() { + if (this.done) { + return; + } + this.done = true; + if (this.deadlineTimer) { + clearTimeout(this.deadlineTimer); + } + this.cancellationTokenPromise.resolve(); + } +} diff --git a/yarn-project/p2p/src/services/tx_collection/slow_tx_collection.ts b/yarn-project/p2p/src/services/tx_collection/slow_tx_collection.ts index 12f75ff61514..d2d2f3603e71 100644 --- a/yarn-project/p2p/src/services/tx_collection/slow_tx_collection.ts +++ b/yarn-project/p2p/src/services/tx_collection/slow_tx_collection.ts @@ -196,7 +196,7 @@ export class SlowTxCollection { // from mined unproven blocks it has seen in the past. const fastRequests = this.fastCollection.getFastCollectionRequests(); const fastCollectionTxs: Set = new Set( - fastRequests.values().flatMap(r => Array.from(r.missingTxTracker.missingTxHashes)), + fastRequests.values().flatMap(r => Array.from(r.requestTracker.missingTxHashes)), ); // Return all missing txs that are not in fastCollectionTxs and are ready for reqresp if requested diff --git a/yarn-project/p2p/src/services/tx_collection/tx_collection.test.ts b/yarn-project/p2p/src/services/tx_collection/tx_collection.test.ts index 5d80a38c6836..f09bdc14074c 100644 --- a/yarn-project/p2p/src/services/tx_collection/tx_collection.test.ts +++ b/yarn-project/p2p/src/services/tx_collection/tx_collection.test.ts @@ -6,7 +6,6 @@ import { sleep } from '@aztec/foundation/sleep'; import { TestDateProvider } from '@aztec/foundation/timer'; import { L2Block } from '@aztec/stdlib/block'; import { EmptyL1RollupConstants, type L1RollupConstants } from '@aztec/stdlib/epoch-helpers'; -import type { BlockProposal } from '@aztec/stdlib/p2p'; import { Tx, TxArray, TxHash } from '@aztec/stdlib/tx'; import { jest } from 
'@jest/globals'; @@ -450,7 +449,8 @@ describe('TxCollection', () => { setReqRespTxs([txs[1]]); const collected = await txCollection.collectFastForBlock(block, txHashes, { deadline }); - expect(dateProvider.now()).toBeGreaterThanOrEqual(+deadline); + // Allow 5ms tolerance: setTimeout in RequestTracker can fire slightly before dateProvider.now() catches up + expect(dateProvider.now()).toBeGreaterThanOrEqual(+deadline - 5); expect(nodes[0].getTxsByHash).toHaveBeenCalledWith(txHashes); expect(nodes[0].getTxsByHash).toHaveBeenCalledWith([txHashes[2]]); expectReqRespToHaveBeenCalledWith([txHashes[1], txHashes[2]]); @@ -509,6 +509,223 @@ describe('TxCollection', () => { expect(nodes[0].getTxsByHash).not.toHaveBeenCalled(); expect(reqResp.sendBatchRequest).not.toHaveBeenCalled(); }); + + describe('cancellation signals', () => { + /** Captures the FastCollectionRequest during collectFast, before it's removed in finally. */ + const captureRequest = () => { + let captured: FastCollectionRequest | undefined; + const origCollectFast = txCollection.fastCollection.collectFast.bind(txCollection.fastCollection); + jest.spyOn(txCollection.fastCollection, 'collectFast').mockImplementation((request, opts) => { + captured = request; + return origCollectFast(request, opts); + }); + return () => captured!; + }; + + // Step 1: notFinished() respects requestTracker.checkCancelled() + it('stops node collection loop when tracker is externally cancelled', async () => { + deadline = new Date(dateProvider.now() + 10_000); + const reqRespPromise = promiseWithResolvers(); + reqResp.sendBatchRequest.mockReturnValue(reqRespPromise.promise); + + const getRequest = captureRequest(); + const collectionPromise = txCollection.collectFastForBlock(block, txHashes, { deadline }); + + await sleep(200); + const request = getRequest(); + expect(request).toBeDefined(); + + request.requestTracker.cancel(); + reqRespPromise.resolve([]); + + const collected = await collectionPromise; + 
expect(dateProvider.now()).toBeLessThan(+deadline); + expect(collected).toEqual([]); + }); + + // Step 18: skips reqresp when all txs found during initial wait + it('skips reqresp when all txs are found during initial node wait', async () => { + config = { ...config, txCollectionFastNodesTimeoutBeforeReqRespMs: 10_000 }; + txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, [], dateProvider); + + setNodeTxs(nodes[0], txs); + const collected = await txCollection.collectFastForBlock(block, txHashes, { deadline }); + + expect(reqResp.sendBatchRequest).not.toHaveBeenCalled(); + expect(collected).toEqual(txs); + }); + + // Step 18: skips reqresp when deadline expires during initial wait + it('skips reqresp when deadline expires during initial node wait', async () => { + deadline = new Date(dateProvider.now() + 200); + config = { ...config, txCollectionFastNodesTimeoutBeforeReqRespMs: 10_000 }; + txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, [], dateProvider); + + const collected = await txCollection.collectFastForBlock(block, txHashes, { deadline }); + + expect(reqResp.sendBatchRequest).not.toHaveBeenCalled(); + expect(dateProvider.now()).toBeGreaterThanOrEqual(+deadline - 5); + expect(collected).toEqual([]); + }); + + // Node loop sleep between retries is interruptible by cancellation + it('cancellation wakes node loop sleep immediately', async () => { + deadline = new Date(dateProvider.now() + 30_000); + config = { + ...config, + txCollectionFastNodesTimeoutBeforeReqRespMs: 30_000, + txCollectionFastNodeIntervalMs: 30_000, + }; + txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, [], dateProvider); + + // Nodes return nothing, so node loops will sleep for 30s between retries + const getRequest = captureRequest(); + const collectionPromise = txCollection.collectFastForBlock(block, txHashes, { deadline }); + + // Wait for first node RPC call to complete, then 
node loop enters 30s sleep + await sleep(200); + expect(nodes[0].getTxsByHash).toHaveBeenCalled(); + + const startTime = dateProvider.now(); + getRequest().requestTracker.cancel(); + await collectionPromise; + + // Should return almost immediately, not after 30s + expect(dateProvider.now() - startTime).toBeLessThan(1000); + }); + + // Step 2: cancellationToken in initial wait race (L124) + it('exits initial wait when tracker is cancelled before reqresp starts', async () => { + deadline = new Date(dateProvider.now() + 10_000); + config = { + ...config, + txCollectionFastNodesTimeoutBeforeReqRespMs: 10_000, + txCollectionFastNodeIntervalMs: 5_000, + }; + txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, [], dateProvider); + + const getRequest = captureRequest(); + const collectionPromise = txCollection.collectFastForBlock(block, txHashes, { deadline }); + + await sleep(50); + const request = getRequest(); + expect(request).toBeDefined(); + // Reqresp should not have started yet — we're still in the initial wait + expect(reqResp.sendBatchRequest).not.toHaveBeenCalled(); + + request.requestTracker.cancel(); + await collectionPromise; + + // Should have exited without ever starting reqresp + expect(reqResp.sendBatchRequest).not.toHaveBeenCalled(); + expect(dateProvider.now()).toBeLessThan(+deadline); + }); + + // Step 3: cancellationToken in main wait race (L135) + it('exits main wait when tracker is cancelled during reqresp', async () => { + deadline = new Date(dateProvider.now() + 10_000); + config = { ...config, txCollectionFastNodesTimeoutBeforeReqRespMs: 1 }; + txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, [], dateProvider); + + const reqRespPromise = promiseWithResolvers(); + reqResp.sendBatchRequest.mockReturnValue(reqRespPromise.promise); + + const getRequest = captureRequest(); + const collectionPromise = txCollection.collectFastForBlock(block, txHashes, { deadline }); + + await 
sleep(200); + expect(reqResp.sendBatchRequest).toHaveBeenCalled(); + + getRequest().requestTracker.cancel(); + reqRespPromise.resolve([]); + + await collectionPromise; + expect(dateProvider.now()).toBeLessThan(+deadline); + }); + + // Step 4: requestTracker.cancel() in finally block + it('tracker is cancelled after collectFast exits normally', async () => { + setNodeTxs(nodes[0], txs); + const getRequest = captureRequest(); + + await txCollection.collectFastForBlock(block, txHashes, { deadline }); + + expect(getRequest().requestTracker.checkCancelled()).toBe(true); + }); + + // Step 5: requestTracker.cancel() in stop() + it('stop() cancels all request trackers', async () => { + deadline = new Date(dateProvider.now() + 10_000); + const reqRespPromise = promiseWithResolvers(); + reqResp.sendBatchRequest.mockReturnValue(reqRespPromise.promise); + + const getRequest = captureRequest(); + const collectionPromise = txCollection.collectFastForBlock(block, txHashes, { deadline }); + + await sleep(100); + const request = getRequest(); + expect(request).toBeDefined(); + expect(request.requestTracker.checkCancelled()).toBe(false); + + await txCollection.stop(); + + expect(request.requestTracker.checkCancelled()).toBe(true); + reqRespPromise.resolve([]); + await collectionPromise; + }); + + // Step 8: stopCollectingForBlocksUpTo cancels in-flight fast collection + it('stopCollectingForBlocksUpTo cancels in-flight fast collection', async () => { + deadline = new Date(dateProvider.now() + 10_000); + const reqRespPromise = promiseWithResolvers(); + reqResp.sendBatchRequest.mockReturnValue(reqRespPromise.promise); + + const collectionPromise = txCollection.collectFastForBlock(block, txHashes, { deadline }); + + await sleep(100); + txCollection.stopCollectingForBlocksUpTo(block.number); + reqRespPromise.resolve([]); + + const collected = await collectionPromise; + expect(dateProvider.now()).toBeLessThan(+deadline); + expect(collected).toEqual([]); + }); + + // Step 9: 
stopCollectingForBlocksAfter cancels in-flight fast collection + it('stopCollectingForBlocksAfter cancels in-flight fast collection', async () => { + deadline = new Date(dateProvider.now() + 10_000); + const reqRespPromise = promiseWithResolvers(); + reqResp.sendBatchRequest.mockReturnValue(reqRespPromise.promise); + + const collectionPromise = txCollection.collectFastForBlock(block, txHashes, { deadline }); + + await sleep(100); + txCollection.stopCollectingForBlocksAfter(BlockNumber(block.number - 1)); + reqRespPromise.resolve([]); + + const collected = await collectionPromise; + expect(dateProvider.now()).toBeLessThan(+deadline); + expect(collected).toEqual([]); + }); + + // Step 17: request is cleaned up by finally block (not by stopCollectingForBlocks) + it('request is cleaned up by finally block after stopCollectingForBlocksUpTo', async () => { + deadline = new Date(dateProvider.now() + 10_000); + const reqRespPromise = promiseWithResolvers(); + reqResp.sendBatchRequest.mockReturnValue(reqRespPromise.promise); + + const collectionPromise = txCollection.collectFastForBlock(block, txHashes, { deadline }); + + await sleep(100); + expect(txCollection.fastCollection.requests.size).toBe(1); + + txCollection.stopCollectingForBlocksUpTo(block.number); + reqRespPromise.resolve([]); + await collectionPromise; + + expect(txCollection.fastCollection.requests.size).toBe(0); + }); + }); }); describe('file store collection', () => { @@ -577,10 +794,7 @@ describe('TxCollection', () => { class TestFastTxCollection extends FastTxCollection { // eslint-disable-next-line aztec-custom/no-non-primitive-in-collections declare requests: Set; - declare collectFast: ( - request: FastCollectionRequest, - opts: { proposal?: BlockProposal; deadline: Date; pinnedPeer?: PeerId }, - ) => Promise; + declare collectFast: (request: FastCollectionRequest, opts: { pinnedPeer?: PeerId }) => Promise; } class TestTxCollection extends TxCollection { diff --git 
a/yarn-project/p2p/src/services/tx_collection/tx_collection.ts b/yarn-project/p2p/src/services/tx_collection/tx_collection.ts index 9797b6bd34a5..c49d0e2e4a05 100644 --- a/yarn-project/p2p/src/services/tx_collection/tx_collection.ts +++ b/yarn-project/p2p/src/services/tx_collection/tx_collection.ts @@ -1,7 +1,7 @@ import { BlockNumber } from '@aztec/foundation/branded-types'; import { compactArray } from '@aztec/foundation/collection'; import { type Logger, createLogger } from '@aztec/foundation/log'; -import { type PromiseWithResolvers, RunningPromise } from '@aztec/foundation/promise'; +import { RunningPromise } from '@aztec/foundation/promise'; import { sleep } from '@aztec/foundation/sleep'; import { DateProvider } from '@aztec/foundation/timer'; import type { L2Block, L2BlockInfo } from '@aztec/stdlib/block'; @@ -19,7 +19,7 @@ import type { TxCollectionConfig } from './config.js'; import { FastTxCollection } from './fast_tx_collection.js'; import { FileStoreTxCollection } from './file_store_tx_collection.js'; import type { FileStoreTxSource } from './file_store_tx_source.js'; -import type { IMissingTxsTracker } from './missing_txs_tracker.js'; +import type { IRequestTracker } from './request_tracker.js'; import { SlowTxCollection, getProofDeadlineForSlot } from './slow_tx_collection.js'; import { type TxAddContext, TxCollectionSink } from './tx_collection_sink.js'; import type { TxSource } from './tx_source.js'; @@ -33,10 +33,8 @@ export type FastCollectionRequestInput = | { type: 'proposal'; blockProposal: BlockProposal; blockNumber: BlockNumber }; export type FastCollectionRequest = FastCollectionRequestInput & { - missingTxTracker: IMissingTxsTracker; - deadline: Date; + requestTracker: IRequestTracker; blockInfo: L2BlockInfo; - promise: PromiseWithResolvers; }; /** diff --git a/yarn-project/p2p/src/testbench/p2p_client_testbench_worker.ts b/yarn-project/p2p/src/testbench/p2p_client_testbench_worker.ts index 32c46967fc65..1d3224d94a33 100644 --- 
a/yarn-project/p2p/src/testbench/p2p_client_testbench_worker.ts +++ b/yarn-project/p2p/src/testbench/p2p_client_testbench_worker.ts @@ -40,7 +40,7 @@ import type { IBatchRequestTxValidator } from '../services/reqresp/batch-tx-requ import { RateLimitStatus } from '../services/reqresp/rate-limiter/rate_limiter.js'; import type { ReqResp } from '../services/reqresp/reqresp.js'; import type { PeerDiscoveryService } from '../services/service.js'; -import { MissingTxsTracker } from '../services/tx_collection/missing_txs_tracker.js'; +import { RequestTracker } from '../services/tx_collection/request_tracker.js'; import { AlwaysTrueCircuitVerifier } from '../test-helpers/index.js'; import { BENCHMARK_CONSTANTS, @@ -273,10 +273,9 @@ async function runAggregatorBenchmark( noopTxValidator, ); const fetchedTxs = await collector.collectTxs( - MissingTxsTracker.fromArray(txHashes), + RequestTracker.create(txHashes, new Date(Date.now() + timeoutMs)), blockProposal, pinnedPeer, - timeoutMs, ); const durationMs = timer.ms(); return { @@ -293,10 +292,9 @@ async function runAggregatorBenchmark( BENCHMARK_CONSTANTS.FIXED_MAX_RETRY_ATTEMPTS, ); const fetchedTxs = await collector.collectTxs( - MissingTxsTracker.fromArray(txHashes), + RequestTracker.create(txHashes, new Date(Date.now() + timeoutMs)), blockProposal, pinnedPeer, - timeoutMs, ); const durationMs = timer.ms(); return { From 2c0c00dfd6416b02fb5ae11f0c58c48b398e9151 Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Wed, 18 Mar 2026 10:53:20 -0400 Subject: [PATCH 34/41] fix: increase default postgres disk size from 1Gi to 10Gi (#21741) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary The HA slashing protection Postgres DB on staging-public ran out of disk space (1Gi default) causing sequencers to stop producing blocks. This increases the default PVC size to 10Gi in both the Helm chart defaults and the Terraform module variable. 
## Changes - `spartan/aztec-postgres/values.yaml`: persistence size 1Gi → 10Gi - `spartan/terraform/modules/validator-ha-postgres/variables.tf`: STORAGE_SIZE default 1Gi → 10Gi **Note:** Existing PVCs will need to be manually resized or recreated — this only affects new deployments. ClaudeBox log: https://claudebox.work/s/4e6dbeb8dfd49038?run=2 --- spartan/aztec-postgres/values.yaml | 2 +- spartan/terraform/modules/validator-ha-postgres/variables.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/spartan/aztec-postgres/values.yaml b/spartan/aztec-postgres/values.yaml index 045759c882bc..349a60ac0cc3 100644 --- a/spartan/aztec-postgres/values.yaml +++ b/spartan/aztec-postgres/values.yaml @@ -23,7 +23,7 @@ resources: persistence: enabled: true - size: 1Gi + size: 10Gi storageClass: "" # Use default service: diff --git a/spartan/terraform/modules/validator-ha-postgres/variables.tf b/spartan/terraform/modules/validator-ha-postgres/variables.tf index a88ef0845321..a0cc38d6f107 100644 --- a/spartan/terraform/modules/validator-ha-postgres/variables.tf +++ b/spartan/terraform/modules/validator-ha-postgres/variables.tf @@ -42,5 +42,5 @@ variable "MEMORY_LIMIT" { variable "STORAGE_SIZE" { type = string - default = "1Gi" + default = "10Gi" } From ac5100e1bea3889936bc2300d2bb49a3c2e615af Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Wed, 18 Mar 2026 11:26:13 -0400 Subject: [PATCH 35/41] fix: update batch_tx_requester tests to use RequestTracker (#21734) ## Summary - Three tests in the "Smart peer demotion" describe block used the removed `MissingTxsTracker` class and an old `BatchTxRequester` constructor signature that included `deadline` as a separate argument. - Updated them to use `RequestTracker.create()` (which wraps the deadline into a `Date`) and the current constructor signature, matching all other tests in the file. 
## Context PR #21496 refactored `BatchTxRequester` to take an `IRequestTracker` (which owns the deadline) instead of a separate deadline parameter, and removed `MissingTxsTracker`. Three tests in the "Smart peer demotion" section were not updated, causing `tsgo` type-check failures on `merge-train/spartan`. ClaudeBox log: https://claudebox.work/s/5d20c8f4f47c8f3a?run=1 --- .../batch-tx-requester/batch_tx_requester.test.ts | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.test.ts b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.test.ts index 56e38ebdb07e..a26e1d79c872 100644 --- a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.test.ts +++ b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.test.ts @@ -1801,11 +1801,11 @@ describe('BatchTxRequester', () => { }; }); + const tracker = RequestTracker.create(missing, new Date(Date.now() + deadline)); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + tracker, blockProposal, undefined, - deadline, mockP2PService, logger, new DateProvider(), @@ -1896,11 +1896,11 @@ describe('BatchTxRequester', () => { }; }); + const tracker = RequestTracker.create(missing, new Date(Date.now() + deadline)); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + tracker, blockProposal, undefined, - deadline, mockP2PService, logger, new DateProvider(), @@ -1991,11 +1991,11 @@ describe('BatchTxRequester', () => { }; }); + const tracker = RequestTracker.create(missing, new Date(Date.now() + deadline)); const requester = new BatchTxRequester( - MissingTxsTracker.fromArray(missing), + tracker, blockProposal, undefined, - deadline, mockP2PService, logger, new DateProvider(), From 1a97dcf82fabc96821d2e334f88471a2f8ecbb9c Mon Sep 17 00:00:00 2001 From: spypsy Date: Wed, 18 Mar 2026 15:31:25 +0000 Subject: 
[PATCH 36/41] fix: deflake attempt for l1_tx_utils --- yarn-project/ethereum/src/l1_tx_utils/l1_tx_utils.test.ts | 3 +++ 1 file changed, 3 insertions(+) diff --git a/yarn-project/ethereum/src/l1_tx_utils/l1_tx_utils.test.ts b/yarn-project/ethereum/src/l1_tx_utils/l1_tx_utils.test.ts index 0e876f48cd1c..5b428bd52b10 100644 --- a/yarn-project/ethereum/src/l1_tx_utils/l1_tx_utils.test.ts +++ b/yarn-project/ethereum/src/l1_tx_utils/l1_tx_utils.test.ts @@ -1639,6 +1639,9 @@ describe('L1TxUtils', () => { state.txConfigOverrides.checkIntervalMs = 100; state.txConfigOverrides.txTimeoutMs = 60_000; state.txConfigOverrides.cancelTxOnTimeout = false; + // Limit to 1 speed-up to prevent a second speed-up from firing between the test dropping + // txs and the timeout, which would re-add a pending tx to the mempool and corrupt the nonce. + state.txConfigOverrides.maxSpeedUpAttempts = 1; expect(gasUtils.state).toBe(TxUtilsState.SENT); From e68eb570ba95dc0a3cd451ad40fdc7abfa0f6286 Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Wed, 18 Mar 2026 13:07:22 -0300 Subject: [PATCH 37/41] chore: replace dead BOOTSTRAP_TO env var with bootstrap.sh build arg (#21744) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Motivation `BOOTSTRAP_TO=yarn-project ./bootstrap.sh` was used in several places to build up to yarn-project, but the env var is no longer read by any code — it became dead after the Makefile introduction. Running it just runs a full `./bootstrap.sh` ignoring the variable entirely. ## Approach Replace all occurrences with `./bootstrap.sh build yarn-project`, which calls `prep` (submodule update + toolchain checks) then `make yarn-project`. 
## Changes - **bootstrap.sh**: Replace in `ci-docs` case - **container-builds/avm-fuzzing-container/src/Dockerfile**: Replace in build step - **yarn-project/CLAUDE.md**: Update developer instructions - **.claude/skills/{backport,fix-pr,rebase-pr}**: Update skill instructions --- .claude/skills/backport/SKILL.md | 2 +- bootstrap.sh | 2 +- container-builds/avm-fuzzing-container/src/Dockerfile | 2 +- yarn-project/.claude/skills/fix-pr/SKILL.md | 2 +- yarn-project/.claude/skills/rebase-pr/SKILL.md | 2 +- yarn-project/CLAUDE.md | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.claude/skills/backport/SKILL.md b/.claude/skills/backport/SKILL.md index 80d0666d12db..a0b6407642f8 100644 --- a/.claude/skills/backport/SKILL.md +++ b/.claude/skills/backport/SKILL.md @@ -147,7 +147,7 @@ git diff --name-only | grep -v '^yarn-project/' || true If changes exist outside yarn-project, run bootstrap from the repo root: ```bash -BOOTSTRAP_TO=yarn-project ./bootstrap.sh +./bootstrap.sh build yarn-project ``` Fix any build errors that arise from the backport adaptation. 
diff --git a/bootstrap.sh b/bootstrap.sh index e03d8a879c8e..01918cd9def2 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -827,7 +827,7 @@ case "$cmd" in "ci-docs") export CI=1 export USE_TEST_CACHE=1 - BOOTSTRAP_TO=yarn-project ./bootstrap.sh + ./bootstrap.sh build yarn-project docs/bootstrap.sh ci ;; "ci-barretenberg-debug") diff --git a/container-builds/avm-fuzzing-container/src/Dockerfile b/container-builds/avm-fuzzing-container/src/Dockerfile index 9c508e5faa9b..fd3cfea877eb 100644 --- a/container-builds/avm-fuzzing-container/src/Dockerfile +++ b/container-builds/avm-fuzzing-container/src/Dockerfile @@ -32,7 +32,7 @@ RUN git clone https://github.com/AztecProtocol/aztec-packages.git --depth 1 --br WORKDIR /root/aztec-packages # Run bootstrap up to yarn-project (release-image needs docker which isn't available) -RUN BOOTSTRAP_TO=yarn-project ./bootstrap.sh +RUN ./bootstrap.sh build yarn-project # Build the tx fuzzer WORKDIR /root/aztec-packages/barretenberg/cpp diff --git a/yarn-project/.claude/skills/fix-pr/SKILL.md b/yarn-project/.claude/skills/fix-pr/SKILL.md index 15df2bb38577..bfc822c1ad8c 100644 --- a/yarn-project/.claude/skills/fix-pr/SKILL.md +++ b/yarn-project/.claude/skills/fix-pr/SKILL.md @@ -69,7 +69,7 @@ git diff origin/...HEAD --name-only | grep -v '^yarn-project/' If yes, run bootstrap: ```bash -(cd $(git rev-parse --show-toplevel) && BOOTSTRAP_TO=yarn-project ./bootstrap.sh) +(cd $(git rev-parse --show-toplevel) && ./bootstrap.sh build yarn-project) ``` ### Phase 4: Fix Based on Failure Type diff --git a/yarn-project/.claude/skills/rebase-pr/SKILL.md b/yarn-project/.claude/skills/rebase-pr/SKILL.md index 5bf3e731536f..ab57da396c56 100644 --- a/yarn-project/.claude/skills/rebase-pr/SKILL.md +++ b/yarn-project/.claude/skills/rebase-pr/SKILL.md @@ -69,7 +69,7 @@ git diff origin/...HEAD --name-only | grep -v '^yarn-project/' If yes, run bootstrap from repo root: ```bash -(cd $(git rev-parse --show-toplevel) && BOOTSTRAP_TO=yarn-project 
./bootstrap.sh) +(cd $(git rev-parse --show-toplevel) && ./bootstrap.sh build yarn-project) ``` ### Step 5: Verify Build diff --git a/yarn-project/CLAUDE.md b/yarn-project/CLAUDE.md index 961cad2a13fc..db8e14659e89 100644 --- a/yarn-project/CLAUDE.md +++ b/yarn-project/CLAUDE.md @@ -54,7 +54,7 @@ Git commands work from any subdirectory of a repo—there is no need to `cd` to - Rebasing on a branch that has changes outside `yarn-project` ```bash -(cd $(git rev-parse --show-toplevel) && BOOTSTRAP_TO=yarn-project ./bootstrap.sh) +(cd $(git rev-parse --show-toplevel) && ./bootstrap.sh build yarn-project) ``` Bootstrap takes several minutes to run. Be patient. From 5bb159bcb1ef968cb089d0f623506fdd3a352a3c Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Wed, 18 Mar 2026 13:14:52 -0300 Subject: [PATCH 38/41] fix(sequencer): extract gas and blob configs from valid requests only (A-677) (#21747) ## Motivation The `sendRequests` method in the sequencer publisher correctly filters L1 publish requests by `lastValidL2Slot` to discard expired ones. However, gas and blob configs were extracted from the unfiltered request list, meaning expired requests' gas configurations leaked into the aggregated gas limit calculation. This could over-estimate gas and overpay for L1 transactions. Fixes A-677 ## Approach Changed the gas and blob config extraction to use the filtered `validRequests` list instead of the unfiltered `requestsToProcess` list, so only non-expired requests contribute to the aggregated gas limit. 
## Changes - **sequencer-client**: Use `validRequests` instead of `requestsToProcess` when extracting `gasConfigs` and `blobConfigs` in `sendRequests` - **sequencer-client (tests)**: Added test verifying that expired requests' gas configs are excluded from the aggregated gas limit Co-authored-by: Claude Opus 4.6 (1M context) --- .../src/publisher/sequencer-publisher.test.ts | 48 +++++++++++++++++++ .../src/publisher/sequencer-publisher.ts | 4 +- 2 files changed, 50 insertions(+), 2 deletions(-) diff --git a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.test.ts b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.test.ts index 70fbaa1ebc2c..1665e6de07c0 100644 --- a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.test.ts +++ b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.test.ts @@ -519,6 +519,54 @@ describe('SequencerPublisher', () => { expect((publisher as any).requests.length).toEqual(0); }); + it('does not include gas config from expired requests', async () => { + const currentL2Slot = publisher.getCurrentL2Slot(); + + // Add an expired request with a gas config + publisher.addRequest({ + action: 'vote-offenses', + request: { + to: mockRollupAddress, + data: encodeFunctionData({ + abi: EmpireBaseAbi, + functionName: 'signal', + args: [EthAddress.random().toString()], + }), + }, + lastValidL2Slot: SlotNumber(1), // expired + gasConfig: { gasLimit: 500_000n }, + checkSuccess: () => true, + }); + + // Add a valid request with a gas config + publisher.addRequest({ + action: 'propose', + request: { + to: mockRollupAddress, + data: encodeFunctionData({ + abi: EmpireBaseAbi, + functionName: 'signal', + args: [EthAddress.random().toString()], + }), + }, + lastValidL2Slot: SlotNumber(Number(currentL2Slot) + 10), // valid + gasConfig: { gasLimit: 100_000n }, + checkSuccess: () => true, + }); + + forwardSpy.mockResolvedValue({ + receipt: proposeTxReceipt, + errorMsg: undefined, + }); + + await 
publisher.sendRequests(); + + expect(forwardSpy).toHaveBeenCalledTimes(1); + // The gas config should only include the valid request's gas (100_000), not the expired one (500_000) + const txConfig = forwardSpy.mock.calls[0][2]; + expect(txConfig?.gasLimit).toEqual(100_000n); + }); + it('does not signal for payload when quorum is reached', async () => { const { govPayload } = mockGovernancePayload(); diff --git a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts index c0bd98f6bf6f..1baac9255c7b 100644 --- a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts +++ b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts @@ -399,8 +399,8 @@ export class SequencerPublisher { // @note - we can only have one blob config per bundle // find requests with gas and blob configs // See https://github.com/AztecProtocol/aztec-packages/issues/11513 - const gasConfigs = requestsToProcess.filter(request => request.gasConfig).map(request => request.gasConfig); - const blobConfigs = requestsToProcess.filter(request => request.blobConfig).map(request => request.blobConfig); + const gasConfigs = validRequests.filter(request => request.gasConfig).map(request => request.gasConfig); + const blobConfigs = validRequests.filter(request => request.blobConfig).map(request => request.blobConfig); if (blobConfigs.length > 1) { throw new Error('Multiple blob configs found'); From aed98c149955f32ce135eef6fb66b1ba387c6cba Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Wed, 18 Mar 2026 12:52:41 -0400 Subject: [PATCH 39/41] fix(test): fix flaky keystore reload test (#21749) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary - Use deterministic BN254 secret keys instead of `Fr.random()` to eliminate randomness in committee ordering / proposer selection - Guard `teardown?.()` in afterAll to 
prevent `TypeError: teardown is not a function` when beforeAll times out - Increase jest timeout from 300s to 540s as safety margin ## Root Cause The test stakes 4 validators on L1 but only loads 3 into the keystore. When the RANDAO seed (derived from random BN254 keys) causes the missing 4th validator to be selected as proposer for consecutive slots, the sequencer cannot produce blocks. With 72s per L2 slot and a 300s timeout (~4 slot opportunities), there's a ~1/256 chance all slots have the wrong proposer, causing a timeout. The secondary `TypeError: teardown is not a function` error occurs because `teardown` is never assigned when `beforeAll` times out before `setup()` returns. ## Test plan - CI should pass — the deterministic keys produce a predictable committee ordering, and the increased timeout provides additional margin. ClaudeBox log: https://claudebox.work/s/91bb3fd09c0c7f41?run=1 --- .../end-to-end/src/e2e_sequencer/reload_keystore.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yarn-project/end-to-end/src/e2e_sequencer/reload_keystore.test.ts b/yarn-project/end-to-end/src/e2e_sequencer/reload_keystore.test.ts index 221c3086d924..3497fbe42f30 100644 --- a/yarn-project/end-to-end/src/e2e_sequencer/reload_keystore.test.ts +++ b/yarn-project/end-to-end/src/e2e_sequencer/reload_keystore.test.ts @@ -32,7 +32,7 @@ const COMMITTEE_SIZE = VALIDATOR_COUNT; const INITIAL_KEYSTORE_COUNT = 3; describe('e2e_reload_keystore', () => { - jest.setTimeout(300_000); + jest.setTimeout(540_000); let teardown: () => Promise; let aztecNode: AztecNode; @@ -81,7 +81,7 @@ describe('e2e_reload_keystore', () => { attester: EthAddress.fromString(validatorAddresses[i]), withdrawer: EthAddress.fromString(validatorAddresses[i]), privateKey: key, - bn254SecretKey: new SecretValue(Fr.random().toBigInt()), + bn254SecretKey: new SecretValue(new Fr(i + 1).toBigInt()), })); ({ @@ -105,7 +105,7 @@ describe('e2e_reload_keystore', () => { }); afterAll(async () 
=> { - await teardown(); + await teardown?.(); await rm(keyStoreDirectory, { recursive: true, force: true }); }); From e7aa6cc526a5598594be25dd0aaccde5cbe5a1df Mon Sep 17 00:00:00 2001 From: AztecBot Date: Wed, 18 Mar 2026 17:32:39 +0000 Subject: [PATCH 40/41] fix(test): fix flaky duplicate_attestation_slash test --- .../src/e2e_p2p/duplicate_attestation_slash.test.ts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts b/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts index d705a95f8c3e..fba21745caff 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts @@ -143,6 +143,9 @@ describe('e2e_p2p_duplicate_attestation_slash', () => { attestToEquivocatedProposals: true, // Attest to all proposals - creates duplicate attestations broadcastEquivocatedProposals: true, // Don't abort checkpoint building on duplicate block proposals dontStartSequencer: true, + // Prevent HA peer proposals from being added to the archiver, so both + // malicious nodes build their own blocks instead of one yielding to the other. + skipPushProposedBlocksToArchiver: true, }, t.ctx.dateProvider!, BOOT_NODE_UDP_PORT + 1, @@ -162,6 +165,9 @@ describe('e2e_p2p_duplicate_attestation_slash', () => { attestToEquivocatedProposals: true, // Attest to all proposals - creates duplicate attestations broadcastEquivocatedProposals: true, // Don't abort checkpoint building on duplicate block proposals dontStartSequencer: true, + // Prevent HA peer proposals from being added to the archiver, so both + // malicious nodes build their own blocks instead of one yielding to the other. 
+ skipPushProposedBlocksToArchiver: true, }, t.ctx.dateProvider!, BOOT_NODE_UDP_PORT + 2, From 4902133e2a57c9fcac36160da8f12b95b1e391e2 Mon Sep 17 00:00:00 2001 From: Maddiaa <47148561+Maddiaa0@users.noreply.github.com> Date: Wed, 18 Mar 2026 22:31:31 +0000 Subject: [PATCH 41/41] feat(pipeline): introduce pipeline views for building (#21026) ## Overview Epoch cache operations now return two views, the current slot and the pipelined slot / epoch ## Testing The main test which showcases this functionality is epoch_mbps.pipeline, this runs the sequencers with the pipeline mode enabled. At the moment it only expects 1 block per slot as it still waits until the proposal slot to send the checkpoint to l1. For this PR it uses a blocking sleep here, that stops all sequencers for this test. This is addressed in a pr stacked ontop. --- yarn-project/archiver/src/archiver.ts | 1 - .../aztec-node/src/sentinel/sentinel.test.ts | 16 +- .../aztec-node/src/sentinel/sentinel.ts | 8 +- .../e2e_epochs/epochs_mbps.parallel.test.ts | 16 +- .../epochs_mbps.pipeline.parallel.test.ts | 254 ++++++++++++++++++ .../e2e_l1_publisher/e2e_l1_publisher.test.ts | 13 +- .../end-to-end/src/e2e_synching.test.ts | 7 +- .../epoch-cache/src/epoch_cache.test.ts | 94 ++++++- yarn-project/epoch-cache/src/epoch_cache.ts | 102 +++++-- .../epoch-cache/src/test/test_epoch_cache.ts | 91 ++++++- yarn-project/ethereum/src/contracts/rollup.ts | 7 +- yarn-project/foundation/src/config/env_var.ts | 2 +- .../p2p/src/client/p2p_client.test.ts | 1 + yarn-project/p2p/src/client/p2p_client.ts | 10 +- .../p2p_client.integration_reqresp.test.ts | 4 +- .../proposal_tx_collector_worker.ts | 5 +- .../src/mem_pools/tx_pool_v2/interfaces.ts | 8 +- .../attestation_validator.test.ts | 93 +++---- .../attestation_validator.ts | 9 +- .../fisherman_attestation_validator.test.ts | 58 ++-- .../msg_validators/clock_tolerance.test.ts | 21 ++ .../p2p/src/msg_validators/clock_tolerance.ts | 7 +- .../proposal_validator.test.ts | 18 +- 
.../proposal_validator/proposal_validator.ts | 11 +- .../services/libp2p/libp2p_service.test.ts | 62 ++--- .../p2p/src/test-helpers/testbench-utils.ts | 31 ++- .../testbench/p2p_client_testbench_worker.ts | 6 +- .../sequencer-publisher-factory.test.ts | 1 + .../src/publisher/sequencer-publisher.test.ts | 42 +-- .../src/publisher/sequencer-publisher.ts | 22 +- .../sequencer/checkpoint_proposal_job.test.ts | 51 +++- .../checkpoint_proposal_job.timing.test.ts | 6 +- .../src/sequencer/checkpoint_proposal_job.ts | 180 +++++++++---- .../checkpoint_voter.ha.integration.test.ts | 2 + .../sequencer-client/src/sequencer/events.ts | 2 +- .../src/sequencer/sequencer.test.ts | 87 +++++- .../src/sequencer/sequencer.ts | 133 +++++---- .../stdlib/src/config/pipelining-config.ts | 4 +- .../txe/src/state_machine/mock_epoch_cache.ts | 45 +++- .../src/block_proposal_handler.ts | 14 +- .../src/validator.ha.integration.test.ts | 2 +- .../validator-client/src/validator.test.ts | 18 +- .../validator-client/src/validator.ts | 41 +-- 43 files changed, 1206 insertions(+), 399 deletions(-) create mode 100644 yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.pipeline.parallel.test.ts diff --git a/yarn-project/archiver/src/archiver.ts b/yarn-project/archiver/src/archiver.ts index 307a9674d505..b34c42661457 100644 --- a/yarn-project/archiver/src/archiver.ts +++ b/yarn-project/archiver/src/archiver.ts @@ -95,7 +95,6 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra * @param dataStore - An archiver data store for storage & retrieval of blocks, encrypted logs & contract data. * @param config - Archiver configuration options. * @param blobClient - Client for retrieving blob data. - * @param epochCache - Cache for epoch-related data. * @param dateProvider - Provider for current date/time. * @param instrumentation - Instrumentation for metrics and tracing. * @param l1Constants - L1 rollup constants. 
diff --git a/yarn-project/aztec-node/src/sentinel/sentinel.test.ts b/yarn-project/aztec-node/src/sentinel/sentinel.test.ts index f3b7e2f5ea3a..671de2cff77c 100644 --- a/yarn-project/aztec-node/src/sentinel/sentinel.test.ts +++ b/yarn-project/aztec-node/src/sentinel/sentinel.test.ts @@ -1,4 +1,4 @@ -import type { EpochCache } from '@aztec/epoch-cache'; +import { EpochCache } from '@aztec/epoch-cache'; import { BlockNumber, CheckpointNumber, EpochNumber, SlotNumber } from '@aztec/foundation/branded-types'; import { compactArray, times } from '@aztec/foundation/collection'; import { Secp256k1Signer } from '@aztec/foundation/crypto/secp256k1-signer'; @@ -79,7 +79,15 @@ describe('sentinel', () => { rollupManaLimit: Number.MAX_SAFE_INTEGER, }; - epochCache.getEpochAndSlotNow.mockReturnValue({ epoch, slot, ts, nowMs: ts * 1000n }); + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch, + slot, + ts, + nowMs: ts * 1000n, + }); + epochCache.getSlotNow.mockReturnValue(slot); + epochCache.getEpochNow.mockReturnValue(epoch); + epochCache.isProposerPipeliningEnabled.mockReturnValue(false); epochCache.getL1Constants.mockReturnValue(l1Constants); sentinel = new TestSentinel(epochCache, archiver, p2p, store, config, blockStream); @@ -590,6 +598,10 @@ describe('sentinel', () => { ts, nowMs: ts * 1000n, }); + epochCache.getSlotNow.mockReturnValue(slot); + epochCache.getTargetSlot.mockReturnValue(slot); + epochCache.getEpochNow.mockReturnValue(epochNumber); + epochCache.getTargetEpoch.mockReturnValue(epochNumber); archiver.getBlockHeader.calledWith(blockNumber).mockResolvedValue(mockBlock.header); archiver.getL1Constants.mockResolvedValue(l1Constants); epochCache.getL1Constants.mockReturnValue(l1Constants); diff --git a/yarn-project/aztec-node/src/sentinel/sentinel.ts b/yarn-project/aztec-node/src/sentinel/sentinel.ts index eed7d863513a..8c07e06056d3 100644 --- a/yarn-project/aztec-node/src/sentinel/sentinel.ts +++ b/yarn-project/aztec-node/src/sentinel/sentinel.ts @@ -88,7 +88,7 
@@ export class Sentinel extends (EventEmitter as new () => WatcherEmitter) impleme /** Loads initial slot and initializes blockstream. We will not process anything at or before the initial slot. */ protected async init() { - this.initialSlot = this.epochCache.getEpochAndSlotNow().slot; + this.initialSlot = this.epochCache.getSlotNow(); const startingBlock = BlockNumber(await this.archiver.getBlockNumber()); this.logger.info(`Starting validator sentinel with initial slot ${this.initialSlot} and block ${startingBlock}`); this.blockStream = new L2BlockStream(this.archiver, this.l2TipsStore, this, this.logger, { startingBlock }); @@ -264,7 +264,7 @@ export class Sentinel extends (EventEmitter as new () => WatcherEmitter) impleme * and we don't have that data if we were offline during the period. */ public async work() { - const { slot: currentSlot } = this.epochCache.getEpochAndSlotNow(); + const currentSlot = this.epochCache.getSlotNow(); try { // Manually sync the block stream to ensure we have the latest data. // Note we never `start` the blockstream, so it loops at the same pace as we do. @@ -436,7 +436,7 @@ export class Sentinel extends (EventEmitter as new () => WatcherEmitter) impleme ? fromEntries(await Promise.all(validators.map(async v => [v.toString(), await this.store.getHistory(v)]))) : await this.store.getHistories(); - const slotNow = this.epochCache.getEpochAndSlotNow().slot; + const slotNow = this.epochCache.getSlotNow(); fromSlot ??= SlotNumber(Math.max((this.lastProcessedSlot ?? slotNow) - this.store.getHistoryLength(), 0)); toSlot ??= this.lastProcessedSlot ?? slotNow; @@ -464,7 +464,7 @@ export class Sentinel extends (EventEmitter as new () => WatcherEmitter) impleme return undefined; } - const slotNow = this.epochCache.getEpochAndSlotNow().slot; + const slotNow = this.epochCache.getSlotNow(); const effectiveFromSlot = fromSlot ?? SlotNumber(Math.max((this.lastProcessedSlot ?? 
slotNow) - this.store.getHistoryLength(), 0)); const effectiveToSlot = toSlot ?? this.lastProcessedSlot ?? slotNow; diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.parallel.test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.parallel.test.ts index 69aeef5670e0..0f293e34e4aa 100644 --- a/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.parallel.test.ts +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.parallel.test.ts @@ -15,6 +15,7 @@ import { CheckpointNumber, SlotNumber } from '@aztec/foundation/branded-types'; import { times, timesAsync } from '@aztec/foundation/collection'; import { SecretValue } from '@aztec/foundation/config'; import { retryUntil } from '@aztec/foundation/retry'; +import { sleep } from '@aztec/foundation/sleep'; import { bufferToHex } from '@aztec/foundation/string'; import { executeTimeout } from '@aztec/foundation/timer'; import { TestContract } from '@aztec/noir-test-contracts.js/Test'; @@ -552,11 +553,20 @@ describe('e2e_epochs/epochs_mbps', () => { }); await waitUntilL1Timestamp(test.l1Client, targetTimestamp, undefined, test.L2_SLOT_DURATION_IN_S * 3); - // Send both pre-proved txs simultaneously, waiting for them to be checkpointed. + // Send the deploy tx first and give it time to propagate to all validators, + // then send the call tx. Priority fees are a safety net, but arrival ordering + // ensures the deploy tx is in the pool before the call tx regardless of gossip timing. 
const timeout = test.L2_SLOT_DURATION_IN_S * 5; - logger.warn(`Sending both txs and waiting for checkpointed receipts`); + logger.warn(`Sending deploy tx first, then call tx`); + const deployTxHash = await deployTx.send({ wait: NO_WAIT }); + await sleep(1000); + const callTxHash = await callTx.send({ wait: NO_WAIT }); const [deployReceipt, callReceipt] = await executeTimeout( - () => Promise.all([deployTx.send({ wait: { timeout } }), callTx.send({ wait: { timeout } })]), + () => + Promise.all([ + waitForTx(context.aztecNode, deployTxHash, { timeout }), + waitForTx(context.aztecNode, callTxHash, { timeout }), + ]), timeout * 1000, ); logger.warn(`Both txs checkpointed`, { diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.pipeline.parallel.test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.pipeline.parallel.test.ts new file mode 100644 index 000000000000..6c135a0b0d63 --- /dev/null +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.pipeline.parallel.test.ts @@ -0,0 +1,254 @@ +import type { Archiver } from '@aztec/archiver'; +import type { AztecNodeService } from '@aztec/aztec-node'; +import { AztecAddress, EthAddress } from '@aztec/aztec.js/addresses'; +import { NO_WAIT } from '@aztec/aztec.js/contracts'; +import { Fr } from '@aztec/aztec.js/fields'; +import type { Logger } from '@aztec/aztec.js/log'; +import { waitForTx } from '@aztec/aztec.js/node'; +import { RollupContract } from '@aztec/ethereum/contracts'; +import type { Operator } from '@aztec/ethereum/deploy-aztec-l1-contracts'; +import { asyncMap } from '@aztec/foundation/async-map'; +import { BlockNumber, CheckpointNumber, SlotNumber } from '@aztec/foundation/branded-types'; +import { times, timesAsync } from '@aztec/foundation/collection'; +import { SecretValue } from '@aztec/foundation/config'; +import { bufferToHex } from '@aztec/foundation/string'; +import { executeTimeout } from '@aztec/foundation/timer'; +import { TestContract } from '@aztec/noir-test-contracts.js/Test'; 
+import type { SequencerEvents } from '@aztec/sequencer-client'; +import { getTimestampForSlot } from '@aztec/stdlib/epoch-helpers'; + +import { jest } from '@jest/globals'; +import { privateKeyToAccount } from 'viem/accounts'; + +import { type EndToEndContext, getPrivateKeyFromIndex } from '../fixtures/utils.js'; +import { TestWallet } from '../test-wallet/test_wallet.js'; +import { proveInteraction } from '../test-wallet/utils.js'; +import { EpochsTestContext } from './epochs_test.js'; + +jest.setTimeout(1000 * 60 * 20); + +const NODE_COUNT = 4; +const EXPECTED_BLOCKS_PER_CHECKPOINT = 1; + +// Send enough transactions to trigger multiple blocks within a checkpoint assuming 2 txs per block. +const TX_COUNT = 10; + +/** + * E2E tests for proposer pipelining with Multiple Blocks Per Slot (MBPS). + * Verifies that with pipelining enabled, the block proposer in slot N is the validator + * scheduled on L1 for slot N+1 (the proposer view uses a +1 slot offset). + */ +describe('e2e_epochs/epochs_mbps_pipeline', () => { + let context: EndToEndContext; + let logger: Logger; + let rollup: RollupContract; + let archiver: Archiver; + + let test: EpochsTestContext; + let validators: (Operator & { privateKey: `0x${string}` })[]; + let nodes: AztecNodeService[]; + let contract: TestContract; + let wallet: TestWallet; + let from: AztecAddress; + + /** Creates validators and sets up the test context with MBPS and proposer pipelining. 
*/ + async function setupTest(opts: { + syncChainTip: 'proposed' | 'checkpointed'; + minTxsPerBlock?: number; + maxTxsPerBlock?: number; + }) { + const { syncChainTip = 'checkpointed', ...setupOpts } = opts; + + validators = times(NODE_COUNT, i => { + const privateKey = bufferToHex(getPrivateKeyFromIndex(i + 3)!); + const attester = EthAddress.fromString(privateKeyToAccount(privateKey).address); + return { attester, withdrawer: attester, privateKey, bn254SecretKey: new SecretValue(Fr.random().toBigInt()) }; + }); + + test = await EpochsTestContext.setup({ + numberOfAccounts: 1, + initialValidators: validators, + enableProposerPipelining: true, // <- yehaw + mockGossipSubNetwork: true, + disableAnvilTestWatcher: true, + startProverNode: true, + aztecEpochDuration: 4, + enforceTimeTable: true, + ethereumSlotDuration: 4, + aztecSlotDuration: 36, + blockDurationMs: 8000, + l1PublishingTime: 2, + attestationPropagationTime: 0.5, + aztecTargetCommitteeSize: 3, + ...setupOpts, + pxeOpts: { syncChainTip }, + }); + + ({ context, logger, rollup } = test); + wallet = context.wallet; + from = context.accounts[0]; + + logger.warn(`Stopping sequencer in initial aztec node.`); + await context.sequencer!.stop(); + + logger.warn(`Initial setup complete. 
Starting ${NODE_COUNT} validator nodes.`); + // Clear inherited coinbase so each validator derives coinbase from its own attester key + nodes = await asyncMap(validators, ({ privateKey }) => + test.createValidatorNode([privateKey], { dontStartSequencer: true, coinbase: undefined }), + ); + logger.warn(`Started ${NODE_COUNT} validator nodes.`, { validators: validators.map(v => v.attester.toString()) }); + + wallet.updateNode(nodes[0]); + archiver = nodes[0].getBlockSource() as Archiver; + + contract = await test.registerTestContract(wallet); + logger.warn(`Test setup completed.`, { validators: validators.map(v => v.attester.toString()) }); + } + + /** Retrieves all checkpoints from the archiver, checks that one has the target block count, and returns its number. */ + async function assertMultipleBlocksPerSlot(targetBlockCount: number, logger: Logger): Promise { + const checkpoints = await archiver.getCheckpoints(CheckpointNumber(1), 50); + logger.warn(`Retrieved ${checkpoints.length} checkpoints from archiver`, { + checkpoints: checkpoints.map(pc => pc.checkpoint.getStats()), + }); + + let expectedBlockNumber = checkpoints[0].checkpoint.blocks[0].number; + let multiBlockCheckpointNumber: CheckpointNumber | undefined; + + for (const checkpoint of checkpoints) { + const blockCount = checkpoint.checkpoint.blocks.length; + if (blockCount >= targetBlockCount && multiBlockCheckpointNumber === undefined) { + multiBlockCheckpointNumber = checkpoint.checkpoint.number; + } + logger.warn(`Checkpoint ${checkpoint.checkpoint.number} has ${blockCount} blocks`, { + checkpoint: checkpoint.checkpoint.getStats(), + }); + + for (let i = 0; i < blockCount; i++) { + const block = checkpoint.checkpoint.blocks[i]; + expect(block.indexWithinCheckpoint).toBe(i); + expect(block.checkpointNumber).toBe(checkpoint.checkpoint.number); + expect(block.number).toBe(expectedBlockNumber); + expectedBlockNumber++; + } + } + + expect(multiBlockCheckpointNumber).toBeDefined(); + return 
multiBlockCheckpointNumber!; + } + + /** Waits until a specific multi-block checkpoint is proven. */ + async function waitForProvenCheckpoint(targetCheckpoint: CheckpointNumber) { + const provenTimeout = test.L2_SLOT_DURATION_IN_S * test.epochDuration * 4; + logger.warn(`Waiting for checkpoint ${targetCheckpoint} to be proven (timeout=${provenTimeout}s)`); + await test.waitUntilProvenCheckpointNumber(targetCheckpoint, provenTimeout); + logger.warn(`Proven checkpoint advanced to ${test.monitor.provenCheckpointNumber}`); + } + + /** + * Asserts pipelining by comparing the build slot (from block-proposed events) against + * the submission slot (from block headers). With pipelining, the block is built in slot N + * but its header carries submission slot N+1. + */ + async function assertProposerPipelining( + blockProposedEvents: { blockNumber: BlockNumber; slot: SlotNumber; buildSlot: SlotNumber }[], + logger: Logger, + ) { + const checkpoints = await archiver.getCheckpoints(CheckpointNumber(1), 50); + const allBlocks = checkpoints.flatMap(pc => pc.checkpoint.blocks); + + logger.warn(`assertProposerPipelining: ${allBlocks.length} blocks, ${blockProposedEvents.length} events`, { + blockNumbers: allBlocks.map(b => b.number), + eventBlockNumbers: blockProposedEvents.map(e => e.blockNumber), + }); + + let foundPipelining = false; + + for (const block of allBlocks) { + const headerSlot = block.header.globalVariables.slotNumber; // submission slot (N+1) + const coinbase = block.header.globalVariables.coinbase; + + // Find the block-proposed event for this block (use Number() for safe comparison) + const event = blockProposedEvents.find(e => Number(e.blockNumber) === Number(block.number)); + // if there is no event, then it was probably block number one - which was proposed in setup + if (!event) { + continue; + } + + const buildSlot = event.buildSlot; // build slot (N) + + // Verify the pipelining offset: block built in slot N, submitted in slot N+1 + 
expect(Number(headerSlot)).toBe(Number(buildSlot) + 1); + foundPipelining = true; + + // Verify coinbase matches the expected proposer for the submission slot + const expectedProposer = await rollup.getProposerAt(getTimestampForSlot(headerSlot, test.constants)); + expect(coinbase).toEqual(expectedProposer); + + logger.warn(`Block ${block.number}: buildSlot=${buildSlot}, submissionSlot=${headerSlot}, coinbase=${coinbase}`, { + blockNumber: block.number, + buildSlot, + headerSlot, + coinbase: coinbase.toString(), + expectedProposer: expectedProposer.toString(), + }); + } + + expect(foundPipelining).toBe(true); + logger.warn(`Pipelining assertion passed for ${allBlocks.length} blocks`); + } + + afterEach(async () => { + jest.restoreAllMocks(); + await test?.teardown(); + }); + + it('pipelining builds blocks using slot plus 1 proposer and proves them', async () => { + await setupTest({ syncChainTip: 'checkpointed', minTxsPerBlock: 1, maxTxsPerBlock: 2 }); + + // Subscribe to block-proposed events to capture build slots + const blockProposedEvents: { blockNumber: BlockNumber; slot: SlotNumber; buildSlot: SlotNumber }[] = []; + const sequencers = nodes.map(n => n.getSequencer()!); + for (const sequencer of sequencers) { + sequencer.getSequencer().on('block-proposed', (args: Parameters[0]) => { + logger.warn(`block-proposed event: blockNumber=${args.blockNumber}, slot=${args.slot}`, args); + blockProposedEvents.push({ + blockNumber: args.blockNumber, + slot: args.slot, + buildSlot: args.buildSlot, + }); + }); + } + + const initialCheckpointNumber = await rollup.getCheckpointNumber(); + logger.warn(`Initial checkpoint number: ${initialCheckpointNumber}`); + + // Pre-prove and send transactions + const txs = await timesAsync(TX_COUNT, i => + proveInteraction(context.wallet, contract.methods.emit_nullifier(new Fr(i + 1)), { from }), + ); + const txHashes = await Promise.all(txs.map(tx => tx.send({ wait: NO_WAIT }))); + logger.warn(`Sent ${txHashes.length} transactions`, { 
txs: txHashes }); + + // Start the sequencers + await Promise.all(sequencers.map(s => s.start())); + logger.warn(`Started all sequencers`); + + // Wait until all txs are mined + const timeout = test.L2_SLOT_DURATION_IN_S * 5; + await executeTimeout( + () => Promise.all(txHashes.map(txHash => waitForTx(context.aztecNode, txHash, { timeout }))), + timeout * 1000, + ); + logger.warn(`All txs have been mined`); + + // Verify MBPS works with pipelining + const multiBlockCheckpoint = await assertMultipleBlocksPerSlot(EXPECTED_BLOCKS_PER_CHECKPOINT, logger); + + // Verify the pipelining offset: build slot N vs submission slot N+1 + await assertProposerPipelining(blockProposedEvents, logger); + + // Verify proving still works end-to-end with pipelined proposers + await waitForProvenCheckpoint(multiBlockCheckpoint); + }); +}); diff --git a/yarn-project/end-to-end/src/e2e_l1_publisher/e2e_l1_publisher.test.ts b/yarn-project/end-to-end/src/e2e_l1_publisher/e2e_l1_publisher.test.ts index 59a7dd5c6178..45f01b5834fc 100644 --- a/yarn-project/end-to-end/src/e2e_l1_publisher/e2e_l1_publisher.test.ts +++ b/yarn-project/end-to-end/src/e2e_l1_publisher/e2e_l1_publisher.test.ts @@ -274,6 +274,7 @@ describe('L1Publisher integration', () => { { l1ChainId: chainId, ethereumSlotDuration: config.ethereumSlotDuration, + aztecSlotDuration: config.aztecSlotDuration, }, { blobClient, @@ -606,7 +607,7 @@ describe('L1Publisher integration', () => { const checkpointAttestations = validators.map(v => makeCheckpointAttestationFromCheckpoint(checkpoint, v)); const attestations = orderAttestations(checkpointAttestations, committee!); - const canPropose = await publisher.canProposeAtNextEthBlock(new Fr(GENESIS_ARCHIVE_ROOT), proposer!); + const canPropose = await publisher.canProposeAt(new Fr(GENESIS_ARCHIVE_ROOT), proposer!); expect(canPropose?.slot).toEqual(block.header.getSlot()); await publisher.validateBlockHeader(checkpoint.header); @@ -630,7 +631,7 @@ describe('L1Publisher integration', () => { 
const attestations = orderAttestations(checkpointAttestations, committee!).reverse(); const attestationsAndSigners = new CommitteeAttestationsAndSigners(attestations); - const canPropose = await publisher.canProposeAtNextEthBlock(new Fr(GENESIS_ARCHIVE_ROOT), proposer!); + const canPropose = await publisher.canProposeAt(new Fr(GENESIS_ARCHIVE_ROOT), proposer!); expect(canPropose?.slot).toEqual(block.header.getSlot()); await publisher.validateBlockHeader(checkpoint.header); @@ -645,7 +646,7 @@ describe('L1Publisher integration', () => { const checkpointAttestations = validators.map(v => makeCheckpointAttestationFromCheckpoint(checkpoint, v)); const attestations = orderAttestations(checkpointAttestations, committee!); - const canPropose = await publisher.canProposeAtNextEthBlock(new Fr(GENESIS_ARCHIVE_ROOT), proposer!); + const canPropose = await publisher.canProposeAt(new Fr(GENESIS_ARCHIVE_ROOT), proposer!); expect(canPropose?.slot).toEqual(block.header.getSlot()); await publisher.validateBlockHeader(checkpoint.header); @@ -670,7 +671,7 @@ describe('L1Publisher integration', () => { const checkpointAttestations = validators.map(v => makeCheckpointAttestationFromCheckpoint(checkpoint, v)); const attestations = orderAttestations(checkpointAttestations, committee!); - const canPropose = await publisher.canProposeAtNextEthBlock(new Fr(GENESIS_ARCHIVE_ROOT), proposer!); + const canPropose = await publisher.canProposeAt(new Fr(GENESIS_ARCHIVE_ROOT), proposer!); expect(canPropose?.slot).toEqual(block.header.getSlot()); await publisher.validateBlockHeader(checkpoint.header); @@ -742,8 +743,8 @@ describe('L1Publisher integration', () => { // We cannot propose directly, we need to assume the previous checkpoint is invalidated const genesis = new Fr(GENESIS_ARCHIVE_ROOT); logger.warn(`Checking can propose at next eth block on top of genesis ${genesis}`); - expect(await publisher.canProposeAtNextEthBlock(genesis, proposer!)).toBeUndefined(); - const canPropose = await 
publisher.canProposeAtNextEthBlock(genesis, proposer!, { forcePendingCheckpointNumber }); + expect(await publisher.canProposeAt(genesis, proposer!)).toBeUndefined(); + const canPropose = await publisher.canProposeAt(genesis, proposer!, { forcePendingCheckpointNumber }); expect(canPropose?.slot).toEqual(block.header.getSlot()); // Same for validation diff --git a/yarn-project/end-to-end/src/e2e_synching.test.ts b/yarn-project/end-to-end/src/e2e_synching.test.ts index c93560f43064..dbf533d16e94 100644 --- a/yarn-project/end-to-end/src/e2e_synching.test.ts +++ b/yarn-project/end-to-end/src/e2e_synching.test.ts @@ -72,7 +72,11 @@ import { TestWallet } from './test-wallet/test_wallet.js'; const AZTEC_GENERATE_TEST_DATA = !!process.env.AZTEC_GENERATE_TEST_DATA; const START_TIME = 1893456000; // 2030 01 01 00 00 const RUN_THE_BIG_ONE = !!process.env.RUN_THE_BIG_ONE; -const ETHEREUM_SLOT_DURATION = getL1ContractsConfigEnvVars().ethereumSlotDuration; + +const l1ContractsEnvVars = getL1ContractsConfigEnvVars(); +const ETHEREUM_SLOT_DURATION = l1ContractsEnvVars.ethereumSlotDuration; +const AZTEC_SLOT_DURATION = l1ContractsEnvVars.aztecSlotDuration; + const MINT_AMOUNT = 1000n; enum TxComplexity { @@ -442,6 +446,7 @@ describe('e2e_synching', () => { { l1ChainId: 31337, ethereumSlotDuration: ETHEREUM_SLOT_DURATION, + aztecSlotDuration: AZTEC_SLOT_DURATION, }, { blobClient, diff --git a/yarn-project/epoch-cache/src/epoch_cache.test.ts b/yarn-project/epoch-cache/src/epoch_cache.test.ts index e1e3ffba22e1..7cc79f3aa96f 100644 --- a/yarn-project/epoch-cache/src/epoch_cache.test.ts +++ b/yarn-project/epoch-cache/src/epoch_cache.test.ts @@ -10,7 +10,7 @@ import { afterEach, beforeEach, describe, expect, it, jest } from '@jest/globals import { type MockProxy, mock } from 'jest-mock-extended'; import type { GetBlockReturnType } from 'viem'; -import { EpochCache, type EpochCommitteeInfo } from './epoch_cache.js'; +import { EpochCache, type EpochCommitteeInfo, 
PROPOSER_PIPELINING_SLOT_OFFSET } from './epoch_cache.js'; class TestEpochCache extends EpochCache { public seedCache(epoch: EpochNumber, committeeInfo: EpochCommitteeInfo): void { @@ -20,6 +20,10 @@ class TestEpochCache extends EpochCache { public setCacheSize(size: number): void { this.config.cacheSize = size; } + + public setProposerPipelining(enabled: boolean): void { + this.enableProposerPipelining = enabled; + } } describe('EpochCache', () => { @@ -164,9 +168,7 @@ describe('EpochCache', () => { // generate a random slot greater than `epochDuration` const targetSlot = BigInt(epochDuration) + BigInt(Math.floor(Math.random() * 1000)); - const targetEpoch = targetSlot / BigInt(epochDuration); - const epochStartSlot = targetEpoch * BigInt(epochDuration); - const epochStartTimestamp = l1GenesisTime + epochStartSlot * BigInt(slotDuration); + const slotTimestamp = l1GenesisTime + targetSlot * BigInt(slotDuration); const expectedCommittee = [EthAddress.fromString('0x000000000000000000000000000000000000BEEF')]; const expectedSeed = Buffer32.fromBigInt(999n); @@ -176,10 +178,10 @@ describe('EpochCache', () => { await epochCache.getCommittee(SlotNumber.fromBigInt(targetSlot)); expect(rollupContract.getCommitteeAt).toHaveBeenCalledTimes(1); - expect(rollupContract.getCommitteeAt).toHaveBeenCalledWith(epochStartTimestamp); + expect(rollupContract.getCommitteeAt).toHaveBeenCalledWith(slotTimestamp); expect(rollupContract.getSampleSeedAt).toHaveBeenCalledTimes(1); - expect(rollupContract.getSampleSeedAt).toHaveBeenCalledWith(epochStartTimestamp); + expect(rollupContract.getSampleSeedAt).toHaveBeenCalledWith(slotTimestamp); }); it('should cache multiple epochs', async () => { @@ -285,4 +287,84 @@ describe('EpochCache', () => { /Cannot query committee for future epoch.*with timestamp.*\(current L1 time is/, ); }); + + describe('proposer pipelining', () => { + it('getTargetSlot() returns slotNow when pipelining disabled', () => { + const initialTime = Number(l1GenesisTime) * 
1000; + jest.setSystemTime(initialTime); + + expect(epochCache.isProposerPipeliningEnabled()).toBe(false); + expect(epochCache.getTargetSlot()).toEqual(epochCache.getSlotNow()); + }); + + it('getTargetSlot() returns slotNow + 1 when pipelining enabled', () => { + epochCache.setProposerPipelining(true); + const initialTime = Number(l1GenesisTime) * 1000; + jest.setSystemTime(initialTime); + + const slotNow = epochCache.getSlotNow(); + expect(epochCache.getTargetSlot()).toEqual(SlotNumber(slotNow + PROPOSER_PIPELINING_SLOT_OFFSET)); + }); + + it('getTargetEpoch() returns epoch for slotNow + 1 when pipelining enabled', () => { + epochCache.setProposerPipelining(true); + // Set time to mid-epoch 0 + const midEpochSlot = 5; + const initialTime = (Number(l1GenesisTime) + midEpochSlot * SLOT_DURATION) * 1000; + jest.setSystemTime(initialTime); + + // Target slot is midEpochSlot + 1, still within epoch 0 + expect(epochCache.getTargetEpoch()).toEqual(EpochNumber(0)); + }); + + it('getTargetEpochAndSlotInNextL1Slot() returns nextL1Slot + 1 when pipelining enabled', () => { + epochCache.setProposerPipelining(true); + const initialTime = Number(l1GenesisTime) * 1000; + jest.setSystemTime(initialTime); + + const baseResult = epochCache.getEpochAndSlotInNextL1Slot(); + const targetResult = epochCache.getTargetEpochAndSlotInNextL1Slot(); + + expect(targetResult.slot).toEqual(SlotNumber(baseResult.slot + PROPOSER_PIPELINING_SLOT_OFFSET)); + }); + + it('getTargetEpochAndSlotInNextL1Slot() handles epoch boundary', () => { + epochCache.setProposerPipelining(true); + // Set time to last slot of epoch 0 (slot EPOCH_DURATION - 1) + const lastSlot = EPOCH_DURATION - 1; + const initialTime = (Number(l1GenesisTime) + lastSlot * SLOT_DURATION) * 1000; + jest.setSystemTime(initialTime); + + const targetResult = epochCache.getTargetEpochAndSlotInNextL1Slot(); + + // The target slot should be at least EPOCH_DURATION (first slot of epoch 1) + 
expect(targetResult.slot).toBeGreaterThanOrEqual(EPOCH_DURATION); + expect(targetResult.epoch).toEqual(EpochNumber(1)); + }); + + it('getTargetAndNextSlot() returns same as getCurrentAndNextSlot when pipelining disabled', () => { + const initialTime = Number(l1GenesisTime) * 1000; + jest.setSystemTime(initialTime); + + expect(epochCache.isProposerPipeliningEnabled()).toBe(false); + + const { currentSlot, nextSlot: currentNext } = epochCache.getCurrentAndNextSlot(); + const { targetSlot, nextSlot: targetNext } = epochCache.getTargetAndNextSlot(); + + expect(targetSlot).toEqual(currentSlot); + expect(targetNext).toEqual(currentNext); + }); + + it('getTargetAndNextSlot() applies pipeline offset when enabled', () => { + epochCache.setProposerPipelining(true); + const initialTime = Number(l1GenesisTime) * 1000; + jest.setSystemTime(initialTime); + + const slotNow = epochCache.getSlotNow(); + const { targetSlot, nextSlot } = epochCache.getTargetAndNextSlot(); + + expect(targetSlot).toEqual(SlotNumber(slotNow + PROPOSER_PIPELINING_SLOT_OFFSET)); + expect(nextSlot).toEqual(epochCache.getTargetEpochAndSlotInNextL1Slot().slot); + }); + }); }); diff --git a/yarn-project/epoch-cache/src/epoch_cache.ts b/yarn-project/epoch-cache/src/epoch_cache.ts index e961706d815c..3ecb033c1f02 100644 --- a/yarn-project/epoch-cache/src/epoch_cache.ts +++ b/yarn-project/epoch-cache/src/epoch_cache.ts @@ -12,16 +12,19 @@ import { getSlotAtTimestamp, getSlotRangeForEpoch, getTimestampForSlot, - getTimestampRangeForEpoch, } from '@aztec/stdlib/epoch-helpers'; import { createPublicClient, encodeAbiParameters, keccak256 } from 'viem'; import { type EpochCacheConfig, getEpochCacheConfigEnvVars } from './config.js'; +/** When proposer pipelining is enabled, the proposer builds one slot ahead. */ +export const PROPOSER_PIPELINING_SLOT_OFFSET = 1; + +/** Flat return type for compound epoch/slot getters. 
*/ export type EpochAndSlot = { - epoch: EpochNumber; slot: SlotNumber; + epoch: EpochNumber; ts: bigint; }; @@ -37,11 +40,21 @@ export type SlotTag = 'now' | 'next' | SlotNumber; export interface EpochCacheInterface { getCommittee(slot: SlotTag | undefined): Promise; + getSlotNow(): SlotNumber; + getTargetSlot(): SlotNumber; + getEpochNow(): EpochNumber; + getTargetEpoch(): EpochNumber; getEpochAndSlotNow(): EpochAndSlot & { nowMs: bigint }; - getEpochAndSlotInNextL1Slot(): EpochAndSlot & { now: bigint }; + getEpochAndSlotInNextL1Slot(): EpochAndSlot & { nowSeconds: bigint }; + /** Returns epoch/slot info for the next L1 slot with pipeline offset applied. */ + getTargetEpochAndSlotInNextL1Slot(): EpochAndSlot & { nowSeconds: bigint }; + isProposerPipeliningEnabled(): boolean; + isEscapeHatchOpen(epoch: EpochNumber): Promise; + isEscapeHatchOpenAtSlot(slot: SlotTag): Promise; getProposerIndexEncoding(epoch: EpochNumber, slot: SlotNumber, seed: bigint): `0x${string}`; computeProposerIndex(slot: SlotNumber, epoch: EpochNumber, seed: bigint, size: bigint): bigint; getCurrentAndNextSlot(): { currentSlot: SlotNumber; nextSlot: SlotNumber }; + getTargetAndNextSlot(): { targetSlot: SlotNumber; nextSlot: SlotNumber }; getProposerAttesterAddressInSlot(slot: SlotNumber): Promise; getRegisteredValidators(): Promise; isInCommittee(slot: SlotTag, validator: EthAddress): Promise; @@ -65,6 +78,8 @@ export class EpochCache implements EpochCacheInterface { private lastValidatorRefresh = 0; private readonly log: Logger = createLogger('epoch-cache'); + protected enableProposerPipelining: boolean; + constructor( private rollup: RollupContract, private readonly l1constants: L1RollupConstants & { @@ -72,10 +87,12 @@ export class EpochCache implements EpochCacheInterface { lagInEpochsForRandao: number; }, private readonly dateProvider: DateProvider = new DateProvider(), - protected readonly config = { cacheSize: 12, validatorRefreshIntervalSeconds: 60 }, + protected readonly config = { 
cacheSize: 12, validatorRefreshIntervalSeconds: 60, enableProposerPipelining: false }, ) { + this.enableProposerPipelining = this.config.enableProposerPipelining; this.log.debug(`Initialized EpochCache`, { l1constants, + enableProposerPipelining: this.enableProposerPipelining, }); } @@ -135,13 +152,39 @@ export class EpochCache implements EpochCacheInterface { rollupManaLimit: Number(rollupManaLimit), }; - return new EpochCache(rollup, l1RollupConstants, deps.dateProvider); + return new EpochCache(rollup, l1RollupConstants, deps.dateProvider, { + cacheSize: 12, + validatorRefreshIntervalSeconds: 60, + enableProposerPipelining: config.enableProposerPipelining, + }); } public getL1Constants(): L1RollupConstants { return this.l1constants; } + public isProposerPipeliningEnabled(): boolean { + return this.enableProposerPipelining; + } + + public getSlotNow(): SlotNumber { + return this.getEpochAndSlotNow().slot; + } + + public getTargetSlot(): SlotNumber { + const slotNow = this.getSlotNow(); + const offset = this.isProposerPipeliningEnabled() ? 
PROPOSER_PIPELINING_SLOT_OFFSET : 0; + return SlotNumber(slotNow + offset); + } + + public getEpochNow(): EpochNumber { + return this.getEpochAndSlotNow().epoch; + } + + public getTargetEpoch(): EpochNumber { + return getEpochAtSlot(this.getTargetSlot(), this.l1constants); + } + public getEpochAndSlotNow(): EpochAndSlot & { nowMs: bigint } { const nowMs = BigInt(this.dateProvider.now()); const nowSeconds = nowMs / 1000n; @@ -153,23 +196,33 @@ export class EpochCache implements EpochCacheInterface { } private getEpochAndSlotAtSlot(slot: SlotNumber): EpochAndSlot { - const epoch = getEpochAtSlot(slot, this.l1constants); - const ts = getTimestampRangeForEpoch(epoch, this.l1constants)[0]; - return { epoch, ts, slot }; + return this.getEpochAndSlotAtTimestamp(getTimestampForSlot(slot, this.l1constants)); } - public getEpochAndSlotInNextL1Slot(): EpochAndSlot & { now: bigint } { - const now = this.nowInSeconds(); - const nextSlotTs = now + BigInt(this.l1constants.ethereumSlotDuration); - return { ...this.getEpochAndSlotAtTimestamp(nextSlotTs), now }; + public getEpochAndSlotInNextL1Slot(): EpochAndSlot & { nowSeconds: bigint } { + const nowSeconds = this.nowInSeconds(); + const nextSlotTs = nowSeconds + BigInt(this.l1constants.ethereumSlotDuration); + return { ...this.getEpochAndSlotAtTimestamp(nextSlotTs), nowSeconds }; + } + + public getTargetEpochAndSlotInNextL1Slot(): EpochAndSlot & { nowSeconds: bigint } { + if (!this.isProposerPipeliningEnabled()) { + return this.getEpochAndSlotInNextL1Slot(); + } + + const result = this.getEpochAndSlotInNextL1Slot(); + const offset = PROPOSER_PIPELINING_SLOT_OFFSET; + const targetSlot = SlotNumber(result.slot + offset); + return { ...result, slot: targetSlot, epoch: getEpochAtSlot(targetSlot, this.l1constants) }; } private getEpochAndSlotAtTimestamp(ts: bigint): EpochAndSlot { const slot = getSlotAtTimestamp(ts, this.l1constants); + const epoch = getEpochNumberAtTimestamp(ts, this.l1constants); return { - epoch: 
getEpochNumberAtTimestamp(ts, this.l1constants), - ts: getTimestampForSlot(slot, this.l1constants), slot, + epoch, + ts: getTimestampForSlot(slot, this.l1constants), }; } @@ -202,7 +255,7 @@ export class EpochCache implements EpochCacheInterface { public async isEscapeHatchOpenAtSlot(slot: SlotTag = 'now'): Promise { const epoch = slot === 'now' - ? this.getEpochAndSlotNow().epoch + ? this.getEpochNow() : slot === 'next' ? this.getEpochAndSlotInNextL1Slot().epoch : getEpochAtSlot(slot, this.l1constants); @@ -237,7 +290,7 @@ export class EpochCache implements EpochCacheInterface { return epochData; } - private getEpochAndTimestamp(slot: SlotTag = 'now') { + private getEpochAndTimestamp(slot: SlotTag = 'now'): { epoch: EpochNumber; ts: bigint } { if (slot === 'now') { return this.getEpochAndSlotNow(); } else if (slot === 'next') { @@ -287,13 +340,24 @@ export class EpochCache implements EpochCacheInterface { return BigInt(keccak256(this.getProposerIndexEncoding(epoch, slot, seed))) % size; } - /** Returns the current and next L2 slot numbers. */ + /** Returns the current and next L2 slot in next eth L1 Slot. 
*/
  public getCurrentAndNextSlot(): { currentSlot: SlotNumber; nextSlot: SlotNumber } {
-    const current = this.getEpochAndSlotNow();
+    const currentSlot = this.getSlotNow();
     const next = this.getEpochAndSlotInNextL1Slot();
 
     return {
-      currentSlot: current.slot,
+      currentSlot,
+      nextSlot: next.slot,
+    };
+  }
+
+  /** Returns the target and next L2 slot in the next L1 slot */
+  public getTargetAndNextSlot(): { targetSlot: SlotNumber; nextSlot: SlotNumber } {
+    const targetSlot = this.getTargetSlot();
+    const next = this.getTargetEpochAndSlotInNextL1Slot();
+
+    return {
+      targetSlot,
       nextSlot: next.slot,
     };
   }
diff --git a/yarn-project/epoch-cache/src/test/test_epoch_cache.ts b/yarn-project/epoch-cache/src/test/test_epoch_cache.ts
index ecb2b3b47c2b..b9e50a06128f 100644
--- a/yarn-project/epoch-cache/src/test/test_epoch_cache.ts
+++ b/yarn-project/epoch-cache/src/test/test_epoch_cache.ts
@@ -3,7 +3,13 @@ import { EthAddress } from '@aztec/foundation/eth-address';
 import type { L1RollupConstants } from '@aztec/stdlib/epoch-helpers';
 import { getEpochAtSlot, getSlotAtTimestamp, getTimestampRangeForEpoch } from '@aztec/stdlib/epoch-helpers';
 
-import type { EpochAndSlot, EpochCacheInterface, EpochCommitteeInfo, SlotTag } from '../epoch_cache.js';
+import {
+  type EpochAndSlot,
+  type EpochCacheInterface,
+  type EpochCommitteeInfo,
+  PROPOSER_PIPELINING_SLOT_OFFSET,
+  type SlotTag,
+} from '../epoch_cache.js';
 
 /** Default L1 constants for testing. 
*/ const DEFAULT_L1_CONSTANTS: L1RollupConstants = { @@ -32,6 +38,7 @@ export class TestEpochCache implements EpochCacheInterface { private seed: bigint = 0n; private registeredValidators: EthAddress[] = []; private l1Constants: L1RollupConstants; + private proposerPipeliningEnabled = false; constructor(l1Constants: Partial = {}) { this.l1Constants = { ...DEFAULT_L1_CONSTANTS, ...l1Constants }; @@ -104,6 +111,10 @@ export class TestEpochCache implements EpochCacheInterface { return this.l1Constants; } + setProposerPipeliningEnabled(enabled: boolean): void { + this.proposerPipeliningEnabled = enabled; + } + getCommittee(_slot?: SlotTag): Promise { const epoch = getEpochAtSlot(this.currentSlot, this.l1Constants); return Promise.resolve({ @@ -114,19 +125,58 @@ export class TestEpochCache implements EpochCacheInterface { }); } + getSlotNow(): SlotNumber { + return this.currentSlot; + } + + getTargetSlot(): SlotNumber { + return this.proposerPipeliningEnabled + ? SlotNumber(this.currentSlot + PROPOSER_PIPELINING_SLOT_OFFSET) + : this.currentSlot; + } + + getEpochNow(): EpochNumber { + return getEpochAtSlot(this.currentSlot, this.l1Constants); + } + + getTargetEpoch(): EpochNumber { + return getEpochAtSlot(this.getTargetSlot(), this.l1Constants); + } + + isProposerPipeliningEnabled(): boolean { + return this.proposerPipeliningEnabled; + } + getEpochAndSlotNow(): EpochAndSlot & { nowMs: bigint } { - const epoch = getEpochAtSlot(this.currentSlot, this.l1Constants); - const ts = getTimestampRangeForEpoch(epoch, this.l1Constants)[0]; - return { epoch, slot: this.currentSlot, ts, nowMs: ts * 1000n }; + const epochNow = getEpochAtSlot(this.currentSlot, this.l1Constants); + const ts = getTimestampRangeForEpoch(epochNow, this.l1Constants)[0]; + return { + epoch: epochNow, + slot: this.currentSlot, + ts, + nowMs: ts * 1000n, + }; } - getEpochAndSlotInNextL1Slot(): EpochAndSlot & { now: bigint } { - const now = getTimestampRangeForEpoch(getEpochAtSlot(this.currentSlot, 
this.l1Constants), this.l1Constants)[0]; - const nextSlotTs = now + BigInt(this.l1Constants.ethereumSlotDuration); + getEpochAndSlotInNextL1Slot(): EpochAndSlot & { nowSeconds: bigint } { + const nowTs = getTimestampRangeForEpoch(getEpochAtSlot(this.currentSlot, this.l1Constants), this.l1Constants)[0]; + const nextSlotTs = nowTs + BigInt(this.l1Constants.ethereumSlotDuration); const nextSlot = getSlotAtTimestamp(nextSlotTs, this.l1Constants); - const epoch = getEpochAtSlot(nextSlot, this.l1Constants); - const ts = getTimestampRangeForEpoch(epoch, this.l1Constants)[0]; - return { epoch, slot: nextSlot, ts, now }; + const epochNow = getEpochAtSlot(nextSlot, this.l1Constants); + const ts = getTimestampRangeForEpoch(epochNow, this.l1Constants)[0]; + return { + epoch: epochNow, + slot: nextSlot, + ts, + nowSeconds: nowTs, + }; + } + + getTargetEpochAndSlotInNextL1Slot(): EpochAndSlot & { nowSeconds: bigint } { + const result = this.getEpochAndSlotInNextL1Slot(); + const offset = this.isProposerPipeliningEnabled() ? 
PROPOSER_PIPELINING_SLOT_OFFSET : 0; + const targetSlot = SlotNumber(result.slot + offset); + return { ...result, slot: targetSlot, epoch: getEpochAtSlot(targetSlot, this.l1Constants) }; } getProposerIndexEncoding(epoch: EpochNumber, slot: SlotNumber, seed: bigint): `0x${string}` { @@ -142,9 +192,22 @@ export class TestEpochCache implements EpochCacheInterface { } getCurrentAndNextSlot(): { currentSlot: SlotNumber; nextSlot: SlotNumber } { + const currentSlot = this.getSlotNow(); + const next = this.getEpochAndSlotInNextL1Slot(); + + return { + currentSlot, + nextSlot: next.slot, + }; + } + + getTargetAndNextSlot(): { targetSlot: SlotNumber; nextSlot: SlotNumber } { + const targetSlot = this.getTargetSlot(); + const next = this.getTargetEpochAndSlotInNextL1Slot(); + return { - currentSlot: this.currentSlot, - nextSlot: SlotNumber(this.currentSlot + 1), + targetSlot, + nextSlot: next.slot, }; } @@ -165,6 +228,10 @@ export class TestEpochCache implements EpochCacheInterface { return Promise.resolve(validators.filter(v => committeeSet.has(v.toString()))); } + isEscapeHatchOpen(_epoch: EpochNumber): Promise { + return Promise.resolve(this.escapeHatchOpen); + } + isEscapeHatchOpenAtSlot(_slot?: SlotTag): Promise { return Promise.resolve(this.escapeHatchOpen); } diff --git a/yarn-project/ethereum/src/contracts/rollup.ts b/yarn-project/ethereum/src/contracts/rollup.ts index d72085466cbd..96d1c553ee1b 100644 --- a/yarn-project/ethereum/src/contracts/rollup.ts +++ b/yarn-project/ethereum/src/contracts/rollup.ts @@ -778,14 +778,15 @@ export class RollupContract { * timestamp of the next L1 block * @throws otherwise */ - public async canProposeAtNextEthBlock( + public async canProposeAt( archive: Buffer, account: `0x${string}` | Account, - slotDuration: number, + slotDuration: bigint, + slotOffset: bigint, opts: { forcePendingCheckpointNumber?: CheckpointNumber } = {}, ): Promise<{ slot: SlotNumber; checkpointNumber: CheckpointNumber; timeOfNextL1Slot: bigint }> { const 
latestBlock = await this.client.getBlock(); - const timeOfNextL1Slot = latestBlock.timestamp + BigInt(slotDuration); + const timeOfNextL1Slot = latestBlock.timestamp + slotDuration + slotOffset; const who = typeof account === 'string' ? account : account.address; try { diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index 159eba8cfb4a..9cc98c492272 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -205,6 +205,7 @@ export type EnvVar = | 'SENTINEL_ENABLED' | 'SENTINEL_HISTORY_LENGTH_IN_EPOCHS' | 'SENTINEL_HISTORIC_PROVEN_PERFORMANCE_LENGTH_IN_EPOCHS' + | 'SEQ_ENABLE_PROPOSER_PIPELINING' | 'SEQ_MAX_TX_PER_BLOCK' | 'SEQ_MAX_TX_PER_CHECKPOINT' | 'SEQ_MIN_TX_PER_BLOCK' @@ -219,7 +220,6 @@ export type EnvVar = | 'SEQ_PUBLISHER_ALLOW_INVALID_STATES' | 'SEQ_PUBLISHER_FORWARDER_ADDRESS' | 'SEQ_POLLING_INTERVAL_MS' - | 'SEQ_ENABLE_PROPOSER_PIPELINING' | 'SEQ_ENFORCE_TIME_TABLE' | 'SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT' | 'SEQ_ATTESTATION_PROPAGATION_TIME' diff --git a/yarn-project/p2p/src/client/p2p_client.test.ts b/yarn-project/p2p/src/client/p2p_client.test.ts index 98ff2f85d7fa..a72485990008 100644 --- a/yarn-project/p2p/src/client/p2p_client.test.ts +++ b/yarn-project/p2p/src/client/p2p_client.test.ts @@ -49,6 +49,7 @@ describe('P2P Client', () => { epochCache = mock(); epochCache.getCurrentAndNextSlot.mockReturnValue({ currentSlot: SlotNumber(0), nextSlot: SlotNumber(1) }); + epochCache.getTargetAndNextSlot.mockReturnValue({ targetSlot: SlotNumber(0), nextSlot: SlotNumber(1) }); attestationPool = await createTestAttestationPool(); diff --git a/yarn-project/p2p/src/client/p2p_client.ts b/yarn-project/p2p/src/client/p2p_client.ts index b9a49ac95ea8..7d0ba924746c 100644 --- a/yarn-project/p2p/src/client/p2p_client.ts +++ b/yarn-project/p2p/src/client/p2p_client.ts @@ -696,12 +696,14 @@ export class P2PClient extends WithTracer implements P2P { /** 
Checks if the slot has changed and calls prepareForSlot if so. */
   private async maybeCallPrepareForSlot(): Promise {
-    const { currentSlot } = this.epochCache.getCurrentAndNextSlot();
-    if (currentSlot <= this.lastSlotProcessed) {
+    // If we have a pending checkpoint available, we want to prepare the target slot - otherwise we prepare the current slot
+    // Knowledge of pending checkpoints is in the PR above
+    const { targetSlot } = this.epochCache.getTargetAndNextSlot();
+    if (targetSlot <= this.lastSlotProcessed) {
       return;
     }
-    this.lastSlotProcessed = currentSlot;
-    await this.txPool.prepareForSlot(currentSlot);
+    this.lastSlotProcessed = targetSlot;
+    await this.txPool.prepareForSlot(targetSlot);
   }
 
   private async startServiceIfSynched() {
diff --git a/yarn-project/p2p/src/client/test/p2p_client.integration_reqresp.test.ts b/yarn-project/p2p/src/client/test/p2p_client.integration_reqresp.test.ts
index 78fc5ac35473..c6454f17a1d2 100644
--- a/yarn-project/p2p/src/client/test/p2p_client.integration_reqresp.test.ts
+++ b/yarn-project/p2p/src/client/test/p2p_client.integration_reqresp.test.ts
@@ -46,7 +46,9 @@ describe('p2p client integration reqresp', () => {
     logger = createLogger('p2p:test:integration-reqresp');
     p2pBaseConfig = { ...emptyChainConfig, ...getP2PDefaultConfig() };
 
-    epochCache.getEpochAndSlotInNextL1Slot.mockReturnValue({ ts: BigInt(0) } as EpochAndSlot & { now: bigint });
+    epochCache.getEpochAndSlotInNextL1Slot.mockReturnValue({ ts: BigInt(0) } as EpochAndSlot & {
+      nowSeconds: bigint;
+    });
     epochCache.getRegisteredValidators.mockResolvedValue([]);
     epochCache.getL1Constants.mockReturnValue({
       l1StartBlock: 0n,
diff --git a/yarn-project/p2p/src/client/test/tx_proposal_collector/proposal_tx_collector_worker.ts b/yarn-project/p2p/src/client/test/tx_proposal_collector/proposal_tx_collector_worker.ts
index c756de610980..44ad0f6eccc1 100644
--- a/yarn-project/p2p/src/client/test/tx_proposal_collector/proposal_tx_collector_worker.ts
+++ 
b/yarn-project/p2p/src/client/test/tx_proposal_collector/proposal_tx_collector_worker.ts @@ -1,4 +1,5 @@ import { MockL2BlockSource } from '@aztec/archiver/test'; +import type { EpochCache } from '@aztec/epoch-cache'; import { SecretValue } from '@aztec/foundation/config'; import { createLogger } from '@aztec/foundation/log'; import { sleep } from '@aztec/foundation/sleep'; @@ -14,6 +15,7 @@ import { type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-clien import type { PeerId } from '@libp2p/interface'; import { peerIdFromString } from '@libp2p/peer-id'; +import { mock } from 'jest-mock-extended'; import type { P2PConfig } from '../../../config.js'; import { BatchTxRequesterCollector, SendBatchRequestCollector } from '../../../services/index.js'; @@ -27,7 +29,6 @@ import { InMemoryTxPool, UNLIMITED_RATE_LIMIT_QUOTA, calculateInternalTimeout, - createMockEpochCache, createMockWorldStateSynchronizer, } from '../../../test-helpers/index.js'; import { createP2PClient } from '../../index.js'; @@ -98,7 +99,7 @@ function sendMessage(message: WorkerResponse): Promise { async function startClient(config: P2PConfig, clientIndex: number) { txPool = new InMemoryTxPool(); attestationPool = new InMemoryAttestationPool(); - const epochCache = createMockEpochCache(); + const epochCache = mock(); const worldState = createMockWorldStateSynchronizer(); const l2BlockSource = new MockL2BlockSource(); const proofVerifier = new AlwaysTrueCircuitVerifier(); diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/interfaces.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/interfaces.ts index cee420a796b4..41d1640ad42b 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/interfaces.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/interfaces.ts @@ -160,10 +160,10 @@ export interface TxPoolV2 extends TypedEventEmitter { handleMinedBlock(block: L2Block): Promise; /** - * Prepares the pool for a new slot. 
- * Unprotects transactions from earlier slots and validates them before - * returning to pending state. - * @param slotNumber - The slot number to prepare for + * Prepares the pool for a new slot by unprotecting transactions from earlier + * slots and re-validating them before returning to pending state. + * @param slotNumber - The pipeline slot we are building for (i.e. the slot + * the resulting blocks will target on L1). */ prepareForSlot(slotNumber: SlotNumber): Promise; diff --git a/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.test.ts b/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.test.ts index 426ede996e09..f69b53930c38 100644 --- a/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.test.ts +++ b/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.test.ts @@ -1,30 +1,29 @@ -import type { EpochCache } from '@aztec/epoch-cache'; +import type { EpochCacheInterface } from '@aztec/epoch-cache'; import { NoCommitteeError } from '@aztec/ethereum/contracts'; -import { SlotNumber } from '@aztec/foundation/branded-types'; +import { EpochNumber, SlotNumber } from '@aztec/foundation/branded-types'; import { Secp256k1Signer } from '@aztec/foundation/crypto/secp256k1-signer'; import { PeerErrorSeverity } from '@aztec/stdlib/p2p'; import { CheckpointHeader } from '@aztec/stdlib/rollup'; import { makeCheckpointAttestation } from '@aztec/stdlib/testing'; -import { mock } from 'jest-mock-extended'; +import { type MockProxy, mock } from 'jest-mock-extended'; import { CheckpointAttestationValidator } from './attestation_validator.js'; describe('CheckpointAttestationValidator', () => { - let epochCache: EpochCache; + let epochCache: MockProxy; let validator: CheckpointAttestationValidator; let proposer: Secp256k1Signer; let attester: Secp256k1Signer; beforeEach(() => { - epochCache = mock(); + epochCache = mock(); validator = new 
CheckpointAttestationValidator(epochCache); proposer = Secp256k1Signer.random(); attester = Secp256k1Signer.random(); }); it('returns high tolerance error if slot number is not current or next slot (outside clock tolerance)', async () => { - // Create an attestation for slot 97 (previous slot) const header = CheckpointHeader.random({ slotNumber: SlotNumber(97) }); const mockAttestation = makeCheckpointAttestation({ header, @@ -32,26 +31,24 @@ describe('CheckpointAttestationValidator', () => { proposerSigner: proposer, }); - // Mock epoch cache to return different slot numbers - (epochCache.getCurrentAndNextSlot as jest.Mock).mockReturnValue({ - currentSlot: SlotNumber(98), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(98), nextSlot: SlotNumber(99), }); - // Mock getEpochAndSlotNow to return time OUTSIDE clock tolerance (1000ms elapsed) - (epochCache.getEpochAndSlotNow as jest.Mock).mockReturnValue({ - epoch: 1, + epochCache.getTargetSlot.mockReturnValue(SlotNumber(98)); + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: EpochNumber(1), slot: SlotNumber(98), - ts: 1000n, // slot started at 1000 seconds + ts: 1000n, nowMs: 1001000n, // 1000ms elapsed, outside 500ms tolerance }); - (epochCache.isInCommittee as jest.Mock).mockResolvedValue(true); + epochCache.isInCommittee.mockResolvedValue(true); const result = await validator.validate(mockAttestation); expect(result).toEqual({ result: 'reject', severity: PeerErrorSeverity.HighToleranceError }); }); it('returns ignore if previous slot attestation is within clock tolerance', async () => { - // Create an attestation for slot 97 (previous slot) const header = CheckpointHeader.random({ slotNumber: SlotNumber(97) }); const mockAttestation = makeCheckpointAttestation({ header, @@ -59,27 +56,25 @@ describe('CheckpointAttestationValidator', () => { proposerSigner: proposer, }); - // Mock epoch cache - attestation is for previous slot (97) when current is 98 - 
(epochCache.getCurrentAndNextSlot as jest.Mock).mockReturnValue({ - currentSlot: SlotNumber(98), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(98), nextSlot: SlotNumber(99), }); - // Mock getEpochAndSlotNow to return time WITHIN clock tolerance (100ms elapsed) - (epochCache.getEpochAndSlotNow as jest.Mock).mockReturnValue({ - epoch: 1, + epochCache.getTargetSlot.mockReturnValue(SlotNumber(98)); + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: EpochNumber(1), slot: SlotNumber(98), - ts: 1000n, // slot started at 1000 seconds + ts: 1000n, nowMs: 1000100n, // 100ms elapsed, within 500ms tolerance }); - (epochCache.isInCommittee as jest.Mock).mockResolvedValue(true); - (epochCache.getProposerAttesterAddressInSlot as jest.Mock).mockResolvedValue(proposer.address); + epochCache.isInCommittee.mockResolvedValue(true); + epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(proposer.address); const result = await validator.validate(mockAttestation); expect(result).toEqual({ result: 'ignore' }); }); it('returns high tolerance error if attester is not in committee', async () => { - // The slot is correct, but the attester is not in the committee const header = CheckpointHeader.random({ slotNumber: SlotNumber(100) }); const mockAttestation = makeCheckpointAttestation({ header, @@ -87,19 +82,17 @@ describe('CheckpointAttestationValidator', () => { proposerSigner: proposer, }); - // Mock epoch cache to return matching slot number but invalid committee membership - (epochCache.getCurrentAndNextSlot as jest.Mock).mockReturnValue({ - currentSlot: SlotNumber(100), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(100), nextSlot: SlotNumber(101), }); - (epochCache.isInCommittee as jest.Mock).mockResolvedValue(false); + epochCache.isInCommittee.mockResolvedValue(false); const result = await validator.validate(mockAttestation); expect(result).toEqual({ result: 'reject', severity: 
PeerErrorSeverity.HighToleranceError }); }); - it('returns undefined if checkpoint attestation is valid (current slot)', async () => { - // Create an attestation for slot 100 + it('returns accept if checkpoint attestation is valid (current slot)', async () => { const header = CheckpointHeader.random({ slotNumber: SlotNumber(100) }); const mockAttestation = makeCheckpointAttestation({ header, @@ -107,20 +100,18 @@ describe('CheckpointAttestationValidator', () => { proposerSigner: proposer, }); - // Mock epoch cache for valid case with current slot - (epochCache.getCurrentAndNextSlot as jest.Mock).mockReturnValue({ - currentSlot: SlotNumber(100), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(100), nextSlot: SlotNumber(101), }); - (epochCache.isInCommittee as jest.Mock).mockResolvedValue(true); - (epochCache.getProposerAttesterAddressInSlot as jest.Mock).mockResolvedValue(proposer.address); + epochCache.isInCommittee.mockResolvedValue(true); + epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(proposer.address); const result = await validator.validate(mockAttestation); expect(result).toEqual({ result: 'accept' }); }); - it('returns undefined if checkpoint attestation is valid (next slot)', async () => { - // Setup attestation for next slot + it('returns accept if checkpoint attestation is valid (next slot)', async () => { const header = CheckpointHeader.random({ slotNumber: SlotNumber(101) }); const mockAttestation = makeCheckpointAttestation({ header, @@ -128,13 +119,12 @@ describe('CheckpointAttestationValidator', () => { proposerSigner: proposer, }); - // Mock epoch cache for valid case with next slot - (epochCache.getCurrentAndNextSlot as jest.Mock).mockReturnValue({ - currentSlot: SlotNumber(100), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(100), nextSlot: SlotNumber(101), }); - (epochCache.isInCommittee as jest.Mock).mockResolvedValue(true); - (epochCache.getProposerAttesterAddressInSlot 
as jest.Mock).mockResolvedValue(proposer.address); + epochCache.isInCommittee.mockResolvedValue(true); + epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(proposer.address); const result = await validator.validate(mockAttestation); expect(result).toEqual({ result: 'accept' }); @@ -149,19 +139,17 @@ describe('CheckpointAttestationValidator', () => { proposerSigner: wrongProposer, }); - // Mock epoch cache with different proposer - (epochCache.getCurrentAndNextSlot as jest.Mock).mockReturnValue({ - currentSlot: SlotNumber(100), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(100), nextSlot: SlotNumber(101), }); - (epochCache.isInCommittee as jest.Mock).mockResolvedValue(true); + epochCache.isInCommittee.mockResolvedValue(true); const result = await validator.validate(mockAttestation); expect(result).toEqual({ result: 'reject', severity: PeerErrorSeverity.HighToleranceError }); }); it('returns low tolerance error if no committee exists', async () => { - // Create an attestation const header = CheckpointHeader.random({ slotNumber: SlotNumber(100) }); const mockAttestation = makeCheckpointAttestation({ header, @@ -169,13 +157,12 @@ describe('CheckpointAttestationValidator', () => { proposerSigner: proposer, }); - // Mock epoch cache to throw NoCommitteeError - (epochCache.getCurrentAndNextSlot as jest.Mock).mockReturnValue({ - currentSlot: SlotNumber(100), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(100), nextSlot: SlotNumber(101), }); - (epochCache.isInCommittee as jest.Mock).mockReturnValue(true); - (epochCache.getProposerAttesterAddressInSlot as jest.Mock).mockRejectedValue(new NoCommitteeError()); + epochCache.isInCommittee.mockResolvedValue(true); + epochCache.getProposerAttesterAddressInSlot.mockRejectedValue(new NoCommitteeError()); const result = await validator.validate(mockAttestation); expect(result).toEqual({ result: 'reject', severity: PeerErrorSeverity.LowToleranceError }); diff 
--git a/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.ts b/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.ts index c5b05276deb2..0f6fbcab8b94 100644 --- a/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.ts +++ b/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.ts @@ -23,13 +23,14 @@ export class CheckpointAttestationValidator implements P2PValidator { - let epochCache: MockProxy; + let epochCache: MockProxy; let attestationPool: MockProxy; let validator: FishermanAttestationValidator; let proposer: Secp256k1Signer; let attester: Secp256k1Signer; beforeEach(() => { - epochCache = mock(); + epochCache = mock(); attestationPool = mock(); validator = new FishermanAttestationValidator(epochCache, attestationPool, getTelemetryClient()); proposer = Secp256k1Signer.random(); @@ -34,7 +34,6 @@ describe('FishermanAttestationValidator', () => { describe('base validation', () => { it('returns high tolerance error if slot number is not current or next slot (outside clock tolerance)', async () => { - // Create an attestation for slot 97 const header = CheckpointHeader.random({ slotNumber: SlotNumber(97) }); const mockAttestation = makeCheckpointAttestation({ header, @@ -42,16 +41,15 @@ describe('FishermanAttestationValidator', () => { proposerSigner: proposer, }); - // Mock epoch cache to return different slot numbers - epochCache.getCurrentAndNextSlot.mockReturnValue({ - currentSlot: SlotNumber(98), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(98), nextSlot: SlotNumber(99), }); - // Mock getEpochAndSlotNow to return time OUTSIDE clock tolerance (1000ms elapsed) + epochCache.getTargetSlot.mockReturnValue(SlotNumber(98)); epochCache.getEpochAndSlotNow.mockReturnValue({ - epoch: 1 as any, + epoch: EpochNumber(1), slot: SlotNumber(98), - ts: 1000n, // slot started at 1000 seconds + ts: 1000n, nowMs: 1001000n, // 1000ms 
elapsed, outside 500ms tolerance }); epochCache.isInCommittee.mockResolvedValue(true); @@ -72,8 +70,8 @@ describe('FishermanAttestationValidator', () => { proposerSigner: proposer, }); - epochCache.getCurrentAndNextSlot.mockReturnValue({ - currentSlot: SlotNumber(100), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(100), nextSlot: SlotNumber(101), }); epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(proposer.address); @@ -94,8 +92,8 @@ describe('FishermanAttestationValidator', () => { proposerSigner: wrongProposer, }); - epochCache.getCurrentAndNextSlot.mockReturnValue({ - currentSlot: SlotNumber(100), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(100), nextSlot: SlotNumber(101), }); epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(proposer.address); @@ -111,16 +109,15 @@ describe('FishermanAttestationValidator', () => { describe('fisherman payload validation', () => { beforeEach(() => { - // Setup valid base validation for all fisherman tests - epochCache.getCurrentAndNextSlot.mockReturnValue({ - currentSlot: SlotNumber(100), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(100), nextSlot: SlotNumber(101), }); epochCache.isInCommittee.mockResolvedValue(true); epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(proposer.address); }); - it('returns undefined if attestation payload matches proposal payload', async () => { + it('returns accept if attestation payload matches proposal payload', async () => { const checkpointHeader = makeCheckpointHeader(1, { slotNumber: SlotNumber(100) }); const blockHeader = makeBlockHeader(1); const archive = Fr.random(); @@ -131,7 +128,6 @@ describe('FishermanAttestationValidator', () => { archive, }); - // Create a matching checkpoint proposal with the same payload const mockProposal = await makeCheckpointProposal({ checkpointHeader, signer: proposer, @@ -144,13 +140,12 @@ 
describe('FishermanAttestationValidator', () => { const result = await validator.validate(mockAttestation); expect(result).toEqual({ result: 'accept' }); - // Should have checked the proposal expect(attestationPool.getCheckpointProposal).toHaveBeenCalledWith(mockAttestation.archive.toString()); }); it('returns low tolerance error if attestation payload does not match proposal payload', async () => { const checkpointHeader1 = makeCheckpointHeader(1, { slotNumber: SlotNumber(100) }); - const checkpointHeader2 = makeCheckpointHeader(2, { slotNumber: SlotNumber(100) }); // Different seed = different header + const checkpointHeader2 = makeCheckpointHeader(2, { slotNumber: SlotNumber(100) }); const blockHeader2 = makeBlockHeader(2); const mockAttestation = makeCheckpointAttestation({ @@ -159,7 +154,6 @@ describe('FishermanAttestationValidator', () => { proposerSigner: proposer, }); - // Create a proposal with a different payload const mockProposal = await makeCheckpointProposal({ checkpointHeader: checkpointHeader2, signer: proposer, @@ -171,11 +165,10 @@ describe('FishermanAttestationValidator', () => { const result = await validator.validate(mockAttestation); expect(result).toEqual({ result: 'reject', severity: PeerErrorSeverity.LowToleranceError }); - // Should have checked the proposal expect(attestationPool.getCheckpointProposal).toHaveBeenCalledWith(mockAttestation.archive.toString()); }); - it('returns undefined if proposal is not found yet (attestation arrived before proposal)', async () => { + it('returns accept if proposal is not found yet (attestation arrived before proposal)', async () => { const checkpointHeader = makeCheckpointHeader(1, { slotNumber: SlotNumber(100) }); const mockAttestation = makeCheckpointAttestation({ header: checkpointHeader, @@ -183,13 +176,11 @@ describe('FishermanAttestationValidator', () => { proposerSigner: proposer, }); - // Proposal not found in pool yet attestationPool.getCheckpointProposal.mockResolvedValue(undefined); const 
result = await validator.validate(mockAttestation); expect(result).toEqual({ result: 'accept' }); - // Should have tried to check the proposal expect(attestationPool.getCheckpointProposal).toHaveBeenCalledWith(mockAttestation.archive.toString()); }); @@ -202,11 +193,10 @@ describe('FishermanAttestationValidator', () => { proposerSigner: proposer, }); - // Create a proposal with the same header but different archive const mockProposal = await makeCheckpointProposal({ checkpointHeader, signer: proposer, - archiveRoot: Fr.random(), // Different archive + archiveRoot: Fr.random(), lastBlock: { blockHeader }, }); @@ -218,7 +208,7 @@ describe('FishermanAttestationValidator', () => { it('detects payload mismatch with different header hash', async () => { const checkpointHeader1 = makeCheckpointHeader(1, { slotNumber: SlotNumber(100) }); - const checkpointHeader2 = makeCheckpointHeader(2, { slotNumber: SlotNumber(100) }); // Same slot but different content + const checkpointHeader2 = makeCheckpointHeader(2, { slotNumber: SlotNumber(100) }); const blockHeader2 = makeBlockHeader(2); const mockAttestation = makeCheckpointAttestation({ @@ -227,7 +217,6 @@ describe('FishermanAttestationValidator', () => { proposerSigner: proposer, }); - // Create a proposal with a different header (different hash) const mockProposal = await makeCheckpointProposal({ checkpointHeader: checkpointHeader2, signer: proposer, @@ -236,7 +225,6 @@ describe('FishermanAttestationValidator', () => { attestationPool.getCheckpointProposal.mockResolvedValue(mockProposal); - // Headers are different, so payloads should be different const result = await validator.validate(mockAttestation); expect(result).toEqual({ result: 'reject', severity: PeerErrorSeverity.LowToleranceError }); }); @@ -244,9 +232,8 @@ describe('FishermanAttestationValidator', () => { describe('edge cases', () => { beforeEach(() => { - // Setup valid base validation - epochCache.getCurrentAndNextSlot.mockReturnValue({ - currentSlot: 
SlotNumber(100), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(100), nextSlot: SlotNumber(101), }); epochCache.isInCommittee.mockResolvedValue(true); @@ -261,7 +248,6 @@ describe('FishermanAttestationValidator', () => { proposerSigner: proposer, }); - // Simulate pool throwing an error attestationPool.getCheckpointProposal.mockRejectedValue(new Error('Pool error')); await expect(validator.validate(mockAttestation)).rejects.toThrow('Pool error'); diff --git a/yarn-project/p2p/src/msg_validators/clock_tolerance.test.ts b/yarn-project/p2p/src/msg_validators/clock_tolerance.test.ts index a2a172a7b040..cfdbf4bcd2bb 100644 --- a/yarn-project/p2p/src/msg_validators/clock_tolerance.test.ts +++ b/yarn-project/p2p/src/msg_validators/clock_tolerance.test.ts @@ -17,6 +17,8 @@ describe('clock_tolerance', () => { beforeEach(() => { epochCache = mock(); + // Default getTargetSlot to return SlotNumber(100) - tests override as needed + epochCache.getTargetSlot.mockReturnValue(SlotNumber(100)); }); it('returns true for previous slot message within tolerance window (100ms elapsed)', () => { @@ -182,5 +184,24 @@ describe('clock_tolerance', () => { expect(isWithinClockTolerance(messageSlot, currentSlot, epochCache)).toBe(true); }); + + it('returns false when getTargetSlot() does not match currentSlot argument (sanity check)', () => { + const currentSlot = SlotNumber(100); + const messageSlot = SlotNumber(99); // previous slot + + // Simulate a race: caller read target slot as 100, but epoch cache now returns 101 + // (e.g., pipelining was enabled between the two reads) + epochCache.getTargetSlot.mockReturnValue(SlotNumber(101)); + + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: currentSlot, + ts: 1000n, + nowMs: 1000000n, // 0ms elapsed, within tolerance + }); + + // Even though timing is within tolerance, the sanity check fails + expect(isWithinClockTolerance(messageSlot, currentSlot, epochCache)).toBe(false); + }); }); }); 
diff --git a/yarn-project/p2p/src/msg_validators/clock_tolerance.ts b/yarn-project/p2p/src/msg_validators/clock_tolerance.ts index 89282176469d..dc00e9e6ce2b 100644 --- a/yarn-project/p2p/src/msg_validators/clock_tolerance.ts +++ b/yarn-project/p2p/src/msg_validators/clock_tolerance.ts @@ -36,10 +36,11 @@ export function isWithinClockTolerance( } // Check how far we are into the current slot (in milliseconds) - const { ts: slotStartTs, nowMs, slot } = epochCache.getEpochAndSlotNow(); + const { ts: slotStartTs, nowMs } = epochCache.getEpochAndSlotNow(); + const targetSlot = epochCache.getTargetSlot(); - // Sanity check: ensure the epoch cache's current slot matches the expected current slot - if (slot !== currentSlot) { + // Sanity check: ensure the epoch cache's target slot matches the expected current slot + if (targetSlot !== currentSlot) { return false; } diff --git a/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.test.ts b/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.test.ts index 8df14cd951f0..4210645babbe 100644 --- a/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.test.ts +++ b/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.test.ts @@ -1,6 +1,6 @@ import type { EpochCacheInterface } from '@aztec/epoch-cache'; import { NoCommitteeError } from '@aztec/ethereum/contracts'; -import { SlotNumber } from '@aztec/foundation/branded-types'; +import { EpochNumber, SlotNumber } from '@aztec/foundation/branded-types'; import { Secp256k1Signer } from '@aztec/foundation/crypto/secp256k1-signer'; import { EthAddress } from '@aztec/foundation/eth-address'; import { PeerErrorSeverity } from '@aztec/stdlib/p2p'; @@ -42,7 +42,17 @@ describe('ProposalValidator', () => { beforeEach(() => { epochCache = mock(); validator = new ProposalValidator(epochCache, { txsPermitted: true, maxTxsPerBlock: undefined }, 'test'); - epochCache.getCurrentAndNextSlot.mockReturnValue({ 
currentSlot, nextSlot }); + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: EpochNumber(1), + slot: currentSlot, + ts: 0n, + nowMs: 0n, + }); + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: currentSlot, + nextSlot, + }); + epochCache.getTargetSlot.mockReturnValue(currentSlot); }); describe.each([ @@ -61,7 +71,7 @@ describe('ProposalValidator', () => { const proposal = await factory(previousSlot, Secp256k1Signer.random()); epochCache.getEpochAndSlotNow.mockReturnValue({ - epoch: 1 as any, + epoch: EpochNumber(1), slot: currentSlot, ts: 1000n, nowMs: 1001000n, // 1000ms elapsed, outside 500ms tolerance @@ -78,7 +88,7 @@ describe('ProposalValidator', () => { const proposal = await factory(previousSlot, signer); epochCache.getEpochAndSlotNow.mockReturnValue({ - epoch: 1 as any, + epoch: EpochNumber(1), slot: currentSlot, ts: 1000n, nowMs: 1000100n, // 100ms elapsed, within 500ms tolerance diff --git a/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.ts b/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.ts index 45c38dd61529..0f2c5d47c5bf 100644 --- a/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.ts +++ b/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.ts @@ -31,13 +31,14 @@ export class ProposalValidator { /** Validates header-level fields: slot, signature, and proposer. 
*/ public async validate(proposal: BlockProposal | CheckpointProposalCore): Promise { try { - // Slot check - const { currentSlot, nextSlot } = this.epochCache.getCurrentAndNextSlot(); + // Slot check: use target slots since proposals target pipeline slots (slot + 1 when pipelining) + const { targetSlot, nextSlot } = this.epochCache.getTargetAndNextSlot(); + const slotNumber = proposal.slotNumber; - if (slotNumber !== currentSlot && slotNumber !== nextSlot) { + if (slotNumber !== targetSlot && slotNumber !== nextSlot) { // Check if message is for previous slot and within clock tolerance - if (!isWithinClockTolerance(slotNumber, currentSlot, this.epochCache)) { - this.logger.warn(`Penalizing peer for invalid slot number ${slotNumber}`, { currentSlot, nextSlot }); + if (!isWithinClockTolerance(slotNumber, targetSlot, this.epochCache)) { + this.logger.warn(`Penalizing peer for invalid slot number ${slotNumber}`, { targetSlot, nextSlot }); return { result: 'reject', severity: PeerErrorSeverity.HighToleranceError }; } this.logger.verbose(`Ignoring proposal for previous slot ${slotNumber} within clock tolerance`); diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts index a0e812112a30..f3330dd612b7 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts @@ -568,7 +568,7 @@ describe('LibP2PService', () => { let blockReceivedCallback: jest.Mock; let duplicateProposalCallback: jest.Mock; - const currentSlot = SlotNumber(100); + const targetSlot = SlotNumber(100); const nextSlot = SlotNumber(101); beforeEach(() => { @@ -578,14 +578,9 @@ describe('LibP2PService', () => { mockTxPool.protectTxs.mockResolvedValue([]); mockEpochCache = mock(); - mockEpochCache.getCurrentAndNextSlot.mockReturnValue({ currentSlot, nextSlot }); mockEpochCache.getProposerAttesterAddressInSlot.mockResolvedValue(signer.address); - 
mockEpochCache.getEpochAndSlotNow.mockReturnValue({ - epoch: 1 as any, - slot: currentSlot, - ts: 1000n, - nowMs: 1000100n, // 100ms elapsed, within tolerance - }); + mockEpochCache.getTargetAndNextSlot.mockReturnValue({ targetSlot: targetSlot, nextSlot }); + mockEpochCache.getTargetSlot.mockReturnValue(targetSlot); mockPeerManager = mock(); reportMessageValidationResultSpy = jest.fn(); @@ -605,7 +600,7 @@ describe('LibP2PService', () => { }); it('processes valid block: invokes callback and marks txs non-evictable', async () => { - const header = makeBlockHeader(1, { slotNumber: currentSlot }); + const header = makeBlockHeader(1, { slotNumber: targetSlot }); const proposal = await makeBlockProposal({ signer, blockHeader: header }); await service.processBlockFromPeer(proposal.toBuffer(), 'msg-1', mockPeerId); @@ -626,7 +621,7 @@ describe('LibP2PService', () => { }); it('equivocated block: re-broadcasts but does NOT process', async () => { - const header = makeBlockHeader(1, { slotNumber: currentSlot }); + const header = makeBlockHeader(1, { slotNumber: targetSlot }); const indexWithinCheckpoint = IndexWithinCheckpoint(0); // First proposal - should be processed normally @@ -660,14 +655,14 @@ describe('LibP2PService', () => { // Verify duplicate callback was invoked expect(duplicateProposalCallback).toHaveBeenCalledWith({ - slot: currentSlot, + slot: targetSlot, proposer: signer.address, type: 'block', }); }); it('duplicate exact block: returns Ignore, no processing', async () => { - const header = makeBlockHeader(1, { slotNumber: currentSlot }); + const header = makeBlockHeader(1, { slotNumber: targetSlot }); const proposal = await makeBlockProposal({ signer, blockHeader: header }); // First submission @@ -693,7 +688,7 @@ describe('LibP2PService', () => { }); it('cap exceeded: penalizes peer and rejects', async () => { - const header = makeBlockHeader(1, { slotNumber: currentSlot }); + const header = makeBlockHeader(1, { slotNumber: targetSlot }); const 
indexWithinCheckpoint = IndexWithinCheckpoint(0); // Add MAX_BLOCK_PROPOSALS_PER_POSITION proposals @@ -741,7 +736,7 @@ describe('LibP2PService', () => { }); it('duplicateProposalCallback invoked exactly once per equivocation event', async () => { - const header = makeBlockHeader(1, { slotNumber: currentSlot }); + const header = makeBlockHeader(1, { slotNumber: targetSlot }); const indexWithinCheckpoint = IndexWithinCheckpoint(0); // First proposal - callback NOT invoked @@ -764,7 +759,7 @@ describe('LibP2PService', () => { await service.processBlockFromPeer(proposal2.toBuffer(), 'msg-2', mockPeerId); expect(duplicateProposalCallback).toHaveBeenCalledTimes(1); expect(duplicateProposalCallback).toHaveBeenCalledWith({ - slot: currentSlot, + slot: targetSlot, proposer: signer.address, type: 'block', }); @@ -783,7 +778,7 @@ describe('LibP2PService', () => { }); it('validation failure penalizes peer with correct severity', async () => { - const header = makeBlockHeader(1, { slotNumber: currentSlot }); + const header = makeBlockHeader(1, { slotNumber: targetSlot }); // Create block signed by wrong signer const wrongSigner = Secp256k1Signer.random(); const proposal = await makeBlockProposal({ signer: wrongSigner, blockHeader: header }); @@ -807,7 +802,7 @@ describe('LibP2PService', () => { let checkpointReceivedCallback: jest.Mock; let duplicateProposalCallback: jest.Mock; - const currentSlot = SlotNumber(100); + const targetSlot = SlotNumber(100); const nextSlot = SlotNumber(101); beforeEach(() => { @@ -817,14 +812,9 @@ describe('LibP2PService', () => { mockTxPool.protectTxs.mockResolvedValue([]); mockEpochCache = mock(); - mockEpochCache.getCurrentAndNextSlot.mockReturnValue({ currentSlot, nextSlot }); mockEpochCache.getProposerAttesterAddressInSlot.mockResolvedValue(signer.address); - mockEpochCache.getEpochAndSlotNow.mockReturnValue({ - epoch: 1 as any, - slot: currentSlot, - ts: 1000n, - nowMs: 1000100n, - }); + mockEpochCache.getTargetAndNextSlot.mockReturnValue({ 
targetSlot, nextSlot }); + mockEpochCache.getTargetSlot.mockReturnValue(targetSlot); mockPeerManager = mock(); reportMessageValidationResultSpy = jest.fn(); @@ -846,7 +836,7 @@ describe('LibP2PService', () => { }); it('processes valid checkpoint: invokes callback and propagates attestations', async () => { - const checkpointHeader = makeCheckpointHeader(1, { slotNumber: currentSlot }); + const checkpointHeader = makeCheckpointHeader(1, { slotNumber: targetSlot }); const proposal = await makeCheckpointProposal({ signer, checkpointHeader }); await service.handleGossipedCheckpointProposal(proposal.toBuffer(), 'msg-1', mockPeerId); @@ -864,7 +854,7 @@ describe('LibP2PService', () => { }); it('equivocated checkpoint: re-broadcasts but does NOT process', async () => { - const checkpointHeader = makeCheckpointHeader(1, { slotNumber: currentSlot }); + const checkpointHeader = makeCheckpointHeader(1, { slotNumber: targetSlot }); // First checkpoint const checkpoint1 = await makeCheckpointProposal({ @@ -882,7 +872,7 @@ describe('LibP2PService', () => { // Second checkpoint at same slot (equivocation) const checkpoint2 = await makeCheckpointProposal({ signer, - checkpointHeader: makeCheckpointHeader(1, { slotNumber: currentSlot }), + checkpointHeader: makeCheckpointHeader(1, { slotNumber: targetSlot }), archiveRoot: Fr.random(), }); await service.handleGossipedCheckpointProposal(checkpoint2.toBuffer(), 'msg-2', mockPeerId); @@ -895,15 +885,15 @@ describe('LibP2PService', () => { // Verify duplicate callback was invoked expect(duplicateProposalCallback).toHaveBeenCalledWith({ - slot: currentSlot, + slot: targetSlot, proposer: signer.address, type: 'checkpoint', }); }); it('checkpoint with lastBlock: processes both when valid', async () => { - const checkpointHeader = makeCheckpointHeader(1, { slotNumber: currentSlot }); - const blockHeader = makeBlockHeader(1, { slotNumber: currentSlot }); + const checkpointHeader = makeCheckpointHeader(1, { slotNumber: targetSlot }); + const 
blockHeader = makeBlockHeader(1, { slotNumber: targetSlot }); const proposal = await makeCheckpointProposal({ signer, checkpointHeader, @@ -928,8 +918,8 @@ describe('LibP2PService', () => { }); it('lastBlock processed even when checkpoint cap exceeded', async () => { - const checkpointHeader = makeCheckpointHeader(1, { slotNumber: currentSlot }); - const blockHeader = makeBlockHeader(1, { slotNumber: currentSlot }); + const checkpointHeader = makeCheckpointHeader(1, { slotNumber: targetSlot }); + const blockHeader = makeBlockHeader(1, { slotNumber: targetSlot }); // Fill checkpoint slot to MAX_CHECKPOINT_PROPOSALS_PER_SLOT for (let i = 0; i < MAX_CHECKPOINT_PROPOSALS_PER_SLOT; i++) { @@ -937,7 +927,7 @@ describe('LibP2PService', () => { mockEpochCache.getProposerAttesterAddressInSlot.mockResolvedValue(individualSigner.address); const proposal = await makeCheckpointProposal({ signer: individualSigner, - checkpointHeader: makeCheckpointHeader(1, { slotNumber: currentSlot }), + checkpointHeader: makeCheckpointHeader(1, { slotNumber: targetSlot }), archiveRoot: Fr.random(), }); await service.handleGossipedCheckpointProposal(proposal.toBuffer(), `msg-${i}`, mockPeerId); @@ -986,8 +976,8 @@ describe('LibP2PService', () => { }); it('checkpoint rejected when lastBlock is equivocated', async () => { - const checkpointHeader = makeCheckpointHeader(1, { slotNumber: currentSlot }); - const blockHeader = makeBlockHeader(1, { slotNumber: currentSlot }); + const checkpointHeader = makeCheckpointHeader(1, { slotNumber: targetSlot }); + const blockHeader = makeBlockHeader(1, { slotNumber: targetSlot }); const indexWithinCheckpoint = IndexWithinCheckpoint(4); // Pre-add a block at same position @@ -1023,7 +1013,7 @@ describe('LibP2PService', () => { }); it('validation failure penalizes peer with correct severity', async () => { - const checkpointHeader = makeCheckpointHeader(1, { slotNumber: currentSlot }); + const checkpointHeader = makeCheckpointHeader(1, { slotNumber: targetSlot 
}); // Create checkpoint signed by wrong signer const wrongSigner = Secp256k1Signer.random(); const proposal = await makeCheckpointProposal({ signer: wrongSigner, checkpointHeader }); diff --git a/yarn-project/p2p/src/test-helpers/testbench-utils.ts b/yarn-project/p2p/src/test-helpers/testbench-utils.ts index b375e80df0ca..86903280deed 100644 --- a/yarn-project/p2p/src/test-helpers/testbench-utils.ts +++ b/yarn-project/p2p/src/test-helpers/testbench-utils.ts @@ -273,17 +273,41 @@ export class InMemoryAttestationPool { * Creates a mock EpochCache for testing. */ export function createMockEpochCache(): EpochCacheInterface { - return { + const cache: EpochCacheInterface = { getCommittee: () => Promise.resolve({ committee: [], seed: 1n, epoch: EpochNumber.ZERO, isEscapeHatchOpen: false }), getProposerIndexEncoding: () => '0x' as `0x${string}`, - getEpochAndSlotNow: () => ({ epoch: EpochNumber.ZERO, slot: SlotNumber.ZERO, ts: 0n, nowMs: 0n }), + getSlotNow: () => SlotNumber.ZERO, + getTargetSlot: () => SlotNumber.ZERO, + getEpochNow: () => EpochNumber.ZERO, + getTargetEpoch: () => EpochNumber.ZERO, + getEpochAndSlotNow: () => ({ + epoch: EpochNumber.ZERO, + slot: SlotNumber.ZERO, + ts: 0n, + nowMs: 0n, + }), + isProposerPipeliningEnabled: () => false, computeProposerIndex: () => 0n, getCurrentAndNextSlot: () => ({ currentSlot: SlotNumber.ZERO, nextSlot: SlotNumber.ZERO }), + getTargetAndNextSlot: () => ({ targetSlot: SlotNumber.ZERO, nextSlot: SlotNumber.ZERO }), getProposerAttesterAddressInSlot: () => Promise.resolve(undefined), - getEpochAndSlotInNextL1Slot: () => ({ epoch: EpochNumber.ZERO, slot: SlotNumber.ZERO, ts: 0n, now: 0n }), + getEpochAndSlotInNextL1Slot: () => ({ + epoch: EpochNumber.ZERO, + slot: SlotNumber.ZERO, + ts: 0n, + nowSeconds: 0n, + }), + getTargetEpochAndSlotInNextL1Slot: () => ({ + epoch: EpochNumber.ZERO, + slot: SlotNumber.ZERO, + ts: 0n, + nowSeconds: 0n, + }), isInCommittee: () => Promise.resolve(false), getRegisteredValidators: () => 
Promise.resolve([]), filterInCommittee: () => Promise.resolve([]), + isEscapeHatchOpen: () => Promise.resolve(false), + isEscapeHatchOpenAtSlot: () => Promise.resolve(false), getL1Constants: () => ({ l1StartBlock: 0n, l1GenesisTime: 0n, @@ -295,6 +319,7 @@ export function createMockEpochCache(): EpochCacheInterface { rollupManaLimit: Number.MAX_SAFE_INTEGER, }), }; + return cache; } /** diff --git a/yarn-project/p2p/src/testbench/p2p_client_testbench_worker.ts b/yarn-project/p2p/src/testbench/p2p_client_testbench_worker.ts index 1d3224d94a33..14d733793413 100644 --- a/yarn-project/p2p/src/testbench/p2p_client_testbench_worker.ts +++ b/yarn-project/p2p/src/testbench/p2p_client_testbench_worker.ts @@ -4,7 +4,7 @@ * Used when running testbench commands. */ import { MockL2BlockSource } from '@aztec/archiver/test'; -import type { EpochCacheInterface } from '@aztec/epoch-cache'; +import type { EpochCache, EpochCacheInterface } from '@aztec/epoch-cache'; import { BlockNumber } from '@aztec/foundation/branded-types'; import { SecretValue } from '@aztec/foundation/config'; import { Secp256k1Signer } from '@aztec/foundation/crypto/secp256k1-signer'; @@ -28,6 +28,7 @@ import { type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-clien import type { Message, PeerId } from '@libp2p/interface'; import { TopicValidatorResult } from '@libp2p/interface'; import { peerIdFromString } from '@libp2p/peer-id'; +import { mock } from 'jest-mock-extended'; import type { P2PClient } from '../client/index.js'; import type { P2PConfig } from '../config.js'; @@ -49,7 +50,6 @@ import { InMemoryAttestationPool, InMemoryTxPool, UNLIMITED_RATE_LIMIT_QUOTA, - createMockEpochCache, createMockWorldStateSynchronizer, filterTxsByDistribution, } from '../test-helpers/index.js'; @@ -344,7 +344,7 @@ process.on('message', async msg => { workerConfig = config; workerTxPool = new InMemoryTxPool(); workerAttestationPool = new InMemoryAttestationPool(); - const epochCache = createMockEpochCache(); 
+ const epochCache = mock(); const worldState = createMockWorldStateSynchronizer(); const l2BlockSource = new MockL2BlockSource(); diff --git a/yarn-project/sequencer-client/src/publisher/sequencer-publisher-factory.test.ts b/yarn-project/sequencer-client/src/publisher/sequencer-publisher-factory.test.ts index a06a0b62e2e7..131963b5defd 100644 --- a/yarn-project/sequencer-client/src/publisher/sequencer-publisher-factory.test.ts +++ b/yarn-project/sequencer-client/src/publisher/sequencer-publisher-factory.test.ts @@ -34,6 +34,7 @@ describe('SequencerPublisherFactory', () => { beforeEach(() => { mockConfig = { ethereumSlotDuration: 12, + aztecSlotDuration: 36, } as SequencerClientConfig; mockPublisherManager = mock>(); mockBlobClient = mock(); diff --git a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.test.ts b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.test.ts index 1665e6de07c0..75fdbb221715 100644 --- a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.test.ts +++ b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.test.ts @@ -118,11 +118,11 @@ describe('SequencerPublisher', () => { rollupAddress: EthAddress.ZERO.toString(), governanceProposerAddress: mockGovernanceProposerAddress, }, - + aztecSlotDuration: 36, ...defaultL1TxUtilsConfig, } as unknown as TxSenderConfig & PublisherConfig & - Pick & + Pick & L1TxUtilsConfig; rollup = mock(); @@ -138,7 +138,13 @@ describe('SequencerPublisher', () => { slashFactoryContract = mock(); const epochCache = mock(); - epochCache.getEpochAndSlotNow.mockReturnValue({ epoch: EpochNumber(1), slot: SlotNumber(2), ts: 3n, nowMs: 3000n }); + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: EpochNumber(1), + slot: SlotNumber(2), + ts: 3n, + nowMs: 3000n, + }); + epochCache.getSlotNow.mockReturnValue(SlotNumber(2)); epochCache.getCommittee.mockResolvedValue({ committee: [], seed: 1n, @@ -320,6 +326,7 @@ describe('SequencerPublisher', () => { ts: 3n, nowMs: 
3000n, }); + epochCache.getSlotNow.mockReturnValue(SlotNumber(2)); epochCache.getCommittee.mockResolvedValue({ committee: [], seed: 1n, @@ -327,19 +334,22 @@ describe('SequencerPublisher', () => { isEscapeHatchOpen: false, }); - rotatingPublisher = new SequencerPublisher({ ethereumSlotDuration: 12, l1ChainId: 1 } as any, { - blobClient, - rollupContract: rollup, - l1TxUtils, - epochCache, - slashingProposerContract, - governanceProposerContract, - slashFactoryContract, - dateProvider: new TestDateProvider(), - metrics: l1Metrics, - lastActions: {}, - getNextPublisher, - }); + rotatingPublisher = new SequencerPublisher( + { ethereumSlotDuration: 12, aztecSlotDuration: 36, l1ChainId: 1 } as any, + { + blobClient, + rollupContract: rollup, + l1TxUtils, + epochCache, + slashingProposerContract, + governanceProposerContract, + slashFactoryContract, + dateProvider: new TestDateProvider(), + metrics: l1Metrics, + lastActions: {}, + getNextPublisher, + }, + ); }); it('rotates to next publisher when forward throws and retries successfully', async () => { diff --git a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts index 1baac9255c7b..971a338d8647 100644 --- a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts +++ b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts @@ -133,6 +133,7 @@ export class SequencerPublisher { protected log: Logger; protected ethereumSlotDuration: bigint; + protected aztecSlotDuration: bigint; private blobClient: BlobClientInterface; @@ -166,7 +167,7 @@ export class SequencerPublisher { constructor( private config: Pick & - Pick & { l1ChainId: number }, + Pick & { l1ChainId: number }, deps: { telemetry?: TelemetryClient; blobClient: BlobClientInterface; @@ -185,6 +186,7 @@ export class SequencerPublisher { ) { this.log = deps.log ?? 
createLogger('sequencer:publisher'); this.ethereumSlotDuration = BigInt(config.ethereumSlotDuration); + this.aztecSlotDuration = BigInt(config.aztecSlotDuration); this.epochCache = deps.epochCache; this.lastActions = deps.lastActions; @@ -286,7 +288,7 @@ export class SequencerPublisher { } public getCurrentL2Slot(): SlotNumber { - return this.epochCache.getEpochAndSlotNow().slot; + return this.epochCache.getSlotNow(); } /** @@ -596,20 +598,23 @@ export class SequencerPublisher { } /** - * @notice Will call `canProposeAtNextEthBlock` to make sure that it is possible to propose + * @notice Will call `canProposeAt` to make sure that it is possible to propose * @param tipArchive - The archive to check * @returns The slot and block number if it is possible to propose, undefined otherwise */ - public canProposeAtNextEthBlock( + public canProposeAt( tipArchive: Fr, msgSender: EthAddress, - opts: { forcePendingCheckpointNumber?: CheckpointNumber } = {}, + opts: { forcePendingCheckpointNumber?: CheckpointNumber; pipelined?: boolean } = {}, ) { // TODO: #14291 - should loop through multiple keys to check if any of them can propose const ignoredErrors = ['SlotAlreadyInChain', 'InvalidProposer', 'InvalidArchive']; + const pipelined = opts.pipelined ?? this.epochCache.isProposerPipeliningEnabled(); + const slotOffset = pipelined ? this.aztecSlotDuration : 0n; + return this.rollupContract - .canProposeAtNextEthBlock(tipArchive.toBuffer(), msgSender.toString(), Number(this.ethereumSlotDuration), { + .canProposeAt(tipArchive.toBuffer(), msgSender.toString(), this.ethereumSlotDuration, slotOffset, { forcePendingCheckpointNumber: opts.forcePendingCheckpointNumber, }) .catch(err => { @@ -623,6 +628,7 @@ export class SequencerPublisher { return undefined; }); } + /** * @notice Will simulate `validateHeader` to make sure that the block header is valid * @dev This is a convenience function that can be used by the sequencer to validate a "partial" header. 
@@ -811,7 +817,9 @@ export class SequencerPublisher { attestationsAndSignersSignature: Signature, options: { forcePendingCheckpointNumber?: CheckpointNumber }, ): Promise { - const ts = BigInt((await this.l1TxUtils.getBlock()).timestamp + this.ethereumSlotDuration); + // Anchor the simulation timestamp to the checkpoint's own slot start time + // rather than the current L1 block timestamp, which may overshoot into the next slot if the build ran late. + const ts = checkpoint.header.timestamp; const blobFields = checkpoint.toBlobFields(); const blobs = await getBlobsPerL1Block(blobFields); const blobInput = getPrefixedEthBlobCommitments(blobs); diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts index cc62248bb632..5821f87fb080 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts @@ -1,4 +1,4 @@ -import type { EpochCache } from '@aztec/epoch-cache'; +import { EpochCache } from '@aztec/epoch-cache'; import { BlockNumber, CheckpointNumber, @@ -438,6 +438,42 @@ describe('CheckpointProposalJob', () => { expect(call.previousCheckpointOutHashes).toHaveLength(1); expect(call.previousCheckpointOutHashes[0]).toEqual(previousCheckpoint.getCheckpointOutHash()); }); + + it('uses targetEpoch for previousCheckpointOutHashes when pipelining crosses epoch boundary', async () => { + // Pipelining scenario: wall-clock is in epoch 0, but target slot is in epoch 1. + // The key fix: getCheckpointsDataForEpoch must be called with targetEpoch, not epochNow. 
+ const epochNow = EpochNumber(0); + const targetEpoch = EpochNumber(1); + // Target slot is first slot of epoch 1 (epochDuration = 16) + const targetSlot = SlotNumber(l1Constants.epochDuration); + // Wall-clock slot is the last slot of epoch 0 + const slotNow = SlotNumber(l1Constants.epochDuration - 1); + + checkpointNumber = CheckpointNumber(2); + const previousCheckpoint = await Checkpoint.random(CheckpointNumber(1)); + + l2BlockSource.getCheckpointsDataForEpoch.mockResolvedValue([toCheckpointData(previousCheckpoint)]); + + job = createCheckpointProposalJob({ slotNow, targetSlot, epochNow, targetEpoch }); + job.setTimetable( + new SequencerTimetable({ + ethereumSlotDuration, + aztecSlotDuration: slotDuration, + l1PublishingTime: ethereumSlotDuration, + enforce: config.enforceTimeTable, + }), + ); + + // Build block successfully + const { txs, block } = await setupTxsAndBlock(p2p, globalVariables, 1, chainId); + checkpointBuilder.seedBlocks([block], [txs]); + validatorClient.collectAttestations.mockResolvedValue(getAttestations(block)); + + await job.execute(); + + // Verify getCheckpointsDataForEpoch was called with targetEpoch (1), not epochNow (0) + expect(l2BlockSource.getCheckpointsDataForEpoch).toHaveBeenCalledWith(targetEpoch); + }); }); /** @@ -512,13 +548,20 @@ describe('CheckpointProposalJob', () => { * Called in beforeEach to create the job, and tests can use job.updateConfig() * to modify config after creation. */ - function createCheckpointProposalJob(): TestCheckpointProposalJob { + function createCheckpointProposalJob(overrides?: { + slotNow?: SlotNumber; + targetSlot?: SlotNumber; + epochNow?: EpochNumber; + targetEpoch?: EpochNumber; + }): TestCheckpointProposalJob { const setStateFn = jest.fn(); const eventEmitter = new EventEmitter() as TypedEventEmitter; return new TestCheckpointProposalJob( - epoch, - SlotNumber(newSlotNumber), + overrides?.slotNow ?? SlotNumber(newSlotNumber), + overrides?.targetSlot ?? 
SlotNumber(newSlotNumber), + overrides?.epochNow ?? epoch, + overrides?.targetEpoch ?? epoch, checkpointNumber, lastBlockNumber, proposer, diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts index bed1f5c8cca4..2a3bd79c7a9e 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts @@ -1,4 +1,4 @@ -import type { EpochCache } from '@aztec/epoch-cache'; +import { EpochCache } from '@aztec/epoch-cache'; import { BlockNumber, CheckpointNumber, EpochNumber, SlotNumber } from '@aztec/foundation/branded-types'; import { Secp256k1Signer } from '@aztec/foundation/crypto/secp256k1-signer'; import { Fr } from '@aztec/foundation/curves/bn254'; @@ -289,8 +289,10 @@ describe('CheckpointProposalJob Timing Tests', () => { return new TimingTestCheckpointProposalJob( dateProvider, getSecondsIntoSlot, - epoch, slotNumber, + slotNumber, + epoch, + epoch, checkpointNumber, BlockNumber.ZERO, proposer, diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts index 9e24324d937e..24026200d942 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts @@ -31,7 +31,7 @@ import { MaliciousCommitteeAttestationsAndSigners, } from '@aztec/stdlib/block'; import { type Checkpoint, validateCheckpoint } from '@aztec/stdlib/checkpoint'; -import { getSlotStartBuildTimestamp } from '@aztec/stdlib/epoch-helpers'; +import { getSlotStartBuildTimestamp, getTimestampForSlot } from '@aztec/stdlib/epoch-helpers'; import { Gas } from '@aztec/stdlib/gas'; import { type BlockBuilderOptions, @@ -77,8 +77,10 @@ export class CheckpointProposalJob implements Traceable 
{ protected readonly log: Logger; constructor( - private readonly epoch: EpochNumber, - private readonly slot: SlotNumber, + private readonly slotNow: SlotNumber, + private readonly targetSlot: SlotNumber, + private readonly epochNow: EpochNumber, + private readonly targetEpoch: EpochNumber, private readonly checkpointNumber: CheckpointNumber, private readonly syncedToBlockNumber: BlockNumber, // TODO(palla/mbps): Can we remove the proposer in favor of attestorAddress? Need to check fisherman-node flows. @@ -106,7 +108,20 @@ export class CheckpointProposalJob implements Traceable { public readonly tracer: Tracer, bindings?: LoggerBindings, ) { - this.log = createLogger('sequencer:checkpoint-proposal', { ...bindings, instanceId: `slot-${slot}` }); + this.log = createLogger('sequencer:checkpoint-proposal', { + ...bindings, + instanceId: `slot-${this.slotNow}`, + }); + } + + /** The wall-clock slot during which the proposer builds. */ + private get slot(): SlotNumber { + return this.slotNow; + } + + /** The wall-clock epoch. 
*/ + private get epoch(): EpochNumber { + return this.epochNow; } /** @@ -119,7 +134,7 @@ export class CheckpointProposalJob implements Traceable { // In fisherman mode, we simulate slashing but don't actually publish to L1 // These are constant for the whole slot, so we only enqueue them once const votesPromises = new CheckpointVoter( - this.slot, + this.targetSlot, this.publisher, this.attestorAddress, this.validatorClient, @@ -146,6 +161,29 @@ export class CheckpointProposalJob implements Traceable { return; } + // If pipelining, wait until the submission slot so L1 recognizes the pipelined proposer + if (this.epochCache.isProposerPipeliningEnabled()) { + const submissionSlotTimestamp = + getTimestampForSlot(this.targetSlot, this.l1Constants) - BigInt(this.l1Constants.ethereumSlotDuration); + this.log.info(`Waiting until submission slot ${this.targetSlot} for L1 submission`, { + slot: this.slot, + submissionSlot: this.targetSlot, + submissionSlotTimestamp, + }); + await sleepUntil(new Date(Number(submissionSlotTimestamp) * 1000), this.dateProvider.nowAsDate()); + + // After waking, verify the parent checkpoint wasn't pruned during the sleep. + // We check L1's pending tip directly instead of canProposeAt, which also validates the proposer + // identity and would fail because the timestamp resolves to a different slot's proposer. 
+ const l1Tips = await this.publisher.rollupContract.getTips(); + if (l1Tips.pending < this.checkpointNumber - 1) { + this.log.warn( + `Parent checkpoint was pruned during pipelining sleep (L1 pending=${l1Tips.pending}, expected>=${this.checkpointNumber - 1}), skipping L1 submission for checkpoint ${this.checkpointNumber}`, + ); + return undefined; + } + } + // Then send everything to L1 const l1Response = await this.publisher.sendRequests(); const proposedAction = l1Response?.successfulActions.find(a => a === 'propose'); @@ -164,7 +202,7 @@ export class CheckpointProposalJob implements Traceable { return { // nullish operator needed for tests [Attributes.COINBASE]: this.validatorClient.getCoinbaseForAttestor(this.attestorAddress)?.toString(), - [Attributes.SLOT_NUMBER]: this.slot, + [Attributes.SLOT_NUMBER]: this.targetSlot, }; }) private async proposeCheckpoint(): Promise { @@ -174,8 +212,15 @@ export class CheckpointProposalJob implements Traceable { const feeRecipient = this.validatorClient.getFeeRecipientForAttestor(this.attestorAddress); // Start the checkpoint - this.setStateFn(SequencerState.INITIALIZING_CHECKPOINT, this.slot); - this.metrics.incOpenSlot(this.slot, this.proposer?.toString() ?? 'unknown'); + this.setStateFn(SequencerState.INITIALIZING_CHECKPOINT, this.targetSlot); + this.log.info(`Starting checkpoint proposal`, { + buildSlot: this.slot, + submissionSlot: this.targetSlot, + pipelining: this.epochCache.isProposerPipeliningEnabled(), + proposer: this.proposer?.toString(), + coinbase: coinbase.toString(), + }); + this.metrics.incOpenSlot(this.targetSlot, this.proposer?.toString() ?? 
'unknown'); // Enqueues checkpoint invalidation (constant for the whole slot) if (this.invalidateCheckpoint && !this.config.skipInvalidateBlockAsProposer) { @@ -186,7 +231,7 @@ export class CheckpointProposalJob implements Traceable { const checkpointGlobalVariables = await this.globalsBuilder.buildCheckpointGlobalVariables( coinbase, feeRecipient, - this.slot, + this.targetSlot, ); // Collect L1 to L2 messages for the checkpoint and compute their hash @@ -194,7 +239,7 @@ export class CheckpointProposalJob implements Traceable { const inHash = computeInHashFromL1ToL2Messages(l1ToL2Messages); // Collect the out hashes of all the checkpoints before this one in the same epoch - const previousCheckpointOutHashes = (await this.l2BlockSource.getCheckpointsDataForEpoch(this.epoch)) + const previousCheckpointOutHashes = (await this.l2BlockSource.getCheckpointsDataForEpoch(this.targetEpoch)) .filter(c => c.checkpointNumber < this.checkpointNumber) .map(c => c.checkpointOutHash); @@ -251,8 +296,8 @@ export class CheckpointProposalJob implements Traceable { } if (blocksInCheckpoint.length === 0) { - this.log.warn(`No blocks were built for slot ${this.slot}`, { slot: this.slot }); - this.eventEmitter.emit('checkpoint-empty', { slot: this.slot }); + this.log.warn(`No blocks were built for slot ${this.targetSlot}`, { slot: this.targetSlot }); + this.eventEmitter.emit('checkpoint-empty', { slot: this.targetSlot }); return undefined; } @@ -260,14 +305,14 @@ export class CheckpointProposalJob implements Traceable { if (minBlocksForCheckpoint !== undefined && blocksInCheckpoint.length < minBlocksForCheckpoint) { this.log.warn( `Checkpoint has fewer blocks than minimum (${blocksInCheckpoint.length} < ${minBlocksForCheckpoint}), skipping proposal`, - { slot: this.slot, blocksBuilt: blocksInCheckpoint.length, minBlocksForCheckpoint }, + { slot: this.targetSlot, blocksBuilt: blocksInCheckpoint.length, minBlocksForCheckpoint }, ); return undefined; } // Assemble and broadcast the 
checkpoint proposal, including the last block that was not // broadcasted yet, and wait to collect the committee attestations. - this.setStateFn(SequencerState.ASSEMBLING_CHECKPOINT, this.slot); + this.setStateFn(SequencerState.ASSEMBLING_CHECKPOINT, this.targetSlot); const checkpoint = await checkpointBuilder.completeCheckpoint(); // Final validation: per-block limits are only checked if the operator set them explicitly. @@ -298,10 +343,10 @@ export class CheckpointProposalJob implements Traceable { // Do not collect attestations nor publish to L1 in fisherman mode if (this.config.fishermanMode) { this.log.info( - `Built checkpoint for slot ${this.slot} with ${blocksInCheckpoint.length} blocks. ` + + `Built checkpoint for slot ${this.targetSlot} with ${blocksInCheckpoint.length} blocks. ` + `Skipping proposal in fisherman mode.`, { - slot: this.slot, + slot: this.targetSlot, checkpoint: checkpoint.header.toInspect(), blocksBuilt: blocksInCheckpoint.length, }, @@ -330,7 +375,7 @@ export class CheckpointProposalJob implements Traceable { const blockProposedAt = this.dateProvider.now(); await this.p2pClient.broadcastCheckpointProposal(proposal); - this.setStateFn(SequencerState.COLLECTING_ATTESTATIONS, this.slot); + this.setStateFn(SequencerState.COLLECTING_ATTESTATIONS, this.targetSlot); const attestations = await this.waitForAttestations(proposal); const blockAttestedAt = this.dateProvider.now(); @@ -343,7 +388,7 @@ export class CheckpointProposalJob implements Traceable { attestationsSignature = await this.validatorClient.signAttestationsAndSigners( attestations, signer, - this.slot, + this.targetSlot, this.checkpointNumber, ); } catch (err) { @@ -356,10 +401,10 @@ export class CheckpointProposalJob implements Traceable { } // Enqueue publishing the checkpoint to L1 - this.setStateFn(SequencerState.PUBLISHING_CHECKPOINT, this.slot); + this.setStateFn(SequencerState.PUBLISHING_CHECKPOINT, this.targetSlot); const aztecSlotDuration = this.l1Constants.slotDuration; - 
const slotStartBuildTimestamp = this.getSlotStartBuildTimestamp(); - const txTimeoutAt = new Date((slotStartBuildTimestamp + aztecSlotDuration) * 1000); + const submissionSlotStart = Number(getTimestampForSlot(this.targetSlot, this.l1Constants)); + const txTimeoutAt = new Date((submissionSlotStart + aztecSlotDuration) * 1000); // If we have been configured to potentially skip publishing checkpoint then roll the dice here if ( @@ -408,7 +453,6 @@ export class CheckpointProposalJob implements Traceable { const blocksInCheckpoint: L2Block[] = []; const txHashesAlreadyIncluded = new Set(); const initialBlockNumber = BlockNumber(this.syncedToBlockNumber + 1); - const slot = this.slot; // Last block in the checkpoint will usually be flagged as pending broadcast, so we send it along with the checkpoint proposal let blockPendingBroadcast: { block: L2Block; txs: Tx[] } | undefined = undefined; @@ -422,7 +466,11 @@ export class CheckpointProposalJob implements Traceable { const timingInfo = this.timetable.canStartNextBlock(secondsIntoSlot); if (!timingInfo.canStart) { - this.log.debug(`Not enough time left in slot to start another block`, { slot, blocksBuilt, secondsIntoSlot }); + this.log.debug(`Not enough time left in slot to start another block`, { + slot: this.targetSlot, + blocksBuilt, + secondsIntoSlot, + }); break; } @@ -454,7 +502,11 @@ export class CheckpointProposalJob implements Traceable { } else if ('error' in buildResult) { // If there was an error building the block, just exit the loop and give up the rest of the slot if (!(buildResult.error instanceof SequencerInterruptedError)) { - this.log.warn(`Halting block building for slot ${slot}`, { slot, blocksBuilt, error: buildResult.error }); + this.log.warn(`Halting block building for slot ${this.targetSlot}`, { + slot: this.targetSlot, + blocksBuilt, + error: buildResult.error, + }); } break; } @@ -463,11 +515,15 @@ export class CheckpointProposalJob implements Traceable { blocksInCheckpoint.push(block); 
usedTxs.forEach(tx => txHashesAlreadyIncluded.add(tx.txHash.toString())); - // If this is the last block, send the proposed block to the archiver, - // and exit the loop now so we can build the checkpoint and start collecting attestations. + // If this is the last block, sync it to the archiver and exit the loop + // so we can build the checkpoint and start collecting attestations. if (timingInfo.isLastBlock) { await this.syncProposedBlockToArchiver(block); - this.log.verbose(`Completed final block ${blockNumber} for slot ${slot}`, { slot, blockNumber, blocksBuilt }); + this.log.verbose(`Completed final block ${blockNumber} for slot ${this.targetSlot}`, { + slot: this.targetSlot, + blockNumber, + blocksBuilt, + }); blockPendingBroadcast = { block, txs: usedTxs }; break; } @@ -490,8 +546,8 @@ export class CheckpointProposalJob implements Traceable { await this.waitUntilNextSubslot(timingInfo.deadline); } - this.log.verbose(`Block building loop completed for slot ${this.slot}`, { - slot: this.slot, + this.log.verbose(`Block building loop completed for slot ${this.targetSlot}`, { + slot: this.targetSlot, blocksBuilt: blocksInCheckpoint.length, }); @@ -523,8 +579,10 @@ export class CheckpointProposalJob implements Traceable { /** Sleeps until it is time to produce the next block in the slot */ @trackSpan('CheckpointProposalJob.waitUntilNextSubslot') private async waitUntilNextSubslot(nextSubslotStart: number) { - this.setStateFn(SequencerState.WAITING_UNTIL_NEXT_BLOCK, this.slot); - this.log.verbose(`Waiting until time for the next block at ${nextSubslotStart}s into slot`, { slot: this.slot }); + this.setStateFn(SequencerState.WAITING_UNTIL_NEXT_BLOCK, this.targetSlot); + this.log.verbose(`Waiting until time for the next block at ${nextSubslotStart}s into slot`, { + slot: this.targetSlot, + }); await this.waitUntilTimeInSlot(nextSubslotStart); } @@ -545,7 +603,7 @@ export class CheckpointProposalJob implements Traceable { opts; this.log.verbose( - `Preparing block 
${blockNumber} index ${indexWithinCheckpoint} at checkpoint ${this.checkpointNumber} for slot ${this.slot}`, + `Preparing block ${blockNumber} index ${indexWithinCheckpoint} at checkpoint ${this.checkpointNumber} for slot ${this.targetSlot}`, { ...checkpointBuilder.getConstantData(), ...opts }, ); @@ -554,10 +612,10 @@ export class CheckpointProposalJob implements Traceable { const { availableTxs, canStartBuilding, minTxs } = await this.waitForMinTxs(opts); if (!canStartBuilding) { this.log.warn( - `Not enough txs to build block ${blockNumber} at index ${indexWithinCheckpoint} in slot ${this.slot} (got ${availableTxs} txs but needs ${minTxs})`, - { blockNumber, slot: this.slot, indexWithinCheckpoint }, + `Not enough txs to build block ${blockNumber} at index ${indexWithinCheckpoint} in slot ${this.targetSlot} (got ${availableTxs} txs but needs ${minTxs})`, + { blockNumber, slot: this.targetSlot, indexWithinCheckpoint }, ); - this.eventEmitter.emit('block-tx-count-check-failed', { minTxs, availableTxs, slot: this.slot }); + this.eventEmitter.emit('block-tx-count-check-failed', { minTxs, availableTxs, slot: this.targetSlot }); this.metrics.recordBlockProposalFailed('insufficient_txs'); return undefined; } @@ -570,10 +628,10 @@ export class CheckpointProposalJob implements Traceable { ); this.log.debug( - `Building block ${blockNumber} at index ${indexWithinCheckpoint} for slot ${this.slot} with ${availableTxs} available txs`, - { slot: this.slot, blockNumber, indexWithinCheckpoint }, + `Building block ${blockNumber} at index ${indexWithinCheckpoint} for slot ${this.targetSlot} with ${availableTxs} available txs`, + { slot: this.targetSlot, blockNumber, indexWithinCheckpoint }, ); - this.setStateFn(SequencerState.CREATING_BLOCK, this.slot); + this.setStateFn(SequencerState.CREATING_BLOCK, this.targetSlot); // Per-block limits are operator overrides (from SEQ_MAX_L2_BLOCK_GAS etc.) 
further capped // by remaining checkpoint-level budgets inside CheckpointBuilder before each block is built. @@ -608,16 +666,19 @@ export class CheckpointProposalJob implements Traceable { if (buildResult.status === 'insufficient-valid-txs') { this.log.warn( - `Block ${blockNumber} at index ${indexWithinCheckpoint} on slot ${this.slot} has too few valid txs to be proposed`, + `Block ${blockNumber} at index ${indexWithinCheckpoint} on slot ${this.targetSlot} has too few valid txs to be proposed`, { - slot: this.slot, + slot: this.targetSlot, blockNumber, numTxs: buildResult.processedCount, indexWithinCheckpoint, minValidTxs, }, ); - this.eventEmitter.emit('block-build-failed', { reason: `Insufficient valid txs`, slot: this.slot }); + this.eventEmitter.emit('block-build-failed', { + reason: `Insufficient valid txs`, + slot: this.targetSlot, + }); this.metrics.recordBlockProposalFailed('insufficient_valid_txs'); return undefined; } @@ -637,17 +698,24 @@ export class CheckpointProposalJob implements Traceable { const manaPerSec = block.header.totalManaUsed.toNumberUnsafe() / (blockBuildDuration / 1000); this.log.info( - `Built block ${block.number} at checkpoint ${this.checkpointNumber} for slot ${this.slot} with ${numTxs} txs`, + `Built block ${block.number} at checkpoint ${this.checkpointNumber} for slot ${this.targetSlot} with ${numTxs} txs`, { blockHash, txHashes, manaPerSec, ...blockStats }, ); - this.eventEmitter.emit('block-proposed', { blockNumber: block.number, slot: this.slot }); + this.eventEmitter.emit('block-proposed', { + blockNumber: block.number, + slot: this.targetSlot, + buildSlot: this.slotNow, + }); this.metrics.recordBuiltBlock(blockBuildDuration, block.header.totalManaUsed.toNumberUnsafe()); return { block, usedTxs }; } catch (err: any) { - this.eventEmitter.emit('block-build-failed', { reason: err.message, slot: this.slot }); - this.log.error(`Error building block`, err, { blockNumber, slot: this.slot }); + 
this.eventEmitter.emit('block-build-failed', { + reason: err.message, + slot: this.targetSlot, + }); + this.log.error(`Error building block`, err, { blockNumber, slot: this.targetSlot }); this.metrics.recordBlockProposalFailed(err.name || 'unknown_error'); this.metrics.recordFailedBlock(); return { error: err }; @@ -707,10 +775,10 @@ export class CheckpointProposalJob implements Traceable { } // Wait a bit before checking again - this.setStateFn(SequencerState.WAITING_FOR_TXS, this.slot); + this.setStateFn(SequencerState.WAITING_FOR_TXS, this.targetSlot); this.log.verbose( - `Waiting for enough txs to build block ${blockNumber} at index ${indexWithinCheckpoint} in slot ${this.slot} (have ${availableTxs} but need ${minTxs})`, - { blockNumber, slot: this.slot, indexWithinCheckpoint }, + `Waiting for enough txs to build block ${blockNumber} at index ${indexWithinCheckpoint} in slot ${this.targetSlot} (have ${availableTxs} but need ${minTxs})`, + { blockNumber, slot: this.targetSlot, indexWithinCheckpoint }, ); await this.waitForTxsPollingInterval(); availableTxs = await this.p2pClient.getPendingTxCount(); @@ -910,19 +978,19 @@ export class CheckpointProposalJob implements Traceable { private async handleCheckpointEndAsFisherman(checkpoint: Checkpoint | undefined) { // Perform L1 fee analysis before clearing requests // The callback is invoked asynchronously after the next block is mined - const feeAnalysis = await this.publisher.analyzeL1Fees(this.slot, analysis => + const feeAnalysis = await this.publisher.analyzeL1Fees(this.targetSlot, analysis => this.metrics.recordFishermanFeeAnalysis(analysis), ); if (checkpoint) { - this.log.info(`Validation checkpoint building SUCCEEDED for slot ${this.slot}`, { + this.log.info(`Validation checkpoint building SUCCEEDED for slot ${this.targetSlot}`, { ...checkpoint.toCheckpointInfo(), ...checkpoint.getStats(), feeAnalysisId: feeAnalysis?.id, }); } else { - this.log.warn(`Validation block building FAILED for slot ${this.slot}`, { 
- slot: this.slot, + this.log.warn(`Validation block building FAILED for slot ${this.targetSlot}`, { + slot: this.targetSlot, feeAnalysisId: feeAnalysis?.id, }); this.metrics.recordCheckpointProposalFailed('block_build_failed'); @@ -936,15 +1004,15 @@ export class CheckpointProposalJob implements Traceable { */ private handleHASigningError(err: any, errorContext: string): boolean { if (err instanceof DutyAlreadySignedError) { - this.log.info(`${errorContext} for slot ${this.slot} already signed by another HA node, yielding`, { - slot: this.slot, + this.log.info(`${errorContext} for slot ${this.targetSlot} already signed by another HA node, yielding`, { + slot: this.targetSlot, signedByNode: err.signedByNode, }); return true; } if (err instanceof SlashingProtectionError) { - this.log.info(`${errorContext} for slot ${this.slot} blocked by slashing protection, yielding`, { - slot: this.slot, + this.log.info(`${errorContext} for slot ${this.targetSlot} blocked by slashing protection, yielding`, { + slot: this.targetSlot, existingMessageHash: err.existingMessageHash, attemptedMessageHash: err.attemptedMessageHash, }); diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_voter.ha.integration.test.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_voter.ha.integration.test.ts index 473b8ecade30..40d88b89a911 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_voter.ha.integration.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_voter.ha.integration.test.ts @@ -278,6 +278,7 @@ describe('CheckpointVoter HA Integration', () => { requiredConfirmations: 1, maxL1TxInclusionWaitPulseSeconds: 60, ethereumSlotDuration: DefaultL1ContractsConfig.ethereumSlotDuration, + aztecSlotDuration: TEST_L1_CONSTANTS.slotDuration, fishermanMode: false, l1ChainId: 1, }; @@ -292,6 +293,7 @@ describe('CheckpointVoter HA Integration', () => { ts: BigInt(Math.floor(Date.now() / 1000)), nowMs: BigInt(Date.now()), }); + 
epochCache.getSlotNow.mockReturnValue(slot); const slashFactoryContract = mock(); diff --git a/yarn-project/sequencer-client/src/sequencer/events.ts b/yarn-project/sequencer-client/src/sequencer/events.ts index 7c1e22cf5ca5..d91b00ee190a 100644 --- a/yarn-project/sequencer-client/src/sequencer/events.ts +++ b/yarn-project/sequencer-client/src/sequencer/events.ts @@ -13,7 +13,7 @@ export type SequencerEvents = { ['proposer-rollup-check-failed']: (args: { reason: string; slot: SlotNumber }) => void; ['block-tx-count-check-failed']: (args: { minTxs: number; availableTxs: number; slot: SlotNumber }) => void; ['block-build-failed']: (args: { reason: string; slot: SlotNumber }) => void; - ['block-proposed']: (args: { blockNumber: BlockNumber; slot: SlotNumber }) => void; + ['block-proposed']: (args: { blockNumber: BlockNumber; slot: SlotNumber; buildSlot: SlotNumber }) => void; ['checkpoint-empty']: (args: { slot: SlotNumber }) => void; ['checkpoint-publish-failed']: (args: { slot: SlotNumber; diff --git a/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts b/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts index c30a93fb770e..d9ff3cb58919 100644 --- a/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts @@ -171,14 +171,24 @@ describe('sequencer', () => { epoch: EpochNumber(1), slot: SlotNumber(1), ts: 1000n, - now: 1000n, + nowSeconds: 1000n, })); + epochCache.getTargetSlot.mockReturnValue(SlotNumber(1)); + epochCache.getTargetEpoch.mockReturnValue(EpochNumber(1)); + epochCache.getTargetEpochAndSlotInNextL1Slot.mockImplementation(() => ({ + epoch: EpochNumber(1), + slot: SlotNumber(1), + ts: 1000n, + nowSeconds: 1000n, + })); + epochCache.isProposerPipeliningEnabled.mockReturnValue(false); epochCache.getCommittee.mockResolvedValue({ committee, seed: 1n, epoch: EpochNumber(1), isEscapeHatchOpen: false, }); + 
epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(undefined); publisher = mockDeep(); publisher.epochCache = epochCache; @@ -187,7 +197,7 @@ describe('sequencer', () => { publisher.enqueueProposeCheckpoint.mockResolvedValue(undefined); publisher.enqueueGovernanceCastSignal.mockResolvedValue(true); publisher.enqueueSlashingActions.mockResolvedValue(true); - publisher.canProposeAtNextEthBlock.mockResolvedValue({ + publisher.canProposeAt.mockResolvedValue({ slot: SlotNumber(newSlotNumber), checkpointNumber: CheckpointNumber.fromBlockNumber(newBlockNumber), timeOfNextL1Slot: 1000n, @@ -302,6 +312,8 @@ describe('sequencer', () => { validatorClient.createBlockProposal.mockImplementation(() => Promise.resolve(createBlockProposal())); validatorClient.createCheckpointProposal.mockImplementation(() => Promise.resolve(createCheckpointProposal())); validatorClient.signAttestationsAndSigners.mockImplementation(() => Promise.resolve(getSignatures()[0].signature)); + validatorClient.getCoinbaseForAttestor.mockReturnValue(coinbase); + validatorClient.getFeeRecipientForAttestor.mockReturnValue(feeRecipient); slasherClient = mock(); slasherClient.getProposerActions.mockResolvedValue([]); @@ -352,21 +364,21 @@ describe('sequencer', () => { expect(checkpointBuilder.buildBlockCalls).toHaveLength(0); expect(publisher.enqueueProposeCheckpoint).not.toHaveBeenCalled(); - expect(publisher.canProposeAtNextEthBlock).not.toHaveBeenCalled(); + expect(publisher.canProposeAt).not.toHaveBeenCalled(); }); it('builds a checkpoint when it is their turn', async () => { await setupSingleTxBlock(); // Not your turn! canProposeAtNextEthBlock returns undefined - publisher.canProposeAtNextEthBlock.mockResolvedValue(undefined); + publisher.canProposeAt.mockResolvedValue(undefined); await sequencer.work(); // When it's not our turn, we should not build the checkpoint expect(checkpointBuilder.buildBlockCalls).toHaveLength(0); // Now it's our turn! 
- publisher.canProposeAtNextEthBlock.mockResolvedValue({ + publisher.canProposeAt.mockResolvedValue({ slot: block.header.globalVariables.slotNumber, checkpointNumber: CheckpointNumber.fromBlockNumber(block.header.globalVariables.blockNumber), timeOfNextL1Slot: 1000n, @@ -474,7 +486,7 @@ describe('sequencer', () => { pub.enqueueProposeCheckpoint.mockResolvedValue(undefined); pub.enqueueGovernanceCastSignal.mockResolvedValue(true); pub.enqueueSlashingActions.mockResolvedValue(true); - pub.canProposeAtNextEthBlock.mockResolvedValue({ + pub.canProposeAt.mockResolvedValue({ slot: SlotNumber(newSlotNumber + i), checkpointNumber: CheckpointNumber.fromBlockNumber(BlockNumber(newBlockNumber)), timeOfNextL1Slot: 1000n, @@ -491,8 +503,34 @@ describe('sequencer', () => { // Configure epoch cache to return different slots epochCache.getEpochAndSlotInNextL1Slot .mockReset() - .mockReturnValueOnce({ epoch: EpochNumber(1), slot: SlotNumber(1), ts: 1000n, now: 1000n }) - .mockReturnValueOnce({ epoch: EpochNumber(1), slot: SlotNumber(2), ts: 1000n, now: 1000n }); + .mockReturnValueOnce({ + epoch: EpochNumber(1), + slot: SlotNumber(1), + ts: 1000n, + nowSeconds: 1000n, + }) + .mockReturnValueOnce({ + epoch: EpochNumber(1), + slot: SlotNumber(2), + ts: 1000n, + nowSeconds: 1000n, + }); + epochCache.getTargetSlot.mockReset().mockReturnValueOnce(SlotNumber(1)).mockReturnValueOnce(SlotNumber(2)); + epochCache.getTargetEpoch.mockReturnValue(EpochNumber(1)); + epochCache.getTargetEpochAndSlotInNextL1Slot + .mockReset() + .mockReturnValueOnce({ + epoch: EpochNumber(1), + slot: SlotNumber(1), + ts: 1000n, + nowSeconds: 1000n, + }) + .mockReturnValueOnce({ + epoch: EpochNumber(1), + slot: SlotNumber(2), + ts: 1000n, + nowSeconds: 1000n, + }); sequencer.updateConfig({ enforceTimeTable: false, maxTxsPerBlock: 4 }); @@ -887,6 +925,35 @@ describe('sequencer', () => { expect(publisher.enqueueProposeCheckpoint).toHaveBeenCalled(); }); }); + + describe('view-based proposer lookup', () => { + 
it('passes target slot to getProposerAttesterAddressInSlot', async () => { + const proposer = signer.address; + validatorClient.getValidatorAddresses.mockReturnValue([proposer]); + epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(proposer); + + await sequencer.checkCanProposeForTest(SlotNumber(2)); + + expect(epochCache.getProposerAttesterAddressInSlot).toHaveBeenCalledWith(SlotNumber(2)); + }); + + it('when pipelining enabled, checkCanPropose receives target slot with pipeline offset', async () => { + const proposer = signer.address; + validatorClient.getValidatorAddresses.mockReturnValue([proposer]); + epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(proposer); + epochCache.isProposerPipeliningEnabled.mockReturnValue(true); + epochCache.getTargetEpochAndSlotInNextL1Slot.mockReturnValue({ + epoch: EpochNumber(1), + slot: SlotNumber(2), + ts: 1000n, + nowSeconds: 1000n, + }); + + await sequencer.checkCanProposeForTest(SlotNumber(2)); + + expect(epochCache.getProposerAttesterAddressInSlot).toHaveBeenCalledWith(SlotNumber(2)); + }); + }); }); class TestSequencer extends Sequencer { @@ -902,4 +969,8 @@ class TestSequencer extends Sequencer { this.setState(SequencerState.IDLE, undefined, { force: true }); return super.work(); } + + public checkCanProposeForTest(slot: SlotNumber) { + return this.checkCanPropose(slot); + } } diff --git a/yarn-project/sequencer-client/src/sequencer/sequencer.ts b/yarn-project/sequencer-client/src/sequencer/sequencer.ts index d75788ea3cf4..1392a1be2ec7 100644 --- a/yarn-project/sequencer-client/src/sequencer/sequencer.ts +++ b/yarn-project/sequencer-client/src/sequencer/sequencer.ts @@ -192,10 +192,18 @@ export class Sequencer extends (EventEmitter as new () => TypedEventEmitter TypedEventEmitter this.lastEpochForStrategyComparison) + (this.lastEpochForStrategyComparison === undefined || targetEpoch > this.lastEpochForStrategyComparison) ) { - this.logStrategyComparison(epoch, 
checkpointProposalJob.getPublisher()); - this.lastEpochForStrategyComparison = epoch; + this.logStrategyComparison(targetEpoch, checkpointProposalJob.getPublisher()); + this.lastEpochForStrategyComparison = targetEpoch; } return checkpoint; @@ -227,43 +235,48 @@ export class Sequencer extends (EventEmitter as new () => TypedEventEmitter { - // Check we have not already processed this slot (cheapest check) + // Check we have not already processed this target slot (cheapest check) // We only check this if enforce timetable is set, since we want to keep processing the same slot if we are not // running against actual time (eg when we use sandbox-style automining) if ( this.lastSlotForCheckpointProposalJob && - this.lastSlotForCheckpointProposalJob >= slot && + this.lastSlotForCheckpointProposalJob >= targetSlot && this.config.enforceTimeTable ) { - this.log.trace(`Slot ${slot} has already been processed`); + this.log.trace(`Target slot ${targetSlot} has already been processed`); return undefined; } - // But if we have already proposed for this slot, the we definitely have to skip it, automining or not - if (this.lastCheckpointProposed && this.lastCheckpointProposed.header.slotNumber >= slot) { - this.log.trace(`Slot ${slot} has already been published as checkpoint ${this.lastCheckpointProposed.number}`); + // But if we have already proposed for this slot, then we definitely have to skip it, automining or not + if (this.lastCheckpointProposed && this.lastCheckpointProposed.header.slotNumber >= targetSlot) { + this.log.trace( + `Slot ${targetSlot} has already been published as checkpoint ${this.lastCheckpointProposed.number}`, + ); return undefined; } // Check all components are synced to latest as seen by the archiver (queries all subsystems) const syncedTo = await this.checkSync({ ts, slot }); if (!syncedTo) { - await this.tryVoteWhenSyncFails({ slot, ts }); + await this.tryVoteWhenSyncFails({ slot, targetSlot, ts }); return undefined; } - // If escape hatch is open 
for this epoch, do not start checkpoint proposal work and do not attempt invalidations. + // If escape hatch is open for the target epoch, do not start checkpoint proposal work and do not attempt invalidations. // Still perform governance/slashing voting (as proposer) once per slot. - const isEscapeHatchOpen = await this.epochCache.isEscapeHatchOpen(epoch); + // When pipelining, we check the target epoch (slot+1's epoch) since that's the epoch we're building for. + const isEscapeHatchOpen = await this.epochCache.isEscapeHatchOpen(targetEpoch); if (isEscapeHatchOpen) { this.setState(SequencerState.PROPOSER_CHECK, slot); - const [canPropose, proposer] = await this.checkCanPropose(slot); + const [canPropose, proposer] = await this.checkCanPropose(targetSlot); if (canPropose) { await this.tryVoteWhenEscapeHatchOpen({ slot, proposer }); } else { @@ -280,17 +293,18 @@ export class Sequencer extends (EventEmitter as new () => TypedEventEmitter TypedEventEmitter= slot) { + // Check that the target slot is not taken by a block already (should never happen, since only us can propose for this slot) + if (syncedTo.blockData && syncedTo.blockData.header.getSlot() >= targetSlot) { this.log.warn( - `Cannot propose block at next L2 slot ${slot} since that slot was taken by block ${syncedTo.blockNumber}`, + `Cannot propose block at target slot ${targetSlot} since that slot was taken by block ${syncedTo.blockNumber}`, { ...logCtx, block: syncedTo.blockData.header.toInspect() }, ); this.metrics.recordCheckpointPrecheckFailed('slot_already_taken'); @@ -325,13 +339,11 @@ export class Sequencer extends (EventEmitter as new () => TypedEventEmitter TypedEventEmitter TypedEventEmitter TypedEventEmitter TypedEventEmitter TypedEventEmitter { + protected async checkCanPropose(targetSlot: SlotNumber): Promise<[boolean, EthAddress | undefined]> { let proposer: EthAddress | undefined; try { - proposer = await this.epochCache.getProposerAttesterAddressInSlot(slot); + proposer = await 
this.epochCache.getProposerAttesterAddressInSlot(targetSlot); } catch (e) { if (e instanceof NoCommitteeError) { - if (this.lastSlotForNoCommitteeWarning !== slot) { - this.lastSlotForNoCommitteeWarning = slot; - this.log.warn(`Cannot propose at next L2 slot ${slot} since the committee does not exist on L1`); + if (this.lastSlotForNoCommitteeWarning !== targetSlot) { + this.lastSlotForNoCommitteeWarning = targetSlot; + this.log.warn(`Cannot propose at target slot ${targetSlot} since the committee does not exist on L1`); } return [false, undefined]; } - this.log.error(`Error getting proposer for slot ${slot}`, e); + this.log.error(`Error getting proposer for target slot ${targetSlot}`, e); return [false, undefined]; } @@ -578,10 +604,15 @@ export class Sequencer extends (EventEmitter as new () => TypedEventEmitter addr.equals(proposer)); if (!weAreProposer) { - this.log.debug(`Cannot propose at slot ${slot} since we are not a proposer`, { validatorAddresses, proposer }); + this.log.debug(`Cannot propose at target slot ${targetSlot} since we are not a proposer`, { + targetSlot, + validatorAddresses, + proposer, + }); return [false, proposer]; } + this.log.debug(`We are the proposer for target slot ${targetSlot}`, { targetSlot, proposer }); return [true, proposer]; } @@ -590,8 +621,8 @@ export class Sequencer extends (EventEmitter as new () => TypedEventEmitter ({ [Attributes.SLOT_NUMBER]: slot })) - protected async tryVoteWhenSyncFails(args: { slot: SlotNumber; ts: bigint }): Promise { - const { slot } = args; + protected async tryVoteWhenSyncFails(args: { slot: SlotNumber; targetSlot: SlotNumber; ts: bigint }): Promise { + const { slot, targetSlot } = args; // Prevent duplicate attempts in the same slot if (this.lastSlotForFallbackVote === slot) { @@ -619,7 +650,7 @@ export class Sequencer extends (EventEmitter as new () => TypedEventEmitter TypedEventEmitter { + getCommittee(_slot: SlotTag = 'now'): Promise { return Promise.resolve({ committee: undefined, seed: 0n, 
@@ -17,6 +17,22 @@ export class MockEpochCache implements EpochCacheInterface { }); } + getSlotNow(): SlotNumber { + return SlotNumber(0); + } + + getTargetSlot(): SlotNumber { + return SlotNumber(0); + } + + getEpochNow(): EpochNumber { + return EpochNumber.ZERO; + } + + getTargetEpoch(): EpochNumber { + return EpochNumber.ZERO; + } + getEpochAndSlotNow(): EpochAndSlot & { nowMs: bigint } { return { epoch: EpochNumber.ZERO, @@ -26,15 +42,23 @@ export class MockEpochCache implements EpochCacheInterface { }; } - getEpochAndSlotInNextL1Slot(): EpochAndSlot & { now: bigint } { + getEpochAndSlotInNextL1Slot(): EpochAndSlot & { nowSeconds: bigint } { return { epoch: EpochNumber.ZERO, slot: SlotNumber(0), ts: 0n, - now: 0n, + nowSeconds: 0n, }; } + getTargetEpochAndSlotInNextL1Slot(): EpochAndSlot & { nowSeconds: bigint } { + return this.getEpochAndSlotInNextL1Slot(); + } + + isProposerPipeliningEnabled(): boolean { + return false; + } + getProposerIndexEncoding(_epoch: EpochNumber, _slot: SlotNumber, _seed: bigint): `0x${string}` { return '0x00'; } @@ -50,6 +74,13 @@ export class MockEpochCache implements EpochCacheInterface { }; } + getTargetAndNextSlot(): { targetSlot: SlotNumber; nextSlot: SlotNumber } { + return { + targetSlot: SlotNumber(0), + nextSlot: SlotNumber(0), + }; + } + getProposerAttesterAddressInSlot(_slot: SlotNumber): Promise { return Promise.resolve(undefined); } @@ -66,6 +97,14 @@ export class MockEpochCache implements EpochCacheInterface { return Promise.resolve([]); } + isEscapeHatchOpen(_epoch: EpochNumber): Promise { + return Promise.resolve(false); + } + + isEscapeHatchOpenAtSlot(_slot: SlotTag): Promise { + return Promise.resolve(false); + } + getL1Constants(): L1RollupConstants { return EmptyL1RollupConstants; } diff --git a/yarn-project/validator-client/src/block_proposal_handler.ts b/yarn-project/validator-client/src/block_proposal_handler.ts index 642ec9410144..844c8adec1e4 100644 --- 
a/yarn-project/validator-client/src/block_proposal_handler.ts +++ b/yarn-project/validator-client/src/block_proposal_handler.ts @@ -166,11 +166,15 @@ export class BlockProposalHandler { // since a pending checkpoint prune may remove blocks we'd otherwise find. // This affects mostly the block_number_already_exists check, since a pending // checkpoint prune could remove a block that would conflict with this proposal. - // TODO(@Maddiaa0): This may break staggered slots. - const blockSourceSync = await this.waitForBlockSourceSync(slotNumber); - if (!blockSourceSync) { - this.log.warn(`Block source is not synced, skipping processing`, proposalInfo); - return { isValid: false, reason: 'block_source_not_synced' }; + // When pipelining is enabled, the proposer builds ahead of L1 submission, so the + // block source won't have synced to the proposed slot yet. Skip the sync wait to + // avoid eating into the attestation window. + if (!this.epochCache.isProposerPipeliningEnabled()) { + const blockSourceSync = await this.waitForBlockSourceSync(slotNumber); + if (!blockSourceSync) { + this.log.warn(`Block source is not synced, skipping processing`, proposalInfo); + return { isValid: false, reason: 'block_source_not_synced' }; + } } // Check that the parent proposal is a block we know, otherwise reexecution would fail. diff --git a/yarn-project/validator-client/src/validator.ha.integration.test.ts b/yarn-project/validator-client/src/validator.ha.integration.test.ts index 1bf1fbd5229b..07e997c25709 100644 --- a/yarn-project/validator-client/src/validator.ha.integration.test.ts +++ b/yarn-project/validator-client/src/validator.ha.integration.test.ts @@ -5,7 +5,7 @@ * rather than mocks to verify the HA coordination works correctly. 
*/ import type { BlobClientInterface } from '@aztec/blob-client/client'; -import type { EpochCache } from '@aztec/epoch-cache'; +import { EpochCache } from '@aztec/epoch-cache'; import { IndexWithinCheckpoint } from '@aztec/foundation/branded-types'; import { SecretValue } from '@aztec/foundation/config'; import { Fr } from '@aztec/foundation/curves/bn254'; diff --git a/yarn-project/validator-client/src/validator.test.ts b/yarn-project/validator-client/src/validator.test.ts index 804869df8474..b76ac2bce67c 100644 --- a/yarn-project/validator-client/src/validator.test.ts +++ b/yarn-project/validator-client/src/validator.test.ts @@ -114,10 +114,12 @@ describe('ValidatorClient', () => { }); worldState = mock(); epochCache = mock(); + epochCache.filterInCommittee.mockImplementation((_slot, addresses) => Promise.resolve(addresses)); epochCache.getL1Constants.mockReturnValue({ epochDuration: 8 } satisfies Parameters< typeof getEpochAtSlot >[1] as any); + blockSource = mock(); blockSource.getCheckpointedBlocksForEpoch.mockResolvedValue([]); blockSource.getCheckpointsDataForEpoch.mockResolvedValue([]); @@ -341,8 +343,8 @@ describe('ValidatorClient', () => { ); epochCache.isInCommittee.mockResolvedValue(true); - epochCache.getCurrentAndNextSlot.mockReturnValue({ - currentSlot: proposal.slotNumber, + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: proposal.slotNumber, nextSlot: SlotNumber(proposal.slotNumber + 1), }); epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(proposal.getSender()); @@ -671,8 +673,8 @@ describe('ValidatorClient', () => { it('should return false if the proposer is not the current proposer', async () => { epochCache.getProposerAttesterAddressInSlot.mockImplementation(_ => Promise.resolve(EthAddress.random())); - epochCache.getCurrentAndNextSlot.mockReturnValue({ - currentSlot: proposal.slotNumber, + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: proposal.slotNumber, nextSlot: SlotNumber(proposal.slotNumber + 
1), }); @@ -691,8 +693,8 @@ describe('ValidatorClient', () => { it('should return false if the proposal is not for the current or next slot', async () => { epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(proposal.getSender()); - epochCache.getCurrentAndNextSlot.mockReturnValue({ - currentSlot: SlotNumber(proposal.slotNumber + 20), + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(proposal.slotNumber + 20), nextSlot: SlotNumber(proposal.slotNumber + 21), }); @@ -753,8 +755,8 @@ describe('ValidatorClient', () => { // Update epochCache mock for the new proposal epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(nonFirstBlockProposal.getSender()); - epochCache.getCurrentAndNextSlot.mockReturnValue({ - currentSlot: nonFirstBlockProposal.slotNumber, + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: nonFirstBlockProposal.slotNumber, nextSlot: SlotNumber(nonFirstBlockProposal.slotNumber + 1), }); diff --git a/yarn-project/validator-client/src/validator.ts b/yarn-project/validator-client/src/validator.ts index 2691c7a749e7..e699d0bdf0bc 100644 --- a/yarn-project/validator-client/src/validator.ts +++ b/yarn-project/validator-client/src/validator.ts @@ -484,26 +484,26 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) proposal: CheckpointProposalCore, _proposalSender: PeerId, ): Promise { - const slotNumber = proposal.slotNumber; + const proposalSlotNumber = proposal.slotNumber; const proposer = proposal.getSender(); // If escape hatch is open for this slot's epoch, do not attest. 
- if (await this.epochCache.isEscapeHatchOpenAtSlot(slotNumber)) { - this.log.warn(`Escape hatch open for slot ${slotNumber}, skipping checkpoint attestation handling`); + if (await this.epochCache.isEscapeHatchOpenAtSlot(proposalSlotNumber)) { + this.log.warn(`Escape hatch open for slot ${proposalSlotNumber}, skipping checkpoint attestation handling`); return undefined; } // Reject proposals with invalid signatures if (!proposer) { - this.log.warn(`Received checkpoint proposal with invalid signature for slot ${slotNumber}`); + this.log.warn(`Received checkpoint proposal with invalid signature for proposal slot ${proposalSlotNumber}`); return undefined; } // Ignore proposals from ourselves (may happen in HA setups) if (this.getValidatorAddresses().some(addr => addr.equals(proposer))) { - this.log.debug(`Ignoring block proposal from self for slot ${slotNumber}`, { + this.log.debug(`Ignoring block proposal from self for slot ${proposalSlotNumber}`, { proposer: proposer.toString(), - slotNumber, + proposalSlotNumber, }); return undefined; } @@ -511,28 +511,28 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) // Validate fee asset price modifier is within allowed range if (!validateFeeAssetPriceModifier(proposal.feeAssetPriceModifier)) { this.log.warn( - `Received checkpoint proposal with invalid feeAssetPriceModifier ${proposal.feeAssetPriceModifier} for slot ${slotNumber}`, + `Received checkpoint proposal with invalid feeAssetPriceModifier ${proposal.feeAssetPriceModifier} for slot ${proposalSlotNumber}`, ); return undefined; } - // Check that I have any address in current committee before attesting - const inCommittee = await this.epochCache.filterInCommittee(slotNumber, this.getValidatorAddresses()); + // Check that I have any address in the committee where this checkpoint will land before attesting + const inCommittee = await this.epochCache.filterInCommittee(proposalSlotNumber, this.getValidatorAddresses()); const partOfCommittee 
= inCommittee.length > 0; const proposalInfo = { - slotNumber, + proposalSlotNumber, archive: proposal.archive.toString(), proposer: proposer.toString(), }; - this.log.info(`Received checkpoint proposal for slot ${slotNumber}`, { + this.log.info(`Received checkpoint proposal for slot ${proposalSlotNumber}`, { ...proposalInfo, fishermanMode: this.config.fishermanMode || false, }); // Validate the checkpoint proposal before attesting (unless skipCheckpointProposalValidation is set) if (this.config.skipCheckpointProposalValidation) { - this.log.warn(`Skipping checkpoint proposal validation for slot ${slotNumber}`, proposalInfo); + this.log.warn(`Skipping checkpoint proposal validation for slot ${proposalSlotNumber}`, proposalInfo); } else { const validationResult = await this.validateCheckpointProposal(proposal, proposalInfo); if (!validationResult.isValid) { @@ -554,16 +554,19 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) } // Provided all of the above checks pass, we can attest to the proposal - this.log.info(`${partOfCommittee ? 'Attesting to' : 'Validated'} checkpoint proposal for slot ${slotNumber}`, { - ...proposalInfo, - inCommittee: partOfCommittee, - fishermanMode: this.config.fishermanMode || false, - }); + this.log.info( + `${partOfCommittee ? 
'Attesting to' : 'Validated'} checkpoint proposal for slot ${proposalSlotNumber}`, + { + ...proposalInfo, + inCommittee: partOfCommittee, + fishermanMode: this.config.fishermanMode || false, + }, + ); this.metrics.incSuccessfulAttestations(inCommittee.length); // Track epoch participation per attester: count each (attester, epoch) pair at most once - const proposalEpoch = getEpochAtSlot(slotNumber, this.epochCache.getL1Constants()); + const proposalEpoch = getEpochAtSlot(proposalSlotNumber, this.epochCache.getL1Constants()); for (const attester of inCommittee) { const key = attester.toString(); const lastEpoch = this.lastAttestedEpochByAttester.get(key); @@ -591,7 +594,7 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) if (this.config.fishermanMode) { // bail out early and don't save attestations to the pool in fisherman mode - this.log.info(`Creating checkpoint attestations for slot ${slotNumber}`, { + this.log.info(`Creating checkpoint attestations for slot ${proposalSlotNumber}`, { ...proposalInfo, attestors: attestors.map(a => a.toString()), });