From 092cde08938a8eed201b4b79a7b53dd099ac73c6 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Thu, 5 Mar 2026 11:53:36 +0100 Subject: [PATCH 01/10] Use subscription with forced inclusion; refactorings --- block/internal/common/event.go | 21 +- block/internal/da/async_block_retriever.go | 227 +++++----- .../internal/da/async_block_retriever_test.go | 245 +++++++---- .../internal/da/forced_inclusion_retriever.go | 11 +- .../da/forced_inclusion_retriever_test.go | 30 +- block/internal/da/subscriber.go | 391 +++++++++++++++++ block/internal/syncing/da_follower.go | 402 ++++-------------- block/internal/syncing/da_retriever.go | 115 ++--- block/internal/syncing/da_retriever_mock.go | 84 ---- .../internal/syncing/da_retriever_tracing.go | 8 - .../syncing/da_retriever_tracing_test.go | 4 - block/internal/syncing/raft_retriever.go | 22 +- block/internal/syncing/syncer.go | 10 +- block/internal/syncing/syncer_backoff_test.go | 74 ++-- .../internal/syncing/syncer_benchmark_test.go | 19 +- .../syncing/syncer_forced_inclusion_test.go | 8 +- block/internal/syncing/syncer_test.go | 184 +++----- block/public.go | 3 +- pkg/sequencers/based/sequencer.go | 2 +- pkg/sequencers/based/sequencer_test.go | 6 + pkg/sequencers/single/sequencer.go | 4 +- pkg/sequencers/single/sequencer_test.go | 18 + 22 files changed, 954 insertions(+), 934 deletions(-) create mode 100644 block/internal/da/subscriber.go diff --git a/block/internal/common/event.go b/block/internal/common/event.go index f02a181de8..3cc18f4641 100644 --- a/block/internal/common/event.go +++ b/block/internal/common/event.go @@ -1,6 +1,10 @@ package common -import "github.com/evstack/ev-node/types" +import ( + "context" + + "github.com/evstack/ev-node/types" +) // EventSource represents the origin of a block event type EventSource string @@ -24,3 +28,18 @@ type DAHeightEvent struct { // Optional DA height hints from P2P. 
first is the DA height hint for the header, second is the DA height hint for the data DaHeightHints [2]uint64 } + +// EventSink receives parsed DA events with backpressure support. +type EventSink interface { + PipeEvent(ctx context.Context, event DAHeightEvent) error +} + +// EventSinkFunc adapts a plain function to the EventSink interface. +// Useful in tests: +// +// sink := common.EventSinkFunc(func(ctx context.Context, ev common.DAHeightEvent) error { return nil }) +type EventSinkFunc func(ctx context.Context, event DAHeightEvent) error + +func (f EventSinkFunc) PipeEvent(ctx context.Context, event DAHeightEvent) error { + return f(ctx, event) +} diff --git a/block/internal/da/async_block_retriever.go b/block/internal/da/async_block_retriever.go index 4fd81d3e9c..a0dcf7a578 100644 --- a/block/internal/da/async_block_retriever.go +++ b/block/internal/da/async_block_retriever.go @@ -4,8 +4,6 @@ import ( "context" "errors" "fmt" - "sync" - "sync/atomic" "time" ds "github.com/ipfs/go-datastore" @@ -14,14 +12,13 @@ import ( "github.com/rs/zerolog" "google.golang.org/protobuf/proto" - "github.com/evstack/ev-node/pkg/config" datypes "github.com/evstack/ev-node/pkg/da/types" pb "github.com/evstack/ev-node/types/pb/evnode/v1" ) // AsyncBlockRetriever provides background prefetching of DA blocks type AsyncBlockRetriever interface { - Start() + Start(ctx context.Context) Stop() GetCachedBlock(ctx context.Context, daHeight uint64) (*BlockData, error) UpdateCurrentHeight(height uint64) @@ -35,29 +32,23 @@ type BlockData struct { } // asyncBlockRetriever handles background prefetching of individual DA blocks -// from a specific namespace. +// from a specific namespace. Wraps a da.Subscriber for the subscription +// plumbing and implements SubscriberHandler for caching. 
type asyncBlockRetriever struct { - client Client - logger zerolog.Logger - namespace []byte - daStartHeight uint64 + subscriber *Subscriber + client Client + logger zerolog.Logger + namespace []byte // In-memory cache for prefetched block data cache ds.Batching - // Background fetcher control - ctx context.Context - cancel context.CancelFunc - wg sync.WaitGroup - - // Current DA height tracking (accessed atomically) - currentDAHeight atomic.Uint64 + // Current DA height tracking (accessed atomically via subscriber). + // Updated externally via UpdateCurrentHeight. + daStartHeight uint64 - // Prefetch window - how many blocks ahead to prefetch + // Prefetch window - how many blocks ahead to speculatively fetch. prefetchWindow uint64 - - // Polling interval for checking new DA heights - pollInterval time.Duration } // NewAsyncBlockRetriever creates a new async block retriever with in-memory cache. @@ -65,61 +56,69 @@ func NewAsyncBlockRetriever( client Client, logger zerolog.Logger, namespace []byte, - config config.Config, + daBlockTime time.Duration, daStartHeight uint64, prefetchWindow uint64, ) AsyncBlockRetriever { if prefetchWindow == 0 { - prefetchWindow = 10 // Default: prefetch next 10 blocks + prefetchWindow = 10 } - ctx, cancel := context.WithCancel(context.Background()) - - fetcher := &asyncBlockRetriever{ + f := &asyncBlockRetriever{ client: client, logger: logger.With().Str("component", "async_block_retriever").Logger(), namespace: namespace, daStartHeight: daStartHeight, cache: dsync.MutexWrap(ds.NewMapDatastore()), - ctx: ctx, - cancel: cancel, prefetchWindow: prefetchWindow, - pollInterval: config.DA.BlockTime.Duration, } - fetcher.currentDAHeight.Store(daStartHeight) - return fetcher + + var namespaces [][]byte + if len(namespace) > 0 { + namespaces = [][]byte{namespace} + } + + f.subscriber = NewSubscriber(SubscriberConfig{ + Client: client, + Logger: logger, + Namespaces: namespaces, + DABlockTime: daBlockTime, + Handler: f, + }) + 
f.subscriber.SetStartHeight(daStartHeight) + + return f } -// Start begins the background prefetching process. -func (f *asyncBlockRetriever) Start() { - f.wg.Add(1) - go f.backgroundFetchLoop() +// Start begins the subscription follow loop and catchup loop. +func (f *asyncBlockRetriever) Start(ctx context.Context) { + if err := f.subscriber.Start(ctx); err != nil { + f.logger.Warn().Err(err).Msg("failed to start subscriber") + } f.logger.Debug(). Uint64("da_start_height", f.daStartHeight). Uint64("prefetch_window", f.prefetchWindow). - Dur("poll_interval", f.pollInterval). Msg("async block retriever started") } -// Stop gracefully stops the background prefetching process. +// Stop gracefully stops the background goroutines. func (f *asyncBlockRetriever) Stop() { f.logger.Debug().Msg("stopping async block retriever") - f.cancel() - f.wg.Wait() + f.subscriber.Stop() } -// UpdateCurrentHeight updates the current DA height for prefetching. +// UpdateCurrentHeight updates the current DA height. func (f *asyncBlockRetriever) UpdateCurrentHeight(height uint64) { - // Use atomic compare-and-swap to update only if the new height is greater for { - current := f.currentDAHeight.Load() + current := f.subscriber.LocalDAHeight() if height <= current { return } - if f.currentDAHeight.CompareAndSwap(current, height) { + if f.subscriber.CompareAndSwapLocalHeight(current, height) { f.logger.Debug(). Uint64("new_height", height). 
Msg("updated current DA height") + f.cleanupOldBlocks(height) return } } @@ -149,7 +148,6 @@ func (f *asyncBlockRetriever) GetCachedBlock(ctx context.Context, daHeight uint6 return nil, fmt.Errorf("failed to get cached block: %w", err) } - // Deserialize the cached block var pbBlock pb.BlockData if err := proto.Unmarshal(data, &pbBlock); err != nil { return nil, fmt.Errorf("failed to unmarshal cached block: %w", err) @@ -169,138 +167,107 @@ func (f *asyncBlockRetriever) GetCachedBlock(ctx context.Context, daHeight uint6 return block, nil } -// backgroundFetchLoop runs in the background and prefetches blocks ahead of time. -func (f *asyncBlockRetriever) backgroundFetchLoop() { - defer f.wg.Done() - - ticker := time.NewTicker(f.pollInterval) - defer ticker.Stop() +// --------------------------------------------------------------------------- +// SubscriberHandler implementation +// --------------------------------------------------------------------------- - for { - select { - case <-f.ctx.Done(): - return - case <-ticker.C: - f.prefetchBlocks() - } +// HandleEvent caches blobs from the subscription inline. +func (f *asyncBlockRetriever) HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent) { + if len(ev.Blobs) > 0 { + f.cacheBlock(ctx, ev.Height, ev.Blobs) } } -// prefetchBlocks prefetches blocks within the prefetch window. -func (f *asyncBlockRetriever) prefetchBlocks() { - if len(f.namespace) == 0 { - return - } - - currentHeight := f.currentDAHeight.Load() - - // Prefetch upcoming blocks - for i := uint64(0); i < f.prefetchWindow; i++ { - targetHeight := currentHeight + i - - // Check if already cached - key := newBlockDataKey(targetHeight) - _, err := f.cache.Get(f.ctx, key) - if err == nil { - // Already cached - continue +// HandleCatchup fetches a single height via Retrieve and caches it. +// Also applies the prefetch window for speculative forward fetching. 
+func (f *asyncBlockRetriever) HandleCatchup(ctx context.Context, height uint64) error { + f.fetchAndCacheBlock(ctx, height) + + // Speculatively prefetch ahead. + highest := f.subscriber.HighestSeenDAHeight() + target := highest + f.prefetchWindow + for h := height + 1; h <= target; h++ { + if err := ctx.Err(); err != nil { + return err } - - // Fetch and cache the block - f.fetchAndCacheBlock(targetHeight) + key := newBlockDataKey(h) + if _, err := f.cache.Get(ctx, key); err == nil { + continue // Already cached. + } + f.fetchAndCacheBlock(ctx, h) } - // Clean up old blocks from cache to prevent memory growth - f.cleanupOldBlocks(currentHeight) + return nil } -// fetchAndCacheBlock fetches a block and stores it in the cache. -func (f *asyncBlockRetriever) fetchAndCacheBlock(height uint64) { - f.logger.Debug(). - Uint64("height", height). - Msg("prefetching block") +// --------------------------------------------------------------------------- +// Cache helpers +// --------------------------------------------------------------------------- - result := f.client.Retrieve(f.ctx, height, f.namespace) +// fetchAndCacheBlock fetches a block via Retrieve and caches it. +func (f *asyncBlockRetriever) fetchAndCacheBlock(ctx context.Context, height uint64) { + f.logger.Debug().Uint64("height", height).Msg("prefetching block") - block := &BlockData{ - Height: height, - Timestamp: result.Timestamp, - Blobs: [][]byte{}, - } + result := f.client.Retrieve(ctx, height, f.namespace) switch result.Code { case datypes.StatusHeightFromFuture: - f.logger.Debug(). - Uint64("height", height). - Msg("block height not yet available - will retry") + f.logger.Debug().Uint64("height", height).Msg("block height not yet available - will retry") return case datypes.StatusNotFound: - f.logger.Debug(). - Uint64("height", height). 
- Msg("no blobs at height") - // Cache empty result to avoid re-fetching + f.cacheBlock(ctx, height, nil) case datypes.StatusSuccess: - // Process each blob + blobs := make([][]byte, 0, len(result.Data)) for _, blob := range result.Data { if len(blob) > 0 { - block.Blobs = append(block.Blobs, blob) + blobs = append(blobs, blob) } } - f.logger.Debug(). - Uint64("height", height). - Int("blob_count", len(result.Data)). - Msg("processed blobs for prefetch") + f.cacheBlock(ctx, height, blobs) default: f.logger.Debug(). Uint64("height", height). Str("status", result.Message). Msg("failed to retrieve block - will retry") - return + } +} + +// cacheBlock serializes and stores a block in the in-memory cache. +func (f *asyncBlockRetriever) cacheBlock(ctx context.Context, height uint64, blobs [][]byte) { + if blobs == nil { + blobs = [][]byte{} } - // Serialize and cache the block pbBlock := &pb.BlockData{ - Height: block.Height, - Timestamp: block.Timestamp.UnixNano(), - Blobs: block.Blobs, + Height: height, + Timestamp: time.Now().UnixNano(), + Blobs: blobs, } data, err := proto.Marshal(pbBlock) if err != nil { - f.logger.Error(). - Err(err). - Uint64("height", height). - Msg("failed to marshal block for caching") + f.logger.Error().Err(err).Uint64("height", height).Msg("failed to marshal block for caching") return } key := newBlockDataKey(height) - err = f.cache.Put(f.ctx, key, data) - if err != nil { - f.logger.Error(). - Err(err). - Uint64("height", height). - Msg("failed to cache block") + if err := f.cache.Put(ctx, key, data); err != nil { + f.logger.Error().Err(err).Uint64("height", height).Msg("failed to cache block") return } - f.logger.Debug(). - Uint64("height", height). - Int("blob_count", len(block.Blobs)). - Msg("successfully prefetched and cached block") + f.logger.Debug().Uint64("height", height).Int("blob_count", len(blobs)).Msg("cached block") } -// cleanupOldBlocks removes blocks older than a threshold from cache. 
+// cleanupOldBlocks removes blocks older than currentHeight − prefetchWindow. func (f *asyncBlockRetriever) cleanupOldBlocks(currentHeight uint64) { - // Remove blocks older than current - prefetchWindow if currentHeight < f.prefetchWindow { return } cleanupThreshold := currentHeight - f.prefetchWindow - // Query all keys query := dsq.Query{Prefix: "/block/"} - results, err := f.cache.Query(f.ctx, query) + results, err := f.cache.Query(context.Background(), query) if err != nil { f.logger.Debug().Err(err).Msg("failed to query cache for cleanup") return @@ -313,7 +280,6 @@ func (f *asyncBlockRetriever) cleanupOldBlocks(currentHeight uint64) { } key := ds.NewKey(result.Key) - // Extract height from key var height uint64 _, err := fmt.Sscanf(key.String(), "/block/%d", &height) if err != nil { @@ -321,11 +287,8 @@ func (f *asyncBlockRetriever) cleanupOldBlocks(currentHeight uint64) { } if height < cleanupThreshold { - if err := f.cache.Delete(f.ctx, key); err != nil { - f.logger.Debug(). - Err(err). - Uint64("height", height). 
- Msg("failed to delete old block from cache") + if err := f.cache.Delete(context.Background(), key); err != nil { + f.logger.Debug().Err(err).Uint64("height", height).Msg("failed to delete old block from cache") } } } diff --git a/block/internal/da/async_block_retriever_test.go b/block/internal/da/async_block_retriever_test.go index dcfbce84d1..d9c986e4e8 100644 --- a/block/internal/da/async_block_retriever_test.go +++ b/block/internal/da/async_block_retriever_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" - "github.com/evstack/ev-node/pkg/config" datypes "github.com/evstack/ev-node/pkg/da/types" mocks "github.com/evstack/ev-node/test/mocks" pb "github.com/evstack/ev-node/types/pb/evnode/v1" @@ -21,7 +20,7 @@ func TestAsyncBlockRetriever_GetCachedBlock_NoNamespace(t *testing.T) { client := &mocks.MockClient{} logger := zerolog.Nop() - fetcher := NewAsyncBlockRetriever(client, logger, nil, config.DefaultConfig(), 100, 10) + fetcher := NewAsyncBlockRetriever(client, logger, nil, 100*time.Millisecond, 100, 10) ctx := context.Background() block, err := fetcher.GetCachedBlock(ctx, 100) @@ -34,7 +33,7 @@ func TestAsyncBlockRetriever_GetCachedBlock_CacheMiss(t *testing.T) { fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() logger := zerolog.Nop() - fetcher := NewAsyncBlockRetriever(client, logger, fiNs, config.DefaultConfig(), 100, 10) + fetcher := NewAsyncBlockRetriever(client, logger, fiNs, 100*time.Millisecond, 100, 10) ctx := context.Background() @@ -44,141 +43,143 @@ func TestAsyncBlockRetriever_GetCachedBlock_CacheMiss(t *testing.T) { assert.Nil(t, block) // Cache miss } -func TestAsyncBlockRetriever_FetchAndCache(t *testing.T) { +func TestAsyncBlockRetriever_SubscriptionDrivenCaching(t *testing.T) { + // Test that blobs arriving via subscription are cached inline. 
testBlobs := [][]byte{ []byte("tx1"), []byte("tx2"), - []byte("tx3"), } client := &mocks.MockClient{} fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() - // Mock Retrieve call for height 100 - client.On("Retrieve", mock.Anything, uint64(100), fiNs).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{ - Code: datypes.StatusSuccess, - Timestamp: time.Unix(1000, 0), - }, - Data: testBlobs, - }).Once() - - // Mock other heights that will be prefetched - for height := uint64(101); height <= 109; height++ { - client.On("Retrieve", mock.Anything, height, fiNs).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound}, - }).Maybe() + // Create a subscription channel that delivers one event then blocks. + subCh := make(chan datypes.SubscriptionEvent, 1) + subCh <- datypes.SubscriptionEvent{ + Height: 100, + Blobs: testBlobs, } + client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() + // Catchup loop may call Retrieve for heights beyond 100 — stub those. + client.On("Retrieve", mock.Anything, mock.Anything, fiNs).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, + }).Maybe() + + // On second subscribe (after watchdog timeout) just block forever. 
+ blockCh := make(chan datypes.SubscriptionEvent) + client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() + logger := zerolog.Nop() - // Use a short poll interval for faster test execution - cfg := config.DefaultConfig() - cfg.DA.BlockTime.Duration = 100 * time.Millisecond - fetcher := NewAsyncBlockRetriever(client, logger, fiNs, cfg, 100, 10) - fetcher.Start() - defer fetcher.Stop() + fetcher := NewAsyncBlockRetriever(client, logger, fiNs, 200*time.Millisecond, 100, 5) - // Update current height to trigger prefetch - fetcher.UpdateCurrentHeight(100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fetcher.Start(ctx) + defer fetcher.Stop() - // Wait for the background fetch to complete by polling the cache - ctx := context.Background() + // Wait for the subscription event to be processed. var block *BlockData var err error - - // Poll for up to 2 seconds for the block to be cached for range 40 { block, err = fetcher.GetCachedBlock(ctx, 100) require.NoError(t, err) if block != nil { break } - time.Sleep(50 * time.Millisecond) + time.Sleep(25 * time.Millisecond) } - require.NotNil(t, block, "block should be cached after background fetch") + require.NotNil(t, block, "block should be cached after subscription event") assert.Equal(t, uint64(100), block.Height) - assert.Equal(t, 3, len(block.Blobs)) - for i, tb := range testBlobs { - assert.Equal(t, tb, block.Blobs[i]) - } + assert.Equal(t, 2, len(block.Blobs)) + assert.Equal(t, []byte("tx1"), block.Blobs[0]) + assert.Equal(t, []byte("tx2"), block.Blobs[1]) } -func TestAsyncBlockRetriever_BackgroundPrefetch(t *testing.T) { - testBlobs := [][]byte{ - []byte("tx1"), - []byte("tx2"), - } +func TestAsyncBlockRetriever_CatchupFillsGaps(t *testing.T) { + // When subscription reports height 105 but current is 100, + // catchup loop should Retrieve heights 100-114 (100 + prefetch window). 
+ testBlobs := [][]byte{[]byte("gap-tx")} client := &mocks.MockClient{} fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() - // Mock for heights 100-110 (current + prefetch window) - for height := uint64(100); height <= 110; height++ { - if height == 105 { - client.On("Retrieve", mock.Anything, height, fiNs).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{ - Code: datypes.StatusSuccess, - Timestamp: time.Unix(2000, 0), - }, - Data: testBlobs, - }).Maybe() - } else { - client.On("Retrieve", mock.Anything, height, fiNs).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound}, - }).Maybe() - } - } + // Subscription delivers height 105 (no blobs — just a signal). + subCh := make(chan datypes.SubscriptionEvent, 1) + subCh <- datypes.SubscriptionEvent{Height: 105} - logger := zerolog.Nop() - cfg := config.DefaultConfig() - cfg.DA.BlockTime.Duration = 100 * time.Millisecond - fetcher := NewAsyncBlockRetriever(client, logger, fiNs, cfg, 100, 10) + client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() + blockCh := make(chan datypes.SubscriptionEvent) + client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() - fetcher.Start() - defer fetcher.Stop() + // Height 102 has blobs; rest return not found or future. 
+ client.On("Retrieve", mock.Anything, uint64(102), fiNs).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{ + Code: datypes.StatusSuccess, + Timestamp: time.Unix(2000, 0), + }, + Data: testBlobs, + }).Maybe() + client.On("Retrieve", mock.Anything, mock.Anything, fiNs).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound}, + }).Maybe() - // Update current height to trigger prefetch - fetcher.UpdateCurrentHeight(100) + logger := zerolog.Nop() + fetcher := NewAsyncBlockRetriever(client, logger, fiNs, 100*time.Millisecond, 100, 10) - // Wait for background prefetch to happen (wait for at least one poll cycle) - time.Sleep(250 * time.Millisecond) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fetcher.Start(ctx) + defer fetcher.Stop() - // Check if block was prefetched - ctx := context.Background() - block, err := fetcher.GetCachedBlock(ctx, 105) - require.NoError(t, err) - assert.NotNil(t, block) - assert.Equal(t, uint64(105), block.Height) - assert.Equal(t, 2, len(block.Blobs)) + // Wait for catchup to fill the gap. 
+ var block *BlockData + var err error + for range 40 { + block, err = fetcher.GetCachedBlock(ctx, 102) + require.NoError(t, err) + if block != nil { + break + } + time.Sleep(50 * time.Millisecond) + } + require.NotNil(t, block, "block at 102 should be cached via catchup") + assert.Equal(t, uint64(102), block.Height) + assert.Equal(t, 1, len(block.Blobs)) + assert.Equal(t, []byte("gap-tx"), block.Blobs[0]) } func TestAsyncBlockRetriever_HeightFromFuture(t *testing.T) { client := &mocks.MockClient{} fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() - // All heights in prefetch window not available yet - for height := uint64(100); height <= 109; height++ { - client.On("Retrieve", mock.Anything, height, fiNs).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, - }).Maybe() - } + // Subscription delivers height 100 with no blobs. + subCh := make(chan datypes.SubscriptionEvent, 1) + subCh <- datypes.SubscriptionEvent{Height: 100} + + client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() + blockCh := make(chan datypes.SubscriptionEvent) + client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() + + // All Retrieve calls return HeightFromFuture. 
+ client.On("Retrieve", mock.Anything, mock.Anything, fiNs).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, + }).Maybe() logger := zerolog.Nop() - cfg := config.DefaultConfig() - cfg.DA.BlockTime.Duration = 100 * time.Millisecond - fetcher := NewAsyncBlockRetriever(client, logger, fiNs, cfg, 100, 10) - fetcher.Start() - defer fetcher.Stop() + fetcher := NewAsyncBlockRetriever(client, logger, fiNs, 100*time.Millisecond, 100, 10) - fetcher.UpdateCurrentHeight(100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fetcher.Start(ctx) + defer fetcher.Stop() - // Wait for at least one poll cycle + // Wait a bit for catchup to attempt fetches. time.Sleep(250 * time.Millisecond) - // Cache should be empty - ctx := context.Background() + // Cache should be empty since all heights are from the future. block, err := fetcher.GetCachedBlock(ctx, 100) require.NoError(t, err) assert.Nil(t, block) @@ -188,16 +189,76 @@ func TestAsyncBlockRetriever_StopGracefully(t *testing.T) { client := &mocks.MockClient{} fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() + blockCh := make(chan datypes.SubscriptionEvent) + client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() + logger := zerolog.Nop() - fetcher := NewAsyncBlockRetriever(client, logger, fiNs, config.DefaultConfig(), 100, 10) + fetcher := NewAsyncBlockRetriever(client, logger, fiNs, 100*time.Millisecond, 100, 10) - fetcher.Start() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fetcher.Start(ctx) time.Sleep(100 * time.Millisecond) // Should stop gracefully without panic fetcher.Stop() } +func TestAsyncBlockRetriever_ReconnectOnSubscriptionError(t *testing.T) { + // Verify that the follow loop reconnects after a subscription channel closes. 
+ testBlobs := [][]byte{[]byte("reconnect-tx")} + + client := &mocks.MockClient{} + fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() + + // First subscription closes immediately (simulating error). + closedCh := make(chan datypes.SubscriptionEvent) + close(closedCh) + client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(closedCh), nil).Once() + + // Second subscription delivers a blob. + subCh := make(chan datypes.SubscriptionEvent, 1) + subCh <- datypes.SubscriptionEvent{ + Height: 100, + Blobs: testBlobs, + } + client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() + + // Third+ subscribe returns a blocking channel so it doesn't loop forever. + blockCh := make(chan datypes.SubscriptionEvent) + client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() + + // Stub Retrieve for catchup. + client.On("Retrieve", mock.Anything, mock.Anything, fiNs).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, + }).Maybe() + + logger := zerolog.Nop() + // Very short backoff so reconnect is fast in tests. + fetcher := NewAsyncBlockRetriever(client, logger, fiNs, 50*time.Millisecond, 100, 5) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fetcher.Start(ctx) + defer fetcher.Stop() + + // Wait for reconnect + event processing. 
+ var block *BlockData + var err error + for range 60 { + block, err = fetcher.GetCachedBlock(ctx, 100) + require.NoError(t, err) + if block != nil { + break + } + time.Sleep(50 * time.Millisecond) + } + + require.NotNil(t, block, "block should be cached after reconnect") + assert.Equal(t, 1, len(block.Blobs)) + assert.Equal(t, []byte("reconnect-tx"), block.Blobs[0]) +} + func TestBlockData_Serialization(t *testing.T) { block := &BlockData{ Height: 100, diff --git a/block/internal/da/forced_inclusion_retriever.go b/block/internal/da/forced_inclusion_retriever.go index 2ec299333a..a8051a5758 100644 --- a/block/internal/da/forced_inclusion_retriever.go +++ b/block/internal/da/forced_inclusion_retriever.go @@ -8,7 +8,6 @@ import ( "github.com/rs/zerolog" - "github.com/evstack/ev-node/pkg/config" datypes "github.com/evstack/ev-node/pkg/da/types" "github.com/evstack/ev-node/types" ) @@ -42,9 +41,11 @@ type ForcedInclusionEvent struct { // NewForcedInclusionRetriever creates a new forced inclusion retriever. // It internally creates and manages an AsyncBlockRetriever for background prefetching. 
func NewForcedInclusionRetriever( + ctx context.Context, client Client, logger zerolog.Logger, - cfg config.Config, + daBlockTime time.Duration, + tracingEnabled bool, daStartHeight, daEpochSize uint64, ) ForcedInclusionRetriever { retrieverLogger := logger.With().Str("component", "forced_inclusion_retriever").Logger() @@ -54,11 +55,11 @@ func NewForcedInclusionRetriever( client, logger, client.GetForcedInclusionNamespace(), - cfg, + daBlockTime, daStartHeight, daEpochSize*2, // prefetch window: 2x epoch size ) - asyncFetcher.Start() + asyncFetcher.Start(ctx) base := &forcedInclusionRetriever{ client: client, @@ -67,7 +68,7 @@ func NewForcedInclusionRetriever( daEpochSize: daEpochSize, asyncFetcher: asyncFetcher, } - if cfg.Instrumentation.IsTracingEnabled() { + if tracingEnabled { return withTracingForcedInclusionRetriever(base) } return base diff --git a/block/internal/da/forced_inclusion_retriever_test.go b/block/internal/da/forced_inclusion_retriever_test.go index 446b655bec..47864d7985 100644 --- a/block/internal/da/forced_inclusion_retriever_test.go +++ b/block/internal/da/forced_inclusion_retriever_test.go @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/mock" "gotest.tools/v3/assert" - "github.com/evstack/ev-node/pkg/config" datypes "github.com/evstack/ev-node/pkg/da/types" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/test/mocks" @@ -18,13 +17,14 @@ import ( func TestNewForcedInclusionRetriever(t *testing.T) { client := mocks.NewMockClient(t) client.On("GetForcedInclusionNamespace").Return(datypes.NamespaceFromString("test-fi-ns").Bytes()).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() gen := genesis.Genesis{ DAStartHeight: 100, DAEpochForcedInclusion: 10, } - retriever := NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) + retriever := 
NewForcedInclusionRetriever(context.Background(), client, zerolog.Nop(), 100*time.Millisecond, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) assert.Assert(t, retriever != nil) retriever.Stop() } @@ -39,7 +39,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoNamespace(t *testi DAEpochForcedInclusion: 10, } - retriever := NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) + retriever := NewForcedInclusionRetriever(context.Background(), client, zerolog.Nop(), 100*time.Millisecond, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) defer retriever.Stop() ctx := context.Background() @@ -53,13 +53,14 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NotAtEpochStart(t *t fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() gen := genesis.Genesis{ DAStartHeight: 100, DAEpochForcedInclusion: 10, } - retriever := NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) + retriever := NewForcedInclusionRetriever(context.Background(), client, zerolog.Nop(), 100*time.Millisecond, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) defer retriever.Stop() ctx := context.Background() @@ -83,6 +84,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartSuccess(t fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() client.On("Retrieve", 
mock.Anything, mock.Anything, fiNs).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: []datypes.ID{[]byte("id1"), []byte("id2"), []byte("id3")}, Timestamp: time.Now()}, Data: testBlobs, @@ -93,7 +95,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartSuccess(t DAEpochForcedInclusion: 1, // Single height epoch } - retriever := NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) + retriever := NewForcedInclusionRetriever(context.Background(), client, zerolog.Nop(), 100*time.Millisecond, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) defer retriever.Stop() ctx := context.Background() @@ -112,6 +114,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartNotAvailab fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // Mock the first height in epoch as not available client.On("Retrieve", mock.Anything, uint64(100), fiNs).Return(datypes.ResultRetrieve{ @@ -123,7 +126,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartNotAvailab DAEpochForcedInclusion: 10, } - retriever := NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) + retriever := NewForcedInclusionRetriever(context.Background(), client, zerolog.Nop(), 100*time.Millisecond, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) defer retriever.Stop() ctx := context.Background() @@ -138,6 +141,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoBlobsAtHeight(t *t fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() 
client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() client.On("Retrieve", mock.Anything, uint64(100), fiNs).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound}, }).Once() @@ -147,7 +151,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoBlobsAtHeight(t *t DAEpochForcedInclusion: 1, // Single height epoch } - retriever := NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) + retriever := NewForcedInclusionRetriever(context.Background(), client, zerolog.Nop(), 100*time.Millisecond, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) defer retriever.Stop() ctx := context.Background() @@ -168,6 +172,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_MultiHeightEpoch(t * fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() client.On("Retrieve", mock.Anything, uint64(102), fiNs).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now()}, Data: testBlobsByHeight[102], @@ -186,7 +191,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_MultiHeightEpoch(t * DAEpochForcedInclusion: 3, // Epoch: 100-102 } - retriever := NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) + retriever := NewForcedInclusionRetriever(context.Background(), client, zerolog.Nop(), 100*time.Millisecond, false, gen.DAStartHeight, 
gen.DAEpochForcedInclusion) defer retriever.Stop() ctx := context.Background() @@ -208,6 +213,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_ErrorHandling(t *tes fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() client.On("Retrieve", mock.Anything, uint64(100), fiNs).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{ Code: datypes.StatusError, @@ -220,7 +226,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_ErrorHandling(t *tes DAEpochForcedInclusion: 1, // Single height epoch } - retriever := NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) + retriever := NewForcedInclusionRetriever(context.Background(), client, zerolog.Nop(), 100*time.Millisecond, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) defer retriever.Stop() ctx := context.Background() @@ -236,6 +242,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EmptyBlobsSkipped(t fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() client.On("Retrieve", mock.Anything, uint64(100), fiNs).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now()}, Data: [][]byte{[]byte("tx1"), {}, []byte("tx2"), nil, []byte("tx3")}, @@ -246,7 +253,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EmptyBlobsSkipped(t DAEpochForcedInclusion: 1, // Single height epoch } - retriever 
:= NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) + retriever := NewForcedInclusionRetriever(context.Background(), client, zerolog.Nop(), 100*time.Millisecond, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) defer retriever.Stop() ctx := context.Background() @@ -272,6 +279,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_OrderPreserved(t *te fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // Return heights out of order to test ordering is preserved client.On("Retrieve", mock.Anything, uint64(102), fiNs).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now()}, @@ -291,7 +299,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_OrderPreserved(t *te DAEpochForcedInclusion: 3, // Epoch: 100-102 } - retriever := NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) + retriever := NewForcedInclusionRetriever(context.Background(), client, zerolog.Nop(), 100*time.Millisecond, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) defer retriever.Stop() ctx := context.Background() diff --git a/block/internal/da/subscriber.go b/block/internal/da/subscriber.go new file mode 100644 index 0000000000..ffeb5141c9 --- /dev/null +++ b/block/internal/da/subscriber.go @@ -0,0 +1,391 @@ +package da + +import ( + "bytes" + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/rs/zerolog" + + datypes "github.com/evstack/ev-node/pkg/da/types" +) + +// SubscriberHandler is the callback interface for subscription consumers. 
+// Implementations drive the consumer-specific logic (caching, piping events, etc.). +type SubscriberHandler interface { + // HandleEvent processes a subscription event inline (fast path). + // Called on the followLoop goroutine for each subscription event. + HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent) + + // HandleCatchup is called for each height during sequential catchup. + // The subscriber advances localDAHeight only after this returns nil. + // Returning an error triggers a backoff retry. + HandleCatchup(ctx context.Context, height uint64) error +} + +// SubscriberConfig holds configuration for creating a Subscriber. +type SubscriberConfig struct { + Client Client + Logger zerolog.Logger + Namespaces [][]byte // subscribe to all, merge into one channel + DABlockTime time.Duration + Handler SubscriberHandler +} + +// Subscriber is a shared DA subscription primitive that encapsulates the +// follow/catchup lifecycle. It subscribes to one or more DA namespaces, +// tracks the highest seen DA height, and drives sequential catchup via +// callbacks on SubscriberHandler. +// +// Used by both DAFollower (syncing) and asyncBlockRetriever (forced inclusion). +type Subscriber struct { + client Client + logger zerolog.Logger + handler SubscriberHandler + + // namespaces to subscribe on. When multiple, they are merged. + namespaces [][]byte + + // localDAHeight is only written by catchupLoop (via CAS) and read by + // followLoop to determine whether inline processing is possible. + localDAHeight atomic.Uint64 + + // highestSeenDAHeight is written by followLoop and read by catchupLoop. + highestSeenDAHeight atomic.Uint64 + + // headReached tracks whether the subscriber has caught up to DA head. + headReached atomic.Bool + + // catchupSignal wakes catchupLoop when a new height is seen above localDAHeight. + catchupSignal chan struct{} + + // daBlockTime used as backoff and watchdog base. 
+ daBlockTime time.Duration + + // lifecycle + cancel context.CancelFunc + wg sync.WaitGroup +} + +// NewSubscriber creates a new Subscriber. +func NewSubscriber(cfg SubscriberConfig) *Subscriber { + s := &Subscriber{ + client: cfg.Client, + logger: cfg.Logger, + handler: cfg.Handler, + namespaces: cfg.Namespaces, + catchupSignal: make(chan struct{}, 1), + daBlockTime: cfg.DABlockTime, + } + return s +} + +// SetStartHeight sets the initial local DA height before Start is called. +func (s *Subscriber) SetStartHeight(height uint64) { + s.localDAHeight.Store(height) +} + +// Start begins the follow and catchup goroutines. +func (s *Subscriber) Start(ctx context.Context) error { + if len(s.namespaces) == 0 { + return nil // Nothing to subscribe to. + } + + ctx, s.cancel = context.WithCancel(ctx) + s.wg.Add(2) + go s.followLoop(ctx) + go s.catchupLoop(ctx) + return nil +} + +// Stop gracefully stops the background goroutines. +func (s *Subscriber) Stop() { + if s.cancel != nil { + s.cancel() + } + s.wg.Wait() +} + +// LocalDAHeight returns the current local DA height. +func (s *Subscriber) LocalDAHeight() uint64 { + return s.localDAHeight.Load() +} + +// HighestSeenDAHeight returns the highest DA height seen from the subscription. +func (s *Subscriber) HighestSeenDAHeight() uint64 { + return s.highestSeenDAHeight.Load() +} + +// HasReachedHead returns whether the subscriber has caught up to DA head. +func (s *Subscriber) HasReachedHead() bool { + return s.headReached.Load() +} + +// SetHeadReached marks the subscriber as having reached DA head. +func (s *Subscriber) SetHeadReached() { + s.headReached.Store(true) +} + +// CompareAndSwapLocalHeight attempts a CAS on localDAHeight. +// Used by handlers that want to claim exclusive processing of a height. +func (s *Subscriber) CompareAndSwapLocalHeight(old, new uint64) bool { + return s.localDAHeight.CompareAndSwap(old, new) +} + +// SetLocalHeight stores a new localDAHeight value. 
+func (s *Subscriber) SetLocalHeight(height uint64) { + s.localDAHeight.Store(height) +} + +// UpdateHighestForTest directly sets the highest seen DA height and signals catchup. +// Only for use in tests that bypass the subscription loop. +func (s *Subscriber) UpdateHighestForTest(height uint64) { + s.updateHighest(height) +} + +// RunCatchupForTest runs a single catchup pass. Only for use in tests that +// bypass the catchup loop's signal-wait mechanism. +func (s *Subscriber) RunCatchupForTest(ctx context.Context) { + s.runCatchup(ctx) +} + +// --------------------------------------------------------------------------- +// Follow loop +// --------------------------------------------------------------------------- + +// signalCatchup sends a non-blocking signal to wake catchupLoop. +func (s *Subscriber) signalCatchup() { + select { + case s.catchupSignal <- struct{}{}: + default: + } +} + +// followLoop subscribes to DA blob events and keeps highestSeenDAHeight up to date. +func (s *Subscriber) followLoop(ctx context.Context) { + defer s.wg.Done() + + s.logger.Info().Msg("starting follow loop") + defer s.logger.Info().Msg("follow loop stopped") + + for { + if err := s.runSubscription(ctx); err != nil { + if ctx.Err() != nil { + return + } + s.logger.Warn().Err(err).Msg("DA subscription failed, reconnecting") + select { + case <-ctx.Done(): + return + case <-time.After(s.backoff()): + } + } + } +} + +// runSubscription opens subscriptions on all namespaces (merging if more than one) +// and processes events until a channel is closed or the watchdog times out. 
+func (s *Subscriber) runSubscription(ctx context.Context) error { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + ch, err := s.subscribe(subCtx) + if err != nil { + return err + } + + watchdogTimeout := s.watchdogTimeout() + watchdog := time.NewTimer(watchdogTimeout) + defer watchdog.Stop() + + for { + select { + case <-subCtx.Done(): + return subCtx.Err() + case ev, ok := <-ch: + if !ok { + return errors.New("subscription channel closed") + } + s.updateHighest(ev.Height) + s.handler.HandleEvent(ctx, ev) + watchdog.Reset(watchdogTimeout) + case <-watchdog.C: + return errors.New("subscription watchdog: no events received, reconnecting") + } + } +} + +// subscribe opens subscriptions on all configured namespaces. When there are +// multiple distinct namespaces, channels are merged via mergeSubscriptions. +func (s *Subscriber) subscribe(ctx context.Context) (<-chan datypes.SubscriptionEvent, error) { + if len(s.namespaces) == 0 { + return nil, errors.New("no namespaces configured") + } + + // Subscribe to the first namespace. + ch, err := s.client.Subscribe(ctx, s.namespaces[0]) + if err != nil { + return nil, fmt.Errorf("subscribe namespace 0: %w", err) + } + + // Subscribe to additional namespaces and merge. + for i := 1; i < len(s.namespaces); i++ { + if bytes.Equal(s.namespaces[i], s.namespaces[0]) { + continue // Same namespace, skip duplicate. + } + ch2, err := s.client.Subscribe(ctx, s.namespaces[i]) + if err != nil { + return nil, fmt.Errorf("subscribe namespace %d: %w", i, err) + } + ch = mergeSubscriptions(ctx, ch, ch2) + } + + return ch, nil +} + +// mergeSubscriptions fans two subscription channels into one. 
+func mergeSubscriptions( + ctx context.Context, + ch1, ch2 <-chan datypes.SubscriptionEvent, +) <-chan datypes.SubscriptionEvent { + out := make(chan datypes.SubscriptionEvent, 16) + go func() { + defer close(out) + for ch1 != nil || ch2 != nil { + var ev datypes.SubscriptionEvent + var ok bool + select { + case <-ctx.Done(): + return + case ev, ok = <-ch1: + if !ok { + ch1 = nil + continue + } + case ev, ok = <-ch2: + if !ok { + ch2 = nil + continue + } + } + select { + case out <- ev: + case <-ctx.Done(): + return + } + } + }() + return out +} + +// updateHighest atomically bumps highestSeenDAHeight and signals catchup if needed. +func (s *Subscriber) updateHighest(height uint64) { + for { + cur := s.highestSeenDAHeight.Load() + if height <= cur { + return + } + if s.highestSeenDAHeight.CompareAndSwap(cur, height) { + s.signalCatchup() + return + } + } +} + +// --------------------------------------------------------------------------- +// Catchup loop +// --------------------------------------------------------------------------- + +// catchupLoop waits for signals and sequentially catches up. +func (s *Subscriber) catchupLoop(ctx context.Context) { + defer s.wg.Done() + + s.logger.Info().Msg("starting catchup loop") + defer s.logger.Info().Msg("catchup loop stopped") + + for { + select { + case <-ctx.Done(): + return + case <-s.catchupSignal: + s.runCatchup(ctx) + } + } +} + +// runCatchup sequentially calls HandleCatchup from localDAHeight to highestSeenDAHeight. +func (s *Subscriber) runCatchup(ctx context.Context) { + for { + if ctx.Err() != nil { + return + } + + local := s.localDAHeight.Load() + highest := s.highestSeenDAHeight.Load() + + if local > highest { + s.headReached.Store(true) + return + } + + // CAS claims this height — prevents followLoop from inline-processing. + if !s.localDAHeight.CompareAndSwap(local, local+1) { + // followLoop already advanced past this height via inline processing. 
+ continue + } + + if err := s.handler.HandleCatchup(ctx, local); err != nil { + // Roll back so we can retry after backoff. + s.localDAHeight.Store(local) + if !s.waitOnCatchupError(ctx, err, local) { + return + } + continue + } + } +} + +// ErrCaughtUp is a sentinel used to signal that the catchup loop has reached DA head. +var ErrCaughtUp = errors.New("caught up with DA head") + +// waitOnCatchupError logs the error and backs off before retrying. +func (s *Subscriber) waitOnCatchupError(ctx context.Context, err error, daHeight uint64) bool { + if errors.Is(err, ErrCaughtUp) { + s.logger.Debug().Uint64("da_height", daHeight).Msg("DA catchup reached head, waiting for subscription signal") + return false + } + if ctx.Err() != nil { + return false + } + s.logger.Warn().Err(err).Uint64("da_height", daHeight).Msg("catchup error, backing off") + select { + case <-ctx.Done(): + return false + case <-time.After(s.backoff()): + return true + } +} + +// --------------------------------------------------------------------------- +// Timing helpers +// --------------------------------------------------------------------------- + +func (s *Subscriber) backoff() time.Duration { + if s.daBlockTime > 0 { + return s.daBlockTime + } + return 2 * time.Second +} + +const subscriberWatchdogMultiplier = 3 + +func (s *Subscriber) watchdogTimeout() time.Duration { + if s.daBlockTime > 0 { + return s.daBlockTime * subscriberWatchdogMultiplier + } + return 30 * time.Second +} diff --git a/block/internal/syncing/da_follower.go b/block/internal/syncing/da_follower.go index 99b51db593..a31ffc43ca 100644 --- a/block/internal/syncing/da_follower.go +++ b/block/internal/syncing/da_follower.go @@ -1,12 +1,10 @@ package syncing import ( - "bytes" "context" "errors" - "fmt" + "slices" "sync" - "sync/atomic" "time" "github.com/rs/zerolog" @@ -16,61 +14,26 @@ import ( datypes "github.com/evstack/ev-node/pkg/da/types" ) -// DAFollower subscribes to DA blob events and drives sequential catchup. 
-// -// Architecture: -// - followLoop listens on the subscription channel. When caught up, it processes -// subscription blobs inline (fast path, no DA re-fetch). Otherwise, it updates -// highestSeenDAHeight and signals the catchup loop. -// - catchupLoop sequentially retrieves from localNextDAHeight → highestSeenDAHeight, -// piping events to the Syncer's heightInCh. -// -// The two goroutines share only atomic state; no mutexes needed. +// DAFollower follows DA blob events and drives sequential catchup +// using a shared da.Subscriber for the subscription plumbing. type DAFollower interface { - // Start begins the follow and catchup loops. Start(ctx context.Context) error - // Stop cancels the context and waits for goroutines. Stop() - // HasReachedHead returns true once the catchup loop has processed the - // DA head at least once. Once true, it stays true. HasReachedHead() bool + // QueuePriorityHeight queues a DA height for priority retrieval (from P2P hints). + QueuePriorityHeight(daHeight uint64) } // daFollower is the concrete implementation of DAFollower. type daFollower struct { - client da.Client - retriever DARetriever - logger zerolog.Logger - - // pipeEvent sends a DA height event to the syncer's processLoop. - pipeEvent func(ctx context.Context, event common.DAHeightEvent) error - - // Namespace to subscribe on (header namespace). - namespace []byte - // dataNamespace is the data namespace (may equal namespace when header+data - // share the same namespace). When different, we subscribe to both and merge. - dataNamespace []byte - - // localNextDAHeight is only written by catchupLoop and read by followLoop - // to determine whether a catchup is needed. - localNextDAHeight atomic.Uint64 + subscriber *da.Subscriber + retriever DARetriever + eventSink common.EventSink + logger zerolog.Logger - // highestSeenDAHeight is written by followLoop and read by catchupLoop. 
- highestSeenDAHeight atomic.Uint64 - - // headReached tracks whether the follower has caught up to DA head. - headReached atomic.Bool - - // catchupSignal is sent by followLoop to wake catchupLoop when a new - // height is seen that is above localNextDAHeight. - catchupSignal chan struct{} - - // daBlockTime is used as a backoff before retrying after errors. - daBlockTime time.Duration - - // lifecycle - cancel context.CancelFunc - wg sync.WaitGroup + // Priority queue for P2P hint heights (absorbed from DARetriever refactoring #2). + priorityMu sync.Mutex + priorityHeights []uint64 } // DAFollowerConfig holds configuration for creating a DAFollower. @@ -78,7 +41,7 @@ type DAFollowerConfig struct { Client da.Client Retriever DARetriever Logger zerolog.Logger - PipeEvent func(ctx context.Context, event common.DAHeightEvent) error + EventSink common.EventSink Namespace []byte DataNamespace []byte // may be nil or equal to Namespace StartDAHeight uint64 @@ -91,193 +54,74 @@ func NewDAFollower(cfg DAFollowerConfig) DAFollower { if len(dataNs) == 0 { dataNs = cfg.Namespace } + f := &daFollower{ - client: cfg.Client, - retriever: cfg.Retriever, - logger: cfg.Logger.With().Str("component", "da_follower").Logger(), - pipeEvent: cfg.PipeEvent, - namespace: cfg.Namespace, - dataNamespace: dataNs, - catchupSignal: make(chan struct{}, 1), - daBlockTime: cfg.DABlockTime, + retriever: cfg.Retriever, + eventSink: cfg.EventSink, + logger: cfg.Logger.With().Str("component", "da_follower").Logger(), + priorityHeights: make([]uint64, 0), } - f.localNextDAHeight.Store(cfg.StartDAHeight) + + f.subscriber = da.NewSubscriber(da.SubscriberConfig{ + Client: cfg.Client, + Logger: cfg.Logger, + Namespaces: [][]byte{cfg.Namespace, dataNs}, + DABlockTime: cfg.DABlockTime, + Handler: f, + }) + f.subscriber.SetStartHeight(cfg.StartDAHeight) + return f } // Start begins the follow and catchup goroutines. 
func (f *daFollower) Start(ctx context.Context) error { - ctx, f.cancel = context.WithCancel(ctx) - - f.wg.Add(2) - go f.followLoop(ctx) - f.signalCatchup() - go f.catchupLoop(ctx) - - f.logger.Info(). - Uint64("start_da_height", f.localNextDAHeight.Load()). - Msg("DA follower started") - return nil + return f.subscriber.Start(ctx) } -// Stop cancels and waits. +// Stop gracefully stops the background goroutines. func (f *daFollower) Stop() { - if f.cancel != nil { - f.cancel() - } - f.wg.Wait() + f.subscriber.Stop() } -// HasReachedHead returns whether the DA head has been reached at least once. +// HasReachedHead returns whether the follower has caught up to DA head. func (f *daFollower) HasReachedHead() bool { - return f.headReached.Load() -} - -// signalCatchup sends a non-blocking signal to wake catchupLoop. -func (f *daFollower) signalCatchup() { - select { - case f.catchupSignal <- struct{}{}: - default: - // Already signaled, catchupLoop will pick up the new highestSeen. - } -} - -// followLoop subscribes to DA blob events and keeps highestSeenDAHeight up to date. -// When a new height appears above localNextDAHeight, it wakes the catchup loop. -func (f *daFollower) followLoop(ctx context.Context) { - defer f.wg.Done() - - f.logger.Info().Msg("starting follow loop") - defer f.logger.Info().Msg("follow loop stopped") - - for { - if err := f.runSubscription(ctx); err != nil { - if ctx.Err() != nil { - return - } - f.logger.Warn().Err(err).Msg("DA subscription failed, reconnecting") - select { - case <-ctx.Done(): - return - case <-time.After(f.backoff()): - } - } - } -} - -// runSubscription opens subscriptions on both header and data namespaces (if -// different) and processes events until a channel is closed or an error occurs. -// A watchdog timer triggers if no events arrive within watchdogTimeout(), -// causing a reconnect. 
-func (f *daFollower) runSubscription(ctx context.Context) error { - // Sub-context ensures the merge goroutine is cancelled when this function returns. - subCtx, subCancel := context.WithCancel(ctx) - defer subCancel() - - headerCh, err := f.client.Subscribe(subCtx, f.namespace) - if err != nil { - return fmt.Errorf("subscribe header namespace: %w", err) - } - - // If namespaces differ, subscribe to the data namespace too and fan-in. - ch := headerCh - if !bytes.Equal(f.namespace, f.dataNamespace) { - dataCh, err := f.client.Subscribe(subCtx, f.dataNamespace) - if err != nil { - return fmt.Errorf("subscribe data namespace: %w", err) - } - ch = f.mergeSubscriptions(subCtx, headerCh, dataCh) - } - - watchdogTimeout := f.watchdogTimeout() - watchdog := time.NewTimer(watchdogTimeout) - defer watchdog.Stop() - - for { - select { - case <-subCtx.Done(): - return subCtx.Err() - case ev, ok := <-ch: - if !ok { - return errors.New("subscription channel closed") - } - f.handleSubscriptionEvent(ctx, ev) - watchdog.Reset(watchdogTimeout) - case <-watchdog.C: - return errors.New("subscription watchdog: no events received, reconnecting") - } - } + return f.subscriber.HasReachedHead() } -// mergeSubscriptions fans two subscription channels into one. 
-func (f *daFollower) mergeSubscriptions( - ctx context.Context, - headerCh, dataCh <-chan datypes.SubscriptionEvent, -) <-chan datypes.SubscriptionEvent { - out := make(chan datypes.SubscriptionEvent, 16) - go func() { - defer close(out) - for headerCh != nil || dataCh != nil { - var ev datypes.SubscriptionEvent - var ok bool - select { - case <-ctx.Done(): - return - case ev, ok = <-headerCh: - if !ok { - headerCh = nil - continue - } - case ev, ok = <-dataCh: - if !ok { - dataCh = nil - continue - } - } - select { - case out <- ev: - case <-ctx.Done(): - return - } - } - }() - return out -} +// --------------------------------------------------------------------------- +// SubscriberHandler implementation +// --------------------------------------------------------------------------- -// handleSubscriptionEvent processes a subscription event. When the follower is -// caught up (ev.Height == localNextDAHeight) and blobs are available, it processes -// them inline — avoiding a DA re-fetch round trip. Otherwise, it just updates -// highestSeenDAHeight and lets catchupLoop handle retrieval. +// HandleEvent processes a subscription event. When the follower is +// caught up (ev.Height == localDAHeight) and blobs are available, it processes +// them inline — avoiding a DA re-fetch round trip. Otherwise, it just lets +// the catchup loop handle retrieval. // -// Uses CAS on localNextDAHeight to claim exclusive access to processBlobs, +// Uses CAS on localDAHeight to claim exclusive access to processBlobs, // preventing concurrent map access with catchupLoop. -func (f *daFollower) handleSubscriptionEvent(ctx context.Context, ev datypes.SubscriptionEvent) { - // Always record the highest height we've seen from the subscription. - f.updateHighest(ev.Height) - +func (f *daFollower) HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent) { // Fast path: try to claim this height for inline processing. 
// CAS(N, N+1) ensures only one goroutine (followLoop or catchupLoop) // can enter processBlobs for height N. - if len(ev.Blobs) > 0 && f.localNextDAHeight.CompareAndSwap(ev.Height, ev.Height+1) { + if len(ev.Blobs) > 0 && f.subscriber.CompareAndSwapLocalHeight(ev.Height, ev.Height+1) { events := f.retriever.ProcessBlobs(ctx, ev.Blobs, ev.Height) for _, event := range events { - if err := f.pipeEvent(ctx, event); err != nil { + if err := f.eventSink.PipeEvent(ctx, event); err != nil { // Roll back so catchupLoop can retry this height. - f.localNextDAHeight.Store(ev.Height) + f.subscriber.SetLocalHeight(ev.Height) f.logger.Warn().Err(err).Uint64("da_height", ev.Height). Msg("failed to pipe inline event, catchup will retry") return } } if len(events) != 0 { - if !f.headReached.Load() && f.localNextDAHeight.Load() > f.highestSeenDAHeight.Load() { - f.headReached.Store(true) - } + f.subscriber.SetHeadReached() f.logger.Debug().Uint64("da_height", ev.Height).Int("events", len(events)). Msg("processed subscription blobs inline (fast path)") } else { // No complete events (split namespace, waiting for other half). - f.localNextDAHeight.Store(ev.Height) + f.subscriber.SetLocalHeight(ev.Height) } return } @@ -285,105 +129,36 @@ func (f *daFollower) handleSubscriptionEvent(ctx context.Context, ev datypes.Sub // Slow path: behind, no blobs, or catchupLoop claimed this height. } -// updateHighest atomically bumps highestSeenDAHeight and signals catchup if needed. -func (f *daFollower) updateHighest(height uint64) { - for { - cur := f.highestSeenDAHeight.Load() - if height <= cur { - return - } - if f.highestSeenDAHeight.CompareAndSwap(cur, height) { - f.signalCatchup() - return - } - } -} - -// catchupLoop waits for signals and sequentially retrieves DA heights -// from localNextDAHeight up to highestSeenDAHeight. 
-func (f *daFollower) catchupLoop(ctx context.Context) { - defer f.wg.Done() - - f.logger.Info().Msg("starting catchup loop") - defer f.logger.Info().Msg("catchup loop stopped") - - for { - select { - case <-ctx.Done(): - return - case <-f.catchupSignal: - f.runCatchup(ctx) - } - } -} - -// runCatchup sequentially retrieves from localNextDAHeight to highestSeenDAHeight. -// It handles priority heights first, then sequential heights. -func (f *daFollower) runCatchup(ctx context.Context) { - for { - if ctx.Err() != nil { - return - } - - // Check for priority heights from P2P hints first. - // We drain stale hints to avoid a tight CPU loop if many are queued. - priorityHeight := f.retriever.PopPriorityHeight() - for priorityHeight > 0 && priorityHeight < f.localNextDAHeight.Load() { - priorityHeight = f.retriever.PopPriorityHeight() - } - - if priorityHeight > 0 { +// HandleCatchup retrieves events at a single DA height and pipes them +// to the event sink. Checks priority heights first. +func (f *daFollower) HandleCatchup(ctx context.Context, daHeight uint64) error { + // Check for priority heights from P2P hints first. + if priorityHeight := f.popPriorityHeight(); priorityHeight > 0 { + if priorityHeight >= daHeight { f.logger.Debug(). Uint64("da_height", priorityHeight). Msg("fetching priority DA height from P2P hint") if err := f.fetchAndPipeHeight(ctx, priorityHeight); err != nil { - if !f.waitOnCatchupError(ctx, err, priorityHeight) { - return - } - } - continue - } - - // Sequential catchup. - local := f.localNextDAHeight.Load() - highest := f.highestSeenDAHeight.Load() - - if highest > 0 && local > highest { - // Caught up. - f.headReached.Store(true) - return - } - - // CAS claims this height prevents followLoop from inline-processing - if !f.localNextDAHeight.CompareAndSwap(local, local+1) { - // followLoop already advanced past this height via inline processing. 
- continue - } - - if err := f.fetchAndPipeHeight(ctx, local); err != nil { - // Roll back so we can retry after backoff. - f.localNextDAHeight.Store(local) - if !f.waitOnCatchupError(ctx, err, local) { - return + return err } - continue } + // Re-queue the current height by rolling back (the subscriber already advanced). + f.subscriber.SetLocalHeight(daHeight) + return nil } + + return f.fetchAndPipeHeight(ctx, daHeight) } -// fetchAndPipeHeight retrieves events at a single DA height and pipes them -// to the syncer. +// fetchAndPipeHeight retrieves events at a single DA height and pipes them. func (f *daFollower) fetchAndPipeHeight(ctx context.Context, daHeight uint64) error { events, err := f.retriever.RetrieveFromDA(ctx, daHeight) if err != nil { switch { case errors.Is(err, datypes.ErrBlobNotFound): - // No blobs at this height — not an error, just skip. return nil case errors.Is(err, datypes.ErrHeightFromFuture): - // DA hasn't produced this height yet — mark head reached - // but return the error to trigger a backoff retry. - f.headReached.Store(true) + f.subscriber.SetHeadReached() return err default: return err @@ -391,7 +166,7 @@ func (f *daFollower) fetchAndPipeHeight(ctx context.Context, daHeight uint64) er } for _, event := range events { - if err := f.pipeEvent(ctx, event); err != nil { + if err := f.eventSink.PipeEvent(ctx, event); err != nil { return err } } @@ -399,44 +174,31 @@ func (f *daFollower) fetchAndPipeHeight(ctx context.Context, daHeight uint64) er return nil } -// errCaughtUp is a sentinel used to signal that the catchup loop has reached DA head. -var errCaughtUp = errors.New("caught up with DA head") +// --------------------------------------------------------------------------- +// Priority queue (absorbed from DARetriever — refactoring #2) +// --------------------------------------------------------------------------- -// waitOnCatchupError logs the error and backs off before retrying. 
-// It returns true if the caller should continue (retry), or false if the -// catchup loop should exit (context cancelled or caught-up sentinel). -func (f *daFollower) waitOnCatchupError(ctx context.Context, err error, daHeight uint64) bool { - if errors.Is(err, errCaughtUp) { - f.logger.Debug().Uint64("da_height", daHeight).Msg("DA catchup reached head, waiting for subscription signal") - return false - } - if ctx.Err() != nil { - return false - } - f.logger.Warn().Err(err).Uint64("da_height", daHeight).Msg("catchup error, backing off") - select { - case <-ctx.Done(): - return false - case <-time.After(f.backoff()): - return true - } -} +// QueuePriorityHeight queues a DA height for priority retrieval. +func (f *daFollower) QueuePriorityHeight(daHeight uint64) { + f.priorityMu.Lock() + defer f.priorityMu.Unlock() -// backoff returns the configured DA block time or a sane default. -func (f *daFollower) backoff() time.Duration { - if f.daBlockTime > 0 { - return f.daBlockTime + idx, found := slices.BinarySearch(f.priorityHeights, daHeight) + if found { + return } - return 2 * time.Second + f.priorityHeights = slices.Insert(f.priorityHeights, idx, daHeight) } -// watchdogTimeout returns how long to wait for a subscription event before -// assuming the subscription is stalled. Defaults to 3× the DA block time. -const watchdogMultiplier = 3 +// popPriorityHeight returns the next priority height to fetch, or 0 if none. 
+func (f *daFollower) popPriorityHeight() uint64 { + f.priorityMu.Lock() + defer f.priorityMu.Unlock() -func (f *daFollower) watchdogTimeout() time.Duration { - if f.daBlockTime > 0 { - return f.daBlockTime * watchdogMultiplier + if len(f.priorityHeights) == 0 { + return 0 } - return 30 * time.Second + height := f.priorityHeights[0] + f.priorityHeights = f.priorityHeights[1:] + return height } diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 366d3ed30c..691b2d1bb6 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -5,9 +5,6 @@ import ( "context" "errors" "fmt" - "slices" - "sync" - "sync/atomic" "github.com/rs/zerolog" "google.golang.org/protobuf/proto" @@ -28,11 +25,6 @@ type DARetriever interface { // ProcessBlobs parses raw blob bytes at a given DA height into height events. // Used by the DAFollower to process subscription blobs inline without re-fetching. ProcessBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent - // QueuePriorityHeight queues a DA height for priority retrieval (from P2P hints). - // These heights take precedence over sequential fetching. - QueuePriorityHeight(daHeight uint64) - // PopPriorityHeight returns the next priority height to fetch, or 0 if none. - PopPriorityHeight() uint64 } // daRetriever handles DA retrieval operations for syncing @@ -44,19 +36,12 @@ type daRetriever struct { // transient cache, only full event need to be passed to the syncer // on restart, will be refetch as da height is updated by syncer - pendingMu sync.Mutex pendingHeaders map[uint64]*types.SignedHeader pendingData map[uint64]*types.Data // strictMode indicates if the node has seen a valid DAHeaderEnvelope // and should now reject all legacy/unsigned headers. 
- strictMode atomic.Bool - - // priorityMu protects priorityHeights from concurrent access - priorityMu sync.Mutex - // priorityHeights holds DA heights from P2P hints that should be fetched - // before continuing sequential retrieval. Sorted in ascending order. - priorityHeights []uint64 + strictMode bool } // NewDARetriever creates a new DA retriever @@ -67,42 +52,14 @@ func NewDARetriever( logger zerolog.Logger, ) *daRetriever { return &daRetriever{ - client: client, - cache: cache, - genesis: genesis, - logger: logger.With().Str("component", "da_retriever").Logger(), - pendingHeaders: make(map[uint64]*types.SignedHeader), - pendingData: make(map[uint64]*types.Data), - priorityHeights: make([]uint64, 0), - } -} - -// QueuePriorityHeight queues a DA height for priority retrieval. -// Heights from P2P hints take precedence over sequential fetching. -func (r *daRetriever) QueuePriorityHeight(daHeight uint64) { - r.priorityMu.Lock() - defer r.priorityMu.Unlock() - - idx, found := slices.BinarySearch(r.priorityHeights, daHeight) - if found { - return // Already queued - } - r.priorityHeights = slices.Insert(r.priorityHeights, idx, daHeight) -} - -// PopPriorityHeight returns the next priority height to fetch, or 0 if none. 
-func (r *daRetriever) PopPriorityHeight() uint64 { - r.priorityMu.Lock() - defer r.priorityMu.Unlock() - - if len(r.priorityHeights) == 0 { - return 0 + client: client, + cache: cache, + genesis: genesis, + logger: logger.With().Str("component", "da_retriever").Logger(), + pendingHeaders: make(map[uint64]*types.SignedHeader), + pendingData: make(map[uint64]*types.Data), + strictMode: false, } - - height := r.priorityHeights[0] - r.priorityHeights = r.priorityHeights[1:] - - return height } // RetrieveFromDA retrieves blocks from the specified DA height and returns height events @@ -199,55 +156,41 @@ func (r *daRetriever) validateBlobResponse(res datypes.ResultRetrieve, daHeight // This is the public interface used by the DAFollower for inline subscription processing. // // NOT thread-safe: the caller (DAFollower) must ensure exclusive access via CAS -// on localNextDAHeight before calling this method. +// on localDAHeight before calling this method. func (r *daRetriever) ProcessBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { return r.processBlobs(ctx, blobs, daHeight) } // processBlobs processes retrieved blobs to extract headers and data and returns height events func (r *daRetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { - var decodedHeaders []*types.SignedHeader - var decodedData []*types.Data - - // Decode all blobs first without holding the lock + // Decode all blobs for _, bz := range blobs { if len(bz) == 0 { continue } if header := r.tryDecodeHeader(bz, daHeight); header != nil { - decodedHeaders = append(decodedHeaders, header) - continue - } - - if data := r.tryDecodeData(bz, daHeight); data != nil { - decodedData = append(decodedData, data) - } - } - - r.pendingMu.Lock() - defer r.pendingMu.Unlock() + if _, ok := r.pendingHeaders[header.Height()]; ok { + // a (malicious) node may have re-published valid header to another da height (should never happen) + // we can 
already discard it, only the first one is valid + r.logger.Debug().Uint64("height", header.Height()).Uint64("da_height", daHeight).Msg("header blob already exists for height, discarding") + continue + } - for _, header := range decodedHeaders { - if _, ok := r.pendingHeaders[header.Height()]; ok { - // a (malicious) node may have re-published valid header to another da height (should never happen) - // we can already discard it, only the first one is valid - r.logger.Debug().Uint64("height", header.Height()).Uint64("da_height", daHeight).Msg("header blob already exists for height, discarding") + r.pendingHeaders[header.Height()] = header continue } - r.pendingHeaders[header.Height()] = header - } + if data := r.tryDecodeData(bz, daHeight); data != nil { + if _, ok := r.pendingData[data.Height()]; ok { + // a (malicious) node may have re-published valid data to another da height (should never happen) + // we can already discard it, only the first one is valid + r.logger.Debug().Uint64("height", data.Height()).Uint64("da_height", daHeight).Msg("data blob already exists for height, discarding") + continue + } - for _, data := range decodedData { - if _, ok := r.pendingData[data.Height()]; ok { - // a (malicious) node may have re-published valid data to another da height (should never happen) - // we can already discard it, only the first one is valid - r.logger.Debug().Uint64("height", data.Height()).Uint64("da_height", daHeight).Msg("data blob already exists for height, discarding") - continue + r.pendingData[data.Height()] = data } - - r.pendingData[data.Height()] = data } var events []common.DAHeightEvent @@ -309,7 +252,7 @@ func (r *daRetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedH // Attempt to unmarshal as DAHeaderEnvelope and get the envelope signature if envelopeSignature, err := header.UnmarshalDAEnvelope(bz); err != nil { // If in strict mode, we REQUIRE an envelope. 
- if r.strictMode.Load() { + if r.strictMode { r.logger.Warn().Err(err).Msg("strict mode is enabled, rejecting non-envelope blob") return nil } @@ -343,14 +286,14 @@ func (r *daRetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedH isValidEnvelope = true } } - if r.strictMode.Load() && !isValidEnvelope { + if r.strictMode && !isValidEnvelope { r.logger.Warn().Msg("strict mode: rejecting block that is not a fully valid envelope") return nil } // Mode Switch Logic - if isValidEnvelope && !r.strictMode.Load() { + if isValidEnvelope && !r.strictMode { r.logger.Info().Uint64("height", header.Height()).Msg("valid DA envelope detected, switching to STRICT MODE") - r.strictMode.Store(true) + r.strictMode = true } // Legacy blob support implies: strictMode == false AND (!isValidEnvelope). diff --git a/block/internal/syncing/da_retriever_mock.go b/block/internal/syncing/da_retriever_mock.go index dc6926485e..7e8cc47be1 100644 --- a/block/internal/syncing/da_retriever_mock.go +++ b/block/internal/syncing/da_retriever_mock.go @@ -38,90 +38,6 @@ func (_m *MockDARetriever) EXPECT() *MockDARetriever_Expecter { return &MockDARetriever_Expecter{mock: &_m.Mock} } -// PopPriorityHeight provides a mock function for the type MockDARetriever -func (_mock *MockDARetriever) PopPriorityHeight() uint64 { - ret := _mock.Called() - - if len(ret) == 0 { - panic("no return value specified for PopPriorityHeight") - } - - var r0 uint64 - if returnFunc, ok := ret.Get(0).(func() uint64); ok { - r0 = returnFunc() - } else { - r0 = ret.Get(0).(uint64) - } - return r0 -} - -// MockDARetriever_PopPriorityHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PopPriorityHeight' -type MockDARetriever_PopPriorityHeight_Call struct { - *mock.Call -} - -// PopPriorityHeight is a helper method to define mock.On call -func (_e *MockDARetriever_Expecter) PopPriorityHeight() *MockDARetriever_PopPriorityHeight_Call { - return 
&MockDARetriever_PopPriorityHeight_Call{Call: _e.mock.On("PopPriorityHeight")} -} - -func (_c *MockDARetriever_PopPriorityHeight_Call) Run(run func()) *MockDARetriever_PopPriorityHeight_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *MockDARetriever_PopPriorityHeight_Call) Return(v uint64) *MockDARetriever_PopPriorityHeight_Call { - _c.Call.Return(v) - return _c -} - -func (_c *MockDARetriever_PopPriorityHeight_Call) RunAndReturn(run func() uint64) *MockDARetriever_PopPriorityHeight_Call { - _c.Call.Return(run) - return _c -} - -// QueuePriorityHeight provides a mock function for the type MockDARetriever -func (_mock *MockDARetriever) QueuePriorityHeight(daHeight uint64) { - _mock.Called(daHeight) - return -} - -// MockDARetriever_QueuePriorityHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueuePriorityHeight' -type MockDARetriever_QueuePriorityHeight_Call struct { - *mock.Call -} - -// QueuePriorityHeight is a helper method to define mock.On call -// - daHeight uint64 -func (_e *MockDARetriever_Expecter) QueuePriorityHeight(daHeight interface{}) *MockDARetriever_QueuePriorityHeight_Call { - return &MockDARetriever_QueuePriorityHeight_Call{Call: _e.mock.On("QueuePriorityHeight", daHeight)} -} - -func (_c *MockDARetriever_QueuePriorityHeight_Call) Run(run func(daHeight uint64)) *MockDARetriever_QueuePriorityHeight_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 uint64 - if args[0] != nil { - arg0 = args[0].(uint64) - } - run( - arg0, - ) - }) - return _c -} - -func (_c *MockDARetriever_QueuePriorityHeight_Call) Return() *MockDARetriever_QueuePriorityHeight_Call { - _c.Call.Return() - return _c -} - -func (_c *MockDARetriever_QueuePriorityHeight_Call) RunAndReturn(run func(daHeight uint64)) *MockDARetriever_QueuePriorityHeight_Call { - _c.Run(run) - return _c -} - // RetrieveFromDA provides a mock function for the type MockDARetriever func (_mock 
*MockDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { ret := _mock.Called(ctx, daHeight) diff --git a/block/internal/syncing/da_retriever_tracing.go b/block/internal/syncing/da_retriever_tracing.go index 1e1a9ea7c0..d41418a1d8 100644 --- a/block/internal/syncing/da_retriever_tracing.go +++ b/block/internal/syncing/da_retriever_tracing.go @@ -56,14 +56,6 @@ func (t *tracedDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) return events, nil } -func (t *tracedDARetriever) QueuePriorityHeight(daHeight uint64) { - t.inner.QueuePriorityHeight(daHeight) -} - -func (t *tracedDARetriever) PopPriorityHeight() uint64 { - return t.inner.PopPriorityHeight() -} - func (t *tracedDARetriever) ProcessBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { return t.inner.ProcessBlobs(ctx, blobs, daHeight) } diff --git a/block/internal/syncing/da_retriever_tracing_test.go b/block/internal/syncing/da_retriever_tracing_test.go index 930fc43617..83bd864bfa 100644 --- a/block/internal/syncing/da_retriever_tracing_test.go +++ b/block/internal/syncing/da_retriever_tracing_test.go @@ -27,10 +27,6 @@ func (m *mockDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ( return nil, nil } -func (m *mockDARetriever) QueuePriorityHeight(daHeight uint64) {} - -func (m *mockDARetriever) PopPriorityHeight() uint64 { return 0 } - func (m *mockDARetriever) ProcessBlobs(_ context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { return nil } diff --git a/block/internal/syncing/raft_retriever.go b/block/internal/syncing/raft_retriever.go index 0b976acab9..cfa55662bd 100644 --- a/block/internal/syncing/raft_retriever.go +++ b/block/internal/syncing/raft_retriever.go @@ -14,20 +14,6 @@ import ( "github.com/evstack/ev-node/types" ) -// eventProcessor handles DA height events. Used for syncing. -type eventProcessor interface { - // handle processes a single DA height event. 
- handle(ctx context.Context, event common.DAHeightEvent) error -} - -// eventProcessorFn adapts a function to an eventProcessor. -type eventProcessorFn func(ctx context.Context, event common.DAHeightEvent) error - -// handle calls the wrapped function. -func (e eventProcessorFn) handle(ctx context.Context, event common.DAHeightEvent) error { - return e(ctx, event) -} - // raftStatePreProcessor is called before processing a raft block state type raftStatePreProcessor func(ctx context.Context, state *raft.RaftBlockState) error @@ -37,7 +23,7 @@ type raftRetriever struct { wg sync.WaitGroup logger zerolog.Logger genesis genesis.Genesis - eventProcessor eventProcessor + eventSink common.EventSink raftBlockPreProcessor raftStatePreProcessor mtx sync.Mutex @@ -49,14 +35,14 @@ func newRaftRetriever( raftNode common.RaftNode, genesis genesis.Genesis, logger zerolog.Logger, - eventProcessor eventProcessor, + eventSink common.EventSink, raftBlockPreProcessor raftStatePreProcessor, ) *raftRetriever { return &raftRetriever{ raftNode: raftNode, genesis: genesis, logger: logger, - eventProcessor: eventProcessor, + eventSink: eventSink, raftBlockPreProcessor: raftBlockPreProcessor, } } @@ -153,7 +139,7 @@ func (r *raftRetriever) consumeRaftBlock(ctx context.Context, state *raft.RaftBl Data: &data, DaHeight: 0, // raft events don't have DA height context, yet as DA submission is asynchronous } - return r.eventProcessor.handle(ctx, event) + return r.eventSink.PipeEvent(ctx, event) } // Height returns the current height of the raft node's state. 
diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index a41f2a5478..91d231527e 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -143,7 +143,7 @@ func NewSyncer( } s.blockSyncer = s if raftNode != nil && !reflect.ValueOf(raftNode).IsNil() { - s.raftRetriever = newRaftRetriever(raftNode, genesis, logger, eventProcessorFn(s.pipeEvent), + s.raftRetriever = newRaftRetriever(raftNode, genesis, logger, s, func(ctx context.Context, state *raft.RaftBlockState) error { s.logger.Debug().Uint64("header_height", state.LastSubmittedDaHeaderHeight).Uint64("data_height", state.LastSubmittedDaDataHeight).Msg("received raft block state") cache.SetLastSubmittedHeaderHeight(ctx, state.LastSubmittedDaHeaderHeight) @@ -181,7 +181,7 @@ func (s *Syncer) Start(ctx context.Context) (err error) { s.daRetriever = WithTracingDARetriever(s.daRetriever) } - s.fiRetriever = da.NewForcedInclusionRetriever(s.daClient, s.logger, s.config, s.genesis.DAStartHeight, s.genesis.DAEpochForcedInclusion) + s.fiRetriever = da.NewForcedInclusionRetriever(ctx, s.daClient, s.logger, s.config.DA.BlockTime.Duration, s.config.Instrumentation.IsTracingEnabled(), s.genesis.DAStartHeight, s.genesis.DAEpochForcedInclusion) s.p2pHandler = NewP2PHandler(s.headerStore, s.dataStore, s.cache, s.genesis, s.logger) currentHeight, initErr := s.store.Height(ctx) @@ -209,7 +209,7 @@ func (s *Syncer) Start(ctx context.Context) (err error) { Client: s.daClient, Retriever: s.daRetriever, Logger: s.logger, - PipeEvent: s.pipeEvent, + EventSink: s, Namespace: s.daClient.GetHeaderNamespace(), DataNamespace: s.daClient.GetDataNamespace(), StartDAHeight: s.daRetrieverHeight.Load(), @@ -492,7 +492,7 @@ func (s *Syncer) waitForGenesis() bool { return true } -func (s *Syncer) pipeEvent(ctx context.Context, event common.DAHeightEvent) error { +func (s *Syncer) PipeEvent(ctx context.Context, event common.DAHeightEvent) error { select { case s.heightInCh <- event: return 
nil @@ -608,7 +608,7 @@ func (s *Syncer) processHeightEvent(ctx context.Context, event *common.DAHeightE Msg("P2P event with DA height hint, queuing priority DA retrieval") // Queue priority DA retrieval - will be processed in fetchDAUntilCaughtUp - s.daRetriever.QueuePriorityHeight(daHeightHint) + s.daFollower.QueuePriorityHeight(daHeightHint) } } } diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index b8ebf269e9..113cf173d9 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -60,21 +60,26 @@ func TestDAFollower_BackoffOnCatchupError(t *testing.T) { defer cancel() daRetriever := NewMockDARetriever(t) - daRetriever.On("PopPriorityHeight").Return(uint64(0)).Maybe() pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } follower := NewDAFollower(DAFollowerConfig{ Retriever: daRetriever, Logger: zerolog.Nop(), - PipeEvent: pipeEvent, + EventSink: common.EventSinkFunc(pipeEvent), Namespace: []byte("ns"), StartDAHeight: 100, DABlockTime: tc.daBlockTime, }).(*daFollower) - ctx, follower.cancel = context.WithCancel(ctx) - follower.highestSeenDAHeight.Store(102) + // Set up the subscriber for direct testing. + sub := follower.subscriber + sub.SetStartHeight(100) + ctx, subCancel := context.WithCancel(ctx) + defer subCancel() + + // Set highest to trigger catchup. + sub.UpdateHighestForTest(102) var callTimes []time.Time callCount := 0 @@ -92,7 +97,7 @@ func TestDAFollower_BackoffOnCatchupError(t *testing.T) { Run(func(args mock.Arguments) { callTimes = append(callTimes, time.Now()) callCount++ - cancel() + subCancel() }). Return(nil, datypes.ErrBlobNotFound).Once() } else { @@ -100,12 +105,12 @@ func TestDAFollower_BackoffOnCatchupError(t *testing.T) { Run(func(args mock.Arguments) { callTimes = append(callTimes, time.Now()) callCount++ - cancel() + subCancel() }). 
Return(nil, datypes.ErrBlobNotFound).Once() } - go follower.runCatchup(ctx) + go sub.RunCatchupForTest(ctx) <-ctx.Done() if tc.expectsBackoff { @@ -144,21 +149,22 @@ func TestDAFollower_BackoffResetOnSuccess(t *testing.T) { gen := backoffTestGenesis(addr) daRetriever := NewMockDARetriever(t) - daRetriever.On("PopPriorityHeight").Return(uint64(0)).Maybe() pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } follower := NewDAFollower(DAFollowerConfig{ Retriever: daRetriever, Logger: zerolog.Nop(), - PipeEvent: pipeEvent, + EventSink: common.EventSinkFunc(pipeEvent), Namespace: []byte("ns"), StartDAHeight: 100, DABlockTime: 1 * time.Second, }).(*daFollower) - ctx, follower.cancel = context.WithCancel(ctx) - follower.highestSeenDAHeight.Store(105) + sub := follower.subscriber + ctx, subCancel := context.WithCancel(ctx) + defer subCancel() + sub.UpdateHighestForTest(105) var callTimes []time.Time @@ -194,11 +200,11 @@ func TestDAFollower_BackoffResetOnSuccess(t *testing.T) { daRetriever.On("RetrieveFromDA", mock.Anything, uint64(101)). Run(func(args mock.Arguments) { callTimes = append(callTimes, time.Now()) - cancel() + subCancel() }). 
Return(nil, datypes.ErrBlobNotFound).Once() - go follower.runCatchup(ctx) + go sub.RunCatchupForTest(ctx) <-ctx.Done() require.Len(t, callTimes, 3, "should make exactly 3 calls") @@ -221,20 +227,20 @@ func TestDAFollower_CatchupThenReachHead(t *testing.T) { defer cancel() daRetriever := NewMockDARetriever(t) - daRetriever.On("PopPriorityHeight").Return(uint64(0)).Maybe() pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } follower := NewDAFollower(DAFollowerConfig{ Retriever: daRetriever, Logger: zerolog.Nop(), - PipeEvent: pipeEvent, + EventSink: common.EventSinkFunc(pipeEvent), Namespace: []byte("ns"), StartDAHeight: 3, DABlockTime: 500 * time.Millisecond, }).(*daFollower) - follower.highestSeenDAHeight.Store(5) + sub := follower.subscriber + sub.UpdateHighestForTest(5) var fetchedHeights []uint64 @@ -246,7 +252,7 @@ func TestDAFollower_CatchupThenReachHead(t *testing.T) { Return(nil, datypes.ErrBlobNotFound).Once() } - follower.runCatchup(ctx) + sub.RunCatchupForTest(ctx) assert.True(t, follower.HasReachedHead(), "should have reached DA head") // Heights 3, 4, 5 processed; local now at 6 which > highest (5) → caught up @@ -255,7 +261,7 @@ func TestDAFollower_CatchupThenReachHead(t *testing.T) { } // TestDAFollower_InlineProcessing verifies the fast path: when the subscription -// delivers blobs at the current localNextDAHeight, handleSubscriptionEvent processes +// delivers blobs at the current localDAHeight, HandleEvent processes // them inline via ProcessBlobs (not RetrieveFromDA). 
func TestDAFollower_InlineProcessing(t *testing.T) { t.Run("processes_blobs_inline_when_caught_up", func(t *testing.T) { @@ -270,7 +276,7 @@ func TestDAFollower_InlineProcessing(t *testing.T) { follower := NewDAFollower(DAFollowerConfig{ Retriever: daRetriever, Logger: zerolog.Nop(), - PipeEvent: pipeEvent, + EventSink: common.EventSinkFunc(pipeEvent), Namespace: []byte("ns"), StartDAHeight: 10, DABlockTime: 500 * time.Millisecond, @@ -285,8 +291,8 @@ func TestDAFollower_InlineProcessing(t *testing.T) { daRetriever.On("ProcessBlobs", mock.Anything, blobs, uint64(10)). Return(expectedEvents).Once() - // Simulate subscription event at the current localNextDAHeight - follower.handleSubscriptionEvent(t.Context(), datypes.SubscriptionEvent{ + // Simulate subscription event at the current localDAHeight + follower.HandleEvent(t.Context(), datypes.SubscriptionEvent{ Height: 10, Blobs: blobs, }) @@ -294,7 +300,7 @@ func TestDAFollower_InlineProcessing(t *testing.T) { // Verify: ProcessBlobs was called, events were piped, height advanced require.Len(t, pipedEvents, 1, "should pipe 1 event from inline processing") assert.Equal(t, uint64(10), pipedEvents[0].DaHeight) - assert.Equal(t, uint64(11), follower.localNextDAHeight.Load(), "localNextDAHeight should advance past processed height") + assert.Equal(t, uint64(11), follower.subscriber.LocalDAHeight(), "localDAHeight should advance past processed height") assert.True(t, follower.HasReachedHead(), "should mark head as reached after inline processing") }) @@ -306,26 +312,24 @@ func TestDAFollower_InlineProcessing(t *testing.T) { follower := NewDAFollower(DAFollowerConfig{ Retriever: daRetriever, Logger: zerolog.Nop(), - PipeEvent: pipeEvent, + EventSink: common.EventSinkFunc(pipeEvent), Namespace: []byte("ns"), StartDAHeight: 10, DABlockTime: 500 * time.Millisecond, }).(*daFollower) - ctx := t.Context() - ctx, follower.cancel = context.WithCancel(ctx) - defer follower.cancel() - // Subscription reports height 15 but local is at 
10 — should NOT process inline - follower.handleSubscriptionEvent(ctx, datypes.SubscriptionEvent{ + // In production, the subscriber calls updateHighest before HandleEvent. + follower.subscriber.UpdateHighestForTest(15) + follower.HandleEvent(t.Context(), datypes.SubscriptionEvent{ Height: 15, Blobs: [][]byte{[]byte("blob")}, }) // ProcessBlobs should NOT have been called daRetriever.AssertNotCalled(t, "ProcessBlobs", mock.Anything, mock.Anything, mock.Anything) - assert.Equal(t, uint64(10), follower.localNextDAHeight.Load(), "localNextDAHeight should not change") - assert.Equal(t, uint64(15), follower.highestSeenDAHeight.Load(), "highestSeen should be updated") + assert.Equal(t, uint64(10), follower.subscriber.LocalDAHeight(), "localDAHeight should not change") + assert.Equal(t, uint64(15), follower.subscriber.HighestSeenDAHeight(), "highestSeen should be updated") }) t.Run("falls_through_when_no_blobs", func(t *testing.T) { @@ -336,22 +340,24 @@ func TestDAFollower_InlineProcessing(t *testing.T) { follower := NewDAFollower(DAFollowerConfig{ Retriever: daRetriever, Logger: zerolog.Nop(), - PipeEvent: pipeEvent, + EventSink: common.EventSinkFunc(pipeEvent), Namespace: []byte("ns"), StartDAHeight: 10, DABlockTime: 500 * time.Millisecond, }).(*daFollower) // Subscription at current height but no blobs — should fall through - follower.handleSubscriptionEvent(t.Context(), datypes.SubscriptionEvent{ + // In production, the subscriber calls updateHighest before HandleEvent. 
+ follower.subscriber.UpdateHighestForTest(10) + follower.HandleEvent(t.Context(), datypes.SubscriptionEvent{ Height: 10, Blobs: nil, }) // ProcessBlobs should NOT have been called daRetriever.AssertNotCalled(t, "ProcessBlobs", mock.Anything, mock.Anything, mock.Anything) - assert.Equal(t, uint64(10), follower.localNextDAHeight.Load(), "localNextDAHeight should not change") - assert.Equal(t, uint64(10), follower.highestSeenDAHeight.Load(), "highestSeen should be updated") + assert.Equal(t, uint64(10), follower.subscriber.LocalDAHeight(), "localDAHeight should not change") + assert.Equal(t, uint64(10), follower.subscriber.HighestSeenDAHeight(), "highestSeen should be updated") }) } diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index 9536808657..b0bccc6f74 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -43,25 +43,26 @@ func BenchmarkSyncerIO(b *testing.B) { fixt := newBenchFixture(b, spec.heights, spec.shuffledTx, spec.daDelay, spec.execDelay, true) // run both loops - runCtx := fixt.s.ctx - go fixt.s.processLoop(runCtx) + ctx := b.Context() + go fixt.s.processLoop(ctx) // Create a DAFollower to drive DA retrieval. 
follower := NewDAFollower(DAFollowerConfig{ Retriever: fixt.s.daRetriever, Logger: zerolog.Nop(), - PipeEvent: fixt.s.pipeEvent, + EventSink: fixt.s, Namespace: []byte("ns"), StartDAHeight: fixt.s.daRetrieverHeight.Load(), DABlockTime: 0, }).(*daFollower) - follower.highestSeenDAHeight.Store(spec.heights + daHeightOffset) - go follower.runCatchup(runCtx) + sub := follower.subscriber + sub.UpdateHighestForTest(spec.heights + daHeightOffset) + go sub.RunCatchupForTest(ctx) - fixt.s.startSyncWorkers(runCtx) + fixt.s.startSyncWorkers(ctx) require.Eventually(b, func() bool { - processedHeight, _ := fixt.s.store.Height(b.Context()) + processedHeight, _ := fixt.s.store.Height(ctx) return processedHeight == spec.heights }, 5*time.Second, 50*time.Microsecond) fixt.s.cancel() @@ -75,7 +76,7 @@ func BenchmarkSyncerIO(b *testing.B) { require.Len(b, fixt.s.heightInCh, 0) assert.Equal(b, spec.heights+daHeightOffset, fixt.s.daRetrieverHeight) - gotStoreHeight, err := fixt.s.store.Height(b.Context()) + gotStoreHeight, err := fixt.s.store.Height(ctx) require.NoError(b, err) assert.Equal(b, spec.heights, gotStoreHeight) } @@ -151,7 +152,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay // Mock DA retriever to emit exactly totalHeights events, then HFF and cancel daR := NewMockDARetriever(b) - daR.On("PopPriorityHeight").Return(uint64(0)).Maybe() + for i := range totalHeights { daHeight := i + daHeightOffset daR.On("RetrieveFromDA", mock.Anything, daHeight). 
diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index bcb96d5bb1..110fa05609 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -74,9 +74,11 @@ func newForcedInclusionSyncer(t *testing.T, daStart, epochSize uint64) (*Syncer, client.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() client.On("GetForcedInclusionNamespace").Return([]byte(cfg.DA.ForcedInclusionNamespace)).Maybe() client.On("HasForcedInclusionNamespace").Return(true).Maybe() + subCh := make(chan datypes.SubscriptionEvent) + client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Maybe() daRetriever := NewDARetriever(client, cm, gen, zerolog.Nop()) - fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), cfg, gen.DAStartHeight, gen.DAEpochForcedInclusion) + fiRetriever := da.NewForcedInclusionRetriever(t.Context(), client, zerolog.Nop(), cfg.DA.BlockTime.Duration, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) t.Cleanup(fiRetriever.Stop) s := NewSyncer( @@ -145,8 +147,10 @@ func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { client.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() client.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() client.On("HasForcedInclusionNamespace").Return(false).Maybe() + subCh := make(chan datypes.SubscriptionEvent) + client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Maybe() - fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), cfg, gen.DAStartHeight, gen.DAEpochForcedInclusion) + fiRetriever := da.NewForcedInclusionRetriever(t.Context(), client, zerolog.Nop(), cfg.DA.BlockTime.Duration, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) t.Cleanup(fiRetriever.Stop) s := NewSyncer( diff --git 
a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 771e71c284..757d4fb283 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -271,13 +271,13 @@ func TestSequentialBlockSync(t *testing.T) { assert.Equal(t, uint64(2), finalState.LastBlockHeight) // Verify DA inclusion markers are set - _, ok := cm.GetHeaderDAIncludedByHash(hdr1.Hash().String()) + _, ok := cm.GetHeaderDAIncluded(hdr1.Hash().String()) assert.True(t, ok) - _, ok = cm.GetHeaderDAIncludedByHash(hdr2.Hash().String()) + _, ok = cm.GetHeaderDAIncluded(hdr2.Hash().String()) assert.True(t, ok) - _, ok = cm.GetDataDAIncludedByHash(data1.DACommitment().String()) + _, ok = cm.GetDataDAIncluded(data1.DACommitment().String()) assert.True(t, ok) - _, ok = cm.GetDataDAIncludedByHash(data2.DACommitment().String()) + _, ok = cm.GetDataDAIncluded(data2.DACommitment().String()) assert.True(t, ok) requireEmptyChan(t, errChan) } @@ -375,8 +375,15 @@ func TestSyncLoopPersistState(t *testing.T) { daRtrMock, p2pHndlMock := NewMockDARetriever(t), newMockp2pHandler(t) p2pHndlMock.On("ProcessHeight", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() p2pHndlMock.On("SetProcessedHeight", mock.Anything).Return().Maybe() - daRtrMock.On("PopPriorityHeight").Return(uint64(0)).Maybe() + syncerInst1.daRetriever, syncerInst1.p2pHandler = daRtrMock, p2pHndlMock + syncerInst1.daFollower = NewDAFollower(DAFollowerConfig{ + Retriever: daRtrMock, + Logger: zerolog.Nop(), + EventSink: common.EventSinkFunc(syncerInst1.PipeEvent), + Namespace: []byte("ns"), + StartDAHeight: syncerInst1.daRetrieverHeight.Load(), + }) // with n da blobs fetched var prevHeaderHash, prevAppHash []byte @@ -422,21 +429,20 @@ func TestSyncLoopPersistState(t *testing.T) { follower1 := NewDAFollower(DAFollowerConfig{ Retriever: daRtrMock, Logger: zerolog.Nop(), - PipeEvent: syncerInst1.pipeEvent, + EventSink: common.EventSinkFunc(syncerInst1.PipeEvent), Namespace: 
[]byte("ns"), StartDAHeight: syncerInst1.daRetrieverHeight.Load(), DABlockTime: cfg.DA.BlockTime.Duration, }).(*daFollower) - ctx, follower1.cancel = context.WithCancel(ctx) - // Set highest so catchup runs through all mocked heights. - follower1.highestSeenDAHeight.Store(myFutureDAHeight) - go follower1.runCatchup(ctx) + sub1 := follower1.subscriber + sub1.UpdateHighestForTest(myFutureDAHeight) + go sub1.RunCatchupForTest(ctx) syncerInst1.startSyncWorkers(ctx) syncerInst1.wg.Wait() requireEmptyChan(t, errorCh) t.Log("sync workers on instance1 completed") - require.Equal(t, myFutureDAHeight, follower1.localNextDAHeight.Load()) + require.Equal(t, myFutureDAHeight, follower1.subscriber.LocalDAHeight()) // wait for all events consumed require.NoError(t, cm.SaveToStore()) @@ -481,8 +487,15 @@ func TestSyncLoopPersistState(t *testing.T) { daRtrMock, p2pHndlMock = NewMockDARetriever(t), newMockp2pHandler(t) p2pHndlMock.On("ProcessHeight", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() p2pHndlMock.On("SetProcessedHeight", mock.Anything).Return().Maybe() - daRtrMock.On("PopPriorityHeight").Return(uint64(0)).Maybe() + syncerInst2.daRetriever, syncerInst2.p2pHandler = daRtrMock, p2pHndlMock + syncerInst2.daFollower = NewDAFollower(DAFollowerConfig{ + Retriever: daRtrMock, + Logger: zerolog.Nop(), + EventSink: common.EventSinkFunc(syncerInst2.PipeEvent), + Namespace: []byte("ns"), + StartDAHeight: syncerInst2.daRetrieverHeight.Load(), + }) daRtrMock.On("RetrieveFromDA", mock.Anything, mock.Anything). 
Run(func(arg mock.Arguments) { @@ -499,14 +512,14 @@ func TestSyncLoopPersistState(t *testing.T) { follower2 := NewDAFollower(DAFollowerConfig{ Retriever: daRtrMock, Logger: zerolog.Nop(), - PipeEvent: syncerInst2.pipeEvent, + EventSink: common.EventSinkFunc(syncerInst2.PipeEvent), Namespace: []byte("ns"), StartDAHeight: syncerInst2.daRetrieverHeight.Load(), DABlockTime: cfg.DA.BlockTime.Duration, }).(*daFollower) - ctx, follower2.cancel = context.WithCancel(ctx) - follower2.highestSeenDAHeight.Store(syncerInst2.daRetrieverHeight.Load() + 1) - go follower2.runCatchup(ctx) + sub2 := follower2.subscriber + sub2.UpdateHighestForTest(syncerInst2.daRetrieverHeight.Load() + 1) + go sub2.RunCatchupForTest(ctx) syncerInst2.startSyncWorkers(ctx) syncerInst2.wg.Wait() @@ -719,6 +732,13 @@ func TestProcessHeightEvent_TriggersAsyncDARetrieval(t *testing.T) { // Create a real daRetriever to test priority queue s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop()) + s.daFollower = NewDAFollower(DAFollowerConfig{ + Retriever: s.daRetriever, + Logger: zerolog.Nop(), + EventSink: common.EventSinkFunc(func(_ context.Context, _ common.DAHeightEvent) error { return nil }), + Namespace: []byte("ns"), + StartDAHeight: 0, + }) // Create event with DA height hint evt := common.DAHeightEvent{ @@ -737,7 +757,7 @@ func TestProcessHeightEvent_TriggersAsyncDARetrieval(t *testing.T) { s.processHeightEvent(t.Context(), &evt) // Verify that the priority height was queued in the daRetriever - priorityHeight := s.daRetriever.PopPriorityHeight() + priorityHeight := s.daFollower.(*daFollower).popPriorityHeight() assert.Equal(t, uint64(100), priorityHeight) } @@ -776,6 +796,13 @@ func TestProcessHeightEvent_RejectsUnreasonableDAHint(t *testing.T) { require.NoError(t, s.initializeState()) s.ctx = context.Background() s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop()) + s.daFollower = NewDAFollower(DAFollowerConfig{ + Retriever: s.daRetriever, + Logger: zerolog.Nop(), + EventSink: 
common.EventSinkFunc(func(_ context.Context, _ common.DAHeightEvent) error { return nil }), + Namespace: []byte("ns"), + StartDAHeight: 0, + }) // Set store height to 1 so event at height 2 is "next" batch, err := st.NewBatch(context.Background()) @@ -794,7 +821,7 @@ func TestProcessHeightEvent_RejectsUnreasonableDAHint(t *testing.T) { s.processHeightEvent(t.Context(), &evt) // Verify that NO priority height was queued — the hint was rejected - priorityHeight := s.daRetriever.PopPriorityHeight() + priorityHeight := s.daFollower.(*daFollower).popPriorityHeight() assert.Equal(t, uint64(0), priorityHeight, "unreasonable DA hint should be rejected") } @@ -833,6 +860,13 @@ func TestProcessHeightEvent_AcceptsValidDAHint(t *testing.T) { require.NoError(t, s.initializeState()) s.ctx = context.Background() s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop()) + s.daFollower = NewDAFollower(DAFollowerConfig{ + Retriever: s.daRetriever, + Logger: zerolog.Nop(), + EventSink: common.EventSinkFunc(func(_ context.Context, _ common.DAHeightEvent) error { return nil }), + Namespace: []byte("ns"), + StartDAHeight: 0, + }) // Set store height to 1 so event at height 2 is "next" batch, err := st.NewBatch(context.Background()) @@ -851,104 +885,11 @@ func TestProcessHeightEvent_AcceptsValidDAHint(t *testing.T) { s.processHeightEvent(t.Context(), &evt) // Verify that the priority height was queued — the hint is valid - priorityHeight := s.daRetriever.PopPriorityHeight() + priorityHeight := s.daFollower.(*daFollower).popPriorityHeight() assert.Equal(t, uint64(50), priorityHeight, "valid DA hint should be queued") } -// TestProcessHeightEvent_SkipsDAHintWhenAlreadyDAIncluded verifies that when the -// DA-inclusion cache already has an entry for the block height carried by a P2P -// event, the DA height hint is NOT queued for priority retrieval. 
-func TestProcessHeightEvent_SkipsDAHintWhenAlreadyDAIncluded(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) - - addr, _, _ := buildSyncTestSigner(t) - cfg := config.DefaultConfig() - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - - mockExec := testmocks.NewMockExecutor(t) - mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain").Return([]byte("app0"), nil).Once() - - mockDAClient := testmocks.NewMockClient(t) - - s := NewSyncer( - st, - mockExec, - mockDAClient, - cm, - common.NopMetrics(), - cfg, - gen, - extmocks.NewMockStore[*types.P2PSignedHeader](t), - extmocks.NewMockStore[*types.P2PData](t), - zerolog.Nop(), - common.DefaultBlockOptions(), - make(chan error, 1), - nil, - ) - require.NoError(t, s.initializeState()) - s.ctx = context.Background() - s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop()) - - // Set the store height to 1 so the event at height 2 is "next". - batch, err := st.NewBatch(context.Background()) - require.NoError(t, err) - require.NoError(t, batch.SetHeight(1)) - require.NoError(t, batch.Commit()) - - // Simulate the DA retriever having already confirmed header and data at height 2. - cm.SetHeaderDAIncluded("somehash-hdr2", 42, 2) - cm.SetDataDAIncluded("somehash-data2", 42, 2) - - // Both hints point to DA height 100, which is above daRetrieverHeight (0), - // so the only reason NOT to queue them is the cache hit. 
- evt := common.DAHeightEvent{ - Header: &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{ChainID: gen.ChainID, Height: 2}}}, - Data: &types.Data{Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 2}}, - Source: common.SourceP2P, - DaHeightHints: [2]uint64{100, 100}, - } - - s.processHeightEvent(t.Context(), &evt) - - // Neither hint should be queued — both are already DA-included in the cache. - priorityHeight := s.daRetriever.PopPriorityHeight() - assert.Equal(t, uint64(0), priorityHeight, - "DA hint must not be queued when header and data are already DA-included in cache") - - // ── partial case: only header confirmed ────────────────────────────────── - // Reset with a fresh cache that has only the header entry for height 3. - cm2, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) - s.cache = cm2 - cm2.SetHeaderDAIncluded("somehash-hdr3", 55, 3) - // data at height 3 is NOT in cache - - batch, err = st.NewBatch(context.Background()) - require.NoError(t, err) - require.NoError(t, batch.SetHeight(2)) - require.NoError(t, batch.Commit()) - - evt3 := common.DAHeightEvent{ - Header: &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{ChainID: gen.ChainID, Height: 3}}}, - Data: &types.Data{Metadata: &types.Metadata{ChainID: gen.ChainID, Height: 3}, Txs: types.Txs{types.Tx("tx2")}}, - Source: common.SourceP2P, - DaHeightHints: [2]uint64{150, 151}, // within daHintMaxDrift(200) of daRetrieverHeight(0) — no DA validation needed - } - - s.processHeightEvent(t.Context(), &evt3) - - // Header hint (150) must be skipped; data hint (151) must be queued. 
- priorityHeight = s.daRetriever.PopPriorityHeight() - assert.Equal(t, uint64(151), priorityHeight, - "data hint must be queued when only the header is already DA-included") - assert.Equal(t, uint64(0), s.daRetriever.PopPriorityHeight(), - "no further hints should be queued") -} - -func TestProcessHeightEvent_SkipsDAHintWhenBelowRetrieverCursor(t *testing.T) { +func TestProcessHeightEvent_SkipsDAHintWhenAlreadyFetched(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) @@ -985,9 +926,15 @@ func TestProcessHeightEvent_SkipsDAHintWhenBelowRetrieverCursor(t *testing.T) { // Create a real daRetriever to test priority queue s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop()) + s.daFollower = NewDAFollower(DAFollowerConfig{ + Retriever: s.daRetriever, + Logger: zerolog.Nop(), + EventSink: common.EventSinkFunc(func(_ context.Context, _ common.DAHeightEvent) error { return nil }), + Namespace: []byte("ns"), + StartDAHeight: 0, + }) - // Set DA retriever height to 150 - the sequential DA scan cursor is already past - // the hint (100), so there is no need to queue a priority retrieval. + // Set DA retriever height to 150 - simulating we've already fetched past height 100 s.daRetrieverHeight.Store(150) // Set the store height to 1 so the event can be processed @@ -1006,9 +953,8 @@ func TestProcessHeightEvent_SkipsDAHintWhenBelowRetrieverCursor(t *testing.T) { s.processHeightEvent(t.Context(), &evt) - // Verify that no priority height was queued: the hint (100) is below the - // retriever cursor (150), so sequential scanning will cover it naturally. 
- priorityHeight := s.daRetriever.PopPriorityHeight() + // Verify that no priority height was queued since we've already fetched past it + priorityHeight := s.daFollower.(*daFollower).popPriorityHeight() assert.Equal(t, uint64(0), priorityHeight, "should not queue DA hint that is below current daRetrieverHeight") // Now test with a hint that is ABOVE the current daRetrieverHeight @@ -1028,7 +974,7 @@ func TestProcessHeightEvent_SkipsDAHintWhenBelowRetrieverCursor(t *testing.T) { s.processHeightEvent(t.Context(), &evt2) // Verify that the priority height WAS queued since it's above daRetrieverHeight - priorityHeight = s.daRetriever.PopPriorityHeight() + priorityHeight = s.daFollower.(*daFollower).popPriorityHeight() assert.Equal(t, uint64(200), priorityHeight, "should queue DA hint that is above current daRetrieverHeight") } diff --git a/block/public.go b/block/public.go index 39f6c3e308..988760211b 100644 --- a/block/public.go +++ b/block/public.go @@ -84,12 +84,13 @@ type ForcedInclusionRetriever interface { // It internally creates and manages an AsyncBlockRetriever for background prefetching. // Tracing is automatically enabled when configured. 
func NewForcedInclusionRetriever( + ctx context.Context, client DAClient, cfg config.Config, logger zerolog.Logger, daStartHeight, daEpochSize uint64, ) ForcedInclusionRetriever { - return da.NewForcedInclusionRetriever(client, logger, cfg, daStartHeight, daEpochSize) + return da.NewForcedInclusionRetriever(ctx, client, logger, cfg.DA.BlockTime.Duration, cfg.Instrumentation.IsTracingEnabled(), daStartHeight, daEpochSize) } // Expose Raft types for consensus integration diff --git a/pkg/sequencers/based/sequencer.go b/pkg/sequencers/based/sequencer.go index f079d8ccab..22bb8947f5 100644 --- a/pkg/sequencers/based/sequencer.go +++ b/pkg/sequencers/based/sequencer.go @@ -97,7 +97,7 @@ func NewBasedSequencer( } } - bs.fiRetriever = block.NewForcedInclusionRetriever(daClient, cfg, logger, genesis.DAStartHeight, genesis.DAEpochForcedInclusion) + bs.fiRetriever = block.NewForcedInclusionRetriever(context.Background(), daClient, cfg, logger, genesis.DAStartHeight, genesis.DAEpochForcedInclusion) return bs, nil } diff --git a/pkg/sequencers/based/sequencer_test.go b/pkg/sequencers/based/sequencer_test.go index c51a7673fe..7c65b8ad0a 100644 --- a/pkg/sequencers/based/sequencer_test.go +++ b/pkg/sequencers/based/sequencer_test.go @@ -71,6 +71,7 @@ func createTestSequencer(t *testing.T, mockRetriever *common.MockForcedInclusion // Mock the forced inclusion namespace call mockDAClient.MockClient.On("GetForcedInclusionNamespace").Return([]byte("test-forced-inclusion-ns")).Maybe() mockDAClient.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDAClient.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() mockExec := createDefaultMockExecutor(t) @@ -469,6 +470,7 @@ func TestBasedSequencer_CheckpointPersistence(t *testing.T) { } mockDAClient.MockClient.On("GetForcedInclusionNamespace").Return([]byte("test-forced-inclusion-ns")).Maybe() 
mockDAClient.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDAClient.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // Create first sequencer mockExec1 := createDefaultMockExecutor(t) @@ -496,6 +498,7 @@ func TestBasedSequencer_CheckpointPersistence(t *testing.T) { } mockDAClient2.MockClient.On("GetForcedInclusionNamespace").Return([]byte("test-forced-inclusion-ns")).Maybe() mockDAClient2.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDAClient2.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() mockExec2 := createDefaultMockExecutor(t) seq2, err := NewBasedSequencer(mockDAClient2, config.DefaultConfig(), db, gen, zerolog.Nop(), mockExec2) require.NoError(t, err) @@ -542,6 +545,7 @@ func TestBasedSequencer_CrashRecoveryMidEpoch(t *testing.T) { mockDAClient.MockClient.On("GetForcedInclusionNamespace").Return([]byte("test-forced-inclusion-ns")).Maybe() mockDAClient.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDAClient.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // Create mock executor that postpones tx2 on first call filterCallCount := 0 @@ -602,6 +606,7 @@ func TestBasedSequencer_CrashRecoveryMidEpoch(t *testing.T) { } mockDAClient2.MockClient.On("GetForcedInclusionNamespace").Return([]byte("test-forced-inclusion-ns")).Maybe() mockDAClient2.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDAClient2.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() seq2, err := NewBasedSequencer(mockDAClient2, config.DefaultConfig(), db, gen, zerolog.Nop(), mockExec) 
require.NoError(t, err) @@ -927,6 +932,7 @@ func TestBasedSequencer_GetNextBatch_GasFilteringPreservesUnprocessedTxs(t *test } mockDAClient.MockClient.On("GetForcedInclusionNamespace").Return([]byte("test-forced-inclusion-ns")).Maybe() mockDAClient.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDAClient.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() seq, err := NewBasedSequencer(mockDAClient, config.DefaultConfig(), db, gen, zerolog.New(zerolog.NewTestWriter(t)), mockExec) require.NoError(t, err) diff --git a/pkg/sequencers/single/sequencer.go b/pkg/sequencers/single/sequencer.go index 16cdf12b92..44bbb5b311 100644 --- a/pkg/sequencers/single/sequencer.go +++ b/pkg/sequencers/single/sequencer.go @@ -130,7 +130,7 @@ func NewSequencer( // Determine initial DA height for forced inclusion initialDAHeight := s.getInitialDAStartHeight(context.Background()) - s.fiRetriever = block.NewForcedInclusionRetriever(daClient, cfg, logger, initialDAHeight, genesis.DAEpochForcedInclusion) + s.fiRetriever = block.NewForcedInclusionRetriever(context.Background(), daClient, cfg, logger, initialDAHeight, genesis.DAEpochForcedInclusion) return s, nil } @@ -202,7 +202,7 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB if c.fiRetriever != nil { c.fiRetriever.Stop() } - c.fiRetriever = block.NewForcedInclusionRetriever(c.daClient, c.cfg, c.logger, c.getInitialDAStartHeight(ctx), c.genesis.DAEpochForcedInclusion) + c.fiRetriever = block.NewForcedInclusionRetriever(ctx, c.daClient, c.cfg, c.logger, c.getInitialDAStartHeight(ctx), c.genesis.DAEpochForcedInclusion) } // If we have no cached transactions or we've consumed all from the current epoch, diff --git a/pkg/sequencers/single/sequencer_test.go b/pkg/sequencers/single/sequencer_test.go index b0bbe247c5..6392694ef5 100644 --- a/pkg/sequencers/single/sequencer_test.go +++ 
b/pkg/sequencers/single/sequencer_test.go @@ -380,6 +380,7 @@ func TestSequencer_GetNextBatch_ForcedInclusionAndBatch_MaxBytes(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 100 — same as sequencer start, no catch-up needed mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Maybe() @@ -471,6 +472,7 @@ func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 100 — same as sequencer start, no catch-up needed mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Maybe() @@ -554,6 +556,7 @@ func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 100 — same as sequencer start, no catch-up needed mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Maybe() @@ -895,6 
+898,7 @@ func TestSequencer_CheckpointPersistence_CrashRecovery(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 101 — close to sequencer start (100), no catch-up needed. // Use Maybe() since two sequencer instances share this mock. @@ -998,6 +1002,7 @@ func TestSequencer_GetNextBatch_EmptyDABatch_IncreasesDAHeight(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 100 — same as sequencer start, no catch-up needed mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Maybe() @@ -1091,6 +1096,7 @@ func TestSequencer_GetNextBatch_WithGasFiltering(t *testing.T) { mockDA.MockClient.On("GetBlobsAtHeight", mock.Anything, mock.Anything, mock.Anything). 
Return(forcedTxs, nil).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return([]byte("forced")).Maybe() mockDA.MockClient.On("MaxBlobSize", mock.Anything).Return(uint64(1000000), nil).Maybe() @@ -1190,6 +1196,7 @@ func TestSequencer_GetNextBatch_GasFilterError(t *testing.T) { mockDA := newMockFullDAClient(t) mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return([]byte("forced")).Maybe() mockDA.MockClient.On("MaxBlobSize", mock.Anything).Return(uint64(1000000), nil).Maybe() @@ -1254,6 +1261,7 @@ func TestSequencer_CatchUp_DetectsOldEpoch(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at height 105 — sequencer starts at 100 with epoch size 1, // so it has missed 5 epochs (>1), triggering catch-up. 
@@ -1324,6 +1332,7 @@ func TestSequencer_CatchUp_SkipsMempoolDuringCatchUp(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 105 — sequencer starts at 100 with epoch size 1, // so it has missed multiple epochs, triggering catch-up. @@ -1416,6 +1425,7 @@ func TestSequencer_CatchUp_UsesDATimestamp(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 105 — multiple epochs ahead, triggers catch-up mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() @@ -1475,6 +1485,7 @@ func TestSequencer_CatchUp_ExitsCatchUpAtDAHead(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 105 — multiple epochs ahead, triggers catch-up mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() @@ -1556,6 +1567,7 @@ func TestSequencer_CatchUp_HeightFromFutureExitsCatchUp(t *testing.T) { 
mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 105 — multiple epochs ahead, triggers catch-up mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() @@ -1622,6 +1634,7 @@ func TestSequencer_CatchUp_NoCatchUpWhenRecentEpoch(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 100 — sequencer starts at 100 with epoch size 1, // so it is within the same epoch (0 missed). No catch-up. @@ -1688,6 +1701,7 @@ func TestSequencer_CatchUp_MultiEpochReplay(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 106 — sequencer starts at 100 with epoch size 1, // so it has missed 6 epochs (>1), triggering catch-up. 
@@ -1839,6 +1853,7 @@ func TestSequencer_CatchUp_CheckpointAdvancesDuringCatchUp(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 105 — multiple epochs ahead, triggers catch-up mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() @@ -1931,6 +1946,7 @@ func TestSequencer_CatchUp_MonotonicTimestamps(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is far ahead — triggers catch-up mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(110), nil).Once() @@ -2057,6 +2073,7 @@ func TestSequencer_CatchUp_MonotonicTimestamps_EmptyEpoch(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(110), nil).Once() @@ -2133,6 +2150,7 @@ func TestSequencer_GetNextBatch_GasFilteringPreservesUnprocessedTxs(t *testing.T mockDA := newMockFullDAClient(t) 
mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return([]byte("forced")).Maybe() mockDA.MockClient.On("MaxBlobSize", mock.Anything).Return(uint64(1000000), nil).Maybe() From a878f8de4d26c133f85b2114a0db1c78dc5da3ff Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Mon, 9 Mar 2026 12:44:54 +0100 Subject: [PATCH 02/10] Fix compile errors in tests after rebase --- block/internal/syncing/da_retriever_strict_test.go | 6 +++--- block/internal/syncing/syncer_test.go | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/block/internal/syncing/da_retriever_strict_test.go b/block/internal/syncing/da_retriever_strict_test.go index a6d5140d97..af1046dba8 100644 --- a/block/internal/syncing/da_retriever_strict_test.go +++ b/block/internal/syncing/da_retriever_strict_test.go @@ -70,21 +70,21 @@ func TestDARetriever_StrictEnvelopeMode_Switch(t *testing.T) { // --- Test Scenario --- // A. Initial State: StrictMode is false. Legacy blob should be accepted. - assert.False(t, r.strictMode.Load()) + assert.False(t, r.strictMode) decodedLegacy := r.tryDecodeHeader(legacyBlob, 100) require.NotNil(t, decodedLegacy) assert.Equal(t, uint64(1), decodedLegacy.Height()) // StrictMode should still be false because it was a legacy blob - assert.False(t, r.strictMode.Load()) + assert.False(t, r.strictMode) // B. Receiving Envelope: Should be accepted and Switch StrictMode to true. decodedEnvelope := r.tryDecodeHeader(envelopeBlob, 101) require.NotNil(t, decodedEnvelope) assert.Equal(t, uint64(2), decodedEnvelope.Height()) - assert.True(t, r.strictMode.Load(), "retriever should have switched to strict mode") + assert.True(t, r.strictMode, "retriever should have switched to strict mode") // C. Receiving Legacy again: Should be REJECTED now. 
// We reuse the same legacyBlob (or a new one, doesn't matter, structure is legacy). diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 757d4fb283..537b3e8ad9 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -271,13 +271,13 @@ func TestSequentialBlockSync(t *testing.T) { assert.Equal(t, uint64(2), finalState.LastBlockHeight) // Verify DA inclusion markers are set - _, ok := cm.GetHeaderDAIncluded(hdr1.Hash().String()) + _, ok := cm.GetHeaderDAIncludedByHash(hdr1.Hash().String()) assert.True(t, ok) - _, ok = cm.GetHeaderDAIncluded(hdr2.Hash().String()) + _, ok = cm.GetHeaderDAIncludedByHash(hdr2.Hash().String()) assert.True(t, ok) - _, ok = cm.GetDataDAIncluded(data1.DACommitment().String()) + _, ok = cm.GetDataDAIncludedByHash(data1.DACommitment().String()) assert.True(t, ok) - _, ok = cm.GetDataDAIncluded(data2.DACommitment().String()) + _, ok = cm.GetDataDAIncludedByHash(data2.DACommitment().String()) assert.True(t, ok) requireEmptyChan(t, errChan) } From f20b7aecfb772cfaf9885a5ef88beb8125ddf1a9 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Mon, 9 Mar 2026 16:00:37 +0100 Subject: [PATCH 03/10] Include timestamp for subscription events --- apps/evm/server/force_inclusion_test.go | 2 +- block/internal/da/async_block_retriever.go | 43 ++++---- .../internal/da/async_block_retriever_test.go | 20 ++-- block/internal/da/client.go | 23 +++- .../da/forced_inclusion_retriever_test.go | 18 ++-- block/internal/da/interface.go | 3 +- block/internal/da/subscriber.go | 32 +++--- block/internal/da/tracing.go | 4 +- block/internal/da/tracing_test.go | 6 +- block/internal/syncing/da_retriever_mock.go | 102 +++++++++--------- .../syncing/syncer_forced_inclusion_test.go | 4 +- pkg/da/jsonrpc/types.go | 5 +- pkg/da/types/types.go | 3 + pkg/sequencers/based/sequencer_test.go | 12 +-- pkg/sequencers/single/sequencer_test.go | 36 +++---- test/mocks/da.go | 30 +++--- test/testda/dummy.go | 2 
+- 17 files changed, 184 insertions(+), 161 deletions(-) diff --git a/apps/evm/server/force_inclusion_test.go b/apps/evm/server/force_inclusion_test.go index 6be6e6ca76..a107a10d11 100644 --- a/apps/evm/server/force_inclusion_test.go +++ b/apps/evm/server/force_inclusion_test.go @@ -50,7 +50,7 @@ func (m *mockDA) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.B return nil, nil } -func (m *mockDA) Subscribe(_ context.Context, _ []byte) (<-chan da.SubscriptionEvent, error) { +func (m *mockDA) Subscribe(_ context.Context, _ []byte, _ bool) (<-chan da.SubscriptionEvent, error) { // Not needed in these tests; return a closed channel. ch := make(chan da.SubscriptionEvent) close(ch) diff --git a/block/internal/da/async_block_retriever.go b/block/internal/da/async_block_retriever.go index a0dcf7a578..6789051514 100644 --- a/block/internal/da/async_block_retriever.go +++ b/block/internal/da/async_block_retriever.go @@ -79,11 +79,12 @@ func NewAsyncBlockRetriever( } f.subscriber = NewSubscriber(SubscriberConfig{ - Client: client, - Logger: logger, - Namespaces: namespaces, - DABlockTime: daBlockTime, - Handler: f, + Client: client, + Logger: logger, + Namespaces: namespaces, + DABlockTime: daBlockTime, + Handler: f, + FetchBlockTimestamp: true, }) f.subscriber.SetStartHeight(daStartHeight) @@ -118,7 +119,7 @@ func (f *asyncBlockRetriever) UpdateCurrentHeight(height uint64) { f.logger.Debug(). Uint64("new_height", height). Msg("updated current DA height") - f.cleanupOldBlocks(height) + f.cleanupOldBlocks(context.Background(), height) return } } @@ -167,14 +168,10 @@ func (f *asyncBlockRetriever) GetCachedBlock(ctx context.Context, daHeight uint6 return block, nil } -// --------------------------------------------------------------------------- -// SubscriberHandler implementation -// --------------------------------------------------------------------------- - // HandleEvent caches blobs from the subscription inline. 
func (f *asyncBlockRetriever) HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent) { if len(ev.Blobs) > 0 { - f.cacheBlock(ctx, ev.Height, ev.Blobs) + f.cacheBlock(ctx, ev.Height, ev.Timestamp, ev.Blobs) } } @@ -215,7 +212,7 @@ func (f *asyncBlockRetriever) fetchAndCacheBlock(ctx context.Context, height uin f.logger.Debug().Uint64("height", height).Msg("block height not yet available - will retry") return case datypes.StatusNotFound: - f.cacheBlock(ctx, height, nil) + f.cacheBlock(ctx, height, result.Timestamp, nil) case datypes.StatusSuccess: blobs := make([][]byte, 0, len(result.Data)) for _, blob := range result.Data { @@ -223,7 +220,7 @@ func (f *asyncBlockRetriever) fetchAndCacheBlock(ctx context.Context, height uin blobs = append(blobs, blob) } } - f.cacheBlock(ctx, height, blobs) + f.cacheBlock(ctx, height, result.Timestamp, blobs) default: f.logger.Debug(). Uint64("height", height). @@ -233,33 +230,33 @@ func (f *asyncBlockRetriever) fetchAndCacheBlock(ctx context.Context, height uin } // cacheBlock serializes and stores a block in the in-memory cache. 
-func (f *asyncBlockRetriever) cacheBlock(ctx context.Context, height uint64, blobs [][]byte) { +func (f *asyncBlockRetriever) cacheBlock(ctx context.Context, daHeight uint64, daTimestamp time.Time, blobs [][]byte) { if blobs == nil { blobs = [][]byte{} } pbBlock := &pb.BlockData{ - Height: height, - Timestamp: time.Now().UnixNano(), + Height: daHeight, + Timestamp: daTimestamp.UnixNano(), Blobs: blobs, } data, err := proto.Marshal(pbBlock) if err != nil { - f.logger.Error().Err(err).Uint64("height", height).Msg("failed to marshal block for caching") + f.logger.Error().Err(err).Uint64("height", daHeight).Msg("failed to marshal block for caching") return } - key := newBlockDataKey(height) + key := newBlockDataKey(daHeight) if err := f.cache.Put(ctx, key, data); err != nil { - f.logger.Error().Err(err).Uint64("height", height).Msg("failed to cache block") + f.logger.Error().Err(err).Uint64("height", daHeight).Msg("failed to cache block") return } - f.logger.Debug().Uint64("height", height).Int("blob_count", len(blobs)).Msg("cached block") + f.logger.Debug().Uint64("height", daHeight).Int("blob_count", len(blobs)).Msg("cached block") } // cleanupOldBlocks removes blocks older than currentHeight − prefetchWindow. 
-func (f *asyncBlockRetriever) cleanupOldBlocks(currentHeight uint64) { +func (f *asyncBlockRetriever) cleanupOldBlocks(ctx context.Context, currentHeight uint64) { if currentHeight < f.prefetchWindow { return } @@ -267,7 +264,7 @@ func (f *asyncBlockRetriever) cleanupOldBlocks(currentHeight uint64) { cleanupThreshold := currentHeight - f.prefetchWindow query := dsq.Query{Prefix: "/block/"} - results, err := f.cache.Query(context.Background(), query) + results, err := f.cache.Query(ctx, query) if err != nil { f.logger.Debug().Err(err).Msg("failed to query cache for cleanup") return @@ -287,7 +284,7 @@ func (f *asyncBlockRetriever) cleanupOldBlocks(currentHeight uint64) { } if height < cleanupThreshold { - if err := f.cache.Delete(context.Background(), key); err != nil { + if err := f.cache.Delete(ctx, key); err != nil { f.logger.Debug().Err(err).Uint64("height", height).Msg("failed to delete old block from cache") } } diff --git a/block/internal/da/async_block_retriever_test.go b/block/internal/da/async_block_retriever_test.go index d9c986e4e8..1ecaca9a88 100644 --- a/block/internal/da/async_block_retriever_test.go +++ b/block/internal/da/async_block_retriever_test.go @@ -60,7 +60,7 @@ func TestAsyncBlockRetriever_SubscriptionDrivenCaching(t *testing.T) { Blobs: testBlobs, } - client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() + client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() // Catchup loop may call Retrieve for heights beyond 100 — stub those. client.On("Retrieve", mock.Anything, mock.Anything, fiNs).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, @@ -68,7 +68,7 @@ func TestAsyncBlockRetriever_SubscriptionDrivenCaching(t *testing.T) { // On second subscribe (after watchdog timeout) just block forever. 
blockCh := make(chan datypes.SubscriptionEvent) - client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() + client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() logger := zerolog.Nop() fetcher := NewAsyncBlockRetriever(client, logger, fiNs, 200*time.Millisecond, 100, 5) @@ -109,9 +109,9 @@ func TestAsyncBlockRetriever_CatchupFillsGaps(t *testing.T) { subCh := make(chan datypes.SubscriptionEvent, 1) subCh <- datypes.SubscriptionEvent{Height: 105} - client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() + client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() blockCh := make(chan datypes.SubscriptionEvent) - client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() + client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() // Height 102 has blobs; rest return not found or future. client.On("Retrieve", mock.Anything, uint64(102), fiNs).Return(datypes.ResultRetrieve{ @@ -159,9 +159,9 @@ func TestAsyncBlockRetriever_HeightFromFuture(t *testing.T) { subCh := make(chan datypes.SubscriptionEvent, 1) subCh <- datypes.SubscriptionEvent{Height: 100} - client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() + client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() blockCh := make(chan datypes.SubscriptionEvent) - client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() + client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() // All Retrieve calls return HeightFromFuture. 
client.On("Retrieve", mock.Anything, mock.Anything, fiNs).Return(datypes.ResultRetrieve{ @@ -190,7 +190,7 @@ func TestAsyncBlockRetriever_StopGracefully(t *testing.T) { fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() blockCh := make(chan datypes.SubscriptionEvent) - client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() + client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() logger := zerolog.Nop() fetcher := NewAsyncBlockRetriever(client, logger, fiNs, 100*time.Millisecond, 100, 10) @@ -214,7 +214,7 @@ func TestAsyncBlockRetriever_ReconnectOnSubscriptionError(t *testing.T) { // First subscription closes immediately (simulating error). closedCh := make(chan datypes.SubscriptionEvent) close(closedCh) - client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(closedCh), nil).Once() + client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(closedCh), nil).Once() // Second subscription delivers a blob. subCh := make(chan datypes.SubscriptionEvent, 1) @@ -222,11 +222,11 @@ func TestAsyncBlockRetriever_ReconnectOnSubscriptionError(t *testing.T) { Height: 100, Blobs: testBlobs, } - client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() + client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() // Third+ subscribe returns a blocking channel so it doesn't loop forever. blockCh := make(chan datypes.SubscriptionEvent) - client.On("Subscribe", mock.Anything, fiNs).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() + client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() // Stub Retrieve for catchup. 
client.On("Retrieve", mock.Anything, mock.Anything, fiNs).Return(datypes.ResultRetrieve{ diff --git a/block/internal/da/client.go b/block/internal/da/client.go index bb0f118436..0b30ed6466 100644 --- a/block/internal/da/client.go +++ b/block/internal/da/client.go @@ -355,7 +355,10 @@ func (c *client) HasForcedInclusionNamespace() bool { // DA block containing a matching blob. The channel is closed when ctx is // cancelled. The caller must drain the channel after cancellation to avoid // goroutine leaks. -func (c *client) Subscribe(ctx context.Context, namespace []byte) (<-chan datypes.SubscriptionEvent, error) { +// Timestamps are included from the header if available (celestia-node v0.29.1+), otherwise +// fetched via a separate call when includeTimestamp is true. Be aware that fetching timestamps +// separately is an additional call to the celestia node for each event. +func (c *client) Subscribe(ctx context.Context, namespace []byte, includeTimestamp bool) (<-chan datypes.SubscriptionEvent, error) { ns, err := share.NewNamespaceFromBytes(namespace) if err != nil { return nil, fmt.Errorf("invalid namespace: %w", err) } @@ -380,10 +383,24 @@ func (c *client) Subscribe(ctx context.Context, namespace []byte) (<-chan datype if resp == nil { continue } + var blockTime time.Time + // Use header time if available (celestia-node v0.21.0+) + if resp.Header != nil && !resp.Header.Time.IsZero() { + blockTime = resp.Header.Time + } else if includeTimestamp { + // Fallback to fetching timestamp for older nodes + blockTime, err = c.getBlockTimestamp(ctx, resp.Height) + if err != nil { + c.logger.Error().Uint64("height", resp.Height).Err(err).Msg("failed to get DA block timestamp for subscription event") + blockTime = time.Now() + // TODO: we should retry fetching the timestamp. Current time may mess up block time consistency for based sequencers.
+ } + } select { case out <- datypes.SubscriptionEvent{ - Height: resp.Height, - Blobs: extractBlobData(resp), + Height: resp.Height, + Timestamp: blockTime, + Blobs: extractBlobData(resp), }: case <-ctx.Done(): return diff --git a/block/internal/da/forced_inclusion_retriever_test.go b/block/internal/da/forced_inclusion_retriever_test.go index 47864d7985..09a5e5b077 100644 --- a/block/internal/da/forced_inclusion_retriever_test.go +++ b/block/internal/da/forced_inclusion_retriever_test.go @@ -17,7 +17,7 @@ import ( func TestNewForcedInclusionRetriever(t *testing.T) { client := mocks.NewMockClient(t) client.On("GetForcedInclusionNamespace").Return(datypes.NamespaceFromString("test-fi-ns").Bytes()).Maybe() - client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() gen := genesis.Genesis{ DAStartHeight: 100, @@ -53,7 +53,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NotAtEpochStart(t *t fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() - client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() gen := genesis.Genesis{ DAStartHeight: 100, @@ -84,7 +84,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartSuccess(t fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() - 
client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() client.On("Retrieve", mock.Anything, mock.Anything, fiNs).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: []datypes.ID{[]byte("id1"), []byte("id2"), []byte("id3")}, Timestamp: time.Now()}, Data: testBlobs, @@ -114,7 +114,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartNotAvailab fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() - client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // Mock the first height in epoch as not available client.On("Retrieve", mock.Anything, uint64(100), fiNs).Return(datypes.ResultRetrieve{ @@ -141,7 +141,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoBlobsAtHeight(t *t fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() - client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() client.On("Retrieve", mock.Anything, uint64(100), fiNs).Return(datypes.ResultRetrieve{ BaseResult: 
datypes.BaseResult{Code: datypes.StatusNotFound}, }).Once() @@ -172,7 +172,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_MultiHeightEpoch(t * fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() - client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() client.On("Retrieve", mock.Anything, uint64(102), fiNs).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now()}, Data: testBlobsByHeight[102], @@ -213,7 +213,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_ErrorHandling(t *tes fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() - client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() client.On("Retrieve", mock.Anything, uint64(100), fiNs).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{ Code: datypes.StatusError, @@ -242,7 +242,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EmptyBlobsSkipped(t fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() - client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan 
datypes.SubscriptionEvent)), nil).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() client.On("Retrieve", mock.Anything, uint64(100), fiNs).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now()}, Data: [][]byte{[]byte("tx1"), {}, []byte("tx2"), nil, []byte("tx3")}, @@ -279,7 +279,7 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_OrderPreserved(t *te fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() client.On("HasForcedInclusionNamespace").Return(true).Maybe() client.On("GetForcedInclusionNamespace").Return(fiNs).Maybe() - client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // Return heights out of order to test ordering is preserved client.On("Retrieve", mock.Anything, uint64(102), fiNs).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now()}, diff --git a/block/internal/da/interface.go b/block/internal/da/interface.go index 3cc0677580..812d12847b 100644 --- a/block/internal/da/interface.go +++ b/block/internal/da/interface.go @@ -20,7 +20,8 @@ type Client interface { // Subscribe returns a channel that emits one SubscriptionEvent per DA block // that contains a blob in the given namespace. The channel is closed when ctx // is cancelled. Callers MUST drain the channel after cancellation. 
- Subscribe(ctx context.Context, namespace []byte) (<-chan datypes.SubscriptionEvent, error) + // The fetchTimestamp param is going to be removed with https://github.com/evstack/ev-node/issues/3142 as the timestamp is going to be included by default + Subscribe(ctx context.Context, namespace []byte, fetchTimestamp bool) (<-chan datypes.SubscriptionEvent, error) // GetLatestDAHeight returns the latest height available on the DA layer. GetLatestDAHeight(ctx context.Context) (uint64, error) diff --git a/block/internal/da/subscriber.go b/block/internal/da/subscriber.go index ffeb5141c9..779ff559f3 100644 --- a/block/internal/da/subscriber.go +++ b/block/internal/da/subscriber.go @@ -34,6 +34,8 @@ type SubscriberConfig struct { Namespaces [][]byte // subscribe to all, merge into one channel DABlockTime time.Duration Handler SubscriberHandler + // Deprecated: Remove with https://github.com/evstack/ev-node/issues/3142 + FetchBlockTimestamp bool // the timestamp comes with an extra api call before Celestia v0.29.1-mocha. } // Subscriber is a shared DA subscription primitive that encapsulates the @@ -66,6 +68,9 @@ type Subscriber struct { // daBlockTime used as backoff and watchdog base. daBlockTime time.Duration + // Deprecated: Remove with https://github.com/evstack/ev-node/issues/3142 + fetchBlockTimestamp bool + // lifecycle cancel context.CancelFunc wg sync.WaitGroup @@ -74,12 +79,13 @@ type Subscriber struct { // NewSubscriber creates a new Subscriber. 
func NewSubscriber(cfg SubscriberConfig) *Subscriber { s := &Subscriber{ - client: cfg.Client, - logger: cfg.Logger, - handler: cfg.Handler, - namespaces: cfg.Namespaces, - catchupSignal: make(chan struct{}, 1), - daBlockTime: cfg.DABlockTime, + client: cfg.Client, + logger: cfg.Logger, + handler: cfg.Handler, + namespaces: cfg.Namespaces, + catchupSignal: make(chan struct{}, 1), + daBlockTime: cfg.DABlockTime, + fetchBlockTimestamp: cfg.FetchBlockTimestamp, } return s } @@ -92,7 +98,7 @@ func (s *Subscriber) SetStartHeight(height uint64) { // Start begins the follow and catchup goroutines. func (s *Subscriber) Start(ctx context.Context) error { if len(s.namespaces) == 0 { - return nil // Nothing to subscribe to. + return errors.New("no namespaces configured") } ctx, s.cancel = context.WithCancel(ctx) @@ -227,7 +233,7 @@ func (s *Subscriber) subscribe(ctx context.Context) (<-chan datypes.Subscription } // Subscribe to the first namespace. - ch, err := s.client.Subscribe(ctx, s.namespaces[0]) + ch, err := s.client.Subscribe(ctx, s.namespaces[0], s.fetchBlockTimestamp) if err != nil { return nil, fmt.Errorf("subscribe namespace 0: %w", err) } @@ -237,7 +243,7 @@ func (s *Subscriber) subscribe(ctx context.Context) (<-chan datypes.Subscription if bytes.Equal(s.namespaces[i], s.namespaces[0]) { continue // Same namespace, skip duplicate. } - ch2, err := s.client.Subscribe(ctx, s.namespaces[i]) + ch2, err := s.client.Subscribe(ctx, s.namespaces[i], s.fetchBlockTimestamp) if err != nil { return nil, fmt.Errorf("subscribe namespace %d: %w", i, err) } @@ -296,10 +302,6 @@ func (s *Subscriber) updateHighest(height uint64) { } } -// --------------------------------------------------------------------------- -// Catchup loop -// --------------------------------------------------------------------------- - // catchupLoop waits for signals and sequentially catches up. 
func (s *Subscriber) catchupLoop(ctx context.Context) { defer s.wg.Done() @@ -370,10 +372,6 @@ func (s *Subscriber) waitOnCatchupError(ctx context.Context, err error, daHeight } } -// --------------------------------------------------------------------------- -// Timing helpers -// --------------------------------------------------------------------------- - func (s *Subscriber) backoff() time.Duration { if s.daBlockTime > 0 { return s.daBlockTime diff --git a/block/internal/da/tracing.go b/block/internal/da/tracing.go index 161293c2c3..3da79feddb 100644 --- a/block/internal/da/tracing.go +++ b/block/internal/da/tracing.go @@ -145,8 +145,8 @@ func (t *tracedClient) GetForcedInclusionNamespace() []byte { func (t *tracedClient) HasForcedInclusionNamespace() bool { return t.inner.HasForcedInclusionNamespace() } -func (t *tracedClient) Subscribe(ctx context.Context, namespace []byte) (<-chan datypes.SubscriptionEvent, error) { - return t.inner.Subscribe(ctx, namespace) +func (t *tracedClient) Subscribe(ctx context.Context, namespace []byte, includeTimestamp bool) (<-chan datypes.SubscriptionEvent, error) { + return t.inner.Subscribe(ctx, namespace, includeTimestamp) } type submitError struct{ msg string } diff --git a/block/internal/da/tracing_test.go b/block/internal/da/tracing_test.go index e20bf3b84b..fc1ae72f96 100644 --- a/block/internal/da/tracing_test.go +++ b/block/internal/da/tracing_test.go @@ -22,14 +22,14 @@ type mockFullClient struct { getFn func(ctx context.Context, ids []datypes.ID, namespace []byte) ([]datypes.Blob, error) getProofsFn func(ctx context.Context, ids []datypes.ID, namespace []byte) ([]datypes.Proof, error) validateFn func(ctx context.Context, ids []datypes.ID, proofs []datypes.Proof, namespace []byte) ([]bool, error) - subscribeFn func(ctx context.Context, namespace []byte) (<-chan datypes.SubscriptionEvent, error) + subscribeFn func(ctx context.Context, namespace []byte, ts bool) (<-chan datypes.SubscriptionEvent, error) } -func (m 
*mockFullClient) Subscribe(ctx context.Context, namespace []byte) (<-chan datypes.SubscriptionEvent, error) { +func (m *mockFullClient) Subscribe(ctx context.Context, namespace []byte, ts bool) (<-chan datypes.SubscriptionEvent, error) { if m.subscribeFn == nil { panic("not expected to be called") } - return m.subscribeFn(ctx, namespace) + return m.subscribeFn(ctx, namespace, ts) } func (m *mockFullClient) Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) datypes.ResultSubmit { diff --git a/block/internal/syncing/da_retriever_mock.go b/block/internal/syncing/da_retriever_mock.go index 7e8cc47be1..10e08bbd90 100644 --- a/block/internal/syncing/da_retriever_mock.go +++ b/block/internal/syncing/da_retriever_mock.go @@ -38,135 +38,135 @@ func (_m *MockDARetriever) EXPECT() *MockDARetriever_Expecter { return &MockDARetriever_Expecter{mock: &_m.Mock} } -// RetrieveFromDA provides a mock function for the type MockDARetriever -func (_mock *MockDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { - ret := _mock.Called(ctx, daHeight) +// ProcessBlobs provides a mock function for the type MockDARetriever +func (_mock *MockDARetriever) ProcessBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { + ret := _mock.Called(ctx, blobs, daHeight) if len(ret) == 0 { - panic("no return value specified for RetrieveFromDA") + panic("no return value specified for ProcessBlobs") } var r0 []common.DAHeightEvent - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) ([]common.DAHeightEvent, error)); ok { - return returnFunc(ctx, daHeight) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) []common.DAHeightEvent); ok { - r0 = returnFunc(ctx, daHeight) + if returnFunc, ok := ret.Get(0).(func(context.Context, [][]byte, uint64) []common.DAHeightEvent); ok { + r0 = returnFunc(ctx, blobs, daHeight) } else { if ret.Get(0) != nil { r0 = 
ret.Get(0).([]common.DAHeightEvent) } } - if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { - r1 = returnFunc(ctx, daHeight) - } else { - r1 = ret.Error(1) - } - return r0, r1 + return r0 } -// MockDARetriever_RetrieveFromDA_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveFromDA' -type MockDARetriever_RetrieveFromDA_Call struct { +// MockDARetriever_ProcessBlobs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBlobs' +type MockDARetriever_ProcessBlobs_Call struct { *mock.Call } -// RetrieveFromDA is a helper method to define mock.On call +// ProcessBlobs is a helper method to define mock.On call // - ctx context.Context +// - blobs [][]byte // - daHeight uint64 -func (_e *MockDARetriever_Expecter) RetrieveFromDA(ctx interface{}, daHeight interface{}) *MockDARetriever_RetrieveFromDA_Call { - return &MockDARetriever_RetrieveFromDA_Call{Call: _e.mock.On("RetrieveFromDA", ctx, daHeight)} +func (_e *MockDARetriever_Expecter) ProcessBlobs(ctx interface{}, blobs interface{}, daHeight interface{}) *MockDARetriever_ProcessBlobs_Call { + return &MockDARetriever_ProcessBlobs_Call{Call: _e.mock.On("ProcessBlobs", ctx, blobs, daHeight)} } -func (_c *MockDARetriever_RetrieveFromDA_Call) Run(run func(ctx context.Context, daHeight uint64)) *MockDARetriever_RetrieveFromDA_Call { +func (_c *MockDARetriever_ProcessBlobs_Call) Run(run func(ctx context.Context, blobs [][]byte, daHeight uint64)) *MockDARetriever_ProcessBlobs_Call { _c.Call.Run(func(args mock.Arguments) { var arg0 context.Context if args[0] != nil { arg0 = args[0].(context.Context) } - var arg1 uint64 + var arg1 [][]byte if args[1] != nil { - arg1 = args[1].(uint64) + arg1 = args[1].([][]byte) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) } run( arg0, arg1, + arg2, ) }) return _c } -func (_c *MockDARetriever_RetrieveFromDA_Call) Return(dAHeightEvents 
[]common.DAHeightEvent, err error) *MockDARetriever_RetrieveFromDA_Call { - _c.Call.Return(dAHeightEvents, err) +func (_c *MockDARetriever_ProcessBlobs_Call) Return(dAHeightEvents []common.DAHeightEvent) *MockDARetriever_ProcessBlobs_Call { + _c.Call.Return(dAHeightEvents) return _c } -func (_c *MockDARetriever_RetrieveFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error)) *MockDARetriever_RetrieveFromDA_Call { +func (_c *MockDARetriever_ProcessBlobs_Call) RunAndReturn(run func(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent) *MockDARetriever_ProcessBlobs_Call { _c.Call.Return(run) return _c } -// ProcessBlobs provides a mock function for the type MockDARetriever -func (_mock *MockDARetriever) ProcessBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { - ret := _mock.Called(ctx, blobs, daHeight) +// RetrieveFromDA provides a mock function for the type MockDARetriever +func (_mock *MockDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { + ret := _mock.Called(ctx, daHeight) if len(ret) == 0 { - panic("no return value specified for ProcessBlobs") + panic("no return value specified for RetrieveFromDA") } var r0 []common.DAHeightEvent - if returnFunc, ok := ret.Get(0).(func(context.Context, [][]byte, uint64) []common.DAHeightEvent); ok { - r0 = returnFunc(ctx, blobs, daHeight) + var r1 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) ([]common.DAHeightEvent, error)); ok { + return returnFunc(ctx, daHeight) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) []common.DAHeightEvent); ok { + r0 = returnFunc(ctx, daHeight) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]common.DAHeightEvent) } } - return r0 + if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = returnFunc(ctx, daHeight) + } else { + r1 = ret.Error(1) + } + return r0, r1 } -// 
MockDARetriever_ProcessBlobs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBlobs' -type MockDARetriever_ProcessBlobs_Call struct { +// MockDARetriever_RetrieveFromDA_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveFromDA' +type MockDARetriever_RetrieveFromDA_Call struct { *mock.Call } -// ProcessBlobs is a helper method to define mock.On call +// RetrieveFromDA is a helper method to define mock.On call // - ctx context.Context -// - blobs [][]byte // - daHeight uint64 -func (_e *MockDARetriever_Expecter) ProcessBlobs(ctx interface{}, blobs interface{}, daHeight interface{}) *MockDARetriever_ProcessBlobs_Call { - return &MockDARetriever_ProcessBlobs_Call{Call: _e.mock.On("ProcessBlobs", ctx, blobs, daHeight)} +func (_e *MockDARetriever_Expecter) RetrieveFromDA(ctx interface{}, daHeight interface{}) *MockDARetriever_RetrieveFromDA_Call { + return &MockDARetriever_RetrieveFromDA_Call{Call: _e.mock.On("RetrieveFromDA", ctx, daHeight)} } -func (_c *MockDARetriever_ProcessBlobs_Call) Run(run func(ctx context.Context, blobs [][]byte, daHeight uint64)) *MockDARetriever_ProcessBlobs_Call { +func (_c *MockDARetriever_RetrieveFromDA_Call) Run(run func(ctx context.Context, daHeight uint64)) *MockDARetriever_RetrieveFromDA_Call { _c.Call.Run(func(args mock.Arguments) { var arg0 context.Context if args[0] != nil { arg0 = args[0].(context.Context) } - var arg1 [][]byte + var arg1 uint64 if args[1] != nil { - arg1 = args[1].([][]byte) - } - var arg2 uint64 - if args[2] != nil { - arg2 = args[2].(uint64) + arg1 = args[1].(uint64) } run( arg0, arg1, - arg2, ) }) return _c } -func (_c *MockDARetriever_ProcessBlobs_Call) Return(dAHeightEvents []common.DAHeightEvent) *MockDARetriever_ProcessBlobs_Call { - _c.Call.Return(dAHeightEvents) +func (_c *MockDARetriever_RetrieveFromDA_Call) Return(dAHeightEvents []common.DAHeightEvent, err error) *MockDARetriever_RetrieveFromDA_Call { + 
_c.Call.Return(dAHeightEvents, err) return _c } -func (_c *MockDARetriever_ProcessBlobs_Call) RunAndReturn(run func(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent) *MockDARetriever_ProcessBlobs_Call { +func (_c *MockDARetriever_RetrieveFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error)) *MockDARetriever_RetrieveFromDA_Call { _c.Call.Return(run) return _c } diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 110fa05609..daba0322dd 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -75,7 +75,7 @@ func newForcedInclusionSyncer(t *testing.T, daStart, epochSize uint64) (*Syncer, client.On("GetForcedInclusionNamespace").Return([]byte(cfg.DA.ForcedInclusionNamespace)).Maybe() client.On("HasForcedInclusionNamespace").Return(true).Maybe() subCh := make(chan datypes.SubscriptionEvent) - client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Maybe() daRetriever := NewDARetriever(client, cm, gen, zerolog.Nop()) fiRetriever := da.NewForcedInclusionRetriever(t.Context(), client, zerolog.Nop(), cfg.DA.BlockTime.Duration, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) @@ -148,7 +148,7 @@ func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { client.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() client.On("HasForcedInclusionNamespace").Return(false).Maybe() subCh := make(chan datypes.SubscriptionEvent) - client.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Maybe() + client.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan 
datypes.SubscriptionEvent)(subCh), nil).Maybe() fiRetriever := da.NewForcedInclusionRetriever(t.Context(), client, zerolog.Nop(), cfg.DA.BlockTime.Duration, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) t.Cleanup(fiRetriever.Stop) diff --git a/pkg/da/jsonrpc/types.go b/pkg/da/jsonrpc/types.go index b7377e950e..daecb9ec71 100644 --- a/pkg/da/jsonrpc/types.go +++ b/pkg/da/jsonrpc/types.go @@ -8,6 +8,7 @@ type CommitmentProof struct { // SubscriptionResponse mirrors celestia-node's blob.SubscriptionResponse. type SubscriptionResponse struct { - Blobs []*Blob `json:"blobs"` - Height uint64 `json:"height"` + Blobs []*Blob `json:"blobs"` + Height uint64 `json:"height"` + Header *RawHeader `json:"header,omitempty"` // Available in celestia-node v0.29.1+ } diff --git a/pkg/da/types/types.go b/pkg/da/types/types.go index 8c11ce5cb5..24bcceea84 100644 --- a/pkg/da/types/types.go +++ b/pkg/da/types/types.go @@ -88,6 +88,9 @@ func SplitID(id []byte) (uint64, []byte, error) { type SubscriptionEvent struct { // Height is the DA layer height at which the blob was finalized. Height uint64 + // Timestamp is the DA block timestamp at the given height. + // This is an optional field that is not set by default. Enable via the fetchTimestamp parameter of Subscribe. + Timestamp time.Time // Blobs contains the raw blob data from the subscription response. // When non-nil, followers can process blobs inline without re-fetching from DA.
Blobs [][]byte diff --git a/pkg/sequencers/based/sequencer_test.go b/pkg/sequencers/based/sequencer_test.go index 7c65b8ad0a..b4690d527d 100644 --- a/pkg/sequencers/based/sequencer_test.go +++ b/pkg/sequencers/based/sequencer_test.go @@ -71,7 +71,7 @@ func createTestSequencer(t *testing.T, mockRetriever *common.MockForcedInclusion // Mock the forced inclusion namespace call mockDAClient.MockClient.On("GetForcedInclusionNamespace").Return([]byte("test-forced-inclusion-ns")).Maybe() mockDAClient.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDAClient.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDAClient.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() mockExec := createDefaultMockExecutor(t) @@ -470,7 +470,7 @@ func TestBasedSequencer_CheckpointPersistence(t *testing.T) { } mockDAClient.MockClient.On("GetForcedInclusionNamespace").Return([]byte("test-forced-inclusion-ns")).Maybe() mockDAClient.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDAClient.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDAClient.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // Create first sequencer mockExec1 := createDefaultMockExecutor(t) @@ -498,7 +498,7 @@ func TestBasedSequencer_CheckpointPersistence(t *testing.T) { } mockDAClient2.MockClient.On("GetForcedInclusionNamespace").Return([]byte("test-forced-inclusion-ns")).Maybe() mockDAClient2.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDAClient2.MockClient.On("Subscribe", mock.Anything, 
mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDAClient2.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() mockExec2 := createDefaultMockExecutor(t) seq2, err := NewBasedSequencer(mockDAClient2, config.DefaultConfig(), db, gen, zerolog.Nop(), mockExec2) require.NoError(t, err) @@ -545,7 +545,7 @@ func TestBasedSequencer_CrashRecoveryMidEpoch(t *testing.T) { mockDAClient.MockClient.On("GetForcedInclusionNamespace").Return([]byte("test-forced-inclusion-ns")).Maybe() mockDAClient.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDAClient.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDAClient.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // Create mock executor that postpones tx2 on first call filterCallCount := 0 @@ -606,7 +606,7 @@ func TestBasedSequencer_CrashRecoveryMidEpoch(t *testing.T) { } mockDAClient2.MockClient.On("GetForcedInclusionNamespace").Return([]byte("test-forced-inclusion-ns")).Maybe() mockDAClient2.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDAClient2.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDAClient2.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() seq2, err := NewBasedSequencer(mockDAClient2, config.DefaultConfig(), db, gen, zerolog.Nop(), mockExec) require.NoError(t, err) @@ -932,7 +932,7 @@ func 
TestBasedSequencer_GetNextBatch_GasFilteringPreservesUnprocessedTxs(t *test } mockDAClient.MockClient.On("GetForcedInclusionNamespace").Return([]byte("test-forced-inclusion-ns")).Maybe() mockDAClient.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDAClient.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDAClient.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() seq, err := NewBasedSequencer(mockDAClient, config.DefaultConfig(), db, gen, zerolog.New(zerolog.NewTestWriter(t)), mockExec) require.NoError(t, err) diff --git a/pkg/sequencers/single/sequencer_test.go b/pkg/sequencers/single/sequencer_test.go index 6392694ef5..0a393d4275 100644 --- a/pkg/sequencers/single/sequencer_test.go +++ b/pkg/sequencers/single/sequencer_test.go @@ -380,7 +380,7 @@ func TestSequencer_GetNextBatch_ForcedInclusionAndBatch_MaxBytes(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 100 — same as sequencer start, no catch-up needed mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Maybe() @@ -472,7 +472,7 @@ func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() 
mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 100 — same as sequencer start, no catch-up needed mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Maybe() @@ -556,7 +556,7 @@ func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 100 — same as sequencer start, no catch-up needed mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Maybe() @@ -898,7 +898,7 @@ func TestSequencer_CheckpointPersistence_CrashRecovery(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", 
mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 101 — close to sequencer start (100), no catch-up needed. // Use Maybe() since two sequencer instances share this mock. @@ -1002,7 +1002,7 @@ func TestSequencer_GetNextBatch_EmptyDABatch_IncreasesDAHeight(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 100 — same as sequencer start, no catch-up needed mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Maybe() @@ -1096,7 +1096,7 @@ func TestSequencer_GetNextBatch_WithGasFiltering(t *testing.T) { mockDA.MockClient.On("GetBlobsAtHeight", mock.Anything, mock.Anything, mock.Anything). 
Return(forcedTxs, nil).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return([]byte("forced")).Maybe() mockDA.MockClient.On("MaxBlobSize", mock.Anything).Return(uint64(1000000), nil).Maybe() @@ -1196,7 +1196,7 @@ func TestSequencer_GetNextBatch_GasFilterError(t *testing.T) { mockDA := newMockFullDAClient(t) mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return([]byte("forced")).Maybe() mockDA.MockClient.On("MaxBlobSize", mock.Anything).Return(uint64(1000000), nil).Maybe() @@ -1261,7 +1261,7 @@ func TestSequencer_CatchUp_DetectsOldEpoch(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at height 105 — sequencer 
starts at 100 with epoch size 1, // so it has missed 5 epochs (>1), triggering catch-up. @@ -1332,7 +1332,7 @@ func TestSequencer_CatchUp_SkipsMempoolDuringCatchUp(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 105 — sequencer starts at 100 with epoch size 1, // so it has missed multiple epochs, triggering catch-up. @@ -1425,7 +1425,7 @@ func TestSequencer_CatchUp_UsesDATimestamp(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 105 — multiple epochs ahead, triggers catch-up mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() @@ -1485,7 +1485,7 @@ func TestSequencer_CatchUp_ExitsCatchUpAtDAHead(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - 
mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 105 — multiple epochs ahead, triggers catch-up mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() @@ -1567,7 +1567,7 @@ func TestSequencer_CatchUp_HeightFromFutureExitsCatchUp(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 105 — multiple epochs ahead, triggers catch-up mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() @@ -1634,7 +1634,7 @@ func TestSequencer_CatchUp_NoCatchUpWhenRecentEpoch(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 100 — sequencer starts at 
100 with epoch size 1, // so it is within the same epoch (0 missed). No catch-up. @@ -1701,7 +1701,7 @@ func TestSequencer_CatchUp_MultiEpochReplay(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 106 — sequencer starts at 100 with epoch size 1, // so it has missed 6 epochs (>1), triggering catch-up. @@ -1853,7 +1853,7 @@ func TestSequencer_CatchUp_CheckpointAdvancesDuringCatchUp(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is at 105 — multiple epochs ahead, triggers catch-up mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() @@ -1946,7 +1946,7 @@ func TestSequencer_CatchUp_MonotonicTimestamps(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - 
mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() // DA head is far ahead — triggers catch-up mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(110), nil).Once() @@ -2073,7 +2073,7 @@ func TestSequencer_CatchUp_MonotonicTimestamps_EmptyEpoch(t *testing.T) { mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(110), nil).Once() @@ -2150,7 +2150,7 @@ func TestSequencer_GetNextBatch_GasFilteringPreservesUnprocessedTxs(t *testing.T mockDA := newMockFullDAClient(t) mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() - mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() + mockDA.MockClient.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(make(chan datypes.SubscriptionEvent)), nil).Maybe() mockDA.MockClient.On("GetForcedInclusionNamespace").Return([]byte("forced")).Maybe() mockDA.MockClient.On("MaxBlobSize", mock.Anything).Return(uint64(1000000), nil).Maybe() diff --git a/test/mocks/da.go b/test/mocks/da.go index 
27ce9badf3..f3a4e960db 100644 --- a/test/mocks/da.go +++ b/test/mocks/da.go @@ -493,8 +493,8 @@ func (_c *MockClient_Submit_Call) RunAndReturn(run func(ctx context.Context, dat } // Subscribe provides a mock function for the type MockClient -func (_mock *MockClient) Subscribe(ctx context.Context, namespace []byte) (<-chan da.SubscriptionEvent, error) { - ret := _mock.Called(ctx, namespace) +func (_mock *MockClient) Subscribe(ctx context.Context, namespace []byte, fetchTimestamp bool) (<-chan da.SubscriptionEvent, error) { + ret := _mock.Called(ctx, namespace, fetchTimestamp) if len(ret) == 0 { panic("no return value specified for Subscribe") @@ -502,18 +502,18 @@ func (_mock *MockClient) Subscribe(ctx context.Context, namespace []byte) (<-cha var r0 <-chan da.SubscriptionEvent var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, []byte) (<-chan da.SubscriptionEvent, error)); ok { - return returnFunc(ctx, namespace) + if returnFunc, ok := ret.Get(0).(func(context.Context, []byte, bool) (<-chan da.SubscriptionEvent, error)); ok { + return returnFunc(ctx, namespace, fetchTimestamp) } - if returnFunc, ok := ret.Get(0).(func(context.Context, []byte) <-chan da.SubscriptionEvent); ok { - r0 = returnFunc(ctx, namespace) + if returnFunc, ok := ret.Get(0).(func(context.Context, []byte, bool) <-chan da.SubscriptionEvent); ok { + r0 = returnFunc(ctx, namespace, fetchTimestamp) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(<-chan da.SubscriptionEvent) } } - if returnFunc, ok := ret.Get(1).(func(context.Context, []byte) error); ok { - r1 = returnFunc(ctx, namespace) + if returnFunc, ok := ret.Get(1).(func(context.Context, []byte, bool) error); ok { + r1 = returnFunc(ctx, namespace, fetchTimestamp) } else { r1 = ret.Error(1) } @@ -528,11 +528,12 @@ type MockClient_Subscribe_Call struct { // Subscribe is a helper method to define mock.On call // - ctx context.Context // - namespace []byte -func (_e *MockClient_Expecter) Subscribe(ctx interface{}, namespace 
interface{}) *MockClient_Subscribe_Call { - return &MockClient_Subscribe_Call{Call: _e.mock.On("Subscribe", ctx, namespace)} +// - fetchTimestamp bool +func (_e *MockClient_Expecter) Subscribe(ctx interface{}, namespace interface{}, fetchTimestamp interface{}) *MockClient_Subscribe_Call { + return &MockClient_Subscribe_Call{Call: _e.mock.On("Subscribe", ctx, namespace, fetchTimestamp)} } -func (_c *MockClient_Subscribe_Call) Run(run func(ctx context.Context, namespace []byte)) *MockClient_Subscribe_Call { +func (_c *MockClient_Subscribe_Call) Run(run func(ctx context.Context, namespace []byte, fetchTimestamp bool)) *MockClient_Subscribe_Call { _c.Call.Run(func(args mock.Arguments) { var arg0 context.Context if args[0] != nil { @@ -542,9 +543,14 @@ func (_c *MockClient_Subscribe_Call) Run(run func(ctx context.Context, namespace if args[1] != nil { arg1 = args[1].([]byte) } + var arg2 bool + if args[2] != nil { + arg2 = args[2].(bool) + } run( arg0, arg1, + arg2, ) }) return _c @@ -555,7 +561,7 @@ func (_c *MockClient_Subscribe_Call) Return(subscriptionEventCh <-chan da.Subscr return _c } -func (_c *MockClient_Subscribe_Call) RunAndReturn(run func(ctx context.Context, namespace []byte) (<-chan da.SubscriptionEvent, error)) *MockClient_Subscribe_Call { +func (_c *MockClient_Subscribe_Call) RunAndReturn(run func(ctx context.Context, namespace []byte, fetchTimestamp bool) (<-chan da.SubscriptionEvent, error)) *MockClient_Subscribe_Call { _c.Call.Return(run) return _c } diff --git a/test/testda/dummy.go b/test/testda/dummy.go index 44fc565c60..2f92e47e60 100644 --- a/test/testda/dummy.go +++ b/test/testda/dummy.go @@ -54,7 +54,7 @@ type DummyDA struct { // Subscribe returns a channel that emits a SubscriptionEvent for every new DA // height produced by Submit or StartHeightTicker. The channel is closed when // ctx is cancelled or Reset is called. 
-func (d *DummyDA) Subscribe(ctx context.Context, _ []byte) (<-chan datypes.SubscriptionEvent, error) { +func (d *DummyDA) Subscribe(ctx context.Context, _ []byte, _ bool) (<-chan datypes.SubscriptionEvent, error) { ch := make(chan datypes.SubscriptionEvent, 64) sub := &subscriber{ch: ch, ctx: ctx} From de64d823a9debb284b85a331a35f6853139935a9 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Mon, 9 Mar 2026 16:29:33 +0100 Subject: [PATCH 04/10] Doc update (cherry picked from commit 7a2adbd8a24df57e6e5db8aeaf8d3a57396ff750) --- docs/guides/da-layers/celestia.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/guides/da-layers/celestia.md b/docs/guides/da-layers/celestia.md index 88bbd48e4c..307dde63e9 100644 --- a/docs/guides/da-layers/celestia.md +++ b/docs/guides/da-layers/celestia.md @@ -72,10 +72,10 @@ You can use the same namespace for both headers and data, or separate them for o ### Set DA Address -Default Celestia light node port is 26658: +Default Celestia light node port is 26658. 
**Note:** Connection to a celestia-node DA requires a websocket connection: ```bash -DA_ADDRESS=http://localhost:26658 +DA_ADDRESS=ws://localhost:26658 ``` ## Running Your Chain From 239157f1197d2ab7d3223edcd7ae1b83b5fe476b Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Tue, 10 Mar 2026 11:57:06 +0100 Subject: [PATCH 05/10] Apply review feedback --- apps/evm/server/force_inclusion_test.go | 2 +- block/internal/da/async_block_retriever.go | 23 +++---- .../internal/da/async_block_retriever_test.go | 12 ++-- block/internal/da/subscriber.go | 28 ++++----- block/internal/syncing/da_follower.go | 63 +++++++++++++------ block/internal/syncing/syncer_backoff_test.go | 38 ++++++++--- 6 files changed, 103 insertions(+), 63 deletions(-) diff --git a/apps/evm/server/force_inclusion_test.go b/apps/evm/server/force_inclusion_test.go index a107a10d11..6be6e6ca76 100644 --- a/apps/evm/server/force_inclusion_test.go +++ b/apps/evm/server/force_inclusion_test.go @@ -50,7 +50,7 @@ func (m *mockDA) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.B return nil, nil } -func (m *mockDA) Subscribe(_ context.Context, _ []byte, _ bool) (<-chan da.SubscriptionEvent, error) { +func (m *mockDA) Subscribe(_ context.Context, _ []byte) (<-chan da.SubscriptionEvent, error) { // Not needed in these tests; return a closed channel. ch := make(chan da.SubscriptionEvent) close(ch) diff --git a/block/internal/da/async_block_retriever.go b/block/internal/da/async_block_retriever.go index 6789051514..4bdbb2a2ec 100644 --- a/block/internal/da/async_block_retriever.go +++ b/block/internal/da/async_block_retriever.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "sync/atomic" "time" ds "github.com/ipfs/go-datastore" @@ -44,9 +45,11 @@ type asyncBlockRetriever struct { cache ds.Batching // Current DA height tracking (accessed atomically via subscriber). - // Updated externally via UpdateCurrentHeight. 
daStartHeight uint64 + // Tracks DA height consumed by the sequencer to trigger cache cleanups. + consumedHeight atomic.Uint64 + // Prefetch window - how many blocks ahead to speculatively fetch. prefetchWindow uint64 } @@ -108,17 +111,17 @@ func (f *asyncBlockRetriever) Stop() { f.subscriber.Stop() } -// UpdateCurrentHeight updates the current DA height. +// UpdateCurrentHeight updates the consumed DA height and triggers cache cleanup. func (f *asyncBlockRetriever) UpdateCurrentHeight(height uint64) { for { - current := f.subscriber.LocalDAHeight() + current := f.consumedHeight.Load() if height <= current { return } - if f.subscriber.CompareAndSwapLocalHeight(current, height) { + if f.consumedHeight.CompareAndSwap(current, height) { f.logger.Debug(). Uint64("new_height", height). - Msg("updated current DA height") + Msg("updated consumed DA height for cleanup") f.cleanupOldBlocks(context.Background(), height) return } @@ -168,11 +171,10 @@ func (f *asyncBlockRetriever) GetCachedBlock(ctx context.Context, daHeight uint6 return block, nil } -// HandleEvent caches blobs from the subscription inline. +// HandleEvent caches blobs from the subscription inline, even empty ones, +// to record that the DA height was seen and has 0 blobs. func (f *asyncBlockRetriever) HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent) { - if len(ev.Blobs) > 0 { - f.cacheBlock(ctx, ev.Height, ev.Timestamp, ev.Blobs) - } + f.cacheBlock(ctx, ev.Height, ev.Timestamp, ev.Blobs) } // HandleCatchup fetches a single height via Retrieve and caches it. @@ -181,8 +183,7 @@ func (f *asyncBlockRetriever) HandleCatchup(ctx context.Context, height uint64) f.fetchAndCacheBlock(ctx, height) // Speculatively prefetch ahead. 
- highest := f.subscriber.HighestSeenDAHeight() - target := highest + f.prefetchWindow + target := height + f.prefetchWindow for h := height + 1; h <= target; h++ { if err := ctx.Err(); err != nil { return err diff --git a/block/internal/da/async_block_retriever_test.go b/block/internal/da/async_block_retriever_test.go index 1ecaca9a88..0aded0dcce 100644 --- a/block/internal/da/async_block_retriever_test.go +++ b/block/internal/da/async_block_retriever_test.go @@ -156,9 +156,7 @@ func TestAsyncBlockRetriever_HeightFromFuture(t *testing.T) { fiNs := datypes.NamespaceFromString("test-fi-ns").Bytes() // Subscription delivers height 100 with no blobs. - subCh := make(chan datypes.SubscriptionEvent, 1) - subCh <- datypes.SubscriptionEvent{Height: 100} - + subCh := make(chan datypes.SubscriptionEvent) client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Once() blockCh := make(chan datypes.SubscriptionEvent) client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() @@ -169,15 +167,17 @@ func TestAsyncBlockRetriever_HeightFromFuture(t *testing.T) { }).Maybe() logger := zerolog.Nop() - fetcher := NewAsyncBlockRetriever(client, logger, fiNs, 100*time.Millisecond, 100, 10) + fetcher := NewAsyncBlockRetriever(client, logger, fiNs, time.Millisecond, 100, 10) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher.Start(ctx) defer fetcher.Stop() // Wait a bit for catchup to attempt fetches. - time.Sleep(250 * time.Millisecond) + require.Eventually(t, func() bool { + return fetcher.(*asyncBlockRetriever).subscriber.HasReachedHead() + }, 1250*time.Millisecond, time.Millisecond) // Cache should be empty since all heights are from the future. 
block, err := fetcher.GetCachedBlock(ctx, 100) diff --git a/block/internal/da/subscriber.go b/block/internal/da/subscriber.go index 779ff559f3..24c5ae9513 100644 --- a/block/internal/da/subscriber.go +++ b/block/internal/da/subscriber.go @@ -87,6 +87,9 @@ func NewSubscriber(cfg SubscriberConfig) *Subscriber { daBlockTime: cfg.DABlockTime, fetchBlockTimestamp: cfg.FetchBlockTimestamp, } + if len(s.namespaces) == 0 { + s.logger.Warn().Msg("no namespaces configured, subscriber will stay idle") + } return s } @@ -98,7 +101,7 @@ func (s *Subscriber) SetStartHeight(height uint64) { // Start begins the follow and catchup goroutines. func (s *Subscriber) Start(ctx context.Context) error { if len(s.namespaces) == 0 { - return errors.New("no namespaces configured") + return nil } ctx, s.cancel = context.WithCancel(ctx) @@ -131,11 +134,6 @@ func (s *Subscriber) HasReachedHead() bool { return s.headReached.Load() } -// SetHeadReached marks the subscriber as having reached DA head. -func (s *Subscriber) SetHeadReached() { - s.headReached.Store(true) -} - // CompareAndSwapLocalHeight attempts a CAS on localDAHeight. // Used by handlers that want to claim exclusive processing of a height. func (s *Subscriber) CompareAndSwapLocalHeight(old, new uint64) bool { @@ -343,23 +341,19 @@ func (s *Subscriber) runCatchup(ctx context.Context) { if err := s.handler.HandleCatchup(ctx, local); err != nil { // Roll back so we can retry after backoff. s.localDAHeight.Store(local) - if !s.waitOnCatchupError(ctx, err, local) { + if errors.Is(err, datypes.ErrHeightFromFuture) { + s.headReached.Store(true) + return + } + if !s.shouldContinueCatchup(ctx, err, local) { return } - continue } } } -// ErrCaughtUp is a sentinel used to signal that the catchup loop has reached DA head. -var ErrCaughtUp = errors.New("caught up with DA head") - -// waitOnCatchupError logs the error and backs off before retrying. 
-func (s *Subscriber) waitOnCatchupError(ctx context.Context, err error, daHeight uint64) bool { - if errors.Is(err, ErrCaughtUp) { - s.logger.Debug().Uint64("da_height", daHeight).Msg("DA catchup reached head, waiting for subscription signal") - return false - } +// shouldContinueCatchup logs the error and backs off before retrying. +func (s *Subscriber) shouldContinueCatchup(ctx context.Context, err error, daHeight uint64) bool { if ctx.Err() != nil { return false } diff --git a/block/internal/syncing/da_follower.go b/block/internal/syncing/da_follower.go index a31ffc43ca..e8897ec981 100644 --- a/block/internal/syncing/da_follower.go +++ b/block/internal/syncing/da_follower.go @@ -116,7 +116,6 @@ func (f *daFollower) HandleEvent(ctx context.Context, ev datypes.SubscriptionEve } } if len(events) != 0 { - f.subscriber.SetHeadReached() f.logger.Debug().Uint64("da_height", ev.Height).Int("events", len(events)). Msg("processed subscription blobs inline (fast path)") } else { @@ -131,38 +130,63 @@ func (f *daFollower) HandleEvent(ctx context.Context, ev datypes.SubscriptionEve // HandleCatchup retrieves events at a single DA height and pipes them // to the event sink. Checks priority heights first. +// +// ErrHeightFromFuture is handled here rather than in fetchAndPipeHeight because +// the two call sites need different behaviour: +// - Normal catchup: mark head reached and return da.ErrCaughtUp to stop the loop. +// - Priority hint: the hint points to a future height — ignore it without +// skipping the current daHeight or stopping catchup. func (f *daFollower) HandleCatchup(ctx context.Context, daHeight uint64) error { - // Check for priority heights from P2P hints first. - if priorityHeight := f.popPriorityHeight(); priorityHeight > 0 { - if priorityHeight >= daHeight { - f.logger.Debug(). - Uint64("da_height", priorityHeight). - Msg("fetching priority DA height from P2P hint") - if err := f.fetchAndPipeHeight(ctx, priorityHeight); err != nil { - return err + // 1. 
Drain stale or future priority heights from P2P hints + for { + priorityHeight := f.popPriorityHeight() + if priorityHeight == 0 { + break + } + if priorityHeight < daHeight { + continue // skip stale hints without yielding back to the catchup loop + } + + f.logger.Debug(). + Uint64("da_height", priorityHeight). + Msg("fetching priority DA height from P2P hint") + + if err := f.fetchAndPipeHeight(ctx, priorityHeight); err != nil { + if errors.Is(err, datypes.ErrHeightFromFuture) { + // Priority hint points to a future height — silently ignore. + f.logger.Debug().Uint64("priority_da_height", priorityHeight). + Msg("priority hint is from future, ignoring") + continue } + // Roll back so daHeight is attempted again next cycle after backoff. + f.subscriber.SetLocalHeight(daHeight) + return err } - // Re-queue the current height by rolling back (the subscriber already advanced). + + // We successfully handled a priority height (we didn't actually process `daHeight`) + // Roll back so daHeight is attempted again next cycle. f.subscriber.SetLocalHeight(daHeight) return nil } - return f.fetchAndPipeHeight(ctx, daHeight) + // 2. Normal sequential fetch + if err := f.fetchAndPipeHeight(ctx, daHeight); err != nil { + return err + } + return nil } // fetchAndPipeHeight retrieves events at a single DA height and pipes them. +// It does NOT handle ErrHeightFromFuture — callers must decide how to react +// because the correct response depends on whether this is a normal sequential +// catchup or a priority-hint fetch. 
func (f *daFollower) fetchAndPipeHeight(ctx context.Context, daHeight uint64) error { events, err := f.retriever.RetrieveFromDA(ctx, daHeight) if err != nil { - switch { - case errors.Is(err, datypes.ErrBlobNotFound): + if errors.Is(err, datypes.ErrBlobNotFound) { return nil - case errors.Is(err, datypes.ErrHeightFromFuture): - f.subscriber.SetHeadReached() - return err - default: - return err } + return err } for _, event := range events { @@ -200,5 +224,8 @@ func (f *daFollower) popPriorityHeight() uint64 { } height := f.priorityHeights[0] f.priorityHeights = f.priorityHeights[1:] + if len(f.priorityHeights) == 0 { + f.priorityHeights = nil + } return height } diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 113cf173d9..30d37b5689 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -25,6 +25,7 @@ func TestDAFollower_BackoffOnCatchupError(t *testing.T) { daBlockTime time.Duration error error expectsBackoff bool + exitsCatchup bool // ErrCaughtUp → clean exit, no backoff, no advance description string }{ "generic_error_triggers_backoff": { @@ -33,11 +34,11 @@ func TestDAFollower_BackoffOnCatchupError(t *testing.T) { expectsBackoff: true, description: "Generic DA errors should trigger backoff", }, - "height_from_future_triggers_backoff": { - daBlockTime: 500 * time.Millisecond, - error: datypes.ErrHeightFromFuture, - expectsBackoff: true, - description: "Height from future should trigger backoff", + "height_from_future_stops_catchup": { + daBlockTime: 500 * time.Millisecond, + error: datypes.ErrHeightFromFuture, + exitsCatchup: true, + description: "Height from future should stop catchup (da.ErrCaughtUp), not backoff", }, "blob_not_found_no_backoff": { daBlockTime: 1 * time.Second, @@ -92,7 +93,10 @@ func TestDAFollower_BackoffOnCatchupError(t *testing.T) { }). 
Return(nil, tc.error).Once() - if tc.expectsBackoff { + if tc.exitsCatchup { + // ErrCaughtUp causes runCatchup to exit cleanly after 1 call. + // No second mock needed. + } else if tc.expectsBackoff { daRetriever.On("RetrieveFromDA", mock.Anything, uint64(100)). Run(func(args mock.Arguments) { callTimes = append(callTimes, time.Now()) @@ -110,10 +114,20 @@ func TestDAFollower_BackoffOnCatchupError(t *testing.T) { Return(nil, datypes.ErrBlobNotFound).Once() } - go sub.RunCatchupForTest(ctx) - <-ctx.Done() + done := make(chan struct{}) + go func() { + sub.RunCatchupForTest(ctx) + close(done) + }() + select { + case <-ctx.Done(): + case <-done: + } - if tc.expectsBackoff { + if tc.exitsCatchup { + assert.Equal(t, 1, callCount, "should exit after single call (ErrCaughtUp)") + assert.True(t, follower.HasReachedHead(), "should mark head as reached") + } else if tc.expectsBackoff { require.Len(t, callTimes, 2, "should make exactly 2 calls with backoff") timeBetweenCalls := callTimes[1].Sub(callTimes[0]) @@ -291,12 +305,16 @@ func TestDAFollower_InlineProcessing(t *testing.T) { daRetriever.On("ProcessBlobs", mock.Anything, blobs, uint64(10)). 
Return(expectedEvents).Once() - // Simulate subscription event at the current localDAHeight + // Simulate subscription event: update highest seen, then handle event + follower.subscriber.UpdateHighestForTest(10) follower.HandleEvent(t.Context(), datypes.SubscriptionEvent{ Height: 10, Blobs: blobs, }) + // Simulate catchup loop waking up to see if local > highest + follower.subscriber.RunCatchupForTest(t.Context()) + // Verify: ProcessBlobs was called, events were piped, height advanced require.Len(t, pipedEvents, 1, "should pipe 1 event from inline processing") assert.Equal(t, uint64(10), pipedEvents[0].DaHeight) From 9df4ff90f2aa7187be99b7091d9e620c78ef0c8b Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Tue, 10 Mar 2026 15:45:44 +0100 Subject: [PATCH 06/10] Updates - WIP --- block/internal/da/async_block_retriever.go | 8 +- block/internal/da/subscriber.go | 66 +++++------- block/internal/syncing/da_follower.go | 59 ++++------ block/internal/syncing/syncer_backoff_test.go | 102 ++++++++++-------- .../internal/syncing/syncer_benchmark_test.go | 8 +- block/internal/syncing/syncer_test.go | 22 ++-- 6 files changed, 134 insertions(+), 131 deletions(-) diff --git a/block/internal/da/async_block_retriever.go b/block/internal/da/async_block_retriever.go index 4bdbb2a2ec..4da527a34f 100644 --- a/block/internal/da/async_block_retriever.go +++ b/block/internal/da/async_block_retriever.go @@ -88,8 +88,8 @@ func NewAsyncBlockRetriever( DABlockTime: daBlockTime, Handler: f, FetchBlockTimestamp: true, + StartHeight: daStartHeight, }) - f.subscriber.SetStartHeight(daStartHeight) return f } @@ -173,8 +173,12 @@ func (f *asyncBlockRetriever) GetCachedBlock(ctx context.Context, daHeight uint6 // HandleEvent caches blobs from the subscription inline, even empty ones, // to record that the DA height was seen and has 0 blobs. 
-func (f *asyncBlockRetriever) HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent) { +func (f *asyncBlockRetriever) HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent, isInline bool) error { f.cacheBlock(ctx, ev.Height, ev.Timestamp, ev.Blobs) + if isInline { + return errors.New("async block retriever relies on catchup state machine") + } + return nil } // HandleCatchup fetches a single height via Retrieve and caches it. diff --git a/block/internal/da/subscriber.go b/block/internal/da/subscriber.go index 24c5ae9513..edb0b73d56 100644 --- a/block/internal/da/subscriber.go +++ b/block/internal/da/subscriber.go @@ -17,13 +17,15 @@ import ( // SubscriberHandler is the callback interface for subscription consumers. // Implementations drive the consumer-specific logic (caching, piping events, etc.). type SubscriberHandler interface { - // HandleEvent processes a subscription event inline (fast path). - // Called on the followLoop goroutine for each subscription event. - HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent) + // HandleEvent processes a subscription event. + // isInline is true if the subscriber successfully claimed this height (via CAS). + // Returning an error when isInline is true instructs the Subscriber to roll back the localDAHeight. + HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent, isInline bool) error // HandleCatchup is called for each height during sequential catchup. - // The subscriber advances localDAHeight only after this returns nil. - // Returning an error triggers a backoff retry. + // The subscriber advances localDAHeight only after this returns (true, nil). + // Returning (false, nil) rolls back localDAHeight without triggering a backoff. + // Returning an error rolls back localDAHeight and triggers a backoff retry. 
HandleCatchup(ctx context.Context, height uint64) error } @@ -36,6 +38,8 @@ type SubscriberConfig struct { Handler SubscriberHandler // Deprecated: Remove with https://github.com/evstack/ev-node/issues/3142 FetchBlockTimestamp bool // the timestamp comes with an extra api call before Celestia v0.29.1-mocha. + + StartHeight uint64 // initial localDAHeight } // Subscriber is a shared DA subscription primitive that encapsulates the @@ -87,17 +91,15 @@ func NewSubscriber(cfg SubscriberConfig) *Subscriber { daBlockTime: cfg.DABlockTime, fetchBlockTimestamp: cfg.FetchBlockTimestamp, } + s.localDAHeight.Store(cfg.StartHeight) + s.catchupSignal <- struct{}{} + if len(s.namespaces) == 0 { s.logger.Warn().Msg("no namespaces configured, subscriber will stay idle") } return s } -// SetStartHeight sets the initial local DA height before Start is called. -func (s *Subscriber) SetStartHeight(height uint64) { - s.localDAHeight.Store(height) -} - // Start begins the follow and catchup goroutines. func (s *Subscriber) Start(ctx context.Context) error { if len(s.namespaces) == 0 { @@ -108,6 +110,7 @@ func (s *Subscriber) Start(ctx context.Context) error { s.wg.Add(2) go s.followLoop(ctx) go s.catchupLoop(ctx) + return nil } @@ -134,29 +137,6 @@ func (s *Subscriber) HasReachedHead() bool { return s.headReached.Load() } -// CompareAndSwapLocalHeight attempts a CAS on localDAHeight. -// Used by handlers that want to claim exclusive processing of a height. -func (s *Subscriber) CompareAndSwapLocalHeight(old, new uint64) bool { - return s.localDAHeight.CompareAndSwap(old, new) -} - -// SetLocalHeight stores a new localDAHeight value. -func (s *Subscriber) SetLocalHeight(height uint64) { - s.localDAHeight.Store(height) -} - -// UpdateHighestForTest directly sets the highest seen DA height and signals catchup. -// Only for use in tests that bypass the subscription loop. 
-func (s *Subscriber) UpdateHighestForTest(height uint64) { - s.updateHighest(height) -} - -// RunCatchupForTest runs a single catchup pass. Only for use in tests that -// bypass the catchup loop's signal-wait mechanism. -func (s *Subscriber) RunCatchupForTest(ctx context.Context) { - s.runCatchup(ctx) -} - // --------------------------------------------------------------------------- // Follow loop // --------------------------------------------------------------------------- @@ -215,7 +195,21 @@ func (s *Subscriber) runSubscription(ctx context.Context) error { return errors.New("subscription channel closed") } s.updateHighest(ev.Height) - s.handler.HandleEvent(ctx, ev) + + local := s.localDAHeight.Load() + isInline := ev.Height == local && s.localDAHeight.CompareAndSwap(local, local+1) + + err = s.handler.HandleEvent(ctx, ev, isInline) + if isInline { + if err != nil { + s.localDAHeight.Store(local) + s.logger.Debug().Err(err).Uint64("da_height", ev.Height). + Msg("inline processing skipped/failed, rolling back") + } else { + s.headReached.Store(true) + } + } + watchdog.Reset(watchdogTimeout) case <-watchdog.C: return errors.New("subscription watchdog: no events received, reconnecting") @@ -325,9 +319,7 @@ func (s *Subscriber) runCatchup(ctx context.Context) { } local := s.localDAHeight.Load() - highest := s.highestSeenDAHeight.Load() - - if local > highest { + if local > s.highestSeenDAHeight.Load() { s.headReached.Store(true) return } diff --git a/block/internal/syncing/da_follower.go b/block/internal/syncing/da_follower.go index e8897ec981..377ae110b4 100644 --- a/block/internal/syncing/da_follower.go +++ b/block/internal/syncing/da_follower.go @@ -68,8 +68,8 @@ func NewDAFollower(cfg DAFollowerConfig) DAFollower { Namespaces: [][]byte{cfg.Namespace, dataNs}, DABlockTime: cfg.DABlockTime, Handler: f, + StartHeight: cfg.StartDAHeight, }) - f.subscriber.SetStartHeight(cfg.StartDAHeight) return f } @@ -97,35 +97,27 @@ func (f *daFollower) HasReachedHead() bool { 
// caught up (ev.Height == localDAHeight) and blobs are available, it processes // them inline — avoiding a DA re-fetch round trip. Otherwise, it just lets // the catchup loop handle retrieval. -// -// Uses CAS on localDAHeight to claim exclusive access to processBlobs, -// preventing concurrent map access with catchupLoop. -func (f *daFollower) HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent) { - // Fast path: try to claim this height for inline processing. - // CAS(N, N+1) ensures only one goroutine (followLoop or catchupLoop) - // can enter processBlobs for height N. - if len(ev.Blobs) > 0 && f.subscriber.CompareAndSwapLocalHeight(ev.Height, ev.Height+1) { - events := f.retriever.ProcessBlobs(ctx, ev.Blobs, ev.Height) - for _, event := range events { - if err := f.eventSink.PipeEvent(ctx, event); err != nil { - // Roll back so catchupLoop can retry this height. - f.subscriber.SetLocalHeight(ev.Height) - f.logger.Warn().Err(err).Uint64("da_height", ev.Height). - Msg("failed to pipe inline event, catchup will retry") - return - } - } - if len(events) != 0 { - f.logger.Debug().Uint64("da_height", ev.Height).Int("events", len(events)). - Msg("processed subscription blobs inline (fast path)") - } else { - // No complete events (split namespace, waiting for other half). - f.subscriber.SetLocalHeight(ev.Height) +func (f *daFollower) HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent, isInline bool) error { + if !isInline || len(ev.Blobs) == 0 { + return nil // skip + } + + events := f.retriever.ProcessBlobs(ctx, ev.Blobs, ev.Height) + if len(events) == 0 { + return errors.New("skip inline: no complete events") // Split namespace, subscriber rolls back + } + + for _, event := range events { + if err := f.eventSink.PipeEvent(ctx, event); err != nil { + f.logger.Warn().Err(err).Uint64("da_height", ev.Height). 
+ Msg("failed to pipe inline event, catchup will retry") + return err // Actual pipe failure, subscriber rolls back } - return } - // Slow path: behind, no blobs, or catchupLoop claimed this height. + f.logger.Debug().Uint64("da_height", ev.Height).Int("events", len(events)). + Msg("processed subscription blobs inline (fast path)") + return nil } // HandleCatchup retrieves events at a single DA height and pipes them @@ -138,11 +130,7 @@ func (f *daFollower) HandleEvent(ctx context.Context, ev datypes.SubscriptionEve // skipping the current daHeight or stopping catchup. func (f *daFollower) HandleCatchup(ctx context.Context, daHeight uint64) error { // 1. Drain stale or future priority heights from P2P hints - for { - priorityHeight := f.popPriorityHeight() - if priorityHeight == 0 { - break - } + for priorityHeight := f.popPriorityHeight(); priorityHeight != 0; priorityHeight = f.popPriorityHeight() { if priorityHeight < daHeight { continue // skip stale hints without yielding back to the catchup loop } @@ -159,14 +147,9 @@ func (f *daFollower) HandleCatchup(ctx context.Context, daHeight uint64) error { continue } // Roll back so daHeight is attempted again next cycle after backoff. - f.subscriber.SetLocalHeight(daHeight) return err } - - // We successfully handled a priority height (we didn't actually process `daHeight`) - // Roll back so daHeight is attempted again next cycle. - f.subscriber.SetLocalHeight(daHeight) - return nil + break // continue with daHeight } // 2. 
Normal sequential fetch diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 30d37b5689..3dd892ba18 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -3,6 +3,8 @@ package syncing import ( "context" "errors" + "sync" + "sync/atomic" "testing" "testing/synctest" "time" @@ -64,7 +66,10 @@ func TestDAFollower_BackoffOnCatchupError(t *testing.T) { pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } + // Replace manual test usages with Config. + daClient, eventCh := setupMockDAClient(t) follower := NewDAFollower(DAFollowerConfig{ + Client: daClient, Retriever: daRetriever, Logger: zerolog.Nop(), EventSink: common.EventSinkFunc(pipeEvent), @@ -73,14 +78,11 @@ func TestDAFollower_BackoffOnCatchupError(t *testing.T) { DABlockTime: tc.daBlockTime, }).(*daFollower) - // Set up the subscriber for direct testing. - sub := follower.subscriber - sub.SetStartHeight(100) ctx, subCancel := context.WithCancel(ctx) defer subCancel() - // Set highest to trigger catchup. 
- sub.UpdateHighestForTest(102) + follower.Start(ctx) + eventCh <- datypes.SubscriptionEvent{Height: 102} var callTimes []time.Time callCount := 0 @@ -114,14 +116,14 @@ func TestDAFollower_BackoffOnCatchupError(t *testing.T) { Return(nil, datypes.ErrBlobNotFound).Once() } - done := make(chan struct{}) - go func() { - sub.RunCatchupForTest(ctx) - close(done) - }() - select { - case <-ctx.Done(): - case <-done: + // Start already called above to avoid race on eventCh push vs Start + + if tc.exitsCatchup { + for !follower.HasReachedHead() && ctx.Err() == nil { + time.Sleep(time.Millisecond) + } + } else { + <-ctx.Done() } if tc.exitsCatchup { @@ -166,7 +168,9 @@ func TestDAFollower_BackoffResetOnSuccess(t *testing.T) { pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } + daClient, eventCh := setupMockDAClient(t) follower := NewDAFollower(DAFollowerConfig{ + Client: daClient, Retriever: daRetriever, Logger: zerolog.Nop(), EventSink: common.EventSinkFunc(pipeEvent), @@ -175,10 +179,11 @@ func TestDAFollower_BackoffResetOnSuccess(t *testing.T) { DABlockTime: 1 * time.Second, }).(*daFollower) - sub := follower.subscriber ctx, subCancel := context.WithCancel(ctx) defer subCancel() - sub.UpdateHighestForTest(105) + + follower.Start(ctx) + eventCh <- datypes.SubscriptionEvent{Height: 105} var callTimes []time.Time @@ -218,7 +223,6 @@ func TestDAFollower_BackoffResetOnSuccess(t *testing.T) { }). 
Return(nil, datypes.ErrBlobNotFound).Once() - go sub.RunCatchupForTest(ctx) <-ctx.Done() require.Len(t, callTimes, 3, "should make exactly 3 calls") @@ -244,7 +248,9 @@ func TestDAFollower_CatchupThenReachHead(t *testing.T) { pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } + daClient, eventCh := setupMockDAClient(t) follower := NewDAFollower(DAFollowerConfig{ + Client: daClient, Retriever: daRetriever, Logger: zerolog.Nop(), EventSink: common.EventSinkFunc(pipeEvent), @@ -253,8 +259,8 @@ func TestDAFollower_CatchupThenReachHead(t *testing.T) { DABlockTime: 500 * time.Millisecond, }).(*daFollower) - sub := follower.subscriber - sub.UpdateHighestForTest(5) + follower.Start(ctx) + eventCh <- datypes.SubscriptionEvent{Height: 5} var fetchedHeights []uint64 @@ -266,7 +272,9 @@ func TestDAFollower_CatchupThenReachHead(t *testing.T) { Return(nil, datypes.ErrBlobNotFound).Once() } - sub.RunCatchupForTest(ctx) + for !follower.HasReachedHead() && ctx.Err() == nil { + time.Sleep(time.Millisecond) + } assert.True(t, follower.HasReachedHead(), "should have reached DA head") // Heights 3, 4, 5 processed; local now at 6 which > highest (5) → caught up @@ -281,13 +289,18 @@ func TestDAFollower_InlineProcessing(t *testing.T) { t.Run("processes_blobs_inline_when_caught_up", func(t *testing.T) { daRetriever := NewMockDARetriever(t) + var mx sync.Mutex var pipedEvents []common.DAHeightEvent pipeEvent := func(_ context.Context, ev common.DAHeightEvent) error { + mx.Lock() + defer mx.Unlock() pipedEvents = append(pipedEvents, ev) return nil } + daClient, eventCh := setupMockDAClient(t) follower := NewDAFollower(DAFollowerConfig{ + Client: daClient, Retriever: daRetriever, Logger: zerolog.Nop(), EventSink: common.EventSinkFunc(pipeEvent), @@ -301,22 +314,16 @@ func TestDAFollower_InlineProcessing(t *testing.T) { {DaHeight: 10, Source: common.SourceDA}, } - // ProcessBlobs should be called (not RetrieveFromDA) - daRetriever.On("ProcessBlobs", 
mock.Anything, blobs, uint64(10)). - Return(expectedEvents).Once() + daRetriever.On("ProcessBlobs", mock.Anything, blobs, uint64(10)).Return(expectedEvents).Once() + daRetriever.On("RetrieveFromDA", mock.Anything, uint64(10)).Return(expectedEvents, nil).Maybe() - // Simulate subscription event: update highest seen, then handle event - follower.subscriber.UpdateHighestForTest(10) - follower.HandleEvent(t.Context(), datypes.SubscriptionEvent{ - Height: 10, - Blobs: blobs, - }) + // Simulate catchup loop to verify it advances naturally + require.NoError(t, follower.Start(t.Context())) + eventCh <- datypes.SubscriptionEvent{Height: 10, Blobs: blobs} // Signal current height - // Simulate catchup loop waking up to see if local > highest - follower.subscriber.RunCatchupForTest(t.Context()) + require.Eventually(t, func() bool { return len(pipedEvents) == 1 }, 10*time.Millisecond, time.Millisecond) // Verify: ProcessBlobs was called, events were piped, height advanced - require.Len(t, pipedEvents, 1, "should pipe 1 event from inline processing") assert.Equal(t, uint64(10), pipedEvents[0].DaHeight) assert.Equal(t, uint64(11), follower.subscriber.LocalDAHeight(), "localDAHeight should advance past processed height") assert.True(t, follower.HasReachedHead(), "should mark head as reached after inline processing") @@ -327,26 +334,31 @@ func TestDAFollower_InlineProcessing(t *testing.T) { pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } + daClient, subChan := setupMockDAClient(t) follower := NewDAFollower(DAFollowerConfig{ + Client: daClient, Retriever: daRetriever, Logger: zerolog.Nop(), EventSink: common.EventSinkFunc(pipeEvent), Namespace: []byte("ns"), StartDAHeight: 10, - DABlockTime: 500 * time.Millisecond, + DABlockTime: 1 * time.Millisecond, }).(*daFollower) - // Subscription reports height 15 but local is at 10 — should NOT process inline - // In production, the subscriber calls updateHighest before HandleEvent. 
-	follower.subscriber.UpdateHighestForTest(15)
-	follower.HandleEvent(t.Context(), datypes.SubscriptionEvent{
-		Height: 15,
-		Blobs:  [][]byte{[]byte("blob")},
-	})
+	blobs := [][]byte{[]byte("blob")}
+	subChan <- datypes.SubscriptionEvent{Height: 15, Blobs: blobs}
+
+	daRetriever.On("RetrieveFromDA", mock.Anything, uint64(10)).Return([]common.DAHeightEvent{}, nil).Maybe()
+	var updated atomic.Bool
+	notifyFn := func(args mock.Arguments) { updated.Store(true) }
+	daRetriever.On("RetrieveFromDA", mock.Anything, uint64(11)).Run(notifyFn).Return(nil, datypes.ErrHeightFromFuture).Maybe()
+	require.NoError(t, follower.Start(t.Context()))
+
+	require.Eventually(t, func() bool { return updated.Load() }, 10*time.Millisecond, time.Millisecond)

 	// ProcessBlobs should NOT have been called
 	daRetriever.AssertNotCalled(t, "ProcessBlobs", mock.Anything, mock.Anything, mock.Anything)
-	assert.Equal(t, uint64(10), follower.subscriber.LocalDAHeight(), "localDAHeight should not change")
+	assert.Equal(t, uint64(11), follower.subscriber.LocalDAHeight(), "localDAHeight should advance by one past the skipped inline event")
 	assert.Equal(t, uint64(15), follower.subscriber.HighestSeenDAHeight(), "highestSeen should be updated")
 })

@@ -355,7 +367,9 @@ func TestDAFollower_InlineProcessing(t *testing.T) {
 	pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error {
 		return nil
 	}
+	daClient, _ := setupMockDAClient(t)
 	follower := NewDAFollower(DAFollowerConfig{
+		Client:        daClient,
 		Retriever:     daRetriever,
 		Logger:        zerolog.Nop(),
 		EventSink:     common.EventSinkFunc(pipeEvent),
@@ -363,14 +377,12 @@
 		StartDAHeight: 10,
 		DABlockTime:   500 * time.Millisecond,
 	}).(*daFollower)
-
-	// Subscription at current height but no blobs — should fall through
-	// In production, the subscriber calls updateHighest before HandleEvent.
- follower.subscriber.UpdateHighestForTest(10) + // HandleEvent at current height but no blobs — should fall through + // Subscriber.HandleEvent updates highest internally even if not processed. follower.HandleEvent(t.Context(), datypes.SubscriptionEvent{ Height: 10, Blobs: nil, - }) + }, true) // ProcessBlobs should NOT have been called daRetriever.AssertNotCalled(t, "ProcessBlobs", mock.Anything, mock.Anything, mock.Anything) diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index b0bccc6f74..b066f2ec71 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -16,6 +16,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" "github.com/evstack/ev-node/pkg/config" + datypes "github.com/evstack/ev-node/pkg/da/types" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" testmocks "github.com/evstack/ev-node/test/mocks" @@ -47,7 +48,9 @@ func BenchmarkSyncerIO(b *testing.B) { go fixt.s.processLoop(ctx) // Create a DAFollower to drive DA retrieval. 
+ daClient, eventCh := setupMockDAClient(b) follower := NewDAFollower(DAFollowerConfig{ + Client: daClient, Retriever: fixt.s.daRetriever, Logger: zerolog.Nop(), EventSink: fixt.s, @@ -55,9 +58,8 @@ func BenchmarkSyncerIO(b *testing.B) { StartDAHeight: fixt.s.daRetrieverHeight.Load(), DABlockTime: 0, }).(*daFollower) - sub := follower.subscriber - sub.UpdateHighestForTest(spec.heights + daHeightOffset) - go sub.RunCatchupForTest(ctx) + follower.Start(ctx) + eventCh <- datypes.SubscriptionEvent{Height: spec.heights + daHeightOffset} fixt.s.startSyncWorkers(ctx) diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 537b3e8ad9..07f5cefc6a 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -21,6 +21,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" "github.com/evstack/ev-node/core/execution" "github.com/evstack/ev-node/pkg/config" datypes "github.com/evstack/ev-node/pkg/da/types" @@ -86,6 +87,13 @@ func makeSignedHeaderBytes( return bin, hdr } +func setupMockDAClient(tb testing.TB) (da.Client, chan datypes.SubscriptionEvent) { + mockClient := testmocks.NewMockClient(tb) + eventCh := make(chan datypes.SubscriptionEvent, 1) + mockClient.EXPECT().Subscribe(mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(eventCh), nil).Maybe() + return mockClient, eventCh +} + func makeData(chainID string, height uint64, txs int) *types.Data { d := &types.Data{ Metadata: &types.Metadata{ @@ -426,7 +434,9 @@ func TestSyncLoopPersistState(t *testing.T) { go syncerInst1.processLoop(ctx) // Create and start a DAFollower so DA retrieval actually happens. 
+ daClient, eventCh := setupMockDAClient(t) follower1 := NewDAFollower(DAFollowerConfig{ + Client: daClient, Retriever: daRtrMock, Logger: zerolog.Nop(), EventSink: common.EventSinkFunc(syncerInst1.PipeEvent), @@ -434,9 +444,8 @@ func TestSyncLoopPersistState(t *testing.T) { StartDAHeight: syncerInst1.daRetrieverHeight.Load(), DABlockTime: cfg.DA.BlockTime.Duration, }).(*daFollower) - sub1 := follower1.subscriber - sub1.UpdateHighestForTest(myFutureDAHeight) - go sub1.RunCatchupForTest(ctx) + require.NoError(t, follower1.Start(ctx)) + eventCh <- datypes.SubscriptionEvent{Height: myFutureDAHeight} syncerInst1.startSyncWorkers(ctx) syncerInst1.wg.Wait() requireEmptyChan(t, errorCh) @@ -509,7 +518,9 @@ func TestSyncLoopPersistState(t *testing.T) { t.Log("sync workers on instance2 started") // Create a follower for instance 2. + daClient2, eventCh2 := setupMockDAClient(t) follower2 := NewDAFollower(DAFollowerConfig{ + Client: daClient2, Retriever: daRtrMock, Logger: zerolog.Nop(), EventSink: common.EventSinkFunc(syncerInst2.PipeEvent), @@ -517,9 +528,8 @@ func TestSyncLoopPersistState(t *testing.T) { StartDAHeight: syncerInst2.daRetrieverHeight.Load(), DABlockTime: cfg.DA.BlockTime.Duration, }).(*daFollower) - sub2 := follower2.subscriber - sub2.UpdateHighestForTest(syncerInst2.daRetrieverHeight.Load() + 1) - go sub2.RunCatchupForTest(ctx) + follower2.Start(ctx) + eventCh2 <- datypes.SubscriptionEvent{Height: syncerInst2.daRetrieverHeight.Load() + 1} syncerInst2.startSyncWorkers(ctx) syncerInst2.wg.Wait() From d2455582c928db2ce6992b141c7816f9f91ac09e Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Wed, 11 Mar 2026 09:42:57 +0100 Subject: [PATCH 07/10] Updates --- .../internal/da/forced_inclusion_retriever.go | 7 +++- block/internal/da/forced_inclusion_tracing.go | 4 ++ block/internal/da/subscriber.go | 6 +-- block/internal/syncing/da_follower.go | 13 +++--- block/internal/syncing/syncer.go | 1 + block/internal/syncing/syncer_backoff_test.go | 17 ++++++-- 
block/public.go | 1 + .../common/forced_inclusion_retriever_mock.go | 40 +++++++++++++++++++ pkg/sequencers/single/sequencer.go | 1 + 9 files changed, 74 insertions(+), 16 deletions(-) diff --git a/block/internal/da/forced_inclusion_retriever.go b/block/internal/da/forced_inclusion_retriever.go index a8051a5758..20b00732a1 100644 --- a/block/internal/da/forced_inclusion_retriever.go +++ b/block/internal/da/forced_inclusion_retriever.go @@ -18,6 +18,7 @@ var ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not // ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA. type ForcedInclusionRetriever interface { RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) + Start(ctx context.Context) Stop() } @@ -59,7 +60,6 @@ func NewForcedInclusionRetriever( daStartHeight, daEpochSize*2, // prefetch window: 2x epoch size ) - asyncFetcher.Start(ctx) base := &forcedInclusionRetriever{ client: client, @@ -74,6 +74,11 @@ func NewForcedInclusionRetriever( return base } +// Start begins the background prefetcher. +func (r *forcedInclusionRetriever) Start(ctx context.Context) { + r.asyncFetcher.Start(ctx) +} + // Stop stops the background prefetcher. 
func (r *forcedInclusionRetriever) Stop() { r.asyncFetcher.Stop() diff --git a/block/internal/da/forced_inclusion_tracing.go b/block/internal/da/forced_inclusion_tracing.go index 7e777161f7..6d96a47a08 100644 --- a/block/internal/da/forced_inclusion_tracing.go +++ b/block/internal/da/forced_inclusion_tracing.go @@ -49,6 +49,10 @@ func (t *tracedForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.C return event, nil } +func (t *tracedForcedInclusionRetriever) Start(ctx context.Context) { + t.inner.Start(ctx) +} + func (t *tracedForcedInclusionRetriever) Stop() { t.inner.Stop() } diff --git a/block/internal/da/subscriber.go b/block/internal/da/subscriber.go index edb0b73d56..cf9e4e488e 100644 --- a/block/internal/da/subscriber.go +++ b/block/internal/da/subscriber.go @@ -201,12 +201,12 @@ func (s *Subscriber) runSubscription(ctx context.Context) error { err = s.handler.HandleEvent(ctx, ev, isInline) if isInline { - if err != nil { + if err == nil { + s.headReached.Store(true) + } else { s.localDAHeight.Store(local) s.logger.Debug().Err(err).Uint64("da_height", ev.Height). Msg("inline processing skipped/failed, rolling back") - } else { - s.headReached.Store(true) } } diff --git a/block/internal/syncing/da_follower.go b/block/internal/syncing/da_follower.go index 377ae110b4..04ff5e639b 100644 --- a/block/internal/syncing/da_follower.go +++ b/block/internal/syncing/da_follower.go @@ -98,8 +98,11 @@ func (f *daFollower) HasReachedHead() bool { // them inline — avoiding a DA re-fetch round trip. Otherwise, it just lets // the catchup loop handle retrieval. 
func (f *daFollower) HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent, isInline bool) error { - if !isInline || len(ev.Blobs) == 0 { - return nil // skip + if !isInline { + return nil // skip: let subscriber just update highestSeenDAHeight + } + if len(ev.Blobs) == 0 { + return errors.New("skip inline: no blobs") // subscriber rolls back, catch-up loop will retry } events := f.retriever.ProcessBlobs(ctx, ev.Blobs, ev.Height) @@ -122,12 +125,6 @@ func (f *daFollower) HandleEvent(ctx context.Context, ev datypes.SubscriptionEve // HandleCatchup retrieves events at a single DA height and pipes them // to the event sink. Checks priority heights first. -// -// ErrHeightFromFuture is handled here rather than in fetchAndPipeHeight because -// the two call sites need different behaviour: -// - Normal catchup: mark head reached and return da.ErrCaughtUp to stop the loop. -// - Priority hint: the hint points to a future height — ignore it without -// skipping the current daHeight or stopping catchup. func (f *daFollower) HandleCatchup(ctx context.Context, daHeight uint64) error { // 1. 
Drain stale or future priority heights from P2P hints for priorityHeight := f.popPriorityHeight(); priorityHeight != 0; priorityHeight = f.popPriorityHeight() { diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 91d231527e..c7e1e1e11f 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -182,6 +182,7 @@ func (s *Syncer) Start(ctx context.Context) (err error) { } s.fiRetriever = da.NewForcedInclusionRetriever(ctx, s.daClient, s.logger, s.config.DA.BlockTime.Duration, s.config.Instrumentation.IsTracingEnabled(), s.genesis.DAStartHeight, s.genesis.DAEpochForcedInclusion) + s.fiRetriever.Start(ctx) s.p2pHandler = NewP2PHandler(s.headerStore, s.dataStore, s.cache, s.genesis, s.logger) currentHeight, initErr := s.store.Height(ctx) diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 3dd892ba18..f22638f5a6 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -315,7 +315,8 @@ func TestDAFollower_InlineProcessing(t *testing.T) { } daRetriever.On("ProcessBlobs", mock.Anything, blobs, uint64(10)).Return(expectedEvents).Once() - daRetriever.On("RetrieveFromDA", mock.Anything, uint64(10)).Return(expectedEvents, nil).Maybe() + // Mock RetrieveFromDA for height 10 in case background catch-up fires before HandleEvent. 
+ daRetriever.On("RetrieveFromDA", mock.Anything, uint64(10)).Return(nil, datypes.ErrHeightFromFuture).Maybe() // Simulate catchup loop to verify it advances naturally require.NoError(t, follower.Start(t.Context())) @@ -367,7 +368,7 @@ func TestDAFollower_InlineProcessing(t *testing.T) { pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } - daClient, _ := setupMockDAClient(t) + daClient, eventCh := setupMockDAClient(t) follower := NewDAFollower(DAFollowerConfig{ Client: daClient, Retriever: daRetriever, @@ -377,15 +378,23 @@ func TestDAFollower_InlineProcessing(t *testing.T) { StartDAHeight: 10, DABlockTime: 500 * time.Millisecond, }).(*daFollower) + + // Mock RetrieveFromDA because falling through inline processing triggers catchup. + daRetriever.On("RetrieveFromDA", mock.Anything, uint64(10)).Return(nil, datypes.ErrHeightFromFuture).Maybe() + require.NoError(t, follower.Start(t.Context())) + // HandleEvent at current height but no blobs — should fall through // Subscriber.HandleEvent updates highest internally even if not processed. 
- follower.HandleEvent(t.Context(), datypes.SubscriptionEvent{ + eventCh <- datypes.SubscriptionEvent{ Height: 10, Blobs: nil, - }, true) + } // ProcessBlobs should NOT have been called daRetriever.AssertNotCalled(t, "ProcessBlobs", mock.Anything, mock.Anything, mock.Anything) + require.Eventually(t, func() bool { + return follower.subscriber.LocalDAHeight() == 10 && follower.subscriber.HighestSeenDAHeight() == 10 + }, 100*time.Millisecond, 2*time.Millisecond) assert.Equal(t, uint64(10), follower.subscriber.LocalDAHeight(), "localDAHeight should not change") assert.Equal(t, uint64(10), follower.subscriber.HighestSeenDAHeight(), "highestSeen should be updated") }) diff --git a/block/public.go b/block/public.go index 988760211b..1eba0da298 100644 --- a/block/public.go +++ b/block/public.go @@ -78,6 +78,7 @@ type ForcedInclusionEvent = da.ForcedInclusionEvent type ForcedInclusionRetriever interface { RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) Stop() + Start(ctx context.Context) } // NewForcedInclusionRetriever creates a new forced inclusion retriever. 
diff --git a/pkg/sequencers/common/forced_inclusion_retriever_mock.go b/pkg/sequencers/common/forced_inclusion_retriever_mock.go index c90b84f7f0..e4dc2b0c8e 100644 --- a/pkg/sequencers/common/forced_inclusion_retriever_mock.go +++ b/pkg/sequencers/common/forced_inclusion_retriever_mock.go @@ -106,6 +106,46 @@ func (_c *MockForcedInclusionRetriever_RetrieveForcedIncludedTxs_Call) RunAndRet return _c } +// Start provides a mock function for the type MockForcedInclusionRetriever +func (_mock *MockForcedInclusionRetriever) Start(ctx context.Context) { + _mock.Called(ctx) + return +} + +// MockForcedInclusionRetriever_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type MockForcedInclusionRetriever_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockForcedInclusionRetriever_Expecter) Start(ctx interface{}) *MockForcedInclusionRetriever_Start_Call { + return &MockForcedInclusionRetriever_Start_Call{Call: _e.mock.On("Start", ctx)} +} + +func (_c *MockForcedInclusionRetriever_Start_Call) Run(run func(ctx context.Context)) *MockForcedInclusionRetriever_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockForcedInclusionRetriever_Start_Call) Return() *MockForcedInclusionRetriever_Start_Call { + _c.Call.Return() + return _c +} + +func (_c *MockForcedInclusionRetriever_Start_Call) RunAndReturn(run func(ctx context.Context)) *MockForcedInclusionRetriever_Start_Call { + _c.Run(run) + return _c +} + // Stop provides a mock function for the type MockForcedInclusionRetriever func (_mock *MockForcedInclusionRetriever) Stop() { _mock.Called() diff --git a/pkg/sequencers/single/sequencer.go b/pkg/sequencers/single/sequencer.go index 44bbb5b311..abe757f09b 100644 --- a/pkg/sequencers/single/sequencer.go +++ 
b/pkg/sequencers/single/sequencer.go @@ -203,6 +203,7 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB c.fiRetriever.Stop() } c.fiRetriever = block.NewForcedInclusionRetriever(ctx, c.daClient, c.cfg, c.logger, c.getInitialDAStartHeight(ctx), c.genesis.DAEpochForcedInclusion) + c.fiRetriever.Start(ctx) } // If we have no cached transactions or we've consumed all from the current epoch, From 2d4cd71ba62bed2bb31fae20a459aa811f661d2c Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Wed, 11 Mar 2026 10:06:14 +0100 Subject: [PATCH 08/10] No fetch when in cache --- block/internal/da/async_block_retriever.go | 18 +++++++----------- block/internal/da/subscriber.go | 2 +- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/block/internal/da/async_block_retriever.go b/block/internal/da/async_block_retriever.go index 4da527a34f..17c5a5c1bb 100644 --- a/block/internal/da/async_block_retriever.go +++ b/block/internal/da/async_block_retriever.go @@ -183,17 +183,17 @@ func (f *asyncBlockRetriever) HandleEvent(ctx context.Context, ev datypes.Subscr // HandleCatchup fetches a single height via Retrieve and caches it. // Also applies the prefetch window for speculative forward fetching. -func (f *asyncBlockRetriever) HandleCatchup(ctx context.Context, height uint64) error { - f.fetchAndCacheBlock(ctx, height) - +func (f *asyncBlockRetriever) HandleCatchup(ctx context.Context, daHeight uint64) error { + if _, err := f.cache.Get(ctx, newBlockDataKey(daHeight)); err != nil { + f.fetchAndCacheBlock(ctx, daHeight) + } // Speculatively prefetch ahead. - target := height + f.prefetchWindow - for h := height + 1; h <= target; h++ { + target := daHeight + f.prefetchWindow + for h := daHeight + 1; h <= target; h++ { if err := ctx.Err(); err != nil { return err } - key := newBlockDataKey(h) - if _, err := f.cache.Get(ctx, key); err == nil { + if _, err := f.cache.Get(ctx, newBlockDataKey(h)); err == nil { continue // Already cached. 
} f.fetchAndCacheBlock(ctx, h) @@ -202,10 +202,6 @@ func (f *asyncBlockRetriever) HandleCatchup(ctx context.Context, height uint64) return nil } -// --------------------------------------------------------------------------- -// Cache helpers -// --------------------------------------------------------------------------- - // fetchAndCacheBlock fetches a block via Retrieve and caches it. func (f *asyncBlockRetriever) fetchAndCacheBlock(ctx context.Context, height uint64) { f.logger.Debug().Uint64("height", height).Msg("prefetching block") diff --git a/block/internal/da/subscriber.go b/block/internal/da/subscriber.go index cf9e4e488e..655681c970 100644 --- a/block/internal/da/subscriber.go +++ b/block/internal/da/subscriber.go @@ -199,7 +199,7 @@ func (s *Subscriber) runSubscription(ctx context.Context) error { local := s.localDAHeight.Load() isInline := ev.Height == local && s.localDAHeight.CompareAndSwap(local, local+1) - err = s.handler.HandleEvent(ctx, ev, isInline) + err = s.handler.HandleEvent(subCtx, ev, isInline) if isInline { if err == nil { s.headReached.Store(true) From e91860706cf9fae7cba41493892bfe18632ff0f2 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Wed, 11 Mar 2026 13:30:56 +0100 Subject: [PATCH 09/10] Updates --- apps/evm/server/force_inclusion_test.go | 2 +- block/internal/da/async_block_retriever.go | 13 +- .../internal/da/async_block_retriever_test.go | 5 +- block/internal/da/subscriber.go | 6 +- block/internal/da/subscriber_test.go | 98 +++++ block/internal/syncing/da_follower_test.go | 187 ++++++++ block/internal/syncing/syncer_backoff_test.go | 412 ------------------ 7 files changed, 300 insertions(+), 423 deletions(-) create mode 100644 block/internal/da/subscriber_test.go create mode 100644 block/internal/syncing/da_follower_test.go delete mode 100644 block/internal/syncing/syncer_backoff_test.go diff --git a/apps/evm/server/force_inclusion_test.go b/apps/evm/server/force_inclusion_test.go index 6be6e6ca76..a107a10d11 100644 --- 
a/apps/evm/server/force_inclusion_test.go +++ b/apps/evm/server/force_inclusion_test.go @@ -50,7 +50,7 @@ func (m *mockDA) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.B return nil, nil } -func (m *mockDA) Subscribe(_ context.Context, _ []byte) (<-chan da.SubscriptionEvent, error) { +func (m *mockDA) Subscribe(_ context.Context, _ []byte, _ bool) (<-chan da.SubscriptionEvent, error) { // Not needed in these tests; return a closed channel. ch := make(chan da.SubscriptionEvent) close(ch) diff --git a/block/internal/da/async_block_retriever.go b/block/internal/da/async_block_retriever.go index 17c5a5c1bb..4fe21a8b31 100644 --- a/block/internal/da/async_block_retriever.go +++ b/block/internal/da/async_block_retriever.go @@ -185,7 +185,9 @@ func (f *asyncBlockRetriever) HandleEvent(ctx context.Context, ev datypes.Subscr // Also applies the prefetch window for speculative forward fetching. func (f *asyncBlockRetriever) HandleCatchup(ctx context.Context, daHeight uint64) error { if _, err := f.cache.Get(ctx, newBlockDataKey(daHeight)); err != nil { - f.fetchAndCacheBlock(ctx, daHeight) + if err := f.fetchAndCacheBlock(ctx, daHeight); err != nil { + return err + } } // Speculatively prefetch ahead. target := daHeight + f.prefetchWindow @@ -196,14 +198,16 @@ func (f *asyncBlockRetriever) HandleCatchup(ctx context.Context, daHeight uint64 if _, err := f.cache.Get(ctx, newBlockDataKey(h)); err == nil { continue // Already cached. } - f.fetchAndCacheBlock(ctx, h) + if err := f.fetchAndCacheBlock(ctx, h); err != nil { + return err + } } return nil } // fetchAndCacheBlock fetches a block via Retrieve and caches it. 
-func (f *asyncBlockRetriever) fetchAndCacheBlock(ctx context.Context, height uint64) {
+func (f *asyncBlockRetriever) fetchAndCacheBlock(ctx context.Context, height uint64) error {
 	f.logger.Debug().Uint64("height", height).Msg("prefetching block")

 	result := f.client.Retrieve(ctx, height, f.namespace)
@@ -211,7 +215,7 @@ func (f *asyncBlockRetriever) fetchAndCacheBlock(ctx context.Context, height uin
 	switch result.Code {
 	case datypes.StatusHeightFromFuture:
 		f.logger.Debug().Uint64("height", height).Msg("block height not yet available - will retry")
-		return
+		return datypes.ErrHeightFromFuture
 	case datypes.StatusNotFound:
 		f.cacheBlock(ctx, height, result.Timestamp, nil)
 	case datypes.StatusSuccess:
@@ -228,6 +232,7 @@ func (f *asyncBlockRetriever) fetchAndCacheBlock(ctx context.Context, height uin
 			Str("status", result.Message).
 			Msg("failed to retrieve block - will retry")
 	}
+	return nil
 }

 // cacheBlock serializes and stores a block in the in-memory cache.
diff --git a/block/internal/da/async_block_retriever_test.go b/block/internal/da/async_block_retriever_test.go
index 0aded0dcce..75a2543cac 100644
--- a/block/internal/da/async_block_retriever_test.go
+++ b/block/internal/da/async_block_retriever_test.go
@@ -177,7 +177,7 @@ func TestAsyncBlockRetriever_HeightFromFuture(t *testing.T) {
 	// Wait a bit for catchup to attempt fetches.
 	require.Eventually(t, func() bool {
 		return fetcher.(*asyncBlockRetriever).subscriber.HasReachedHead()
-	}, 1250*time.Millisecond, time.Millisecond)
+	}, 5*time.Second, time.Millisecond)

 	// Cache should be empty since all heights are from the future.
block, err := fetcher.GetCachedBlock(ctx, 100) @@ -191,6 +191,9 @@ func TestAsyncBlockRetriever_StopGracefully(t *testing.T) { blockCh := make(chan datypes.SubscriptionEvent) client.On("Subscribe", mock.Anything, fiNs, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(blockCh), nil).Maybe() + client.On("Retrieve", mock.Anything, mock.Anything, fiNs).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, + }).Maybe() logger := zerolog.Nop() fetcher := NewAsyncBlockRetriever(client, logger, fiNs, 100*time.Millisecond, 100, 10) diff --git a/block/internal/da/subscriber.go b/block/internal/da/subscriber.go index 655681c970..e03c74e2f0 100644 --- a/block/internal/da/subscriber.go +++ b/block/internal/da/subscriber.go @@ -92,6 +92,7 @@ func NewSubscriber(cfg SubscriberConfig) *Subscriber { fetchBlockTimestamp: cfg.FetchBlockTimestamp, } s.localDAHeight.Store(cfg.StartHeight) + s.highestSeenDAHeight.Store(cfg.StartHeight) s.catchupSignal <- struct{}{} if len(s.namespaces) == 0 { @@ -319,11 +320,6 @@ func (s *Subscriber) runCatchup(ctx context.Context) { } local := s.localDAHeight.Load() - if local > s.highestSeenDAHeight.Load() { - s.headReached.Store(true) - return - } - // CAS claims this height — prevents followLoop from inline-processing. if !s.localDAHeight.CompareAndSwap(local, local+1) { // followLoop already advanced past this height via inline processing. 
diff --git a/block/internal/da/subscriber_test.go b/block/internal/da/subscriber_test.go new file mode 100644 index 0000000000..4f4a667652 --- /dev/null +++ b/block/internal/da/subscriber_test.go @@ -0,0 +1,98 @@ +package da + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + datypes "github.com/evstack/ev-node/pkg/da/types" + testmocks "github.com/evstack/ev-node/test/mocks" +) + +// MockSubscriberHandler mocks SubscriberHandler +type MockSubscriberHandler struct { + mock.Mock +} + +func (m *MockSubscriberHandler) HandleEvent(ctx context.Context, ev datypes.SubscriptionEvent, isInline bool) error { + args := m.Called(ctx, ev, isInline) + return args.Error(0) +} + +func (m *MockSubscriberHandler) HandleCatchup(ctx context.Context, height uint64) error { + args := m.Called(ctx, height) + return args.Error(0) +} + +func TestSubscriber_RunCatchup(t *testing.T) { + t.Run("success_sequence", func(t *testing.T) { + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + mockHandler := new(MockSubscriberHandler) + mockClient := testmocks.NewMockClient(t) + + sub := NewSubscriber(SubscriberConfig{ + Client: mockClient, + Logger: zerolog.Nop(), + Handler: mockHandler, + Namespaces: [][]byte{[]byte("ns")}, + StartHeight: 100, + DABlockTime: time.Millisecond, + }) + + // It should try 100, 101, then we return ErrHeightFromFuture at 102 + mockHandler.On("HandleCatchup", mock.Anything, uint64(100)).Return(nil).Once() + mockHandler.On("HandleCatchup", mock.Anything, uint64(101)).Return(nil).Once() + mockHandler.On("HandleCatchup", mock.Anything, uint64(102)).Return(datypes.ErrHeightFromFuture).Once() + + sub.runCatchup(ctx) + + mockHandler.AssertExpectations(t) + assert.Equal(t, uint64(102), sub.LocalDAHeight()) + assert.True(t, sub.HasReachedHead()) + }) + + t.Run("backoff_on_error", func(t *testing.T) { + ctx, cancel := context.WithCancel(t.Context()) + 
defer cancel() + + mockHandler := new(MockSubscriberHandler) + mockClient := testmocks.NewMockClient(t) + + sub := NewSubscriber(SubscriberConfig{ + Client: mockClient, + Logger: zerolog.Nop(), + Handler: mockHandler, + Namespaces: [][]byte{[]byte("ns")}, + StartHeight: 100, + DABlockTime: time.Millisecond, + }) + + var callCount int + + mockHandler.On("HandleCatchup", mock.Anything, uint64(100)). + Run(func(args mock.Arguments) { + callCount++ + }). + Return(errors.New("network failure")).Once() + + mockHandler.On("HandleCatchup", mock.Anything, uint64(100)). + Run(func(args mock.Arguments) { + callCount++ + cancel() + }). + Return(datypes.ErrHeightFromFuture).Once() + + sub.runCatchup(ctx) + + mockHandler.AssertExpectations(t) + assert.Equal(t, 2, callCount) + assert.Equal(t, uint64(100), sub.LocalDAHeight(), "should roll back to 100 on future error") + }) +} diff --git a/block/internal/syncing/da_follower_test.go b/block/internal/syncing/da_follower_test.go new file mode 100644 index 0000000000..10b6087363 --- /dev/null +++ b/block/internal/syncing/da_follower_test.go @@ -0,0 +1,187 @@ +package syncing + +import ( + "context" + "errors" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/block/internal/common" + datypes "github.com/evstack/ev-node/pkg/da/types" +) + +func TestDAFollower_HandleEvent(t *testing.T) { + tests := []struct { + name string + isInline bool + blobs [][]byte + mockEvents []common.DAHeightEvent + mockPipeErr error + expectedError string + }{ + { + name: "ignore_not_inline", + isInline: false, + }, + { + name: "error_no_blobs", + isInline: true, + blobs: [][]byte{}, + expectedError: "skip inline: no blobs", + }, + { + name: "error_no_complete_events", + isInline: true, + blobs: [][]byte{[]byte("blob")}, + mockEvents: []common.DAHeightEvent{}, + expectedError: "skip inline: no complete events", + }, + { + 
name: "error_pipe_fails", + isInline: true, + blobs: [][]byte{[]byte("blob")}, + mockEvents: []common.DAHeightEvent{{DaHeight: 100}}, + mockPipeErr: errors.New("pipe error"), + expectedError: "pipe error", + }, + { + name: "success", + isInline: true, + blobs: [][]byte{[]byte("blob")}, + mockEvents: []common.DAHeightEvent{{DaHeight: 100}}, + mockPipeErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + daRetriever := NewMockDARetriever(t) + ctx := t.Context() + + var pipedEvents []common.DAHeightEvent + pipeEvent := func(_ context.Context, ev common.DAHeightEvent) error { + pipedEvents = append(pipedEvents, ev) + return tt.mockPipeErr + } + + follower := &daFollower{ + retriever: daRetriever, + eventSink: common.EventSinkFunc(pipeEvent), + logger: zerolog.Nop(), + } + + ev := datypes.SubscriptionEvent{Height: 100, Blobs: tt.blobs} + + if tt.isInline && len(tt.blobs) > 0 { + daRetriever.On("ProcessBlobs", mock.Anything, tt.blobs, uint64(100)).Return(tt.mockEvents) + } + + err := follower.HandleEvent(ctx, ev, tt.isInline) + + if tt.expectedError != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedError) + } else { + require.NoError(t, err) + if tt.isInline && len(tt.blobs) > 0 { + assert.Len(t, pipedEvents, len(tt.mockEvents)) + } + } + }) + } +} + +func TestDAFollower_HandleCatchup(t *testing.T) { + t.Run("normal_sequential_fetch_success", func(t *testing.T) { + daRetriever := NewMockDARetriever(t) + ctx := t.Context() + + var pipedEvents []common.DAHeightEvent + pipeEvent := func(_ context.Context, ev common.DAHeightEvent) error { + pipedEvents = append(pipedEvents, ev) + return nil + } + + follower := &daFollower{ + retriever: daRetriever, + eventSink: common.EventSinkFunc(pipeEvent), + logger: zerolog.Nop(), + priorityHeights: make([]uint64, 0), + } + + events := []common.DAHeightEvent{{DaHeight: 100}} + daRetriever.On("RetrieveFromDA", mock.Anything, uint64(100)).Return(events, nil) + + err := 
follower.HandleCatchup(ctx, 100) + require.NoError(t, err) + assert.Len(t, pipedEvents, 1) + }) + + t.Run("normal_sequential_fetch_blob_not_found_ignored", func(t *testing.T) { + daRetriever := NewMockDARetriever(t) + ctx := t.Context() + + follower := &daFollower{ + retriever: daRetriever, + eventSink: common.EventSinkFunc(func(_ context.Context, _ common.DAHeightEvent) error { return nil }), + logger: zerolog.Nop(), + priorityHeights: make([]uint64, 0), + } + + daRetriever.On("RetrieveFromDA", mock.Anything, uint64(100)).Return(nil, datypes.ErrBlobNotFound) + + err := follower.HandleCatchup(ctx, 100) + require.NoError(t, err) + }) + + t.Run("normal_sequential_fetch_error_propagated", func(t *testing.T) { + daRetriever := NewMockDARetriever(t) + ctx := t.Context() + + follower := &daFollower{ + retriever: daRetriever, + eventSink: common.EventSinkFunc(func(_ context.Context, _ common.DAHeightEvent) error { return nil }), + logger: zerolog.Nop(), + priorityHeights: make([]uint64, 0), + } + + daRetriever.On("RetrieveFromDA", mock.Anything, uint64(100)).Return(nil, datypes.ErrHeightFromFuture) + + err := follower.HandleCatchup(ctx, 100) + require.ErrorIs(t, err, datypes.ErrHeightFromFuture) + }) + + t.Run("priority_queue_handled_first", func(t *testing.T) { + daRetriever := NewMockDARetriever(t) + ctx := t.Context() + + var pipedEvents []common.DAHeightEvent + pipeEvent := func(_ context.Context, ev common.DAHeightEvent) error { + pipedEvents = append(pipedEvents, ev) + return nil + } + + follower := &daFollower{ + retriever: daRetriever, + eventSink: common.EventSinkFunc(pipeEvent), + logger: zerolog.Nop(), + priorityHeights: []uint64{105}, // Priority height to fetch + } + + daRetriever.On("RetrieveFromDA", mock.Anything, uint64(105)).Return([]common.DAHeightEvent{{DaHeight: 105}}, nil).Once() + daRetriever.On("RetrieveFromDA", mock.Anything, uint64(100)).Return([]common.DAHeightEvent{{DaHeight: 100}}, nil).Once() + + err := follower.HandleCatchup(ctx, 100) + 
require.NoError(t, err) + + // It should pipe 105 then 100 + assert.Len(t, pipedEvents, 2) + assert.Equal(t, uint64(105), pipedEvents[0].DaHeight) + assert.Equal(t, uint64(100), pipedEvents[1].DaHeight) + assert.Empty(t, follower.priorityHeights) + }) +} diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go deleted file mode 100644 index f22638f5a6..0000000000 --- a/block/internal/syncing/syncer_backoff_test.go +++ /dev/null @@ -1,412 +0,0 @@ -package syncing - -import ( - "context" - "errors" - "sync" - "sync/atomic" - "testing" - "testing/synctest" - "time" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/evstack/ev-node/block/internal/common" - datypes "github.com/evstack/ev-node/pkg/da/types" - "github.com/evstack/ev-node/pkg/genesis" - "github.com/evstack/ev-node/types" -) - -// TestDAFollower_BackoffOnCatchupError verifies that the DAFollower implements -// proper backoff behavior when encountering different types of DA layer errors. 
-func TestDAFollower_BackoffOnCatchupError(t *testing.T) { - tests := map[string]struct { - daBlockTime time.Duration - error error - expectsBackoff bool - exitsCatchup bool // ErrCaughtUp → clean exit, no backoff, no advance - description string - }{ - "generic_error_triggers_backoff": { - daBlockTime: 1 * time.Second, - error: errors.New("network failure"), - expectsBackoff: true, - description: "Generic DA errors should trigger backoff", - }, - "height_from_future_stops_catchup": { - daBlockTime: 500 * time.Millisecond, - error: datypes.ErrHeightFromFuture, - exitsCatchup: true, - description: "Height from future should stop catchup (da.ErrCaughtUp), not backoff", - }, - "blob_not_found_no_backoff": { - daBlockTime: 1 * time.Second, - error: datypes.ErrBlobNotFound, - expectsBackoff: false, - description: "ErrBlobNotFound should not trigger backoff", - }, - "zero_block_time_fallback": { - daBlockTime: 0, // Should fallback to 2s - error: errors.New("some error"), - expectsBackoff: true, - description: "Zero block time should use 2s fallback", - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - synctest.Test(t, func(t *testing.T) { - ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) - defer cancel() - - daRetriever := NewMockDARetriever(t) - - pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } - - // Replace manual test usages with Config. 
- daClient, eventCh := setupMockDAClient(t) - follower := NewDAFollower(DAFollowerConfig{ - Client: daClient, - Retriever: daRetriever, - Logger: zerolog.Nop(), - EventSink: common.EventSinkFunc(pipeEvent), - Namespace: []byte("ns"), - StartDAHeight: 100, - DABlockTime: tc.daBlockTime, - }).(*daFollower) - - ctx, subCancel := context.WithCancel(ctx) - defer subCancel() - - follower.Start(ctx) - eventCh <- datypes.SubscriptionEvent{Height: 102} - - var callTimes []time.Time - callCount := 0 - - // First call - returns test error - daRetriever.On("RetrieveFromDA", mock.Anything, uint64(100)). - Run(func(args mock.Arguments) { - callTimes = append(callTimes, time.Now()) - callCount++ - }). - Return(nil, tc.error).Once() - - if tc.exitsCatchup { - // ErrCaughtUp causes runCatchup to exit cleanly after 1 call. - // No second mock needed. - } else if tc.expectsBackoff { - daRetriever.On("RetrieveFromDA", mock.Anything, uint64(100)). - Run(func(args mock.Arguments) { - callTimes = append(callTimes, time.Now()) - callCount++ - subCancel() - }). - Return(nil, datypes.ErrBlobNotFound).Once() - } else { - daRetriever.On("RetrieveFromDA", mock.Anything, uint64(101)). - Run(func(args mock.Arguments) { - callTimes = append(callTimes, time.Now()) - callCount++ - subCancel() - }). 
- Return(nil, datypes.ErrBlobNotFound).Once() - } - - // Start already called above to avoid race on eventCh push vs Start - - if tc.exitsCatchup { - for !follower.HasReachedHead() && ctx.Err() == nil { - time.Sleep(time.Millisecond) - } - } else { - <-ctx.Done() - } - - if tc.exitsCatchup { - assert.Equal(t, 1, callCount, "should exit after single call (ErrCaughtUp)") - assert.True(t, follower.HasReachedHead(), "should mark head as reached") - } else if tc.expectsBackoff { - require.Len(t, callTimes, 2, "should make exactly 2 calls with backoff") - - timeBetweenCalls := callTimes[1].Sub(callTimes[0]) - expectedDelay := tc.daBlockTime - if expectedDelay == 0 { - expectedDelay = 2 * time.Second - } - - assert.GreaterOrEqual(t, timeBetweenCalls, expectedDelay, - "second call should be delayed by backoff duration (expected ~%v, got %v)", - expectedDelay, timeBetweenCalls) - } else { - assert.GreaterOrEqual(t, callCount, 2, "should continue without significant delay") - if len(callTimes) >= 2 { - timeBetweenCalls := callTimes[1].Sub(callTimes[0]) - assert.Less(t, timeBetweenCalls, 120*time.Millisecond, - "should not have backoff delay for ErrBlobNotFound") - } - } - }) - }) - } -} - -// TestDAFollower_BackoffResetOnSuccess verifies that backoff is properly reset -// after a successful DA retrieval. 
-func TestDAFollower_BackoffResetOnSuccess(t *testing.T) { - synctest.Test(t, func(t *testing.T) { - ctx, cancel := context.WithTimeout(t.Context(), 15*time.Second) - defer cancel() - - addr, pub, signer := buildSyncTestSigner(t) - gen := backoffTestGenesis(addr) - - daRetriever := NewMockDARetriever(t) - - pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } - - daClient, eventCh := setupMockDAClient(t) - follower := NewDAFollower(DAFollowerConfig{ - Client: daClient, - Retriever: daRetriever, - Logger: zerolog.Nop(), - EventSink: common.EventSinkFunc(pipeEvent), - Namespace: []byte("ns"), - StartDAHeight: 100, - DABlockTime: 1 * time.Second, - }).(*daFollower) - - ctx, subCancel := context.WithCancel(ctx) - defer subCancel() - - follower.Start(ctx) - eventCh <- datypes.SubscriptionEvent{Height: 105} - - var callTimes []time.Time - - // First call - error (should trigger backoff) - daRetriever.On("RetrieveFromDA", mock.Anything, uint64(100)). - Run(func(args mock.Arguments) { - callTimes = append(callTimes, time.Now()) - }). - Return(nil, errors.New("temporary failure")).Once() - - // Second call - success - _, header := makeSignedHeaderBytes(t, gen.ChainID, 1, addr, pub, signer, nil, nil, nil) - data := &types.Data{ - Metadata: &types.Metadata{ - ChainID: gen.ChainID, - Height: 1, - Time: uint64(time.Now().UnixNano()), - }, - } - event := common.DAHeightEvent{ - Header: header, - Data: data, - DaHeight: 100, - } - - daRetriever.On("RetrieveFromDA", mock.Anything, uint64(100)). - Run(func(args mock.Arguments) { - callTimes = append(callTimes, time.Now()) - }). - Return([]common.DAHeightEvent{event}, nil).Once() - - // Third call - should happen immediately after success (DA height incremented) - daRetriever.On("RetrieveFromDA", mock.Anything, uint64(101)). - Run(func(args mock.Arguments) { - callTimes = append(callTimes, time.Now()) - subCancel() - }). 
- Return(nil, datypes.ErrBlobNotFound).Once() - - <-ctx.Done() - - require.Len(t, callTimes, 3, "should make exactly 3 calls") - - delay1to2 := callTimes[1].Sub(callTimes[0]) - assert.GreaterOrEqual(t, delay1to2, 1*time.Second, - "should have backed off between error and success (got %v)", delay1to2) - - delay2to3 := callTimes[2].Sub(callTimes[1]) - assert.Less(t, delay2to3, 100*time.Millisecond, - "should continue immediately after success (got %v)", delay2to3) - }) -} - -// TestDAFollower_CatchupThenReachHead verifies the catchup flow: -// sequential fetch from local → highest → mark head reached. -func TestDAFollower_CatchupThenReachHead(t *testing.T) { - synctest.Test(t, func(t *testing.T) { - ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) - defer cancel() - - daRetriever := NewMockDARetriever(t) - - pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } - - daClient, eventCh := setupMockDAClient(t) - follower := NewDAFollower(DAFollowerConfig{ - Client: daClient, - Retriever: daRetriever, - Logger: zerolog.Nop(), - EventSink: common.EventSinkFunc(pipeEvent), - Namespace: []byte("ns"), - StartDAHeight: 3, - DABlockTime: 500 * time.Millisecond, - }).(*daFollower) - - follower.Start(ctx) - eventCh <- datypes.SubscriptionEvent{Height: 5} - - var fetchedHeights []uint64 - - for h := uint64(3); h <= 5; h++ { - daRetriever.On("RetrieveFromDA", mock.Anything, h). - Run(func(args mock.Arguments) { - fetchedHeights = append(fetchedHeights, h) - }). 
- Return(nil, datypes.ErrBlobNotFound).Once() - } - - for !follower.HasReachedHead() && ctx.Err() == nil { - time.Sleep(time.Millisecond) - } - - assert.True(t, follower.HasReachedHead(), "should have reached DA head") - // Heights 3, 4, 5 processed; local now at 6 which > highest (5) → caught up - assert.Equal(t, []uint64{3, 4, 5}, fetchedHeights, "should have fetched heights sequentially") - }) -} - -// TestDAFollower_InlineProcessing verifies the fast path: when the subscription -// delivers blobs at the current localDAHeight, HandleEvent processes -// them inline via ProcessBlobs (not RetrieveFromDA). -func TestDAFollower_InlineProcessing(t *testing.T) { - t.Run("processes_blobs_inline_when_caught_up", func(t *testing.T) { - daRetriever := NewMockDARetriever(t) - - var mx sync.Mutex - var pipedEvents []common.DAHeightEvent - pipeEvent := func(_ context.Context, ev common.DAHeightEvent) error { - mx.Lock() - defer mx.Unlock() - pipedEvents = append(pipedEvents, ev) - return nil - } - - daClient, eventCh := setupMockDAClient(t) - follower := NewDAFollower(DAFollowerConfig{ - Client: daClient, - Retriever: daRetriever, - Logger: zerolog.Nop(), - EventSink: common.EventSinkFunc(pipeEvent), - Namespace: []byte("ns"), - StartDAHeight: 10, - DABlockTime: 500 * time.Millisecond, - }).(*daFollower) - - blobs := [][]byte{[]byte("header-blob"), []byte("data-blob")} - expectedEvents := []common.DAHeightEvent{ - {DaHeight: 10, Source: common.SourceDA}, - } - - daRetriever.On("ProcessBlobs", mock.Anything, blobs, uint64(10)).Return(expectedEvents).Once() - // Mock RetrieveFromDA for height 10 in case background catch-up fires before HandleEvent. 
- daRetriever.On("RetrieveFromDA", mock.Anything, uint64(10)).Return(nil, datypes.ErrHeightFromFuture).Maybe() - - // Simulate catchup loop to verify it advances naturally - require.NoError(t, follower.Start(t.Context())) - eventCh <- datypes.SubscriptionEvent{Height: 10, Blobs: blobs} // Signal current height - - require.Eventually(t, func() bool { return len(pipedEvents) == 1 }, 10*time.Millisecond, time.Millisecond) - - // Verify: ProcessBlobs was called, events were piped, height advanced - assert.Equal(t, uint64(10), pipedEvents[0].DaHeight) - assert.Equal(t, uint64(11), follower.subscriber.LocalDAHeight(), "localDAHeight should advance past processed height") - assert.True(t, follower.HasReachedHead(), "should mark head as reached after inline processing") - }) - - t.Run("falls_through_to_catchup_when_behind", func(t *testing.T) { - daRetriever := NewMockDARetriever(t) - - pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } - - daClient, subChan := setupMockDAClient(t) - follower := NewDAFollower(DAFollowerConfig{ - Client: daClient, - Retriever: daRetriever, - Logger: zerolog.Nop(), - EventSink: common.EventSinkFunc(pipeEvent), - Namespace: []byte("ns"), - StartDAHeight: 10, - DABlockTime: 1 * time.Millisecond, - }).(*daFollower) - - blobs := [][]byte{[]byte("blob")} - subChan <- datypes.SubscriptionEvent{Height: 15, Blobs: blobs} - - daRetriever.On("RetrieveFromDA", mock.Anything, uint64(10)).Return([]common.DAHeightEvent{}, nil).Maybe() - - var updated atomic.Bool - notifyFn := func(args mock.Arguments) { updated.Store(true) } - daRetriever.On("RetrieveFromDA", mock.Anything, uint64(11)).Run(notifyFn).Return(nil, datypes.ErrHeightFromFuture).Maybe() - require.NoError(t, follower.Start(t.Context())) - - require.Eventually(t, func() bool { return updated.Load() }, 10*time.Millisecond, time.Millisecond) - // ProcessBlobs should NOT have been called - daRetriever.AssertNotCalled(t, "ProcessBlobs", mock.Anything, mock.Anything, 
mock.Anything) - assert.Equal(t, uint64(11), follower.subscriber.LocalDAHeight(), "localDAHeight should not change") - assert.Equal(t, uint64(15), follower.subscriber.HighestSeenDAHeight(), "highestSeen should be updated") - }) - - t.Run("falls_through_when_no_blobs", func(t *testing.T) { - daRetriever := NewMockDARetriever(t) - - pipeEvent := func(_ context.Context, _ common.DAHeightEvent) error { return nil } - - daClient, eventCh := setupMockDAClient(t) - follower := NewDAFollower(DAFollowerConfig{ - Client: daClient, - Retriever: daRetriever, - Logger: zerolog.Nop(), - EventSink: common.EventSinkFunc(pipeEvent), - Namespace: []byte("ns"), - StartDAHeight: 10, - DABlockTime: 500 * time.Millisecond, - }).(*daFollower) - - // Mock RetrieveFromDA because falling through inline processing triggers catchup. - daRetriever.On("RetrieveFromDA", mock.Anything, uint64(10)).Return(nil, datypes.ErrHeightFromFuture).Maybe() - require.NoError(t, follower.Start(t.Context())) - - // HandleEvent at current height but no blobs — should fall through - // Subscriber.HandleEvent updates highest internally even if not processed. - eventCh <- datypes.SubscriptionEvent{ - Height: 10, - Blobs: nil, - } - - // ProcessBlobs should NOT have been called - daRetriever.AssertNotCalled(t, "ProcessBlobs", mock.Anything, mock.Anything, mock.Anything) - require.Eventually(t, func() bool { - return follower.subscriber.LocalDAHeight() == 10 && follower.subscriber.HighestSeenDAHeight() == 10 - }, 100*time.Millisecond, 2*time.Millisecond) - assert.Equal(t, uint64(10), follower.subscriber.LocalDAHeight(), "localDAHeight should not change") - assert.Equal(t, uint64(10), follower.subscriber.HighestSeenDAHeight(), "highestSeen should be updated") - }) -} - -// backoffTestGenesis creates a test genesis for the backoff tests. 
-func backoffTestGenesis(addr []byte) genesis.Genesis { - return genesis.Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: time.Now().Add(-time.Hour), - ProposerAddress: addr, - DAStartHeight: 100, - } -} From aca762fbb4b0d0249ac724b5ffabf2e26fa15b2d Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Wed, 11 Mar 2026 15:17:00 +0100 Subject: [PATCH 10/10] Review feedback --- block/internal/da/async_block_retriever_test.go | 2 +- block/internal/syncing/da_retriever.go | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/block/internal/da/async_block_retriever_test.go b/block/internal/da/async_block_retriever_test.go index 75a2543cac..2e5ead5278 100644 --- a/block/internal/da/async_block_retriever_test.go +++ b/block/internal/da/async_block_retriever_test.go @@ -177,7 +177,7 @@ func TestAsyncBlockRetriever_HeightFromFuture(t *testing.T) { // Wait a bit for catchup to attempt fetches. require.Eventually(t, func() bool { return fetcher.(*asyncBlockRetriever).subscriber.HasReachedHead() - }, 1250*time.Second, time.Millisecond) + }, time.Second, time.Millisecond) // Cache should be empty since all heights are from the future. 
block, err := fetcher.GetCachedBlock(ctx, 100) diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 691b2d1bb6..cefb5af6a2 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "sync" "github.com/rs/zerolog" "google.golang.org/protobuf/proto" @@ -34,6 +35,7 @@ type daRetriever struct { genesis genesis.Genesis logger zerolog.Logger + mu sync.Mutex // transient cache, only full event need to be passed to the syncer // on restart, will be refetch as da height is updated by syncer pendingHeaders map[uint64]*types.SignedHeader @@ -154,15 +156,15 @@ func (r *daRetriever) validateBlobResponse(res datypes.ResultRetrieve, daHeight // ProcessBlobs processes raw blob bytes to extract headers and data and returns height events. // This is the public interface used by the DAFollower for inline subscription processing. -// -// NOT thread-safe: the caller (DAFollower) must ensure exclusive access via CAS -// on localDAHeight before calling this method. func (r *daRetriever) ProcessBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { return r.processBlobs(ctx, blobs, daHeight) } // processBlobs processes retrieved blobs to extract headers and data and returns height events func (r *daRetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { + r.mu.Lock() + defer r.mu.Unlock() + // Decode all blobs for _, bz := range blobs { if len(bz) == 0 {