Commit 00d05e4
[chore] Unify persistentQueue fields into QueueMetadata (open-telemetry#13140)
Embed QueueMetadata directly into persistentQueue, replacing the scattered metadata fields:

1. Add QueueMetadata as a member of persistentQueue.
2. Delete the now-redundant fields (queueSize, readIndex, writeIndex, …).

Relates to open-telemetry#13126
1 parent: 693ba1d
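For context, the field accesses in the diff below imply roughly the following shape for the unified metadata. This is a sketch inferred from this commit only; the actual definition lives in the internal persistentqueue package and may differ:

```go
// Sketch only: fields inferred from the pq.metadata accesses in this commit;
// the real QueueMetadata is defined in the internal persistentqueue package.
type QueueMetadata struct {
	ReadIndex                uint64   // index of the next item to read
	WriteIndex               uint64   // index of the next item to write
	CurrentlyDispatchedItems []uint64 // indices handed to consumers but not yet finished
	QueueSize                int64    // current size, in the configured sizer's units
}
```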

2 files changed: +23 −26 lines

exporter/exporterhelper/internal/queuebatch/persistent_queue.go

Lines changed: 20 additions & 23 deletions
@@ -98,11 +98,8 @@ type persistentQueue[T any] struct {
 	mu              sync.Mutex
 	hasMoreElements *sync.Cond
 	hasMoreSpace    *cond
-	readIndex                uint64
-	writeIndex               uint64
-	currentlyDispatchedItems []uint64
-	queueSize                int64
-	refClient                int64
+	metadata  *persistentqueue.QueueMetadata
+	refClient int64
 	stopped   bool
 
 	sizerTypeMismatch atomic.Bool
@@ -137,7 +134,7 @@ func (pq *persistentQueue[T]) Start(ctx context.Context, host component.Host) er
 func (pq *persistentQueue[T]) Size() int64 {
 	pq.mu.Lock()
 	defer pq.mu.Unlock()
-	return pq.queueSize
+	return pq.metadata.QueueSize
 }
 
 func (pq *persistentQueue[T]) Capacity() int64 {
@@ -167,11 +164,11 @@ func (pq *persistentQueue[T]) initPersistentContiguousStorage(ctx context.Contex
 
 	err := pq.client.Batch(ctx, riOp, wiOp)
 	if err == nil {
-		pq.readIndex, err = bytesToItemIndex(riOp.Value)
+		pq.metadata.ReadIndex, err = bytesToItemIndex(riOp.Value)
 	}
 
 	if err == nil {
-		pq.writeIndex, err = bytesToItemIndex(wiOp.Value)
+		pq.metadata.WriteIndex, err = bytesToItemIndex(wiOp.Value)
 	}
 
 	if err != nil {
@@ -180,11 +177,11 @@ func (pq *persistentQueue[T]) initPersistentContiguousStorage(ctx context.Contex
 		} else {
 			pq.logger.Error("Failed getting read/write index, starting with new ones", zap.Error(err))
 		}
-		pq.readIndex = 0
-		pq.writeIndex = 0
+		pq.metadata.ReadIndex = 0
+		pq.metadata.WriteIndex = 0
 	}
 
-	queueSize := pq.writeIndex - pq.readIndex
+	queueSize := pq.metadata.WriteIndex - pq.metadata.ReadIndex
 
 	// If the queue is sized by the number of requests, no need to read the queue size from storage.
 	if queueSize > 0 && !pq.isRequestSized {
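The hunk above restores the read and write indices from storage via bytesToItemIndex, falling back to zero when decoding fails, and then uses the index gap to decide whether a stored queue size needs to be read back. The helper itself is outside this diff; here is a minimal sketch of what such decoding could look like, assuming the index is persisted as an 8-byte little-endian value (the real encoding and error values are defined elsewhere in persistent_queue.go and may differ):

```go
package queuebatch

import (
	"encoding/binary"
	"errors"
)

// errInvalidValue is a stand-in for whatever sentinel error the real code uses.
var errInvalidValue = errors.New("invalid index value")

// bytesToItemIndex decodes a queue index stored as 8 little-endian bytes.
// Sketch under stated assumptions; the actual helper may differ.
func bytesToItemIndex(buf []byte) (uint64, error) {
	if len(buf) < 8 {
		return 0, errInvalidValue
	}
	return binary.LittleEndian.Uint64(buf), nil
}
```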
@@ -359,7 +356,7 @@ func (pq *persistentQueue[T]) Offer(ctx context.Context, req T) error {
 // putInternal is the internal version that requires caller to hold the mutex lock.
 func (pq *persistentQueue[T]) putInternal(ctx context.Context, req T) error {
 	reqSize := pq.set.sizer.Sizeof(req)
-	for pq.queueSize+reqSize > pq.set.capacity {
+	for pq.metadata.QueueSize+reqSize > pq.set.capacity {
 		if !pq.set.blockOnOverflow {
 			return ErrQueueIsFull
 		}
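When blockOnOverflow is set, this loop waits until a consumer frees space instead of returning ErrQueueIsFull; the wakeups come from the pq.hasMoreSpace.Signal() calls visible in later hunks. The internal cond type is not part of this diff, so the following is a self-contained illustration of a context-aware condition variable in that spirit — an assumption about the internal type, not the collector's actual implementation:

```go
package queuebatch

import (
	"context"
	"sync"
)

// cond is an illustrative context-aware condition variable: Wait blocks until
// Signal is called or ctx is canceled. Assumed shape, not the real internal type.
type cond struct {
	mu *sync.Mutex
	ch chan struct{}
}

func newCond(mu *sync.Mutex) *cond {
	return &cond{mu: mu, ch: make(chan struct{}, 1)}
}

// Signal wakes at most one waiter; a signal with no waiter pending is buffered.
func (c *cond) Signal() {
	select {
	case c.ch <- struct{}{}:
	default:
	}
}

// Wait releases the lock while blocked and reacquires it before returning,
// so a putInternal-style caller can keep re-checking capacity in a loop.
func (c *cond) Wait(ctx context.Context) error {
	c.mu.Unlock()
	defer c.mu.Lock()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-c.ch:
		return nil
	}
}
```

With a helper like this, the blocking branch of the loop could plausibly read `if err := pq.hasMoreSpace.Wait(ctx); err != nil { return err }`, though that body is elided from this diff.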
@@ -418,11 +415,11 @@ func (pq *persistentQueue[T]) Read(ctx context.Context) (context.Context, T, Don
 	}
 
 	// Read until either a successful retrieved element or no more elements in the storage.
-	for pq.readIndex != pq.writeIndex {
+	for pq.metadata.ReadIndex != pq.metadata.WriteIndex {
 		index, req, consumed := pq.getNextItem(ctx)
 		// Ensure the used size and the channel size are in sync.
-		if pq.readIndex == pq.writeIndex {
-			pq.queueSize = 0
+		if pq.metadata.ReadIndex == pq.metadata.WriteIndex {
+			pq.metadata.QueueSize = 0
 			pq.hasMoreSpace.Signal()
 		}
 		if consumed {
@@ -442,7 +439,7 @@ func (pq *persistentQueue[T]) Read(ctx context.Context) (context.Context, T, Don
 // finished, the index should be called with onDone to clean up the storage. If no new item is available,
 // returns false.
 func (pq *persistentQueue[T]) getNextItem(ctx context.Context) (uint64, T, bool) {
-	index := pq.readIndex
+	index := pq.metadata.ReadIndex
 	// Increase here, so even if errors happen below, it always iterates
 	pq.readIndex++
 	pq.currentlyDispatchedItems = append(pq.currentlyDispatchedItems, index)
@@ -496,12 +493,12 @@ func (pq *persistentQueue[T]) onDone(index uint64, elSize int64, consumeErr erro
 		pq.mu.Unlock()
 	}()
 
-	pq.queueSize -= elSize
+	pq.metadata.QueueSize -= elSize
 	// The size might be not in sync with the queue in case it's restored from the disk
 	// because we don't flush the current queue size on the disk on every read/write.
 	// In that case we need to make sure it doesn't go below 0.
-	if pq.queueSize < 0 {
-		pq.queueSize = 0
+	if pq.metadata.QueueSize < 0 {
+		pq.metadata.QueueSize = 0
 	}
 	pq.hasMoreSpace.Signal()
 
@@ -593,11 +590,11 @@ func (pq *persistentQueue[T]) retrieveAndEnqueueNotDispatchedReqs(ctx context.Co
 
 // itemDispatchingFinish removes the item from the list of currently dispatched items and deletes it from the persistent queue
 func (pq *persistentQueue[T]) itemDispatchingFinish(ctx context.Context, index uint64) error {
-	lenCDI := len(pq.currentlyDispatchedItems)
+	lenCDI := len(pq.metadata.CurrentlyDispatchedItems)
 	for i := 0; i < lenCDI; i++ {
-		if pq.currentlyDispatchedItems[i] == index {
-			pq.currentlyDispatchedItems[i] = pq.currentlyDispatchedItems[lenCDI-1]
-			pq.currentlyDispatchedItems = pq.currentlyDispatchedItems[:lenCDI-1]
+		if pq.metadata.CurrentlyDispatchedItems[i] == index {
+			pq.metadata.CurrentlyDispatchedItems[i] = pq.metadata.CurrentlyDispatchedItems[lenCDI-1]
+			pq.metadata.CurrentlyDispatchedItems = pq.metadata.CurrentlyDispatchedItems[:lenCDI-1]
 			break
 		}
 	}
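itemDispatchingFinish uses the classic swap-with-last idiom: overwrite the matched entry with the final element and shrink the slice by one, giving O(1) removal at the cost of element order. A self-contained sketch of the same pattern:

```go
package main

import "fmt"

// removeUnordered deletes the first occurrence of v from s by swapping it
// with the last element and truncating, mirroring itemDispatchingFinish.
// Element order is not preserved.
func removeUnordered(s []uint64, v uint64) []uint64 {
	for i := range s {
		if s[i] == v {
			s[i] = s[len(s)-1]
			return s[:len(s)-1]
		}
	}
	return s
}

func main() {
	dispatched := []uint64{4, 7, 9, 12}
	fmt.Println(removeUnordered(dispatched, 7)) // prints [4 12 9]
}
```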

exporter/exporterhelper/internal/queuebatch/persistent_queue_test.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -679,10 +679,10 @@ func TestPersistentQueue_CurrentlyProcessedItems(t *testing.T) {
 	requireCurrentlyDispatchedItemsEqual(t, newPs, []uint64{})
 	assert.Equal(t, int64(0), newPs.Size())
 	// The writeIndex should be now set accordingly
-	require.EqualValues(t, 6, newPs.writeIndex)
+	require.EqualValues(t, 6, newPs.metadata.WriteIndex)
 
 	// There should be no items left in the storage
-	for i := uint64(0); i < newPs.writeIndex; i++ {
+	for i := uint64(0); i < newPs.metadata.WriteIndex; i++ {
 		bb, err := newPs.client.Get(context.Background(), getItemKey(i))
 		require.NoError(t, err)
 		require.Nil(t, bb)
@@ -1264,5 +1264,5 @@ func TestPersistentQueue_SizerLegacyFormatMigration(t *testing.T) {
 func requireCurrentlyDispatchedItemsEqual(t *testing.T, pq *persistentQueue[uint64], compare []uint64) {
 	pq.mu.Lock()
 	defer pq.mu.Unlock()
-	assert.ElementsMatch(t, compare, pq.currentlyDispatchedItems)
+	assert.ElementsMatch(t, compare, pq.metadata.CurrentlyDispatchedItems)
 }
