package gtshred

import (
+    "context"
    "fmt"
    "sync"
    "time"
@@ -16,22 +17,38 @@ const (
    maxBlockSize = 128 * 1024 * 1024 // 128MB maximum block size (matches Solana)
)

+// ShredGroupWithTimestamp is a ShredGroup with a timestamp for tracking when the group was created (when the first shred was received).
+type ShredGroupWithTimestamp struct {
+    *ShredGroup
+    Timestamp time.Time
+}
+
type Processor struct {
-    groups          map[string]*ShredGroup
-    mu              sync.Mutex
-    cb              ProcessorCallback
-    completedBlocks map[string]time.Time
+    // cb is the callback to call when a block is fully reassembled
+    cb ProcessorCallback
+
+    // groups is a cache of shred groups currently being processed.
+    groups   map[string]*ShredGroupWithTimestamp
+    groupsMu sync.RWMutex
+
+    // completedBlocks is a cache of block hashes that have been fully reassembled and should no longer be processed.
+    completedBlocks   map[string]time.Time
+    completedBlocksMu sync.RWMutex
+
+    // cleanupInterval is the interval at which stale groups are cleaned up and completed blocks are removed
    cleanupInterval time.Duration
}

+// ProcessorCallback is the interface for processor callbacks.
type ProcessorCallback interface {
    ProcessBlock(height uint64, blockHash []byte, block []byte) error
}

-func NewProcessor(cb ProcessorCallback, cleanupInterval time.Duration) *Processor {
+// NewProcessor creates a new Processor with the given callback and cleanup interval.
+func NewProcessor(ctx context.Context, cb ProcessorCallback, cleanupInterval time.Duration) *Processor {
    p := &Processor{
        cb:              cb,
-        groups:          make(map[string]*ShredGroup),
+        groups:          make(map[string]*ShredGroupWithTimestamp),
        completedBlocks: make(map[string]time.Time),
        cleanupInterval: cleanupInterval,
    }
@@ -43,6 +60,8 @@ func NewProcessor(cb ProcessorCallback, cleanupInterval time.Duration) *Processo

    for {
        select {
+        case <-ctx.Done():
+            return
        case now := <-ticker.C:
            p.cleanupStaleGroups(now)
        }
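
Since the constructor now takes a context, the background cleanup loop above is stopped by cancelling that context rather than by a separate Stop method. A minimal usage sketch follows; it assumes NewProcessor runs the ticker loop shown above in its own goroutine, and noopCallback plus the 30-second interval are illustrative names only, not part of this change:

package gtshred

import (
    "context"
    "time"
)

// noopCallback is a hypothetical ProcessorCallback used only for this sketch.
type noopCallback struct{}

func (noopCallback) ProcessBlock(height uint64, blockHash []byte, block []byte) error {
    return nil
}

func exampleShutdown() {
    ctx, cancel := context.WithCancel(context.Background())

    // The processor's cleanup goroutine keeps ticking until ctx is cancelled.
    p := NewProcessor(ctx, noopCallback{}, 30*time.Second)
    _ = p

    // Cancelling the context selects the <-ctx.Done() branch above,
    // stopping the periodic cleanupStaleGroups calls.
    cancel()
}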
@@ -58,28 +77,42 @@ func (p *Processor) CollectShred(shred *gturbine.Shred) error {
        return fmt.Errorf("nil shred")
    }

+    p.completedBlocksMu.RLock()
    // Skip shreds from already processed blocks
-    if _, completed := p.completedBlocks[string(shred.BlockHash)]; completed {
+    _, completed := p.completedBlocks[string(shred.BlockHash)]
+    p.completedBlocksMu.RUnlock()
+    if completed {
        return nil
    }

-    p.mu.Lock()
-    defer p.mu.Unlock()
+    // Take read lock on groups to check if group exists, and get it if it does.
+    p.groupsMu.RLock()
    group, ok := p.groups[shred.GroupID]
+    p.groupsMu.RUnlock()
+
    if !ok {
-        group := &ShredGroup{
-            DataShreds:          make([]*gturbine.Shred, shred.TotalDataShreds),
-            RecoveryShreds:      make([]*gturbine.Shred, shred.TotalRecoveryShreds),
-            TotalDataShreds:     shred.TotalDataShreds,
-            TotalRecoveryShreds: shred.TotalRecoveryShreds,
-            GroupID:             shred.GroupID,
-            BlockHash:           shred.BlockHash,
-            Height:              shred.Height,
-            OriginalSize:        shred.FullDataSize,
+        // If the group doesn't exist, create it and add the shred
+        group := &ShredGroupWithTimestamp{
+            ShredGroup: &ShredGroup{
+                DataShreds:          make([]*gturbine.Shred, shred.TotalDataShreds),
+                RecoveryShreds:      make([]*gturbine.Shred, shred.TotalRecoveryShreds),
+                TotalDataShreds:     shred.TotalDataShreds,
+                TotalRecoveryShreds: shred.TotalRecoveryShreds,
+                GroupID:             shred.GroupID,
+                BlockHash:           shred.BlockHash,
+                Height:              shred.Height,
+                OriginalSize:        shred.FullDataSize,
+            },
+            Timestamp: time.Now(), // Record the time the group was created consumer side.
        }
+
        group.DataShreds[shred.Index] = shred

+        // Take write lock to add the group
+        p.groupsMu.Lock()
        p.groups[shred.GroupID] = group
+        p.groupsMu.Unlock()
+
        return nil
    }

@@ -111,19 +144,51 @@ func (p *Processor) CollectShred(shred *gturbine.Shred) error {
}

func (p *Processor) cleanupStaleGroups(now time.Time) {
-    p.mu.Lock()
-    defer p.mu.Unlock()
+    var deleteHashes []string

+    p.completedBlocksMu.RLock()
    for hash, completedAt := range p.completedBlocks {
        if now.Sub(completedAt) > p.cleanupInterval {
+            deleteHashes = append(deleteHashes, hash)
+        }
+    }
+    p.completedBlocksMu.RUnlock()
+
+    if len(deleteHashes) != 0 {
+        // Take write lock once for all deletions
+        p.completedBlocksMu.Lock()
+        for _, hash := range deleteHashes {
            delete(p.completedBlocks, hash)
-            // Find and reset any groups with this block hash
-            for id, group := range p.groups {
-                if string(group.BlockHash) == hash {
-                    group.Reset()
-                    delete(p.groups, id)
-                }
+        }
+        p.completedBlocksMu.Unlock()
+    }
+
+    var deleteGroups []string
+
+    // Take read lock on groups to check for groups to delete (stale or duplicate blockhash)
+    p.groupsMu.RLock()
+    for id, group := range p.groups {
+        for _, hash := range deleteHashes {
+            // Check if group is associated with a completed block
+            if string(group.BlockHash) == hash {
+                deleteGroups = append(deleteGroups, id)
            }
        }
+
+        // Check if group is stale
+        if now.Sub(group.Timestamp) > p.cleanupInterval {
+            deleteGroups = append(deleteGroups, id)
+        }
+    }
+    p.groupsMu.RUnlock()
+
+    if len(deleteGroups) != 0 {
+        // Take write lock once for all deletions
+        p.groupsMu.Lock()
+        for _, id := range deleteGroups {
+            p.groups[id].Reset() // TODO: is this necessary?
+            delete(p.groups, id)
+        }
+        p.groupsMu.Unlock()
    }
}
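
The rewritten cleanupStaleGroups follows a collect-then-delete pattern: candidates are gathered under a read lock, then removed under one short write lock. A condensed, self-contained sketch of that pattern, using a hypothetical expiringSet type rather than code from this diff:

package main

import (
    "fmt"
    "sync"
    "time"
)

// expiringSet is a hypothetical stand-in for the processor's completedBlocks map.
type expiringSet struct {
    mu    sync.RWMutex
    items map[string]time.Time
}

// cleanup mirrors the two-phase approach above: read-lock to find expired keys,
// then a single write-lock to delete them all.
func (s *expiringSet) cleanup(now time.Time, ttl time.Duration) {
    var expired []string

    s.mu.RLock()
    for k, addedAt := range s.items {
        if now.Sub(addedAt) > ttl {
            expired = append(expired, k)
        }
    }
    s.mu.RUnlock()

    if len(expired) == 0 {
        return
    }

    s.mu.Lock()
    for _, k := range expired {
        delete(s.items, k)
    }
    s.mu.Unlock()
}

func main() {
    s := &expiringSet{items: map[string]time.Time{
        "old": time.Now().Add(-2 * time.Minute),
        "new": time.Now(),
    }}
    s.cleanup(time.Now(), time.Minute)
    fmt.Println(len(s.items)) // 1
}

Scanning under the read lock keeps concurrent CollectShred calls unblocked, and the write lock is taken once, and only when there is something to delete.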