aggregator.go
// Copyright 2021-2022, Offchain Labs, Inc.
// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
package das
import (
"bytes"
"context"
"errors"
"fmt"
"math/bits"
"sync/atomic"
"time"
flag "github.com/spf13/pflag"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/offchainlabs/nitro/arbstate/daprovider"
"github.com/offchainlabs/nitro/blsSignatures"
"github.com/offchainlabs/nitro/das/dastree"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
"github.com/offchainlabs/nitro/util/pretty"
)
const metricBase string = "arb/das/rpc/aggregator/store"
var (
// This gauge reads 1 after any Store call has had a backend failure, and
// resets to 0 once a Store completes with no backend failures.
anyErrorGauge = metrics.GetOrRegisterGauge(metricBase+"/error/gauge", nil)
// Other aggregator metrics are generated dynamically in the Store function.
)
type AggregatorConfig struct {
Enable bool `koanf:"enable"`
AssumedHonest int `koanf:"assumed-honest"`
Backends BackendConfigList `koanf:"backends"`
MaxStoreChunkBodySize int `koanf:"max-store-chunk-body-size"`
}
var DefaultAggregatorConfig = AggregatorConfig{
AssumedHonest: 0,
Backends: nil,
MaxStoreChunkBodySize: 512 * 1024,
}
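// An illustrative backends value (a sketch only; see this package's backend
// config type for the authoritative schema):
//
//	[{"url": "http://localhost:9876", "pubkey": "<base64-encoded BLS public key>"}]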
var parsedBackendsConf BackendConfigList
func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) {
f.Bool(prefix+".enable", DefaultAggregatorConfig.Enable, "enable storage of sequencer batch data to a list of RPC endpoints; this should only be used by the batch poster and not in combination with other DAS storage types")
f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider a Store request to be successful.")
f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. This can be specified on the command line as a JSON array, e.g. [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.")
f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers")
}
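// A worked example of the assumed-honest arithmetic above: with N = 4
// backends and assumed-honest H = 2, a Store needs K = N + 1 - H = 3 valid
// responses, and at most H - 1 = 1 backend failure can be tolerated before
// a certificate becomes impossible to generate.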
type Aggregator struct {
config AggregatorConfig
services []ServiceDetails
requestTimeout time.Duration
// calculated fields
requiredServicesForStore int
maxAllowedServiceStoreFailures int
keysetHash [32]byte
keysetBytes []byte
}
type ServiceDetails struct {
service DataAvailabilityServiceWriter
pubKey blsSignatures.PublicKey
signersMask uint64
metricName string
}
func (s *ServiceDetails) String() string {
return fmt.Sprintf("ServiceDetails{service: %v, signersMask: %d}", s.service, s.signersMask)
}
func NewServiceDetails(service DataAvailabilityServiceWriter, pubKey blsSignatures.PublicKey, signersMask uint64, metricName string) (*ServiceDetails, error) {
if bits.OnesCount64(signersMask) != 1 {
return nil, fmt.Errorf("tried to configure backend DAS %v with invalid signersMask %X", service, signersMask)
}
return &ServiceDetails{
service: service,
pubKey: pubKey,
signersMask: signersMask,
metricName: metricName,
}, nil
}
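// A minimal wiring sketch (illustrative; writers and pubKeys are hypothetical
// inputs): giving the i-th backend the one-bit mask 1<<i satisfies the
// OnesCount64 check above, and the bitwise OR of the masks in an aggregate
// certificate then records exactly which backends signed.
//
//	var services []ServiceDetails
//	for i, w := range writers {
//		d, err := NewServiceDetails(w, pubKeys[i], 1<<uint64(i), fmt.Sprintf("backend%d", i))
//		if err != nil {
//			return nil, err
//		}
//		services = append(services, *d)
//	}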
func NewAggregator(ctx context.Context, config DataAvailabilityConfig, services []ServiceDetails) (*Aggregator, error) {
if config.ParentChainNodeURL == "none" {
return NewAggregatorWithSeqInboxCaller(config, services, nil)
}
l1client, err := GetL1Client(ctx, config.ParentChainConnectionAttempts, config.ParentChainNodeURL)
if err != nil {
return nil, err
}
seqInboxAddress, err := OptionalAddressFromString(config.SequencerInboxAddress)
if err != nil {
return nil, err
}
if seqInboxAddress == nil {
return NewAggregatorWithSeqInboxCaller(config, services, nil)
}
return NewAggregatorWithL1Info(config, services, l1client, *seqInboxAddress)
}
func NewAggregatorWithL1Info(
config DataAvailabilityConfig,
services []ServiceDetails,
l1client *ethclient.Client,
seqInboxAddress common.Address,
) (*Aggregator, error) {
seqInboxCaller, err := bridgegen.NewSequencerInboxCaller(seqInboxAddress, l1client)
if err != nil {
return nil, err
}
return NewAggregatorWithSeqInboxCaller(config, services, seqInboxCaller)
}
func NewAggregatorWithSeqInboxCaller(
config DataAvailabilityConfig,
services []ServiceDetails,
seqInboxCaller *bridgegen.SequencerInboxCaller,
) (*Aggregator, error) {
// #nosec G115
keysetHash, keysetBytes, err := KeysetHashFromServices(services, uint64(config.RPCAggregator.AssumedHonest))
if err != nil {
return nil, err
}
return &Aggregator{
config: config.RPCAggregator,
services: services,
requestTimeout: config.RequestTimeout,
requiredServicesForStore: len(services) + 1 - config.RPCAggregator.AssumedHonest,
maxAllowedServiceStoreFailures: config.RPCAggregator.AssumedHonest - 1,
keysetHash: keysetHash,
keysetBytes: keysetBytes,
}, nil
}
type storeResponse struct {
details ServiceDetails
sig blsSignatures.Signature
err error
}
// Store calls Store on each backend DAS in parallel and collects the
// responses. If at least K responses arrive, it aggregates the signatures
// and signersMasks from each backend into the DataAvailabilityCertificate
// and returns immediately. Any backend Store goroutines still running when
// Aggregator.Store returns are allowed to continue until their context is
// canceled (e.g. via TimeoutWrapper), with their results discarded.
//
// If Store receives enough errors that K successes become impossible, it
// stops early and returns an error.
//
// If Store has not received enough successful responses by the time its
// context is canceled (e.g. via TimeoutWrapper), it also returns an error.
func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) {
// #nosec G115
log.Trace("das.Aggregator.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0))
allBackendsSucceeded := false
defer func() {
if allBackendsSucceeded {
anyErrorGauge.Update(0)
} else {
anyErrorGauge.Update(1)
}
}()
responses := make(chan storeResponse, len(a.services))
expectedHash := dastree.Hash(message)
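// Fan out one Store goroutine per backend; each sends exactly one
// storeResponse on the buffered channel above, so no send can block.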
for _, d := range a.services {
go func(ctx context.Context, d ServiceDetails) {
storeCtx, cancel := context.WithTimeout(ctx, a.requestTimeout)
var metricWithServiceName = metricBase + "/" + d.metricName
defer cancel()
incFailureMetric := func() {
metrics.GetOrRegisterCounter(metricWithServiceName+"/error/total", nil).Inc(1)
metrics.GetOrRegisterCounter(metricBase+"/error/all/total", nil).Inc(1)
}
cert, err := d.service.Store(storeCtx, message, timeout)
if err != nil {
incFailureMetric()
log.Warn("DAS Aggregator failed to store batch to backend", "backend", d.metricName, "err", err)
responses <- storeResponse{d, nil, err}
return
}
verified, err := blsSignatures.VerifySignature(
cert.Sig, cert.SerializeSignableFields(), d.pubKey,
)
if err != nil {
incFailureMetric()
log.Warn("DAS Aggregator couldn't parse backend's store response signature", "backend", d.metricName, "err", err)
responses <- storeResponse{d, nil, err}
return
}
if !verified {
incFailureMetric()
log.Warn("DAS Aggregator failed to verify backend's store response signature", "backend", d.metricName)
responses <- storeResponse{d, nil, errors.New("signature verification failed")}
return
}
// SignersMask from backend DAS is ignored.
if cert.DataHash != expectedHash {
incFailureMetric()
log.Warn("DAS Aggregator got a store response with a data hash not matching the expected hash", "backend", d.metricName, "dataHash", cert.DataHash, "expectedHash", expectedHash)
responses <- storeResponse{d, nil, errors.New("hash verification failed")}
return
}
if cert.Timeout != timeout {
incFailureMetric()
log.Warn("DAS Aggregator got a store response with an expiry time not matching the expected expiry time", "backend", d.metricName, "certTimeout", cert.Timeout, "expectedTimeout", timeout)
responses <- storeResponse{d, nil, fmt.Errorf("timeout was %d, expected %d", cert.Timeout, timeout)}
return
}
metrics.GetOrRegisterCounter(metricWithServiceName+"/success/total", nil).Inc(1)
metrics.GetOrRegisterCounter(metricBase+"/success/all/total", nil).Inc(1)
responses <- storeResponse{d, cert.Sig, nil}
}(ctx, d)
}
var aggCert daprovider.DataAvailabilityCertificate
type certDetails struct {
pubKeys []blsSignatures.PublicKey
sigs []blsSignatures.Signature
aggSignersMask uint64
err error
}
var storeFailures atomic.Int64
// Collect responses from backends.
certDetailsChan := make(chan certDetails)
go func() {
var pubKeys []blsSignatures.PublicKey
var sigs []blsSignatures.Signature
var aggSignersMask uint64
var successfullyStoredCount int
var returned bool
for i := 0; i < len(a.services); i++ {
select {
case <-ctx.Done():
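// This break exits only the select, not the for loop; once the context is
// done the remaining iterations no longer block waiting for responses.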
break
case r := <-responses:
if r.err != nil {
_ = storeFailures.Add(1)
log.Warn("das.Aggregator: Error from backend", "backend", r.details.service, "signerMask", r.details.signersMask, "err", r.err)
} else {
pubKeys = append(pubKeys, r.details.pubKey)
sigs = append(sigs, r.sig)
aggSignersMask |= r.details.signersMask
successfullyStoredCount++
}
}
// As soon as enough responses have been returned, pass the result to
// certDetailsChan so the Store function can return, but keep collecting
// until all responses have arrived (or the context is canceled) so that
// logs and metrics stay accurate.
if !returned {
if successfullyStoredCount >= a.requiredServicesForStore {
cd := certDetails{}
cd.pubKeys = append(cd.pubKeys, pubKeys...)
cd.sigs = append(cd.sigs, sigs...)
cd.aggSignersMask = aggSignersMask
certDetailsChan <- cd
returned = true
if a.maxAllowedServiceStoreFailures > 0 && // Ignore the case where AssumedHonest = 1, probably a testnet
int(storeFailures.Load())+1 > a.maxAllowedServiceStoreFailures {
log.Error("das.Aggregator: storing the batch data succeeded for enough DAS committee members to generate the Data Availability Cert, but one more failure would have made the cert impossible to generate. Look for preceding logs with \"Error from backend\"")
}
} else if int(storeFailures.Load()) > a.maxAllowedServiceStoreFailures {
cd := certDetails{}
cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). %w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, daprovider.ErrBatchToDasFailed)
certDetailsChan <- cd
returned = true
}
}
}
}()
cd := <-certDetailsChan
if cd.err != nil {
return nil, cd.err
}
aggCert.Sig = blsSignatures.AggregateSignatures(cd.sigs)
aggPubKey := blsSignatures.AggregatePublicKeys(cd.pubKeys)
aggCert.SignersMask = cd.aggSignersMask
aggCert.DataHash = expectedHash
aggCert.Timeout = timeout
aggCert.KeysetHash = a.keysetHash
aggCert.Version = 1
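// BLS signatures over the same message can be aggregated: the aggregate
// signature verifies against the aggregate of the signers' public keys, so
// the single check below covers every backend that contributed.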
verified, err := blsSignatures.VerifySignature(aggCert.Sig, aggCert.SerializeSignableFields(), aggPubKey)
if err != nil {
//nolint:errorlint
return nil, fmt.Errorf("%s. %w", err.Error(), daprovider.ErrBatchToDasFailed)
}
if !verified {
return nil, fmt.Errorf("failed aggregate signature check. %w", daprovider.ErrBatchToDasFailed)
}
if storeFailures.Load() == 0 {
allBackendsSucceeded = true
}
return &aggCert, nil
}
func (a *Aggregator) String() string {
var b bytes.Buffer
b.WriteString("das.Aggregator{")
first := true
for _, d := range a.services {
if !first {
b.WriteString(",")
}
first = false
b.WriteString(fmt.Sprintf("signersMask(aggregator):%d,", d.signersMask))
b.WriteString(d.service.String())
}
b.WriteString("}")
return b.String()
}
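// Usage sketch (illustrative only; the wiring below is an assumption based on
// the constructors above, not a prescribed API):
//
//	agg, err := NewAggregator(ctx, config, services) // services built via NewServiceDetails
//	if err != nil {
//		return err
//	}
//	cert, err := agg.Store(ctx, batchData, expiryUnixSeconds)
//	if err != nil {
//		return err // fewer than K = N+1-H backends succeeded in time
//	}
//	// cert.SignersMask records which backends signed; cert.Sig is their
//	// aggregated BLS signature over the certificate's signable fields.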