@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"github.com/cockroachdb/pebble"
 	"github.com/elastic/apm-server/x-pack/apm-server/sampling/eventstorage"
+	"sync"
 	"sync/atomic"
 	"time"
 
@@ -90,6 +91,9 @@ type ReadWriter struct {
 	// This must not be used in write operations, as keys are expected to
 	// be unmodified until the end of a transaction.
 	readKeyBuf []byte
+
+	mu    sync.Mutex
+	batch *pebble.Batch
 }
 
 // Close closes the writer. Any writes that have not been flushed may be lost.
@@ -143,7 +147,7 @@ func (rw *ReadWriter) IsTraceSampled(traceID string) (bool, error) {
 	//	return false, err
 	//}
 	//return item.UserMeta() == entryMetaTraceSampled, nil
-	return false, nil
+	return false, eventstorage.ErrNotFound
 }
 
 // WriteTraceEvent writes a trace event to storage.
@@ -161,64 +165,84 @@ func (rw *ReadWriter) WriteTraceEvent(traceID string, id string, event *modelpb.
 	buf.WriteByte(':')
 	buf.WriteString(id)
 	key := buf.Bytes()
-	return rw.s.db.Set(key, data, pebble.NoSync)
-	//rw.writeEntry(badger.NewEntry(key, data).WithMeta(entryMetaTraceEvent), opts)
+
+	//return rw.s.db.Set(key, data, pebble.NoSync)
+	return rw.writeEntry(key, data)
+}
+
+func (rw *ReadWriter) writeEntry(key, data []byte) error {
+	rw.mu.Lock()
+	defer rw.mu.Unlock()
+	if rw.batch == nil {
+		rw.batch = rw.s.db.NewIndexedBatch()
+	}
+	if err := rw.batch.Set(key, data, pebble.NoSync); err != nil {
+		return err
+	}
+
+	if rw.batch.Len() > 2000 {
+		err := rw.batch.Commit(pebble.Sync)
+		rw.batch.Close()
+		rw.batch = nil
+		return err
+	}
+	return nil
+
+	//
+	//rw.pendingWrites++
+	//entrySize := estimateSize(e)
+	//// The badger database has an async size reconciliation, with a 1 minute
+	//// ticker that keeps the lsm and vlog sizes updated in an in-memory map.
+	//// It's OK to call call s.db.Size() on the hot path, since the memory
+	//// lookup is cheap.
+	//lsm, vlog := rw.s.db.Size()
+	//
+	//// there are multiple ReadWriters writing to the same storage so add
+	//// the entry size and consider the new value to avoid TOCTOU issues.
+	//pendingSize := rw.s.pendingSize.Add(entrySize)
+	//rw.pendingSize += entrySize
+	//
+	//if current := pendingSize + lsm + vlog; opts.StorageLimitInBytes != 0 && current >= opts.StorageLimitInBytes {
+	//	// flush what we currently have and discard the current entry
+	//	if err := rw.Flush(); err != nil {
+	//		return err
+	//	}
+	//	return fmt.Errorf("%w (current: %d, limit: %d)", ErrLimitReached, current, opts.StorageLimitInBytes)
+	//}
+	//
+	//if rw.pendingWrites >= 200 {
+	//	// Attempt to flush if there are 200 or more uncommitted writes.
+	//	// This ensures calls to ReadTraceEvents are not slowed down;
+	//	// ReadTraceEvents uses an iterator, which must sort all keys
+	//	// of uncommitted writes.
+	//	// The 200 value yielded a good balance between read and write speed:
+	//	// https://github.com/elastic/apm-server/pull/8407#issuecomment-1162994643
+	//	if err := rw.Flush(); err != nil {
+	//		return err
+	//	}
+	//
+	//	// the current ReadWriter flushed the transaction and reset the pendingSize so add
+	//	// the entrySize again.
+	//	rw.pendingSize += entrySize
+	//	rw.s.pendingSize.Add(entrySize)
+	//}
+	//
+	//err := rw.txn.SetEntry(e.WithTTL(opts.TTL))
+	//
+	//// If the transaction is already too big to accommodate the new entry, flush
+	//// the existing transaction and set the entry on a new one, otherwise,
+	//// returns early.
+	//if err != badger.ErrTxnTooBig {
+	//	return err
+	//}
+	//if err := rw.Flush(); err != nil {
+	//	return err
+	//}
+	//rw.pendingSize += entrySize
+	//rw.s.pendingSize.Add(entrySize)
+	//return rw.txn.SetEntry(e.WithTTL(opts.TTL))
 }
 
-//func (rw *ReadWriter) writeEntry(e *badger.Entry, opts WriterOpts) error {
-//	rw.pendingWrites++
-//	entrySize := estimateSize(e)
-//	// The badger database has an async size reconciliation, with a 1 minute
-//	// ticker that keeps the lsm and vlog sizes updated in an in-memory map.
-//	// It's OK to call call s.db.Size() on the hot path, since the memory
-//	// lookup is cheap.
-//	lsm, vlog := rw.s.db.Size()
-//
-//	// there are multiple ReadWriters writing to the same storage so add
-//	// the entry size and consider the new value to avoid TOCTOU issues.
-//	pendingSize := rw.s.pendingSize.Add(entrySize)
-//	rw.pendingSize += entrySize
-//
-//	if current := pendingSize + lsm + vlog; opts.StorageLimitInBytes != 0 && current >= opts.StorageLimitInBytes {
-//		// flush what we currently have and discard the current entry
-//		if err := rw.Flush(); err != nil {
-//			return err
-//		}
-//		return fmt.Errorf("%w (current: %d, limit: %d)", ErrLimitReached, current, opts.StorageLimitInBytes)
-//	}
-//
-//	if rw.pendingWrites >= 200 {
-//		// Attempt to flush if there are 200 or more uncommitted writes.
-//		// This ensures calls to ReadTraceEvents are not slowed down;
-//		// ReadTraceEvents uses an iterator, which must sort all keys
-//		// of uncommitted writes.
-//		// The 200 value yielded a good balance between read and write speed:
-//		// https://github.com/elastic/apm-server/pull/8407#issuecomment-1162994643
-//		if err := rw.Flush(); err != nil {
-//			return err
-//		}
-//
-//		// the current ReadWriter flushed the transaction and reset the pendingSize so add
-//		// the entrySize again.
-//		rw.pendingSize += entrySize
-//		rw.s.pendingSize.Add(entrySize)
-//	}
-//
-//	err := rw.txn.SetEntry(e.WithTTL(opts.TTL))
-//
-//	// If the transaction is already too big to accommodate the new entry, flush
-//	// the existing transaction and set the entry on a new one, otherwise,
-//	// returns early.
-//	if err != badger.ErrTxnTooBig {
-//		return err
-//	}
-//	if err := rw.Flush(); err != nil {
-//		return err
-//	}
-//	rw.pendingSize += entrySize
-//	rw.s.pendingSize.Add(entrySize)
-//	return rw.txn.SetEntry(e.WithTTL(opts.TTL))
-//}
 //
 //func estimateSize(e *badger.Entry) int64 {
 //	// See badger WithValueThreshold option
@@ -261,7 +285,12 @@ func (rw *ReadWriter) DeleteTraceEvent(traceID, id string) error {
 
 // ReadTraceEvents reads trace events with the given trace ID from storage into out.
 func (rw *ReadWriter) ReadTraceEvents(traceID string, out *modelpb.Batch) error {
-	iter, err := rw.s.db.NewIter(&pebble.IterOptions{
+	rw.mu.Lock()
+	defer rw.mu.Unlock()
+	if rw.batch == nil {
+		rw.batch = rw.s.db.NewIndexedBatch()
+	}
+	iter, err := rw.batch.NewIter(&pebble.IterOptions{
 		LowerBound: append([]byte(traceID), ':'),
 		UpperBound: append([]byte(traceID), ';'),
 	})
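
Note on the pattern in this diff: writes are buffered in a pebble indexed batch guarded by a mutex, the batch is committed and discarded once Batch.Len() exceeds 2000, and ReadTraceEvents iterates over the same indexed batch so uncommitted writes remain visible to reads. The standalone sketch below illustrates that buffering idea outside the apm-server types. The bufferedWriter type, its method names, and the /tmp path are illustrative inventions; the key layout, the size threshold, and the pebble calls (NewIndexedBatch, Set, Commit, NewIter) are taken from the diff.

package main

import (
	"fmt"
	"log"
	"sync"

	"github.com/cockroachdb/pebble"
)

// bufferedWriter is an illustrative stand-in for ReadWriter: writes accumulate
// in an indexed batch (so reads through the batch can see uncommitted keys),
// and the batch is committed and reset once it grows past a size threshold.
type bufferedWriter struct {
	db    *pebble.DB
	mu    sync.Mutex
	batch *pebble.Batch
}

func (w *bufferedWriter) set(key, value []byte) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.batch == nil {
		w.batch = w.db.NewIndexedBatch()
	}
	if err := w.batch.Set(key, value, pebble.NoSync); err != nil {
		return err
	}
	// Batch.Len reports the size of the batch's encoded representation; the
	// diff commits once it exceeds 2000 and starts a new batch on the next write.
	if w.batch.Len() > 2000 {
		err := w.batch.Commit(pebble.Sync)
		w.batch.Close()
		w.batch = nil
		return err
	}
	return nil
}

func (w *bufferedWriter) scanTrace(traceID string, fn func(key, value []byte)) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.batch == nil {
		w.batch = w.db.NewIndexedBatch()
	}
	// Keys are "<traceID>:<eventID>", so [traceID+":", traceID+";") bounds the
	// scan to a single trace, mirroring ReadTraceEvents above.
	iter, err := w.batch.NewIter(&pebble.IterOptions{
		LowerBound: append([]byte(traceID), ':'),
		UpperBound: append([]byte(traceID), ';'),
	})
	if err != nil {
		return err
	}
	defer iter.Close()
	for iter.First(); iter.Valid(); iter.Next() {
		fn(iter.Key(), iter.Value())
	}
	return iter.Error()
}

func main() {
	db, err := pebble.Open("/tmp/pebble-batch-sketch", &pebble.Options{})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	w := &bufferedWriter{db: db}
	if err := w.set([]byte("trace-1:span-1"), []byte(`{"name":"GET /"}`)); err != nil {
		log.Fatal(err)
	}
	// The uncommitted write is visible through the indexed batch's iterator.
	if err := w.scanTrace("trace-1", func(k, v []byte) { fmt.Printf("%s => %s\n", k, v) }); err != nil {
		log.Fatal(err)
	}
}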