@@ -11,6 +11,7 @@ import { logger } from '@powersync/lib-services-framework';
 import { Metrics } from '../metrics/Metrics.js';
 import { mergeAsyncIterables } from './merge.js';
 import { TokenStreamOptions, tokenStream } from './util.js';
+import { RequestTracker } from './RequestTracker.js';
 
1516/**
1617 * Maximum number of connections actively fetching data.
@@ -28,12 +29,14 @@ export interface SyncStreamParameters {
2829 */
2930 signal ?: AbortSignal ;
3031 tokenStreamOptions ?: Partial < TokenStreamOptions > ;
32+
33+ tracker : RequestTracker ;
3134}
3235
3336export async function * streamResponse (
3437 options : SyncStreamParameters
3538) : AsyncIterable < util . StreamingSyncLine | string | null > {
-  const { storage, params, syncParams, token, tokenStreamOptions, signal } = options;
+  const { storage, params, syncParams, token, tokenStreamOptions, tracker, signal } = options;
3740 // We also need to be able to abort, so we create our own controller.
3841 const controller = new AbortController ( ) ;
3942 if ( signal ) {
@@ -49,7 +52,7 @@ export async function* streamResponse(
4952 }
5053 }
5154 const ki = tokenStream ( token , controller . signal , tokenStreamOptions ) ;
-  const stream = streamResponseInner(storage, params, syncParams, controller.signal);
+  const stream = streamResponseInner(storage, params, syncParams, tracker, controller.signal);
5356 // Merge the two streams, and abort as soon as one of the streams end.
5457 const merged = mergeAsyncIterables ( [ stream , ki ] , controller . signal ) ;
5558
@@ -72,6 +75,7 @@ async function* streamResponseInner(
7275 storage : storage . BucketStorageFactory ,
7376 params : util . StreamingSyncRequest ,
7477 syncParams : RequestParameters ,
78+ tracker : RequestTracker ,
7579 signal : AbortSignal
7680) : AsyncGenerator < util . StreamingSyncLine | string | null > {
7781 // Bucket state of bucket id -> op_id.
@@ -109,6 +113,11 @@ async function* streamResponseInner(
109113 } ) ;
110114
111115 if ( allBuckets . length > 1000 ) {
116+ logger . error ( `Too many buckets` , {
117+ checkpoint,
118+ user_id : syncParams . user_id ,
119+ buckets : allBuckets . length
120+ } ) ;
112121 // TODO: Limit number of buckets even before we get to this point
113122 throw new Error ( `Too many buckets: ${ allBuckets . length } ` ) ;
114123 }
@@ -137,11 +146,18 @@ async function* streamResponseInner(
137146 }
138147 bucketsToFetch = diff . updatedBuckets . map ( ( c ) => c . bucket ) ;
139148
140- let message = `Updated checkpoint: ${ checkpoint } | write: ${ writeCheckpoint } | ` ;
149+ let message = `Updated checkpoint: ${ checkpoint } | ` ;
150+ message += `write: ${ writeCheckpoint } | ` ;
141151 message += `buckets: ${ allBuckets . length } | ` ;
142152 message += `updated: ${ limitedBuckets ( diff . updatedBuckets , 20 ) } | ` ;
143- message += `removed: ${ limitedBuckets ( diff . removedBuckets , 20 ) } | ` ;
144- logger . info ( message ) ;
153+ message += `removed: ${ limitedBuckets ( diff . removedBuckets , 20 ) } ` ;
154+ logger . info ( message , {
155+ checkpoint,
156+ user_id : syncParams . user_id ,
157+ buckets : allBuckets . length ,
158+ updated : diff . updatedBuckets . length ,
159+ removed : diff . removedBuckets . length
160+ } ) ;
145161
146162 const checksum_line : util . StreamingSyncCheckpointDiff = {
147163 checkpoint_diff : {
@@ -156,7 +172,7 @@ async function* streamResponseInner(
156172 } else {
157173 let message = `New checkpoint: ${ checkpoint } | write: ${ writeCheckpoint } | ` ;
158174 message += `buckets: ${ allBuckets . length } ${ limitedBuckets ( allBuckets , 20 ) } ` ;
159- logger . info ( message ) ;
175+ logger . info ( message , { checkpoint , user_id : syncParams . user_id , buckets : allBuckets . length } ) ;
160176 bucketsToFetch = allBuckets ;
161177 const checksum_line : util . StreamingSyncCheckpoint = {
162178 checkpoint : {
@@ -172,7 +188,16 @@ async function* streamResponseInner(
172188
173189 // This incrementally updates dataBuckets with each individual bucket position.
174190 // At the end of this, we can be sure that all buckets have data up to the checkpoint.
175- yield * bucketDataInBatches ( { storage, checkpoint, bucketsToFetch, dataBuckets, raw_data, binary_data, signal } ) ;
191+ yield * bucketDataInBatches ( {
192+ storage,
193+ checkpoint,
194+ bucketsToFetch,
195+ dataBuckets,
196+ raw_data,
197+ binary_data,
198+ signal,
199+ tracker
200+ } ) ;
176201
177202 await new Promise ( ( resolve ) => setTimeout ( resolve , 10 ) ) ;
178203 }
@@ -186,6 +211,7 @@ interface BucketDataRequest {
186211 dataBuckets : Map < string , string > ;
187212 raw_data : boolean | undefined ;
188213 binary_data : boolean | undefined ;
214+ tracker : RequestTracker ;
189215 signal : AbortSignal ;
190216}
191217
@@ -221,11 +247,16 @@ async function* bucketDataInBatches(request: BucketDataRequest) {
221247 }
222248}
223249
250+ interface BucketDataBatchResult {
251+ done : boolean ;
252+ data : any ;
253+ }
254+
224255/**
225256 * Extracted as a separate internal function just to avoid memory leaks.
226257 */
227- async function * bucketDataBatch ( request : BucketDataRequest ) {
228- const { storage, checkpoint, bucketsToFetch, dataBuckets, raw_data, binary_data, signal } = request ;
258+ async function * bucketDataBatch ( request : BucketDataRequest ) : AsyncGenerator < BucketDataBatchResult , void > {
259+ const { storage, checkpoint, bucketsToFetch, dataBuckets, raw_data, binary_data, tracker , signal } = request ;
229260
230261 const [ _ , release ] = await syncSemaphore . acquire ( ) ;
231262 try {
@@ -272,7 +303,7 @@ async function* bucketDataBatch(request: BucketDataRequest) {
272303 // iterator memory in case if large data sent.
273304 yield { data : null , done : false } ;
274305 }
-        Metrics.getInstance().operations_synced_total.add(r.data.length);
+        tracker.addOperationsSynced(r.data.length);
276307
277308 dataBuckets . set ( r . bucket , r . next_after ) ;
278309 }
0 commit comments