@@ -30,18 +30,19 @@ def __init__(self, header=None):
30
30
class SyncTask (object ):
31
31
32
32
"""
33
- synchronizes a the chain starting from a given blockhash
34
- blockchain hash is fetched from a single peer (which led to the unknown blockhash)
35
- blocks are fetched from the best peers
36
-
37
- with missing block:
38
- fetch headers
39
- until known block
40
- for headers
41
- fetch block bodies
42
- for each block body
43
- construct block
44
- chainservice.add_blocks() # blocks if queue is full
33
+ Block header syncing
34
+ When syncing with the original peer, from the latest block head of the
35
+ chain, divide the missing blocks into N sections, each section is made of
36
+ 128 blockheader batches, each batch contains 192 headers, downloading a
37
+ skeleton of first header of each header batch from the original peer, for
38
+ each available idle peer, download a header batch in parallel; for
39
+ each batch, match the first header and last header against the respective
40
+ skeleton headers, verify header order and save the downloaded batch into a
41
+ header cache and deliver the partially downloaded headers to a queue for
42
+ body downloads scheduling in ascending order.
43
+ When header section downloading is complete, move the starting header
44
+ position to the start of the next section; if downloading is interrupted, restart downloading
45
+ from the head of the best block of the current chain
45
46
"""
46
47
initial_blockheaders_per_request = 32
47
48
max_blockheaders_per_request = 192
@@ -359,8 +360,17 @@ def receive_blockheaders(self, proto, blockheaders):
359
360
self .requests [proto ].set (blockheaders )
360
361
361
362
362
- class SyncBody (object ):
363
363
364
+ class SyncBody (object ):
365
+ """
366
+ Handles body syncing
367
+ For each available peer, fetch block bodies in parallel from the task queue
368
+ in batches (128), for each body fetch response, match it against headers in
369
+ the body fetch task queue, if it matches, put the downloaded body in a body
370
+ result cache, delete the corresponding task from task queue, import the
371
+ block bodies from block cache into the chain, remove the imported bodies
372
+ from body cache
373
+ """
364
374
max_blocks_per_request = 128
365
375
max_blocks_process = 2048
366
376
blocks_request_timeout = 19.
@@ -412,7 +422,12 @@ def run(self):
412
422
print (traceback .format_exc ())
413
423
self .exit (success = False )
414
424
415
-
425
+
426
+ # Body fetch scheduler:
426
+ # reads from the downloaded-header queue, dividing headers
427
+ # into batches (2048 or less); for each header batch it adds the headers to the
428
+ # task queue, where each queue item is a task of 128 body fetches, then activates
429
+ # the body fetcher.
416
431
def schedule_block_fetch (self ):
417
432
batch_header = []
418
433
log_st .debug ('start sheduleing blocks' )
@@ -650,7 +665,6 @@ def deliver_blocks(self, proto, bodies):
650
665
self .body_cache = self .body_cache [nimp :]+ [None for b in body_result ]
651
666
self .body_cache_offset += nimp
652
667
log_body_st .debug ('body cache offset' , offset = self .body_cache_offset )
653
- log_body_st .debug ('body cache' , bodycache = self .body_cache )
654
668
return result .block
655
669
656
670
0 commit comments