2 files changed: +18 −6 lines changed
lines changed Original file line number Diff line number Diff line change @@ -285,8 +285,8 @@ fill_cache() {
285285 for count in $( seq $completed $(( batch_end - 1 )) ) ; do
286286 (
287287 # Create truly unique cache entries by making each query unique
288- # Use timestamp + count to ensure uniqueness even in parallel execution
289- local unique_id="WorstCaseFill_${count}_$$_$(date +%s%3N)"
288+ # Use timestamp + count + random + PID to ensure uniqueness even in parallel execution
289+ local unique_id="WorstCaseFill_${count}_${RANDOM}_$$_$(date +%s%N)"
290290 local pattern=$(( count % 3 ))
291291
292292 # Create truly unique cache entries by varying query parameters
@@ -317,7 +317,8 @@ fill_cache() {
317317 echo " "
318318
319319 # Wait for all cache operations to complete and stabilize
320- sleep 2
320+ log_info " Waiting for cache to stabilize..."
321+ sleep 5
321322
322323 # Sanity check: Verify cache actually contains entries
323324 log_info " Sanity check - Verifying cache size after fill..."
@@ -1723,6 +1724,11 @@ main() {
17231724 echo " "
17241725 log_section " PHASE 3: Fill Cache with 1000 Entries (Worst Case - Non-Matching)"
17251726 echo " [INFO] Filling cache with entries that will NEVER match test queries (worst case)..."
1727+
1728+ # Clear cache and wait for system to stabilize after write operations
1729+ clear_cache
1730+ sleep 5
1731+
17261732 fill_cache $CACHE_FILL_SIZE
17271733
17281734 # ============================================================
Original file line number Diff line number Diff line change @@ -292,8 +292,8 @@ fill_cache() {
292292 for count in $( seq $completed $(( batch_end - 1 )) ) ; do
293293 (
294294 # Create truly unique cache entries by making each query unique
295- # Use timestamp + count to ensure uniqueness even in parallel execution
296- local unique_id="CacheFill_${count}_$$_$(date +%s%3N)"
295+ # Use timestamp + count + random + PID to ensure uniqueness even in parallel execution
296+ local unique_id="CacheFill_${count}_${RANDOM}_$$_$(date +%s%N)"
297297 local pattern=$(( count % 3 ))
298298
299299 # First 3 requests create the cache entries we'll test for hits in Phase 4
@@ -343,7 +343,8 @@ fill_cache() {
343343 echo " "
344344
345345 # Wait for all cache operations to complete and stabilize
346- sleep 2
346+ log_info " Waiting for cache to stabilize..."
347+ sleep 5
347348
348349 # Sanity check: Verify cache actually contains entries
349350 log_info " Sanity check - Verifying cache size after fill..."
@@ -1812,6 +1813,11 @@ main() {
18121813 echo " "
18131814 log_section " PHASE 3: Fill Cache with 1000 Entries"
18141815 echo " [INFO] Filling cache to test performance at scale..."
1816+
1817+ # Clear cache and wait for system to stabilize after write operations
1818+ clear_cache
1819+ sleep 5
1820+
18151821 fill_cache $CACHE_FILL_SIZE
18161822
18171823 # ============================================================
You can’t perform that action at this time.
0 commit comments