@@ -63,7 +63,12 @@ BucketSnapshotBase<BucketT>::getEntryAtOffset(LedgerKey const& k,
     }
     else if (stream.readPage(be, k, pageSize))
     {
-        return {std::make_shared<typename BucketT::EntryT>(be), false};
+        auto ret = std::make_shared<typename BucketT::EntryT>(be);
+        if constexpr (std::is_same_v<BucketT, LiveBucket>)
+        {
+            mBucket->getIndex().addToCache(ret);
+        }
+        return {ret, false};
     }
 
     // Mark entry miss for metrics
@@ -81,6 +86,15 @@ BucketSnapshotBase<BucketT>::getBucketEntry(LedgerKey const& k) const
         return {nullptr, false};
     }
 
+    if constexpr (std::is_same_v<BucketT, LiveBucket>)
+    {
+        auto [entryOp, hit] = mBucket->getIndex().getFromCache(k);
+        if (hit)
+        {
+            return {entryOp, false};
+        }
+    }
+
     auto pos = mBucket->getIndex().lookup(k);
     if (pos.has_value())
     {
@@ -111,53 +125,67 @@ BucketSnapshotBase<BucketT>::loadKeys(
     auto currKeyIt = keys.begin();
     auto const& index = mBucket->getIndex();
     auto indexIter = index.begin();
-    while (currKeyIt != keys.end() && indexIter != index.end())
+
+    while (currKeyIt != keys.end() &&
+           (indexIter != index.end() || index.isFullyCached()))
     {
-        auto [offOp, newIndexIter] = index.scan(indexIter, *currKeyIt);
-        indexIter = newIndexIter;
-        if (offOp)
+        std::shared_ptr<typename BucketT::EntryT> entryOp{};
+        bool cacheHit = false;
+        if constexpr (std::is_same_v<BucketT, LiveBucket>)
+        {
+            std::tie(entryOp, cacheHit) = index.getFromCache(*currKeyIt);
+        }
+
+        if (!cacheHit)
         {
-            auto [entryOp, bloomMiss] = getEntryAtOffset(
+            std::optional<std::streamoff> offOp{};
+            auto bloomMiss = false;
+            std::tie(offOp, indexIter) = index.scan(indexIter, *currKeyIt);
+            if (!offOp)
+            {
+                ++currKeyIt;
+                continue;
+            }
+
+            std::tie(entryOp, bloomMiss) = getEntryAtOffset(
                 *currKeyIt, *offOp, mBucket->getIndex().getPageSize());
+        }
 
-            if (entryOp)
+        if (entryOp)
+        {
+            // Don't return tombstone entries, as these do not exist wrt
+            // ledger state
+            if (!BucketT::isTombstoneEntry(*entryOp))
             {
-                // Don't return tombstone entries, as these do not exist wrt
-                // ledger state
-                if (!BucketT::isTombstoneEntry(*entryOp))
+                // Only live bucket loads can be metered
+                if constexpr (std::is_same_v<BucketT, LiveBucket>)
                 {
-                    // Only live bucket loads can be metered
-                    if constexpr (std::is_same_v<BucketT, LiveBucket>)
+                    bool addEntry = true;
+                    if (lkMeter)
                     {
-                        bool addEntry = true;
-                        if (lkMeter)
-                        {
-                            // Here, we are metering after the entry has been
-                            // loaded. This is because we need to know the size
-                            // of the entry to meter it. Future work will add
-                            // metering at the xdr level.
-                            auto entrySize =
-                                xdr::xdr_size(entryOp->liveEntry());
-                            addEntry = lkMeter->canLoad(*currKeyIt, entrySize);
-                            lkMeter->updateReadQuotasForKey(*currKeyIt,
-                                                            entrySize);
-                        }
-                        if (addEntry)
-                        {
-                            result.push_back(entryOp->liveEntry());
-                        }
+                        // Here, we are metering after the entry has been
+                        // loaded. This is because we need to know the size
+                        // of the entry to meter it. Future work will add
+                        // metering at the xdr level.
+                        auto entrySize = xdr::xdr_size(entryOp->liveEntry());
+                        addEntry = lkMeter->canLoad(*currKeyIt, entrySize);
+                        lkMeter->updateReadQuotasForKey(*currKeyIt, entrySize);
                     }
-                    else
+                    if (addEntry)
                     {
-                        static_assert(std::is_same_v<BucketT, HotArchiveBucket>,
-                                      "unexpected bucket type");
-                        result.push_back(*entryOp);
+                        result.push_back(entryOp->liveEntry());
                    }
                }
-
-                currKeyIt = keys.erase(currKeyIt);
-                continue;
+                else
+                {
+                    static_assert(std::is_same_v<BucketT, HotArchiveBucket>,
+                                  "unexpected bucket type");
+                    result.push_back(*entryOp);
+                }
            }
+
+            currKeyIt = keys.erase(currKeyIt);
+            continue;
        }
 
        ++currKeyIt;
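For reference, the sketch below is a standalone illustration of the cache-first lookup pattern these hunks introduce: consult an in-memory cache keyed by ledger key before falling back to the indexed disk read, and populate the cache after a successful read. The types and helpers here (Entry, EntryCache, loadFromDisk, load) are hypothetical stand-ins, not stellar-core APIs; only the getFromCache/addToCache names mirror the calls used in the patch.

// Standalone sketch (not stellar-core code): cache-first lookup with an
// unordered_map standing in for the bucket index's entry cache.
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>

struct Entry
{
    std::string key;
    std::string value;
};

class EntryCache // hypothetical stand-in for the index's cache
{
    std::unordered_map<std::string, std::shared_ptr<Entry const>> mMap;

  public:
    // Mirrors the getFromCache(k) -> {entry, hit} shape used in the diff.
    std::pair<std::shared_ptr<Entry const>, bool>
    getFromCache(std::string const& k) const
    {
        auto it = mMap.find(k);
        if (it == mMap.end())
        {
            return {nullptr, false};
        }
        return {it->second, true};
    }

    // Mirrors addToCache(entry): remember an entry after a disk read.
    void
    addToCache(std::shared_ptr<Entry const> const& e)
    {
        mMap.emplace(e->key, e);
    }
};

// Stand-in for the expensive indexed disk read (getEntryAtOffset above).
std::shared_ptr<Entry const>
loadFromDisk(std::string const& k)
{
    std::cout << "disk read for " << k << "\n";
    return std::make_shared<Entry const>(Entry{k, "value-for-" + k});
}

// Cache-first load: return on a hit, otherwise read from disk and populate
// the cache so the next lookup for the same key is served from memory.
std::shared_ptr<Entry const>
load(EntryCache& cache, std::string const& k)
{
    auto [entry, hit] = cache.getFromCache(k);
    if (hit)
    {
        return entry;
    }
    auto loaded = loadFromDisk(k);
    cache.addToCache(loaded);
    return loaded;
}

int
main()
{
    EntryCache cache;
    load(cache, "account-A"); // prints "disk read for account-A"
    load(cache, "account-A"); // second lookup hits the cache, no disk read
}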