|
17 | 17 | * knowledge of the underlying PAI implementation). |
18 | 18 | */ |
19 | 19 |
|
20 | | -/* |
21 | | - * For now, this is just one field; eventually, we'll probably want to get more |
22 | | - * fine-grained data out (like per-size class statistics). |
23 | | - */ |
| 20 | +typedef struct sec_bin_stats_s sec_bin_stats_t; |
| 21 | +struct sec_bin_stats_s { |
| 22 | + /* Number of alloc requests that did not find an extent in this bin. */
| 23 | + size_t nmisses;
| 24 | + /* Number of successful alloc requests. */
| 25 | + size_t nhits;
| 26 | + /* Number of dallocs that caused a flush. */
| 27 | + size_t ndalloc_flush;
| 28 | + /* Number of dallocs that did not cause a flush. */
| 29 | + size_t ndalloc_noflush;
| 30 | +}; |
24 | 31 | typedef struct sec_stats_s sec_stats_t; |
25 | 32 | struct sec_stats_s { |
26 | 33 | /* Sum of bytes_cur across all bins. */
27 | 34 | size_t bytes; |
| 35 | + |
| 36 | + /* Totals of per-bin stats, summed across all bins. */
| 37 | + sec_bin_stats_t total; |
28 | 38 | }; |
29 | 39 |
|
| 40 | +static inline void |
| 41 | +sec_bin_stats_init(sec_bin_stats_t *stats) { |
| 42 | + stats->nmisses = 0;
| 43 | + stats->nhits = 0;
| 44 | + stats->ndalloc_flush = 0;
| 45 | + stats->ndalloc_noflush = 0;
| 46 | +} |
| 47 | + |
| 48 | +static inline void |
| 49 | +sec_bin_stats_accum(sec_bin_stats_t *dst, sec_bin_stats_t *src) { |
| 50 | + dst->nmisses += src->nmisses; |
| 51 | + dst->nhits += src->nhits; |
| 52 | + dst->ndalloc_flush += src->ndalloc_flush; |
| 53 | + dst->ndalloc_noflush += src->ndalloc_noflush; |
| 54 | +} |
| 55 | + |
30 | 56 | static inline void |
31 | 57 | sec_stats_accum(sec_stats_t *dst, sec_stats_t *src) { |
32 | 58 | dst->bytes += src->bytes; |
| 59 | + sec_bin_stats_accum(&dst->total, &src->total); |
33 | 60 | } |
34 | 61 |
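For illustration, merging per-bin counters into these centralized totals could look like the sketch below. This is a hypothetical helper, not part of the change; it uses the sec_t and sec_bin_t types defined further down, and assumes the caller has synchronized access to each bin (e.g. holds bin->mtx or the sec is quiesced):

    static inline void
    sec_stats_merge_sketch(sec_t *sec, sec_stats_t *stats) {
    	/* Walk every bin, summing its bytes and counters into the totals. */
    	for (pszind_t i = 0; i < sec->npsizes; i++) {
    		sec_bin_t *bin = &sec->bins[i];
    		stats->bytes += bin->bytes_cur;
    		sec_bin_stats_accum(&stats->total, &bin->stats);
    	}
    }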
|
35 | 62 | /* A collection of free extents, all of the same size. */
36 | 63 | typedef struct sec_bin_s sec_bin_t; |
37 | 64 | struct sec_bin_s { |
38 | 65 | /* |
39 | | - * When we fail to fulfill an allocation, we do a batch-alloc on the |
40 | | - * underlying allocator to fill extra items, as well. We drop the SEC |
41 | | - * lock while doing so, to allow operations on other bins to succeed. |
42 | | - * That introduces the possibility of other threads also trying to |
43 | | - * allocate out of this bin, failing, and also going to the backing |
44 | | - * allocator. To avoid a thundering herd problem in which lots of |
45 | | - * threads do batch allocs and overfill this bin as a result, we only |
46 | | - * allow one batch allocation at a time for a bin. This bool tracks |
47 | | - * whether or not some thread is already batch allocating. |
48 | | - * |
49 | | - * Eventually, the right answer may be a smarter sharding policy for the |
50 | | - * bins (e.g. a mutex per bin, which would also be more scalable |
51 | | - * generally; the batch-allocating thread could hold it while |
52 | | - * batch-allocating). |
| 66 | + * Protects the data members of the bin. |
53 | 67 | */ |
54 | | - bool being_batch_filled; |
| 68 | + malloc_mutex_t mtx; |
55 | 69 |
|
56 | 70 | /* |
57 | | - * Number of bytes in this particular bin (as opposed to the |
58 | | - * sec_shard_t's bytes_cur. This isn't user visible or reported in |
59 | | - * stats; rather, it allows us to quickly determine the change in the |
60 | | - * centralized counter when flushing. |
| 71 | + * Number of bytes in this particular bin. |
61 | 72 | */ |
62 | 73 | size_t bytes_cur; |
63 | 74 | edata_list_active_t freelist; |
| 75 | + sec_bin_stats_t stats; |
64 | 76 | }; |
65 | 77 |
|
66 | | -typedef struct sec_shard_s sec_shard_t; |
67 | | -struct sec_shard_s { |
68 | | - /* |
69 | | - * We don't keep per-bin mutexes, even though that would allow more |
70 | | - * sharding; this allows global cache-eviction, which in turn allows for |
71 | | - * better balancing across free lists. |
72 | | - */ |
73 | | - malloc_mutex_t mtx; |
74 | | - /* |
75 | | - * A SEC may need to be shut down (i.e. flushed of its contents and |
76 | | - * prevented from further caching). To avoid tricky synchronization |
77 | | - * issues, we just track enabled-status in each shard, guarded by a |
78 | | - * mutex. In practice, this is only ever checked during brief races, |
79 | | - * since the arena-level atomic boolean tracking HPA enabled-ness means |
80 | | - * that we won't go down these pathways very often after custom extent |
81 | | - * hooks are installed. |
82 | | - */ |
83 | | - bool enabled; |
| 78 | +typedef struct sec_s sec_t; |
| 79 | +struct sec_s { |
| 80 | + sec_opts_t opts; /* Configuration; nshards == 0 means the sec is unused. */
84 | 81 | sec_bin_t *bins; |
85 | | - /* Number of bytes in all bins in the shard. */ |
86 | | - size_t bytes_cur; |
87 | | - /* The next pszind to flush in the flush-some pathways. */ |
88 | | - pszind_t to_flush_next; |
| 82 | + pszind_t npsizes; /* Number of bins (one per page size class). */
89 | 83 | }; |
90 | 84 |
|
91 | | -typedef struct sec_s sec_t; |
92 | | -struct sec_s { |
93 | | - pai_t pai; |
94 | | - pai_t *fallback; |
| 85 | +static inline bool |
| 86 | +sec_is_used(sec_t *sec) { |
| 87 | + return sec->opts.nshards != 0; |
| 88 | +} |
95 | 89 |
|
96 | | - sec_opts_t opts; |
97 | | - sec_shard_t *shards; |
98 | | - pszind_t npsizes; |
99 | | -}; |
| 90 | +static inline bool |
| 91 | +sec_size_supported(sec_t *sec, size_t size) { |
| 92 | + return sec_is_used(sec) && size <= sec->opts.max_alloc; |
| 93 | +} |
| 94 | + |
| 95 | +/* Returns NULL if the sec does not have an extent available. */
| 96 | +edata_t *sec_alloc(tsdn_t *tsdn, sec_t *sec, size_t size); |
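As a usage sketch (not the actual call site): a caller might try the cache first and fall back to its backing allocator on a miss. Here backing_alloc is a hypothetical stand-in for whatever backing allocator the caller uses:

    static edata_t *
    alloc_sketch(tsdn_t *tsdn, sec_t *sec, size_t size) {
    	if (sec_size_supported(sec, size)) {
    		edata_t *edata = sec_alloc(tsdn, sec, size);
    		if (edata != NULL) {
    			return edata; /* Cache hit. */
    		}
    	}
    	/* Miss or unsupported size: go to the backing allocator. */
    	return backing_alloc(tsdn, size); /* hypothetical */
    }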
| 97 | +void sec_fill(tsdn_t *tsdn, sec_t *sec, size_t size, |
| 98 | + edata_list_active_t *result, size_t nallocs); |
| 99 | + |
| 100 | +/* |
| 101 | + * Upon return, dalloc_list is empty if the sec consumed every extent,
| 102 | + * or non-empty with extents the caller must flush from the cache.
| 103 | + */ |
| 104 | +void sec_dalloc(tsdn_t *tsdn, sec_t *sec, edata_list_active_t *dalloc_list); |
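A sketch of the caller's side, assuming the usual typed-list helpers generated for edata_list_active_t; backing_dalloc is a hypothetical stand-in for the backing allocator's deallocation routine:

    static void
    dalloc_sketch(tsdn_t *tsdn, sec_t *sec, edata_t *edata) {
    	edata_list_active_t dalloc_list;
    	edata_list_active_init(&dalloc_list);
    	edata_list_active_append(&dalloc_list, edata);

    	sec_dalloc(tsdn, sec, &dalloc_list);

    	/* Deallocate whatever the sec declined to cache. */
    	edata_t *e;
    	while ((e = edata_list_active_first(&dalloc_list)) != NULL) {
    		edata_list_active_remove(&dalloc_list, e);
    		backing_dalloc(tsdn, e); /* hypothetical */
    	}
    }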
| 105 | + |
| 106 | +bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, const sec_opts_t *opts); |
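A minimal initialization sketch. The option values are arbitrary examples; only nshards and max_alloc are referenced in this header, so any other fields sec_init requires are left zeroed here and would need to be populated by a real caller:

    static bool
    init_sketch(tsdn_t *tsdn, sec_t *sec, base_t *base) {
    	sec_opts_t opts = {0};
    	opts.nshards = 4;           /* Arbitrary example value. */
    	opts.max_alloc = 32 * 1024; /* Cache allocations up to 32 KiB. */
    	return sec_init(tsdn, sec, base, &opts);
    }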
100 | 107 |
|
101 | | -bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback, |
102 | | - const sec_opts_t *opts); |
103 | | -void sec_flush(tsdn_t *tsdn, sec_t *sec); |
104 | | -void sec_disable(tsdn_t *tsdn, sec_t *sec); |
| 108 | +/* Empties the cache, filling to_flush with extents the caller must deallocate. */
| 109 | +void sec_flush(tsdn_t *tsdn, sec_t *sec, edata_list_active_t *to_flush); |
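For example, a shutdown path might drain the cache and return everything to the backing allocator, using the same drain loop as the dalloc sketch above (backing_dalloc again hypothetical):

    static void
    shutdown_sketch(tsdn_t *tsdn, sec_t *sec) {
    	edata_list_active_t to_flush;
    	edata_list_active_init(&to_flush);
    	sec_flush(tsdn, sec, &to_flush);

    	/* Deallocate everything that was cached. */
    	edata_t *e;
    	while ((e = edata_list_active_first(&to_flush)) != NULL) {
    		edata_list_active_remove(&to_flush, e);
    		backing_dalloc(tsdn, e); /* hypothetical */
    	}
    }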
105 | 110 |
|
106 | 111 | /* |
107 | 112 | * Morally, these two stats methods probably ought to be a single one (and the |
|