
Commit dadc477

Objcache: add locking option and drain functionality
This change introduces a new heap sub-type, called caching_heap, that exports an additional callback to drain memory that has been cached. The objcache heap is now a caching_heap. In addition, the objcache heap now implements locking, which allows safe use of the drain functionality in concurrently accessed heaps without the need for an external lock. Memory cleaner instances have been added in various places where an objcache is used, so that caching heaps can be drained when the system is low on memory. Partially addresses #1494.
1 parent 63ba81d commit dadc477
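For context, here is a minimal sketch of the pattern this commit applies in the drivers below: create an objcache with the new locking flag, register a memory cleaner that drains it when the system is low on memory, and allocate from it without an external lock. The my_driver_* names and the standalone static state are illustrative only; the calls themselves (allocate_objcache, cache_drain, mm_register_mem_cleaner and the closure macros) are the ones used in the per-file diffs.

/* Hedged sketch of a caching_heap consumer; my_driver_* names are hypothetical. */

declare_closure_struct(0, 1, u64, my_driver_mem_cleaner,
                       u64, clean_bytes);

static struct {
    caching_heap cache;                             /* locking objcache */
    closure_struct(my_driver_mem_cleaner, cleaner); /* registered memory cleaner */
} my_driver;

define_closure_function(0, 1, u64, my_driver_mem_cleaner,
                        u64, clean_bytes)
{
    /* invoked under memory pressure; returns the number of bytes released */
    return cache_drain(my_driver.cache, clean_bytes);
}

static void my_driver_init(heap general, heap backed, bytes objsize)
{
    /* the last argument enables the objcache's internal lock, so the drain above
     * can run concurrently with allocations without an external spinlock */
    my_driver.cache = allocate_objcache(general, backed, objsize, PAGESIZE, true);
    assert(my_driver.cache != INVALID_ADDRESS);
    mm_register_mem_cleaner(init_closure(&my_driver.cleaner, my_driver_mem_cleaner));
}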

17 files changed: +222 -105 lines

src/drivers/acpi.c (+4 -3)

@@ -491,8 +491,8 @@ ACPI_STATUS AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *init_val, ACPI
 ACPI_STATUS AcpiOsCreateCache(char *cache_name, UINT16 object_size, UINT16 max_depth,
                               ACPI_CACHE_T **return_cache)
 {
-    heap h = allocate_objcache(acpi_heap, (heap)heap_linear_backed(get_kernel_heaps()), object_size,
-                               PAGESIZE);
+    caching_heap h = allocate_objcache(acpi_heap, (heap)heap_linear_backed(get_kernel_heaps()),
+                                       object_size, PAGESIZE, false);
     if (h == INVALID_ADDRESS)
         return AE_NO_MEMORY;
     *return_cache = (ACPI_CACHE_T *)h;
@@ -515,7 +515,8 @@ ACPI_STATUS AcpiOsReleaseObject(ACPI_CACHE_T *cache, void *object)
 
 ACPI_STATUS AcpiOsPurgeCache(ACPI_CACHE_T *cache)
 {
-    /* not implemented */
+    caching_heap ch = (caching_heap)cache;
+    cache_drain(ch, CACHE_DRAIN_ALL);
     return AE_OK;
 }
 

src/hyperv/netvsc/hv_net_vsc.h (+4 -2)

@@ -958,6 +958,8 @@ typedef struct netvsc_packet_ {
 /*
  * Device-specific softc structure
  */
+declare_closure_struct(0, 1, u64, hn_mem_cleaner,
+                       u64, clean_bytes);
 typedef struct hn_softc {
     heap general;
     heap contiguous; /* physically */
@@ -968,8 +970,8 @@ typedef struct hn_softc {
     /* lwIP */
     struct netif *netif;
     u16 rxbuflen;
-    heap rxbuffers;
-    struct spinlock rx_buflock;
+    caching_heap rxbuffers;
+    closure_struct(hn_mem_cleaner, mem_cleaner);
 } hn_softc_t;
 
 

src/hyperv/netvsc/netvsc.c (+11 -8)

@@ -53,16 +53,13 @@ static void
 receive_buffer_release(struct pbuf *p)
 {
     xpbuf x = (void *)p;
-    u64 flags = spin_lock_irq(&x->hn->rx_buflock);
-    deallocate(x->hn->rxbuffers, x, x->hn->rxbuflen + sizeof(struct xpbuf));
-    spin_unlock_irq(&x->hn->rx_buflock, flags);
+    deallocate((heap)x->hn->rxbuffers, x, x->hn->rxbuflen + sizeof(struct xpbuf));
 }
 
 static xpbuf
 receive_buffer_alloc(hn_softc_t *hn)
 {
-    u64 flags = spin_lock_irq(&hn->rx_buflock);
-    xpbuf x = allocate(hn->rxbuffers, sizeof(struct xpbuf) + hn->rxbuflen);
+    xpbuf x = allocate((heap)hn->rxbuffers, sizeof(struct xpbuf) + hn->rxbuflen);
     assert(x != INVALID_ADDRESS);
     x->hn = hn;
     x->p.custom_free_function = receive_buffer_release;
@@ -73,7 +70,6 @@ receive_buffer_alloc(hn_softc_t *hn)
                         &x->p,
                         x+1,
                         hn->rxbuflen);
-    spin_unlock_irq(&hn->rx_buflock, flags);
     return x;
 }
 
@@ -222,6 +218,13 @@ low_level_output(struct netif *netif, struct pbuf *p)
     return ERR_OK;
 }
 
+define_closure_function(0, 1, u64, hn_mem_cleaner,
+                        u64, clean_bytes)
+{
+    hn_softc_t *hn = struct_from_field(closure_self(), hn_softc_t *, mem_cleaner);
+    return cache_drain(hn->rxbuffers, clean_bytes);
+}
+
 static err_t
 vmxif_init(struct netif *netif)
 {
@@ -257,8 +260,7 @@ netvsc_attach(kernel_heaps kh, hv_device* device)
 
     hn->rxbuflen = NETVSC_RX_MAXSEGSIZE;
     hn->rxbuffers = allocate_objcache(hn->general, hn->contiguous,
-                                      hn->rxbuflen + sizeof(struct xpbuf), PAGESIZE_2M);
-    spin_lock_init(&hn->rx_buflock);
+                                      hn->rxbuflen + sizeof(struct xpbuf), PAGESIZE_2M, true);
 
     struct netif *netif = allocate(h, sizeof(struct netif));
     assert(netif != INVALID_ADDRESS);
@@ -280,6 +282,7 @@ netvsc_attach(kernel_heaps kh, hv_device* device)
                       ethernet_input);
     lwip_unlock();
 
+    mm_register_mem_cleaner(init_closure(&hn->mem_cleaner, hn_mem_cleaner));
     netvsc_debug("%s: hwaddr %02x:%02x:%02x:%02x:%02x:%02x", __func__,
                  netif->hwaddr[0], netif->hwaddr[1], netif->hwaddr[2],
                  netif->hwaddr[3], netif->hwaddr[4], netif->hwaddr[5]);

src/hyperv/storvsc/storvsc.c (+16 -10)

@@ -143,12 +143,14 @@ struct hv_storvsc_request {
     volatile boolean channel_wait_msg_flag;
 };
 
+declare_closure_struct(0, 1, u64, storvsc_mem_cleaner,
+                       u64, clean_bytes);
 struct storvsc_softc {
     heap general;
     heap contiguous; /* physically */
 
-    heap hcb_objcache;
-    struct spinlock mem_lock;
+    caching_heap hcb_objcache;
+    closure_struct(storvsc_mem_cleaner, mem_cleaner);
 
     struct list hcb_queue;
     struct spinlock queue_lock;
@@ -643,19 +645,16 @@ static void storvsc_init_requests(struct storvsc_softc *sc)
 
 static void storvsc_hcb_dealloc(struct storvsc_softc *sc, struct storvsc_hcb *hcb)
 {
-    u64 flags = spin_lock_irq(&sc->mem_lock);
     if (hcb->alloc_len) {
         deallocate(sc->contiguous, hcb->data, pad(hcb->alloc_len, sc->contiguous->pagesize));
     }
-    deallocate(sc->hcb_objcache, hcb, sizeof(struct storvsc_hcb));
-    spin_unlock_irq(&sc->mem_lock, flags);
+    deallocate((heap)sc->hcb_objcache, hcb, sizeof(struct storvsc_hcb));
 }
 
 static struct storvsc_hcb *storvsc_hcb_alloc(struct storvsc_softc* sc, u16 target, u16 lun, u8 cmd)
 {
     int alloc_len = scsi_data_len(cmd);
-    u64 flags = spin_lock_irq(&sc->mem_lock);
-    struct storvsc_hcb *hcb = allocate_zero(sc->hcb_objcache, sizeof(struct storvsc_hcb));
+    struct storvsc_hcb *hcb = allocate_zero((heap)sc->hcb_objcache, sizeof(struct storvsc_hcb));
     assert(hcb != INVALID_ADDRESS);
     if (alloc_len) {
         hcb->data = allocate(sc->contiguous, alloc_len);
@@ -665,7 +664,6 @@ static struct storvsc_hcb *storvsc_hcb_alloc(struct storvsc_softc* sc, u16 targe
         hcb->data = 0;
         hcb->alloc_len = 0;
     }
-    spin_unlock_irq(&sc->mem_lock, flags);
     hcb->cdb[0] = cmd;
     return hcb;
 }
@@ -910,6 +908,14 @@ static void storvsc_report_luns(struct storvsc_softc *sc, u16 target)
     storvsc_action(sc, r, target, 0);
 }
 
+define_closure_function(0, 1, u64, storvsc_mem_cleaner,
+                        u64, clean_bytes)
+{
+    struct storvsc_softc *sc = struct_from_field(closure_self(), struct storvsc_softc *,
+                                                 mem_cleaner);
+    return cache_drain(sc->hcb_objcache, clean_bytes);
+}
+
 /**
  * @brief StorVSC attach function
  *
@@ -943,8 +949,8 @@ static status storvsc_attach(kernel_heaps kh, hv_device* device, storage_attach
     spin_lock_init(&sc->queue_lock);
     // setup hcb cache
     sc->hcb_objcache = allocate_objcache(sc->general, sc->contiguous,
-                                         sizeof(struct storvsc_hcb), PAGESIZE_2M);
-    spin_lock_init(&sc->mem_lock);
+                                         sizeof(struct storvsc_hcb), PAGESIZE_2M, true);
+    mm_register_mem_cleaner(init_closure(&sc->mem_cleaner, storvsc_mem_cleaner));
     sc->sa = a;
     sc->disks = allocate_vector(h, 1);
     spin_lock_init(&sc->disks_lock);

src/kernel/pagecache.c (+2 -4)

@@ -1536,10 +1536,8 @@ void init_pagecache(heap general, heap contiguous, heap physical, u64 pagesize)
     assert(pc->zero_page != INVALID_ADDRESS);
 
 #ifdef KERNEL
-    pc->completions =
-        locking_heap_wrapper(general, allocate_objcache(general, contiguous,
-                                                        sizeof(struct page_completion),
-                                                        PAGESIZE));
+    pc->completions = (heap)allocate_objcache(general, contiguous, sizeof(struct page_completion),
+                                              PAGESIZE, true);
     assert(pc->completions != INVALID_ADDRESS);
     spin_lock_init(&pc->state_lock);
     spin_lock_init(&pc->global_lock);

src/net/netsyscall.c (+2 -2)

@@ -2391,8 +2391,8 @@ boolean netsyscall_init(unix_heaps uh, tuple cfg)
     else
         so_rcvbuf = DEFAULT_SO_RCVBUF;
     kernel_heaps kh = (kernel_heaps)uh;
-    heap socket_cache = locking_heap_wrapper(heap_general(kh), allocate_objcache(heap_general(kh),
-        (heap)heap_linear_backed(kh), sizeof(struct netsock), PAGESIZE));
+    caching_heap socket_cache = allocate_objcache(heap_general(kh), (heap)heap_linear_backed(kh),
+                                                  sizeof(struct netsock), PAGESIZE, true);
     if (socket_cache == INVALID_ADDRESS)
         return false;
     uh->socket_cache = socket_cache;

src/runtime/heap/heap.h (+12 -3)

@@ -19,6 +19,15 @@ typedef struct backed_heap {
 #define alloc_map(__bh, __l, __p) ((__bh)->alloc_map(__bh, __l, __p))
 #define dealloc_unmap(__bh, __v, __p, __l) ((__bh)->dealloc_unmap(__bh, __v, __p, __l))
 
+typedef struct caching_heap {
+    struct heap h;
+    bytes (*drain)(struct caching_heap *ch, bytes len);
+} *caching_heap;
+
+#define CACHE_DRAIN_ALL ((bytes)-1)
+
+#define cache_drain(__ch, __l) ((__ch)->drain(__ch, __l))
+
 heap debug_heap(heap m, heap p);
 heap mem_debug(heap m, heap p, u64 padsize);
 heap mem_debug_objcache(heap meta, heap parent, u64 objsize, u64 pagesize);
@@ -44,9 +53,9 @@ static inline value heap_management(heap h)
 }
 
 heap wrap_freelist(heap meta, heap parent, bytes size);
-heap allocate_objcache(heap meta, heap parent, bytes objsize, bytes pagesize);
-heap allocate_wrapped_objcache(heap meta, heap parent, bytes objsize, bytes pagesize, heap wrapper);
-heap allocate_objcache_preallocated(heap meta, heap parent, bytes objsize, bytes pagesize, u64 prealloc_count, boolean prealloc_only);
+caching_heap allocate_objcache(heap meta, heap parent, bytes objsize, bytes pagesize, boolean locking);
+caching_heap allocate_wrapped_objcache(heap meta, heap parent, bytes objsize, bytes pagesize, heap wrapper);
+caching_heap allocate_objcache_preallocated(heap meta, heap parent, bytes objsize, bytes pagesize, u64 prealloc_count, boolean prealloc_only);
 boolean objcache_validate(heap h);
 heap objcache_from_object(u64 obj, bytes parent_pagesize);
 heap allocate_mcache(heap meta, heap parent, int min_order, int max_order, bytes pagesize);
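For reference, a rough sketch of what a heap sub-type has to provide to satisfy the caching_heap interface declared above. The toy_* names and the bookkeeping are hypothetical stand-ins, not the actual objcache implementation.

/* Illustrative only: a heap sub-type exporting the drain callback. */
typedef struct toy_caching_heap {
    struct caching_heap ch;    /* embeds struct heap h plus the drain callback */
    heap parent;               /* parent heap that cached memory is returned to */
    bytes cached;              /* stand-in for the real free-page bookkeeping */
} *toy_caching_heap;

static bytes toy_drain(caching_heap ch, bytes len)
{
    toy_caching_heap t = (toy_caching_heap)ch;
    /* a real implementation would return cached pages to t->parent here; this
     * toy only adjusts its counter. CACHE_DRAIN_ALL ((bytes)-1) drains everything. */
    bytes drained = len < t->cached ? len : t->cached;
    t->cached -= drained;
    return drained;
}

static caching_heap toy_init(toy_caching_heap t, heap parent)
{
    t->parent = parent;
    t->cached = 0;
    t->ch.drain = toy_drain;    /* wired up so cache_drain(&t->ch, len) works */
    return &t->ch;
}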

src/runtime/heap/mcache.c (+1 -1)

@@ -297,7 +297,7 @@ heap allocate_mcache(heap meta, heap parent, int min_order, int max_order, bytes
 #if defined(MEMDEBUG_MCACHE) || defined(MEMDEBUG_ALL)
     heap h = mem_debug_objcache(meta, parent, obj_size, pagesize);
 #else
-    heap h = allocate_objcache(meta, parent, obj_size, pagesize);
+    caching_heap h = allocate_objcache(meta, parent, obj_size, pagesize, false);
 #endif
 #ifdef MCACHE_DEBUG
     rputs(" - cache size ");

src/runtime/heap/mem_debug.c (+1 -1)

@@ -161,7 +161,7 @@ heap mem_debug_objcache(heap meta, heap parent, u64 objsize, u64 pagesize)
     u64 padding = objsize >= PAGESIZE ? PAGESIZE : PAD_MIN;
 
     newsize = objsize + padding * 2;
-    mdh->parent = allocate_wrapped_objcache(meta, parent, newsize, pagesize, &mdh->h);
+    mdh->parent = (heap)allocate_wrapped_objcache(meta, parent, newsize, pagesize, &mdh->h);
     mdh->h.pagesize = objsize;
     mdh->h.alloc = mem_debug_alloc;
     mdh->h.dealloc = mem_debug_dealloc;
