Skip to content

Commit 414d0f4

Browse files
committed Apr 15, 2024
io_uring/alloc_cache: switch to array based caching
Currently lists are being used to manage this, but best practice is usually to have these in an array instead as that it cheaper to manage. Outside of that detail, games are also played with KASAN as the list is inside the cached entry itself. Finally, all users of this need a struct io_cache_entry embedded in their struct, which is union'ized with something else in there that isn't used across the free -> realloc cycle. Get rid of all of that, and simply have it be an array. This will not change the memory used, as we're just trading an 8-byte member entry for the per-elem array size. This reduces the overhead of the recycled allocations, and it reduces the amount of code code needed to support recycling to about half of what it currently is. Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent e10677a commit 414d0f4

15 files changed

+93
-145
lines changed
 

‎include/linux/io_uring_types.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ struct io_ev_fd {
220220
};
221221

222222
struct io_alloc_cache {
223-
struct io_wq_work_node list;
223+
void **entries;
224224
unsigned int nr_cached;
225225
unsigned int max_cached;
226226
size_t elem_size;

‎io_uring/alloc_cache.h

+26-31
Original file line numberDiff line numberDiff line change
@@ -6,61 +6,56 @@
66
*/
77
#define IO_ALLOC_CACHE_MAX 128
88

9-
struct io_cache_entry {
10-
struct io_wq_work_node node;
11-
};
12-
139
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
14-
struct io_cache_entry *entry)
10+
void *entry)
1511
{
1612
if (cache->nr_cached < cache->max_cached) {
17-
cache->nr_cached++;
18-
wq_stack_add_head(&entry->node, &cache->list);
19-
kasan_mempool_poison_object(entry);
13+
if (!kasan_mempool_poison_object(entry))
14+
return false;
15+
cache->entries[cache->nr_cached++] = entry;
2016
return true;
2117
}
2218
return false;
2319
}
2420

25-
static inline bool io_alloc_cache_empty(struct io_alloc_cache *cache)
26-
{
27-
return !cache->list.next;
28-
}
29-
30-
static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
21+
static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
3122
{
32-
if (cache->list.next) {
33-
struct io_cache_entry *entry;
23+
if (cache->nr_cached) {
24+
void *entry = cache->entries[--cache->nr_cached];
3425

35-
entry = container_of(cache->list.next, struct io_cache_entry, node);
3626
kasan_mempool_unpoison_object(entry, cache->elem_size);
37-
cache->list.next = cache->list.next->next;
38-
cache->nr_cached--;
3927
return entry;
4028
}
4129

4230
return NULL;
4331
}
4432

45-
static inline void io_alloc_cache_init(struct io_alloc_cache *cache,
33+
/* returns false if the cache was initialized properly */
34+
static inline bool io_alloc_cache_init(struct io_alloc_cache *cache,
4635
unsigned max_nr, size_t size)
4736
{
48-
cache->list.next = NULL;
49-
cache->nr_cached = 0;
50-
cache->max_cached = max_nr;
51-
cache->elem_size = size;
37+
cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
38+
if (cache->entries) {
39+
cache->nr_cached = 0;
40+
cache->max_cached = max_nr;
41+
cache->elem_size = size;
42+
return false;
43+
}
44+
return true;
5245
}
5346

5447
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
55-
void (*free)(struct io_cache_entry *))
48+
void (*free)(const void *))
5649
{
57-
while (1) {
58-
struct io_cache_entry *entry = io_alloc_cache_get(cache);
50+
void *entry;
51+
52+
if (!cache->entries)
53+
return;
5954

60-
if (!entry)
61-
break;
55+
while ((entry = io_alloc_cache_get(cache)) != NULL)
6256
free(entry);
63-
}
64-
cache->nr_cached = 0;
57+
58+
kvfree(cache->entries);
59+
cache->entries = NULL;
6560
}
6661
#endif

‎io_uring/futex.c

+12-18
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99

1010
#include "../kernel/futex/futex.h"
1111
#include "io_uring.h"
12-
#include "rsrc.h"
12+
#include "alloc_cache.h"
1313
#include "futex.h"
1414

1515
struct io_futex {
@@ -27,27 +27,21 @@ struct io_futex {
2727
};
2828

2929
struct io_futex_data {
30-
union {
31-
struct futex_q q;
32-
struct io_cache_entry cache;
33-
};
30+
struct futex_q q;
3431
struct io_kiocb *req;
3532
};
3633

37-
void io_futex_cache_init(struct io_ring_ctx *ctx)
38-
{
39-
io_alloc_cache_init(&ctx->futex_cache, IO_NODE_ALLOC_CACHE_MAX,
40-
sizeof(struct io_futex_data));
41-
}
34+
#define IO_FUTEX_ALLOC_CACHE_MAX 32
4235

43-
static void io_futex_cache_entry_free(struct io_cache_entry *entry)
36+
bool io_futex_cache_init(struct io_ring_ctx *ctx)
4437
{
45-
kfree(container_of(entry, struct io_futex_data, cache));
38+
return io_alloc_cache_init(&ctx->futex_cache, IO_FUTEX_ALLOC_CACHE_MAX,
39+
sizeof(struct io_futex_data));
4640
}
4741

4842
void io_futex_cache_free(struct io_ring_ctx *ctx)
4943
{
50-
io_alloc_cache_free(&ctx->futex_cache, io_futex_cache_entry_free);
44+
io_alloc_cache_free(&ctx->futex_cache, kfree);
5145
}
5246

5347
static void __io_futex_complete(struct io_kiocb *req, struct io_tw_state *ts)
@@ -63,7 +57,7 @@ static void io_futex_complete(struct io_kiocb *req, struct io_tw_state *ts)
6357
struct io_ring_ctx *ctx = req->ctx;
6458

6559
io_tw_lock(ctx, ts);
66-
if (!io_alloc_cache_put(&ctx->futex_cache, &ifd->cache))
60+
if (!io_alloc_cache_put(&ctx->futex_cache, ifd))
6761
kfree(ifd);
6862
__io_futex_complete(req, ts);
6963
}
@@ -259,11 +253,11 @@ static void io_futex_wake_fn(struct wake_q_head *wake_q, struct futex_q *q)
259253

260254
static struct io_futex_data *io_alloc_ifd(struct io_ring_ctx *ctx)
261255
{
262-
struct io_cache_entry *entry;
256+
struct io_futex_data *ifd;
263257

264-
entry = io_alloc_cache_get(&ctx->futex_cache);
265-
if (entry)
266-
return container_of(entry, struct io_futex_data, cache);
258+
ifd = io_alloc_cache_get(&ctx->futex_cache);
259+
if (ifd)
260+
return ifd;
267261

268262
return kmalloc(sizeof(struct io_futex_data), GFP_NOWAIT);
269263
}

‎io_uring/futex.h

+3-2
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ int io_futex_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
1313
unsigned int issue_flags);
1414
bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
1515
bool cancel_all);
16-
void io_futex_cache_init(struct io_ring_ctx *ctx);
16+
bool io_futex_cache_init(struct io_ring_ctx *ctx);
1717
void io_futex_cache_free(struct io_ring_ctx *ctx);
1818
#else
1919
static inline int io_futex_cancel(struct io_ring_ctx *ctx,
@@ -27,8 +27,9 @@ static inline bool io_futex_remove_all(struct io_ring_ctx *ctx,
2727
{
2828
return false;
2929
}
30-
static inline void io_futex_cache_init(struct io_ring_ctx *ctx)
30+
static inline bool io_futex_cache_init(struct io_ring_ctx *ctx)
3131
{
32+
return false;
3233
}
3334
static inline void io_futex_cache_free(struct io_ring_ctx *ctx)
3435
{

‎io_uring/io_uring.c

+19-15
Original file line numberDiff line numberDiff line change
@@ -276,6 +276,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
276276
{
277277
struct io_ring_ctx *ctx;
278278
int hash_bits;
279+
bool ret;
279280

280281
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
281282
if (!ctx)
@@ -305,17 +306,19 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
305306
INIT_LIST_HEAD(&ctx->cq_overflow_list);
306307
INIT_LIST_HEAD(&ctx->io_buffers_cache);
307308
INIT_HLIST_HEAD(&ctx->io_buf_list);
308-
io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
309+
ret = io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
309310
sizeof(struct io_rsrc_node));
310-
io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
311+
ret |= io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
311312
sizeof(struct async_poll));
312-
io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
313+
ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
313314
sizeof(struct io_async_msghdr));
314-
io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
315+
ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
315316
sizeof(struct io_async_rw));
316-
io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
317+
ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
317318
sizeof(struct uring_cache));
318-
io_futex_cache_init(ctx);
319+
ret |= io_futex_cache_init(ctx);
320+
if (ret)
321+
goto err;
319322
init_completion(&ctx->ref_comp);
320323
xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
321324
mutex_init(&ctx->uring_lock);
@@ -345,6 +348,12 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
345348

346349
return ctx;
347350
err:
351+
io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
352+
io_alloc_cache_free(&ctx->apoll_cache, kfree);
353+
io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
354+
io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
355+
io_alloc_cache_free(&ctx->uring_cache, kfree);
356+
io_futex_cache_free(ctx);
348357
kfree(ctx->cancel_table.hbs);
349358
kfree(ctx->cancel_table_locked.hbs);
350359
xa_destroy(&ctx->io_bl_xa);
@@ -1482,7 +1491,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
14821491

14831492
if (apoll->double_poll)
14841493
kfree(apoll->double_poll);
1485-
if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
1494+
if (!io_alloc_cache_put(&ctx->apoll_cache, apoll))
14861495
kfree(apoll);
14871496
req->flags &= ~REQ_F_POLLED;
14881497
}
@@ -2778,11 +2787,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
27782787
mutex_unlock(&ctx->uring_lock);
27792788
}
27802789

2781-
static void io_rsrc_node_cache_free(struct io_cache_entry *entry)
2782-
{
2783-
kfree(container_of(entry, struct io_rsrc_node, cache));
2784-
}
2785-
27862790
static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
27872791
{
27882792
io_sq_thread_finish(ctx);
@@ -2797,10 +2801,10 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
27972801
__io_sqe_files_unregister(ctx);
27982802
io_cqring_overflow_kill(ctx);
27992803
io_eventfd_unregister(ctx);
2800-
io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
2804+
io_alloc_cache_free(&ctx->apoll_cache, kfree);
28012805
io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
28022806
io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
2803-
io_alloc_cache_free(&ctx->uring_cache, io_uring_cache_free);
2807+
io_alloc_cache_free(&ctx->uring_cache, kfree);
28042808
io_futex_cache_free(ctx);
28052809
io_destroy_buffers(ctx);
28062810
mutex_unlock(&ctx->uring_lock);
@@ -2816,7 +2820,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
28162820
WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
28172821
WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
28182822

2819-
io_alloc_cache_free(&ctx->rsrc_node_cache, io_rsrc_node_cache_free);
2823+
io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
28202824
if (ctx->mm_account) {
28212825
mmdrop(ctx->mm_account);
28222826
ctx->mm_account = NULL;

‎io_uring/net.c

+5-8
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
137137

138138
/* Let normal cleanup path reap it if we fail adding to the cache */
139139
iov = hdr->free_iov;
140-
if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
140+
if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
141141
if (iov)
142142
kasan_mempool_poison_object(iov);
143143
req->async_data = NULL;
@@ -148,12 +148,10 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
148148
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
149149
{
150150
struct io_ring_ctx *ctx = req->ctx;
151-
struct io_cache_entry *entry;
152151
struct io_async_msghdr *hdr;
153152

154-
entry = io_alloc_cache_get(&ctx->netmsg_cache);
155-
if (entry) {
156-
hdr = container_of(entry, struct io_async_msghdr, cache);
153+
hdr = io_alloc_cache_get(&ctx->netmsg_cache);
154+
if (hdr) {
157155
if (hdr->free_iov) {
158156
kasan_mempool_unpoison_object(hdr->free_iov,
159157
hdr->free_iov_nr * sizeof(struct iovec));
@@ -1490,11 +1488,10 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
14901488
return IOU_OK;
14911489
}
14921490

1493-
void io_netmsg_cache_free(struct io_cache_entry *entry)
1491+
void io_netmsg_cache_free(const void *entry)
14941492
{
1495-
struct io_async_msghdr *kmsg;
1493+
struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
14961494

1497-
kmsg = container_of(entry, struct io_async_msghdr, cache);
14981495
if (kmsg->free_iov) {
14991496
kasan_mempool_unpoison_object(kmsg->free_iov,
15001497
kmsg->free_iov_nr * sizeof(struct iovec));

‎io_uring/net.h

+5-13
Original file line numberDiff line numberDiff line change
@@ -3,23 +3,15 @@
33
#include <linux/net.h>
44
#include <linux/uio.h>
55

6-
#include "alloc_cache.h"
7-
86
struct io_async_msghdr {
97
#if defined(CONFIG_NET)
10-
union {
11-
struct iovec fast_iov;
12-
struct {
13-
struct io_cache_entry cache;
14-
/* entry size of ->free_iov, if valid */
15-
int free_iov_nr;
16-
};
17-
};
8+
struct iovec fast_iov;
189
/* points to an allocated iov, if NULL we use fast_iov instead */
1910
struct iovec *free_iov;
11+
int free_iov_nr;
12+
int namelen;
2013
__kernel_size_t controllen;
2114
__kernel_size_t payloadlen;
22-
int namelen;
2315
struct sockaddr __user *uaddr;
2416
struct msghdr msg;
2517
struct sockaddr_storage addr;
@@ -57,9 +49,9 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
5749
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
5850
void io_send_zc_cleanup(struct io_kiocb *req);
5951

60-
void io_netmsg_cache_free(struct io_cache_entry *entry);
52+
void io_netmsg_cache_free(const void *entry);
6153
#else
62-
static inline void io_netmsg_cache_free(struct io_cache_entry *entry)
54+
static inline void io_netmsg_cache_free(const void *entry)
6355
{
6456
}
6557
#endif

‎io_uring/poll.c

+3-9
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#include <uapi/linux/io_uring.h>
1515

1616
#include "io_uring.h"
17+
#include "alloc_cache.h"
1718
#include "refs.h"
1819
#include "napi.h"
1920
#include "opdef.h"
@@ -686,17 +687,15 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
686687
unsigned issue_flags)
687688
{
688689
struct io_ring_ctx *ctx = req->ctx;
689-
struct io_cache_entry *entry;
690690
struct async_poll *apoll;
691691

692692
if (req->flags & REQ_F_POLLED) {
693693
apoll = req->apoll;
694694
kfree(apoll->double_poll);
695695
} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
696-
entry = io_alloc_cache_get(&ctx->apoll_cache);
697-
if (entry == NULL)
696+
apoll = io_alloc_cache_get(&ctx->apoll_cache);
697+
if (!apoll)
698698
goto alloc_apoll;
699-
apoll = container_of(entry, struct async_poll, cache);
700699
apoll->poll.retries = APOLL_MAX_RETRY;
701700
} else {
702701
alloc_apoll:
@@ -1055,8 +1054,3 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
10551054
io_req_set_res(req, ret, 0);
10561055
return IOU_OK;
10571056
}
1058-
1059-
void io_apoll_cache_free(struct io_cache_entry *entry)
1060-
{
1061-
kfree(container_of(entry, struct async_poll, cache));
1062-
}

‎io_uring/poll.h

+1-8
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,5 @@
11
// SPDX-License-Identifier: GPL-2.0
22

3-
#include "alloc_cache.h"
4-
53
enum {
64
IO_APOLL_OK,
75
IO_APOLL_ABORTED,
@@ -17,10 +15,7 @@ struct io_poll {
1715
};
1816

1917
struct async_poll {
20-
union {
21-
struct io_poll poll;
22-
struct io_cache_entry cache;
23-
};
18+
struct io_poll poll;
2419
struct io_poll *double_poll;
2520
};
2621

@@ -46,6 +41,4 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
4641
bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
4742
bool cancel_all);
4843

49-
void io_apoll_cache_free(struct io_cache_entry *entry);
50-
5144
void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts);

‎io_uring/rsrc.c

+4-6
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
#include <uapi/linux/io_uring.h>
1414

1515
#include "io_uring.h"
16+
#include "alloc_cache.h"
1617
#include "openclose.h"
1718
#include "rsrc.h"
1819

@@ -169,7 +170,7 @@ static void io_rsrc_put_work(struct io_rsrc_node *node)
169170

170171
void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
171172
{
172-
if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
173+
if (!io_alloc_cache_put(&ctx->rsrc_node_cache, node))
173174
kfree(node);
174175
}
175176

@@ -197,12 +198,9 @@ void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
197198
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
198199
{
199200
struct io_rsrc_node *ref_node;
200-
struct io_cache_entry *entry;
201201

202-
entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
203-
if (entry) {
204-
ref_node = container_of(entry, struct io_rsrc_node, cache);
205-
} else {
202+
ref_node = io_alloc_cache_get(&ctx->rsrc_node_cache);
203+
if (!ref_node) {
206204
ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
207205
if (!ref_node)
208206
return NULL;

‎io_uring/rsrc.h

+1-6
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,6 @@
22
#ifndef IOU_RSRC_H
33
#define IOU_RSRC_H
44

5-
#include "alloc_cache.h"
6-
75
#define IO_NODE_ALLOC_CACHE_MAX 32
86

97
#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3)
@@ -36,10 +34,7 @@ struct io_rsrc_data {
3634
};
3735

3836
struct io_rsrc_node {
39-
union {
40-
struct io_cache_entry cache;
41-
struct io_ring_ctx *ctx;
42-
};
37+
struct io_ring_ctx *ctx;
4338
int refs;
4439
bool empty;
4540
u16 type;

‎io_uring/rw.c

+6-8
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
#include "io_uring.h"
1919
#include "opdef.h"
2020
#include "kbuf.h"
21+
#include "alloc_cache.h"
2122
#include "rsrc.h"
2223
#include "poll.h"
2324
#include "rw.h"
@@ -154,7 +155,7 @@ static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
154155
return;
155156
}
156157
iov = rw->free_iovec;
157-
if (io_alloc_cache_put(&req->ctx->rw_cache, &rw->cache)) {
158+
if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
158159
if (iov)
159160
kasan_mempool_poison_object(iov);
160161
req->async_data = NULL;
@@ -200,12 +201,10 @@ static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
200201
static int io_rw_alloc_async(struct io_kiocb *req)
201202
{
202203
struct io_ring_ctx *ctx = req->ctx;
203-
struct io_cache_entry *entry;
204204
struct io_async_rw *rw;
205205

206-
entry = io_alloc_cache_get(&ctx->rw_cache);
207-
if (entry) {
208-
rw = container_of(entry, struct io_async_rw, cache);
206+
rw = io_alloc_cache_get(&ctx->rw_cache);
207+
if (rw) {
209208
if (rw->free_iovec) {
210209
kasan_mempool_unpoison_object(rw->free_iovec,
211210
rw->free_iov_nr * sizeof(struct iovec));
@@ -1168,11 +1167,10 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
11681167
return nr_events;
11691168
}
11701169

1171-
void io_rw_cache_free(struct io_cache_entry *entry)
1170+
void io_rw_cache_free(const void *entry)
11721171
{
1173-
struct io_async_rw *rw;
1172+
struct io_async_rw *rw = (struct io_async_rw *) entry;
11741173

1175-
rw = container_of(entry, struct io_async_rw, cache);
11761174
if (rw->free_iovec) {
11771175
kasan_mempool_unpoison_object(rw->free_iovec,
11781176
rw->free_iov_nr * sizeof(struct iovec));

‎io_uring/rw.h

+2-5
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,7 @@
33
#include <linux/pagemap.h>
44

55
struct io_async_rw {
6-
union {
7-
size_t bytes_done;
8-
struct io_cache_entry cache;
9-
};
6+
size_t bytes_done;
107
struct iov_iter iter;
118
struct iov_iter_state iter_state;
129
struct iovec fast_iov;
@@ -28,4 +25,4 @@ void io_rw_fail(struct io_kiocb *req);
2825
void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts);
2926
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
3027
int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags);
31-
void io_rw_cache_free(struct io_cache_entry *entry);
28+
void io_rw_cache_free(const void *entry);

‎io_uring/uring_cmd.c

+4-10
Original file line numberDiff line numberDiff line change
@@ -11,18 +11,17 @@
1111
#include <asm/ioctls.h>
1212

1313
#include "io_uring.h"
14+
#include "alloc_cache.h"
1415
#include "rsrc.h"
1516
#include "uring_cmd.h"
1617

1718
static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
1819
{
1920
struct io_ring_ctx *ctx = req->ctx;
20-
struct io_cache_entry *entry;
2121
struct uring_cache *cache;
2222

23-
entry = io_alloc_cache_get(&ctx->uring_cache);
24-
if (entry) {
25-
cache = container_of(entry, struct uring_cache, cache);
23+
cache = io_alloc_cache_get(&ctx->uring_cache);
24+
if (cache) {
2625
req->flags |= REQ_F_ASYNC_DATA;
2726
req->async_data = cache;
2827
return cache;
@@ -39,7 +38,7 @@ static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
3938

4039
if (issue_flags & IO_URING_F_UNLOCKED)
4140
return;
42-
if (io_alloc_cache_put(&req->ctx->uring_cache, &cache->cache)) {
41+
if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) {
4342
ioucmd->sqe = NULL;
4443
req->async_data = NULL;
4544
req->flags &= ~REQ_F_ASYNC_DATA;
@@ -354,8 +353,3 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
354353
}
355354
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
356355
#endif
357-
358-
void io_uring_cache_free(struct io_cache_entry *entry)
359-
{
360-
kfree(container_of(entry, struct uring_cache, cache));
361-
}

‎io_uring/uring_cmd.h

+1-5
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,11 @@
11
// SPDX-License-Identifier: GPL-2.0
22

33
struct uring_cache {
4-
union {
5-
struct io_cache_entry cache;
6-
struct io_uring_sqe sqes[2];
7-
};
4+
struct io_uring_sqe sqes[2];
85
};
96

107
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
118
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
12-
void io_uring_cache_free(struct io_cache_entry *entry);
139

1410
bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
1511
struct task_struct *task, bool cancel_all);

0 commit comments

Comments
 (0)
Please sign in to comment.