|
5 | 5 | %linkable_extern_defs%
|
6 | 6 |
|
7 | 7 | %explicit_pmd_links%
|
| 8 | + |
// The following code block is copied from `drivers/mempool/ring`.
// TODO: automate this copying step instead of maintaining a duplicate.
| 11 | + |
| 12 | +/* SPDX-License-Identifier: BSD-3-Clause |
| 13 | + * Copyright(c) 2010-2016 Intel Corporation |
| 14 | + */ |
| 15 | + |
| 16 | +#include <stdio.h> |
| 17 | +#include <string.h> |
| 18 | + |
| 19 | +#include <rte_errno.h> |
| 20 | +#include <rte_ring.h> |
| 21 | +#include <rte_mempool.h> |
| 22 | + |
| 23 | +static int |
| 24 | +common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, |
| 25 | + unsigned n) |
| 26 | +{ |
| 27 | + return rte_ring_mp_enqueue_bulk(mp->pool_data, |
| 28 | + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; |
| 29 | +} |
| 30 | + |
| 31 | +static int |
| 32 | +common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, |
| 33 | + unsigned n) |
| 34 | +{ |
| 35 | + return rte_ring_sp_enqueue_bulk(mp->pool_data, |
| 36 | + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; |
| 37 | +} |
| 38 | + |
| 39 | +static int |
| 40 | +common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) |
| 41 | +{ |
| 42 | + return rte_ring_mc_dequeue_bulk(mp->pool_data, |
| 43 | + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; |
| 44 | +} |
| 45 | + |
| 46 | +static int |
| 47 | +common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) |
| 48 | +{ |
| 49 | + return rte_ring_sc_dequeue_bulk(mp->pool_data, |
| 50 | + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; |
| 51 | +} |
| 52 | + |
| 53 | +static unsigned |
| 54 | +common_ring_get_count(const struct rte_mempool *mp) |
| 55 | +{ |
| 56 | + return rte_ring_count(mp->pool_data); |
| 57 | +} |
| 58 | + |
| 59 | + |
| 60 | +static int |
| 61 | +common_ring_alloc(struct rte_mempool *mp) |
| 62 | +{ |
| 63 | + int rg_flags = 0, ret; |
| 64 | + char rg_name[RTE_RING_NAMESIZE]; |
| 65 | + struct rte_ring *r; |
| 66 | + |
| 67 | + ret = snprintf(rg_name, sizeof(rg_name), |
| 68 | + RTE_MEMPOOL_MZ_FORMAT, mp->name); |
| 69 | + if (ret < 0 || ret >= (int)sizeof(rg_name)) { |
| 70 | + rte_errno = ENAMETOOLONG; |
| 71 | + return -rte_errno; |
| 72 | + } |
| 73 | + |
| 74 | + /* ring flags */ |
| 75 | + if (mp->flags & MEMPOOL_F_SP_PUT) |
| 76 | + rg_flags |= RING_F_SP_ENQ; |
| 77 | + if (mp->flags & MEMPOOL_F_SC_GET) |
| 78 | + rg_flags |= RING_F_SC_DEQ; |
| 79 | + |
| 80 | + /* |
| 81 | + * Allocate the ring that will be used to store objects. |
| 82 | + * Ring functions will return appropriate errors if we are |
| 83 | + * running as a secondary process etc., so no checks made |
| 84 | + * in this function for that condition. |
| 85 | + */ |
| 86 | + r = rte_ring_create(rg_name, rte_align32pow2(mp->size + 1), |
| 87 | + mp->socket_id, rg_flags); |
| 88 | + if (r == NULL) |
| 89 | + return -rte_errno; |
| 90 | + |
| 91 | + mp->pool_data = r; |
| 92 | + |
| 93 | + return 0; |
| 94 | +} |
| 95 | + |
| 96 | +static void |
| 97 | +common_ring_free(struct rte_mempool *mp) |
| 98 | +{ |
| 99 | + rte_ring_free(mp->pool_data); |
| 100 | +} |
| 101 | + |
| 102 | +/* |
| 103 | + * The following 4 declarations of mempool ops structs address |
| 104 | + * the need for the backward compatible mempool handlers for |
| 105 | + * single/multi producers and single/multi consumers as dictated by the |
| 106 | + * flags provided to the rte_mempool_create function |
| 107 | + */ |
| 108 | +static const struct rte_mempool_ops ops_mp_mc = { |
| 109 | + .name = "ring_mp_mc", |
| 110 | + .alloc = common_ring_alloc, |
| 111 | + .free = common_ring_free, |
| 112 | + .enqueue = common_ring_mp_enqueue, |
| 113 | + .dequeue = common_ring_mc_dequeue, |
| 114 | + .get_count = common_ring_get_count, |
| 115 | +}; |
| 116 | + |
| 117 | +static const struct rte_mempool_ops ops_sp_sc = { |
| 118 | + .name = "ring_sp_sc", |
| 119 | + .alloc = common_ring_alloc, |
| 120 | + .free = common_ring_free, |
| 121 | + .enqueue = common_ring_sp_enqueue, |
| 122 | + .dequeue = common_ring_sc_dequeue, |
| 123 | + .get_count = common_ring_get_count, |
| 124 | +}; |
| 125 | + |
| 126 | +static const struct rte_mempool_ops ops_mp_sc = { |
| 127 | + .name = "ring_mp_sc", |
| 128 | + .alloc = common_ring_alloc, |
| 129 | + .free = common_ring_free, |
| 130 | + .enqueue = common_ring_mp_enqueue, |
| 131 | + .dequeue = common_ring_sc_dequeue, |
| 132 | + .get_count = common_ring_get_count, |
| 133 | +}; |
| 134 | + |
| 135 | +static const struct rte_mempool_ops ops_sp_mc = { |
| 136 | + .name = "ring_sp_mc", |
| 137 | + .alloc = common_ring_alloc, |
| 138 | + .free = common_ring_free, |
| 139 | + .enqueue = common_ring_sp_enqueue, |
| 140 | + .dequeue = common_ring_mc_dequeue, |
| 141 | + .get_count = common_ring_get_count, |
| 142 | +}; |
| 143 | + |
| 144 | +MEMPOOL_REGISTER_OPS(ops_mp_mc); |
| 145 | +MEMPOOL_REGISTER_OPS(ops_sp_sc); |
| 146 | +MEMPOOL_REGISTER_OPS(ops_mp_sc); |
| 147 | +MEMPOOL_REGISTER_OPS(ops_sp_mc); |
0 commit comments