Diffstat (limited to 'net/xdp/xsk_queue.h')
-rw-r--r--	net/xdp/xsk_queue.h	247
1 file changed, 247 insertions, 0 deletions
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
new file mode 100644
index 000000000000..7aa9a535db0e
--- /dev/null
+++ b/net/xdp/xsk_queue.h
@@ -0,0 +1,247 @@
/* SPDX-License-Identifier: GPL-2.0
 * XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>

#include "xdp_umem_props.h"

#define RX_BATCH_SIZE 16

struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* Common functions operating on both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

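/* Returns the number of entries available to the consumer, capped at dcnt.
 * The locally cached producer index is used first; the shared
 * ring->producer is only re-read when the cache says the queue is empty.
 */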
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

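/* Returns the number of free entries seen by the producer.  The cached
 * consumer index is refreshed from the shared ring only when the cached
 * value does not already show at least dcnt free entries.
 */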
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
{
	if (unlikely(idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

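/* Walks the batch between cons_tail and cons_head and returns a pointer to
 * the first valid frame id, skipping (and counting) invalid entries.
 * Returns NULL when the batch is exhausted.
 */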
static inline u32 *xskq_validate_id(struct xsk_queue *q)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_id(q, ring->desc[idx]))
			return &ring->desc[idx];

		q->cons_tail++;
	}

	return NULL;
}

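/* Peeks at the next frame id without consuming it.  When the local batch is
 * exhausted, the consumer index is published and up to RX_BATCH_SIZE new
 * entries are claimed from the ring.
 */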
static inline u32 *xskq_peek_id(struct xsk_queue *q)
{
	struct xdp_umem_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_id(q);
	}

	ring = (struct xdp_umem_ring *)q->ring;
	return &ring->desc[q->cons_tail & q->ring_mask];
}

static inline void xskq_discard_id(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_id(q);
}

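/* Writes a frame id into the ring and makes it visible to user space by
 * updating the producer index after a write barrier.
 */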
static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	ring->desc[q->prod_tail++ & q->ring_mask] = id;

	/* Order producer and data */
	smp_wmb();

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

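/* Reserves one slot for a later xskq_produce_id() call; only the local
 * producer head is advanced, nothing is published to the ring yet.
 */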
static inline int xskq_reserve_id(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */

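/* A descriptor is valid if its frame index is within the umem and the
 * offset/length pair fits entirely inside a single frame.
 */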
static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	u32 buff_len;

	if (unlikely(d->idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}

	buff_len = q->umem_props.frame_size;
	if (unlikely(d->len > buff_len || d->len == 0 ||
		     d->offset > buff_len || d->offset + d->len > buff_len)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

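/* Rx/Tx counterpart of xskq_validate_id(): copies the first valid
 * descriptor in the current batch into *desc, skipping invalid entries.
 */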
static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_desc(q, &ring->desc[idx])) {
			if (desc)
				*desc = ring->desc[idx];
			return desc;
		}

		q->cons_tail++;
	}

	return NULL;
}

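/* Peeks at the next descriptor without consuming it, refilling the local
 * batch from the ring when it runs empty.
 */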
static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	struct xdp_rxtx_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_desc(q, desc);
	}

	ring = (struct xdp_rxtx_ring *)q->ring;
	*desc = ring->desc[q->cons_tail & q->ring_mask];
	return desc;
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_desc(q, NULL);
}

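/* Writes one descriptor at the local producer head.  It does not become
 * visible to user space until xskq_produce_flush_desc() publishes the tail.
 */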
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					   u32 id, u32 len, u16 offset)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].idx = id;
	ring->desc[idx].len = len;
	ring->desc[idx].offset = offset;

	return 0;
}

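/* Publishes all descriptors written since the last flush: the write barrier
 * orders the descriptor stores before the producer index update.
 */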
static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return (xskq_nb_avail(q, q->nentries) == q->nentries);
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return (xskq_nb_free(q, q->prod_tail, 1) == q->nentries);
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */
