Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c')
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 801
 1 file changed, 801 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
new file mode 100644
index 000000000000..65c3e2c979d4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -0,0 +1,801 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"

struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
	    !adapter->xsk_umems[qid])
		return NULL;
| 20 | |||
| 21 | return adapter->xsk_umems[qid]; | ||
| 22 | } | ||
| 23 | |||
| 24 | static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter) | ||
| 25 | { | ||
| 26 | if (adapter->xsk_umems) | ||
| 27 | return 0; | ||
| 28 | |||
| 29 | adapter->num_xsk_umems_used = 0; | ||
| 30 | adapter->num_xsk_umems = adapter->num_rx_queues; | ||
| 31 | adapter->xsk_umems = kcalloc(adapter->num_xsk_umems, | ||
| 32 | sizeof(*adapter->xsk_umems), | ||
| 33 | GFP_KERNEL); | ||
| 34 | if (!adapter->xsk_umems) { | ||
| 35 | adapter->num_xsk_umems = 0; | ||
| 36 | return -ENOMEM; | ||
| 37 | } | ||
| 38 | |||
| 39 | return 0; | ||
| 40 | } | ||
| 41 | |||
| 42 | static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter, | ||
| 43 | struct xdp_umem *umem, | ||
| 44 | u16 qid) | ||
| 45 | { | ||
| 46 | int err; | ||
| 47 | |||
| 48 | err = ixgbe_alloc_xsk_umems(adapter); | ||
| 49 | if (err) | ||
| 50 | return err; | ||
| 51 | |||
| 52 | adapter->xsk_umems[qid] = umem; | ||
| 53 | adapter->num_xsk_umems_used++; | ||
| 54 | |||
| 55 | return 0; | ||
| 56 | } | ||
| 57 | |||
| 58 | static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid) | ||
| 59 | { | ||
| 60 | adapter->xsk_umems[qid] = NULL; | ||
| 61 | adapter->num_xsk_umems_used--; | ||
| 62 | |||
	if (adapter->num_xsk_umems_used == 0) {
		kfree(adapter->xsk_umems);
		adapter->xsk_umems = NULL;
		adapter->num_xsk_umems = 0;
	}
}

static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
				  struct xdp_umem *umem)
{
	struct device *dev = &adapter->pdev->dev;
	unsigned int i, j;
	dma_addr_t dma;

	for (i = 0; i < umem->npgs; i++) {
		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
		if (dma_mapping_error(dev, dma))
			goto out_unmap;

		umem->pages[i].dma = dma;
	}

	return 0;

out_unmap:
	for (j = 0; j < i; j++) {
		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
		umem->pages[j].dma = 0;
	}
| 94 | |||
| 95 | return -1; | ||
| 96 | } | ||
| 97 | |||
| 98 | static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter, | ||
| 99 | struct xdp_umem *umem) | ||
| 100 | { | ||
| 101 | struct device *dev = &adapter->pdev->dev; | ||
| 102 | unsigned int i; | ||
| 103 | |||
| 104 | for (i = 0; i < umem->npgs; i++) { | ||
| 105 | dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, | ||
| 106 | DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); | ||
| 107 | |||
| 108 | umem->pages[i].dma = 0; | ||
| 109 | } | ||
| 110 | } | ||
| 111 | |||
| 112 | static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, | ||
| 113 | struct xdp_umem *umem, | ||
| 114 | u16 qid) | ||
| 115 | { | ||
| 116 | struct xdp_umem_fq_reuse *reuseq; | ||
| 117 | bool if_running; | ||
| 118 | int err; | ||
| 119 | |||
| 120 | if (qid >= adapter->num_rx_queues) | ||
| 121 | return -EINVAL; | ||
| 122 | |||
| 123 | if (adapter->xsk_umems) { | ||
| 124 | if (qid >= adapter->num_xsk_umems) | ||
| 125 | return -EINVAL; | ||
| 126 | if (adapter->xsk_umems[qid]) | ||
| 127 | return -EBUSY; | ||
| 128 | } | ||
| 129 | |||
| 130 | reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count); | ||
| 131 | if (!reuseq) | ||
| 132 | return -ENOMEM; | ||
| 133 | |||
| 134 | xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); | ||
| 135 | |||
| 136 | err = ixgbe_xsk_umem_dma_map(adapter, umem); | ||
| 137 | if (err) | ||
| 138 | return err; | ||
| 139 | |||
| 140 | if_running = netif_running(adapter->netdev) && | ||
| 141 | READ_ONCE(adapter->xdp_prog); | ||
| 142 | |||
| 143 | if (if_running) | ||
| 144 | ixgbe_txrx_ring_disable(adapter, qid); | ||
| 145 | |||
| 146 | err = ixgbe_add_xsk_umem(adapter, umem, qid); | ||
| 147 | |||
| 148 | if (if_running) | ||
| 149 | ixgbe_txrx_ring_enable(adapter, qid); | ||
| 150 | |||
| 151 | return err; | ||
| 152 | } | ||
| 153 | |||
| 154 | static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) | ||
| 155 | { | ||
| 156 | bool if_running; | ||
| 157 | |||
| 158 | if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems || | ||
| 159 | !adapter->xsk_umems[qid]) | ||
| 160 | return -EINVAL; | ||
| 161 | |||
| 162 | if_running = netif_running(adapter->netdev) && | ||
| 163 | READ_ONCE(adapter->xdp_prog); | ||
| 164 | |||
| 165 | if (if_running) | ||
| 166 | ixgbe_txrx_ring_disable(adapter, qid); | ||
| 167 | |||
| 168 | ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]); | ||
| 169 | ixgbe_remove_xsk_umem(adapter, qid); | ||
| 170 | |||
| 171 | if (if_running) | ||
| 172 | ixgbe_txrx_ring_enable(adapter, qid); | ||
| 173 | |||
| 174 | return 0; | ||
| 175 | } | ||
| 176 | |||
| 177 | int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem, | ||
| 178 | u16 qid) | ||
| 179 | { | ||
| 180 | if (qid >= adapter->num_rx_queues) | ||
| 181 | return -EINVAL; | ||
| 182 | |||
| 183 | if (adapter->xsk_umems) { | ||
| 184 | if (qid >= adapter->num_xsk_umems) | ||
| 185 | return -EINVAL; | ||
| 186 | *umem = adapter->xsk_umems[qid]; | ||
| 187 | return 0; | ||
| 188 | } | ||
| 189 | |||
| 190 | *umem = NULL; | ||
| 191 | return 0; | ||
| 192 | } | ||
| 193 | |||
int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
			 u16 qid)
{
	return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
		      ixgbe_xsk_umem_disable(adapter, qid);
}

static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	xdp->handle += xdp->data - xdp->data_hard_start;
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = convert_to_xdp_frame(xdp);
		if (unlikely(!xdpf)) {
			result = IXGBE_XDP_CONSUMED;
			break;
		}
		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fallthrough */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fallthrough -- handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}

static struct
ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
					unsigned int size)
{
	struct ixgbe_rx_buffer *bi;

	bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      bi->dma, 0,
				      size,
				      DMA_BIDIRECTIONAL);

	return bi;
}

static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
				     struct ixgbe_rx_buffer *obi)
{
	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	u16 nta = rx_ring->next_to_alloc;
	struct ixgbe_rx_buffer *nbi;

	nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	nbi->dma = obi->dma & mask;
	nbi->dma += hr;

	nbi->addr = (void *)((unsigned long)obi->addr & mask);
	nbi->addr += hr;

	nbi->handle = obi->handle & mask;
	nbi->handle += rx_ring->xsk_umem->headroom;

	obi->addr = NULL;
	obi->skb = NULL;
}

void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
{
	struct ixgbe_rx_buffer *bi;
	struct ixgbe_ring *rx_ring;
	u64 hr, mask;
	u16 nta;

	rx_ring = container_of(alloc, struct ixgbe_ring, zca);
	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	mask = rx_ring->xsk_umem->chunk_mask;

	nta = rx_ring->next_to_alloc;
	bi = &rx_ring->rx_buffer_info[nta];
| 300 | |||
| 301 | nta++; | ||
| 302 | rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; | ||
| 303 | |||
| 304 | handle &= mask; | ||
| 305 | |||
| 306 | bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle); | ||
| 307 | bi->dma += hr; | ||
| 308 | |||
| 309 | bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); | ||
| 310 | bi->addr += hr; | ||
| 311 | |||
| 312 | bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; | ||
| 313 | } | ||
| 314 | |||
| 315 | static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, | ||
| 316 | struct ixgbe_rx_buffer *bi) | ||
| 317 | { | ||
| 318 | struct xdp_umem *umem = rx_ring->xsk_umem; | ||
| 319 | void *addr = bi->addr; | ||
| 320 | u64 handle, hr; | ||
| 321 | |||
| 322 | if (addr) | ||
| 323 | return true; | ||
| 324 | |||
| 325 | if (!xsk_umem_peek_addr(umem, &handle)) { | ||
| 326 | rx_ring->rx_stats.alloc_rx_page_failed++; | ||
| 327 | return false; | ||
| 328 | } | ||
| 329 | |||
| 330 | hr = umem->headroom + XDP_PACKET_HEADROOM; | ||
| 331 | |||
| 332 | bi->dma = xdp_umem_get_dma(umem, handle); | ||
| 333 | bi->dma += hr; | ||
| 334 | |||
| 335 | bi->addr = xdp_umem_get_data(umem, handle); | ||
| 336 | bi->addr += hr; | ||
| 337 | |||
| 338 | bi->handle = handle + umem->headroom; | ||
| 339 | |||
| 340 | xsk_umem_discard_addr(umem); | ||
| 341 | return true; | ||
| 342 | } | ||
| 343 | |||
| 344 | static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring, | ||
| 345 | struct ixgbe_rx_buffer *bi) | ||
| 346 | { | ||
| 347 | struct xdp_umem *umem = rx_ring->xsk_umem; | ||
| 348 | u64 handle, hr; | ||
| 349 | |||
| 350 | if (!xsk_umem_peek_addr_rq(umem, &handle)) { | ||
| 351 | rx_ring->rx_stats.alloc_rx_page_failed++; | ||
| 352 | return false; | ||
| 353 | } | ||
| 354 | |||
| 355 | handle &= rx_ring->xsk_umem->chunk_mask; | ||
| 356 | |||
| 357 | hr = umem->headroom + XDP_PACKET_HEADROOM; | ||
| 358 | |||
| 359 | bi->dma = xdp_umem_get_dma(umem, handle); | ||
| 360 | bi->dma += hr; | ||
| 361 | |||
| 362 | bi->addr = xdp_umem_get_data(umem, handle); | ||
| 363 | bi->addr += hr; | ||
| 364 | |||
| 365 | bi->handle = handle + umem->headroom; | ||
| 366 | |||
| 367 | xsk_umem_discard_addr_rq(umem); | ||
| 368 | return true; | ||
| 369 | } | ||
| 370 | |||
static __always_inline bool
__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
			    bool alloc(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *bi))
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	bool ok = true;

	/* nothing to do */
	if (!cleaned_count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!alloc(rx_ring, bi)) {
			ok = false;
			break;
		}

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_BIDIRECTIONAL);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}

void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	__ixgbe_alloc_rx_buffers_zc(rx_ring, count,
				    ixgbe_alloc_buffer_slow_zc);
}

static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
					   u16 count)
{
	return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
					   ixgbe_alloc_buffer_zc);
}

static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      struct ixgbe_rx_buffer *bi,
					      struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
	return skb;
}

static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}

int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
								  cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = ixgbe_get_rx_buffer_zc(rx_ring, size);

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			next_bi =
			       &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->skb = ERR_PTR(-EINVAL);
			continue;
		}

		if (unlikely(bi->skb)) {
			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		xdp.data = bi->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);

		if (xdp_res) {
			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				bi->addr = NULL;
				bi->skb = NULL;
			} else {
				ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			}
			total_rx_packets++;
			total_rx_bytes += size;

			cleaned_count++;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		writel(ring->next_to_use, ring->tail);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return failure ? budget : (int)total_rx_packets;
}

void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;
	struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];

	while (i != rx_ring->next_to_alloc) {
		xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
		i++;
		bi++;
		if (i == rx_ring->count) {
			i = 0;
			bi = rx_ring->rx_buffer_info;
		}
	}
}

static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	u32 len, cmd_type;
	dma_addr_t dma;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
			break;

		dma_sync_single_for_device(xdp_ring->dev, dma, len,
					   DMA_BIDIRECTIONAL);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = len;
		tx_bi->gso_segs = 1;
		tx_bi->xdpf = NULL;
| 651 | |||
| 652 | tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use); | ||
| 653 | tx_desc->read.buffer_addr = cpu_to_le64(dma); | ||
| 654 | |||
| 655 | /* put descriptor type bits */ | ||
| 656 | cmd_type = IXGBE_ADVTXD_DTYP_DATA | | ||
| 657 | IXGBE_ADVTXD_DCMD_DEXT | | ||
| 658 | IXGBE_ADVTXD_DCMD_IFCS; | ||
| 659 | cmd_type |= len | IXGBE_TXD_CMD; | ||
| 660 | tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); | ||
| 661 | tx_desc->read.olinfo_status = | ||
| 662 | cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT); | ||
| 663 | |||
| 664 | xdp_ring->next_to_use++; | ||
| 665 | if (xdp_ring->next_to_use == xdp_ring->count) | ||
| 666 | xdp_ring->next_to_use = 0; | ||
| 667 | } | ||
| 668 | |||
| 669 | if (tx_desc) { | ||
| 670 | ixgbe_xdp_ring_update_tail(xdp_ring); | ||
| 671 | xsk_umem_consume_tx_done(xdp_ring->xsk_umem); | ||
| 672 | } | ||
| 673 | |||
| 674 | return !!budget && work_done; | ||
| 675 | } | ||
| 676 | |||
| 677 | static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring, | ||
| 678 | struct ixgbe_tx_buffer *tx_bi) | ||
| 679 | { | ||
| 680 | xdp_return_frame(tx_bi->xdpf); | ||
| 681 | dma_unmap_single(tx_ring->dev, | ||
| 682 | dma_unmap_addr(tx_bi, dma), | ||
| 683 | dma_unmap_len(tx_bi, len), DMA_TO_DEVICE); | ||
| 684 | dma_unmap_len_set(tx_bi, len, 0); | ||
| 685 | } | ||
| 686 | |||
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	unsigned int total_packets = 0, total_bytes = 0;
	u32 i = tx_ring->next_to_clean, xsk_frames = 0;
	unsigned int budget = q_vector->tx.work_limit;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	bool xmit_done;

	tx_bi = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);

	xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
	return budget > 0 && xmit_done;
}

int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -ENXIO;

	if (qid >= adapter->num_xdp_queues)
		return -ENXIO;

	if (!adapter->xsk_umems || !adapter->xsk_umems[qid])
		return -ENXIO;

	ring = adapter->xdp_ring[qid];
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}

void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);
}
