author		Sunil Goutham <sgoutham@cavium.com>	2017-05-02 09:06:57 -0400
committer	David S. Miller <davem@davemloft.net>	2017-05-02 15:41:22 -0400
commit		e3d06ff9ec9400b93bacf8fa92f3985c9412e282 (patch)
tree		9533bd0f5e6a6beaf1b9544b99b5199459e5b6b7
parent		16f2bccda75da48888772c4829a468be620c5d79 (diff)
net: thunderx: Support for XDP header adjustment
When in XDP mode, reserve XDP_PACKET_HEADROOM bytes at the start of the
receive buffer for the XDP program to modify headers and adjust the
packet start. Additional code changes are done to handle such packets.

Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/cavium/thunder/nicvf_main.c	63
-rw-r--r--	drivers/net/ethernet/cavium/thunder/nicvf_queues.c	9
2 files changed, 55 insertions, 17 deletions
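As context (not part of the patch): the headroom matters because an XDP
program can now call bpf_xdp_adjust_head() on this NIC. Below is a minimal
sketch of such a program; the program name, the 4-byte tag and its value
are hypothetical illustration, not anything from this commit.

/* Minimal XDP program sketch: grow the packet at the front by 4 bytes,
 * into the XDP_PACKET_HEADROOM the driver now reserves, and write an
 * example tag there. Built with clang -target bpf against libbpf headers.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_push_tag(struct xdp_md *ctx)
{
	void *data, *data_end;

	/* Negative delta moves the packet start back into the headroom;
	 * the call fails if the driver reserved no room.
	 */
	if (bpf_xdp_adjust_head(ctx, -4))
		return XDP_ABORTED;

	data = (void *)(long)ctx->data;
	data_end = (void *)(long)ctx->data_end;

	/* The verifier requires a bounds check before any write */
	if (data + 4 > data_end)
		return XDP_ABORTED;

	*(__u32 *)data = 0xcafe;	/* example tag */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

After such an adjustment, the driver sees orig_data != xdp.data when
bpf_prog_run_xdp() returns and must fix up the length and DMA address,
which is exactly what the nicvf_main.c hunks below do.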
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index bb13dee388c3..d6477af88085 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -502,13 +502,15 @@ static int nicvf_init_resources(struct nicvf *nic)
 }
 
 static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
-				struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
+				struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
+				struct sk_buff **skb)
 {
 	struct xdp_buff xdp;
 	struct page *page;
 	u32 action;
-	u16 len;
+	u16 len, offset = 0;
 	u64 dma_addr, cpu_addr;
+	void *orig_data;
 
 	/* Retrieve packet buffer's DMA address and length */
 	len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
@@ -517,17 +519,47 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 	cpu_addr = nicvf_iova_to_phys(nic, dma_addr);
 	if (!cpu_addr)
 		return false;
+	cpu_addr = (u64)phys_to_virt(cpu_addr);
+	page = virt_to_page((void *)cpu_addr);
 
-	xdp.data = phys_to_virt(cpu_addr);
+	xdp.data_hard_start = page_address(page);
+	xdp.data = (void *)cpu_addr;
 	xdp.data_end = xdp.data + len;
+	orig_data = xdp.data;
 
 	rcu_read_lock();
 	action = bpf_prog_run_xdp(prog, &xdp);
 	rcu_read_unlock();
 
+	/* Check if XDP program has changed headers */
+	if (orig_data != xdp.data) {
+		len = xdp.data_end - xdp.data;
+		offset = orig_data - xdp.data;
+		dma_addr -= offset;
+	}
+
 	switch (action) {
 	case XDP_PASS:
-		/* Pass on packet to network stack */
+		/* Check if it's a recycled page, if not
+		 * unmap the DMA mapping.
+		 *
+		 * Recycled page holds an extra reference.
+		 */
+		if (page_ref_count(page) == 1) {
+			dma_addr &= PAGE_MASK;
+			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+					     DMA_FROM_DEVICE,
+					     DMA_ATTR_SKIP_CPU_SYNC);
+		}
+
+		/* Build SKB and pass on packet to network stack */
+		*skb = build_skb(xdp.data,
+				 RCV_FRAG_LEN - cqe_rx->align_pad + offset);
+		if (!*skb)
+			put_page(page);
+		else
+			skb_put(*skb, len);
 		return false;
 	case XDP_TX:
 		nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
@@ -537,7 +569,6 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 	case XDP_ABORTED:
 		trace_xdp_exception(nic->netdev, prog, action);
 	case XDP_DROP:
-		page = virt_to_page(xdp.data);
 		/* Check if it's a recycled page, if not
 		 * unmap the DMA mapping.
 		 *
@@ -546,7 +577,8 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 		if (page_ref_count(page) == 1) {
 			dma_addr &= PAGE_MASK;
 			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
-					     RCV_FRAG_LEN, DMA_FROM_DEVICE,
+					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+					     DMA_FROM_DEVICE,
 					     DMA_ATTR_SKIP_CPU_SYNC);
 		}
 		put_page(page);
@@ -654,7 +686,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 				  struct napi_struct *napi,
 				  struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	struct nicvf *nic = netdev_priv(netdev);
 	struct nicvf *snic = nic;
 	int err = 0;
@@ -676,15 +708,17 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 	}
 
 	/* For XDP, ignore pkts spanning multiple pages */
-	if (nic->xdp_prog && (cqe_rx->rb_cnt == 1))
-		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq))
+	if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
+		/* Packet consumed by XDP */
+		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
 			return;
+	} else {
+		skb = nicvf_get_rcv_skb(snic, cqe_rx,
+					nic->xdp_prog ? true : false);
+	}
 
-	skb = nicvf_get_rcv_skb(snic, cqe_rx, nic->xdp_prog ? true : false);
-	if (!skb) {
-		netdev_dbg(nic->netdev, "Packet not received\n");
+	if (!skb)
 		return;
-	}
 
 	if (netif_msg_pktdata(nic)) {
 		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
@@ -1672,9 +1706,6 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
 		return -EOPNOTSUPP;
 	}
 
-	if (prog && prog->xdp_adjust_head)
-		return -EOPNOTSUPP;
-
 	/* ALL SQs attached to CQs i.e same as RQs, are treated as
 	 * XDP Tx queues and more Tx queues are allocated for
 	 * network stack to send pkts out.
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index ec234b626fe3..43428ce760ca 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -164,6 +164,11 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
 	}
 
 	nic->rb_page_offset = 0;
+
+	/* Reserve space for header modifications by BPF program */
+	if (rbdr->is_xdp)
+		buf_len += XDP_PACKET_HEADROOM;
+
 	/* Check if it's recycled */
 	if (pgcache)
 		nic->rb_page = pgcache->page;
@@ -183,7 +188,7 @@ ret:
 		return -ENOMEM;
 	}
 	if (pgcache)
-		pgcache->dma_addr = *rbuf;
+		pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
 	nic->rb_page_offset += buf_len;
 }
 
@@ -1575,6 +1580,8 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
 		 */
 		if (page_ref_count(page) != 1)
 			return;
+
+		len += XDP_PACKET_HEADROOM;
 		/* Receive buffers in XDP mode are mapped from page start */
 		dma_addr &= PAGE_MASK;
 	}
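For readers tracing the arithmetic, the head-adjustment bookkeeping in
nicvf_xdp_rx() can be read in isolation. A standalone sketch with
simplified types (assumed for illustration, not driver code):

/* If the BPF program moved the packet start, both the packet length and
 * the DMA address handed to XDP_TX / build_skb() must shift by the same
 * delta. Mirrors the "Check if XDP program has changed headers" hunk.
 */
#include <stddef.h>
#include <stdint.h>

struct xdp_view {
	uint8_t *data;		/* packet start after the program ran */
	uint8_t *data_end;	/* one past the last packet byte */
};

static void account_head_adjust(const struct xdp_view *xdp,
				uint8_t *orig_data,
				uint16_t *len, uint64_t *dma_addr)
{
	ptrdiff_t offset;

	if (orig_data == xdp->data)
		return;				/* headers untouched */

	offset = orig_data - xdp->data;		/* > 0 when the head grew */
	*len = xdp->data_end - xdp->data;	/* new packet length */
	*dma_addr -= offset;			/* DMA address tracks new start */
}

The same offset also widens the headroom passed to build_skb() in the
XDP_PASS path, so the stack receives the adjusted packet without a copy.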