aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@kernel.org>2018-01-05 18:30:47 -0500
committerAlexei Starovoitov <ast@kernel.org>2018-01-05 18:31:20 -0500
commit11d16edb04f113348b0c1d0c26cb666e9baaa7d3 (patch)
tree3e3d4ae57b0ebd5158afd33be318775883e943fa
parent5f103c5d4dbadec0f2cacd39b6429e1b8a8cf983 (diff)
parent0fca931a6f21c11f675363b92b5a4fe86da59f30 (diff)
Merge branch 'xdp_rxq_info'
Jesper Dangaard Brouer says: ==================== V4: * Added reviewers/acks to patches * Fix patch desc in i40e that got out-of-sync with code * Add SPDX license headers for the two new files added in patch 14 V3: * Fixed bug in virtio_net driver * Removed export of xdp_rxq_info_init() V2: * Changed API exposed to drivers - Removed invocation of "init" in drivers, and only call "reg" (Suggested by Saeed) - Allow "reg" to fail and handle this in drivers (Suggested by David Ahern) * Removed the SINKQ qtype, instead allow to register as "unused" * Also fixed some drivers during testing on actual HW (noted in patches) There is a need for XDP to know more about the RX-queue a given XDP frame has arrived on. For both the XDP bpf-prog and kernel side. Instead of extending struct xdp_buff each time new info is needed, this patchset takes a different approach. Struct xdp_buff is only extended with a pointer to a struct xdp_rxq_info (allowing for easier extension later). This xdp_rxq_info contains information related to how the driver has set up the individual RX-queues. This is read-mostly information, and all xdp_buff frames (in drivers napi_poll) point to the same xdp_rxq_info (per RX-queue). We stress this data/cache-line is for read-mostly info. This is NOT for dynamic per packet info, use the data_meta for such use-cases. This patchset starts out small, and only exposes ingress_ifindex and the RX-queue index to the XDP/BPF program. Access to tangible info like the ingress ifindex and RX queue index is fairly easy to comprehend. The other future use-cases could allow XDP frames to be recycled back to the originating device driver, by providing info on RX device and queue number. As XDP doesn't have driver feature flags, and eBPF code due to bpf-tail-calls cannot determine that an XDP driver invokes it, this patchset has to update every driver that supports XDP. 
For driver developers (review individual driver patches!): The xdp_rxq_info is tied to the drivers RX-ring(s). Whenever a RX-ring modification requires (temporarily) stopping RX frames, then the xdp_rxq_info should (likely) also be unregistered and re-registered, especially if reallocating the pages in the ring. Make sure ethtool set_channels does the right thing. When replacing XDP prog, if and only if RX-ring need to be changed, then also re-register the xdp_rxq_info. I'm Cc'ing the individual driver patches to the registered maintainers. Testing: I've only tested the NIC drivers I have hardware for. The general test procedure is to (DUT = Device Under Test): (1) run pktgen script pktgen_sample04_many_flows.sh (against DUT) (2) run samples/bpf program xdp_rxq_info --dev $DEV (on DUT) (3) runtime modify number of NIC queues via ethtool -L (on DUT) (4) runtime modify number of NIC ring-size via ethtool -G (on DUT) Patch based on git tree bpf-next (at commit fb982666e380c1632a): https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/ ==================== Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c11
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c10
-rw-r--r--drivers/net/tun.c24
-rw-r--r--drivers/net/virtio_net.c14
-rw-r--r--include/linux/filter.h2
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/net/xdp.h48
-rw-r--r--include/uapi/linux/bpf.h3
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/dev.c69
-rw-r--r--net/core/filter.c19
-rw-r--r--net/core/xdp.c73
-rw-r--r--samples/bpf/Makefile4
-rw-r--r--samples/bpf/xdp_rxq_info_kern.c96
-rw-r--r--samples/bpf/xdp_rxq_info_user.c531
36 files changed, 990 insertions, 28 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9efbdc6f1fcb..89c3c8760a78 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2247,6 +2247,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
2247 if (rxr->xdp_prog) 2247 if (rxr->xdp_prog)
2248 bpf_prog_put(rxr->xdp_prog); 2248 bpf_prog_put(rxr->xdp_prog);
2249 2249
2250 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2251 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2252
2250 kfree(rxr->rx_tpa); 2253 kfree(rxr->rx_tpa);
2251 rxr->rx_tpa = NULL; 2254 rxr->rx_tpa = NULL;
2252 2255
@@ -2280,6 +2283,10 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
2280 2283
2281 ring = &rxr->rx_ring_struct; 2284 ring = &rxr->rx_ring_struct;
2282 2285
2286 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2287 if (rc < 0)
2288 return rc;
2289
2283 rc = bnxt_alloc_ring(bp, ring); 2290 rc = bnxt_alloc_ring(bp, ring);
2284 if (rc) 2291 if (rc)
2285 return rc; 2292 return rc;
@@ -2834,6 +2841,9 @@ void bnxt_set_ring_params(struct bnxt *bp)
2834 bp->cp_ring_mask = bp->cp_bit - 1; 2841 bp->cp_ring_mask = bp->cp_bit - 1;
2835} 2842}
2836 2843
2844/* Changing allocation mode of RX rings.
2845 * TODO: Update when extending xdp_rxq_info to support allocation modes.
2846 */
2837int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 2847int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
2838{ 2848{
2839 if (page_mode) { 2849 if (page_mode) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5359a1f0045f..2d268fc26f5e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -23,6 +23,7 @@
23#include <net/devlink.h> 23#include <net/devlink.h>
24#include <net/dst_metadata.h> 24#include <net/dst_metadata.h>
25#include <net/switchdev.h> 25#include <net/switchdev.h>
26#include <net/xdp.h>
26 27
27struct tx_bd { 28struct tx_bd {
28 __le32 tx_bd_len_flags_type; 29 __le32 tx_bd_len_flags_type;
@@ -664,6 +665,7 @@ struct bnxt_rx_ring_info {
664 665
665 struct bnxt_ring_struct rx_ring_struct; 666 struct bnxt_ring_struct rx_ring_struct;
666 struct bnxt_ring_struct rx_agg_ring_struct; 667 struct bnxt_ring_struct rx_agg_ring_struct;
668 struct xdp_rxq_info xdp_rxq;
667}; 669};
668 670
669struct bnxt_cp_ring_info { 671struct bnxt_cp_ring_info {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 261e5847557a..1389ab5e05df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -96,6 +96,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
96 xdp.data = *data_ptr; 96 xdp.data = *data_ptr;
97 xdp_set_data_meta_invalid(&xdp); 97 xdp_set_data_meta_invalid(&xdp);
98 xdp.data_end = *data_ptr + *len; 98 xdp.data_end = *data_ptr + *len;
99 xdp.rxq = &rxr->xdp_rxq;
99 orig_data = xdp.data; 100 orig_data = xdp.data;
100 mapping = rx_buf->mapping - bp->rx_dma_offset; 101 mapping = rx_buf->mapping - bp->rx_dma_offset;
101 102
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 52b3a6044f85..21618d0d694f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -521,7 +521,7 @@ static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
521 521
522static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, 522static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
523 struct cqe_rx_t *cqe_rx, struct snd_queue *sq, 523 struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
524 struct sk_buff **skb) 524 struct rcv_queue *rq, struct sk_buff **skb)
525{ 525{
526 struct xdp_buff xdp; 526 struct xdp_buff xdp;
527 struct page *page; 527 struct page *page;
@@ -545,6 +545,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
545 xdp.data = (void *)cpu_addr; 545 xdp.data = (void *)cpu_addr;
546 xdp_set_data_meta_invalid(&xdp); 546 xdp_set_data_meta_invalid(&xdp);
547 xdp.data_end = xdp.data + len; 547 xdp.data_end = xdp.data + len;
548 xdp.rxq = &rq->xdp_rxq;
548 orig_data = xdp.data; 549 orig_data = xdp.data;
549 550
550 rcu_read_lock(); 551 rcu_read_lock();
@@ -698,7 +699,8 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
698 699
699static void nicvf_rcv_pkt_handler(struct net_device *netdev, 700static void nicvf_rcv_pkt_handler(struct net_device *netdev,
700 struct napi_struct *napi, 701 struct napi_struct *napi,
701 struct cqe_rx_t *cqe_rx, struct snd_queue *sq) 702 struct cqe_rx_t *cqe_rx,
703 struct snd_queue *sq, struct rcv_queue *rq)
702{ 704{
703 struct sk_buff *skb = NULL; 705 struct sk_buff *skb = NULL;
704 struct nicvf *nic = netdev_priv(netdev); 706 struct nicvf *nic = netdev_priv(netdev);
@@ -724,7 +726,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
724 /* For XDP, ignore pkts spanning multiple pages */ 726 /* For XDP, ignore pkts spanning multiple pages */
725 if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) { 727 if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
726 /* Packet consumed by XDP */ 728 /* Packet consumed by XDP */
727 if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb)) 729 if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
728 return; 730 return;
729 } else { 731 } else {
730 skb = nicvf_get_rcv_skb(snic, cqe_rx, 732 skb = nicvf_get_rcv_skb(snic, cqe_rx,
@@ -781,6 +783,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
781 struct cqe_rx_t *cq_desc; 783 struct cqe_rx_t *cq_desc;
782 struct netdev_queue *txq; 784 struct netdev_queue *txq;
783 struct snd_queue *sq = &qs->sq[cq_idx]; 785 struct snd_queue *sq = &qs->sq[cq_idx];
786 struct rcv_queue *rq = &qs->rq[cq_idx];
784 unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx; 787 unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
785 788
786 spin_lock_bh(&cq->lock); 789 spin_lock_bh(&cq->lock);
@@ -811,7 +814,7 @@ loop:
811 814
812 switch (cq_desc->cqe_type) { 815 switch (cq_desc->cqe_type) {
813 case CQE_TYPE_RX: 816 case CQE_TYPE_RX:
814 nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq); 817 nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
815 work_done++; 818 work_done++;
816 break; 819 break;
817 case CQE_TYPE_SEND: 820 case CQE_TYPE_SEND:
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index f38ea349aa00..14e62c6ac342 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -760,6 +760,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
760 760
761 if (!rq->enable) { 761 if (!rq->enable) {
762 nicvf_reclaim_rcv_queue(nic, qs, qidx); 762 nicvf_reclaim_rcv_queue(nic, qs, qidx);
763 xdp_rxq_info_unreg(&rq->xdp_rxq);
763 return; 764 return;
764 } 765 }
765 766
@@ -772,6 +773,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
772 /* all writes of RBDR data to be loaded into L2 Cache as well*/ 773 /* all writes of RBDR data to be loaded into L2 Cache as well*/
773 rq->caching = 1; 774 rq->caching = 1;
774 775
776 /* Driver have no proper error path for failed XDP RX-queue info reg */
777 WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0);
778
775 /* Send a mailbox msg to PF to config RQ */ 779 /* Send a mailbox msg to PF to config RQ */
776 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; 780 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
777 mbx.rq.qs_num = qs->vnic_id; 781 mbx.rq.qs_num = qs->vnic_id;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 178ab6e8e3c5..7d1e4e2aaad0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -12,6 +12,7 @@
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/iommu.h> 13#include <linux/iommu.h>
14#include <linux/bpf.h> 14#include <linux/bpf.h>
15#include <net/xdp.h>
15#include "q_struct.h" 16#include "q_struct.h"
16 17
17#define MAX_QUEUE_SET 128 18#define MAX_QUEUE_SET 128
@@ -255,6 +256,7 @@ struct rcv_queue {
255 u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */ 256 u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
256 u8 caching; 257 u8 caching;
257 struct rx_tx_queue_stats stats; 258 struct rx_tx_queue_stats stats;
259 struct xdp_rxq_info xdp_rxq;
258} ____cacheline_aligned_in_smp; 260} ____cacheline_aligned_in_smp;
259 261
260struct cmp_queue { 262struct cmp_queue {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 5f6cf7212d4f..cfd788b4fd7a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1585,6 +1585,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
1585 */ 1585 */
1586 rx_rings[i].desc = NULL; 1586 rx_rings[i].desc = NULL;
1587 rx_rings[i].rx_bi = NULL; 1587 rx_rings[i].rx_bi = NULL;
1588 /* Clear cloned XDP RX-queue info before setup call */
1589 memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq));
1588 /* this is to allow wr32 to have something to write to 1590 /* this is to allow wr32 to have something to write to
1589 * during early allocation of Rx buffers 1591 * during early allocation of Rx buffers
1590 */ 1592 */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4566d66ffc7c..2a8a85e3ae8f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -27,6 +27,7 @@
27#include <linux/prefetch.h> 27#include <linux/prefetch.h>
28#include <net/busy_poll.h> 28#include <net/busy_poll.h>
29#include <linux/bpf_trace.h> 29#include <linux/bpf_trace.h>
30#include <net/xdp.h>
30#include "i40e.h" 31#include "i40e.h"
31#include "i40e_trace.h" 32#include "i40e_trace.h"
32#include "i40e_prototype.h" 33#include "i40e_prototype.h"
@@ -1236,6 +1237,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1236void i40e_free_rx_resources(struct i40e_ring *rx_ring) 1237void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1237{ 1238{
1238 i40e_clean_rx_ring(rx_ring); 1239 i40e_clean_rx_ring(rx_ring);
1240 if (rx_ring->vsi->type == I40E_VSI_MAIN)
1241 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1239 rx_ring->xdp_prog = NULL; 1242 rx_ring->xdp_prog = NULL;
1240 kfree(rx_ring->rx_bi); 1243 kfree(rx_ring->rx_bi);
1241 rx_ring->rx_bi = NULL; 1244 rx_ring->rx_bi = NULL;
@@ -1256,6 +1259,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1256int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) 1259int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1257{ 1260{
1258 struct device *dev = rx_ring->dev; 1261 struct device *dev = rx_ring->dev;
1262 int err = -ENOMEM;
1259 int bi_size; 1263 int bi_size;
1260 1264
1261 /* warn if we are about to overwrite the pointer */ 1265 /* warn if we are about to overwrite the pointer */
@@ -1283,13 +1287,21 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1283 rx_ring->next_to_clean = 0; 1287 rx_ring->next_to_clean = 0;
1284 rx_ring->next_to_use = 0; 1288 rx_ring->next_to_use = 0;
1285 1289
1290 /* XDP RX-queue info only needed for RX rings exposed to XDP */
1291 if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1292 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1293 rx_ring->queue_index);
1294 if (err < 0)
1295 goto err;
1296 }
1297
1286 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; 1298 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1287 1299
1288 return 0; 1300 return 0;
1289err: 1301err:
1290 kfree(rx_ring->rx_bi); 1302 kfree(rx_ring->rx_bi);
1291 rx_ring->rx_bi = NULL; 1303 rx_ring->rx_bi = NULL;
1292 return -ENOMEM; 1304 return err;
1293} 1305}
1294 1306
1295/** 1307/**
@@ -2068,11 +2080,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2068 struct sk_buff *skb = rx_ring->skb; 2080 struct sk_buff *skb = rx_ring->skb;
2069 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); 2081 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2070 bool failure = false, xdp_xmit = false; 2082 bool failure = false, xdp_xmit = false;
2083 struct xdp_buff xdp;
2084
2085 xdp.rxq = &rx_ring->xdp_rxq;
2071 2086
2072 while (likely(total_rx_packets < (unsigned int)budget)) { 2087 while (likely(total_rx_packets < (unsigned int)budget)) {
2073 struct i40e_rx_buffer *rx_buffer; 2088 struct i40e_rx_buffer *rx_buffer;
2074 union i40e_rx_desc *rx_desc; 2089 union i40e_rx_desc *rx_desc;
2075 struct xdp_buff xdp;
2076 unsigned int size; 2090 unsigned int size;
2077 u16 vlan_tag; 2091 u16 vlan_tag;
2078 u8 rx_ptype; 2092 u8 rx_ptype;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index fbae1182e2ea..2d08760fc4ce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -27,6 +27,8 @@
27#ifndef _I40E_TXRX_H_ 27#ifndef _I40E_TXRX_H_
28#define _I40E_TXRX_H_ 28#define _I40E_TXRX_H_
29 29
30#include <net/xdp.h>
31
30/* Interrupt Throttling and Rate Limiting Goodies */ 32/* Interrupt Throttling and Rate Limiting Goodies */
31 33
32#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ 34#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
@@ -428,6 +430,7 @@ struct i40e_ring {
428 */ 430 */
429 431
430 struct i40e_channel *ch; 432 struct i40e_channel *ch;
433 struct xdp_rxq_info xdp_rxq;
431} ____cacheline_internodealigned_in_smp; 434} ____cacheline_internodealigned_in_smp;
432 435
433static inline bool ring_uses_build_skb(struct i40e_ring *ring) 436static inline bool ring_uses_build_skb(struct i40e_ring *ring)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 468c3555a629..8611763d6129 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -53,6 +53,7 @@
53#include <linux/dca.h> 53#include <linux/dca.h>
54#endif 54#endif
55 55
56#include <net/xdp.h>
56#include <net/busy_poll.h> 57#include <net/busy_poll.h>
57 58
58/* common prefix used by pr_<> macros */ 59/* common prefix used by pr_<> macros */
@@ -371,6 +372,7 @@ struct ixgbe_ring {
371 struct ixgbe_tx_queue_stats tx_stats; 372 struct ixgbe_tx_queue_stats tx_stats;
372 struct ixgbe_rx_queue_stats rx_stats; 373 struct ixgbe_rx_queue_stats rx_stats;
373 }; 374 };
375 struct xdp_rxq_info xdp_rxq;
374} ____cacheline_internodealigned_in_smp; 376} ____cacheline_internodealigned_in_smp;
375 377
376enum ixgbe_ring_f_enum { 378enum ixgbe_ring_f_enum {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0aad1c2a3667..0aaf70b3cfcd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1156,6 +1156,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
1156 memcpy(&temp_ring[i], adapter->rx_ring[i], 1156 memcpy(&temp_ring[i], adapter->rx_ring[i],
1157 sizeof(struct ixgbe_ring)); 1157 sizeof(struct ixgbe_ring));
1158 1158
1159 /* Clear copied XDP RX-queue info */
1160 memset(&temp_ring[i].xdp_rxq, 0,
1161 sizeof(temp_ring[i].xdp_rxq));
1162
1159 temp_ring[i].count = new_rx_count; 1163 temp_ring[i].count = new_rx_count;
1160 err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); 1164 err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
1161 if (err) { 1165 if (err) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7737a05c717c..95aba975b391 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2318,12 +2318,14 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2318#endif /* IXGBE_FCOE */ 2318#endif /* IXGBE_FCOE */
2319 u16 cleaned_count = ixgbe_desc_unused(rx_ring); 2319 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2320 bool xdp_xmit = false; 2320 bool xdp_xmit = false;
2321 struct xdp_buff xdp;
2322
2323 xdp.rxq = &rx_ring->xdp_rxq;
2321 2324
2322 while (likely(total_rx_packets < budget)) { 2325 while (likely(total_rx_packets < budget)) {
2323 union ixgbe_adv_rx_desc *rx_desc; 2326 union ixgbe_adv_rx_desc *rx_desc;
2324 struct ixgbe_rx_buffer *rx_buffer; 2327 struct ixgbe_rx_buffer *rx_buffer;
2325 struct sk_buff *skb; 2328 struct sk_buff *skb;
2326 struct xdp_buff xdp;
2327 unsigned int size; 2329 unsigned int size;
2328 2330
2329 /* return some buffers to hardware, one at a time is too slow */ 2331 /* return some buffers to hardware, one at a time is too slow */
@@ -6444,6 +6446,11 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
6444 rx_ring->next_to_clean = 0; 6446 rx_ring->next_to_clean = 0;
6445 rx_ring->next_to_use = 0; 6447 rx_ring->next_to_use = 0;
6446 6448
6449 /* XDP RX-queue info */
6450 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
6451 rx_ring->queue_index) < 0)
6452 goto err;
6453
6447 rx_ring->xdp_prog = adapter->xdp_prog; 6454 rx_ring->xdp_prog = adapter->xdp_prog;
6448 6455
6449 return 0; 6456 return 0;
@@ -6541,6 +6548,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6541 ixgbe_clean_rx_ring(rx_ring); 6548 ixgbe_clean_rx_ring(rx_ring);
6542 6549
6543 rx_ring->xdp_prog = NULL; 6550 rx_ring->xdp_prog = NULL;
6551 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6544 vfree(rx_ring->rx_buffer_info); 6552 vfree(rx_ring->rx_buffer_info);
6545 rx_ring->rx_buffer_info = NULL; 6553 rx_ring->rx_buffer_info = NULL;
6546 6554
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 21bc17fa3854..8fc51bc29003 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2172,8 +2172,9 @@ static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
2172 2172
2173 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], 2173 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
2174 prof->rx_ring_size, priv->stride, 2174 prof->rx_ring_size, priv->stride,
2175 node)) 2175 node, i))
2176 goto err; 2176 goto err;
2177
2177 } 2178 }
2178 2179
2179#ifdef CONFIG_RFS_ACCEL 2180#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 5f9dbc9a7f5b..b4d144e67514 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -262,7 +262,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
262 262
263int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, 263int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
264 struct mlx4_en_rx_ring **pring, 264 struct mlx4_en_rx_ring **pring,
265 u32 size, u16 stride, int node) 265 u32 size, u16 stride, int node, int queue_index)
266{ 266{
267 struct mlx4_en_dev *mdev = priv->mdev; 267 struct mlx4_en_dev *mdev = priv->mdev;
268 struct mlx4_en_rx_ring *ring; 268 struct mlx4_en_rx_ring *ring;
@@ -286,6 +286,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
286 ring->log_stride = ffs(ring->stride) - 1; 286 ring->log_stride = ffs(ring->stride) - 1;
287 ring->buf_size = ring->size * ring->stride + TXBB_SIZE; 287 ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
288 288
289 if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
290 goto err_ring;
291
289 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * 292 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
290 sizeof(struct mlx4_en_rx_alloc)); 293 sizeof(struct mlx4_en_rx_alloc));
291 ring->rx_info = vzalloc_node(tmp, node); 294 ring->rx_info = vzalloc_node(tmp, node);
@@ -293,7 +296,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
293 ring->rx_info = vzalloc(tmp); 296 ring->rx_info = vzalloc(tmp);
294 if (!ring->rx_info) { 297 if (!ring->rx_info) {
295 err = -ENOMEM; 298 err = -ENOMEM;
296 goto err_ring; 299 goto err_xdp_info;
297 } 300 }
298 } 301 }
299 302
@@ -317,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
317err_info: 320err_info:
318 vfree(ring->rx_info); 321 vfree(ring->rx_info);
319 ring->rx_info = NULL; 322 ring->rx_info = NULL;
323err_xdp_info:
324 xdp_rxq_info_unreg(&ring->xdp_rxq);
320err_ring: 325err_ring:
321 kfree(ring); 326 kfree(ring);
322 *pring = NULL; 327 *pring = NULL;
@@ -440,6 +445,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
440 lockdep_is_held(&mdev->state_lock)); 445 lockdep_is_held(&mdev->state_lock));
441 if (old_prog) 446 if (old_prog)
442 bpf_prog_put(old_prog); 447 bpf_prog_put(old_prog);
448 xdp_rxq_info_unreg(&ring->xdp_rxq);
443 mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); 449 mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
444 vfree(ring->rx_info); 450 vfree(ring->rx_info);
445 ring->rx_info = NULL; 451 ring->rx_info = NULL;
@@ -652,6 +658,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
652 int cq_ring = cq->ring; 658 int cq_ring = cq->ring;
653 bool doorbell_pending; 659 bool doorbell_pending;
654 struct mlx4_cqe *cqe; 660 struct mlx4_cqe *cqe;
661 struct xdp_buff xdp;
655 int polled = 0; 662 int polled = 0;
656 int index; 663 int index;
657 664
@@ -666,6 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
666 /* Protect accesses to: ring->xdp_prog, priv->mac_hash list */ 673 /* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
667 rcu_read_lock(); 674 rcu_read_lock();
668 xdp_prog = rcu_dereference(ring->xdp_prog); 675 xdp_prog = rcu_dereference(ring->xdp_prog);
676 xdp.rxq = &ring->xdp_rxq;
669 doorbell_pending = 0; 677 doorbell_pending = 0;
670 678
671 /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx 679 /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -750,7 +758,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
750 * read bytes but not past the end of the frag. 758 * read bytes but not past the end of the frag.
751 */ 759 */
752 if (xdp_prog) { 760 if (xdp_prog) {
753 struct xdp_buff xdp;
754 dma_addr_t dma; 761 dma_addr_t dma;
755 void *orig_data; 762 void *orig_data;
756 u32 act; 763 u32 act;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 7db3d0d9bfce..f470ae37d937 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -46,6 +46,7 @@
46#endif 46#endif
47#include <linux/cpu_rmap.h> 47#include <linux/cpu_rmap.h>
48#include <linux/ptp_clock_kernel.h> 48#include <linux/ptp_clock_kernel.h>
49#include <net/xdp.h>
49 50
50#include <linux/mlx4/device.h> 51#include <linux/mlx4/device.h>
51#include <linux/mlx4/qp.h> 52#include <linux/mlx4/qp.h>
@@ -356,6 +357,7 @@ struct mlx4_en_rx_ring {
356 unsigned long dropped; 357 unsigned long dropped;
357 int hwtstamp_rx_filter; 358 int hwtstamp_rx_filter;
358 cpumask_var_t affinity_mask; 359 cpumask_var_t affinity_mask;
360 struct xdp_rxq_info xdp_rxq;
359}; 361};
360 362
361struct mlx4_en_cq { 363struct mlx4_en_cq {
@@ -720,7 +722,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
720void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv); 722void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
721int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, 723int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
722 struct mlx4_en_rx_ring **pring, 724 struct mlx4_en_rx_ring **pring,
723 u32 size, u16 stride, int node); 725 u32 size, u16 stride, int node, int queue_index);
724void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, 726void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
725 struct mlx4_en_rx_ring **pring, 727 struct mlx4_en_rx_ring **pring,
726 u32 size, u16 stride); 728 u32 size, u16 stride);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 543060c305a0..5299310f2481 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -46,6 +46,7 @@
46#include <linux/mlx5/transobj.h> 46#include <linux/mlx5/transobj.h>
47#include <linux/rhashtable.h> 47#include <linux/rhashtable.h>
48#include <net/switchdev.h> 48#include <net/switchdev.h>
49#include <net/xdp.h>
49#include "wq.h" 50#include "wq.h"
50#include "mlx5_core.h" 51#include "mlx5_core.h"
51#include "en_stats.h" 52#include "en_stats.h"
@@ -571,6 +572,9 @@ struct mlx5e_rq {
571 u32 rqn; 572 u32 rqn;
572 struct mlx5_core_dev *mdev; 573 struct mlx5_core_dev *mdev;
573 struct mlx5_core_mkey umr_mkey; 574 struct mlx5_core_mkey umr_mkey;
575
576 /* XDP read-mostly */
577 struct xdp_rxq_info xdp_rxq;
574} ____cacheline_aligned_in_smp; 578} ____cacheline_aligned_in_smp;
575 579
576struct mlx5e_channel { 580struct mlx5e_channel {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 3aa1c90e7c86..539bd1d24396 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -582,6 +582,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
582 goto err_rq_wq_destroy; 582 goto err_rq_wq_destroy;
583 } 583 }
584 584
585 if (xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix) < 0)
586 goto err_rq_wq_destroy;
587
585 rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 588 rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
586 rq->buff.headroom = params->rq_headroom; 589 rq->buff.headroom = params->rq_headroom;
587 590
@@ -687,6 +690,7 @@ err_destroy_umr_mkey:
687err_rq_wq_destroy: 690err_rq_wq_destroy:
688 if (rq->xdp_prog) 691 if (rq->xdp_prog)
689 bpf_prog_put(rq->xdp_prog); 692 bpf_prog_put(rq->xdp_prog);
693 xdp_rxq_info_unreg(&rq->xdp_rxq);
690 mlx5_wq_destroy(&rq->wq_ctrl); 694 mlx5_wq_destroy(&rq->wq_ctrl);
691 695
692 return err; 696 return err;
@@ -699,6 +703,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
699 if (rq->xdp_prog) 703 if (rq->xdp_prog)
700 bpf_prog_put(rq->xdp_prog); 704 bpf_prog_put(rq->xdp_prog);
701 705
706 xdp_rxq_info_unreg(&rq->xdp_rxq);
707
702 switch (rq->wq_type) { 708 switch (rq->wq_type) {
703 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: 709 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
704 mlx5e_rq_free_mpwqe_info(rq); 710 mlx5e_rq_free_mpwqe_info(rq);
@@ -2766,6 +2772,9 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
2766 if (err) 2772 if (err)
2767 return err; 2773 return err;
2768 2774
2775 /* Mark as unused given "Drop-RQ" packets never reach XDP */
2776 xdp_rxq_info_unused(&rq->xdp_rxq);
2777
2769 rq->mdev = mdev; 2778 rq->mdev = mdev;
2770 2779
2771 return 0; 2780 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 5b499c7a698f..7b38480811d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -812,6 +812,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
812 xdp_set_data_meta_invalid(&xdp); 812 xdp_set_data_meta_invalid(&xdp);
813 xdp.data_end = xdp.data + *len; 813 xdp.data_end = xdp.data + *len;
814 xdp.data_hard_start = va; 814 xdp.data_hard_start = va;
815 xdp.rxq = &rq->xdp_rxq;
815 816
816 act = bpf_prog_run_xdp(prog, &xdp); 817 act = bpf_prog_run_xdp(prog, &xdp);
817 switch (act) { 818 switch (act) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 3801c52098d5..0e564cfabe7e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -47,6 +47,7 @@
47#include <linux/netdevice.h> 47#include <linux/netdevice.h>
48#include <linux/pci.h> 48#include <linux/pci.h>
49#include <linux/io-64-nonatomic-hi-lo.h> 49#include <linux/io-64-nonatomic-hi-lo.h>
50#include <net/xdp.h>
50 51
51#include "nfp_net_ctrl.h" 52#include "nfp_net_ctrl.h"
52 53
@@ -350,6 +351,7 @@ struct nfp_net_rx_buf {
350 * @rxds: Virtual address of FL/RX ring in host memory 351 * @rxds: Virtual address of FL/RX ring in host memory
351 * @dma: DMA address of the FL/RX ring 352 * @dma: DMA address of the FL/RX ring
352 * @size: Size, in bytes, of the FL/RX ring (needed to free) 353 * @size: Size, in bytes, of the FL/RX ring (needed to free)
354 * @xdp_rxq: RX-ring info avail for XDP
353 */ 355 */
354struct nfp_net_rx_ring { 356struct nfp_net_rx_ring {
355 struct nfp_net_r_vector *r_vec; 357 struct nfp_net_r_vector *r_vec;
@@ -361,13 +363,14 @@ struct nfp_net_rx_ring {
361 u32 idx; 363 u32 idx;
362 364
363 int fl_qcidx; 365 int fl_qcidx;
366 unsigned int size;
364 u8 __iomem *qcp_fl; 367 u8 __iomem *qcp_fl;
365 368
366 struct nfp_net_rx_buf *rxbufs; 369 struct nfp_net_rx_buf *rxbufs;
367 struct nfp_net_rx_desc *rxds; 370 struct nfp_net_rx_desc *rxds;
368 371
369 dma_addr_t dma; 372 dma_addr_t dma;
370 unsigned int size; 373 struct xdp_rxq_info xdp_rxq;
371} ____cacheline_aligned; 374} ____cacheline_aligned;
372 375
373/** 376/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 0add4870ce2e..45b8cae937be 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1608,11 +1608,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1608 unsigned int true_bufsz; 1608 unsigned int true_bufsz;
1609 struct sk_buff *skb; 1609 struct sk_buff *skb;
1610 int pkts_polled = 0; 1610 int pkts_polled = 0;
1611 struct xdp_buff xdp;
1611 int idx; 1612 int idx;
1612 1613
1613 rcu_read_lock(); 1614 rcu_read_lock();
1614 xdp_prog = READ_ONCE(dp->xdp_prog); 1615 xdp_prog = READ_ONCE(dp->xdp_prog);
1615 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz; 1616 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
1617 xdp.rxq = &rx_ring->xdp_rxq;
1616 tx_ring = r_vec->xdp_ring; 1618 tx_ring = r_vec->xdp_ring;
1617 1619
1618 while (pkts_polled < budget) { 1620 while (pkts_polled < budget) {
@@ -1703,7 +1705,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1703 dp->bpf_offload_xdp) && !meta.portid) { 1705 dp->bpf_offload_xdp) && !meta.portid) {
1704 void *orig_data = rxbuf->frag + pkt_off; 1706 void *orig_data = rxbuf->frag + pkt_off;
1705 unsigned int dma_off; 1707 unsigned int dma_off;
1706 struct xdp_buff xdp;
1707 int act; 1708 int act;
1708 1709
1709 xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM; 1710 xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
@@ -2252,6 +2253,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
2252 struct nfp_net_r_vector *r_vec = rx_ring->r_vec; 2253 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
2253 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; 2254 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2254 2255
2256 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
2255 kfree(rx_ring->rxbufs); 2257 kfree(rx_ring->rxbufs);
2256 2258
2257 if (rx_ring->rxds) 2259 if (rx_ring->rxds)
@@ -2275,7 +2277,11 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
2275static int 2277static int
2276nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) 2278nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
2277{ 2279{
2278 int sz; 2280 int sz, err;
2281
2282 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, rx_ring->idx);
2283 if (err < 0)
2284 return err;
2279 2285
2280 rx_ring->cnt = dp->rxd_cnt; 2286 rx_ring->cnt = dp->rxd_cnt;
2281 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; 2287 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 8a336517baac..8116cfd30fad 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -40,6 +40,7 @@
40#include <linux/kernel.h> 40#include <linux/kernel.h>
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <linux/bpf.h> 42#include <linux/bpf.h>
43#include <net/xdp.h>
43#include <linux/qed/qede_rdma.h> 44#include <linux/qed/qede_rdma.h>
44#include <linux/io.h> 45#include <linux/io.h>
45#ifdef CONFIG_RFS_ACCEL 46#ifdef CONFIG_RFS_ACCEL
@@ -345,6 +346,7 @@ struct qede_rx_queue {
345 u64 xdp_no_pass; 346 u64 xdp_no_pass;
346 347
347 void *handle; 348 void *handle;
349 struct xdp_rxq_info xdp_rxq;
348}; 350};
349 351
350union db_prod { 352union db_prod {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 48ec4c56cddf..dafc079ab6b9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1006,6 +1006,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
1006 xdp.data = xdp.data_hard_start + *data_offset; 1006 xdp.data = xdp.data_hard_start + *data_offset;
1007 xdp_set_data_meta_invalid(&xdp); 1007 xdp_set_data_meta_invalid(&xdp);
1008 xdp.data_end = xdp.data + *len; 1008 xdp.data_end = xdp.data + *len;
1009 xdp.rxq = &rxq->xdp_rxq;
1009 1010
1010 /* Queues always have a full reset currently, so for the time 1011 /* Queues always have a full reset currently, so for the time
1011 * being until there's atomic program replace just mark read 1012 * being until there's atomic program replace just mark read
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 90d79ae2a48f..9929b4370ce6 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -765,6 +765,12 @@ static void qede_free_fp_array(struct qede_dev *edev)
765 fp = &edev->fp_array[i]; 765 fp = &edev->fp_array[i];
766 766
767 kfree(fp->sb_info); 767 kfree(fp->sb_info);
768 /* Handle mem alloc failure case where qede_init_fp
769 * didn't register xdp_rxq_info yet.
770 * Implicit only (fp->type & QEDE_FASTPATH_RX)
771 */
772 if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
773 xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
768 kfree(fp->rxq); 774 kfree(fp->rxq);
769 kfree(fp->xdp_tx); 775 kfree(fp->xdp_tx);
770 kfree(fp->txq); 776 kfree(fp->txq);
@@ -1493,6 +1499,10 @@ static void qede_init_fp(struct qede_dev *edev)
1493 else 1499 else
1494 fp->rxq->data_direction = DMA_FROM_DEVICE; 1500 fp->rxq->data_direction = DMA_FROM_DEVICE;
1495 fp->rxq->dev = &edev->pdev->dev; 1501 fp->rxq->dev = &edev->pdev->dev;
1502
 1503	 /* Driver has no error path from here */
1504 WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
1505 fp->rxq->rxq_id) < 0);
1496 } 1506 }
1497 1507
1498 if (fp->type & QEDE_FASTPATH_TX) { 1508 if (fp->type & QEDE_FASTPATH_TX) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e367d6310353..e7c5f4b2a9a6 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -180,6 +180,7 @@ struct tun_file {
180 struct list_head next; 180 struct list_head next;
181 struct tun_struct *detached; 181 struct tun_struct *detached;
182 struct skb_array tx_array; 182 struct skb_array tx_array;
183 struct xdp_rxq_info xdp_rxq;
183}; 184};
184 185
185struct tun_flow_entry { 186struct tun_flow_entry {
@@ -687,8 +688,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
687 tun->dev->reg_state == NETREG_REGISTERED) 688 tun->dev->reg_state == NETREG_REGISTERED)
688 unregister_netdevice(tun->dev); 689 unregister_netdevice(tun->dev);
689 } 690 }
690 if (tun) 691 if (tun) {
691 skb_array_cleanup(&tfile->tx_array); 692 skb_array_cleanup(&tfile->tx_array);
693 xdp_rxq_info_unreg(&tfile->xdp_rxq);
694 }
692 sock_put(&tfile->sk); 695 sock_put(&tfile->sk);
693 } 696 }
694} 697}
@@ -728,11 +731,13 @@ static void tun_detach_all(struct net_device *dev)
728 tun_napi_del(tun, tfile); 731 tun_napi_del(tun, tfile);
729 /* Drop read queue */ 732 /* Drop read queue */
730 tun_queue_purge(tfile); 733 tun_queue_purge(tfile);
734 xdp_rxq_info_unreg(&tfile->xdp_rxq);
731 sock_put(&tfile->sk); 735 sock_put(&tfile->sk);
732 } 736 }
733 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { 737 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
734 tun_enable_queue(tfile); 738 tun_enable_queue(tfile);
735 tun_queue_purge(tfile); 739 tun_queue_purge(tfile);
740 xdp_rxq_info_unreg(&tfile->xdp_rxq);
736 sock_put(&tfile->sk); 741 sock_put(&tfile->sk);
737 } 742 }
738 BUG_ON(tun->numdisabled != 0); 743 BUG_ON(tun->numdisabled != 0);
@@ -784,6 +789,22 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
784 789
785 tfile->queue_index = tun->numqueues; 790 tfile->queue_index = tun->numqueues;
786 tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; 791 tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
792
793 if (tfile->detached) {
794 /* Re-attach detached tfile, updating XDP queue_index */
795 WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
796
797 if (tfile->xdp_rxq.queue_index != tfile->queue_index)
798 tfile->xdp_rxq.queue_index = tfile->queue_index;
799 } else {
800 /* Setup XDP RX-queue info, for new tfile getting attached */
801 err = xdp_rxq_info_reg(&tfile->xdp_rxq,
802 tun->dev, tfile->queue_index);
803 if (err < 0)
804 goto out;
805 err = 0;
806 }
807
787 rcu_assign_pointer(tfile->tun, tun); 808 rcu_assign_pointer(tfile->tun, tun);
788 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); 809 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
789 tun->numqueues++; 810 tun->numqueues++;
@@ -1508,6 +1529,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1508 xdp.data = buf + pad; 1529 xdp.data = buf + pad;
1509 xdp_set_data_meta_invalid(&xdp); 1530 xdp_set_data_meta_invalid(&xdp);
1510 xdp.data_end = xdp.data + len; 1531 xdp.data_end = xdp.data + len;
1532 xdp.rxq = &tfile->xdp_rxq;
1511 orig_data = xdp.data; 1533 orig_data = xdp.data;
1512 act = bpf_prog_run_xdp(xdp_prog, &xdp); 1534 act = bpf_prog_run_xdp(xdp_prog, &xdp);
1513 1535
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6fb7b658a6cc..ed8299343728 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -31,6 +31,7 @@
31#include <linux/average.h> 31#include <linux/average.h>
32#include <linux/filter.h> 32#include <linux/filter.h>
33#include <net/route.h> 33#include <net/route.h>
34#include <net/xdp.h>
34 35
35static int napi_weight = NAPI_POLL_WEIGHT; 36static int napi_weight = NAPI_POLL_WEIGHT;
36module_param(napi_weight, int, 0444); 37module_param(napi_weight, int, 0444);
@@ -115,6 +116,8 @@ struct receive_queue {
115 116
116 /* Name of this receive queue: input.$index */ 117 /* Name of this receive queue: input.$index */
117 char name[40]; 118 char name[40];
119
120 struct xdp_rxq_info xdp_rxq;
118}; 121};
119 122
120struct virtnet_info { 123struct virtnet_info {
@@ -559,6 +562,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
559 xdp.data = xdp.data_hard_start + xdp_headroom; 562 xdp.data = xdp.data_hard_start + xdp_headroom;
560 xdp_set_data_meta_invalid(&xdp); 563 xdp_set_data_meta_invalid(&xdp);
561 xdp.data_end = xdp.data + len; 564 xdp.data_end = xdp.data + len;
565 xdp.rxq = &rq->xdp_rxq;
562 orig_data = xdp.data; 566 orig_data = xdp.data;
563 act = bpf_prog_run_xdp(xdp_prog, &xdp); 567 act = bpf_prog_run_xdp(xdp_prog, &xdp);
564 568
@@ -692,6 +696,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
692 xdp.data = data + vi->hdr_len; 696 xdp.data = data + vi->hdr_len;
693 xdp_set_data_meta_invalid(&xdp); 697 xdp_set_data_meta_invalid(&xdp);
694 xdp.data_end = xdp.data + (len - vi->hdr_len); 698 xdp.data_end = xdp.data + (len - vi->hdr_len);
699 xdp.rxq = &rq->xdp_rxq;
700
695 act = bpf_prog_run_xdp(xdp_prog, &xdp); 701 act = bpf_prog_run_xdp(xdp_prog, &xdp);
696 702
697 if (act != XDP_PASS) 703 if (act != XDP_PASS)
@@ -1225,13 +1231,18 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1225static int virtnet_open(struct net_device *dev) 1231static int virtnet_open(struct net_device *dev)
1226{ 1232{
1227 struct virtnet_info *vi = netdev_priv(dev); 1233 struct virtnet_info *vi = netdev_priv(dev);
1228 int i; 1234 int i, err;
1229 1235
1230 for (i = 0; i < vi->max_queue_pairs; i++) { 1236 for (i = 0; i < vi->max_queue_pairs; i++) {
1231 if (i < vi->curr_queue_pairs) 1237 if (i < vi->curr_queue_pairs)
1232 /* Make sure we have some buffers: if oom use wq. */ 1238 /* Make sure we have some buffers: if oom use wq. */
1233 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 1239 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
1234 schedule_delayed_work(&vi->refill, 0); 1240 schedule_delayed_work(&vi->refill, 0);
1241
1242 err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
1243 if (err < 0)
1244 return err;
1245
1235 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 1246 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
1236 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); 1247 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
1237 } 1248 }
@@ -1560,6 +1571,7 @@ static int virtnet_close(struct net_device *dev)
1560 cancel_delayed_work_sync(&vi->refill); 1571 cancel_delayed_work_sync(&vi->refill);
1561 1572
1562 for (i = 0; i < vi->max_queue_pairs; i++) { 1573 for (i = 0; i < vi->max_queue_pairs; i++) {
1574 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
1563 napi_disable(&vi->rq[i].napi); 1575 napi_disable(&vi->rq[i].napi);
1564 virtnet_napi_tx_disable(&vi->sq[i].napi); 1576 virtnet_napi_tx_disable(&vi->sq[i].napi);
1565 } 1577 }
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 2b0df2703671..425056c7f96c 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -20,6 +20,7 @@
20#include <linux/set_memory.h> 20#include <linux/set_memory.h>
21#include <linux/kallsyms.h> 21#include <linux/kallsyms.h>
22 22
23#include <net/xdp.h>
23#include <net/sch_generic.h> 24#include <net/sch_generic.h>
24 25
25#include <uapi/linux/filter.h> 26#include <uapi/linux/filter.h>
@@ -503,6 +504,7 @@ struct xdp_buff {
503 void *data_end; 504 void *data_end;
504 void *data_meta; 505 void *data_meta;
505 void *data_hard_start; 506 void *data_hard_start;
507 struct xdp_rxq_info *rxq;
506}; 508};
507 509
508/* Compute the linear packet data range [data, data_end) which 510/* Compute the linear packet data range [data, data_end) which
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 49bfc6eec74c..440b000f07f4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -44,6 +44,7 @@
44#include <net/dcbnl.h> 44#include <net/dcbnl.h>
45#endif 45#endif
46#include <net/netprio_cgroup.h> 46#include <net/netprio_cgroup.h>
47#include <net/xdp.h>
47 48
48#include <linux/netdev_features.h> 49#include <linux/netdev_features.h>
49#include <linux/neighbour.h> 50#include <linux/neighbour.h>
@@ -686,6 +687,7 @@ struct netdev_rx_queue {
686#endif 687#endif
687 struct kobject kobj; 688 struct kobject kobj;
688 struct net_device *dev; 689 struct net_device *dev;
690 struct xdp_rxq_info xdp_rxq;
689} ____cacheline_aligned_in_smp; 691} ____cacheline_aligned_in_smp;
690 692
691/* 693/*
diff --git a/include/net/xdp.h b/include/net/xdp.h
new file mode 100644
index 000000000000..b2362ddfa694
--- /dev/null
+++ b/include/net/xdp.h
@@ -0,0 +1,48 @@
1/* include/net/xdp.h
2 *
3 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
4 * Released under terms in GPL version 2. See COPYING.
5 */
6#ifndef __LINUX_NET_XDP_H__
7#define __LINUX_NET_XDP_H__
8
9/**
10 * DOC: XDP RX-queue information
11 *
 12 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
 13 * level RX-ring queues.  It is information that is specific to how
 14 * the driver has configured a given RX-ring queue.
 15 *
 16 * Each xdp_buff frame received in the driver carries a (pointer)
 17 * reference to this xdp_rxq_info structure.  This provides the XDP
18 * data-path read-access to RX-info for both kernel and bpf-side
19 * (limited subset).
20 *
21 * For now, direct access is only safe while running in NAPI/softirq
 22 * context.  Contents are read-mostly and must not be updated during
23 * driver NAPI/softirq poll.
24 *
25 * The driver usage API is a register and unregister API.
26 *
 27 * The struct is not directly tied to the XDP prog.  A new XDP prog
 28 * can be attached as long as it doesn't change the underlying
 29 * RX-ring.  If the RX-ring does change significantly, the NIC driver
 30 * naturally needs to stop the RX-ring before purging and reallocating
 31 * memory.  In that process the driver MUST call unregister (which
 32 * also applies to driver shutdown and unload).  The register API is
 33 * also mandatory during RX-ring setup.
34 */
35
36struct xdp_rxq_info {
37 struct net_device *dev;
38 u32 queue_index;
39 u32 reg_state;
40} ____cacheline_aligned; /* perf critical, avoid false-sharing */
41
42int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
43 struct net_device *dev, u32 queue_index);
44void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
45void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
46bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
47
48#endif /* __LINUX_NET_XDP_H__ */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f2f8b36e2ad4..405317f9c064 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -899,6 +899,9 @@ struct xdp_md {
899 __u32 data; 899 __u32 data;
900 __u32 data_end; 900 __u32 data_end;
901 __u32 data_meta; 901 __u32 data_meta;
 902	/* Accesses below go through struct xdp_rxq_info */
903 __u32 ingress_ifindex; /* rxq->dev->ifindex */
904 __u32 rx_queue_index; /* rxq->queue_index */
902}; 905};
903 906
904enum sk_action { 907enum sk_action {
diff --git a/net/core/Makefile b/net/core/Makefile
index 1fd0a9c88b1b..6dbbba8c57ae 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
11obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ 11obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
12 neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ 12 neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
13 sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \ 13 sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
14 fib_notifier.o 14 fib_notifier.o xdp.o
15 15
16obj-y += net-sysfs.o 16obj-y += net-sysfs.o
17obj-$(CONFIG_PROC_FS) += net-procfs.o 17obj-$(CONFIG_PROC_FS) += net-procfs.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 2eb66c0d9cdb..d7925ef8743d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3906,9 +3906,33 @@ drop:
3906 return NET_RX_DROP; 3906 return NET_RX_DROP;
3907} 3907}
3908 3908
3909static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
3910{
3911 struct net_device *dev = skb->dev;
3912 struct netdev_rx_queue *rxqueue;
3913
3914 rxqueue = dev->_rx;
3915
3916 if (skb_rx_queue_recorded(skb)) {
3917 u16 index = skb_get_rx_queue(skb);
3918
3919 if (unlikely(index >= dev->real_num_rx_queues)) {
3920 WARN_ONCE(dev->real_num_rx_queues > 1,
3921 "%s received packet on queue %u, but number "
3922 "of RX queues is %u\n",
3923 dev->name, index, dev->real_num_rx_queues);
3924
3925 return rxqueue; /* Return first rxqueue */
3926 }
3927 rxqueue += index;
3928 }
3929 return rxqueue;
3930}
3931
3909static u32 netif_receive_generic_xdp(struct sk_buff *skb, 3932static u32 netif_receive_generic_xdp(struct sk_buff *skb,
3910 struct bpf_prog *xdp_prog) 3933 struct bpf_prog *xdp_prog)
3911{ 3934{
3935 struct netdev_rx_queue *rxqueue;
3912 u32 metalen, act = XDP_DROP; 3936 u32 metalen, act = XDP_DROP;
3913 struct xdp_buff xdp; 3937 struct xdp_buff xdp;
3914 void *orig_data; 3938 void *orig_data;
@@ -3952,6 +3976,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
3952 xdp.data_hard_start = skb->data - skb_headroom(skb); 3976 xdp.data_hard_start = skb->data - skb_headroom(skb);
3953 orig_data = xdp.data; 3977 orig_data = xdp.data;
3954 3978
3979 rxqueue = netif_get_rxqueue(skb);
3980 xdp.rxq = &rxqueue->xdp_rxq;
3981
3955 act = bpf_prog_run_xdp(xdp_prog, &xdp); 3982 act = bpf_prog_run_xdp(xdp_prog, &xdp);
3956 3983
3957 off = xdp.data - orig_data; 3984 off = xdp.data - orig_data;
@@ -7589,12 +7616,12 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
7589} 7616}
7590EXPORT_SYMBOL(netif_stacked_transfer_operstate); 7617EXPORT_SYMBOL(netif_stacked_transfer_operstate);
7591 7618
7592#ifdef CONFIG_SYSFS
7593static int netif_alloc_rx_queues(struct net_device *dev) 7619static int netif_alloc_rx_queues(struct net_device *dev)
7594{ 7620{
7595 unsigned int i, count = dev->num_rx_queues; 7621 unsigned int i, count = dev->num_rx_queues;
7596 struct netdev_rx_queue *rx; 7622 struct netdev_rx_queue *rx;
7597 size_t sz = count * sizeof(*rx); 7623 size_t sz = count * sizeof(*rx);
7624 int err = 0;
7598 7625
7599 BUG_ON(count < 1); 7626 BUG_ON(count < 1);
7600 7627
@@ -7604,11 +7631,39 @@ static int netif_alloc_rx_queues(struct net_device *dev)
7604 7631
7605 dev->_rx = rx; 7632 dev->_rx = rx;
7606 7633
7607 for (i = 0; i < count; i++) 7634 for (i = 0; i < count; i++) {
7608 rx[i].dev = dev; 7635 rx[i].dev = dev;
7636
7637 /* XDP RX-queue setup */
7638 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
7639 if (err < 0)
7640 goto err_rxq_info;
7641 }
7609 return 0; 7642 return 0;
7643
7644err_rxq_info:
7645 /* Rollback successful reg's and free other resources */
7646 while (i--)
7647 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
7648 kfree(dev->_rx);
7649 dev->_rx = NULL;
7650 return err;
7651}
7652
7653static void netif_free_rx_queues(struct net_device *dev)
7654{
7655 unsigned int i, count = dev->num_rx_queues;
7656 struct netdev_rx_queue *rx;
7657
7658 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
7659 if (!dev->_rx)
7660 return;
7661
7662 rx = dev->_rx;
7663
7664 for (i = 0; i < count; i++)
7665 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
7610} 7666}
7611#endif
7612 7667
7613static void netdev_init_one_queue(struct net_device *dev, 7668static void netdev_init_one_queue(struct net_device *dev,
7614 struct netdev_queue *queue, void *_unused) 7669 struct netdev_queue *queue, void *_unused)
@@ -8169,12 +8224,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
8169 return NULL; 8224 return NULL;
8170 } 8225 }
8171 8226
8172#ifdef CONFIG_SYSFS
8173 if (rxqs < 1) { 8227 if (rxqs < 1) {
8174 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 8228 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
8175 return NULL; 8229 return NULL;
8176 } 8230 }
8177#endif
8178 8231
8179 alloc_size = sizeof(struct net_device); 8232 alloc_size = sizeof(struct net_device);
8180 if (sizeof_priv) { 8233 if (sizeof_priv) {
@@ -8231,12 +8284,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
8231 if (netif_alloc_netdev_queues(dev)) 8284 if (netif_alloc_netdev_queues(dev))
8232 goto free_all; 8285 goto free_all;
8233 8286
8234#ifdef CONFIG_SYSFS
8235 dev->num_rx_queues = rxqs; 8287 dev->num_rx_queues = rxqs;
8236 dev->real_num_rx_queues = rxqs; 8288 dev->real_num_rx_queues = rxqs;
8237 if (netif_alloc_rx_queues(dev)) 8289 if (netif_alloc_rx_queues(dev))
8238 goto free_all; 8290 goto free_all;
8239#endif
8240 8291
8241 strcpy(dev->name, name); 8292 strcpy(dev->name, name);
8242 dev->name_assign_type = name_assign_type; 8293 dev->name_assign_type = name_assign_type;
@@ -8275,9 +8326,7 @@ void free_netdev(struct net_device *dev)
8275 8326
8276 might_sleep(); 8327 might_sleep();
8277 netif_free_tx_queues(dev); 8328 netif_free_tx_queues(dev);
8278#ifdef CONFIG_SYSFS 8329 netif_free_rx_queues(dev);
8279 kvfree(dev->_rx);
8280#endif
8281 8330
8282 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 8331 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
8283 8332
diff --git a/net/core/filter.c b/net/core/filter.c
index 130b842c3a15..acdb94c0e97f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4304,6 +4304,25 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
4304 si->dst_reg, si->src_reg, 4304 si->dst_reg, si->src_reg,
4305 offsetof(struct xdp_buff, data_end)); 4305 offsetof(struct xdp_buff, data_end));
4306 break; 4306 break;
4307 case offsetof(struct xdp_md, ingress_ifindex):
4308 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
4309 si->dst_reg, si->src_reg,
4310 offsetof(struct xdp_buff, rxq));
4311 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
4312 si->dst_reg, si->dst_reg,
4313 offsetof(struct xdp_rxq_info, dev));
4314 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
4315 bpf_target_off(struct net_device,
4316 ifindex, 4, target_size));
4317 break;
4318 case offsetof(struct xdp_md, rx_queue_index):
4319 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
4320 si->dst_reg, si->src_reg,
4321 offsetof(struct xdp_buff, rxq));
4322 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
4323 bpf_target_off(struct xdp_rxq_info,
4324 queue_index, 4, target_size));
4325 break;
4307 } 4326 }
4308 4327
4309 return insn - insn_buf; 4328 return insn - insn_buf;
diff --git a/net/core/xdp.c b/net/core/xdp.c
new file mode 100644
index 000000000000..097a0f74e004
--- /dev/null
+++ b/net/core/xdp.c
@@ -0,0 +1,73 @@
1/* net/core/xdp.c
2 *
3 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
4 * Released under terms in GPL version 2. See COPYING.
5 */
6#include <linux/types.h>
7#include <linux/mm.h>
8
9#include <net/xdp.h>
10
11#define REG_STATE_NEW 0x0
12#define REG_STATE_REGISTERED 0x1
13#define REG_STATE_UNREGISTERED 0x2
14#define REG_STATE_UNUSED 0x3
15
16void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
17{
18 /* Simplify driver cleanup code paths, allow unreg "unused" */
19 if (xdp_rxq->reg_state == REG_STATE_UNUSED)
20 return;
21
22 WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");
23
24 xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
25 xdp_rxq->dev = NULL;
26}
27EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
28
29static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
30{
31 memset(xdp_rxq, 0, sizeof(*xdp_rxq));
32}
33
34/* Returns 0 on success, negative on failure */
35int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
36 struct net_device *dev, u32 queue_index)
37{
38 if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
39 WARN(1, "Driver promised not to register this");
40 return -EINVAL;
41 }
42
43 if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
44 WARN(1, "Missing unregister, handled but fix driver");
45 xdp_rxq_info_unreg(xdp_rxq);
46 }
47
48 if (!dev) {
49 WARN(1, "Missing net_device from driver");
50 return -ENODEV;
51 }
52
53 /* State either UNREGISTERED or NEW */
54 xdp_rxq_info_init(xdp_rxq);
55 xdp_rxq->dev = dev;
56 xdp_rxq->queue_index = queue_index;
57
58 xdp_rxq->reg_state = REG_STATE_REGISTERED;
59 return 0;
60}
61EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
62
63void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
64{
65 xdp_rxq->reg_state = REG_STATE_UNUSED;
66}
67EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
68
69bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
70{
71 return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
72}
73EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4fb944a7ecf8..3ff7a05bea9a 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -41,6 +41,7 @@ hostprogs-y += xdp_redirect
41hostprogs-y += xdp_redirect_map 41hostprogs-y += xdp_redirect_map
42hostprogs-y += xdp_redirect_cpu 42hostprogs-y += xdp_redirect_cpu
43hostprogs-y += xdp_monitor 43hostprogs-y += xdp_monitor
44hostprogs-y += xdp_rxq_info
44hostprogs-y += syscall_tp 45hostprogs-y += syscall_tp
45 46
46# Libbpf dependencies 47# Libbpf dependencies
@@ -90,6 +91,7 @@ xdp_redirect-objs := bpf_load.o $(LIBBPF) xdp_redirect_user.o
90xdp_redirect_map-objs := bpf_load.o $(LIBBPF) xdp_redirect_map_user.o 91xdp_redirect_map-objs := bpf_load.o $(LIBBPF) xdp_redirect_map_user.o
91xdp_redirect_cpu-objs := bpf_load.o $(LIBBPF) xdp_redirect_cpu_user.o 92xdp_redirect_cpu-objs := bpf_load.o $(LIBBPF) xdp_redirect_cpu_user.o
92xdp_monitor-objs := bpf_load.o $(LIBBPF) xdp_monitor_user.o 93xdp_monitor-objs := bpf_load.o $(LIBBPF) xdp_monitor_user.o
94xdp_rxq_info-objs := bpf_load.o $(LIBBPF) xdp_rxq_info_user.o
93syscall_tp-objs := bpf_load.o $(LIBBPF) syscall_tp_user.o 95syscall_tp-objs := bpf_load.o $(LIBBPF) syscall_tp_user.o
94 96
95# Tell kbuild to always build the programs 97# Tell kbuild to always build the programs
@@ -139,6 +141,7 @@ always += xdp_redirect_kern.o
139always += xdp_redirect_map_kern.o 141always += xdp_redirect_map_kern.o
140always += xdp_redirect_cpu_kern.o 142always += xdp_redirect_cpu_kern.o
141always += xdp_monitor_kern.o 143always += xdp_monitor_kern.o
144always += xdp_rxq_info_kern.o
142always += syscall_tp_kern.o 145always += syscall_tp_kern.o
143 146
144HOSTCFLAGS += -I$(objtree)/usr/include 147HOSTCFLAGS += -I$(objtree)/usr/include
@@ -182,6 +185,7 @@ HOSTLOADLIBES_xdp_redirect += -lelf
182HOSTLOADLIBES_xdp_redirect_map += -lelf 185HOSTLOADLIBES_xdp_redirect_map += -lelf
183HOSTLOADLIBES_xdp_redirect_cpu += -lelf 186HOSTLOADLIBES_xdp_redirect_cpu += -lelf
184HOSTLOADLIBES_xdp_monitor += -lelf 187HOSTLOADLIBES_xdp_monitor += -lelf
188HOSTLOADLIBES_xdp_rxq_info += -lelf
185HOSTLOADLIBES_syscall_tp += -lelf 189HOSTLOADLIBES_syscall_tp += -lelf
186 190
187# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: 191# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c
new file mode 100644
index 000000000000..3fd209291653
--- /dev/null
+++ b/samples/bpf/xdp_rxq_info_kern.c
@@ -0,0 +1,96 @@
1/* SPDX-License-Identifier: GPL-2.0
2 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
3 *
4 * Example howto extract XDP RX-queue info
5 */
6#include <uapi/linux/bpf.h>
7#include "bpf_helpers.h"
8
/* Config setup from within userspace
 *
 * User-side setup ifindex in config_map, to verify that
 * ctx->ingress_ifindex is correct (against configured ifindex)
 */
struct config {
	__u32 action;	/* XDP verdict this program returns on success */
	int ifindex;	/* expected ingress ifindex, written by userspace */
};
/* Single-entry array map (key 0) carrying struct config from userspace */
struct bpf_map_def SEC("maps") config_map = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(struct config),
	.max_entries = 1,
};
24
/* Common stats data record (shared with userspace) */
struct datarec {
	__u64 processed;	/* packets seen by this XDP program */
	__u64 issue;		/* error events (mismatch/overflow, see prog) */
};

/* Global counters: per-CPU map with a single entry (key 0) */
struct bpf_map_def SEC("maps") stats_global_map = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(struct datarec),
	.max_entries = 1,
};
37
/* Highest RX-queue index tracked individually; the extra "+ 1" slot at
 * key == MAX_RXQs collects packets from queues beyond this limit.
 */
#define MAX_RXQs 64

/* Stats per rx_queue_index (per CPU) */
struct bpf_map_def SEC("maps") rx_queue_index_map = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(struct datarec),
	.max_entries = MAX_RXQs + 1,	/* last slot is the overflow bucket */
};
47
/* XDP program: counts packets globally and per RX-queue index, and
 * checks that ctx->ingress_ifindex matches the ifindex userspace stored
 * in config_map.  Returns the configured action on success, XDP_ABORTED
 * on any map-lookup failure or ifindex mismatch.
 */
SEC("xdp_prog0")
int xdp_prognum0(struct xdp_md *ctx)
{
	/* data/data_end are not used by this program; kept as the
	 * canonical way to derive packet pointers from the ctx */
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct datarec *rec, *rxq_rec;
	int ingress_ifindex;
	struct config *config;
	u32 key = 0;

	/* Global stats record */
	rec = bpf_map_lookup_elem(&stats_global_map, &key);
	if (!rec)
		return XDP_ABORTED;
	rec->processed++;

	/* Accessing ctx->ingress_ifindex, cause BPF to rewrite BPF
	 * instructions inside kernel to access xdp_rxq->dev->ifindex
	 */
	ingress_ifindex = ctx->ingress_ifindex;

	config = bpf_map_lookup_elem(&config_map, &key);
	if (!config)
		return XDP_ABORTED;

	/* Simple test: check ctx provided ifindex is as expected */
	if (ingress_ifindex != config->ifindex) {
		/* count this error case */
		rec->issue++;
		return XDP_ABORTED;
	}

	/* Update stats per rx_queue_index. Handle if rx_queue_index
	 * is larger than stats map can contain info for.
	 */
	key = ctx->rx_queue_index;
	if (key >= MAX_RXQs)
		key = MAX_RXQs;	/* overflow bucket; map has MAX_RXQs+1 slots */
	rxq_rec = bpf_map_lookup_elem(&rx_queue_index_map, &key);
	if (!rxq_rec)
		return XDP_ABORTED;
	rxq_rec->processed++;
	if (key == MAX_RXQs)	/* record that an out-of-range RXQ was seen */
		rxq_rec->issue++;

	return config->action;
}

char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
new file mode 100644
index 000000000000..32430e8b3a6a
--- /dev/null
+++ b/samples/bpf/xdp_rxq_info_user.c
@@ -0,0 +1,531 @@
1/* SPDX-License-Identifier: GPL-2.0
2 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
3 */
/* Help text shown by usage(); doubles as the program's documentation. */
static const char *__doc__ = " XDP RX-queue info extract example\n\n"
	"Monitor how many packets per sec (pps) are received\n"
	"per NIC RX queue index and which CPU processed the packet\n"
	;
8
9#include <errno.h>
10#include <signal.h>
11#include <stdio.h>
12#include <stdlib.h>
13#include <stdbool.h>
14#include <string.h>
15#include <unistd.h>
16#include <locale.h>
17#include <sys/resource.h>
18#include <getopt.h>
19#include <net/if.h>
20#include <time.h>
21
22#include <arpa/inet.h>
23#include <linux/if_link.h>
24
25#include "libbpf.h"
26#include "bpf_load.h"
27#include "bpf_util.h"
28
static int ifindex = -1;		/* target netdev; -1 = not set yet */
static char ifname_buf[IF_NAMESIZE];	/* storage for --dev name */
static char *ifname;			/* points at ifname_buf once parsed */

static __u32 xdp_flags;			/* e.g. XDP_FLAGS_SKB_MODE from -S */

/* Exit return codes */
#define EXIT_OK 0
#define EXIT_FAIL 1
#define EXIT_FAIL_OPTION 2
#define EXIT_FAIL_XDP 3
#define EXIT_FAIL_BPF 4
#define EXIT_FAIL_MEM 5

/* Long options; short-option chars must match the getopt_long() string */
static const struct option long_options[] = {
	{"help",	no_argument,		NULL, 'h' },
	{"dev",		required_argument,	NULL, 'd' },
	{"skb-mode",	no_argument,		NULL, 'S' },
	{"sec",		required_argument,	NULL, 's' },
	{"no-separators", no_argument,		NULL, 'z' },
	{"action",	required_argument,	NULL, 'a' },
	{0, 0, NULL,  0 }
};
52
53static void int_exit(int sig)
54{
55 fprintf(stderr,
56 "Interrupted: Removing XDP program on ifindex:%d device:%s\n",
57 ifindex, ifname);
58 if (ifindex > -1)
59 set_link_xdp_fd(ifindex, -1, xdp_flags);
60 exit(EXIT_OK);
61}
62
/* Mirrors struct config in xdp_rxq_info_kern.c (stored into config_map) */
struct config {
	__u32 action;	/* XDP action the BPF program should return */
	int ifindex;	/* ifindex the BPF program validates against */
};
#define XDP_ACTION_MAX (XDP_TX + 1)
#define XDP_ACTION_MAX_STRLEN 11
/* Action code -> name table, indexed by the XDP_* action values */
static const char *xdp_action_names[XDP_ACTION_MAX] = {
	[XDP_ABORTED]	= "XDP_ABORTED",
	[XDP_DROP]	= "XDP_DROP",
	[XDP_PASS]	= "XDP_PASS",
	[XDP_TX]	= "XDP_TX",
};
75
76static const char *action2str(int action)
77{
78 if (action < XDP_ACTION_MAX)
79 return xdp_action_names[action];
80 return NULL;
81}
82
83static int parse_xdp_action(char *action_str)
84{
85 size_t maxlen;
86 __u64 action = -1;
87 int i;
88
89 for (i = 0; i < XDP_ACTION_MAX; i++) {
90 maxlen = XDP_ACTION_MAX_STRLEN;
91 if (strncmp(xdp_action_names[i], action_str, maxlen) == 0) {
92 action = i;
93 break;
94 }
95 }
96 return action;
97}
98
99static void list_xdp_actions(void)
100{
101 int i;
102
103 printf("Available XDP --action <options>\n");
104 for (i = 0; i < XDP_ACTION_MAX; i++)
105 printf("\t%s\n", xdp_action_names[i]);
106 printf("\n");
107}
108
109static void usage(char *argv[])
110{
111 int i;
112
113 printf("\nDOCUMENTATION:\n%s\n", __doc__);
114 printf(" Usage: %s (options-see-below)\n", argv[0]);
115 printf(" Listing options:\n");
116 for (i = 0; long_options[i].name != 0; i++) {
117 printf(" --%-12s", long_options[i].name);
118 if (long_options[i].flag != NULL)
119 printf(" flag (internal value:%d)",
120 *long_options[i].flag);
121 else
122 printf(" short-option: -%c",
123 long_options[i].val);
124 printf("\n");
125 }
126 printf("\n");
127 list_xdp_actions();
128}
129
130#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
131static __u64 gettime(void)
132{
133 struct timespec t;
134 int res;
135
136 res = clock_gettime(CLOCK_MONOTONIC, &t);
137 if (res < 0) {
138 fprintf(stderr, "Error with gettimeofday! (%i)\n", res);
139 exit(EXIT_FAIL);
140 }
141 return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
142}
143
/* Common stats data record shared with _kern.c */
struct datarec {
	__u64 processed;	/* packets counted by the BPF program */
	__u64 issue;		/* error/overflow events */
};
/* One map snapshot: per-CPU values plus their sum, timestamped */
struct record {
	__u64 timestamp;	/* ns (CLOCK_MONOTONIC) when map was read */
	struct datarec total;	/* sum over all CPUs */
	struct datarec *cpu;	/* array with one entry per possible CPU */
};
/* Full snapshot: global stats plus one record per RX-queue map slot */
struct stats_record {
	struct record stats;	/* from stats_global_map */
	struct record *rxq;	/* from rx_queue_index_map, per queue */
};
158
159static struct datarec *alloc_record_per_cpu(void)
160{
161 unsigned int nr_cpus = bpf_num_possible_cpus();
162 struct datarec *array;
163 size_t size;
164
165 size = sizeof(struct datarec) * nr_cpus;
166 array = malloc(size);
167 memset(array, 0, size);
168 if (!array) {
169 fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
170 exit(EXIT_FAIL_MEM);
171 }
172 return array;
173}
174
175static struct record *alloc_record_per_rxq(void)
176{
177 unsigned int nr_rxqs = map_data[2].def.max_entries;
178 struct record *array;
179 size_t size;
180
181 size = sizeof(struct record) * nr_rxqs;
182 array = malloc(size);
183 memset(array, 0, size);
184 if (!array) {
185 fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
186 exit(EXIT_FAIL_MEM);
187 }
188 return array;
189}
190
191static struct stats_record *alloc_stats_record(void)
192{
193 unsigned int nr_rxqs = map_data[2].def.max_entries;
194 struct stats_record *rec;
195 int i;
196
197 rec = malloc(sizeof(*rec));
198 memset(rec, 0, sizeof(*rec));
199 if (!rec) {
200 fprintf(stderr, "Mem alloc error\n");
201 exit(EXIT_FAIL_MEM);
202 }
203 rec->rxq = alloc_record_per_rxq();
204 for (i = 0; i < nr_rxqs; i++)
205 rec->rxq[i].cpu = alloc_record_per_cpu();
206
207 rec->stats.cpu = alloc_record_per_cpu();
208 return rec;
209}
210
211static void free_stats_record(struct stats_record *r)
212{
213 unsigned int nr_rxqs = map_data[2].def.max_entries;
214 int i;
215
216 for (i = 0; i < nr_rxqs; i++)
217 free(r->rxq[i].cpu);
218
219 free(r->rxq);
220 free(r->stats.cpu);
221 free(r);
222}
223
/* Read one key from a per-CPU map: copy each CPU's counters into
 * rec->cpu[] and accumulate their sums into rec->total, stamping
 * rec->timestamp right after the lookup.  Returns false (and leaves
 * rec unchanged) if the map lookup fails.
 */
static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
{
	/* For percpu maps, userspace gets a value per possible CPU */
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct datarec values[nr_cpus];
	__u64 sum_processed = 0;
	__u64 sum_issue = 0;
	int i;

	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
		fprintf(stderr,
			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
		return false;
	}
	/* Get time as close as possible to reading map contents */
	rec->timestamp = gettime();

	/* Record and sum values from each CPU */
	for (i = 0; i < nr_cpus; i++) {
		rec->cpu[i].processed = values[i].processed;
		sum_processed += values[i].processed;
		rec->cpu[i].issue = values[i].issue;
		sum_issue += values[i].issue;
	}
	rec->total.processed = sum_processed;
	rec->total.issue = sum_issue;
	return true;
}
252
253static void stats_collect(struct stats_record *rec)
254{
255 int fd, i, max_rxqs;
256
257 fd = map_data[1].fd; /* map: stats_global_map */
258 map_collect_percpu(fd, 0, &rec->stats);
259
260 fd = map_data[2].fd; /* map: rx_queue_index_map */
261 max_rxqs = map_data[2].def.max_entries;
262 for (i = 0; i < max_rxqs; i++)
263 map_collect_percpu(fd, i, &rec->rxq[i]);
264}
265
266static double calc_period(struct record *r, struct record *p)
267{
268 double period_ = 0;
269 __u64 period = 0;
270
271 period = r->timestamp - p->timestamp;
272 if (period > 0)
273 period_ = ((double) period / NANOSEC_PER_SEC);
274
275 return period_;
276}
277
278static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
279{
280 __u64 packets = 0;
281 __u64 pps = 0;
282
283 if (period_ > 0) {
284 packets = r->processed - p->processed;
285 pps = packets / period_;
286 }
287 return pps;
288}
289
290static __u64 calc_errs_pps(struct datarec *r,
291 struct datarec *p, double period_)
292{
293 __u64 packets = 0;
294 __u64 pps = 0;
295
296 if (period_ > 0) {
297 packets = r->issue - p->issue;
298 pps = packets / period_;
299 }
300 return pps;
301}
302
/* Print one sampling interval: per-CPU rates from the global XDP
 * counters, then per-RXQ (and per-CPU) rates from rx_queue_index_map.
 * Rates are deltas between stats_rec and stats_prev divided by the
 * measured period.  Only rows with non-zero pps are printed.
 */
static void stats_print(struct stats_record *stats_rec,
			struct stats_record *stats_prev,
			int action)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	unsigned int nr_rxqs = map_data[2].def.max_entries;
	double pps = 0, err = 0;
	struct record *rec, *prev;
	double t;
	int rxq;
	int i;

	/* Header */
	printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s\n",
	       ifname, ifindex, action2str(action));

	/* stats_global_map */
	{
		char *fmt_rx = "%-15s %-7d %'-11.0f %'-10.0f %s\n";
		char *fm2_rx = "%-15s %-7s %'-11.0f\n";
		char *errstr = "";

		printf("%-15s %-7s %-11s %-11s\n",
		       "XDP stats", "CPU", "pps", "issue-pps");

		rec = &stats_rec->stats;
		prev = &stats_prev->stats;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps (r, p, t);
			/* issue > 0 here means ifindex mismatch in _kern.c */
			err = calc_errs_pps(r, p, t);
			if (err > 0)
				errstr = "invalid-ifindex";
			if (pps > 0)
				printf(fmt_rx, "XDP-RX CPU",
					i, pps, err, errstr);
		}
		/* NOTE(review): fm2_rx has no conversion for the err arg;
		 * the extra printf argument is evaluated but not printed */
		pps = calc_pps (&rec->total, &prev->total, t);
		err = calc_errs_pps(&rec->total, &prev->total, t);
		printf(fm2_rx, "XDP-RX CPU", "total", pps, err);
	}

	/* rx_queue_index_map */
	printf("\n%-15s %-7s %-11s %-11s\n",
	       "RXQ stats", "RXQ:CPU", "pps", "issue-pps");

	for (rxq = 0; rxq < nr_rxqs; rxq++) {
		char *fmt_rx = "%-15s %3d:%-3d %'-11.0f %'-10.0f %s\n";
		char *fm2_rx = "%-15s %3d:%-3s %'-11.0f\n";
		char *errstr = "";
		int rxq_ = rxq;

		/* Last RXQ in map catch overflows */
		if (rxq_ == nr_rxqs - 1)
			rxq_ = -1;	/* printed as -1 to mark overflow slot */

		rec = &stats_rec->rxq[rxq];
		prev = &stats_prev->rxq[rxq];
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps (r, p, t);
			err = calc_errs_pps(r, p, t);
			if (err > 0) {
				if (rxq_ == -1)
					errstr = "map-overflow-RXQ";
				else
					errstr = "err";
			}
			if (pps > 0)
				printf(fmt_rx, "rx_queue_index",
					rxq_, i, pps, err, errstr);
		}
		pps = calc_pps (&rec->total, &prev->total, t);
		err = calc_errs_pps(&rec->total, &prev->total, t);
		if (pps || err)
			printf(fm2_rx, "rx_queue_index", rxq_, "sum", pps, err);
	}
}
387
388
/* Pointer swap trick: exchange two stats_record pointers so the caller
 * can reuse the previous snapshot's buffers instead of reallocating.
 */
static inline void swap(struct stats_record **a, struct stats_record **b)
{
	struct stats_record *hold;

	hold = *a;
	*a = *b;
	*b = hold;
}
398
/* Sample-and-print loop: collect a snapshot every `interval` seconds
 * and print rates against the previous snapshot.  Runs forever; the
 * process exits via the SIGINT handler (int_exit).
 */
static void stats_poll(int interval, int action)
{
	struct stats_record *record, *prev;

	record = alloc_stats_record();
	prev = alloc_stats_record();
	stats_collect(record);	/* prime the first "previous" sample */

	while (1) {
		swap(&prev, &record);
		stats_collect(record);
		stats_print(record, prev, action);
		sleep(interval);
	}

	/* NOTE(review): unreachable — the loop above never exits; cleanup
	 * happens in int_exit() on SIGINT instead */
	free_stats_record(record);
	free_stats_record(prev);
}
417
418
/* Entry point: load <argv0>_kern.o, parse options, push the config
 * (ifindex + action) into the BPF config_map, attach the XDP program
 * to the --dev interface, and loop printing stats until SIGINT.
 */
int main(int argc, char **argv)
{
	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
	bool use_separators = true;
	struct config cfg = { 0 };
	char filename[256];
	int longindex = 0;
	int interval = 2;	/* stats sampling period in seconds */
	__u32 key = 0;		/* config_map has a single entry at key 0 */
	int opt, err;

	char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 };
	int action = XDP_PASS; /* Default action */
	char *action_str = NULL;

	/* BPF object file is named after this binary: <argv0>_kern.o */
	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);

	/* Raise RLIMIT_MEMLOCK so BPF map/program memory can be charged */
	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		return 1;
	}

	if (load_bpf_file(filename)) {
		fprintf(stderr, "ERR in load_bpf_file(): %s", bpf_log_buf);
		return EXIT_FAIL;
	}

	if (!prog_fd[0]) {
		fprintf(stderr, "ERR: load_bpf_file: %s\n", strerror(errno));
		return EXIT_FAIL;
	}

	/* Parse commands line args */
	while ((opt = getopt_long(argc, argv, "hSd:",
				  long_options, &longindex)) != -1) {
		switch (opt) {
		case 'd':
			if (strlen(optarg) >= IF_NAMESIZE) {
				fprintf(stderr, "ERR: --dev name too long\n");
				goto error;
			}
			ifname = (char *)&ifname_buf;
			/* length checked above, so copy is NUL-terminated */
			strncpy(ifname, optarg, IF_NAMESIZE);
			ifindex = if_nametoindex(ifname);
			if (ifindex == 0) {
				fprintf(stderr,
					"ERR: --dev name unknown err(%d):%s\n",
					errno, strerror(errno));
				goto error;
			}
			break;
		case 's':
			interval = atoi(optarg);
			break;
		case 'S':
			xdp_flags |= XDP_FLAGS_SKB_MODE;
			break;
		case 'z':
			use_separators = false;
			break;
		case 'a':
			action_str = (char *)&action_str_buf;
			strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN);
			break;
		case 'h':
		error:
		default:
			usage(argv);
			return EXIT_FAIL_OPTION;
		}
	}
	/* Required option */
	if (ifindex == -1) {
		fprintf(stderr, "ERR: required option --dev missing\n");
		usage(argv);
		return EXIT_FAIL_OPTION;
	}
	cfg.ifindex = ifindex;

	/* Parse action string */
	if (action_str) {
		action = parse_xdp_action(action_str);
		if (action < 0) {
			fprintf(stderr, "ERR: Invalid XDP --action: %s\n",
				action_str);
			list_xdp_actions();
			return EXIT_FAIL_OPTION;
		}
	}
	cfg.action = action;

	/* Trick to pretty printf with thousands separators use %' */
	if (use_separators)
		setlocale(LC_NUMERIC, "en_US");

	/* User-side setup ifindex in config_map */
	err = bpf_map_update_elem(map_fd[0], &key, &cfg, 0);
	if (err) {
		fprintf(stderr, "Store config failed (err:%d)\n", err);
		exit(EXIT_FAIL_BPF);
	}

	/* Remove XDP program when program is interrupted */
	signal(SIGINT, int_exit);

	if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) {
		fprintf(stderr, "link set xdp fd failed\n");
		return EXIT_FAIL_XDP;
	}

	/* Never returns; loops printing stats until SIGINT -> int_exit() */
	stats_poll(interval, action);
	return EXIT_OK;
}