aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2017-11-05 08:26:20 -0500
committerDavid S. Miller <davem@davemloft.net>2017-11-05 08:26:20 -0500
commit8a3b718ac2c29abcba8e12636710cff3cee8c01b (patch)
tree8993f6c89d54cc54d2fb083635abe832d4f3b5bb
parent28e8c1914a20d020893978b67a6d2c618756bc3f (diff)
parentb37a530613104aa3f592376c67a462823298759c (diff)
Merge branch 'bpf-add-offload-as-a-first-class-citizen'
Jakub Kicinski says: ==================== bpf: add offload as a first class citizen This series is my stab at what was discussed at a recent IOvisor bi-weekly call. The idea is to make the device translator run at the program load time. This makes the offload more explicit to the user space. It also makes it easy for the device translator to insert information into the original verifier log. v2: - include linux/bug.h instead of asm/bug.h; - rebased on top of Craig's verifier fix (no changes, the last patch just removes more code now). I checked the set doesn't conflict with Jiri's, Josef's or Roman's patches, but missed Craig's fix :( v1: - rename the ifindex member on load; - improve commit messages; - split nfp patches more. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c194
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c87
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h59
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c282
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c54
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h37
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c12
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c4
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/virtio_net.c4
-rw-r--r--include/linux/bpf.h47
-rw-r--r--include/linux/bpf_verifier.h13
-rw-r--r--include/linux/netdevice.h37
-rw-r--r--include/uapi/linux/bpf.h7
-rw-r--r--kernel/bpf/Makefile1
-rw-r--r--kernel/bpf/core.c10
-rw-r--r--kernel/bpf/offload.c194
-rw-r--r--kernel/bpf/syscall.c52
-rw-r--r--kernel/bpf/verifier.c84
-rw-r--r--net/core/dev.c40
-rw-r--r--net/core/filter.c42
-rw-r--r--net/core/rtnetlink.c4
-rw-r--r--net/sched/cls_bpf.c10
-rw-r--r--tools/bpf/bpftool/prog.c31
-rw-r--r--tools/include/uapi/linux/bpf.h7
36 files changed, 686 insertions, 666 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4e3d569bf32e..96416f5d97f3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7775,7 +7775,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
7775#endif 7775#endif
7776 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, 7776 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
7777 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, 7777 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
7778 .ndo_xdp = bnxt_xdp, 7778 .ndo_bpf = bnxt_xdp,
7779 .ndo_bridge_getlink = bnxt_bridge_getlink, 7779 .ndo_bridge_getlink = bnxt_bridge_getlink,
7780 .ndo_bridge_setlink = bnxt_bridge_setlink, 7780 .ndo_bridge_setlink = bnxt_bridge_setlink,
7781 .ndo_get_phys_port_name = bnxt_get_phys_port_name 7781 .ndo_get_phys_port_name = bnxt_get_phys_port_name
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 06ce63c00821..261e5847557a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -208,7 +208,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
208 return 0; 208 return 0;
209} 209}
210 210
211int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp) 211int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
212{ 212{
213 struct bnxt *bp = netdev_priv(dev); 213 struct bnxt *bp = netdev_priv(dev);
214 int rc; 214 int rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 12a5ad66b564..414b748038ca 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -16,6 +16,6 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
16bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, 16bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
17 struct page *page, u8 **data_ptr, unsigned int *len, 17 struct page *page, u8 **data_ptr, unsigned int *len,
18 u8 *event); 18 u8 *event);
19int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp); 19int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
20 20
21#endif 21#endif
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 71989e180289..a063c36c4c58 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1741,7 +1741,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
1741 return 0; 1741 return 0;
1742} 1742}
1743 1743
1744static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp) 1744static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
1745{ 1745{
1746 struct nicvf *nic = netdev_priv(netdev); 1746 struct nicvf *nic = netdev_priv(netdev);
1747 1747
@@ -1774,7 +1774,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
1774 .ndo_tx_timeout = nicvf_tx_timeout, 1774 .ndo_tx_timeout = nicvf_tx_timeout,
1775 .ndo_fix_features = nicvf_fix_features, 1775 .ndo_fix_features = nicvf_fix_features,
1776 .ndo_set_features = nicvf_set_features, 1776 .ndo_set_features = nicvf_set_features,
1777 .ndo_xdp = nicvf_xdp, 1777 .ndo_bpf = nicvf_xdp,
1778}; 1778};
1779 1779
1780static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1780static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index dfecaeda0654..05b94d87a6c3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -11648,12 +11648,12 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
11648} 11648}
11649 11649
11650/** 11650/**
11651 * i40e_xdp - implements ndo_xdp for i40e 11651 * i40e_xdp - implements ndo_bpf for i40e
11652 * @dev: netdevice 11652 * @dev: netdevice
11653 * @xdp: XDP command 11653 * @xdp: XDP command
11654 **/ 11654 **/
11655static int i40e_xdp(struct net_device *dev, 11655static int i40e_xdp(struct net_device *dev,
11656 struct netdev_xdp *xdp) 11656 struct netdev_bpf *xdp)
11657{ 11657{
11658 struct i40e_netdev_priv *np = netdev_priv(dev); 11658 struct i40e_netdev_priv *np = netdev_priv(dev);
11659 struct i40e_vsi *vsi = np->vsi; 11659 struct i40e_vsi *vsi = np->vsi;
@@ -11705,7 +11705,7 @@ static const struct net_device_ops i40e_netdev_ops = {
11705 .ndo_features_check = i40e_features_check, 11705 .ndo_features_check = i40e_features_check,
11706 .ndo_bridge_getlink = i40e_ndo_bridge_getlink, 11706 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
11707 .ndo_bridge_setlink = i40e_ndo_bridge_setlink, 11707 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
11708 .ndo_xdp = i40e_xdp, 11708 .ndo_bpf = i40e_xdp,
11709}; 11709};
11710 11710
11711/** 11711/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 507977994a03..e5dcb25be398 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10004,7 +10004,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10004 return 0; 10004 return 0;
10005} 10005}
10006 10006
10007static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp) 10007static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
10008{ 10008{
10009 struct ixgbe_adapter *adapter = netdev_priv(dev); 10009 struct ixgbe_adapter *adapter = netdev_priv(dev);
10010 10010
@@ -10113,7 +10113,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
10113 .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, 10113 .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
10114 .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, 10114 .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
10115 .ndo_features_check = ixgbe_features_check, 10115 .ndo_features_check = ixgbe_features_check,
10116 .ndo_xdp = ixgbe_xdp, 10116 .ndo_bpf = ixgbe_xdp,
10117 .ndo_xdp_xmit = ixgbe_xdp_xmit, 10117 .ndo_xdp_xmit = ixgbe_xdp_xmit,
10118 .ndo_xdp_flush = ixgbe_xdp_flush, 10118 .ndo_xdp_flush = ixgbe_xdp_flush,
10119}; 10119};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index d611df2f274d..736a6ccaf05e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2916,7 +2916,7 @@ static u32 mlx4_xdp_query(struct net_device *dev)
2916 return prog_id; 2916 return prog_id;
2917} 2917}
2918 2918
2919static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp) 2919static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2920{ 2920{
2921 switch (xdp->command) { 2921 switch (xdp->command) {
2922 case XDP_SETUP_PROG: 2922 case XDP_SETUP_PROG:
@@ -2958,7 +2958,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
2958 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, 2958 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
2959 .ndo_features_check = mlx4_en_features_check, 2959 .ndo_features_check = mlx4_en_features_check,
2960 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2960 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
2961 .ndo_xdp = mlx4_xdp, 2961 .ndo_bpf = mlx4_xdp,
2962}; 2962};
2963 2963
2964static const struct net_device_ops mlx4_netdev_ops_master = { 2964static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2995,7 +2995,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
2995 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, 2995 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
2996 .ndo_features_check = mlx4_en_features_check, 2996 .ndo_features_check = mlx4_en_features_check,
2997 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2997 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
2998 .ndo_xdp = mlx4_xdp, 2998 .ndo_bpf = mlx4_xdp,
2999}; 2999};
3000 3000
3001struct mlx4_en_bond { 3001struct mlx4_en_bond {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 28ae00b3eb88..3b7b7bb84eb0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3831,7 +3831,7 @@ static u32 mlx5e_xdp_query(struct net_device *dev)
3831 return prog_id; 3831 return prog_id;
3832} 3832}
3833 3833
3834static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp) 3834static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3835{ 3835{
3836 switch (xdp->command) { 3836 switch (xdp->command) {
3837 case XDP_SETUP_PROG: 3837 case XDP_SETUP_PROG:
@@ -3883,7 +3883,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
3883 .ndo_rx_flow_steer = mlx5e_rx_flow_steer, 3883 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
3884#endif 3884#endif
3885 .ndo_tx_timeout = mlx5e_tx_timeout, 3885 .ndo_tx_timeout = mlx5e_tx_timeout,
3886 .ndo_xdp = mlx5e_xdp, 3886 .ndo_bpf = mlx5e_xdp,
3887#ifdef CONFIG_NET_POLL_CONTROLLER 3887#ifdef CONFIG_NET_POLL_CONTROLLER
3888 .ndo_poll_controller = mlx5e_netpoll, 3888 .ndo_poll_controller = mlx5e_netpoll,
3889#endif 3889#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 2609a2487100..995e95410b11 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -77,17 +77,6 @@ nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
77 return meta->l.prev != &nfp_prog->insns; 77 return meta->l.prev != &nfp_prog->insns;
78} 78}
79 79
80static void nfp_prog_free(struct nfp_prog *nfp_prog)
81{
82 struct nfp_insn_meta *meta, *tmp;
83
84 list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
85 list_del(&meta->l);
86 kfree(meta);
87 }
88 kfree(nfp_prog);
89}
90
91static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn) 80static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
92{ 81{
93 if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) { 82 if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
@@ -202,47 +191,6 @@ emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
202} 191}
203 192
204static void 193static void
205__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
206 u8 byte, bool equal, u16 addr, u8 defer, bool src_lmextn)
207{
208 u16 addr_lo, addr_hi;
209 u64 insn;
210
211 addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
212 addr_hi = addr != addr_lo;
213
214 insn = OP_BBYTE_BASE |
215 FIELD_PREP(OP_BB_A_SRC, areg) |
216 FIELD_PREP(OP_BB_BYTE, byte) |
217 FIELD_PREP(OP_BB_B_SRC, breg) |
218 FIELD_PREP(OP_BB_I8, imm8) |
219 FIELD_PREP(OP_BB_EQ, equal) |
220 FIELD_PREP(OP_BB_DEFBR, defer) |
221 FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
222 FIELD_PREP(OP_BB_ADDR_HI, addr_hi) |
223 FIELD_PREP(OP_BB_SRC_LMEXTN, src_lmextn);
224
225 nfp_prog_push(nfp_prog, insn);
226}
227
228static void
229emit_br_byte_neq(struct nfp_prog *nfp_prog,
230 swreg src, u8 imm, u8 byte, u16 addr, u8 defer)
231{
232 struct nfp_insn_re_regs reg;
233 int err;
234
235 err = swreg_to_restricted(reg_none(), src, reg_imm(imm), &reg, true);
236 if (err) {
237 nfp_prog->error = err;
238 return;
239 }
240
241 __emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
242 defer, reg.src_lmextn);
243}
244
245static void
246__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, 194__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
247 enum immed_width width, bool invert, 195 enum immed_width width, bool invert,
248 enum immed_shift shift, bool wr_both, 196 enum immed_shift shift, bool wr_both,
@@ -1479,19 +1427,18 @@ static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1479 swreg dst = reg_both(meta->insn.dst_reg * 2); 1427 swreg dst = reg_both(meta->insn.dst_reg * 2);
1480 1428
1481 switch (meta->insn.off) { 1429 switch (meta->insn.off) {
1482 case offsetof(struct sk_buff, len): 1430 case offsetof(struct __sk_buff, len):
1483 if (size != FIELD_SIZEOF(struct sk_buff, len)) 1431 if (size != FIELD_SIZEOF(struct __sk_buff, len))
1484 return -EOPNOTSUPP; 1432 return -EOPNOTSUPP;
1485 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); 1433 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
1486 break; 1434 break;
1487 case offsetof(struct sk_buff, data): 1435 case offsetof(struct __sk_buff, data):
1488 if (size != sizeof(void *)) 1436 if (size != FIELD_SIZEOF(struct __sk_buff, data))
1489 return -EOPNOTSUPP; 1437 return -EOPNOTSUPP;
1490 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 1438 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
1491 break; 1439 break;
1492 case offsetof(struct sk_buff, cb) + 1440 case offsetof(struct __sk_buff, data_end):
1493 offsetof(struct bpf_skb_data_end, data_end): 1441 if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
1494 if (size != sizeof(void *))
1495 return -EOPNOTSUPP; 1442 return -EOPNOTSUPP;
1496 emit_alu(nfp_prog, dst, 1443 emit_alu(nfp_prog, dst,
1497 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 1444 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
@@ -1510,14 +1457,15 @@ static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1510{ 1457{
1511 swreg dst = reg_both(meta->insn.dst_reg * 2); 1458 swreg dst = reg_both(meta->insn.dst_reg * 2);
1512 1459
1513 if (size != sizeof(void *))
1514 return -EINVAL;
1515
1516 switch (meta->insn.off) { 1460 switch (meta->insn.off) {
1517 case offsetof(struct xdp_buff, data): 1461 case offsetof(struct xdp_md, data):
1462 if (size != FIELD_SIZEOF(struct xdp_md, data))
1463 return -EOPNOTSUPP;
1518 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 1464 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
1519 break; 1465 break;
1520 case offsetof(struct xdp_buff, data_end): 1466 case offsetof(struct xdp_md, data_end):
1467 if (size != FIELD_SIZEOF(struct xdp_md, data_end))
1468 return -EOPNOTSUPP;
1521 emit_alu(nfp_prog, dst, 1469 emit_alu(nfp_prog, dst,
1522 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 1470 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
1523 break; 1471 break;
@@ -1547,7 +1495,7 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1547 unsigned int size) 1495 unsigned int size)
1548{ 1496{
1549 if (meta->ptr.type == PTR_TO_CTX) { 1497 if (meta->ptr.type == PTR_TO_CTX) {
1550 if (nfp_prog->act == NN_ACT_XDP) 1498 if (nfp_prog->type == BPF_PROG_TYPE_XDP)
1551 return mem_ldx_xdp(nfp_prog, meta, size); 1499 return mem_ldx_xdp(nfp_prog, meta, size);
1552 else 1500 else
1553 return mem_ldx_skb(nfp_prog, meta, size); 1501 return mem_ldx_skb(nfp_prog, meta, size);
@@ -2022,34 +1970,6 @@ static void nfp_intro(struct nfp_prog *nfp_prog)
2022 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); 1970 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
2023} 1971}
2024 1972
2025static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
2026{
2027 const u8 act2code[] = {
2028 [NN_ACT_TC_DROP] = 0x22,
2029 [NN_ACT_TC_REDIR] = 0x24
2030 };
2031 /* Target for aborts */
2032 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
2033 wrp_immed(nfp_prog, reg_both(0), 0);
2034
2035 /* Target for normal exits */
2036 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
2037 /* Legacy TC mode:
2038 * 0 0x11 -> pass, count as stat0
2039 * -1 drop 0x22 -> drop, count as stat1
2040 * redir 0x24 -> redir, count as stat1
2041 * ife mark 0x21 -> pass, count as stat1
2042 * ife + tx 0x24 -> redir, count as stat1
2043 */
2044 emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
2045 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
2046 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
2047
2048 emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
2049 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
2050 SHF_SC_L_SHF, 16);
2051}
2052
2053static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) 1973static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
2054{ 1974{
2055 /* TC direct-action mode: 1975 /* TC direct-action mode:
@@ -2142,17 +2062,15 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
2142 2062
2143static void nfp_outro(struct nfp_prog *nfp_prog) 2063static void nfp_outro(struct nfp_prog *nfp_prog)
2144{ 2064{
2145 switch (nfp_prog->act) { 2065 switch (nfp_prog->type) {
2146 case NN_ACT_DIRECT: 2066 case BPF_PROG_TYPE_SCHED_CLS:
2147 nfp_outro_tc_da(nfp_prog); 2067 nfp_outro_tc_da(nfp_prog);
2148 break; 2068 break;
2149 case NN_ACT_TC_DROP: 2069 case BPF_PROG_TYPE_XDP:
2150 case NN_ACT_TC_REDIR:
2151 nfp_outro_tc_legacy(nfp_prog);
2152 break;
2153 case NN_ACT_XDP:
2154 nfp_outro_xdp(nfp_prog); 2070 nfp_outro_xdp(nfp_prog);
2155 break; 2071 break;
2072 default:
2073 WARN_ON(1);
2156 } 2074 }
2157} 2075}
2158 2076
@@ -2198,28 +2116,6 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
2198 return nfp_fixup_branches(nfp_prog); 2116 return nfp_fixup_branches(nfp_prog);
2199} 2117}
2200 2118
2201static int
2202nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
2203 unsigned int cnt)
2204{
2205 unsigned int i;
2206
2207 for (i = 0; i < cnt; i++) {
2208 struct nfp_insn_meta *meta;
2209
2210 meta = kzalloc(sizeof(*meta), GFP_KERNEL);
2211 if (!meta)
2212 return -ENOMEM;
2213
2214 meta->insn = prog[i];
2215 meta->n = i;
2216
2217 list_add_tail(&meta->l, &nfp_prog->insns);
2218 }
2219
2220 return 0;
2221}
2222
2223/* --- Optimizations --- */ 2119/* --- Optimizations --- */
2224static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) 2120static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
2225{ 2121{
@@ -2347,66 +2243,20 @@ static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
2347 return 0; 2243 return 0;
2348} 2244}
2349 2245
2350/** 2246int nfp_bpf_jit(struct nfp_prog *nfp_prog)
2351 * nfp_bpf_jit() - translate BPF code into NFP assembly
2352 * @filter: kernel BPF filter struct
2353 * @prog_mem: memory to store assembler instructions
2354 * @act: action attached to this eBPF program
2355 * @prog_start: offset of the first instruction when loaded
2356 * @prog_done: where to jump on exit
2357 * @prog_sz: size of @prog_mem in instructions
2358 * @res: achieved parameters of translation results
2359 */
2360int
2361nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
2362 enum nfp_bpf_action_type act,
2363 unsigned int prog_start, unsigned int prog_done,
2364 unsigned int prog_sz, struct nfp_bpf_result *res)
2365{ 2247{
2366 struct nfp_prog *nfp_prog;
2367 int ret; 2248 int ret;
2368 2249
2369 nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
2370 if (!nfp_prog)
2371 return -ENOMEM;
2372
2373 INIT_LIST_HEAD(&nfp_prog->insns);
2374 nfp_prog->act = act;
2375 nfp_prog->start_off = prog_start;
2376 nfp_prog->tgt_done = prog_done;
2377
2378 ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
2379 if (ret)
2380 goto out;
2381
2382 ret = nfp_prog_verify(nfp_prog, filter);
2383 if (ret)
2384 goto out;
2385
2386 ret = nfp_bpf_optimize(nfp_prog); 2250 ret = nfp_bpf_optimize(nfp_prog);
2387 if (ret) 2251 if (ret)
2388 goto out; 2252 return ret;
2389
2390 nfp_prog->num_regs = MAX_BPF_REG;
2391 nfp_prog->regs_per_thread = 32;
2392
2393 nfp_prog->prog = prog_mem;
2394 nfp_prog->__prog_alloc_len = prog_sz;
2395 2253
2396 ret = nfp_translate(nfp_prog); 2254 ret = nfp_translate(nfp_prog);
2397 if (ret) { 2255 if (ret) {
2398 pr_err("Translation failed with error %d (translated: %u)\n", 2256 pr_err("Translation failed with error %d (translated: %u)\n",
2399 ret, nfp_prog->n_translated); 2257 ret, nfp_prog->n_translated);
2400 ret = -EINVAL; 2258 return -EINVAL;
2401 goto out;
2402 } 2259 }
2403 2260
2404 ret = nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)prog_mem); 2261 return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
2405
2406 res->n_instr = nfp_prog->prog_len;
2407 res->dense_mode = false;
2408out:
2409 nfp_prog_free(nfp_prog);
2410
2411 return ret;
2412} 2262}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 8e3e89cace8d..e379b78e86ef 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -54,28 +54,25 @@ static int
54nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, 54nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
55 struct bpf_prog *prog) 55 struct bpf_prog *prog)
56{ 56{
57 struct tc_cls_bpf_offload cmd = { 57 bool running, xdp_running;
58 .prog = prog,
59 };
60 int ret; 58 int ret;
61 59
62 if (!nfp_net_ebpf_capable(nn)) 60 if (!nfp_net_ebpf_capable(nn))
63 return -EINVAL; 61 return -EINVAL;
64 62
65 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) { 63 running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
66 if (!nn->dp.bpf_offload_xdp) 64 xdp_running = running && nn->dp.bpf_offload_xdp;
67 return prog ? -EBUSY : 0; 65
68 cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY; 66 if (!prog && !xdp_running)
69 } else { 67 return 0;
70 if (!prog) 68 if (prog && running && !xdp_running)
71 return 0; 69 return -EBUSY;
72 cmd.command = TC_CLSBPF_ADD;
73 }
74 70
75 ret = nfp_net_bpf_offload(nn, &cmd); 71 ret = nfp_net_bpf_offload(nn, prog, running);
76 /* Stop offload if replace not possible */ 72 /* Stop offload if replace not possible */
77 if (ret && cmd.command == TC_CLSBPF_REPLACE) 73 if (ret && prog)
78 nfp_bpf_xdp_offload(app, nn, NULL); 74 nfp_bpf_xdp_offload(app, nn, NULL);
75
79 nn->dp.bpf_offload_xdp = prog && !ret; 76 nn->dp.bpf_offload_xdp = prog && !ret;
80 return ret; 77 return ret;
81} 78}
@@ -85,34 +82,10 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
85 return nfp_net_ebpf_capable(nn) ? "BPF" : ""; 82 return nfp_net_ebpf_capable(nn) ? "BPF" : "";
86} 83}
87 84
88static int
89nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
90{
91 struct nfp_net_bpf_priv *priv;
92 int ret;
93
94 priv = kmalloc(sizeof(*priv), GFP_KERNEL);
95 if (!priv)
96 return -ENOMEM;
97
98 nn->app_priv = priv;
99 spin_lock_init(&priv->rx_filter_lock);
100 priv->nn = nn;
101 timer_setup(&priv->rx_filter_stats_timer,
102 nfp_net_filter_stats_timer, 0);
103
104 ret = nfp_app_nic_vnic_alloc(app, nn, id);
105 if (ret)
106 kfree(priv);
107
108 return ret;
109}
110
111static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn) 85static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
112{ 86{
113 if (nn->dp.bpf_offload_xdp) 87 if (nn->dp.bpf_offload_xdp)
114 nfp_bpf_xdp_offload(app, nn, NULL); 88 nfp_bpf_xdp_offload(app, nn, NULL);
115 kfree(nn->app_priv);
116} 89}
117 90
118static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, 91static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
@@ -121,19 +94,29 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
121 struct tc_cls_bpf_offload *cls_bpf = type_data; 94 struct tc_cls_bpf_offload *cls_bpf = type_data;
122 struct nfp_net *nn = cb_priv; 95 struct nfp_net *nn = cb_priv;
123 96
124 if (!tc_can_offload(nn->dp.netdev)) 97 if (type != TC_SETUP_CLSBPF ||
98 !tc_can_offload(nn->dp.netdev) ||
99 !nfp_net_ebpf_capable(nn) ||
100 cls_bpf->common.protocol != htons(ETH_P_ALL) ||
101 cls_bpf->common.chain_index)
102 return -EOPNOTSUPP;
103 if (nn->dp.bpf_offload_xdp)
104 return -EBUSY;
105
106 /* Only support TC direct action */
107 if (!cls_bpf->exts_integrated ||
108 tcf_exts_has_actions(cls_bpf->exts)) {
109 nn_err(nn, "only direct action with no legacy actions supported\n");
125 return -EOPNOTSUPP; 110 return -EOPNOTSUPP;
111 }
126 112
127 switch (type) { 113 switch (cls_bpf->command) {
128 case TC_SETUP_CLSBPF: 114 case TC_CLSBPF_REPLACE:
129 if (!nfp_net_ebpf_capable(nn) || 115 return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
130 cls_bpf->common.protocol != htons(ETH_P_ALL) || 116 case TC_CLSBPF_ADD:
131 cls_bpf->common.chain_index) 117 return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
132 return -EOPNOTSUPP; 118 case TC_CLSBPF_DESTROY:
133 if (nn->dp.bpf_offload_xdp) 119 return nfp_net_bpf_offload(nn, NULL, true);
134 return -EBUSY;
135
136 return nfp_net_bpf_offload(nn, cls_bpf);
137 default: 120 default:
138 return -EOPNOTSUPP; 121 return -EOPNOTSUPP;
139 } 122 }
@@ -184,10 +167,14 @@ const struct nfp_app_type app_bpf = {
184 167
185 .extra_cap = nfp_bpf_extra_cap, 168 .extra_cap = nfp_bpf_extra_cap,
186 169
187 .vnic_alloc = nfp_bpf_vnic_alloc, 170 .vnic_alloc = nfp_app_nic_vnic_alloc,
188 .vnic_free = nfp_bpf_vnic_free, 171 .vnic_free = nfp_bpf_vnic_free,
189 172
190 .setup_tc = nfp_bpf_setup_tc, 173 .setup_tc = nfp_bpf_setup_tc,
191 .tc_busy = nfp_bpf_tc_busy, 174 .tc_busy = nfp_bpf_tc_busy,
192 .xdp_offload = nfp_bpf_xdp_offload, 175 .xdp_offload = nfp_bpf_xdp_offload,
176
177 .bpf_verifier_prep = nfp_bpf_verifier_prep,
178 .bpf_translate = nfp_bpf_translate,
179 .bpf_destroy = nfp_bpf_destroy,
193}; 180};
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index bc604030ff6c..082a15f6dfb5 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -41,7 +41,6 @@
41#include <linux/types.h> 41#include <linux/types.h>
42 42
43#include "../nfp_asm.h" 43#include "../nfp_asm.h"
44#include "../nfp_net.h"
45 44
46/* For branch fixup logic use up-most byte of branch instruction as scratch 45/* For branch fixup logic use up-most byte of branch instruction as scratch
47 * area. Remember to clear this before sending instructions to HW! 46 * area. Remember to clear this before sending instructions to HW!
@@ -65,13 +64,6 @@ enum pkt_vec {
65 PKT_VEC_PKT_PTR = 2, 64 PKT_VEC_PKT_PTR = 2,
66}; 65};
67 66
68enum nfp_bpf_action_type {
69 NN_ACT_TC_DROP,
70 NN_ACT_TC_REDIR,
71 NN_ACT_DIRECT,
72 NN_ACT_XDP,
73};
74
75#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN) 67#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN)
76#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR) 68#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR)
77 69
@@ -147,9 +139,8 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
147 * @prog: machine code 139 * @prog: machine code
148 * @prog_len: number of valid instructions in @prog array 140 * @prog_len: number of valid instructions in @prog array
149 * @__prog_alloc_len: alloc size of @prog array 141 * @__prog_alloc_len: alloc size of @prog array
150 * @act: BPF program/action type (TC DA, TC with action, XDP etc.) 142 * @verifier_meta: temporary storage for verifier's insn meta
151 * @num_regs: number of registers used by this program 143 * @type: BPF program type
152 * @regs_per_thread: number of basic registers allocated per thread
153 * @start_off: address of the first instruction in the memory 144 * @start_off: address of the first instruction in the memory
154 * @tgt_out: jump target for normal exit 145 * @tgt_out: jump target for normal exit
155 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer) 146 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
@@ -164,10 +155,9 @@ struct nfp_prog {
164 unsigned int prog_len; 155 unsigned int prog_len;
165 unsigned int __prog_alloc_len; 156 unsigned int __prog_alloc_len;
166 157
167 enum nfp_bpf_action_type act; 158 struct nfp_insn_meta *verifier_meta;
168 159
169 unsigned int num_regs; 160 enum bpf_prog_type type;
170 unsigned int regs_per_thread;
171 161
172 unsigned int start_off; 162 unsigned int start_off;
173 unsigned int tgt_out; 163 unsigned int tgt_out;
@@ -182,38 +172,21 @@ struct nfp_prog {
182 struct list_head insns; 172 struct list_head insns;
183}; 173};
184 174
185struct nfp_bpf_result { 175int nfp_bpf_jit(struct nfp_prog *prog);
186 unsigned int n_instr;
187 bool dense_mode;
188};
189
190int
191nfp_bpf_jit(struct bpf_prog *filter, void *prog, enum nfp_bpf_action_type act,
192 unsigned int prog_start, unsigned int prog_done,
193 unsigned int prog_sz, struct nfp_bpf_result *res);
194 176
195int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog); 177extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
196 178
179struct netdev_bpf;
180struct nfp_app;
197struct nfp_net; 181struct nfp_net;
198struct tc_cls_bpf_offload;
199
200/**
201 * struct nfp_net_bpf_priv - per-vNIC BPF private data
202 * @rx_filter: Filter offload statistics - dropped packets/bytes
203 * @rx_filter_prev: Filter offload statistics - values from previous update
204 * @rx_filter_change: Jiffies when statistics last changed
205 * @rx_filter_stats_timer: Timer for polling filter offload statistics
206 * @rx_filter_lock: Lock protecting timer state changes (teardown)
207 */
208struct nfp_net_bpf_priv {
209 struct nfp_stat_pair rx_filter, rx_filter_prev;
210 unsigned long rx_filter_change;
211 struct timer_list rx_filter_stats_timer;
212 struct nfp_net *nn;
213 spinlock_t rx_filter_lock;
214};
215 182
216int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf); 183int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
217void nfp_net_filter_stats_timer(struct timer_list *t); 184 bool old_prog);
218 185
186int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
187 struct netdev_bpf *bpf);
188int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
189 struct bpf_prog *prog);
190int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
191 struct bpf_prog *prog);
219#endif 192#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 6d576f631392..b6cee71f49d3 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -51,109 +51,114 @@
51#include "../nfp_net_ctrl.h" 51#include "../nfp_net_ctrl.h"
52#include "../nfp_net.h" 52#include "../nfp_net.h"
53 53
54void nfp_net_filter_stats_timer(struct timer_list *t) 54static int
55nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
56 unsigned int cnt)
55{ 57{
56 struct nfp_net_bpf_priv *priv = from_timer(priv, t, 58 unsigned int i;
57 rx_filter_stats_timer);
58 struct nfp_net *nn = priv->nn;
59 struct nfp_stat_pair latest;
60
61 spin_lock_bh(&priv->rx_filter_lock);
62 59
63 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) 60 for (i = 0; i < cnt; i++) {
64 mod_timer(&priv->rx_filter_stats_timer, 61 struct nfp_insn_meta *meta;
65 jiffies + NFP_NET_STAT_POLL_IVL);
66 62
67 spin_unlock_bh(&priv->rx_filter_lock); 63 meta = kzalloc(sizeof(*meta), GFP_KERNEL);
64 if (!meta)
65 return -ENOMEM;
68 66
69 latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES); 67 meta->insn = prog[i];
70 latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES); 68 meta->n = i;
71 69
72 if (latest.pkts != priv->rx_filter.pkts) 70 list_add_tail(&meta->l, &nfp_prog->insns);
73 priv->rx_filter_change = jiffies; 71 }
74 72
75 priv->rx_filter = latest; 73 return 0;
76} 74}
77 75
78static void nfp_net_bpf_stats_reset(struct nfp_net *nn) 76static void nfp_prog_free(struct nfp_prog *nfp_prog)
79{ 77{
80 struct nfp_net_bpf_priv *priv = nn->app_priv; 78 struct nfp_insn_meta *meta, *tmp;
81 79
82 priv->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES); 80 list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
83 priv->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES); 81 list_del(&meta->l);
84 priv->rx_filter_prev = priv->rx_filter; 82 kfree(meta);
85 priv->rx_filter_change = jiffies; 83 }
84 kfree(nfp_prog);
86} 85}
87 86
88static int 87int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
89nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) 88 struct netdev_bpf *bpf)
90{ 89{
91 struct nfp_net_bpf_priv *priv = nn->app_priv; 90 struct bpf_prog *prog = bpf->verifier.prog;
92 u64 bytes, pkts; 91 struct nfp_prog *nfp_prog;
92 int ret;
93 93
94 pkts = priv->rx_filter.pkts - priv->rx_filter_prev.pkts; 94 nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
95 bytes = priv->rx_filter.bytes - priv->rx_filter_prev.bytes; 95 if (!nfp_prog)
96 bytes -= pkts * ETH_HLEN; 96 return -ENOMEM;
97 prog->aux->offload->dev_priv = nfp_prog;
97 98
98 priv->rx_filter_prev = priv->rx_filter; 99 INIT_LIST_HEAD(&nfp_prog->insns);
100 nfp_prog->type = prog->type;
101
102 ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
103 if (ret)
104 goto err_free;
99 105
100 tcf_exts_stats_update(cls_bpf->exts, 106 nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
101 bytes, pkts, priv->rx_filter_change); 107 bpf->verifier.ops = &nfp_bpf_analyzer_ops;
102 108
103 return 0; 109 return 0;
104}
105 110
106static int 111err_free:
107nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) 112 nfp_prog_free(nfp_prog);
108{
109 const struct tc_action *a;
110 LIST_HEAD(actions);
111 113
112 if (!cls_bpf->exts) 114 return ret;
113 return NN_ACT_XDP; 115}
114 116
115 /* TC direct action */ 117int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
116 if (cls_bpf->exts_integrated) { 118 struct bpf_prog *prog)
117 if (!tcf_exts_has_actions(cls_bpf->exts)) 119{
118 return NN_ACT_DIRECT; 120 struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
121 unsigned int stack_size;
122 unsigned int max_instr;
119 123
124 stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
125 if (prog->aux->stack_depth > stack_size) {
126 nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
127 prog->aux->stack_depth, stack_size);
120 return -EOPNOTSUPP; 128 return -EOPNOTSUPP;
121 } 129 }
122 130
123 /* TC legacy mode */ 131 nfp_prog->stack_depth = prog->aux->stack_depth;
124 if (!tcf_exts_has_one_action(cls_bpf->exts)) 132 nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
125 return -EOPNOTSUPP; 133 nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
126 134
127 tcf_exts_to_list(cls_bpf->exts, &actions); 135 max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
128 list_for_each_entry(a, &actions, list) { 136 nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
129 if (is_tcf_gact_shot(a))
130 return NN_ACT_TC_DROP;
131 137
132 if (is_tcf_mirred_egress_redirect(a) && 138 nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
133 tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex) 139 if (!nfp_prog->prog)
134 return NN_ACT_TC_REDIR; 140 return -ENOMEM;
135 }
136 141
137 return -EOPNOTSUPP; 142 return nfp_bpf_jit(nfp_prog);
138} 143}
139 144
140static int 145int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
141nfp_net_bpf_offload_prepare(struct nfp_net *nn, 146 struct bpf_prog *prog)
142 struct tc_cls_bpf_offload *cls_bpf,
143 struct nfp_bpf_result *res,
144 void **code, dma_addr_t *dma_addr, u16 max_instr)
145{ 147{
146 unsigned int code_sz = max_instr * sizeof(u64); 148 struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
147 enum nfp_bpf_action_type act;
148 unsigned int stack_size;
149 u16 start_off, done_off;
150 unsigned int max_mtu;
151 int ret;
152 149
153 ret = nfp_net_bpf_get_act(nn, cls_bpf); 150 kfree(nfp_prog->prog);
154 if (ret < 0) 151 nfp_prog_free(nfp_prog);
155 return ret; 152
156 act = ret; 153 return 0;
154}
155
156static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
157{
158 struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
159 unsigned int max_mtu;
160 dma_addr_t dma_addr;
161 int err;
157 162
158 max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; 163 max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
159 if (max_mtu < nn->dp.netdev->mtu) { 164 if (max_mtu < nn->dp.netdev->mtu) {
@@ -161,141 +166,80 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
161 return -EOPNOTSUPP; 166 return -EOPNOTSUPP;
162 } 167 }
163 168
164 start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); 169 dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
165 done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE); 170 nfp_prog->prog_len * sizeof(u64),
166 171 DMA_TO_DEVICE);
167 stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; 172 if (dma_mapping_error(nn->dp.dev, dma_addr))
168 if (cls_bpf->prog->aux->stack_depth > stack_size) {
169 nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
170 cls_bpf->prog->aux->stack_depth, stack_size);
171 return -EOPNOTSUPP;
172 }
173
174 *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
175 if (!*code)
176 return -ENOMEM; 173 return -ENOMEM;
177 174
178 ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off, 175 nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
179 max_instr, res); 176 nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
180 if (ret)
181 goto out;
182 177
183 return 0; 178 /* Load up the JITed code */
179 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
180 if (err)
181 nn_err(nn, "FW command error while loading BPF: %d\n", err);
184 182
185out: 183 dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
186 dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr); 184 DMA_TO_DEVICE);
187 return ret; 185
186 return err;
188} 187}
189 188
190static void 189static void nfp_net_bpf_start(struct nfp_net *nn)
191nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
192 void *code, dma_addr_t dma_addr,
193 unsigned int code_sz, unsigned int n_instr,
194 bool dense_mode)
195{ 190{
196 struct nfp_net_bpf_priv *priv = nn->app_priv;
197 u64 bpf_addr = dma_addr;
198 int err; 191 int err;
199 192
200 nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
201
202 if (dense_mode)
203 bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
204
205 nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
206 nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);
207
208 /* Load up the JITed code */
209 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
210 if (err)
211 nn_err(nn, "FW command error while loading BPF: %d\n", err);
212
213 /* Enable passing packets through BPF function */ 193 /* Enable passing packets through BPF function */
214 nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF; 194 nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
215 nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl); 195 nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
216 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); 196 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
217 if (err) 197 if (err)
218 nn_err(nn, "FW command error while enabling BPF: %d\n", err); 198 nn_err(nn, "FW command error while enabling BPF: %d\n", err);
219
220 dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
221
222 nfp_net_bpf_stats_reset(nn);
223 mod_timer(&priv->rx_filter_stats_timer,
224 jiffies + NFP_NET_STAT_POLL_IVL);
225} 199}
226 200
227static int nfp_net_bpf_stop(struct nfp_net *nn) 201static int nfp_net_bpf_stop(struct nfp_net *nn)
228{ 202{
229 struct nfp_net_bpf_priv *priv = nn->app_priv;
230
231 if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)) 203 if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
232 return 0; 204 return 0;
233 205
234 spin_lock_bh(&priv->rx_filter_lock);
235 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF; 206 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
236 spin_unlock_bh(&priv->rx_filter_lock);
237 nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl); 207 nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
238 208
239 del_timer_sync(&priv->rx_filter_stats_timer);
240 nn->dp.bpf_offload_skip_sw = 0;
241
242 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); 209 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
243} 210}
244 211
245int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) 212int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
213 bool old_prog)
246{ 214{
247 struct nfp_bpf_result res;
248 dma_addr_t dma_addr;
249 u16 max_instr;
250 void *code;
251 int err; 215 int err;
252 216
253 max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); 217 if (prog && !prog->aux->offload)
254 218 return -EINVAL;
255 switch (cls_bpf->command) {
256 case TC_CLSBPF_REPLACE:
257 /* There is nothing stopping us from implementing seamless
258 * replace but the simple method of loading I adopted in
259 * the firmware does not handle atomic replace (i.e. we have to
260 * stop the BPF offload and re-enable it). Leaking-in a few
261 * frames which didn't have BPF applied in the hardware should
262 * be fine if software fallback is available, though.
263 */
264 if (nn->dp.bpf_offload_skip_sw)
265 return -EBUSY;
266
267 err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
268 &dma_addr, max_instr);
269 if (err)
270 return err;
271 219
272 nfp_net_bpf_stop(nn); 220 if (prog && old_prog) {
273 nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code, 221 u8 cap;
274 dma_addr, max_instr * sizeof(u64),
275 res.n_instr, res.dense_mode);
276 return 0;
277 222
278 case TC_CLSBPF_ADD: 223 cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
279 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) 224 if (!(cap & NFP_NET_BPF_CAP_RELO)) {
225 nn_err(nn, "FW does not support live reload\n");
280 return -EBUSY; 226 return -EBUSY;
227 }
228 }
281 229
282 err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code, 230 /* Something else is loaded, different program type? */
283 &dma_addr, max_instr); 231 if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
284 if (err) 232 return -EBUSY;
285 return err;
286 233
287 nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code, 234 if (old_prog && !prog)
288 dma_addr, max_instr * sizeof(u64),
289 res.n_instr, res.dense_mode);
290 return 0;
291
292 case TC_CLSBPF_DESTROY:
293 return nfp_net_bpf_stop(nn); 235 return nfp_net_bpf_stop(nn);
294 236
295 case TC_CLSBPF_STATS: 237 err = nfp_net_bpf_load(nn, prog);
296 return nfp_net_bpf_stats_update(nn, cls_bpf); 238 if (err)
239 return err;
297 240
298 default: 241 if (!old_prog)
299 return -EOPNOTSUPP; 242 nfp_net_bpf_start(nn);
300 } 243
244 return 0;
301} 245}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index a8c7615546a9..8d43491ddd6b 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -40,12 +40,6 @@
40 40
41#include "main.h" 41#include "main.h"
42 42
43/* Analyzer/verifier definitions */
44struct nfp_bpf_analyzer_priv {
45 struct nfp_prog *prog;
46 struct nfp_insn_meta *meta;
47};
48
49static struct nfp_insn_meta * 43static struct nfp_insn_meta *
50nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 44nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
51 unsigned int insn_idx, unsigned int n_insns) 45 unsigned int insn_idx, unsigned int n_insns)
@@ -81,7 +75,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
81 const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0; 75 const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
82 u64 imm; 76 u64 imm;
83 77
84 if (nfp_prog->act == NN_ACT_XDP) 78 if (nfp_prog->type == BPF_PROG_TYPE_XDP)
85 return 0; 79 return 0;
86 80
87 if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) { 81 if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
@@ -94,13 +88,8 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
94 } 88 }
95 89
96 imm = reg0->var_off.value; 90 imm = reg0->var_off.value;
97 if (nfp_prog->act != NN_ACT_DIRECT && imm != 0 && (imm & ~0U) != ~0U) { 91 if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
98 pr_info("unsupported exit state: %d, imm: %llx\n", 92 imm <= TC_ACT_REDIRECT &&
99 reg0->type, imm);
100 return -EINVAL;
101 }
102
103 if (nfp_prog->act == NN_ACT_DIRECT && imm <= TC_ACT_REDIRECT &&
104 imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN && 93 imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
105 imm != TC_ACT_QUEUED) { 94 imm != TC_ACT_QUEUED) {
106 pr_info("unsupported exit state: %d, imm: %llx\n", 95 pr_info("unsupported exit state: %d, imm: %llx\n",
@@ -176,11 +165,11 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
176static int 165static int
177nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) 166nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
178{ 167{
179 struct nfp_bpf_analyzer_priv *priv = env->analyzer_priv; 168 struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
180 struct nfp_insn_meta *meta = priv->meta; 169 struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
181 170
182 meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len); 171 meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
183 priv->meta = meta; 172 nfp_prog->verifier_meta = meta;
184 173
185 if (meta->insn.src_reg >= MAX_BPF_REG || 174 if (meta->insn.src_reg >= MAX_BPF_REG ||
186 meta->insn.dst_reg >= MAX_BPF_REG) { 175 meta->insn.dst_reg >= MAX_BPF_REG) {
@@ -189,39 +178,18 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
189 } 178 }
190 179
191 if (meta->insn.code == (BPF_JMP | BPF_EXIT)) 180 if (meta->insn.code == (BPF_JMP | BPF_EXIT))
192 return nfp_bpf_check_exit(priv->prog, env); 181 return nfp_bpf_check_exit(nfp_prog, env);
193 182
194 if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM)) 183 if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
195 return nfp_bpf_check_ptr(priv->prog, meta, env, 184 return nfp_bpf_check_ptr(nfp_prog, meta, env,
196 meta->insn.src_reg); 185 meta->insn.src_reg);
197 if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM)) 186 if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
198 return nfp_bpf_check_ptr(priv->prog, meta, env, 187 return nfp_bpf_check_ptr(nfp_prog, meta, env,
199 meta->insn.dst_reg); 188 meta->insn.dst_reg);
200 189
201 return 0; 190 return 0;
202} 191}
203 192
204static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = { 193const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
205 .insn_hook = nfp_verify_insn, 194 .insn_hook = nfp_verify_insn,
206}; 195};
207
208int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
209{
210 struct nfp_bpf_analyzer_priv *priv;
211 int ret;
212
213 nfp_prog->stack_depth = prog->aux->stack_depth;
214
215 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
216 if (!priv)
217 return -ENOMEM;
218
219 priv->prog = nfp_prog;
220 priv->meta = nfp_prog_first_meta(nfp_prog);
221
222 ret = bpf_analyzer(prog, &nfp_bpf_analyzer_ops, priv);
223
224 kfree(priv);
225
226 return ret;
227}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 857bb33020ba..54b67c9b8d5b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -42,6 +42,7 @@
42 42
43struct bpf_prog; 43struct bpf_prog;
44struct net_device; 44struct net_device;
45struct netdev_bpf;
45struct pci_dev; 46struct pci_dev;
46struct sk_buff; 47struct sk_buff;
47struct sk_buff; 48struct sk_buff;
@@ -83,6 +84,9 @@ extern const struct nfp_app_type app_flower;
83 * @setup_tc: setup TC ndo 84 * @setup_tc: setup TC ndo
84 * @tc_busy: TC HW offload busy (rules loaded) 85 * @tc_busy: TC HW offload busy (rules loaded)
85 * @xdp_offload: offload an XDP program 86 * @xdp_offload: offload an XDP program
87 * @bpf_verifier_prep: verifier prep for dev-specific BPF programs
88 * @bpf_translate: translate call for dev-specific BPF programs
89 * @bpf_destroy: destroy for dev-specific BPF programs
86 * @eswitch_mode_get: get SR-IOV eswitch mode 90 * @eswitch_mode_get: get SR-IOV eswitch mode
87 * @sriov_enable: app-specific sriov initialisation 91 * @sriov_enable: app-specific sriov initialisation
88 * @sriov_disable: app-specific sriov clean-up 92 * @sriov_disable: app-specific sriov clean-up
@@ -118,6 +122,12 @@ struct nfp_app_type {
118 bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn); 122 bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
119 int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn, 123 int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
120 struct bpf_prog *prog); 124 struct bpf_prog *prog);
125 int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
126 struct netdev_bpf *bpf);
127 int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
128 struct bpf_prog *prog);
129 int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
130 struct bpf_prog *prog);
121 131
122 int (*sriov_enable)(struct nfp_app *app, int num_vfs); 132 int (*sriov_enable)(struct nfp_app *app, int num_vfs);
123 void (*sriov_disable)(struct nfp_app *app); 133 void (*sriov_disable)(struct nfp_app *app);
@@ -271,6 +281,33 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
271 return app->type->xdp_offload(app, nn, prog); 281 return app->type->xdp_offload(app, nn, prog);
272} 282}
273 283
284static inline int
285nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
286 struct netdev_bpf *bpf)
287{
288 if (!app || !app->type->bpf_verifier_prep)
289 return -EOPNOTSUPP;
290 return app->type->bpf_verifier_prep(app, nn, bpf);
291}
292
293static inline int
294nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
295 struct bpf_prog *prog)
296{
297 if (!app || !app->type->bpf_translate)
298 return -EOPNOTSUPP;
299 return app->type->bpf_translate(app, nn, prog);
300}
301
302static inline int
303nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
304 struct bpf_prog *prog)
305{
306 if (!app || !app->type->bpf_destroy)
307 return -EOPNOTSUPP;
308 return app->type->bpf_destroy(app, nn, prog);
309}
310
274static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb) 311static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
275{ 312{
276 trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0, 313 trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 3d411f0d15b6..7f9857c276b1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -476,7 +476,6 @@ struct nfp_stat_pair {
476 * @dev: Backpointer to struct device 476 * @dev: Backpointer to struct device
477 * @netdev: Backpointer to net_device structure 477 * @netdev: Backpointer to net_device structure
478 * @is_vf: Is the driver attached to a VF? 478 * @is_vf: Is the driver attached to a VF?
479 * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
480 * @bpf_offload_xdp: Offloaded BPF program is XDP 479 * @bpf_offload_xdp: Offloaded BPF program is XDP
481 * @chained_metadata_format: Firemware will use new metadata format 480 * @chained_metadata_format: Firemware will use new metadata format
482 * @rx_dma_dir: Mapping direction for RX buffers 481 * @rx_dma_dir: Mapping direction for RX buffers
@@ -502,7 +501,6 @@ struct nfp_net_dp {
502 struct net_device *netdev; 501 struct net_device *netdev;
503 502
504 u8 is_vf:1; 503 u8 is_vf:1;
505 u8 bpf_offload_skip_sw:1;
506 u8 bpf_offload_xdp:1; 504 u8 bpf_offload_xdp:1;
507 u8 chained_metadata_format:1; 505 u8 chained_metadata_format:1;
508 506
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 185a3dd35a3f..232044b1b7aa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3378,7 +3378,7 @@ nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
3378 return 0; 3378 return 0;
3379} 3379}
3380 3380
3381static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp) 3381static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
3382{ 3382{
3383 struct nfp_net *nn = netdev_priv(netdev); 3383 struct nfp_net *nn = netdev_priv(netdev);
3384 3384
@@ -3393,6 +3393,14 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
3393 xdp->prog_attached = XDP_ATTACHED_HW; 3393 xdp->prog_attached = XDP_ATTACHED_HW;
3394 xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0; 3394 xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
3395 return 0; 3395 return 0;
3396 case BPF_OFFLOAD_VERIFIER_PREP:
3397 return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
3398 case BPF_OFFLOAD_TRANSLATE:
3399 return nfp_app_bpf_translate(nn->app, nn,
3400 xdp->offload.prog);
3401 case BPF_OFFLOAD_DESTROY:
3402 return nfp_app_bpf_destroy(nn->app, nn,
3403 xdp->offload.prog);
3396 default: 3404 default:
3397 return -EINVAL; 3405 return -EINVAL;
3398 } 3406 }
@@ -3441,7 +3449,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
3441 .ndo_get_phys_port_name = nfp_port_get_phys_port_name, 3449 .ndo_get_phys_port_name = nfp_port_get_phys_port_name,
3442 .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, 3450 .ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
3443 .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, 3451 .ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
3444 .ndo_xdp = nfp_net_xdp, 3452 .ndo_bpf = nfp_net_xdp,
3445}; 3453};
3446 3454
3447/** 3455/**
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index adb700512baa..a3a70ade411f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -503,7 +503,7 @@ void qede_fill_rss_params(struct qede_dev *edev,
503void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti); 503void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
504void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti); 504void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);
505 505
506int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp); 506int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);
507 507
508#ifdef CONFIG_DCB 508#ifdef CONFIG_DCB
509void qede_set_dcbnl_ops(struct net_device *ndev); 509void qede_set_dcbnl_ops(struct net_device *ndev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index f79e36e4060a..c1a0708a7d7c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1065,7 +1065,7 @@ static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
1065 return 0; 1065 return 0;
1066} 1066}
1067 1067
1068int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp) 1068int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1069{ 1069{
1070 struct qede_dev *edev = netdev_priv(dev); 1070 struct qede_dev *edev = netdev_priv(dev);
1071 1071
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index e5ee9f274a71..8f9b3eb82137 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -556,7 +556,7 @@ static const struct net_device_ops qede_netdev_ops = {
556 .ndo_udp_tunnel_add = qede_udp_tunnel_add, 556 .ndo_udp_tunnel_add = qede_udp_tunnel_add,
557 .ndo_udp_tunnel_del = qede_udp_tunnel_del, 557 .ndo_udp_tunnel_del = qede_udp_tunnel_del,
558 .ndo_features_check = qede_features_check, 558 .ndo_features_check = qede_features_check,
559 .ndo_xdp = qede_xdp, 559 .ndo_bpf = qede_xdp,
560#ifdef CONFIG_RFS_ACCEL 560#ifdef CONFIG_RFS_ACCEL
561 .ndo_rx_flow_steer = qede_rx_flow_steer, 561 .ndo_rx_flow_steer = qede_rx_flow_steer,
562#endif 562#endif
@@ -594,7 +594,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
594 .ndo_udp_tunnel_add = qede_udp_tunnel_add, 594 .ndo_udp_tunnel_add = qede_udp_tunnel_add,
595 .ndo_udp_tunnel_del = qede_udp_tunnel_del, 595 .ndo_udp_tunnel_del = qede_udp_tunnel_del,
596 .ndo_features_check = qede_features_check, 596 .ndo_features_check = qede_features_check,
597 .ndo_xdp = qede_xdp, 597 .ndo_bpf = qede_xdp,
598}; 598};
599 599
600/* ------------------------------------------------------------------------- 600/* -------------------------------------------------------------------------
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8125956f62a1..1a326b697221 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1141,7 +1141,7 @@ static u32 tun_xdp_query(struct net_device *dev)
1141 return 0; 1141 return 0;
1142} 1142}
1143 1143
1144static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp) 1144static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1145{ 1145{
1146 switch (xdp->command) { 1146 switch (xdp->command) {
1147 case XDP_SETUP_PROG: 1147 case XDP_SETUP_PROG:
@@ -1185,7 +1185,7 @@ static const struct net_device_ops tap_netdev_ops = {
1185 .ndo_features_check = passthru_features_check, 1185 .ndo_features_check = passthru_features_check,
1186 .ndo_set_rx_headroom = tun_set_headroom, 1186 .ndo_set_rx_headroom = tun_set_headroom,
1187 .ndo_get_stats64 = tun_net_get_stats64, 1187 .ndo_get_stats64 = tun_net_get_stats64,
1188 .ndo_xdp = tun_xdp, 1188 .ndo_bpf = tun_xdp,
1189}; 1189};
1190 1190
1191static void tun_flow_init(struct tun_struct *tun) 1191static void tun_flow_init(struct tun_struct *tun)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fc059f193e7d..edf984406ba0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2088,7 +2088,7 @@ static u32 virtnet_xdp_query(struct net_device *dev)
2088 return 0; 2088 return 0;
2089} 2089}
2090 2090
2091static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp) 2091static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2092{ 2092{
2093 switch (xdp->command) { 2093 switch (xdp->command) {
2094 case XDP_SETUP_PROG: 2094 case XDP_SETUP_PROG:
@@ -2115,7 +2115,7 @@ static const struct net_device_ops virtnet_netdev = {
2115#ifdef CONFIG_NET_POLL_CONTROLLER 2115#ifdef CONFIG_NET_POLL_CONTROLLER
2116 .ndo_poll_controller = virtnet_netpoll, 2116 .ndo_poll_controller = virtnet_netpoll,
2117#endif 2117#endif
2118 .ndo_xdp = virtnet_xdp, 2118 .ndo_bpf = virtnet_xdp,
2119 .ndo_xdp_xmit = virtnet_xdp_xmit, 2119 .ndo_xdp_xmit = virtnet_xdp_xmit,
2120 .ndo_xdp_flush = virtnet_xdp_flush, 2120 .ndo_xdp_flush = virtnet_xdp_flush,
2121 .ndo_features_check = passthru_features_check, 2121 .ndo_features_check = passthru_features_check,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 520aeebe0d93..c397934f91dd 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -15,6 +15,7 @@
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/rbtree_latch.h> 16#include <linux/rbtree_latch.h>
17#include <linux/numa.h> 17#include <linux/numa.h>
18#include <linux/wait.h>
18 19
19struct perf_event; 20struct perf_event;
20struct bpf_prog; 21struct bpf_prog;
@@ -182,6 +183,16 @@ struct bpf_verifier_ops {
182 struct bpf_prog *prog, u32 *target_size); 183 struct bpf_prog *prog, u32 *target_size);
183}; 184};
184 185
186struct bpf_dev_offload {
187 struct bpf_prog *prog;
188 struct net_device *netdev;
189 void *dev_priv;
190 struct list_head offloads;
191 bool dev_state;
192 bool verifier_running;
193 wait_queue_head_t verifier_done;
194};
195
185struct bpf_prog_aux { 196struct bpf_prog_aux {
186 atomic_t refcnt; 197 atomic_t refcnt;
187 u32 used_map_cnt; 198 u32 used_map_cnt;
@@ -199,6 +210,7 @@ struct bpf_prog_aux {
199#ifdef CONFIG_SECURITY 210#ifdef CONFIG_SECURITY
200 void *security; 211 void *security;
201#endif 212#endif
213 struct bpf_dev_offload *offload;
202 union { 214 union {
203 struct work_struct work; 215 struct work_struct work;
204 struct rcu_head rcu; 216 struct rcu_head rcu;
@@ -317,11 +329,14 @@ extern const struct file_operations bpf_prog_fops;
317#undef BPF_PROG_TYPE 329#undef BPF_PROG_TYPE
318#undef BPF_MAP_TYPE 330#undef BPF_MAP_TYPE
319 331
332extern const struct bpf_prog_ops bpf_offload_prog_ops;
320extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; 333extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
321extern const struct bpf_verifier_ops xdp_analyzer_ops; 334extern const struct bpf_verifier_ops xdp_analyzer_ops;
322 335
323struct bpf_prog *bpf_prog_get(u32 ufd); 336struct bpf_prog *bpf_prog_get(u32 ufd);
324struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); 337struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
338struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
339 struct net_device *netdev);
325struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); 340struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
326void bpf_prog_sub(struct bpf_prog *prog, int i); 341void bpf_prog_sub(struct bpf_prog *prog, int i);
327struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); 342struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
@@ -415,6 +430,14 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
415{ 430{
416 return ERR_PTR(-EOPNOTSUPP); 431 return ERR_PTR(-EOPNOTSUPP);
417} 432}
433
434static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
435 enum bpf_prog_type type,
436 struct net_device *netdev)
437{
438 return ERR_PTR(-EOPNOTSUPP);
439}
440
418static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, 441static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
419 int i) 442 int i)
420{ 443{
@@ -491,6 +514,30 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
491} 514}
492#endif /* CONFIG_BPF_SYSCALL */ 515#endif /* CONFIG_BPF_SYSCALL */
493 516
517int bpf_prog_offload_compile(struct bpf_prog *prog);
518void bpf_prog_offload_destroy(struct bpf_prog *prog);
519u32 bpf_prog_offload_ifindex(struct bpf_prog *prog);
520
521#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
522int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
523
524static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
525{
526 return aux->offload;
527}
528#else
529static inline int bpf_prog_offload_init(struct bpf_prog *prog,
530 union bpf_attr *attr)
531{
532 return -EOPNOTSUPP;
533}
534
535static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
536{
537 return false;
538}
539#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
540
494#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) 541#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
495struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); 542struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
496int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); 543int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 3b0976aaac75..07b96aaca256 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -152,8 +152,7 @@ struct bpf_verifier_env {
152 bool strict_alignment; /* perform strict pointer alignment checks */ 152 bool strict_alignment; /* perform strict pointer alignment checks */
153 struct bpf_verifier_state *cur_state; /* current verifier state */ 153 struct bpf_verifier_state *cur_state; /* current verifier state */
154 struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ 154 struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
155 const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */ 155 const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
156 void *analyzer_priv; /* pointer to external analyzer's private data */
157 struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ 156 struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
158 u32 used_map_cnt; /* number of used maps */ 157 u32 used_map_cnt; /* number of used maps */
159 u32 id_gen; /* used to generate unique reg IDs */ 158 u32 id_gen; /* used to generate unique reg IDs */
@@ -169,7 +168,13 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
169 return env->cur_state->regs; 168 return env->cur_state->regs;
170} 169}
171 170
172int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops, 171#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
173 void *priv); 172int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
173#else
174int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
175{
176 return -EOPNOTSUPP;
177}
178#endif
174 179
175#endif /* _LINUX_BPF_VERIFIER_H */ 180#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7de7656550c2..fda527ccb263 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -779,10 +779,10 @@ enum tc_setup_type {
779 TC_SETUP_CBS, 779 TC_SETUP_CBS,
780}; 780};
781 781
782/* These structures hold the attributes of xdp state that are being passed 782/* These structures hold the attributes of bpf state that are being passed
783 * to the netdevice through the xdp op. 783 * to the netdevice through the bpf op.
784 */ 784 */
785enum xdp_netdev_command { 785enum bpf_netdev_command {
786 /* Set or clear a bpf program used in the earliest stages of packet 786 /* Set or clear a bpf program used in the earliest stages of packet
787 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee 787 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
788 * is responsible for calling bpf_prog_put on any old progs that are 788 * is responsible for calling bpf_prog_put on any old progs that are
@@ -797,12 +797,17 @@ enum xdp_netdev_command {
797 * is equivalent to XDP_ATTACHED_DRV. 797 * is equivalent to XDP_ATTACHED_DRV.
798 */ 798 */
799 XDP_QUERY_PROG, 799 XDP_QUERY_PROG,
800 /* BPF program for offload callbacks, invoked at program load time. */
801 BPF_OFFLOAD_VERIFIER_PREP,
802 BPF_OFFLOAD_TRANSLATE,
803 BPF_OFFLOAD_DESTROY,
800}; 804};
801 805
806struct bpf_ext_analyzer_ops;
802struct netlink_ext_ack; 807struct netlink_ext_ack;
803 808
804struct netdev_xdp { 809struct netdev_bpf {
805 enum xdp_netdev_command command; 810 enum bpf_netdev_command command;
806 union { 811 union {
807 /* XDP_SETUP_PROG */ 812 /* XDP_SETUP_PROG */
808 struct { 813 struct {
@@ -815,6 +820,15 @@ struct netdev_xdp {
815 u8 prog_attached; 820 u8 prog_attached;
816 u32 prog_id; 821 u32 prog_id;
817 }; 822 };
823 /* BPF_OFFLOAD_VERIFIER_PREP */
824 struct {
825 struct bpf_prog *prog;
826 const struct bpf_ext_analyzer_ops *ops; /* callee set */
827 } verifier;
828 /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
829 struct {
830 struct bpf_prog *prog;
831 } offload;
818 }; 832 };
819}; 833};
820 834
@@ -1124,9 +1138,10 @@ struct dev_ifalias {
1124 * appropriate rx headroom value allows avoiding skb head copy on 1138 * appropriate rx headroom value allows avoiding skb head copy on
1125 * forward. Setting a negative value resets the rx headroom to the 1139 * forward. Setting a negative value resets the rx headroom to the
1126 * default value. 1140 * default value.
1127 * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp); 1141 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
1128 * This function is used to set or query state related to XDP on the 1142 * This function is used to set or query state related to XDP on the
1129 * netdevice. See definition of enum xdp_netdev_command for details. 1143 * netdevice and manage BPF offload. See definition of
1144 * enum bpf_netdev_command for details.
1130 * int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp); 1145 * int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp);
1131 * This function is used to submit a XDP packet for transmit on a 1146 * This function is used to submit a XDP packet for transmit on a
1132 * netdevice. 1147 * netdevice.
@@ -1315,8 +1330,8 @@ struct net_device_ops {
1315 struct sk_buff *skb); 1330 struct sk_buff *skb);
1316 void (*ndo_set_rx_headroom)(struct net_device *dev, 1331 void (*ndo_set_rx_headroom)(struct net_device *dev,
1317 int needed_headroom); 1332 int needed_headroom);
1318 int (*ndo_xdp)(struct net_device *dev, 1333 int (*ndo_bpf)(struct net_device *dev,
1319 struct netdev_xdp *xdp); 1334 struct netdev_bpf *bpf);
1320 int (*ndo_xdp_xmit)(struct net_device *dev, 1335 int (*ndo_xdp_xmit)(struct net_device *dev,
1321 struct xdp_buff *xdp); 1336 struct xdp_buff *xdp);
1322 void (*ndo_xdp_flush)(struct net_device *dev); 1337 void (*ndo_xdp_flush)(struct net_device *dev);
@@ -3311,10 +3326,10 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
3311struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 3326struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3312 struct netdev_queue *txq, int *ret); 3327 struct netdev_queue *txq, int *ret);
3313 3328
3314typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp); 3329typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
3315int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 3330int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3316 int fd, u32 flags); 3331 int fd, u32 flags);
3317u8 __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id); 3332u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t xdp_op, u32 *prog_id);
3318 3333
3319int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3334int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3320int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3335int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index a9820677c2ff..4455dd195201 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -260,6 +260,7 @@ union bpf_attr {
260 __u32 kern_version; /* checked when prog_type=kprobe */ 260 __u32 kern_version; /* checked when prog_type=kprobe */
261 __u32 prog_flags; 261 __u32 prog_flags;
262 char prog_name[BPF_OBJ_NAME_LEN]; 262 char prog_name[BPF_OBJ_NAME_LEN];
263 __u32 prog_target_ifindex; /* ifindex of netdev to prep for */
263 }; 264 };
264 265
265 struct { /* anonymous struct used by BPF_OBJ_* commands */ 266 struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -894,6 +895,10 @@ enum sk_action {
894 895
895#define BPF_TAG_SIZE 8 896#define BPF_TAG_SIZE 8
896 897
898enum bpf_prog_status {
899 BPF_PROG_STATUS_DEV_BOUND = (1 << 0),
900};
901
897struct bpf_prog_info { 902struct bpf_prog_info {
898 __u32 type; 903 __u32 type;
899 __u32 id; 904 __u32 id;
@@ -907,6 +912,8 @@ struct bpf_prog_info {
907 __u32 nr_map_ids; 912 __u32 nr_map_ids;
908 __aligned_u64 map_ids; 913 __aligned_u64 map_ids;
909 char name[BPF_OBJ_NAME_LEN]; 914 char name[BPF_OBJ_NAME_LEN];
915 __u32 ifindex;
916 __u32 status;
910} __attribute__((aligned(8))); 917} __attribute__((aligned(8)));
911 918
912struct bpf_map_info { 919struct bpf_map_info {
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 16e95c8e749e..e691da0b3bab 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_BPF_SYSCALL) += disasm.o
7ifeq ($(CONFIG_NET),y) 7ifeq ($(CONFIG_NET),y)
8obj-$(CONFIG_BPF_SYSCALL) += devmap.o 8obj-$(CONFIG_BPF_SYSCALL) += devmap.o
9obj-$(CONFIG_BPF_SYSCALL) += cpumap.o 9obj-$(CONFIG_BPF_SYSCALL) += cpumap.o
10obj-$(CONFIG_BPF_SYSCALL) += offload.o
10ifeq ($(CONFIG_STREAM_PARSER),y) 11ifeq ($(CONFIG_STREAM_PARSER),y)
11obj-$(CONFIG_BPF_SYSCALL) += sockmap.o 12obj-$(CONFIG_BPF_SYSCALL) += sockmap.o
12endif 13endif
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 7fe448799d76..8a6c37762330 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1380,7 +1380,13 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1380 * valid program, which in this case would simply not 1380 * valid program, which in this case would simply not
1381 * be JITed, but falls back to the interpreter. 1381 * be JITed, but falls back to the interpreter.
1382 */ 1382 */
1383 fp = bpf_int_jit_compile(fp); 1383 if (!bpf_prog_is_dev_bound(fp->aux)) {
1384 fp = bpf_int_jit_compile(fp);
1385 } else {
1386 *err = bpf_prog_offload_compile(fp);
1387 if (*err)
1388 return fp;
1389 }
1384 bpf_prog_lock_ro(fp); 1390 bpf_prog_lock_ro(fp);
1385 1391
1386 /* The tail call compatibility check can only be done at 1392 /* The tail call compatibility check can only be done at
@@ -1549,6 +1555,8 @@ static void bpf_prog_free_deferred(struct work_struct *work)
1549 struct bpf_prog_aux *aux; 1555 struct bpf_prog_aux *aux;
1550 1556
1551 aux = container_of(work, struct bpf_prog_aux, work); 1557 aux = container_of(work, struct bpf_prog_aux, work);
1558 if (bpf_prog_is_dev_bound(aux))
1559 bpf_prog_offload_destroy(aux->prog);
1552 bpf_jit_free(aux->prog); 1560 bpf_jit_free(aux->prog);
1553} 1561}
1554 1562
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
new file mode 100644
index 000000000000..2816feb38be1
--- /dev/null
+++ b/kernel/bpf/offload.c
@@ -0,0 +1,194 @@
1#include <linux/bpf.h>
2#include <linux/bpf_verifier.h>
3#include <linux/bug.h>
4#include <linux/list.h>
5#include <linux/netdevice.h>
6#include <linux/printk.h>
7#include <linux/rtnetlink.h>
8
9/* protected by RTNL */
10static LIST_HEAD(bpf_prog_offload_devs);
11
12int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
13{
14 struct net *net = current->nsproxy->net_ns;
15 struct bpf_dev_offload *offload;
16
17 if (!capable(CAP_SYS_ADMIN))
18 return -EPERM;
19
20 if (attr->prog_flags)
21 return -EINVAL;
22
23 offload = kzalloc(sizeof(*offload), GFP_USER);
24 if (!offload)
25 return -ENOMEM;
26
27 offload->prog = prog;
28 init_waitqueue_head(&offload->verifier_done);
29
30 rtnl_lock();
31 offload->netdev = __dev_get_by_index(net, attr->prog_target_ifindex);
32 if (!offload->netdev) {
33 rtnl_unlock();
34 kfree(offload);
35 return -EINVAL;
36 }
37
38 prog->aux->offload = offload;
39 list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
40 rtnl_unlock();
41
42 return 0;
43}
44
45static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
46 struct netdev_bpf *data)
47{
48 struct net_device *netdev = prog->aux->offload->netdev;
49
50 ASSERT_RTNL();
51
52 if (!netdev)
53 return -ENODEV;
54 if (!netdev->netdev_ops->ndo_bpf)
55 return -EOPNOTSUPP;
56
57 data->command = cmd;
58
59 return netdev->netdev_ops->ndo_bpf(netdev, data);
60}
61
/* BPF_OFFLOAD_VERIFIER_PREP: ask the bound driver for its verifier
 * callbacks before the core verifier walks the program.  On success the
 * driver's ops are installed in env->dev_ops and the offload state is
 * marked live so the destroy path knows to notify the device.
 */
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	struct netdev_bpf data = {};
	int err;

	data.verifier.prog = env->prog;

	rtnl_lock();
	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
	if (err)
		goto exit_unlock;

	/* filled in by the driver's ndo_bpf() callback */
	env->dev_ops = data.verifier.ops;

	/* dev_state: device now holds per-prog state that must be torn
	 * down; verifier_running: destroy will wait for the verifier to
	 * finish before talking to the device again.
	 */
	env->prog->aux->offload->dev_state = true;
	env->prog->aux->offload->verifier_running = true;
exit_unlock:
	rtnl_unlock();
	return err;
}
82
/* Tear down device state for @prog and unlink it from the offload list.
 * Caller must hold RTNL.  Safe to call when the device is already gone
 * (offload->netdev == NULL) or when no device state was ever created.
 */
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	data.offload.prog = prog;

	/* Don't destroy device state from under a verifier that is still
	 * using the driver callbacks; translate/destroy clear the flag.
	 */
	if (offload->verifier_running)
		wait_event(offload->verifier_done, !offload->verifier_running);

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	offload->dev_state = false;
	/* list_del_init so a second call (e.g. from prog free after the
	 * netdev notifier already ran) is a harmless no-op on the list.
	 */
	list_del_init(&offload->offloads);
	offload->netdev = NULL;
}
100
/* Final teardown, called from the deferred prog-free path.  Marks the
 * verifier as no longer running (the prog is dying, no verification can
 * be in flight), notifies the device under RTNL and frees the offload
 * descriptor allocated in bpf_prog_offload_init().
 */
void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;

	offload->verifier_running = false;
	wake_up(&offload->verifier_done);

	rtnl_lock();
	__bpf_prog_offload_destroy(prog);
	rtnl_unlock();

	kfree(offload);
}
114
/* BPF_OFFLOAD_TRANSLATE: verification is complete, let the driver
 * compile the program for the device.  Clears verifier_running first so
 * a concurrent __bpf_prog_offload_destroy() waiting on verifier_done
 * can proceed.  Returns the driver's verdict.
 */
static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};
	int ret;

	data.offload.prog = prog;

	offload->verifier_running = false;
	wake_up(&offload->verifier_done);

	rtnl_lock();
	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
	rtnl_unlock();

	return ret;
}
132
/* Stub installed as prog->bpf_func for device-bound programs: they must
 * never execute on the host, so any invocation is a kernel bug.
 */
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}
139
/* Device-bound replacement for bpf_int_jit_compile(): point the host
 * entry at the warning stub and hand the program to the device driver.
 */
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}
146
147u32 bpf_prog_offload_ifindex(struct bpf_prog *prog)
148{
149 struct bpf_dev_offload *offload = prog->aux->offload;
150 u32 ifindex;
151
152 rtnl_lock();
153 ifindex = offload->netdev ? offload->netdev->ifindex : 0;
154 rtnl_unlock();
155
156 return ifindex;
157}
158
/* Device-bound programs have no host runtime ops (no test_run etc.). */
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
161
/* Netdev notifier: when a device with bound programs is unregistered,
 * destroy their device state so no stale ->netdev pointers remain.
 * Runs under RTNL (hence ASSERT_RTNL), which also protects the list walk.
 */
static int bpf_offload_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dev_offload *offload, *tmp;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_UNREGISTER:
		/* _safe variant: __bpf_prog_offload_destroy() unlinks entries */
		list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
					 offloads) {
			if (offload->netdev == netdev)
				__bpf_prog_offload_destroy(offload->prog);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
183
/* Registered at boot (see bpf_offload_init below). */
static struct notifier_block bpf_offload_notifier = {
	.notifier_call = bpf_offload_notification,
};
187
188static int __init bpf_offload_init(void)
189{
190 register_netdevice_notifier(&bpf_offload_notifier);
191 return 0;
192}
193
194subsys_initcall(bpf_offload_init);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 323be2473c4b..416d70cdfc76 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -824,7 +824,10 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
824 if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type]) 824 if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
825 return -EINVAL; 825 return -EINVAL;
826 826
827 prog->aux->ops = bpf_prog_types[type]; 827 if (!bpf_prog_is_dev_bound(prog->aux))
828 prog->aux->ops = bpf_prog_types[type];
829 else
830 prog->aux->ops = &bpf_offload_prog_ops;
828 prog->type = type; 831 prog->type = type;
829 return 0; 832 return 0;
830} 833}
@@ -1054,7 +1057,22 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1054} 1057}
1055EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 1058EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1056 1059
1057static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type) 1060static bool bpf_prog_can_attach(struct bpf_prog *prog,
1061 enum bpf_prog_type *attach_type,
1062 struct net_device *netdev)
1063{
1064 struct bpf_dev_offload *offload = prog->aux->offload;
1065
1066 if (prog->type != *attach_type)
1067 return false;
1068 if (offload && offload->netdev != netdev)
1069 return false;
1070
1071 return true;
1072}
1073
1074static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1075 struct net_device *netdev)
1058{ 1076{
1059 struct fd f = fdget(ufd); 1077 struct fd f = fdget(ufd);
1060 struct bpf_prog *prog; 1078 struct bpf_prog *prog;
@@ -1062,7 +1080,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
1062 prog = ____bpf_prog_get(f); 1080 prog = ____bpf_prog_get(f);
1063 if (IS_ERR(prog)) 1081 if (IS_ERR(prog))
1064 return prog; 1082 return prog;
1065 if (type && prog->type != *type) { 1083 if (attach_type && !bpf_prog_can_attach(prog, attach_type, netdev)) {
1066 prog = ERR_PTR(-EINVAL); 1084 prog = ERR_PTR(-EINVAL);
1067 goto out; 1085 goto out;
1068 } 1086 }
@@ -1075,12 +1093,12 @@ out:
1075 1093
1076struct bpf_prog *bpf_prog_get(u32 ufd) 1094struct bpf_prog *bpf_prog_get(u32 ufd)
1077{ 1095{
1078 return __bpf_prog_get(ufd, NULL); 1096 return __bpf_prog_get(ufd, NULL, NULL);
1079} 1097}
1080 1098
1081struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type) 1099struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
1082{ 1100{
1083 struct bpf_prog *prog = __bpf_prog_get(ufd, &type); 1101 struct bpf_prog *prog = __bpf_prog_get(ufd, &type, NULL);
1084 1102
1085 if (!IS_ERR(prog)) 1103 if (!IS_ERR(prog))
1086 trace_bpf_prog_get_type(prog); 1104 trace_bpf_prog_get_type(prog);
@@ -1088,8 +1106,19 @@ struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
1088} 1106}
1089EXPORT_SYMBOL_GPL(bpf_prog_get_type); 1107EXPORT_SYMBOL_GPL(bpf_prog_get_type);
1090 1108
/* Like bpf_prog_get_type(), but also enforces device binding: a program
 * bound to a netdev may only be attached to that same netdev (@netdev is
 * compared against the prog's offload binding inside __bpf_prog_get()).
 */
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       struct net_device *netdev)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type, netdev);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1119
1091/* last field in 'union bpf_attr' used by this command */ 1120/* last field in 'union bpf_attr' used by this command */
1092#define BPF_PROG_LOAD_LAST_FIELD prog_name 1121#define BPF_PROG_LOAD_LAST_FIELD prog_target_ifindex
1093 1122
1094static int bpf_prog_load(union bpf_attr *attr) 1123static int bpf_prog_load(union bpf_attr *attr)
1095{ 1124{
@@ -1152,6 +1181,12 @@ static int bpf_prog_load(union bpf_attr *attr)
1152 atomic_set(&prog->aux->refcnt, 1); 1181 atomic_set(&prog->aux->refcnt, 1);
1153 prog->gpl_compatible = is_gpl ? 1 : 0; 1182 prog->gpl_compatible = is_gpl ? 1 : 0;
1154 1183
1184 if (attr->prog_target_ifindex) {
1185 err = bpf_prog_offload_init(prog, attr);
1186 if (err)
1187 goto free_prog;
1188 }
1189
1155 /* find program type: socket_filter vs tracing_filter */ 1190 /* find program type: socket_filter vs tracing_filter */
1156 err = find_prog_type(type, prog); 1191 err = find_prog_type(type, prog);
1157 if (err < 0) 1192 if (err < 0)
@@ -1583,6 +1618,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
1583 return -EFAULT; 1618 return -EFAULT;
1584 } 1619 }
1585 1620
1621 if (bpf_prog_is_dev_bound(prog->aux)) {
1622 info.status |= BPF_PROG_STATUS_DEV_BOUND;
1623 info.ifindex = bpf_prog_offload_ifindex(prog);
1624 }
1625
1586done: 1626done:
1587 if (copy_to_user(uinfo, &info, info_len) || 1627 if (copy_to_user(uinfo, &info, info_len) ||
1588 put_user(info_len, &uattr->info.info_len)) 1628 put_user(info_len, &uattr->info.info_len))
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 04357ad5a812..add845fe788a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -949,9 +949,6 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
949 */ 949 */
950 *reg_type = info.reg_type; 950 *reg_type = info.reg_type;
951 951
952 if (env->analyzer_ops)
953 return 0;
954
955 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; 952 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
956 /* remember the offset of last byte accessed in ctx */ 953 /* remember the offset of last byte accessed in ctx */
957 if (env->prog->aux->max_ctx_offset < off + size) 954 if (env->prog->aux->max_ctx_offset < off + size)
@@ -3736,10 +3733,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
3736static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, 3733static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
3737 int insn_idx, int prev_insn_idx) 3734 int insn_idx, int prev_insn_idx)
3738{ 3735{
3739 if (!env->analyzer_ops || !env->analyzer_ops->insn_hook) 3736 if (env->dev_ops && env->dev_ops->insn_hook)
3740 return 0; 3737 return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
3741 3738
3742 return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx); 3739 return 0;
3743} 3740}
3744 3741
3745static int do_check(struct bpf_verifier_env *env) 3742static int do_check(struct bpf_verifier_env *env)
@@ -4516,6 +4513,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
4516 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 4513 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
4517 env->strict_alignment = true; 4514 env->strict_alignment = true;
4518 4515
4516 if (env->prog->aux->offload) {
4517 ret = bpf_prog_offload_verifier_prep(env);
4518 if (ret)
4519 goto err_unlock;
4520 }
4521
4519 ret = replace_map_fd_with_map_ptr(env); 4522 ret = replace_map_fd_with_map_ptr(env);
4520 if (ret < 0) 4523 if (ret < 0)
4521 goto skip_full_check; 4524 goto skip_full_check;
@@ -4592,72 +4595,3 @@ err_free_env:
4592 kfree(env); 4595 kfree(env);
4593 return ret; 4596 return ret;
4594} 4597}
4595
4596static const struct bpf_verifier_ops * const bpf_analyzer_ops[] = {
4597#ifdef CONFIG_NET
4598 [BPF_PROG_TYPE_XDP] = &xdp_analyzer_ops,
4599 [BPF_PROG_TYPE_SCHED_CLS] = &tc_cls_act_analyzer_ops,
4600#endif
4601};
4602
4603int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
4604 void *priv)
4605{
4606 struct bpf_verifier_env *env;
4607 int ret;
4608
4609 if (prog->type >= ARRAY_SIZE(bpf_analyzer_ops) ||
4610 !bpf_analyzer_ops[prog->type])
4611 return -EOPNOTSUPP;
4612
4613 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
4614 if (!env)
4615 return -ENOMEM;
4616
4617 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
4618 prog->len);
4619 ret = -ENOMEM;
4620 if (!env->insn_aux_data)
4621 goto err_free_env;
4622 env->prog = prog;
4623 env->ops = bpf_analyzer_ops[env->prog->type];
4624 env->analyzer_ops = ops;
4625 env->analyzer_priv = priv;
4626
4627 /* grab the mutex to protect few globals used by verifier */
4628 mutex_lock(&bpf_verifier_lock);
4629
4630 env->strict_alignment = false;
4631 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
4632 env->strict_alignment = true;
4633
4634 env->explored_states = kcalloc(env->prog->len,
4635 sizeof(struct bpf_verifier_state_list *),
4636 GFP_KERNEL);
4637 ret = -ENOMEM;
4638 if (!env->explored_states)
4639 goto skip_full_check;
4640
4641 ret = check_cfg(env);
4642 if (ret < 0)
4643 goto skip_full_check;
4644
4645 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
4646
4647 ret = do_check(env);
4648 if (env->cur_state) {
4649 free_verifier_state(env->cur_state, true);
4650 env->cur_state = NULL;
4651 }
4652
4653skip_full_check:
4654 while (!pop_stack(env, NULL, NULL));
4655 free_states(env);
4656
4657 mutex_unlock(&bpf_verifier_lock);
4658 vfree(env->insn_aux_data);
4659err_free_env:
4660 kfree(env);
4661 return ret;
4662}
4663EXPORT_SYMBOL_GPL(bpf_analyzer);
diff --git a/net/core/dev.c b/net/core/dev.c
index 1423cf4d695c..30b5fe32c525 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4545,7 +4545,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
4545 return ret; 4545 return ret;
4546} 4546}
4547 4547
4548static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp) 4548static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
4549{ 4549{
4550 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 4550 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
4551 struct bpf_prog *new = xdp->prog; 4551 struct bpf_prog *new = xdp->prog;
@@ -7090,26 +7090,26 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
7090} 7090}
7091EXPORT_SYMBOL(dev_change_proto_down); 7091EXPORT_SYMBOL(dev_change_proto_down);
7092 7092
7093u8 __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id) 7093u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op, u32 *prog_id)
7094{ 7094{
7095 struct netdev_xdp xdp; 7095 struct netdev_bpf xdp;
7096 7096
7097 memset(&xdp, 0, sizeof(xdp)); 7097 memset(&xdp, 0, sizeof(xdp));
7098 xdp.command = XDP_QUERY_PROG; 7098 xdp.command = XDP_QUERY_PROG;
7099 7099
7100 /* Query must always succeed. */ 7100 /* Query must always succeed. */
7101 WARN_ON(xdp_op(dev, &xdp) < 0); 7101 WARN_ON(bpf_op(dev, &xdp) < 0);
7102 if (prog_id) 7102 if (prog_id)
7103 *prog_id = xdp.prog_id; 7103 *prog_id = xdp.prog_id;
7104 7104
7105 return xdp.prog_attached; 7105 return xdp.prog_attached;
7106} 7106}
7107 7107
7108static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op, 7108static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
7109 struct netlink_ext_ack *extack, u32 flags, 7109 struct netlink_ext_ack *extack, u32 flags,
7110 struct bpf_prog *prog) 7110 struct bpf_prog *prog)
7111{ 7111{
7112 struct netdev_xdp xdp; 7112 struct netdev_bpf xdp;
7113 7113
7114 memset(&xdp, 0, sizeof(xdp)); 7114 memset(&xdp, 0, sizeof(xdp));
7115 if (flags & XDP_FLAGS_HW_MODE) 7115 if (flags & XDP_FLAGS_HW_MODE)
@@ -7120,7 +7120,7 @@ static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
7120 xdp.flags = flags; 7120 xdp.flags = flags;
7121 xdp.prog = prog; 7121 xdp.prog = prog;
7122 7122
7123 return xdp_op(dev, &xdp); 7123 return bpf_op(dev, &xdp);
7124} 7124}
7125 7125
7126/** 7126/**
@@ -7137,32 +7137,36 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
7137{ 7137{
7138 const struct net_device_ops *ops = dev->netdev_ops; 7138 const struct net_device_ops *ops = dev->netdev_ops;
7139 struct bpf_prog *prog = NULL; 7139 struct bpf_prog *prog = NULL;
7140 xdp_op_t xdp_op, xdp_chk; 7140 bpf_op_t bpf_op, bpf_chk;
7141 int err; 7141 int err;
7142 7142
7143 ASSERT_RTNL(); 7143 ASSERT_RTNL();
7144 7144
7145 xdp_op = xdp_chk = ops->ndo_xdp; 7145 bpf_op = bpf_chk = ops->ndo_bpf;
7146 if (!xdp_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) 7146 if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
7147 return -EOPNOTSUPP; 7147 return -EOPNOTSUPP;
7148 if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE)) 7148 if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
7149 xdp_op = generic_xdp_install; 7149 bpf_op = generic_xdp_install;
7150 if (xdp_op == xdp_chk) 7150 if (bpf_op == bpf_chk)
7151 xdp_chk = generic_xdp_install; 7151 bpf_chk = generic_xdp_install;
7152 7152
7153 if (fd >= 0) { 7153 if (fd >= 0) {
7154 if (xdp_chk && __dev_xdp_attached(dev, xdp_chk, NULL)) 7154 if (bpf_chk && __dev_xdp_attached(dev, bpf_chk, NULL))
7155 return -EEXIST; 7155 return -EEXIST;
7156 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && 7156 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
7157 __dev_xdp_attached(dev, xdp_op, NULL)) 7157 __dev_xdp_attached(dev, bpf_op, NULL))
7158 return -EBUSY; 7158 return -EBUSY;
7159 7159
7160 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP); 7160 if (bpf_op == ops->ndo_bpf)
7161 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
7162 dev);
7163 else
7164 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
7161 if (IS_ERR(prog)) 7165 if (IS_ERR(prog))
7162 return PTR_ERR(prog); 7166 return PTR_ERR(prog);
7163 } 7167 }
7164 7168
7165 err = dev_xdp_install(dev, xdp_op, extack, flags, prog); 7169 err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
7166 if (err < 0 && prog) 7170 if (err < 0 && prog)
7167 bpf_prog_put(prog); 7171 bpf_prog_put(prog);
7168 7172
diff --git a/net/core/filter.c b/net/core/filter.c
index a0112168d6f9..1afa17935954 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3777,25 +3777,6 @@ static bool tc_cls_act_is_valid_access(int off, int size,
3777 return bpf_skb_is_valid_access(off, size, type, info); 3777 return bpf_skb_is_valid_access(off, size, type, info);
3778} 3778}
3779 3779
3780static bool
3781tc_cls_act_is_valid_access_analyzer(int off, int size,
3782 enum bpf_access_type type,
3783 struct bpf_insn_access_aux *info)
3784{
3785 switch (off) {
3786 case offsetof(struct sk_buff, len):
3787 return true;
3788 case offsetof(struct sk_buff, data):
3789 info->reg_type = PTR_TO_PACKET;
3790 return true;
3791 case offsetof(struct sk_buff, cb) +
3792 offsetof(struct bpf_skb_data_end, data_end):
3793 info->reg_type = PTR_TO_PACKET_END;
3794 return true;
3795 }
3796 return false;
3797}
3798
3799static bool __is_valid_xdp_access(int off, int size) 3780static bool __is_valid_xdp_access(int off, int size)
3800{ 3781{
3801 if (off < 0 || off >= sizeof(struct xdp_md)) 3782 if (off < 0 || off >= sizeof(struct xdp_md))
@@ -3830,21 +3811,6 @@ static bool xdp_is_valid_access(int off, int size,
3830 return __is_valid_xdp_access(off, size); 3811 return __is_valid_xdp_access(off, size);
3831} 3812}
3832 3813
3833static bool xdp_is_valid_access_analyzer(int off, int size,
3834 enum bpf_access_type type,
3835 struct bpf_insn_access_aux *info)
3836{
3837 switch (off) {
3838 case offsetof(struct xdp_buff, data):
3839 info->reg_type = PTR_TO_PACKET;
3840 return true;
3841 case offsetof(struct xdp_buff, data_end):
3842 info->reg_type = PTR_TO_PACKET_END;
3843 return true;
3844 }
3845 return false;
3846}
3847
3848void bpf_warn_invalid_xdp_action(u32 act) 3814void bpf_warn_invalid_xdp_action(u32 act)
3849{ 3815{
3850 const u32 act_max = XDP_REDIRECT; 3816 const u32 act_max = XDP_REDIRECT;
@@ -4516,10 +4482,6 @@ const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
4516 .gen_prologue = tc_cls_act_prologue, 4482 .gen_prologue = tc_cls_act_prologue,
4517}; 4483};
4518 4484
4519const struct bpf_verifier_ops tc_cls_act_analyzer_ops = {
4520 .is_valid_access = tc_cls_act_is_valid_access_analyzer,
4521};
4522
4523const struct bpf_prog_ops tc_cls_act_prog_ops = { 4485const struct bpf_prog_ops tc_cls_act_prog_ops = {
4524 .test_run = bpf_prog_test_run_skb, 4486 .test_run = bpf_prog_test_run_skb,
4525}; 4487};
@@ -4530,10 +4492,6 @@ const struct bpf_verifier_ops xdp_verifier_ops = {
4530 .convert_ctx_access = xdp_convert_ctx_access, 4492 .convert_ctx_access = xdp_convert_ctx_access,
4531}; 4493};
4532 4494
4533const struct bpf_verifier_ops xdp_analyzer_ops = {
4534 .is_valid_access = xdp_is_valid_access_analyzer,
4535};
4536
4537const struct bpf_prog_ops xdp_prog_ops = { 4495const struct bpf_prog_ops xdp_prog_ops = {
4538 .test_run = bpf_prog_test_run_xdp, 4496 .test_run = bpf_prog_test_run_xdp,
4539}; 4497};
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8a8c51937edf..dc5ad84ac096 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1270,10 +1270,10 @@ static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
1270 *prog_id = generic_xdp_prog->aux->id; 1270 *prog_id = generic_xdp_prog->aux->id;
1271 return XDP_ATTACHED_SKB; 1271 return XDP_ATTACHED_SKB;
1272 } 1272 }
1273 if (!ops->ndo_xdp) 1273 if (!ops->ndo_bpf)
1274 return XDP_ATTACHED_NONE; 1274 return XDP_ATTACHED_NONE;
1275 1275
1276 return __dev_xdp_attached(dev, ops->ndo_xdp, prog_id); 1276 return __dev_xdp_attached(dev, ops->ndo_bpf, prog_id);
1277} 1277}
1278 1278
1279static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) 1279static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index bc3edde1b9d7..dc9bd9a0070b 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -374,7 +374,7 @@ static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
374} 374}
375 375
376static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog, 376static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
377 const struct tcf_proto *tp) 377 u32 gen_flags, const struct tcf_proto *tp)
378{ 378{
379 struct bpf_prog *fp; 379 struct bpf_prog *fp;
380 char *name = NULL; 380 char *name = NULL;
@@ -382,7 +382,11 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
382 382
383 bpf_fd = nla_get_u32(tb[TCA_BPF_FD]); 383 bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
384 384
385 fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS); 385 if (gen_flags & TCA_CLS_FLAGS_SKIP_SW)
386 fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS,
387 qdisc_dev(tp->q));
388 else
389 fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
386 if (IS_ERR(fp)) 390 if (IS_ERR(fp))
387 return PTR_ERR(fp); 391 return PTR_ERR(fp);
388 392
@@ -440,7 +444,7 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
440 prog->gen_flags = gen_flags; 444 prog->gen_flags = gen_flags;
441 445
442 ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) : 446 ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
443 cls_bpf_prog_from_efd(tb, prog, tp); 447 cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
444 if (ret < 0) 448 if (ret < 0)
445 return ret; 449 return ret;
446 450
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 250f80fd46aa..d3ab808dc882 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -41,6 +41,7 @@
41#include <string.h> 41#include <string.h>
42#include <time.h> 42#include <time.h>
43#include <unistd.h> 43#include <unistd.h>
44#include <net/if.h>
44#include <sys/types.h> 45#include <sys/types.h>
45#include <sys/stat.h> 46#include <sys/stat.h>
46 47
@@ -229,6 +230,21 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
229 info->tag[0], info->tag[1], info->tag[2], info->tag[3], 230 info->tag[0], info->tag[1], info->tag[2], info->tag[3],
230 info->tag[4], info->tag[5], info->tag[6], info->tag[7]); 231 info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
231 232
233 if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
234 jsonw_name(json_wtr, "dev");
235 if (info->ifindex) {
236 char name[IF_NAMESIZE];
237
238 if (!if_indextoname(info->ifindex, name))
239 jsonw_printf(json_wtr, "\"ifindex:%d\"",
240 info->ifindex);
241 else
242 jsonw_printf(json_wtr, "\"%s\"", name);
243 } else {
244 jsonw_printf(json_wtr, "\"unknown\"");
245 }
246 }
247
232 if (info->load_time) { 248 if (info->load_time) {
233 char buf[32]; 249 char buf[32];
234 250
@@ -274,6 +290,21 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
274 290
275 printf("tag "); 291 printf("tag ");
276 fprint_hex(stdout, info->tag, BPF_TAG_SIZE, ""); 292 fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
293 printf(" ");
294
295 if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
296 printf("dev ");
297 if (info->ifindex) {
298 char name[IF_NAMESIZE];
299
300 if (!if_indextoname(info->ifindex, name))
301 printf("ifindex:%d ", info->ifindex);
302 else
303 printf("%s ", name);
304 } else {
305 printf("unknown ");
306 }
307 }
277 printf("\n"); 308 printf("\n");
278 309
279 if (info->load_time) { 310 if (info->load_time) {
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 7cebba491011..e92f62cf933a 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -259,6 +259,7 @@ union bpf_attr {
259 __u32 kern_version; /* checked when prog_type=kprobe */ 259 __u32 kern_version; /* checked when prog_type=kprobe */
260 __u32 prog_flags; 260 __u32 prog_flags;
261 char prog_name[BPF_OBJ_NAME_LEN]; 261 char prog_name[BPF_OBJ_NAME_LEN];
262 __u32 prog_target_ifindex; /* ifindex of netdev to prep for */
262 }; 263 };
263 264
264 struct { /* anonymous struct used by BPF_OBJ_* commands */ 265 struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -893,6 +894,10 @@ enum sk_action {
893 894
894#define BPF_TAG_SIZE 8 895#define BPF_TAG_SIZE 8
895 896
897enum bpf_prog_status {
898 BPF_PROG_STATUS_DEV_BOUND = (1 << 0),
899};
900
896struct bpf_prog_info { 901struct bpf_prog_info {
897 __u32 type; 902 __u32 type;
898 __u32 id; 903 __u32 id;
@@ -906,6 +911,8 @@ struct bpf_prog_info {
906 __u32 nr_map_ids; 911 __u32 nr_map_ids;
907 __aligned_u64 map_ids; 912 __aligned_u64 map_ids;
908 char name[BPF_OBJ_NAME_LEN]; 913 char name[BPF_OBJ_NAME_LEN];
914 __u32 ifindex;
915 __u32 status;
909} __attribute__((aligned(8))); 916} __attribute__((aligned(8)));
910 917
911struct bpf_map_info { 918struct bpf_map_info {