 114 files changed, 1249 insertions(+), 624 deletions(-)
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -507,11 +507,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA
   KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
 
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y)
-CC_CAN_LINK := y
-export CC_CAN_LINK
-endif
-
 # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
 # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
 # CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6e8b71613039..f6a62ae44a65 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1844,7 +1844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		/* there are 2 passes here */
 		bpf_jit_dump(prog->len, image_size, 2, ctx.target);
 
-	set_memory_ro((unsigned long)header, header->pages);
+	bpf_jit_binary_lock_ro(header);
 	prog->bpf_func = (void *)ctx.target;
 	prog->jited = 1;
 	prog->jited_len = image_size;
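
The arm32 JIT previously called set_memory_ro() on the image directly; bpf_jit_binary_lock_ro() is the common helper the other JITs use. As a hedged sketch only (the exact body lives in include/linux/filter.h and varies by kernel version), the helper wraps roughly:

    /* Paraphrased, not verbatim kernel source: the helper compiles to a
     * no-op on architectures without CONFIG_ARCH_HAS_SET_MEMORY, so JITs
     * stay portable by calling it instead of set_memory_ro() directly.
     */
    static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
    {
            set_memory_ro((unsigned long)hdr, hdr->pages);
    }
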
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index d2db8acb1a55..5f0234ec8038 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1286,6 +1286,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		goto free_addrs;
 	}
 	if (bpf_jit_prog(&jit, fp)) {
+		bpf_jit_binary_free(header);
 		fp = orig_fp;
 		goto free_addrs;
 	}
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ff81a576347e..82532c299bb5 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
 	skb_queue_head_init(&iadev->rx_dma_q);
 	iadev->rx_free_desc_qhead = NULL;
 
-	iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+	iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
 	if (!iadev->rx_open) {
 		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
 			dev->number);
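
The old call handed kcalloc() a magic element size with the arguments in the wrong order; kcalloc(n, size, flags) expects (element count, element size) and fails safely if n * size would overflow. A minimal sketch of the idiom, with hypothetical names:

    #include <linux/slab.h>

    /* Zeroed table of n pointers; sizeof(void *) instead of a literal 4
     * also keeps the allocation correct on 64-bit kernels.
     */
    static void **alloc_ptr_table(unsigned int n)
    {
            return kcalloc(n, sizeof(void *), GFP_KERNEL);
    }
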
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a8d2eb0ceb8d..2c288d1f42bb 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 				return -EFAULT;
 			if (pool < 0 || pool > ZATM_LAST_POOL)
 				return -EINVAL;
+			pool = array_index_nospec(pool,
+						  ZATM_LAST_POOL + 1);
 			if (copy_from_user(&info,
 			    &((struct zatm_pool_req __user *) arg)->info,
 			    sizeof(info))) return -EFAULT;
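
array_index_nospec() comes from linux/nospec.h and is the standard Spectre-v1 mitigation: after the range check it clamps the index so that even a mispredicted branch cannot dereference an out-of-bounds value. A minimal sketch of the pattern, with hypothetical names (idx, nr_elems, table):

    #include <linux/nospec.h>

    if (idx < 0 || idx >= nr_elems)
            return -EINVAL;
    idx = array_index_nospec(idx, nr_elems);  /* clamped under speculation */
    val = table[idx];                         /* only the clamped idx is used */
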
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e3e330f59c2c..b3ba9a222550 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -6113,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
 			     MLX5_CAP_GEN(mdev, num_vhca_ports));
 
-	if (MLX5_VPORT_MANAGER(mdev) &&
+	if (MLX5_ESWITCH_MANAGER(mdev) &&
 	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
 		dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
 
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 40826bba06b6..fcfab6635f9c 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev)
 	bpf_prog_array_free(rcdev->raw->progs);
 }
 
-int lirc_prog_attach(const union bpf_attr *attr)
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
-	struct bpf_prog *prog;
 	struct rc_dev *rcdev;
 	int ret;
 
 	if (attr->attach_flags)
 		return -EINVAL;
 
-	prog = bpf_prog_get_type(attr->attach_bpf_fd,
-				 BPF_PROG_TYPE_LIRC_MODE2);
-	if (IS_ERR(prog))
-		return PTR_ERR(prog);
-
 	rcdev = rc_dev_get_from_fd(attr->target_fd);
-	if (IS_ERR(rcdev)) {
-		bpf_prog_put(prog);
+	if (IS_ERR(rcdev))
 		return PTR_ERR(rcdev);
-	}
 
 	ret = lirc_bpf_attach(rcdev, prog);
-	if (ret)
-		bpf_prog_put(prog);
 
 	put_device(&rcdev->dev);
 
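
The refactor moves program lookup out of the driver: lirc_prog_attach() now receives a bpf_prog whose reference is owned by the generic bpf syscall code, which is why the driver-side bpf_prog_put() calls disappear. A hedged sketch of the caller-side convention this assumes (attach_example is a hypothetical name):

    static int attach_example(const union bpf_attr *attr)
    {
            struct bpf_prog *prog;
            int ret;

            /* the caller takes the one and only reference ... */
            prog = bpf_prog_get_type(attr->attach_bpf_fd,
                                     BPF_PROG_TYPE_LIRC_MODE2);
            if (IS_ERR(prog))
                    return PTR_ERR(prog);

            ret = lirc_prog_attach(attr, prog);
            if (ret)
                    bpf_prog_put(prog);     /* ... and drops it on failure */
            return ret;
    }
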
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 567ee54504bc..5e5022fa1d04 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct alx_priv *alx = pci_get_drvdata(pdev);
 	struct alx_hw *hw = &alx->hw;
+	int err;
 
 	alx_reset_phy(hw);
 
 	if (!netif_running(alx->dev))
 		return 0;
 	netif_device_attach(alx->dev);
-	return __alx_open(alx, true);
+
+	rtnl_lock();
+	err = __alx_open(alx, true);
+	rtnl_unlock();
+
+	return err;
 }
 
 static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
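
ndo_open() and its relatives always run with RTNL held, but PM resume callbacks do not, and __alx_open() ends up in helpers that assert the lock. A minimal sketch of the rule, assuming a hypothetical reopen helper:

    #include <linux/rtnetlink.h>

    static int resume_reopen(struct alx_priv *alx)
    {
            int err;

            rtnl_lock();                    /* resume is not an rtnl path */
            err = __alx_open(alx, true);    /* may reconfigure queues, which
                                             * expects RTNL to be held */
            rtnl_unlock();
            return err;
    }
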
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d847e1b9c37b..be1506169076 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1533,6 +1533,7 @@ struct bnx2x {
 	struct link_vars	link_vars;
 	u32			link_cnt;
 	struct bnx2x_link_report_data last_reported_link;
+	bool			force_link_down;
 
 	struct mdio_if_info	mdio;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8cd73ff5debc..af7b5a4d8ba0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
 {
 	struct bnx2x_link_report_data cur_data;
 
+	if (bp->force_link_down) {
+		bp->link_vars.link_up = 0;
+		return;
+	}
+
 	/* reread mf_cfg */
 	if (IS_PF(bp) && !CHIP_IS_E1(bp))
 		bnx2x_read_mf_cfg(bp);
@@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		bp->pending_max = 0;
 	}
 
+	bp->force_link_down = false;
 	if (bp->port.pmf) {
 		rc = bnx2x_initial_phy_init(bp, load_mode);
 		if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5b1ed240bf18..57348f2b49a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
 		bp->sp_rtnl_state = 0;
 		smp_mb();
 
+		/* Immediately indicate link as down */
+		bp->link_vars.link_up = 0;
+		bp->force_link_down = true;
+		netif_carrier_off(bp->dev);
+		BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
 		bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
 		/* When ret value shows failure of allocation failure,
 		 * the nic is rebooted again. If open still fails, a error
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 30273a7717e2..4fd829b5e65d 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
 	id_tbl->max = size;
 	id_tbl->next = next;
 	spin_lock_init(&id_tbl->lock);
-	id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+	id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
 	if (!id_tbl->table)
 		return -ENOMEM;
 
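
The id table is a bitmap walked with the unsigned-long-based bitmap helpers, so it must be sized in whole longs (64-bit on most hosts), not 32-bit words as the old DIV_ROUND_UP(size, 32) * 4 arithmetic assumed. A minimal sketch of the allocation idiom; note that later kernels also grew a dedicated bitmap_zalloc() helper for this:

    #include <linux/bitops.h>
    #include <linux/slab.h>

    /* Zeroed bitmap of nbits bits, rounded up to whole unsigned longs. */
    static unsigned long *alloc_bitmap(unsigned int nbits)
    {
            return kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long),
                           GFP_KERNEL);
    }
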
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 3e93df5d4e3b..96cc03a6d942 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3726,6 +3726,8 @@ static int at91ether_init(struct platform_device *pdev)
 	int err;
 	u32 reg;
 
+	bp->queues[0].bp = bp;
+
 	dev->netdev_ops = &at91ether_netdev_ops;
 	dev->ethtool_ops = &macb_ethtool_ops;
 
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 5f4e1ffa7b95..ab02057ac730 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 /* Default alignment for start of data in an Rx FD */
 #define DPAA_FD_DATA_ALIGNMENT  16
 
+/* The DPAA requires 256 bytes reserved and mapped for the SGT */
+#define DPAA_SGT_SIZE 256
+
 /* Values for the L3R field of the FM Parse Results
  */
 /* L3 Type field: First IP Present IPv4 */
@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 
 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
 		nr_frags = skb_shinfo(skb)->nr_frags;
-		dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
-				 sizeof(struct qm_sg_entry) * (1 + nr_frags),
+		dma_unmap_single(dev, addr,
+				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
 				 dma_dir);
 
 		/* The sgt buffer has been allocated with netdev_alloc_frag(),
@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	void *sgt_buf;
 
 	/* get a page frag to store the SGTable */
-	sz = SKB_DATA_ALIGN(priv->tx_headroom +
-			    sizeof(struct qm_sg_entry) * (1 + nr_frags));
+	sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
 	sgt_buf = netdev_alloc_frag(sz);
 	if (unlikely(!sgt_buf)) {
 		netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	skbh = (struct sk_buff **)buffer_start;
 	*skbh = skb;
 
-	addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
-			      sizeof(struct qm_sg_entry) * (1 + nr_frags),
-			      dma_dir);
+	addr = dma_map_single(dev, buffer_start,
+			      priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
 	if (unlikely(dma_mapping_error(dev, addr))) {
 		dev_err(dev, "DMA mapping failed");
 		err = -EINVAL;
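
All three hunks enforce one invariant: the scatter/gather table region is a fixed 256 bytes as far as the hardware is concerned, and dma_unmap_single() must be given exactly the size dma_map_single() was. Deriving both from a single constant keeps the map and unmap sites from drifting apart, which is what the old per-site nr_frags arithmetic allowed. A hedged sketch of the shared-constant pattern, with hypothetical names:

    #define SGT_REGION_SIZE 256     /* fixed region the hardware parses */

    /* map and unmap must agree byte-for-byte on the mapped length */
    addr = dma_map_single(dev, buf, headroom + SGT_REGION_SIZE, DMA_TO_DEVICE);
    /* ... hand the frame to hardware, reclaim it on Tx confirmation ... */
    dma_unmap_single(dev, addr, headroom + SGT_REGION_SIZE, DMA_TO_DEVICE);
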
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ce6e24c74978..ecbf6187e13a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
 #define HWP_HXS_PHE_REPORT 0x00000800
 #define HWP_HXS_PCAC_PSTAT 0x00000100
 #define HWP_HXS_PCAC_PSTOP 0x00000001
+#define HWP_HXS_TCP_OFFSET 0xA
+#define HWP_HXS_UDP_OFFSET 0xB
+#define HWP_HXS_SH_PAD_REM 0x80000000
+
 struct fman_port_hwp_regs {
 	struct {
 		u32 ssa;	/* Soft Sequence Attachment */
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
 		iowrite32be(0xffffffff, &regs->pmda[i].lcv);
 	}
 
+	/* Short packet padding removal from checksum calculation */
+	iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
+	iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
+
 	start_port_hwp(port);
 }
 
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index e2e5cdc7119c..4c0f7eda1166 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
 {
 	struct hinic_rq *rq = rxq->rq;
 
+	irq_set_affinity_hint(rq->irq, NULL);
 	free_irq(rq->irq, rxq);
 	rx_del_napi(rxq);
 }
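
irq_set_affinity_hint() stores the caller's cpumask pointer in the irq descriptor, so a driver that sets a hint at request time must clear it (pass NULL) before free_irq(), or the kernel is left holding a pointer into driver memory that may be freed. Minimal sketch of the pairing, with hypothetical names:

    #include <linux/interrupt.h>

    static void teardown_rx_irq(unsigned int irq, void *dev_id)
    {
            irq_set_affinity_hint(irq, NULL);   /* drop the stored hint */
            free_irq(irq, dev_id);
    }
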
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index ed6dbcfd4e96..b151ae316546 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2199,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
 	return true;
 }
 
 #define I40E_XDP_PASS		0
-#define I40E_XDP_CONSUMED	1
-#define I40E_XDP_TX		2
+#define I40E_XDP_CONSUMED	BIT(0)
+#define I40E_XDP_TX		BIT(1)
+#define I40E_XDP_REDIR		BIT(2)
 
 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
 			      struct i40e_ring *xdp_ring);
@@ -2248,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 		break;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
@@ -2311,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	struct sk_buff *skb = rx_ring->skb;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	bool failure = false, xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
+	bool failure = false;
 	struct xdp_buff xdp;
 
 	xdp.rxq = &rx_ring->xdp_rxq;
@@ -2372,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -I40E_XDP_TX) {
-				xdp_xmit = true;
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
 				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
 			} else {
 				rx_buffer->pagecnt_bias++;
@@ -2427,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		total_rx_packets++;
 	}
 
-	if (xdp_xmit) {
+	if (xdp_xmit & I40E_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & I40E_XDP_TX) {
 		struct i40e_ring *xdp_ring =
 			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
 		i40e_xdp_ring_update_tail(xdp_ring);
-		xdp_do_flush_map();
 	}
 
 	rx_ring->skb = skb;
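
Turning the XDP verdicts into distinct bits lets the poll loop OR together everything it saw during one NAPI budget and then flush each transmit path exactly once: xdp_do_flush_map() for redirects, a tail/doorbell write for the local XDP Tx ring. A hedged sketch of the accumulate-then-flush shape (poll_flush is a hypothetical helper; the I40E_* names come from the hunks above):

    static void poll_flush(struct i40e_ring *xdp_ring, unsigned int xdp_xmit)
    {
            if (xdp_xmit & I40E_XDP_REDIR)
                    xdp_do_flush_map();             /* flush redirect maps once */
            if (xdp_xmit & I40E_XDP_TX)
                    i40e_xdp_ring_update_tail(xdp_ring);    /* one doorbell */
    }
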
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3e87dbbc9024..62e57b05a0ae 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
 	return skb;
 }
 
 #define IXGBE_XDP_PASS		0
-#define IXGBE_XDP_CONSUMED	1
-#define IXGBE_XDP_TX		2
+#define IXGBE_XDP_CONSUMED	BIT(0)
+#define IXGBE_XDP_TX		BIT(1)
+#define IXGBE_XDP_REDIR		BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 			       struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
 		if (!err)
-			result = IXGBE_XDP_TX;
+			result = IXGBE_XDP_REDIR;
 		else
 			result = IXGBE_XDP_CONSUMED;
 		break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-	bool xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
 	struct xdp_buff xdp;
 
 	xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-				xdp_xmit = true;
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
 				ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
 			} else {
 				rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		total_rx_packets++;
 	}
 
-	if (xdp_xmit) {
+	if (xdp_xmit & IXGBE_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & IXGBE_XDP_TX) {
 		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
 		/* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 */
 		wmb();
 		writel(ring->next_to_use, ring->tail);
-
-		xdp_do_flush_map();
 	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 487388aed98f..384c1fa49081 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work)
 	unsigned long flags;
 	bool poll_cmd = ent->polling;
 	int alloc_ret;
+	int cmd_mode;
 
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
@@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work)
 	set_signature(ent, !cmd->checksum_disabled);
 	dump_command(dev, ent, 1);
 	ent->ts1 = ktime_get_ns();
+	cmd_mode = cmd->mode;
 
 	if (ent->callback)
 		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work)
 	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
 	mmiowb();
 	/* if not in polling don't use ent after this point */
-	if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
+	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
 		poll_timeout(ent);
 		/* make sure we read the descriptor after ownership is SW */
 		rmb();
@@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 {
 	struct mlx5_core_dev *dev = filp->private_data;
 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-	char outlen_str[8];
+	char outlen_str[8] = {0};
 	int outlen;
 	void *ptr;
 	int err;
@@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(outlen_str, buf, count))
 		return -EFAULT;
 
-	outlen_str[7] = 0;
-
 	err = sscanf(outlen_str, "%d", &outlen);
 	if (err < 0)
 		return err;
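
Zero-initializing outlen_str guarantees the user copy is NUL-terminated wherever it ends; the old code terminated only index 7, leaving uninitialized stack bytes between a short copy and that position for sscanf to read. A hedged sketch of the safe debugfs-write parsing shape (parse_uint_write is a hypothetical helper):

    static ssize_t parse_uint_write(const char __user *buf, size_t count,
                                    unsigned int *out)
    {
            char kbuf[8] = {0};             /* implicit NUL after any copy */

            if (count >= sizeof(kbuf))      /* keep room for the terminator */
                    return -EFAULT;
            if (copy_from_user(kbuf, buf, count))
                    return -EFAULT;
            return kstrtouint(kbuf, 0, out) ?: (ssize_t)count;
    }
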
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 56c1b6f5593e..dae4156a710d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 	mlx5e_activate_channels(&priv->channels);
 	netif_tx_start_all_queues(priv->netdev);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_add_sqs_fwd_rules(priv);
 
 	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
 	mlx5e_redirect_rqts_to_drop(priv);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_remove_sqs_fwd_rules(priv);
 
 	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	mlx5e_set_netdev_dev_addr(netdev);
 
 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-	if (MLX5_VPORT_MANAGER(mdev))
+	if (MLX5_ESWITCH_MANAGER(mdev))
 		netdev->switchdev_ops = &mlx5e_switchdev_ops;
 #endif
 
@@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
 	mlx5e_enable_async_events(priv);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_register_vport_reps(priv);
 
 	if (netdev->reg_state != NETREG_REGISTERED)
@@ -4788,7 +4788,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 
 	queue_work(priv->wq, &priv->set_rx_mode_work);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_unregister_vport_reps(priv);
 
 	mlx5e_disable_async_events(priv);
@@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
 		return NULL;
 
 #ifdef CONFIG_MLX5_ESWITCH
-	if (MLX5_VPORT_MANAGER(mdev)) {
+	if (MLX5_ESWITCH_MANAGER(mdev)) {
 		rpriv = mlx5e_alloc_nic_rep_priv(mdev);
 		if (!rpriv) {
 			mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 57987f6546e8..2b8040a3cdbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep;
 
-	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
 		return false;
 
 	rep = rpriv->rep;
@@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
 static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
 {
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-	struct mlx5_eswitch_rep *rep = rpriv->rep;
+	struct mlx5_eswitch_rep *rep;
 
+	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+		return false;
+
+	rep = rpriv->rep;
 	if (rep && rep->vport != FDB_UPLINK_VPORT)
 		return true;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f63dfbcd29fe..b79d74860a30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 }
 
 /* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))
+#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
+
 
 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
 	int err;
 	int i, enabled_events;
 
-	if (!ESW_ALLOWED(esw))
-		return 0;
-
-	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
+	if (!ESW_ALLOWED(esw) ||
 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
 		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
 		return -EOPNOTSUPP;
@@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 	u64 node_guid;
 	int err = 0;
 
-	if (!ESW_ALLOWED(esw))
+	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
 		return -EINVAL;
@@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 {
 	struct mlx5_vport *evport;
 
-	if (!ESW_ALLOWED(esw))
+	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
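
The series-wide substitution hinges on two different HCA capabilities: ESW_ALLOWED() now keys on the dedicated eswitch_manager capability, while the vport-context operations above keep the vport_group_manager test. As a hedged sketch of the distinction (paraphrased from include/linux/mlx5/eswitch.h and the driver's mlx5_core.h of this era, not verbatim):

    /* paraphrased, not verbatim */
    #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)

    #define MLX5_VPORT_MANAGER(mdev)                                      \
            (MLX5_CAP_GEN(mdev, vport_group_manager) &&                   \
             (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
             mlx5_core_is_pf(mdev))
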
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index cecd201f0b73..91f1209886ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
 	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
 		return -EOPNOTSUPP;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
+	if (!MLX5_ESWITCH_MANAGER(dev))
+		return -EPERM;
 
 	if (dev->priv.eswitch->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 49a75d31185e..f1a86cea86a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -32,6 +32,7 @@
 
 #include <linux/mutex.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
 
 #include "mlx5_core.h"
 #include "fs_core.h"
@@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 		goto err;
 	}
 
-	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+	if (MLX5_ESWITCH_MANAGER(dev)) {
 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
 			err = init_fdb_root_ns(steering);
 			if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index afd9f4fa22f4..41ad24f0de2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -32,6 +32,7 @@
 
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
+#include <linux/mlx5/eswitch.h>
 #include <linux/module.h>
 #include "mlx5_core.h"
 #include "../../mlxfw/mlxfw.h"
@@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 	}
 
 	if (MLX5_CAP_GEN(dev, vport_group_manager) &&
-	    MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+	    MLX5_ESWITCH_MANAGER(dev)) {
 		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
 		if (err)
 			return err;
 	}
 
-	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+	if (MLX5_ESWITCH_MANAGER(dev)) {
 		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
 		if (err)
 			return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 7cb67122e8b5..98359559c77e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -33,6 +33,7 @@
 #include <linux/etherdevice.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/eswitch.h>
 #include "mlx5_core.h"
 #include "lib/mpfs.h"
 
@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
 	int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
 	struct mlx5_mpfs *mpfs;
 
-	if (!MLX5_VPORT_MANAGER(dev))
+	if (!MLX5_ESWITCH_MANAGER(dev))
 		return 0;
 
 	mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
 {
 	struct mlx5_mpfs *mpfs = dev->priv.mpfs;
 
-	if (!MLX5_VPORT_MANAGER(dev))
+	if (!MLX5_ESWITCH_MANAGER(dev))
 		return;
 
 	WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
 	u32 index;
 	int err;
 
-	if (!MLX5_VPORT_MANAGER(dev))
+	if (!MLX5_ESWITCH_MANAGER(dev))
 		return 0;
 
 	mutex_lock(&mpfs->lock);
@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
 	int err = 0;
 	u32 index;
 
-	if (!MLX5_VPORT_MANAGER(dev))
+	if (!MLX5_ESWITCH_MANAGER(dev))
 		return 0;
 
 	mutex_lock(&mpfs->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index fa9d0760dd36..31a9cbd85689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
 static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 				   int inlen)
 {
-	u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+	u32 out[MLX5_ST_SZ_DW(qetc_reg)];
 
 	if (!MLX5_CAP_GEN(mdev, ets))
 		return -EOPNOTSUPP;
@@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
 				     int outlen)
 {
-	u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+	u32 in[MLX5_ST_SZ_DW(qetc_reg)];
 
 	if (!MLX5_CAP_GEN(mdev, ets))
 		return -EOPNOTSUPP;
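
The fix matters because the stack buffer is sized from the register layout name: MLX5_ST_SZ_DW() measures the matching mlx5_ifc_*_bits layout struct in 32-bit dwords, so naming the wrong register (qtct instead of qetc) sizes the buffer for the wrong layout. A hedged sketch of the macro (paraphrased from include/linux/mlx5/device.h):

    /* paraphrased: size of a firmware register layout, in dwords */
    #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)

    u32 out[MLX5_ST_SZ_DW(qetc_reg)];   /* sized for QETC.R, as accessed */
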
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2a8b529ce6dd..a0674962f02c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
 		return -EBUSY;
 	}
 
+	if (!MLX5_ESWITCH_MANAGER(dev))
+		goto enable_vfs_hca;
+
 	err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
 	if (err) {
 		mlx5_core_warn(dev,
@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
 		return err;
 	}
 
+enable_vfs_hca:
 	for (vf = 0; vf < num_vfs; vf++) {
 		err = mlx5_core_enable_hca(dev, vf + 1);
 		if (err) {
@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
 	}
 
 out:
-	mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+	if (MLX5_ESWITCH_MANAGER(dev))
+		mlx5_eswitch_disable_sriov(dev->priv.eswitch);
 
 	if (mlx5_wait_for_vf_pages(dev))
 		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 719cecb182c6..7eecd5b07bb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
 		return -EINVAL;
 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
 		return -EACCES;
-	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-		return -EOPNOTSUPP;
 
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index fcdfb8e7fdea..40216d56dddc 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
 
 	ret = nfp_net_bpf_offload(nn, prog, running, extack);
 	/* Stop offload if replace not possible */
-	if (ret && prog)
-		nfp_bpf_xdp_offload(app, nn, NULL, extack);
+	if (ret)
+		return ret;
 
-	nn->dp.bpf_offload_xdp = prog && !ret;
+	nn->dp.bpf_offload_xdp = !!prog;
 	return ret;
 }
 
@@ -202,6 +202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 		return -EOPNOTSUPP;
 
+	if (tcf_block_shared(f->block))
+		return -EOPNOTSUPP;
+
 	switch (f->command) {
 	case TC_BLOCK_BIND:
 		return tcf_block_cb_register(f->block,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 91935405f586..84f7a5dbea9d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
 			  NFP_FLOWER_MASK_MPLS_Q;
 
 		frame->mpls_lse = cpu_to_be32(t_mpls);
+	} else if (dissector_uses_key(flow->dissector,
+				      FLOW_DISSECTOR_KEY_BASIC)) {
+		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
+		 * bit, which indicates an mpls ether type but without any
+		 * mpls fields.
+		 */
+		struct flow_dissector_key_basic *key_basic;
+
+		key_basic = skb_flow_dissector_target(flow->dissector,
+						      FLOW_DISSECTOR_KEY_BASIC,
+						      flow->key);
+		if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+		    key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
+			frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
 	}
 }
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index c42e64f32333..525057bee0ed 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 		case cpu_to_be16(ETH_P_ARP):
 			return -EOPNOTSUPP;
 
+		case cpu_to_be16(ETH_P_MPLS_UC):
+		case cpu_to_be16(ETH_P_MPLS_MC):
+			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+				key_layer |= NFP_FLOWER_LAYER_MAC;
+				key_size += sizeof(struct nfp_flower_mac_mpls);
+			}
+			break;
+
 		/* Will be included in layer 2. */
 		case cpu_to_be16(ETH_P_8021Q):
 			break;
@@ -623,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 		return -EOPNOTSUPP;
 
+	if (tcf_block_shared(f->block))
+		return -EOPNOTSUPP;
+
 	switch (f->command) {
 	case TC_BLOCK_BIND:
 		return tcf_block_cb_register(f->block,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index cd34097b79f1..37a6d7822a38 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
 	err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
 			   nfp_resource_address(state->res),
 			   fwinf, sizeof(*fwinf));
-	if (err < sizeof(*fwinf))
+	if (err < (int)sizeof(*fwinf))
 		goto err_release;
 
 	if (!nffw_res_flg_init_get(fwinf))
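
nfp_cpp_read() can return a negative errno, but comparing an int against sizeof() promotes it to an unsigned type, so a negative error becomes a huge value and sails past the < check; the (int) cast keeps the comparison signed. A standalone illustration of the bug class:

    #include <stdio.h>

    int main(void)
    {
            int err = -5;                   /* e.g. a -EIO return value */

            if (err < sizeof(int))          /* false: err converts to unsigned */
                    puts("caught without cast");
            if (err < (int)sizeof(int))     /* true: stays a signed compare */
                    puts("caught with cast");
            return 0;
    }
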
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index f0b01385d5cb..e0680ce91328 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -709,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
 	p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
 
 	memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
-	       ARRAY_SIZE(p_local->local_chassis_id));
+	       sizeof(p_local->local_chassis_id));
 	memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
-	       ARRAY_SIZE(p_local->local_port_id));
+	       sizeof(p_local->local_port_id));
 }
 
 static void
@@ -723,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
 	p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
 
 	memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
-	       ARRAY_SIZE(p_remote->peer_chassis_id));
+	       sizeof(p_remote->peer_chassis_id));
 	memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
-	       ARRAY_SIZE(p_remote->peer_port_id));
+	       sizeof(p_remote->peer_port_id));
 }
 
 static int
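
memcpy() takes a byte count, but ARRAY_SIZE() yields an element count, so for arrays of multi-byte elements the old code copied only part of each id. A standalone illustration:

    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            unsigned int src[4] = {1, 2, 3, 4}, dst[4] = {0};

            memcpy(dst, src, ARRAY_SIZE(src));      /* bug: 4 bytes only */
            printf("dst[3] = %u\n", dst[3]);        /* prints 0 */
            memcpy(dst, src, sizeof(src));          /* fix: all 16 bytes */
            printf("dst[3] = %u\n", dst[3]);        /* prints 4 */
            return 0;
    }
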
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 329781cda77f..e5249b4741d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 			DP_INFO(p_hwfn, "Failed to update driver state\n");
 
 		rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
-					       QED_OV_ESWITCH_VEB);
+					       QED_OV_ESWITCH_NONE);
 		if (rc)
 			DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
 	}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 5c10fd7210c3..0cbc74d6ca8b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -789,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
 	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
 	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
 
+	if (is_kdump_kernel()) {
+		DP_INFO(cdev,
+			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+			cdev->int_params.in.min_msix_cnt);
+		cdev->int_params.in.num_vectors =
+			cdev->int_params.in.min_msix_cnt;
+	}
+
 	rc = qed_set_int_mode(cdev, false);
 	if (rc) {
 		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index f01bf52bc381..fd59cf45f4be 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
 static int qed_sriov_enable(struct qed_dev *cdev, int num)
 {
 	struct qed_iov_vf_init_params params;
+	struct qed_hwfn *hwfn;
+	struct qed_ptt *ptt;
 	int i, j, rc;
 
 	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
 
 	/* Initialize HW for VF access */
 	for_each_hwfn(cdev, j) {
-		struct qed_hwfn *hwfn = &cdev->hwfns[j];
-		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+		hwfn = &cdev->hwfns[j];
+		ptt = qed_ptt_acquire(hwfn);
 
 		/* Make sure not to use more than 16 queues per VF */
 		params.num_queues = min_t(int,
@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
 		goto err;
 	}
 
+	hwfn = QED_LEADING_HWFN(cdev);
+	ptt = qed_ptt_acquire(hwfn);
+	if (!ptt) {
+		DP_ERR(hwfn, "Failed to acquire ptt\n");
+		rc = -EBUSY;
+		goto err;
+	}
+
+	rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
+	if (rc)
+		DP_INFO(cdev, "Failed to update eswitch mode\n");
+	qed_ptt_release(hwfn, ptt);
+
 	return num;
 
 err:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 02adb513f475..013ff567283c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c | |||
@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) | |||
337 | { | 337 | { |
338 | struct qede_ptp *ptp = edev->ptp; | 338 | struct qede_ptp *ptp = edev->ptp; |
339 | 339 | ||
340 | if (!ptp) | 340 | if (!ptp) { |
341 | return -EIO; | 341 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | |
342 | SOF_TIMESTAMPING_RX_SOFTWARE | | ||
343 | SOF_TIMESTAMPING_SOFTWARE; | ||
344 | info->phc_index = -1; | ||
345 | |||
346 | return 0; | ||
347 | } | ||
342 | 348 | ||
343 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | | 349 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | |
344 | SOF_TIMESTAMPING_RX_SOFTWARE | | 350 | SOF_TIMESTAMPING_RX_SOFTWARE | |
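The qede_ptp.c change turns a hard -EIO into graceful degradation: with no PTP clock attached, the driver now reports plain software timestamping and phc_index = -1 instead of failing ethtool's get_ts_info. A reduced sketch of the same fallback, with the SOF_* values copied from the kernel's net_tstamp ABI and the structs trimmed to what the example needs:

#include <stdbool.h>
#include <stdio.h>

/* Values from the kernel's net_tstamp ABI. */
#define SOF_TIMESTAMPING_TX_SOFTWARE (1 << 1)
#define SOF_TIMESTAMPING_RX_SOFTWARE (1 << 3)
#define SOF_TIMESTAMPING_SOFTWARE    (1 << 4)

struct ts_info {
	unsigned int so_timestamping;
	int phc_index;
};

/* Mirrors the patched qede_ptp_get_ts_info(): absent PTP support is
 * no longer an error, just "software timestamps only, no PHC". */
static int get_ts_info(bool have_ptp, struct ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	if (!have_ptp)
		return 0;        /* was: return -EIO */

	/* with PTP present the driver adds HW capabilities here */
	return 0;
}

int main(void)
{
	struct ts_info info;

	get_ts_info(false, &info);
	printf("timestamping=0x%x phc=%d\n", info.so_timestamping,
	       info.phc_index);
	return 0;
}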
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 8edf20967c82..e045a5d6b938 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c | |||
@@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx) | |||
2794 | if (!state) | 2794 | if (!state) |
2795 | return -ENOMEM; | 2795 | return -ENOMEM; |
2796 | efx->filter_state = state; | 2796 | efx->filter_state = state; |
2797 | init_rwsem(&state->lock); | ||
2797 | 2798 | ||
2798 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; | 2799 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; |
2799 | table->id = EFX_FARCH_FILTER_TABLE_RX_IP; | 2800 | table->id = EFX_FARCH_FILTER_TABLE_RX_IP; |
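The one-line sfc fix initializes state->lock as soon as the filter state is allocated, before any path can take the semaphore; locking a never-initialized lock is undefined behaviour. The same init-before-publish rule, shown with a POSIX rwlock as a userspace analogue of the kernel rwsem (not the kernel API itself):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct filter_state {
	pthread_rwlock_t lock;
	int table[4];
};

int main(void)
{
	struct filter_state *state = calloc(1, sizeof(*state));

	if (!state)
		return 1;

	/* Initialize the lock immediately after allocation, before the
	 * state pointer is published anywhere a reader could find it. */
	pthread_rwlock_init(&state->lock, NULL);

	pthread_rwlock_rdlock(&state->lock);
	printf("table[0]=%d\n", state->table[0]);
	pthread_rwlock_unlock(&state->lock);

	pthread_rwlock_destroy(&state->lock);
	free(state);
	return 0;
}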
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index d37f17ca62fe..65bc3556bd8f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | |||
@@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) | |||
407 | } | 407 | } |
408 | } | 408 | } |
409 | 409 | ||
410 | static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) | ||
411 | { | ||
412 | u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); | ||
413 | |||
414 | value &= ~DMA_RBSZ_MASK; | ||
415 | value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK; | ||
416 | |||
417 | writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); | ||
418 | } | ||
419 | |||
410 | const struct stmmac_dma_ops dwmac4_dma_ops = { | 420 | const struct stmmac_dma_ops dwmac4_dma_ops = { |
411 | .reset = dwmac4_dma_reset, | 421 | .reset = dwmac4_dma_reset, |
412 | .init = dwmac4_dma_init, | 422 | .init = dwmac4_dma_init, |
@@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { | |||
431 | .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, | 441 | .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, |
432 | .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, | 442 | .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, |
433 | .enable_tso = dwmac4_enable_tso, | 443 | .enable_tso = dwmac4_enable_tso, |
444 | .set_bfsize = dwmac4_set_bfsize, | ||
434 | }; | 445 | }; |
435 | 446 | ||
436 | const struct stmmac_dma_ops dwmac410_dma_ops = { | 447 | const struct stmmac_dma_ops dwmac410_dma_ops = { |
@@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = { | |||
457 | .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, | 468 | .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, |
458 | .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, | 469 | .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, |
459 | .enable_tso = dwmac4_enable_tso, | 470 | .enable_tso = dwmac4_enable_tso, |
471 | .set_bfsize = dwmac4_set_bfsize, | ||
460 | }; | 472 | }; |
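dwmac4_set_bfsize() is a classic read-modify-write: read the per-channel RX control register, clear the buffer-size field, OR in the new size shifted into place, write back. A self-contained C model of that sequence, with the register simulated by a plain variable and the mask/shift values taken from the header hunk below:

#include <stdint.h>
#include <stdio.h>

#define DMA_RBSZ_SHIFT 1
#define DMA_RBSZ_MASK  (0x3FFFu << DMA_RBSZ_SHIFT)   /* GENMASK(14, 1) */

static uint32_t rx_control = 0x00000001;  /* simulated register, SR bit set */

static void set_bfsize(int bfsize)
{
	uint32_t value = rx_control;                  /* readl() */

	value &= ~DMA_RBSZ_MASK;                      /* clear old size */
	value |= ((uint32_t)bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
	rx_control = value;                           /* writel() */
}

int main(void)
{
	set_bfsize(1536);
	printf("RX control = 0x%08x\n", rx_control);
	/* Other bits (here: SR at bit 0) are untouched by the update. */
	return 0;
}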
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index c63c1fe3f26b..22a4a6dbb1a4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | |||
@@ -120,6 +120,8 @@ | |||
120 | 120 | ||
121 | /* DMA Rx Channel X Control register defines */ | 121 | /* DMA Rx Channel X Control register defines */ |
122 | #define DMA_CONTROL_SR BIT(0) | 122 | #define DMA_CONTROL_SR BIT(0) |
123 | #define DMA_RBSZ_MASK GENMASK(14, 1) | ||
124 | #define DMA_RBSZ_SHIFT 1 | ||
123 | 125 | ||
124 | /* Interrupt status per channel */ | 126 | /* Interrupt status per channel */ |
125 | #define DMA_CHAN_STATUS_REB GENMASK(21, 19) | 127 | #define DMA_CHAN_STATUS_REB GENMASK(21, 19) |
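GENMASK(h, l) builds a mask with bits h..l set; GENMASK(14, 1) therefore describes a 14-bit receive-buffer-size field occupying bits 14:1. A portable 32-bit rendition you can check in plain C (the kernel's own macro lives in linux/bits.h):

#include <stdint.h>
#include <stdio.h>

/* 32-bit rendition of the kernel's GENMASK(h, l). */
#define GENMASK32(h, l) \
	(((~UINT32_C(0)) >> (31 - (h))) & (~UINT32_C(0) << (l)))

int main(void)
{
	uint32_t mask = GENMASK32(14, 1);

	printf("DMA_RBSZ_MASK = 0x%04x\n", mask);        /* 0x7ffe */
	printf("max bfsize    = %u bytes\n", mask >> 1); /* 16383 */
	return 0;
}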
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index e44e7b26ce82..fe8b536b13f8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h | |||
@@ -183,6 +183,7 @@ struct stmmac_dma_ops { | |||
183 | void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); | 183 | void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); |
184 | void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); | 184 | void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); |
185 | void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); | 185 | void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); |
186 | void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan); | ||
186 | }; | 187 | }; |
187 | 188 | ||
188 | #define stmmac_reset(__priv, __args...) \ | 189 | #define stmmac_reset(__priv, __args...) \ |
@@ -235,6 +236,8 @@ struct stmmac_dma_ops { | |||
235 | stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) | 236 | stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) |
236 | #define stmmac_enable_tso(__priv, __args...) \ | 237 | #define stmmac_enable_tso(__priv, __args...) \ |
237 | stmmac_do_void_callback(__priv, dma, enable_tso, __args) | 238 | stmmac_do_void_callback(__priv, dma, enable_tso, __args) |
239 | #define stmmac_set_dma_bfsize(__priv, __args...) \ | ||
240 | stmmac_do_void_callback(__priv, dma, set_bfsize, __args) | ||
238 | 241 | ||
239 | struct mac_device_info; | 242 | struct mac_device_info; |
240 | struct net_device; | 243 | struct net_device; |
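stmmac routes every DMA operation through stmmac_do_void_callback(), so wiring up set_bfsize is just one ops-table slot plus one wrapper macro. A reduced model of that guarded-dispatch pattern (the real macro also goes through a priv->hw indirection; this sketch assumes the simpler shape):

#include <stdio.h>

struct dma_ops {
	void (*set_bfsize)(void *ioaddr, int bfsize, unsigned int chan);
};

struct priv {
	const struct dma_ops *dma;
	void *ioaddr;
};

/* Only call the op if the backend provides it -- NULL slots are legal. */
#define do_void_callback(p, op, ...)                  \
	do {                                          \
		if ((p)->dma && (p)->dma->op)         \
			(p)->dma->op(__VA_ARGS__);    \
	} while (0)

static void demo_set_bfsize(void *ioaddr, int bfsize, unsigned int chan)
{
	(void)ioaddr;
	printf("chan %u: bfsize=%d\n", chan, bfsize);
}

static const struct dma_ops demo_ops = { .set_bfsize = demo_set_bfsize };

int main(void)
{
	struct priv p = { .dma = &demo_ops, .ioaddr = NULL };

	do_void_callback(&p, set_bfsize, p.ioaddr, 1536, 0);
	return 0;
}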
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index cba46b62a1cd..60f59abab009 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -1804,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) | |||
1804 | 1804 | ||
1805 | stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, | 1805 | stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, |
1806 | rxfifosz, qmode); | 1806 | rxfifosz, qmode); |
1807 | stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz, | ||
1808 | chan); | ||
1807 | } | 1809 | } |
1808 | 1810 | ||
1809 | for (chan = 0; chan < tx_channels_count; chan++) { | 1811 | for (chan = 0; chan < tx_channels_count; chan++) { |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 750eaa53bf0c..ada33c2d9ac2 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
@@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, | |||
476 | out_unlock: | 476 | out_unlock: |
477 | rcu_read_unlock(); | 477 | rcu_read_unlock(); |
478 | out: | 478 | out: |
479 | NAPI_GRO_CB(skb)->flush |= flush; | 479 | skb_gro_flush_final(skb, pp, flush); |
480 | 480 | ||
481 | return pp; | 481 | return pp; |
482 | } | 482 | } |
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 1a924b867b07..4b6e308199d2 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net, | |||
210 | void netvsc_channel_cb(void *context); | 210 | void netvsc_channel_cb(void *context); |
211 | int netvsc_poll(struct napi_struct *napi, int budget); | 211 | int netvsc_poll(struct napi_struct *napi, int budget); |
212 | 212 | ||
213 | void rndis_set_subchannel(struct work_struct *w); | 213 | int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); |
214 | int rndis_filter_open(struct netvsc_device *nvdev); | 214 | int rndis_filter_open(struct netvsc_device *nvdev); |
215 | int rndis_filter_close(struct netvsc_device *nvdev); | 215 | int rndis_filter_close(struct netvsc_device *nvdev); |
216 | struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, | 216 | struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 5d5bd513847f..8e9d0ee1572b 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) | |||
65 | VM_PKT_DATA_INBAND, 0); | 65 | VM_PKT_DATA_INBAND, 0); |
66 | } | 66 | } |
67 | 67 | ||
68 | /* Worker to set up sub-channels during initial setup; the | ||
69 | * initial hotplug event occurs in softirq context | ||
70 | * and can't wait for channels. | ||
71 | */ | ||
72 | static void netvsc_subchan_work(struct work_struct *w) | ||
73 | { | ||
74 | struct netvsc_device *nvdev = | ||
75 | container_of(w, struct netvsc_device, subchan_work); | ||
76 | struct rndis_device *rdev; | ||
77 | int i, ret; | ||
78 | |||
79 | /* Avoid deadlock with device removal already under RTNL */ | ||
80 | if (!rtnl_trylock()) { | ||
81 | schedule_work(w); | ||
82 | return; | ||
83 | } | ||
84 | |||
85 | rdev = nvdev->extension; | ||
86 | if (rdev) { | ||
87 | ret = rndis_set_subchannel(rdev->ndev, nvdev); | ||
88 | if (ret == 0) { | ||
89 | netif_device_attach(rdev->ndev); | ||
90 | } else { | ||
91 | /* fall back to the primary channel only */ | ||
92 | for (i = 1; i < nvdev->num_chn; i++) | ||
93 | netif_napi_del(&nvdev->chan_table[i].napi); | ||
94 | |||
95 | nvdev->max_chn = 1; | ||
96 | nvdev->num_chn = 1; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | rtnl_unlock(); | ||
101 | } | ||
102 | |||
68 | static struct netvsc_device *alloc_net_device(void) | 103 | static struct netvsc_device *alloc_net_device(void) |
69 | { | 104 | { |
70 | struct netvsc_device *net_device; | 105 | struct netvsc_device *net_device; |
@@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void) | |||
81 | 116 | ||
82 | init_completion(&net_device->channel_init_wait); | 117 | init_completion(&net_device->channel_init_wait); |
83 | init_waitqueue_head(&net_device->subchan_open); | 118 | init_waitqueue_head(&net_device->subchan_open); |
84 | INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); | 119 | INIT_WORK(&net_device->subchan_work, netvsc_subchan_work); |
85 | 120 | ||
86 | return net_device; | 121 | return net_device; |
87 | } | 122 | } |
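netvsc_subchan_work() shows the trylock-or-requeue idiom: the worker must not block on rtnl_lock() while device removal, which already holds RTNL, flushes this very work item, so on contention it reschedules itself instead of sleeping. A userspace analogue using pthread_mutex_trylock, with the "requeue" reduced to a retry loop:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns false when the lock was busy and the work must be requeued. */
static bool subchan_work(void)
{
	if (pthread_mutex_trylock(&big_lock) != 0)
		return false;          /* would deadlock: back off, retry */

	puts("configuring sub-channels under the lock");
	pthread_mutex_unlock(&big_lock);
	return true;
}

int main(void)
{
	/* schedule_work() analogue: keep requeueing until it ran */
	while (!subchan_work())
		usleep(1000);
	return 0;
}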
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index fe2256bf1d13..dd1d6e115145 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev, | |||
905 | if (IS_ERR(nvdev)) | 905 | if (IS_ERR(nvdev)) |
906 | return PTR_ERR(nvdev); | 906 | return PTR_ERR(nvdev); |
907 | 907 | ||
908 | /* Note: enable and attach happen when sub-channels setup */ | 908 | if (nvdev->num_chn > 1) { |
909 | ret = rndis_set_subchannel(ndev, nvdev); | ||
910 | |||
911 | /* if unavailable, just proceed with one queue */ | ||
912 | if (ret) { | ||
913 | nvdev->max_chn = 1; | ||
914 | nvdev->num_chn = 1; | ||
915 | } | ||
916 | } | ||
917 | |||
918 | /* In any case device is now ready */ | ||
919 | netif_device_attach(ndev); | ||
909 | 920 | ||
921 | /* Note: enable and attach happen when sub-channels setup */ | ||
910 | netif_carrier_off(ndev); | 922 | netif_carrier_off(ndev); |
911 | 923 | ||
912 | if (netif_running(ndev)) { | 924 | if (netif_running(ndev)) { |
@@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev, | |||
2089 | 2101 | ||
2090 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); | 2102 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); |
2091 | 2103 | ||
2104 | if (nvdev->num_chn > 1) | ||
2105 | schedule_work(&nvdev->subchan_work); | ||
2106 | |||
2092 | /* hw_features computed in rndis_netdev_set_hwcaps() */ | 2107 | /* hw_features computed in rndis_netdev_set_hwcaps() */ |
2093 | net->features = net->hw_features | | 2108 | net->features = net->hw_features | |
2094 | NETIF_F_HIGHDMA | NETIF_F_SG | | 2109 | NETIF_F_HIGHDMA | NETIF_F_SG | |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 5428bb261102..9b4e3c3787e5 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) | |||
1062 | * This breaks overlap of processing the host message for the | 1062 | * This breaks overlap of processing the host message for the |
1063 | * new primary channel with the initialization of sub-channels. | 1063 | * new primary channel with the initialization of sub-channels. |
1064 | */ | 1064 | */ |
1065 | void rndis_set_subchannel(struct work_struct *w) | 1065 | int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) |
1066 | { | 1066 | { |
1067 | struct netvsc_device *nvdev | ||
1068 | = container_of(w, struct netvsc_device, subchan_work); | ||
1069 | struct nvsp_message *init_packet = &nvdev->channel_init_pkt; | 1067 | struct nvsp_message *init_packet = &nvdev->channel_init_pkt; |
1070 | struct net_device_context *ndev_ctx; | 1068 | struct net_device_context *ndev_ctx = netdev_priv(ndev); |
1071 | struct rndis_device *rdev; | 1069 | struct hv_device *hv_dev = ndev_ctx->device_ctx; |
1072 | struct net_device *ndev; | 1070 | struct rndis_device *rdev = nvdev->extension; |
1073 | struct hv_device *hv_dev; | ||
1074 | int i, ret; | 1071 | int i, ret; |
1075 | 1072 | ||
1076 | if (!rtnl_trylock()) { | 1073 | ASSERT_RTNL(); |
1077 | schedule_work(w); | ||
1078 | return; | ||
1079 | } | ||
1080 | |||
1081 | rdev = nvdev->extension; | ||
1082 | if (!rdev) | ||
1083 | goto unlock; /* device was removed */ | ||
1084 | |||
1085 | ndev = rdev->ndev; | ||
1086 | ndev_ctx = netdev_priv(ndev); | ||
1087 | hv_dev = ndev_ctx->device_ctx; | ||
1088 | 1074 | ||
1089 | memset(init_packet, 0, sizeof(struct nvsp_message)); | 1075 | memset(init_packet, 0, sizeof(struct nvsp_message)); |
1090 | init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; | 1076 | init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; |
@@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w) | |||
1100 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | 1086 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); |
1101 | if (ret) { | 1087 | if (ret) { |
1102 | netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); | 1088 | netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); |
1103 | goto failed; | 1089 | return ret; |
1104 | } | 1090 | } |
1105 | 1091 | ||
1106 | wait_for_completion(&nvdev->channel_init_wait); | 1092 | wait_for_completion(&nvdev->channel_init_wait); |
1107 | if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { | 1093 | if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { |
1108 | netdev_err(ndev, "sub channel request failed\n"); | 1094 | netdev_err(ndev, "sub channel request failed\n"); |
1109 | goto failed; | 1095 | return -EIO; |
1110 | } | 1096 | } |
1111 | 1097 | ||
1112 | nvdev->num_chn = 1 + | 1098 | nvdev->num_chn = 1 + |
@@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w) | |||
1125 | for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) | 1111 | for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) |
1126 | ndev_ctx->tx_table[i] = i % nvdev->num_chn; | 1112 | ndev_ctx->tx_table[i] = i % nvdev->num_chn; |
1127 | 1113 | ||
1128 | netif_device_attach(ndev); | 1114 | return 0; |
1129 | rtnl_unlock(); | ||
1130 | return; | ||
1131 | |||
1132 | failed: | ||
1133 | /* fallback to only primary channel */ | ||
1134 | for (i = 1; i < nvdev->num_chn; i++) | ||
1135 | netif_napi_del(&nvdev->chan_table[i].napi); | ||
1136 | |||
1137 | nvdev->max_chn = 1; | ||
1138 | nvdev->num_chn = 1; | ||
1139 | |||
1140 | netif_device_attach(ndev); | ||
1141 | unlock: | ||
1142 | rtnl_unlock(); | ||
1143 | } | 1115 | } |
1144 | 1116 | ||
1145 | static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, | 1117 | static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, |
@@ -1360,21 +1332,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, | |||
1360 | netif_napi_add(net, &net_device->chan_table[i].napi, | 1332 | netif_napi_add(net, &net_device->chan_table[i].napi, |
1361 | netvsc_poll, NAPI_POLL_WEIGHT); | 1333 | netvsc_poll, NAPI_POLL_WEIGHT); |
1362 | 1334 | ||
1363 | if (net_device->num_chn > 1) | 1335 | return net_device; |
1364 | schedule_work(&net_device->subchan_work); | ||
1365 | 1336 | ||
1366 | out: | 1337 | out: |
1367 | /* if unavailable, just proceed with one queue */ | 1338 | /* setting up multiple channels failed */ |
1368 | if (ret) { | 1339 | net_device->max_chn = 1; |
1369 | net_device->max_chn = 1; | 1340 | net_device->num_chn = 1; |
1370 | net_device->num_chn = 1; | ||
1371 | } | ||
1372 | |||
1373 | /* No sub channels, device is ready */ | ||
1374 | if (net_device->num_chn == 1) | ||
1375 | netif_device_attach(net); | ||
1376 | |||
1377 | return net_device; | ||
1378 | 1341 | ||
1379 | err_dev_remv: | 1342 | err_dev_remv: |
1380 | rndis_filter_device_remove(dev, net_device); | 1343 | rndis_filter_device_remove(dev, net_device); |
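rndis_set_subchannel() now states its locking contract with ASSERT_RTNL() instead of taking the lock itself: both callers (the attach path and the rescheduling worker above) already hold RTNL. The assert-the-precondition style, in miniature:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static int rtnl_held;

/* Caller must hold the lock; fail loudly in test builds if not. */
static int set_subchannel(int count)
{
	assert(rtnl_held);             /* stands in for ASSERT_RTNL() */
	printf("allocating %d sub-channels\n", count);
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&rtnl);
	rtnl_held = 1;
	set_subchannel(4);
	rtnl_held = 0;
	pthread_mutex_unlock(&rtnl);
	return 0;
}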
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 23c1d6600241..4a949569ec4c 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
@@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) | |||
75 | { | 75 | { |
76 | struct ipvl_dev *ipvlan; | 76 | struct ipvl_dev *ipvlan; |
77 | struct net_device *mdev = port->dev; | 77 | struct net_device *mdev = port->dev; |
78 | int err = 0; | 78 | unsigned int flags; |
79 | int err; | ||
79 | 80 | ||
80 | ASSERT_RTNL(); | 81 | ASSERT_RTNL(); |
81 | if (port->mode != nval) { | 82 | if (port->mode != nval) { |
83 | list_for_each_entry(ipvlan, &port->ipvlans, pnode) { | ||
84 | flags = ipvlan->dev->flags; | ||
85 | if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) { | ||
86 | err = dev_change_flags(ipvlan->dev, | ||
87 | flags | IFF_NOARP); | ||
88 | } else { | ||
89 | err = dev_change_flags(ipvlan->dev, | ||
90 | flags & ~IFF_NOARP); | ||
91 | } | ||
92 | if (unlikely(err)) | ||
93 | goto fail; | ||
94 | } | ||
82 | if (nval == IPVLAN_MODE_L3S) { | 95 | if (nval == IPVLAN_MODE_L3S) { |
83 | /* New mode is L3S */ | 96 | /* New mode is L3S */ |
84 | err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); | 97 | err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); |
@@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) | |||
86 | mdev->l3mdev_ops = &ipvl_l3mdev_ops; | 99 | mdev->l3mdev_ops = &ipvl_l3mdev_ops; |
87 | mdev->priv_flags |= IFF_L3MDEV_MASTER; | 100 | mdev->priv_flags |= IFF_L3MDEV_MASTER; |
88 | } else | 101 | } else |
89 | return err; | 102 | goto fail; |
90 | } else if (port->mode == IPVLAN_MODE_L3S) { | 103 | } else if (port->mode == IPVLAN_MODE_L3S) { |
91 | /* Old mode was L3S */ | 104 | /* Old mode was L3S */ |
92 | mdev->priv_flags &= ~IFF_L3MDEV_MASTER; | 105 | mdev->priv_flags &= ~IFF_L3MDEV_MASTER; |
93 | ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); | 106 | ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); |
94 | mdev->l3mdev_ops = NULL; | 107 | mdev->l3mdev_ops = NULL; |
95 | } | 108 | } |
96 | list_for_each_entry(ipvlan, &port->ipvlans, pnode) { | ||
97 | if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) | ||
98 | ipvlan->dev->flags |= IFF_NOARP; | ||
99 | else | ||
100 | ipvlan->dev->flags &= ~IFF_NOARP; | ||
101 | } | ||
102 | port->mode = nval; | 109 | port->mode = nval; |
103 | } | 110 | } |
111 | return 0; | ||
112 | |||
113 | fail: | ||
114 | /* Undo the flags changes that have been done so far. */ | ||
115 | list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) { | ||
116 | flags = ipvlan->dev->flags; | ||
117 | if (port->mode == IPVLAN_MODE_L3 || | ||
118 | port->mode == IPVLAN_MODE_L3S) | ||
119 | dev_change_flags(ipvlan->dev, flags | IFF_NOARP); | ||
120 | else | ||
121 | dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP); | ||
122 | } | ||
123 | |||
104 | return err; | 124 | return err; |
105 | } | 125 | } |
106 | 126 | ||
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index 081d99aa3985..49ac678eb2dc 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c | |||
@@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev) | |||
222 | if (err < 0) | 222 | if (err < 0) |
223 | return err; | 223 | return err; |
224 | 224 | ||
225 | err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); | 225 | err = phy_write(phydev, MII_DP83811_INT_STAT2, 0); |
226 | } | 226 | } |
227 | 227 | ||
228 | return err; | 228 | return err; |
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 8dff87ec6d99..2e4130746c40 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c | |||
@@ -64,6 +64,7 @@ | |||
64 | #define DEFAULT_RX_CSUM_ENABLE (true) | 64 | #define DEFAULT_RX_CSUM_ENABLE (true) |
65 | #define DEFAULT_TSO_CSUM_ENABLE (true) | 65 | #define DEFAULT_TSO_CSUM_ENABLE (true) |
66 | #define DEFAULT_VLAN_FILTER_ENABLE (true) | 66 | #define DEFAULT_VLAN_FILTER_ENABLE (true) |
67 | #define DEFAULT_VLAN_RX_OFFLOAD (true) | ||
67 | #define TX_OVERHEAD (8) | 68 | #define TX_OVERHEAD (8) |
68 | #define RXW_PADDING 2 | 69 | #define RXW_PADDING 2 |
69 | 70 | ||
@@ -2298,7 +2299,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) | |||
2298 | if ((ll_mtu % dev->maxpacket) == 0) | 2299 | if ((ll_mtu % dev->maxpacket) == 0) |
2299 | return -EDOM; | 2300 | return -EDOM; |
2300 | 2301 | ||
2301 | ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); | 2302 | ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN); |
2302 | 2303 | ||
2303 | netdev->mtu = new_mtu; | 2304 | netdev->mtu = new_mtu; |
2304 | 2305 | ||
@@ -2364,6 +2365,11 @@ static int lan78xx_set_features(struct net_device *netdev, | |||
2364 | } | 2365 | } |
2365 | 2366 | ||
2366 | if (features & NETIF_F_HW_VLAN_CTAG_RX) | 2367 | if (features & NETIF_F_HW_VLAN_CTAG_RX) |
2368 | pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_; | ||
2369 | else | ||
2370 | pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_; | ||
2371 | |||
2372 | if (features & NETIF_F_HW_VLAN_CTAG_FILTER) | ||
2367 | pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; | 2373 | pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; |
2368 | else | 2374 | else |
2369 | pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; | 2375 | pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; |
@@ -2587,7 +2593,8 @@ static int lan78xx_reset(struct lan78xx_net *dev) | |||
2587 | buf |= FCT_TX_CTL_EN_; | 2593 | buf |= FCT_TX_CTL_EN_; |
2588 | ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); | 2594 | ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); |
2589 | 2595 | ||
2590 | ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); | 2596 | ret = lan78xx_set_rx_max_frame_length(dev, |
2597 | dev->net->mtu + VLAN_ETH_HLEN); | ||
2591 | 2598 | ||
2592 | ret = lan78xx_read_reg(dev, MAC_RX, &buf); | 2599 | ret = lan78xx_read_reg(dev, MAC_RX, &buf); |
2593 | buf |= MAC_RX_RXEN_; | 2600 | buf |= MAC_RX_RXEN_; |
@@ -2975,6 +2982,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) | |||
2975 | if (DEFAULT_TSO_CSUM_ENABLE) | 2982 | if (DEFAULT_TSO_CSUM_ENABLE) |
2976 | dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; | 2983 | dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; |
2977 | 2984 | ||
2985 | if (DEFAULT_VLAN_RX_OFFLOAD) | ||
2986 | dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX; | ||
2987 | |||
2988 | if (DEFAULT_VLAN_FILTER_ENABLE) | ||
2989 | dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER; | ||
2990 | |||
2978 | dev->net->hw_features = dev->net->features; | 2991 | dev->net->hw_features = dev->net->features; |
2979 | 2992 | ||
2980 | ret = lan78xx_setup_irq_domain(dev); | 2993 | ret = lan78xx_setup_irq_domain(dev); |
@@ -3039,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, | |||
3039 | struct sk_buff *skb, | 3052 | struct sk_buff *skb, |
3040 | u32 rx_cmd_a, u32 rx_cmd_b) | 3053 | u32 rx_cmd_a, u32 rx_cmd_b) |
3041 | { | 3054 | { |
3055 | /* HW Checksum offload appears to be flawed if used when not stripping | ||
3056 | * VLAN headers. Drop back to S/W checksums under these conditions. | ||
3057 | */ | ||
3042 | if (!(dev->net->features & NETIF_F_RXCSUM) || | 3058 | if (!(dev->net->features & NETIF_F_RXCSUM) || |
3043 | unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) { | 3059 | unlikely(rx_cmd_a & RX_CMD_A_ICSM_) || |
3060 | ((rx_cmd_a & RX_CMD_A_FVTG_) && | ||
3061 | !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) { | ||
3044 | skb->ip_summed = CHECKSUM_NONE; | 3062 | skb->ip_summed = CHECKSUM_NONE; |
3045 | } else { | 3063 | } else { |
3046 | skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); | 3064 | skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); |
@@ -3048,6 +3066,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, | |||
3048 | } | 3066 | } |
3049 | } | 3067 | } |
3050 | 3068 | ||
3069 | static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev, | ||
3070 | struct sk_buff *skb, | ||
3071 | u32 rx_cmd_a, u32 rx_cmd_b) | ||
3072 | { | ||
3073 | if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) && | ||
3074 | (rx_cmd_a & RX_CMD_A_FVTG_)) | ||
3075 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), | ||
3076 | (rx_cmd_b & 0xffff)); | ||
3077 | } | ||
3078 | |||
3051 | static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) | 3079 | static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) |
3052 | { | 3080 | { |
3053 | int status; | 3081 | int status; |
@@ -3112,6 +3140,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) | |||
3112 | if (skb->len == size) { | 3140 | if (skb->len == size) { |
3113 | lan78xx_rx_csum_offload(dev, skb, | 3141 | lan78xx_rx_csum_offload(dev, skb, |
3114 | rx_cmd_a, rx_cmd_b); | 3142 | rx_cmd_a, rx_cmd_b); |
3143 | lan78xx_rx_vlan_offload(dev, skb, | ||
3144 | rx_cmd_a, rx_cmd_b); | ||
3115 | 3145 | ||
3116 | skb_trim(skb, skb->len - 4); /* remove fcs */ | 3146 | skb_trim(skb, skb->len - 4); /* remove fcs */ |
3117 | skb->truesize = size + sizeof(struct sk_buff); | 3147 | skb->truesize = size + sizeof(struct sk_buff); |
@@ -3130,6 +3160,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) | |||
3130 | skb_set_tail_pointer(skb2, size); | 3160 | skb_set_tail_pointer(skb2, size); |
3131 | 3161 | ||
3132 | lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); | 3162 | lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); |
3163 | lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b); | ||
3133 | 3164 | ||
3134 | skb_trim(skb2, skb2->len - 4); /* remove fcs */ | 3165 | skb_trim(skb2, skb2->len - 4); /* remove fcs */ |
3135 | skb2->truesize = size + sizeof(struct sk_buff); | 3166 | skb2->truesize = size + sizeof(struct sk_buff); |
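For the new lan78xx RX VLAN offload, the hardware flags a stripped tag via RX_CMD_A_FVTG_ and leaves the 16-bit TCI in the low half of rx_cmd_b; __vlan_hwaccel_put_tag() then re-attaches it as skb metadata. Decoding that TCI into its PCP/DEI/VID fields per 802.1Q (the descriptor word below is an invented example value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rx_cmd_b = 0xdead6064;      /* example RX descriptor word */
	uint16_t tci = rx_cmd_b & 0xffff;    /* what the driver extracts */

	printf("TCI  = 0x%04x\n", tci);
	printf("PCP  = %u\n", tci >> 13);           /* priority */
	printf("DEI  = %u\n", (tci >> 12) & 1);     /* drop eligible */
	printf("VID  = %u\n", tci & 0x0fff);        /* VLAN ID */
	return 0;
}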
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 86f7196f9d91..2a58607a6aea 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev) | |||
3962 | #ifdef CONFIG_PM_SLEEP | 3962 | #ifdef CONFIG_PM_SLEEP |
3963 | unregister_pm_notifier(&tp->pm_notifier); | 3963 | unregister_pm_notifier(&tp->pm_notifier); |
3964 | #endif | 3964 | #endif |
3965 | napi_disable(&tp->napi); | 3965 | if (!test_bit(RTL8152_UNPLUG, &tp->flags)) |
3966 | napi_disable(&tp->napi); | ||
3966 | clear_bit(WORK_ENABLE, &tp->flags); | 3967 | clear_bit(WORK_ENABLE, &tp->flags); |
3967 | usb_kill_urb(tp->intr_urb); | 3968 | usb_kill_urb(tp->intr_urb); |
3968 | cancel_delayed_work_sync(&tp->schedule); | 3969 | cancel_delayed_work_sync(&tp->schedule); |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b6c9a2af3732..53085c63277b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644); | |||
53 | /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ | 53 | /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ |
54 | #define VIRTIO_XDP_HEADROOM 256 | 54 | #define VIRTIO_XDP_HEADROOM 256 |
55 | 55 | ||
56 | /* Separating two types of XDP xmit */ | ||
57 | #define VIRTIO_XDP_TX BIT(0) | ||
58 | #define VIRTIO_XDP_REDIR BIT(1) | ||
59 | |||
56 | /* RX packet size EWMA. The average packet size is used to determine the packet | 60 | /* RX packet size EWMA. The average packet size is used to determine the packet |
57 | * buffer size when refilling RX rings. As the entire RX ring may be refilled | 61 | * buffer size when refilling RX rings. As the entire RX ring may be refilled |
58 | * at once, the weight is chosen so that the EWMA will be insensitive to short- | 62 | * at once, the weight is chosen so that the EWMA will be insensitive to short- |
@@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev, | |||
582 | struct receive_queue *rq, | 586 | struct receive_queue *rq, |
583 | void *buf, void *ctx, | 587 | void *buf, void *ctx, |
584 | unsigned int len, | 588 | unsigned int len, |
585 | bool *xdp_xmit) | 589 | unsigned int *xdp_xmit) |
586 | { | 590 | { |
587 | struct sk_buff *skb; | 591 | struct sk_buff *skb; |
588 | struct bpf_prog *xdp_prog; | 592 | struct bpf_prog *xdp_prog; |
@@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev, | |||
654 | trace_xdp_exception(vi->dev, xdp_prog, act); | 658 | trace_xdp_exception(vi->dev, xdp_prog, act); |
655 | goto err_xdp; | 659 | goto err_xdp; |
656 | } | 660 | } |
657 | *xdp_xmit = true; | 661 | *xdp_xmit |= VIRTIO_XDP_TX; |
658 | rcu_read_unlock(); | 662 | rcu_read_unlock(); |
659 | goto xdp_xmit; | 663 | goto xdp_xmit; |
660 | case XDP_REDIRECT: | 664 | case XDP_REDIRECT: |
661 | err = xdp_do_redirect(dev, &xdp, xdp_prog); | 665 | err = xdp_do_redirect(dev, &xdp, xdp_prog); |
662 | if (err) | 666 | if (err) |
663 | goto err_xdp; | 667 | goto err_xdp; |
664 | *xdp_xmit = true; | 668 | *xdp_xmit |= VIRTIO_XDP_REDIR; |
665 | rcu_read_unlock(); | 669 | rcu_read_unlock(); |
666 | goto xdp_xmit; | 670 | goto xdp_xmit; |
667 | default: | 671 | default: |
@@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
723 | void *buf, | 727 | void *buf, |
724 | void *ctx, | 728 | void *ctx, |
725 | unsigned int len, | 729 | unsigned int len, |
726 | bool *xdp_xmit) | 730 | unsigned int *xdp_xmit) |
727 | { | 731 | { |
728 | struct virtio_net_hdr_mrg_rxbuf *hdr = buf; | 732 | struct virtio_net_hdr_mrg_rxbuf *hdr = buf; |
729 | u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); | 733 | u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); |
@@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
818 | put_page(xdp_page); | 822 | put_page(xdp_page); |
819 | goto err_xdp; | 823 | goto err_xdp; |
820 | } | 824 | } |
821 | *xdp_xmit = true; | 825 | *xdp_xmit |= VIRTIO_XDP_TX; |
822 | if (unlikely(xdp_page != page)) | 826 | if (unlikely(xdp_page != page)) |
823 | put_page(page); | 827 | put_page(page); |
824 | rcu_read_unlock(); | 828 | rcu_read_unlock(); |
@@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
830 | put_page(xdp_page); | 834 | put_page(xdp_page); |
831 | goto err_xdp; | 835 | goto err_xdp; |
832 | } | 836 | } |
833 | *xdp_xmit = true; | 837 | *xdp_xmit |= VIRTIO_XDP_REDIR; |
834 | if (unlikely(xdp_page != page)) | 838 | if (unlikely(xdp_page != page)) |
835 | put_page(page); | 839 | put_page(page); |
836 | rcu_read_unlock(); | 840 | rcu_read_unlock(); |
@@ -939,7 +943,8 @@ xdp_xmit: | |||
939 | } | 943 | } |
940 | 944 | ||
941 | static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, | 945 | static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, |
942 | void *buf, unsigned int len, void **ctx, bool *xdp_xmit) | 946 | void *buf, unsigned int len, void **ctx, |
947 | unsigned int *xdp_xmit) | ||
943 | { | 948 | { |
944 | struct net_device *dev = vi->dev; | 949 | struct net_device *dev = vi->dev; |
945 | struct sk_buff *skb; | 950 | struct sk_buff *skb; |
@@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work) | |||
1232 | } | 1237 | } |
1233 | } | 1238 | } |
1234 | 1239 | ||
1235 | static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) | 1240 | static int virtnet_receive(struct receive_queue *rq, int budget, |
1241 | unsigned int *xdp_xmit) | ||
1236 | { | 1242 | { |
1237 | struct virtnet_info *vi = rq->vq->vdev->priv; | 1243 | struct virtnet_info *vi = rq->vq->vdev->priv; |
1238 | unsigned int len, received = 0, bytes = 0; | 1244 | unsigned int len, received = 0, bytes = 0; |
@@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) | |||
1321 | struct virtnet_info *vi = rq->vq->vdev->priv; | 1327 | struct virtnet_info *vi = rq->vq->vdev->priv; |
1322 | struct send_queue *sq; | 1328 | struct send_queue *sq; |
1323 | unsigned int received, qp; | 1329 | unsigned int received, qp; |
1324 | bool xdp_xmit = false; | 1330 | unsigned int xdp_xmit = 0; |
1325 | 1331 | ||
1326 | virtnet_poll_cleantx(rq); | 1332 | virtnet_poll_cleantx(rq); |
1327 | 1333 | ||
@@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget) | |||
1331 | if (received < budget) | 1337 | if (received < budget) |
1332 | virtqueue_napi_complete(napi, rq->vq, received); | 1338 | virtqueue_napi_complete(napi, rq->vq, received); |
1333 | 1339 | ||
1334 | if (xdp_xmit) { | 1340 | if (xdp_xmit & VIRTIO_XDP_REDIR) |
1341 | xdp_do_flush_map(); | ||
1342 | |||
1343 | if (xdp_xmit & VIRTIO_XDP_TX) { | ||
1335 | qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + | 1344 | qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + |
1336 | smp_processor_id(); | 1345 | smp_processor_id(); |
1337 | sq = &vi->sq[qp]; | 1346 | sq = &vi->sq[qp]; |
1338 | virtqueue_kick(sq->vq); | 1347 | virtqueue_kick(sq->vq); |
1339 | xdp_do_flush_map(); | ||
1340 | } | 1348 | } |
1341 | 1349 | ||
1342 | return received; | 1350 | return received; |
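virtio_net replaces the single bool with a two-bit mask so XDP_TX (kick our own TX queue) and XDP_REDIRECT (flush the redirect maps) each trigger exactly the completion step they need, and only once per poll. A compact model of accumulating per-packet verdicts and acting at the end:

#include <stdio.h>

#define XDP_TX_BIT    (1u << 0)   /* mirrors VIRTIO_XDP_TX */
#define XDP_REDIR_BIT (1u << 1)   /* mirrors VIRTIO_XDP_REDIR */

static unsigned int receive_one(int redirected)
{
	return redirected ? XDP_REDIR_BIT : XDP_TX_BIT;
}

int main(void)
{
	unsigned int xdp_xmit = 0;
	int pkt;

	/* per-packet: just accumulate what happened */
	for (pkt = 0; pkt < 4; pkt++)
		xdp_xmit |= receive_one(pkt & 1);

	/* once per poll: do each kind of flush at most once */
	if (xdp_xmit & XDP_REDIR_BIT)
		puts("xdp_do_flush_map()");
	if (xdp_xmit & XDP_TX_BIT)
		puts("virtqueue_kick()");
	return 0;
}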
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index aee0e60471f1..f6bb1d54d4bd 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, | |||
623 | flush = 0; | 623 | flush = 0; |
624 | 624 | ||
625 | out: | 625 | out: |
626 | skb_gro_remcsum_cleanup(skb, &grc); | 626 | skb_gro_flush_final_remcsum(skb, pp, flush, &grc); |
627 | skb->remcsum_offload = 0; | ||
628 | NAPI_GRO_CB(skb)->flush |= flush; | ||
629 | 627 | ||
630 | return pp; | 628 | return pp; |
631 | } | 629 | } |
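Both the geneve and vxlan GRO exits now funnel through the skb_gro_flush_final*() helpers, which skip the flush when pp carries the -EINPROGRESS marker (UDP GRO handing the skb off asynchronously). The kernel encodes that marker with the ERR_PTR convention; a tiny reimplementation of the check:

#include <stdio.h>

#define EINPROGRESS 115                      /* Linux errno value */
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))

static void flush_final(void *pp, int flush, int *gro_flush)
{
	/* Mirrors skb_gro_flush_final(): -EINPROGRESS means the skb was
	 * consumed elsewhere, so its GRO state must not be touched. */
	if (PTR_ERR(pp) != -EINPROGRESS)
		*gro_flush |= flush;
}

int main(void)
{
	int flags = 0;

	flush_final(ERR_PTR(-EINPROGRESS), 1, &flags);
	printf("after in-progress: %d\n", flags);   /* 0 */
	flush_final(NULL, 1, &flags);
	printf("after normal exit: %d\n", flags);   /* 1 */
	return 0;
}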
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 2a5fec55bf60..a246a618f9a4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -829,6 +829,17 @@ struct qeth_trap_id { | |||
829 | /*some helper functions*/ | 829 | /*some helper functions*/ |
830 | #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") | 830 | #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") |
831 | 831 | ||
832 | static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, | ||
833 | unsigned int elements) | ||
834 | { | ||
835 | unsigned int i; | ||
836 | |||
837 | for (i = 0; i < elements; i++) | ||
838 | memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element)); | ||
839 | buf->element[14].sflags = 0; | ||
840 | buf->element[15].sflags = 0; | ||
841 | } | ||
842 | |||
832 | /** | 843 | /** |
833 | * qeth_get_elements_for_range() - find number of SBALEs to cover range. | 844 | * qeth_get_elements_for_range() - find number of SBALEs to cover range. |
834 | * @start: Start of the address range. | 845 | * @start: Start of the address range. |
@@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, | |||
1029 | __u16, __u16, | 1040 | __u16, __u16, |
1030 | enum qeth_prot_versions); | 1041 | enum qeth_prot_versions); |
1031 | int qeth_set_features(struct net_device *, netdev_features_t); | 1042 | int qeth_set_features(struct net_device *, netdev_features_t); |
1032 | void qeth_recover_features(struct net_device *dev); | 1043 | void qeth_enable_hw_features(struct net_device *dev); |
1033 | netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); | 1044 | netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); |
1034 | netdev_features_t qeth_features_check(struct sk_buff *skb, | 1045 | netdev_features_t qeth_features_check(struct sk_buff *skb, |
1035 | struct net_device *dev, | 1046 | struct net_device *dev, |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 8e1474f1ffac..d01ac29fd986 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, | |||
73 | struct qeth_qdio_out_buffer *buf, | 73 | struct qeth_qdio_out_buffer *buf, |
74 | enum iucv_tx_notify notification); | 74 | enum iucv_tx_notify notification); |
75 | static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); | 75 | static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); |
76 | static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, | ||
77 | struct qeth_qdio_out_buffer *buf, | ||
78 | enum qeth_qdio_buffer_states newbufstate); | ||
79 | static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); | 76 | static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); |
80 | 77 | ||
81 | struct workqueue_struct *qeth_wq; | 78 | struct workqueue_struct *qeth_wq; |
@@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, | |||
489 | struct qaob *aob; | 486 | struct qaob *aob; |
490 | struct qeth_qdio_out_buffer *buffer; | 487 | struct qeth_qdio_out_buffer *buffer; |
491 | enum iucv_tx_notify notification; | 488 | enum iucv_tx_notify notification; |
489 | unsigned int i; | ||
492 | 490 | ||
493 | aob = (struct qaob *) phys_to_virt(phys_aob_addr); | 491 | aob = (struct qaob *) phys_to_virt(phys_aob_addr); |
494 | QETH_CARD_TEXT(card, 5, "haob"); | 492 | QETH_CARD_TEXT(card, 5, "haob"); |
@@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, | |||
513 | qeth_notify_skbs(buffer->q, buffer, notification); | 511 | qeth_notify_skbs(buffer->q, buffer, notification); |
514 | 512 | ||
515 | buffer->aob = NULL; | 513 | buffer->aob = NULL; |
516 | qeth_clear_output_buffer(buffer->q, buffer, | 514 | /* Free dangling allocations. The attached skbs are handled by |
517 | QETH_QDIO_BUF_HANDLED_DELAYED); | 515 | * qeth_cleanup_handled_pending(). |
516 | */ | ||
517 | for (i = 0; | ||
518 | i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); | ||
519 | i++) { | ||
520 | if (aob->sba[i] && buffer->is_header[i]) | ||
521 | kmem_cache_free(qeth_core_header_cache, | ||
522 | (void *) aob->sba[i]); | ||
523 | } | ||
524 | atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); | ||
518 | 525 | ||
519 | /* from here on: do not touch buffer anymore */ | ||
520 | qdio_release_aob(aob); | 526 | qdio_release_aob(aob); |
521 | } | 527 | } |
522 | 528 | ||
@@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, | |||
3759 | QETH_CARD_TEXT(queue->card, 5, "aob"); | 3765 | QETH_CARD_TEXT(queue->card, 5, "aob"); |
3760 | QETH_CARD_TEXT_(queue->card, 5, "%lx", | 3766 | QETH_CARD_TEXT_(queue->card, 5, "%lx", |
3761 | virt_to_phys(buffer->aob)); | 3767 | virt_to_phys(buffer->aob)); |
3768 | |||
3769 | /* prepare the queue slot for re-use: */ | ||
3770 | qeth_scrub_qdio_buffer(buffer->buffer, | ||
3771 | QETH_MAX_BUFFER_ELEMENTS(card)); | ||
3762 | if (qeth_init_qdio_out_buf(queue, bidx)) { | 3772 | if (qeth_init_qdio_out_buf(queue, bidx)) { |
3763 | QETH_CARD_TEXT(card, 2, "outofbuf"); | 3773 | QETH_CARD_TEXT(card, 2, "outofbuf"); |
3764 | qeth_schedule_recovery(card); | 3774 | qeth_schedule_recovery(card); |
@@ -4834,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card) | |||
4834 | goto out; | 4844 | goto out; |
4835 | } | 4845 | } |
4836 | 4846 | ||
4837 | ccw_device_get_id(CARD_RDEV(card), &id); | 4847 | ccw_device_get_id(CARD_DDEV(card), &id); |
4838 | request->resp_buf_len = sizeof(*response); | 4848 | request->resp_buf_len = sizeof(*response); |
4839 | request->resp_version = DIAG26C_VERSION2; | 4849 | request->resp_version = DIAG26C_VERSION2; |
4840 | request->op_code = DIAG26C_GET_MAC; | 4850 | request->op_code = DIAG26C_GET_MAC; |
@@ -6459,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) | |||
6459 | #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ | 6469 | #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ |
6460 | NETIF_F_IPV6_CSUM) | 6470 | NETIF_F_IPV6_CSUM) |
6461 | /** | 6471 | /** |
6462 | * qeth_recover_features() - Restore device features after recovery | 6472 | * qeth_enable_hw_features() - (Re-)Enable HW functions for device features |
6463 | * @dev: the recovering net_device | 6473 | * @dev: a net_device |
6464 | * | ||
6465 | * Caller must hold rtnl lock. | ||
6466 | */ | 6474 | */ |
6467 | void qeth_recover_features(struct net_device *dev) | 6475 | void qeth_enable_hw_features(struct net_device *dev) |
6468 | { | 6476 | { |
6469 | netdev_features_t features = dev->features; | ||
6470 | struct qeth_card *card = dev->ml_priv; | 6477 | struct qeth_card *card = dev->ml_priv; |
6478 | netdev_features_t features; | ||
6471 | 6479 | ||
6480 | rtnl_lock(); | ||
6481 | features = dev->features; | ||
6472 | /* force-off any feature that needs an IPA sequence. | 6482 | /* force-off any feature that needs an IPA sequence. |
6473 | * netdev_update_features() will restart them. | 6483 | * netdev_update_features() will restart them. |
6474 | */ | 6484 | */ |
6475 | dev->features &= ~QETH_HW_FEATURES; | 6485 | dev->features &= ~QETH_HW_FEATURES; |
6476 | netdev_update_features(dev); | 6486 | netdev_update_features(dev); |
6477 | 6487 | if (features != dev->features) | |
6478 | if (features == dev->features) | 6488 | dev_warn(&card->gdev->dev, |
6479 | return; | 6489 | "Device recovery failed to restore all offload features\n"); |
6480 | dev_warn(&card->gdev->dev, | 6490 | rtnl_unlock(); |
6481 | "Device recovery failed to restore all offload features\n"); | ||
6482 | } | 6491 | } |
6483 | EXPORT_SYMBOL_GPL(qeth_recover_features); | 6492 | EXPORT_SYMBOL_GPL(qeth_enable_hw_features); |
6484 | 6493 | ||
6485 | int qeth_set_features(struct net_device *dev, netdev_features_t features) | 6494 | int qeth_set_features(struct net_device *dev, netdev_features_t features) |
6486 | { | 6495 | { |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index a7cb37da6a21..2487f0aeb165 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) | |||
140 | 140 | ||
141 | static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) | 141 | static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) |
142 | { | 142 | { |
143 | enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? | 143 | enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? |
144 | IPA_CMD_SETGMAC : IPA_CMD_SETVMAC; | 144 | IPA_CMD_SETGMAC : IPA_CMD_SETVMAC; |
145 | int rc; | 145 | int rc; |
146 | 146 | ||
@@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) | |||
157 | 157 | ||
158 | static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) | 158 | static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) |
159 | { | 159 | { |
160 | enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? | 160 | enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? |
161 | IPA_CMD_DELGMAC : IPA_CMD_DELVMAC; | 161 | IPA_CMD_DELGMAC : IPA_CMD_DELVMAC; |
162 | int rc; | 162 | int rc; |
163 | 163 | ||
@@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) | |||
501 | return -ERESTARTSYS; | 501 | return -ERESTARTSYS; |
502 | } | 502 | } |
503 | 503 | ||
504 | /* avoid racing against concurrent state change: */ | ||
505 | if (!mutex_trylock(&card->conf_mutex)) | ||
506 | return -EAGAIN; | ||
507 | |||
504 | if (!qeth_card_hw_is_reachable(card)) { | 508 | if (!qeth_card_hw_is_reachable(card)) { |
505 | ether_addr_copy(dev->dev_addr, addr->sa_data); | 509 | ether_addr_copy(dev->dev_addr, addr->sa_data); |
506 | return 0; | 510 | goto out_unlock; |
507 | } | 511 | } |
508 | 512 | ||
509 | /* don't register the same address twice */ | 513 | /* don't register the same address twice */ |
510 | if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && | 514 | if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && |
511 | (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) | 515 | (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) |
512 | return 0; | 516 | goto out_unlock; |
513 | 517 | ||
514 | /* add the new address, switch over, drop the old */ | 518 | /* add the new address, switch over, drop the old */ |
515 | rc = qeth_l2_send_setmac(card, addr->sa_data); | 519 | rc = qeth_l2_send_setmac(card, addr->sa_data); |
516 | if (rc) | 520 | if (rc) |
517 | return rc; | 521 | goto out_unlock; |
518 | ether_addr_copy(old_addr, dev->dev_addr); | 522 | ether_addr_copy(old_addr, dev->dev_addr); |
519 | ether_addr_copy(dev->dev_addr, addr->sa_data); | 523 | ether_addr_copy(dev->dev_addr, addr->sa_data); |
520 | 524 | ||
521 | if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) | 525 | if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) |
522 | qeth_l2_remove_mac(card, old_addr); | 526 | qeth_l2_remove_mac(card, old_addr); |
523 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; | 527 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; |
524 | return 0; | 528 | |
529 | out_unlock: | ||
530 | mutex_unlock(&card->conf_mutex); | ||
531 | return rc; | ||
525 | } | 532 | } |
526 | 533 | ||
527 | static void qeth_promisc_to_bridge(struct qeth_card *card) | 534 | static void qeth_promisc_to_bridge(struct qeth_card *card) |
@@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
1112 | netif_carrier_off(card->dev); | 1119 | netif_carrier_off(card->dev); |
1113 | 1120 | ||
1114 | qeth_set_allowed_threads(card, 0xffffffff, 0); | 1121 | qeth_set_allowed_threads(card, 0xffffffff, 0); |
1122 | |||
1123 | qeth_enable_hw_features(card->dev); | ||
1115 | if (recover_flag == CARD_STATE_RECOVER) { | 1124 | if (recover_flag == CARD_STATE_RECOVER) { |
1116 | if (recovery_mode && | 1125 | if (recovery_mode && |
1117 | card->info.type != QETH_CARD_TYPE_OSN) { | 1126 | card->info.type != QETH_CARD_TYPE_OSN) { |
@@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
1123 | } | 1132 | } |
1124 | /* this also sets saved unicast addresses */ | 1133 | /* this also sets saved unicast addresses */ |
1125 | qeth_l2_set_rx_mode(card->dev); | 1134 | qeth_l2_set_rx_mode(card->dev); |
1126 | rtnl_lock(); | ||
1127 | qeth_recover_features(card->dev); | ||
1128 | rtnl_unlock(); | ||
1129 | } | 1135 | } |
1130 | /* let user_space know that device is online */ | 1136 | /* let user_space know that device is online */ |
1131 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); | 1137 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); |
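The qeth_l2 MAC-address path takes conf_mutex with mutex_trylock() and returns -EAGAIN on contention rather than sleeping: ndo_set_mac_address already runs under RTNL, and blocking there against a concurrent configuration change risks a lock-order deadlock. The shape of that guard, in runnable form:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;
static char dev_addr[18] = "00:00:00:00:00:00";

static int set_mac_address(const char *new_addr)
{
	/* Never sleep here: if configuration is in flux, tell the
	 * caller to retry instead of risking a lock-order deadlock. */
	if (pthread_mutex_trylock(&conf_mutex) != 0)
		return -EAGAIN;

	strncpy(dev_addr, new_addr, sizeof(dev_addr) - 1);
	pthread_mutex_unlock(&conf_mutex);
	return 0;
}

int main(void)
{
	int rc = set_mac_address("02:00:00:ca:fe:01");

	printf("rc=%d addr=%s\n", rc, dev_addr);
	return 0;
}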
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index e7fa479adf47..5905dc63e256 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
2662 | netif_carrier_on(card->dev); | 2662 | netif_carrier_on(card->dev); |
2663 | else | 2663 | else |
2664 | netif_carrier_off(card->dev); | 2664 | netif_carrier_off(card->dev); |
2665 | |||
2666 | qeth_enable_hw_features(card->dev); | ||
2665 | if (recover_flag == CARD_STATE_RECOVER) { | 2667 | if (recover_flag == CARD_STATE_RECOVER) { |
2666 | rtnl_lock(); | 2668 | rtnl_lock(); |
2667 | if (recovery_mode) | 2669 | if (recovery_mode) |
@@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
2669 | else | 2671 | else |
2670 | dev_open(card->dev); | 2672 | dev_open(card->dev); |
2671 | qeth_l3_set_rx_mode(card->dev); | 2673 | qeth_l3_set_rx_mode(card->dev); |
2672 | qeth_recover_features(card->dev); | ||
2673 | rtnl_unlock(); | 2674 | rtnl_unlock(); |
2674 | } | 2675 | } |
2675 | qeth_trace_features(card); | 2676 | qeth_trace_features(card); |
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 975fb4cf1bb7..79795c5fa7c3 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h | |||
@@ -188,12 +188,38 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, | |||
188 | \ | 188 | \ |
189 | __ret; \ | 189 | __ret; \ |
190 | }) | 190 | }) |
191 | int cgroup_bpf_prog_attach(const union bpf_attr *attr, | ||
192 | enum bpf_prog_type ptype, struct bpf_prog *prog); | ||
193 | int cgroup_bpf_prog_detach(const union bpf_attr *attr, | ||
194 | enum bpf_prog_type ptype); | ||
195 | int cgroup_bpf_prog_query(const union bpf_attr *attr, | ||
196 | union bpf_attr __user *uattr); | ||
191 | #else | 197 | #else |
192 | 198 | ||
199 | struct bpf_prog; | ||
193 | struct cgroup_bpf {}; | 200 | struct cgroup_bpf {}; |
194 | static inline void cgroup_bpf_put(struct cgroup *cgrp) {} | 201 | static inline void cgroup_bpf_put(struct cgroup *cgrp) {} |
195 | static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } | 202 | static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } |
196 | 203 | ||
204 | static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr, | ||
205 | enum bpf_prog_type ptype, | ||
206 | struct bpf_prog *prog) | ||
207 | { | ||
208 | return -EINVAL; | ||
209 | } | ||
210 | |||
211 | static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr, | ||
212 | enum bpf_prog_type ptype) | ||
213 | { | ||
214 | return -EINVAL; | ||
215 | } | ||
216 | |||
217 | static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, | ||
218 | union bpf_attr __user *uattr) | ||
219 | { | ||
220 | return -EINVAL; | ||
221 | } | ||
222 | |||
197 | #define cgroup_bpf_enabled (0) | 223 | #define cgroup_bpf_enabled (0) |
198 | #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) | 224 | #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) |
199 | #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) | 225 | #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) |
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 7df32a3200f7..8827e797ff97 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
@@ -696,6 +696,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map) | |||
696 | struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); | 696 | struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); |
697 | struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); | 697 | struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); |
698 | int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); | 698 | int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); |
699 | int sockmap_get_from_fd(const union bpf_attr *attr, int type, | ||
700 | struct bpf_prog *prog); | ||
699 | #else | 701 | #else |
700 | static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) | 702 | static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) |
701 | { | 703 | { |
@@ -714,6 +716,12 @@ static inline int sock_map_prog(struct bpf_map *map, | |||
714 | { | 716 | { |
715 | return -EOPNOTSUPP; | 717 | return -EOPNOTSUPP; |
716 | } | 718 | } |
719 | |||
720 | static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type, | ||
721 | struct bpf_prog *prog) | ||
722 | { | ||
723 | return -EINVAL; | ||
724 | } | ||
717 | #endif | 725 | #endif |
718 | 726 | ||
719 | #if defined(CONFIG_XDP_SOCKETS) | 727 | #if defined(CONFIG_XDP_SOCKETS) |
diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h index 5f8a4283092d..9d9ff755ec29 100644 --- a/include/linux/bpf_lirc.h +++ b/include/linux/bpf_lirc.h | |||
@@ -5,11 +5,12 @@ | |||
5 | #include <uapi/linux/bpf.h> | 5 | #include <uapi/linux/bpf.h> |
6 | 6 | ||
7 | #ifdef CONFIG_BPF_LIRC_MODE2 | 7 | #ifdef CONFIG_BPF_LIRC_MODE2 |
8 | int lirc_prog_attach(const union bpf_attr *attr); | 8 | int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); |
9 | int lirc_prog_detach(const union bpf_attr *attr); | 9 | int lirc_prog_detach(const union bpf_attr *attr); |
10 | int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); | 10 | int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); |
11 | #else | 11 | #else |
12 | static inline int lirc_prog_attach(const union bpf_attr *attr) | 12 | static inline int lirc_prog_attach(const union bpf_attr *attr, |
13 | struct bpf_prog *prog) | ||
13 | { | 14 | { |
14 | return -EINVAL; | 15 | return -EINVAL; |
15 | } | 16 | } |
diff --git a/include/linux/filter.h b/include/linux/filter.h index 20f2659dd829..300baad62c88 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -470,9 +470,7 @@ struct sock_fprog_kern { | |||
470 | }; | 470 | }; |
471 | 471 | ||
472 | struct bpf_binary_header { | 472 | struct bpf_binary_header { |
473 | u16 pages; | 473 | u32 pages; |
474 | u16 locked:1; | ||
475 | |||
476 | /* Some arches need word alignment for their instructions */ | 474 | /* Some arches need word alignment for their instructions */ |
477 | u8 image[] __aligned(4); | 475 | u8 image[] __aligned(4); |
478 | }; | 476 | }; |
@@ -481,7 +479,7 @@ struct bpf_prog { | |||
481 | u16 pages; /* Number of allocated pages */ | 479 | u16 pages; /* Number of allocated pages */ |
482 | u16 jited:1, /* Is our filter JIT'ed? */ | 480 | u16 jited:1, /* Is our filter JIT'ed? */ |
483 | jit_requested:1,/* archs need to JIT the prog */ | 481 | jit_requested:1,/* archs need to JIT the prog */ |
484 | locked:1, /* Program image locked? */ | 482 | undo_set_mem:1, /* Passed set_memory_ro() checkpoint */ |
485 | gpl_compatible:1, /* Is filter GPL compatible? */ | 483 | gpl_compatible:1, /* Is filter GPL compatible? */ |
486 | cb_access:1, /* Is control block accessed? */ | 484 | cb_access:1, /* Is control block accessed? */ |
487 | dst_needed:1, /* Do we need dst entry? */ | 485 | dst_needed:1, /* Do we need dst entry? */ |
@@ -677,46 +675,24 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) | |||
677 | 675 | ||
678 | static inline void bpf_prog_lock_ro(struct bpf_prog *fp) | 676 | static inline void bpf_prog_lock_ro(struct bpf_prog *fp) |
679 | { | 677 | { |
680 | #ifdef CONFIG_ARCH_HAS_SET_MEMORY | 678 | fp->undo_set_mem = 1; |
681 | fp->locked = 1; | 679 | set_memory_ro((unsigned long)fp, fp->pages); |
682 | if (set_memory_ro((unsigned long)fp, fp->pages)) | ||
683 | fp->locked = 0; | ||
684 | #endif | ||
685 | } | 680 | } |
686 | 681 | ||
687 | static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) | 682 | static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) |
688 | { | 683 | { |
689 | #ifdef CONFIG_ARCH_HAS_SET_MEMORY | 684 | if (fp->undo_set_mem) |
690 | if (fp->locked) { | 685 | set_memory_rw((unsigned long)fp, fp->pages); |
691 | WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages)); | ||
692 | /* In case set_memory_rw() fails, we want to be the first | ||
693 | * to crash here instead of some random place later on. | ||
694 | */ | ||
695 | fp->locked = 0; | ||
696 | } | ||
697 | #endif | ||
698 | } | 686 | } |
699 | 687 | ||
700 | static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) | 688 | static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) |
701 | { | 689 | { |
702 | #ifdef CONFIG_ARCH_HAS_SET_MEMORY | 690 | set_memory_ro((unsigned long)hdr, hdr->pages); |
703 | hdr->locked = 1; | ||
704 | if (set_memory_ro((unsigned long)hdr, hdr->pages)) | ||
705 | hdr->locked = 0; | ||
706 | #endif | ||
707 | } | 691 | } |
708 | 692 | ||
709 | static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) | 693 | static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) |
710 | { | 694 | { |
711 | #ifdef CONFIG_ARCH_HAS_SET_MEMORY | 695 | set_memory_rw((unsigned long)hdr, hdr->pages); |
712 | if (hdr->locked) { | ||
713 | WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages)); | ||
714 | /* In case set_memory_rw() fails, we want to be the first | ||
715 | * to crash here instead of some random place later on. | ||
716 | */ | ||
717 | hdr->locked = 0; | ||
718 | } | ||
719 | #endif | ||
720 | } | 696 | } |
721 | 697 | ||
722 | static inline struct bpf_binary_header * | 698 | static inline struct bpf_binary_header * |
@@ -728,22 +704,6 @@ bpf_jit_binary_hdr(const struct bpf_prog *fp) | |||
728 | return (void *)addr; | 704 | return (void *)addr; |
729 | } | 705 | } |
730 | 706 | ||
731 | #ifdef CONFIG_ARCH_HAS_SET_MEMORY | ||
732 | static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp) | ||
733 | { | ||
734 | if (!fp->locked) | ||
735 | return -ENOLCK; | ||
736 | if (fp->jited) { | ||
737 | const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); | ||
738 | |||
739 | if (!hdr->locked) | ||
740 | return -ENOLCK; | ||
741 | } | ||
742 | |||
743 | return 0; | ||
744 | } | ||
745 | #endif | ||
746 | |||
747 | int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); | 707 | int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); |
748 | static inline int sk_filter(struct sock *sk, struct sk_buff *skb) | 708 | static inline int sk_filter(struct sock *sk, struct sk_buff *skb) |
749 | { | 709 | { |
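[Note] With the locked/undo_set_mem bookkeeping removed from the binary header, bpf_jit_binary_lock_ro() and bpf_jit_binary_unlock_ro() become unconditional wrappers around set_memory_ro()/set_memory_rw(). A minimal sketch of the call pattern an arch JIT follows after this change; emit_insns(), proglen, prog and orig_prog are illustrative stand-ins, not names from this patch:

    struct bpf_binary_header *hdr;
    u8 *image;

    hdr = bpf_jit_binary_alloc(proglen, &image, 4, bpf_fill_ill_insns);
    if (!hdr)
            return orig_prog;           /* allocation failed, keep old prog */

    emit_insns(image);                  /* write instructions while pages are RW */
    bpf_jit_binary_lock_ro(hdr);        /* flip pages RO; no error path left */
    prog->bpf_func = (void *)image;
    prog->jited = 1;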
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index d3c9db492b30..fab5121ffb8f 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h | |||
@@ -8,6 +8,8 @@ | |||
8 | 8 | ||
9 | #include <linux/mlx5/driver.h> | 9 | #include <linux/mlx5/driver.h> |
10 | 10 | ||
11 | #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) | ||
12 | |||
11 | enum { | 13 | enum { |
12 | SRIOV_NONE, | 14 | SRIOV_NONE, |
13 | SRIOV_LEGACY, | 15 | SRIOV_LEGACY, |
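[Note] The new macro gives drivers a single capability gate for eswitch operations. A typical guard, sketched from how such checks are usually written (the surrounding function is hypothetical):

    if (!MLX5_ESWITCH_MANAGER(mdev))
            return -EOPNOTSUPP;     /* this device cannot manage the eswitch */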
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 27134c4fcb76..ac281f5ec9b8 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
@@ -922,7 +922,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { | |||
922 | u8 vnic_env_queue_counters[0x1]; | 922 | u8 vnic_env_queue_counters[0x1]; |
923 | u8 ets[0x1]; | 923 | u8 ets[0x1]; |
924 | u8 nic_flow_table[0x1]; | 924 | u8 nic_flow_table[0x1]; |
925 | u8 eswitch_flow_table[0x1]; | 925 | u8 eswitch_manager[0x1]; |
926 | u8 device_memory[0x1]; | 926 | u8 device_memory[0x1]; |
927 | u8 mcam_reg[0x1]; | 927 | u8 mcam_reg[0x1]; |
928 | u8 pcam_reg[0x1]; | 928 | u8 pcam_reg[0x1]; |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3ec9850c7936..3d0cc0b5cec2 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -2789,11 +2789,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, | |||
2789 | if (PTR_ERR(pp) != -EINPROGRESS) | 2789 | if (PTR_ERR(pp) != -EINPROGRESS) |
2790 | NAPI_GRO_CB(skb)->flush |= flush; | 2790 | NAPI_GRO_CB(skb)->flush |= flush; |
2791 | } | 2791 | } |
2792 | static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, | ||
2793 | struct sk_buff **pp, | ||
2794 | int flush, | ||
2795 | struct gro_remcsum *grc) | ||
2796 | { | ||
2797 | if (PTR_ERR(pp) != -EINPROGRESS) { | ||
2798 | NAPI_GRO_CB(skb)->flush |= flush; | ||
2799 | skb_gro_remcsum_cleanup(skb, grc); | ||
2800 | skb->remcsum_offload = 0; | ||
2801 | } | ||
2802 | } | ||
2792 | #else | 2803 | #else |
2793 | static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) | 2804 | static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) |
2794 | { | 2805 | { |
2795 | NAPI_GRO_CB(skb)->flush |= flush; | 2806 | NAPI_GRO_CB(skb)->flush |= flush; |
2796 | } | 2807 | } |
2808 | static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, | ||
2809 | struct sk_buff **pp, | ||
2810 | int flush, | ||
2811 | struct gro_remcsum *grc) | ||
2812 | { | ||
2813 | NAPI_GRO_CB(skb)->flush |= flush; | ||
2814 | skb_gro_remcsum_cleanup(skb, grc); | ||
2815 | skb->remcsum_offload = 0; | ||
2816 | } | ||
2797 | #endif | 2817 | #endif |
2798 | 2818 | ||
2799 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, | 2819 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, |
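[Note] The new helper folds the flush update, the remote-checksum scratch cleanup and the remcsum_offload reset into one call, and in the GRO-enabled variant skips the flush update while pp carries -EINPROGRESS. A sketch of the exit path of a tunnel gro_receive callback that adopts it; the function name and the parsing in the middle are illustrative:

    static struct sk_buff **tunnel_gro_receive(struct sock *sk,
                                               struct sk_buff **head,
                                               struct sk_buff *skb)
    {
            struct sk_buff **pp = NULL;
            struct gro_remcsum grc;
            int flush = 1;

            skb_gro_remcsum_init(&grc);

            /* ... parse the tunnel header; on a match, the inner
             * protocol's gro_receive sets pp and clears flush ...
             */

            skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
            return pp;
    }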
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 47e35cce3b64..a71264d75d7f 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
@@ -128,6 +128,7 @@ struct net { | |||
128 | #endif | 128 | #endif |
129 | #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) | 129 | #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) |
130 | struct netns_nf_frag nf_frag; | 130 | struct netns_nf_frag nf_frag; |
131 | struct ctl_table_header *nf_frag_frags_hdr; | ||
131 | #endif | 132 | #endif |
132 | struct sock *nfnl; | 133 | struct sock *nfnl; |
133 | struct sock *nfnl_stash; | 134 | struct sock *nfnl_stash; |
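[Note] Keeping the ctl_table_header in struct net lets the pernet exit path unregister exactly the table it registered instead of recomputing it. Sketched usage, assuming a pernet init/exit pair (table construction omitted):

    net->nf_frag_frags_hdr = register_net_sysctl(net, "net/netfilter", table);
    if (!net->nf_frag_frags_hdr)
            return -ENOMEM;

    /* ... and in the pernet exit handler: */
    unregister_net_sysctl_table(net->nf_frag_frags_hdr);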
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index c978a31b0f84..762ac9931b62 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h | |||
@@ -109,7 +109,6 @@ struct netns_ipv6 { | |||
109 | 109 | ||
110 | #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) | 110 | #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) |
111 | struct netns_nf_frag { | 111 | struct netns_nf_frag { |
112 | struct netns_sysctl_ipv6 sysctl; | ||
113 | struct netns_frags frags; | 112 | struct netns_frags frags; |
114 | }; | 113 | }; |
115 | #endif | 114 | #endif |
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index a3c1a2c47cd4..20b059574e60 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h | |||
@@ -111,6 +111,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, | |||
111 | { | 111 | { |
112 | } | 112 | } |
113 | 113 | ||
114 | static inline bool tcf_block_shared(struct tcf_block *block) | ||
115 | { | ||
116 | return false; | ||
117 | } | ||
118 | |||
114 | static inline struct Qdisc *tcf_block_q(struct tcf_block *block) | 119 | static inline struct Qdisc *tcf_block_q(struct tcf_block *block) |
115 | { | 120 | { |
116 | return NULL; | 121 | return NULL; |
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 59b19b6a40d7..b7db3261c62d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
@@ -1857,7 +1857,8 @@ union bpf_attr { | |||
1857 | * is resolved), the nexthop address is returned in ipv4_dst | 1857 | * is resolved), the nexthop address is returned in ipv4_dst |
1858 | * or ipv6_dst based on family, smac is set to mac address of | 1858 | * or ipv6_dst based on family, smac is set to mac address of |
1859 | * egress device, dmac is set to nexthop mac address, rt_metric | 1859 | * egress device, dmac is set to nexthop mac address, rt_metric |
1860 | * is set to metric from route (IPv4/IPv6 only). | 1860 | * is set to metric from route (IPv4/IPv6 only), and ifindex |
1861 | * is set to the device index of the nexthop from the FIB lookup. | ||
1861 | * | 1862 | * |
1862 | * *plen* argument is the size of the passed in struct. | 1863 | * *plen* argument is the size of the passed in struct. |
1863 | * *flags* argument can be a combination of one or more of the | 1864 | * *flags* argument can be a combination of one or more of the |
@@ -1873,9 +1874,10 @@ union bpf_attr { | |||
1873 | * *ctx* is either **struct xdp_md** for XDP programs or | 1874 | * *ctx* is either **struct xdp_md** for XDP programs or |
1874 | * **struct sk_buff** for tc cls_act programs. | 1875 | * **struct sk_buff** for tc cls_act programs. |
1875 | * Return | 1876 | * Return |
1876 | * Egress device index on success, 0 if packet needs to continue | 1877 | * * < 0 if any input argument is invalid |
1877 | * up the stack for further processing or a negative error in case | 1878 | * * 0 on success (packet is forwarded, nexthop neighbor exists) |
1878 | * of failure. | 1879 | * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the |
1880 | * * packet is not forwarded or needs assist from full stack | ||
1879 | * | 1881 | * |
1880 | * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) | 1882 | * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) |
1881 | * Description | 1883 | * Description |
@@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args { | |||
2612 | #define BPF_FIB_LOOKUP_DIRECT BIT(0) | 2614 | #define BPF_FIB_LOOKUP_DIRECT BIT(0) |
2613 | #define BPF_FIB_LOOKUP_OUTPUT BIT(1) | 2615 | #define BPF_FIB_LOOKUP_OUTPUT BIT(1) |
2614 | 2616 | ||
2617 | enum { | ||
2618 | BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ | ||
2619 | BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ | ||
2620 | BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ | ||
2621 | BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ | ||
2622 | BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ | ||
2623 | BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ | ||
2624 | BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ | ||
2625 | BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ | ||
2626 | BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ | ||
2627 | }; | ||
2628 | |||
2615 | struct bpf_fib_lookup { | 2629 | struct bpf_fib_lookup { |
2616 | /* input: network family for lookup (AF_INET, AF_INET6) | 2630 | /* input: network family for lookup (AF_INET, AF_INET6) |
2617 | * output: network family of egress nexthop | 2631 | * output: network family of egress nexthop |
@@ -2625,7 +2639,11 @@ struct bpf_fib_lookup { | |||
2625 | 2639 | ||
2626 | /* total length of packet from network header - used for MTU check */ | 2640 | /* total length of packet from network header - used for MTU check */ |
2627 | __u16 tot_len; | 2641 | __u16 tot_len; |
2628 | __u32 ifindex; /* L3 device index for lookup */ | 2642 | |
2643 | /* input: L3 device index for lookup | ||
2644 | * output: device index from FIB lookup | ||
2645 | */ | ||
2646 | __u32 ifindex; | ||
2629 | 2647 | ||
2630 | union { | 2648 | union { |
2631 | /* inputs to lookup */ | 2649 | /* inputs to lookup */ |
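[Note] Under the new contract a program branches on the lookup status rather than on an ifindex. A sketch of an XDP consumer; iph and eth are assumed to be bounds-checked earlier, and the drop/pass policy is illustrative:

    struct bpf_fib_lookup fib_params = {};
    int rc;

    fib_params.family   = AF_INET;
    fib_params.ipv4_dst = iph->daddr;
    fib_params.tot_len  = bpf_ntohs(iph->tot_len);
    fib_params.ifindex  = ctx->ingress_ifindex;

    rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), 0);
    switch (rc) {
    case BPF_FIB_LKUP_RET_SUCCESS:
            /* fib_params.ifindex now holds the egress device */
            __builtin_memcpy(eth->h_dest,   fib_params.dmac, ETH_ALEN);
            __builtin_memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
            return bpf_redirect(fib_params.ifindex, 0);
    case BPF_FIB_LKUP_RET_BLACKHOLE:
    case BPF_FIB_LKUP_RET_UNREACHABLE:
    case BPF_FIB_LKUP_RET_PROHIBIT:
            return XDP_DROP;        /* safe to drop per the FIB */
    default:
            return XDP_PASS;        /* needs assist from the full stack */
    }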
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index f7c00bd6f8e4..3d83ee7df381 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c | |||
@@ -428,6 +428,60 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, | |||
428 | return ret; | 428 | return ret; |
429 | } | 429 | } |
430 | 430 | ||
431 | int cgroup_bpf_prog_attach(const union bpf_attr *attr, | ||
432 | enum bpf_prog_type ptype, struct bpf_prog *prog) | ||
433 | { | ||
434 | struct cgroup *cgrp; | ||
435 | int ret; | ||
436 | |||
437 | cgrp = cgroup_get_from_fd(attr->target_fd); | ||
438 | if (IS_ERR(cgrp)) | ||
439 | return PTR_ERR(cgrp); | ||
440 | |||
441 | ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, | ||
442 | attr->attach_flags); | ||
443 | cgroup_put(cgrp); | ||
444 | return ret; | ||
445 | } | ||
446 | |||
447 | int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) | ||
448 | { | ||
449 | struct bpf_prog *prog; | ||
450 | struct cgroup *cgrp; | ||
451 | int ret; | ||
452 | |||
453 | cgrp = cgroup_get_from_fd(attr->target_fd); | ||
454 | if (IS_ERR(cgrp)) | ||
455 | return PTR_ERR(cgrp); | ||
456 | |||
457 | prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); | ||
458 | if (IS_ERR(prog)) | ||
459 | prog = NULL; | ||
460 | |||
461 | ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); | ||
462 | if (prog) | ||
463 | bpf_prog_put(prog); | ||
464 | |||
465 | cgroup_put(cgrp); | ||
466 | return ret; | ||
467 | } | ||
468 | |||
469 | int cgroup_bpf_prog_query(const union bpf_attr *attr, | ||
470 | union bpf_attr __user *uattr) | ||
471 | { | ||
472 | struct cgroup *cgrp; | ||
473 | int ret; | ||
474 | |||
475 | cgrp = cgroup_get_from_fd(attr->query.target_fd); | ||
476 | if (IS_ERR(cgrp)) | ||
477 | return PTR_ERR(cgrp); | ||
478 | |||
479 | ret = cgroup_bpf_query(cgrp, attr, uattr); | ||
480 | |||
481 | cgroup_put(cgrp); | ||
482 | return ret; | ||
483 | } | ||
484 | |||
431 | /** | 485 | /** |
432 | * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering | 486 | * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering |
433 | * @sk: The socket sending or receiving traffic | 487 | * @sk: The socket sending or receiving traffic |
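[Note] The kernel-side routing moved, but the syscall ABI these wrappers serve is unchanged. A userspace sketch of the attr fields cgroup_bpf_prog_attach() consumes; cgroup_fd and prog_fd are assumed to come from open(2) on a cgroup directory and BPF_PROG_LOAD respectively:

    union bpf_attr attr = {};

    attr.target_fd     = cgroup_fd;
    attr.attach_bpf_fd = prog_fd;
    attr.attach_type   = BPF_CGROUP_INET_EGRESS;
    attr.attach_flags  = BPF_F_ALLOW_MULTI;

    if (syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr)))
            perror("BPF_PROG_ATTACH");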
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index a9e6c04d0f4a..1e5625d46414 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c | |||
@@ -598,8 +598,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, | |||
598 | bpf_fill_ill_insns(hdr, size); | 598 | bpf_fill_ill_insns(hdr, size); |
599 | 599 | ||
600 | hdr->pages = size / PAGE_SIZE; | 600 | hdr->pages = size / PAGE_SIZE; |
601 | hdr->locked = 0; | ||
602 | |||
603 | hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), | 601 | hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), |
604 | PAGE_SIZE - sizeof(*hdr)); | 602 | PAGE_SIZE - sizeof(*hdr)); |
605 | start = (get_random_int() % hole) & ~(alignment - 1); | 603 | start = (get_random_int() % hole) & ~(alignment - 1); |
@@ -1450,22 +1448,6 @@ static int bpf_check_tail_call(const struct bpf_prog *fp) | |||
1450 | return 0; | 1448 | return 0; |
1451 | } | 1449 | } |
1452 | 1450 | ||
1453 | static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp) | ||
1454 | { | ||
1455 | #ifdef CONFIG_ARCH_HAS_SET_MEMORY | ||
1456 | int i, err; | ||
1457 | |||
1458 | for (i = 0; i < fp->aux->func_cnt; i++) { | ||
1459 | err = bpf_prog_check_pages_ro_single(fp->aux->func[i]); | ||
1460 | if (err) | ||
1461 | return err; | ||
1462 | } | ||
1463 | |||
1464 | return bpf_prog_check_pages_ro_single(fp); | ||
1465 | #endif | ||
1466 | return 0; | ||
1467 | } | ||
1468 | |||
1469 | static void bpf_prog_select_func(struct bpf_prog *fp) | 1451 | static void bpf_prog_select_func(struct bpf_prog *fp) |
1470 | { | 1452 | { |
1471 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON | 1453 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON |
@@ -1524,17 +1506,7 @@ finalize: | |||
1524 | * all eBPF JITs might immediately support all features. | 1506 | * all eBPF JITs might immediately support all features. |
1525 | */ | 1507 | */ |
1526 | *err = bpf_check_tail_call(fp); | 1508 | *err = bpf_check_tail_call(fp); |
1527 | if (*err) | 1509 | |
1528 | return fp; | ||
1529 | |||
1530 | /* Checkpoint: at this point onwards any cBPF -> eBPF or | ||
1531 | * native eBPF program is read-only. If we failed to change | ||
1532 | * the page attributes (e.g. allocation failure from | ||
1533 | * splitting large pages), then reject the whole program | ||
1534 | * in order to guarantee not ending up with any W+X pages | ||
1535 | * from BPF side in kernel. | ||
1536 | */ | ||
1537 | *err = bpf_prog_check_pages_ro_locked(fp); | ||
1538 | return fp; | 1510 | return fp; |
1539 | } | 1511 | } |
1540 | EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); | 1512 | EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); |
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 52a91d816c0e..cf7b6a6dbd1f 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c | |||
@@ -72,6 +72,7 @@ struct bpf_htab { | |||
72 | u32 n_buckets; | 72 | u32 n_buckets; |
73 | u32 elem_size; | 73 | u32 elem_size; |
74 | struct bpf_sock_progs progs; | 74 | struct bpf_sock_progs progs; |
75 | struct rcu_head rcu; | ||
75 | }; | 76 | }; |
76 | 77 | ||
77 | struct htab_elem { | 78 | struct htab_elem { |
@@ -89,8 +90,8 @@ enum smap_psock_state { | |||
89 | struct smap_psock_map_entry { | 90 | struct smap_psock_map_entry { |
90 | struct list_head list; | 91 | struct list_head list; |
91 | struct sock **entry; | 92 | struct sock **entry; |
92 | struct htab_elem *hash_link; | 93 | struct htab_elem __rcu *hash_link; |
93 | struct bpf_htab *htab; | 94 | struct bpf_htab __rcu *htab; |
94 | }; | 95 | }; |
95 | 96 | ||
96 | struct smap_psock { | 97 | struct smap_psock { |
@@ -120,6 +121,7 @@ struct smap_psock { | |||
120 | struct bpf_prog *bpf_parse; | 121 | struct bpf_prog *bpf_parse; |
121 | struct bpf_prog *bpf_verdict; | 122 | struct bpf_prog *bpf_verdict; |
122 | struct list_head maps; | 123 | struct list_head maps; |
124 | spinlock_t maps_lock; | ||
123 | 125 | ||
124 | /* Back reference used when sock callback trigger sockmap operations */ | 126 | /* Back reference used when sock callback trigger sockmap operations */ |
125 | struct sock *sock; | 127 | struct sock *sock; |
@@ -140,6 +142,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | |||
140 | static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); | 142 | static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); |
141 | static int bpf_tcp_sendpage(struct sock *sk, struct page *page, | 143 | static int bpf_tcp_sendpage(struct sock *sk, struct page *page, |
142 | int offset, size_t size, int flags); | 144 | int offset, size_t size, int flags); |
145 | static void bpf_tcp_close(struct sock *sk, long timeout); | ||
143 | 146 | ||
144 | static inline struct smap_psock *smap_psock_sk(const struct sock *sk) | 147 | static inline struct smap_psock *smap_psock_sk(const struct sock *sk) |
145 | { | 148 | { |
@@ -161,7 +164,42 @@ out: | |||
161 | return !empty; | 164 | return !empty; |
162 | } | 165 | } |
163 | 166 | ||
164 | static struct proto tcp_bpf_proto; | 167 | enum { |
168 | SOCKMAP_IPV4, | ||
169 | SOCKMAP_IPV6, | ||
170 | SOCKMAP_NUM_PROTS, | ||
171 | }; | ||
172 | |||
173 | enum { | ||
174 | SOCKMAP_BASE, | ||
175 | SOCKMAP_TX, | ||
176 | SOCKMAP_NUM_CONFIGS, | ||
177 | }; | ||
178 | |||
179 | static struct proto *saved_tcpv6_prot __read_mostly; | ||
180 | static DEFINE_SPINLOCK(tcpv6_prot_lock); | ||
181 | static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS]; | ||
182 | static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS], | ||
183 | struct proto *base) | ||
184 | { | ||
185 | prot[SOCKMAP_BASE] = *base; | ||
186 | prot[SOCKMAP_BASE].close = bpf_tcp_close; | ||
187 | prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg; | ||
188 | prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read; | ||
189 | |||
190 | prot[SOCKMAP_TX] = prot[SOCKMAP_BASE]; | ||
191 | prot[SOCKMAP_TX].sendmsg = bpf_tcp_sendmsg; | ||
192 | prot[SOCKMAP_TX].sendpage = bpf_tcp_sendpage; | ||
193 | } | ||
194 | |||
195 | static void update_sk_prot(struct sock *sk, struct smap_psock *psock) | ||
196 | { | ||
197 | int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4; | ||
198 | int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE; | ||
199 | |||
200 | sk->sk_prot = &bpf_tcp_prots[family][conf]; | ||
201 | } | ||
202 | |||
165 | static int bpf_tcp_init(struct sock *sk) | 203 | static int bpf_tcp_init(struct sock *sk) |
166 | { | 204 | { |
167 | struct smap_psock *psock; | 205 | struct smap_psock *psock; |
@@ -181,14 +219,17 @@ static int bpf_tcp_init(struct sock *sk) | |||
181 | psock->save_close = sk->sk_prot->close; | 219 | psock->save_close = sk->sk_prot->close; |
182 | psock->sk_proto = sk->sk_prot; | 220 | psock->sk_proto = sk->sk_prot; |
183 | 221 | ||
184 | if (psock->bpf_tx_msg) { | 222 | /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */ |
185 | tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg; | 223 | if (sk->sk_family == AF_INET6 && |
186 | tcp_bpf_proto.sendpage = bpf_tcp_sendpage; | 224 | unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) { |
187 | tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg; | 225 | spin_lock_bh(&tcpv6_prot_lock); |
188 | tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read; | 226 | if (likely(sk->sk_prot != saved_tcpv6_prot)) { |
227 | build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot); | ||
228 | smp_store_release(&saved_tcpv6_prot, sk->sk_prot); | ||
229 | } | ||
230 | spin_unlock_bh(&tcpv6_prot_lock); | ||
189 | } | 231 | } |
190 | 232 | update_sk_prot(sk, psock); | |
191 | sk->sk_prot = &tcp_bpf_proto; | ||
192 | rcu_read_unlock(); | 233 | rcu_read_unlock(); |
193 | return 0; | 234 | return 0; |
194 | } | 235 | } |
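[Note] The one-time IPv6 build uses double-checked locking: a lock-free acquire-load fast path, then a re-check under the spinlock before publishing with a release-store, so build_protos() runs at most once per tcpv6_prot address. The generic shape of the idiom, sketched with hypothetical names:

    static struct proto *saved __read_mostly;
    static DEFINE_SPINLOCK(saved_lock);

    static void maybe_build(struct proto *cur)
    {
            if (likely(cur == smp_load_acquire(&saved)))
                    return;                 /* already built for this proto */

            spin_lock_bh(&saved_lock);
            if (cur != saved) {             /* re-check under the lock */
                    /* ... derive the sockmap protos from *cur ... */
                    smp_store_release(&saved, cur);
            }
            spin_unlock_bh(&saved_lock);
    }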
@@ -219,16 +260,54 @@ out: | |||
219 | rcu_read_unlock(); | 260 | rcu_read_unlock(); |
220 | } | 261 | } |
221 | 262 | ||
263 | static struct htab_elem *lookup_elem_raw(struct hlist_head *head, | ||
264 | u32 hash, void *key, u32 key_size) | ||
265 | { | ||
266 | struct htab_elem *l; | ||
267 | |||
268 | hlist_for_each_entry_rcu(l, head, hash_node) { | ||
269 | if (l->hash == hash && !memcmp(&l->key, key, key_size)) | ||
270 | return l; | ||
271 | } | ||
272 | |||
273 | return NULL; | ||
274 | } | ||
275 | |||
276 | static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) | ||
277 | { | ||
278 | return &htab->buckets[hash & (htab->n_buckets - 1)]; | ||
279 | } | ||
280 | |||
281 | static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) | ||
282 | { | ||
283 | return &__select_bucket(htab, hash)->head; | ||
284 | } | ||
285 | |||
222 | static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) | 286 | static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) |
223 | { | 287 | { |
224 | atomic_dec(&htab->count); | 288 | atomic_dec(&htab->count); |
225 | kfree_rcu(l, rcu); | 289 | kfree_rcu(l, rcu); |
226 | } | 290 | } |
227 | 291 | ||
292 | static struct smap_psock_map_entry *psock_map_pop(struct sock *sk, | ||
293 | struct smap_psock *psock) | ||
294 | { | ||
295 | struct smap_psock_map_entry *e; | ||
296 | |||
297 | spin_lock_bh(&psock->maps_lock); | ||
298 | e = list_first_entry_or_null(&psock->maps, | ||
299 | struct smap_psock_map_entry, | ||
300 | list); | ||
301 | if (e) | ||
302 | list_del(&e->list); | ||
303 | spin_unlock_bh(&psock->maps_lock); | ||
304 | return e; | ||
305 | } | ||
306 | |||
228 | static void bpf_tcp_close(struct sock *sk, long timeout) | 307 | static void bpf_tcp_close(struct sock *sk, long timeout) |
229 | { | 308 | { |
230 | void (*close_fun)(struct sock *sk, long timeout); | 309 | void (*close_fun)(struct sock *sk, long timeout); |
231 | struct smap_psock_map_entry *e, *tmp; | 310 | struct smap_psock_map_entry *e; |
232 | struct sk_msg_buff *md, *mtmp; | 311 | struct sk_msg_buff *md, *mtmp; |
233 | struct smap_psock *psock; | 312 | struct smap_psock *psock; |
234 | struct sock *osk; | 313 | struct sock *osk; |
@@ -247,7 +326,6 @@ static void bpf_tcp_close(struct sock *sk, long timeout) | |||
247 | */ | 326 | */ |
248 | close_fun = psock->save_close; | 327 | close_fun = psock->save_close; |
249 | 328 | ||
250 | write_lock_bh(&sk->sk_callback_lock); | ||
251 | if (psock->cork) { | 329 | if (psock->cork) { |
252 | free_start_sg(psock->sock, psock->cork); | 330 | free_start_sg(psock->sock, psock->cork); |
253 | kfree(psock->cork); | 331 | kfree(psock->cork); |
@@ -260,20 +338,38 @@ static void bpf_tcp_close(struct sock *sk, long timeout) | |||
260 | kfree(md); | 338 | kfree(md); |
261 | } | 339 | } |
262 | 340 | ||
263 | list_for_each_entry_safe(e, tmp, &psock->maps, list) { | 341 | e = psock_map_pop(sk, psock); |
342 | while (e) { | ||
264 | if (e->entry) { | 343 | if (e->entry) { |
265 | osk = cmpxchg(e->entry, sk, NULL); | 344 | osk = cmpxchg(e->entry, sk, NULL); |
266 | if (osk == sk) { | 345 | if (osk == sk) { |
267 | list_del(&e->list); | ||
268 | smap_release_sock(psock, sk); | 346 | smap_release_sock(psock, sk); |
269 | } | 347 | } |
270 | } else { | 348 | } else { |
271 | hlist_del_rcu(&e->hash_link->hash_node); | 349 | struct htab_elem *link = rcu_dereference(e->hash_link); |
272 | smap_release_sock(psock, e->hash_link->sk); | 350 | struct bpf_htab *htab = rcu_dereference(e->htab); |
273 | free_htab_elem(e->htab, e->hash_link); | 351 | struct hlist_head *head; |
352 | struct htab_elem *l; | ||
353 | struct bucket *b; | ||
354 | |||
355 | b = __select_bucket(htab, link->hash); | ||
356 | head = &b->head; | ||
357 | raw_spin_lock_bh(&b->lock); | ||
358 | l = lookup_elem_raw(head, | ||
359 | link->hash, link->key, | ||
360 | htab->map.key_size); | ||
361 | /* If another thread deleted this object skip deletion. | ||
362 | * The refcnt on psock may or may not be zero. | ||
363 | */ | ||
364 | if (l) { | ||
365 | hlist_del_rcu(&link->hash_node); | ||
366 | smap_release_sock(psock, link->sk); | ||
367 | free_htab_elem(htab, link); | ||
368 | } | ||
369 | raw_spin_unlock_bh(&b->lock); | ||
274 | } | 370 | } |
371 | e = psock_map_pop(sk, psock); | ||
275 | } | 372 | } |
276 | write_unlock_bh(&sk->sk_callback_lock); | ||
277 | rcu_read_unlock(); | 373 | rcu_read_unlock(); |
278 | close_fun(sk, timeout); | 374 | close_fun(sk, timeout); |
279 | } | 375 | } |
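[Note] Teardown now detaches one entry at a time under maps_lock and does the heavier work outside it, so smap_release_sock() and free_htab_elem() never run with the list lock held. The resulting drain pattern, sketched (release_map_entry() is a hypothetical stand-in for the entry/hash_link branches above):

    struct smap_psock_map_entry *e;

    while ((e = psock_map_pop(sk, psock)) != NULL)
            release_map_entry(psock, e);    /* e is already unlinked; no
                                             * other walker can see it */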
@@ -1111,8 +1207,7 @@ static void bpf_tcp_msg_add(struct smap_psock *psock, | |||
1111 | 1207 | ||
1112 | static int bpf_tcp_ulp_register(void) | 1208 | static int bpf_tcp_ulp_register(void) |
1113 | { | 1209 | { |
1114 | tcp_bpf_proto = tcp_prot; | 1210 | build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot); |
1115 | tcp_bpf_proto.close = bpf_tcp_close; | ||
1116 | /* Once BPF TX ULP is registered it is never unregistered. It | 1211 | /* Once BPF TX ULP is registered it is never unregistered. It |
1117 | * will be in the ULP list for the lifetime of the system. Doing | 1212 | * will be in the ULP list for the lifetime of the system. Doing |
1118 | * duplicate registers is not a problem. | 1213 | * duplicate registers is not a problem. |
@@ -1357,7 +1452,9 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock) | |||
1357 | { | 1452 | { |
1358 | if (refcount_dec_and_test(&psock->refcnt)) { | 1453 | if (refcount_dec_and_test(&psock->refcnt)) { |
1359 | tcp_cleanup_ulp(sock); | 1454 | tcp_cleanup_ulp(sock); |
1455 | write_lock_bh(&sock->sk_callback_lock); | ||
1360 | smap_stop_sock(psock, sock); | 1456 | smap_stop_sock(psock, sock); |
1457 | write_unlock_bh(&sock->sk_callback_lock); | ||
1361 | clear_bit(SMAP_TX_RUNNING, &psock->state); | 1458 | clear_bit(SMAP_TX_RUNNING, &psock->state); |
1362 | rcu_assign_sk_user_data(sock, NULL); | 1459 | rcu_assign_sk_user_data(sock, NULL); |
1363 | call_rcu_sched(&psock->rcu, smap_destroy_psock); | 1460 | call_rcu_sched(&psock->rcu, smap_destroy_psock); |
@@ -1508,6 +1605,7 @@ static struct smap_psock *smap_init_psock(struct sock *sock, int node) | |||
1508 | INIT_LIST_HEAD(&psock->maps); | 1605 | INIT_LIST_HEAD(&psock->maps); |
1509 | INIT_LIST_HEAD(&psock->ingress); | 1606 | INIT_LIST_HEAD(&psock->ingress); |
1510 | refcount_set(&psock->refcnt, 1); | 1607 | refcount_set(&psock->refcnt, 1); |
1608 | spin_lock_init(&psock->maps_lock); | ||
1511 | 1609 | ||
1512 | rcu_assign_sk_user_data(sock, psock); | 1610 | rcu_assign_sk_user_data(sock, psock); |
1513 | sock_hold(sock); | 1611 | sock_hold(sock); |
@@ -1564,18 +1662,32 @@ free_stab: | |||
1564 | return ERR_PTR(err); | 1662 | return ERR_PTR(err); |
1565 | } | 1663 | } |
1566 | 1664 | ||
1567 | static void smap_list_remove(struct smap_psock *psock, | 1665 | static void smap_list_map_remove(struct smap_psock *psock, |
1568 | struct sock **entry, | 1666 | struct sock **entry) |
1569 | struct htab_elem *hash_link) | ||
1570 | { | 1667 | { |
1571 | struct smap_psock_map_entry *e, *tmp; | 1668 | struct smap_psock_map_entry *e, *tmp; |
1572 | 1669 | ||
1670 | spin_lock_bh(&psock->maps_lock); | ||
1573 | list_for_each_entry_safe(e, tmp, &psock->maps, list) { | 1671 | list_for_each_entry_safe(e, tmp, &psock->maps, list) { |
1574 | if (e->entry == entry || e->hash_link == hash_link) { | 1672 | if (e->entry == entry) |
1575 | list_del(&e->list); | 1673 | list_del(&e->list); |
1576 | break; | ||
1577 | } | ||
1578 | } | 1674 | } |
1675 | spin_unlock_bh(&psock->maps_lock); | ||
1676 | } | ||
1677 | |||
1678 | static void smap_list_hash_remove(struct smap_psock *psock, | ||
1679 | struct htab_elem *hash_link) | ||
1680 | { | ||
1681 | struct smap_psock_map_entry *e, *tmp; | ||
1682 | |||
1683 | spin_lock_bh(&psock->maps_lock); | ||
1684 | list_for_each_entry_safe(e, tmp, &psock->maps, list) { | ||
1685 | struct htab_elem *c = rcu_dereference(e->hash_link); | ||
1686 | |||
1687 | if (c == hash_link) | ||
1688 | list_del(&e->list); | ||
1689 | } | ||
1690 | spin_unlock_bh(&psock->maps_lock); | ||
1579 | } | 1691 | } |
1580 | 1692 | ||
1581 | static void sock_map_free(struct bpf_map *map) | 1693 | static void sock_map_free(struct bpf_map *map) |
@@ -1601,7 +1713,6 @@ static void sock_map_free(struct bpf_map *map) | |||
1601 | if (!sock) | 1713 | if (!sock) |
1602 | continue; | 1714 | continue; |
1603 | 1715 | ||
1604 | write_lock_bh(&sock->sk_callback_lock); | ||
1605 | psock = smap_psock_sk(sock); | 1716 | psock = smap_psock_sk(sock); |
1606 | /* This check handles a racing sock event that can get the | 1717 | /* This check handles a racing sock event that can get the |
1607 | * sk_callback_lock before this case but after xchg happens | 1718 | * sk_callback_lock before this case but after xchg happens |
@@ -1609,10 +1720,9 @@ static void sock_map_free(struct bpf_map *map) | |||
1609 | * to be null and queued for garbage collection. | 1720 | * to be null and queued for garbage collection. |
1610 | */ | 1721 | */ |
1611 | if (likely(psock)) { | 1722 | if (likely(psock)) { |
1612 | smap_list_remove(psock, &stab->sock_map[i], NULL); | 1723 | smap_list_map_remove(psock, &stab->sock_map[i]); |
1613 | smap_release_sock(psock, sock); | 1724 | smap_release_sock(psock, sock); |
1614 | } | 1725 | } |
1615 | write_unlock_bh(&sock->sk_callback_lock); | ||
1616 | } | 1726 | } |
1617 | rcu_read_unlock(); | 1727 | rcu_read_unlock(); |
1618 | 1728 | ||
@@ -1661,17 +1771,15 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key) | |||
1661 | if (!sock) | 1771 | if (!sock) |
1662 | return -EINVAL; | 1772 | return -EINVAL; |
1663 | 1773 | ||
1664 | write_lock_bh(&sock->sk_callback_lock); | ||
1665 | psock = smap_psock_sk(sock); | 1774 | psock = smap_psock_sk(sock); |
1666 | if (!psock) | 1775 | if (!psock) |
1667 | goto out; | 1776 | goto out; |
1668 | 1777 | ||
1669 | if (psock->bpf_parse) | 1778 | if (psock->bpf_parse) |
1670 | smap_stop_sock(psock, sock); | 1779 | smap_stop_sock(psock, sock); |
1671 | smap_list_remove(psock, &stab->sock_map[k], NULL); | 1780 | smap_list_map_remove(psock, &stab->sock_map[k]); |
1672 | smap_release_sock(psock, sock); | 1781 | smap_release_sock(psock, sock); |
1673 | out: | 1782 | out: |
1674 | write_unlock_bh(&sock->sk_callback_lock); | ||
1675 | return 0; | 1783 | return 0; |
1676 | } | 1784 | } |
1677 | 1785 | ||
@@ -1752,7 +1860,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, | |||
1752 | } | 1860 | } |
1753 | } | 1861 | } |
1754 | 1862 | ||
1755 | write_lock_bh(&sock->sk_callback_lock); | ||
1756 | psock = smap_psock_sk(sock); | 1863 | psock = smap_psock_sk(sock); |
1757 | 1864 | ||
1758 | /* 2. Do not allow inheriting programs if psock exists and has | 1865 | /* 2. Do not allow inheriting programs if psock exists and has |
@@ -1809,7 +1916,9 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, | |||
1809 | if (err) | 1916 | if (err) |
1810 | goto out_free; | 1917 | goto out_free; |
1811 | smap_init_progs(psock, verdict, parse); | 1918 | smap_init_progs(psock, verdict, parse); |
1919 | write_lock_bh(&sock->sk_callback_lock); | ||
1812 | smap_start_sock(psock, sock); | 1920 | smap_start_sock(psock, sock); |
1921 | write_unlock_bh(&sock->sk_callback_lock); | ||
1813 | } | 1922 | } |
1814 | 1923 | ||
1815 | /* 4. Place psock in sockmap for use and stop any programs on | 1924 | /* 4. Place psock in sockmap for use and stop any programs on |
@@ -1819,9 +1928,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, | |||
1819 | */ | 1928 | */ |
1820 | if (map_link) { | 1929 | if (map_link) { |
1821 | e->entry = map_link; | 1930 | e->entry = map_link; |
1931 | spin_lock_bh(&psock->maps_lock); | ||
1822 | list_add_tail(&e->list, &psock->maps); | 1932 | list_add_tail(&e->list, &psock->maps); |
1933 | spin_unlock_bh(&psock->maps_lock); | ||
1823 | } | 1934 | } |
1824 | write_unlock_bh(&sock->sk_callback_lock); | ||
1825 | return err; | 1935 | return err; |
1826 | out_free: | 1936 | out_free: |
1827 | smap_release_sock(psock, sock); | 1937 | smap_release_sock(psock, sock); |
@@ -1832,7 +1942,6 @@ out_progs: | |||
1832 | } | 1942 | } |
1833 | if (tx_msg) | 1943 | if (tx_msg) |
1834 | bpf_prog_put(tx_msg); | 1944 | bpf_prog_put(tx_msg); |
1835 | write_unlock_bh(&sock->sk_callback_lock); | ||
1836 | kfree(e); | 1945 | kfree(e); |
1837 | return err; | 1946 | return err; |
1838 | } | 1947 | } |
@@ -1869,10 +1978,8 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops, | |||
1869 | if (osock) { | 1978 | if (osock) { |
1870 | struct smap_psock *opsock = smap_psock_sk(osock); | 1979 | struct smap_psock *opsock = smap_psock_sk(osock); |
1871 | 1980 | ||
1872 | write_lock_bh(&osock->sk_callback_lock); | 1981 | smap_list_map_remove(opsock, &stab->sock_map[i]); |
1873 | smap_list_remove(opsock, &stab->sock_map[i], NULL); | ||
1874 | smap_release_sock(opsock, osock); | 1982 | smap_release_sock(opsock, osock); |
1875 | write_unlock_bh(&osock->sk_callback_lock); | ||
1876 | } | 1983 | } |
1877 | out: | 1984 | out: |
1878 | return err; | 1985 | return err; |
@@ -1915,6 +2022,24 @@ int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type) | |||
1915 | return 0; | 2022 | return 0; |
1916 | } | 2023 | } |
1917 | 2024 | ||
2025 | int sockmap_get_from_fd(const union bpf_attr *attr, int type, | ||
2026 | struct bpf_prog *prog) | ||
2027 | { | ||
2028 | int ufd = attr->target_fd; | ||
2029 | struct bpf_map *map; | ||
2030 | struct fd f; | ||
2031 | int err; | ||
2032 | |||
2033 | f = fdget(ufd); | ||
2034 | map = __bpf_map_get(f); | ||
2035 | if (IS_ERR(map)) | ||
2036 | return PTR_ERR(map); | ||
2037 | |||
2038 | err = sock_map_prog(map, prog, attr->attach_type); | ||
2039 | fdput(f); | ||
2040 | return err; | ||
2041 | } | ||
2042 | |||
1918 | static void *sock_map_lookup(struct bpf_map *map, void *key) | 2043 | static void *sock_map_lookup(struct bpf_map *map, void *key) |
1919 | { | 2044 | { |
1920 | return NULL; | 2045 | return NULL; |
@@ -2043,14 +2168,13 @@ free_htab: | |||
2043 | return ERR_PTR(err); | 2168 | return ERR_PTR(err); |
2044 | } | 2169 | } |
2045 | 2170 | ||
2046 | static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) | 2171 | static void __bpf_htab_free(struct rcu_head *rcu) |
2047 | { | 2172 | { |
2048 | return &htab->buckets[hash & (htab->n_buckets - 1)]; | 2173 | struct bpf_htab *htab; |
2049 | } | ||
2050 | 2174 | ||
2051 | static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) | 2175 | htab = container_of(rcu, struct bpf_htab, rcu); |
2052 | { | 2176 | bpf_map_area_free(htab->buckets); |
2053 | return &__select_bucket(htab, hash)->head; | 2177 | kfree(htab); |
2054 | } | 2178 | } |
2055 | 2179 | ||
2056 | static void sock_hash_free(struct bpf_map *map) | 2180 | static void sock_hash_free(struct bpf_map *map) |
@@ -2069,16 +2193,18 @@ static void sock_hash_free(struct bpf_map *map) | |||
2069 | */ | 2193 | */ |
2070 | rcu_read_lock(); | 2194 | rcu_read_lock(); |
2071 | for (i = 0; i < htab->n_buckets; i++) { | 2195 | for (i = 0; i < htab->n_buckets; i++) { |
2072 | struct hlist_head *head = select_bucket(htab, i); | 2196 | struct bucket *b = __select_bucket(htab, i); |
2197 | struct hlist_head *head; | ||
2073 | struct hlist_node *n; | 2198 | struct hlist_node *n; |
2074 | struct htab_elem *l; | 2199 | struct htab_elem *l; |
2075 | 2200 | ||
2201 | raw_spin_lock_bh(&b->lock); | ||
2202 | head = &b->head; | ||
2076 | hlist_for_each_entry_safe(l, n, head, hash_node) { | 2203 | hlist_for_each_entry_safe(l, n, head, hash_node) { |
2077 | struct sock *sock = l->sk; | 2204 | struct sock *sock = l->sk; |
2078 | struct smap_psock *psock; | 2205 | struct smap_psock *psock; |
2079 | 2206 | ||
2080 | hlist_del_rcu(&l->hash_node); | 2207 | hlist_del_rcu(&l->hash_node); |
2081 | write_lock_bh(&sock->sk_callback_lock); | ||
2082 | psock = smap_psock_sk(sock); | 2208 | psock = smap_psock_sk(sock); |
2083 | /* This check handles a racing sock event that can get | 2209 | /* This check handles a racing sock event that can get |
2084 | * the sk_callback_lock before this case but after xchg | 2210 | * the sk_callback_lock before this case but after xchg |
@@ -2086,16 +2212,15 @@ static void sock_hash_free(struct bpf_map *map) | |||
2086 | * (psock) to be null and queued for garbage collection. | 2212 | * (psock) to be null and queued for garbage collection. |
2087 | */ | 2213 | */ |
2088 | if (likely(psock)) { | 2214 | if (likely(psock)) { |
2089 | smap_list_remove(psock, NULL, l); | 2215 | smap_list_hash_remove(psock, l); |
2090 | smap_release_sock(psock, sock); | 2216 | smap_release_sock(psock, sock); |
2091 | } | 2217 | } |
2092 | write_unlock_bh(&sock->sk_callback_lock); | 2218 | free_htab_elem(htab, l); |
2093 | kfree(l); | ||
2094 | } | 2219 | } |
2220 | raw_spin_unlock_bh(&b->lock); | ||
2095 | } | 2221 | } |
2096 | rcu_read_unlock(); | 2222 | rcu_read_unlock(); |
2097 | bpf_map_area_free(htab->buckets); | 2223 | call_rcu(&htab->rcu, __bpf_htab_free); |
2098 | kfree(htab); | ||
2099 | } | 2224 | } |
2100 | 2225 | ||
2101 | static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, | 2226 | static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, |
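[Note] Deferring the final free matters because bpf_tcp_close() above now dereferences e->htab and walks its buckets under rcu_read_lock(); a socket closing concurrently with sock_hash_free() must still find valid memory until the grace period ends. The pairing, in brief (sketch):

    /* writer, map teardown: */
    call_rcu(&htab->rcu, __bpf_htab_free);  /* buckets + htab freed later */

    /* reader, close path: */
    rcu_read_lock();
    htab = rcu_dereference(e->htab);        /* still valid: the final kfree()
                                             * is deferred past this section */
    b = __select_bucket(htab, link->hash);
    rcu_read_unlock();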
@@ -2122,19 +2247,6 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, | |||
2122 | return l_new; | 2247 | return l_new; |
2123 | } | 2248 | } |
2124 | 2249 | ||
2125 | static struct htab_elem *lookup_elem_raw(struct hlist_head *head, | ||
2126 | u32 hash, void *key, u32 key_size) | ||
2127 | { | ||
2128 | struct htab_elem *l; | ||
2129 | |||
2130 | hlist_for_each_entry_rcu(l, head, hash_node) { | ||
2131 | if (l->hash == hash && !memcmp(&l->key, key, key_size)) | ||
2132 | return l; | ||
2133 | } | ||
2134 | |||
2135 | return NULL; | ||
2136 | } | ||
2137 | |||
2138 | static inline u32 htab_map_hash(const void *key, u32 key_len) | 2250 | static inline u32 htab_map_hash(const void *key, u32 key_len) |
2139 | { | 2251 | { |
2140 | return jhash(key, key_len, 0); | 2252 | return jhash(key, key_len, 0); |
@@ -2254,9 +2366,12 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, | |||
2254 | goto bucket_err; | 2366 | goto bucket_err; |
2255 | } | 2367 | } |
2256 | 2368 | ||
2257 | e->hash_link = l_new; | 2369 | rcu_assign_pointer(e->hash_link, l_new); |
2258 | e->htab = container_of(map, struct bpf_htab, map); | 2370 | rcu_assign_pointer(e->htab, |
2371 | container_of(map, struct bpf_htab, map)); | ||
2372 | spin_lock_bh(&psock->maps_lock); | ||
2259 | list_add_tail(&e->list, &psock->maps); | 2373 | list_add_tail(&e->list, &psock->maps); |
2374 | spin_unlock_bh(&psock->maps_lock); | ||
2260 | 2375 | ||
2261 | /* add new element to the head of the list, so that | 2376 | /* add new element to the head of the list, so that |
2262 | * concurrent search will find it before old elem | 2377 | * concurrent search will find it before old elem |
@@ -2266,7 +2381,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, | |||
2266 | psock = smap_psock_sk(l_old->sk); | 2381 | psock = smap_psock_sk(l_old->sk); |
2267 | 2382 | ||
2268 | hlist_del_rcu(&l_old->hash_node); | 2383 | hlist_del_rcu(&l_old->hash_node); |
2269 | smap_list_remove(psock, NULL, l_old); | 2384 | smap_list_hash_remove(psock, l_old); |
2270 | smap_release_sock(psock, l_old->sk); | 2385 | smap_release_sock(psock, l_old->sk); |
2271 | free_htab_elem(htab, l_old); | 2386 | free_htab_elem(htab, l_old); |
2272 | } | 2387 | } |
@@ -2326,7 +2441,6 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key) | |||
2326 | struct smap_psock *psock; | 2441 | struct smap_psock *psock; |
2327 | 2442 | ||
2328 | hlist_del_rcu(&l->hash_node); | 2443 | hlist_del_rcu(&l->hash_node); |
2329 | write_lock_bh(&sock->sk_callback_lock); | ||
2330 | psock = smap_psock_sk(sock); | 2444 | psock = smap_psock_sk(sock); |
2331 | /* This check handles a racing sock event that can get the | 2445 | /* This check handles a racing sock event that can get the |
2332 | * sk_callback_lock before this case but after xchg happens | 2446 | * sk_callback_lock before this case but after xchg happens |
@@ -2334,10 +2448,9 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key) | |||
2334 | * to be null and queued for garbage collection. | 2448 | * to be null and queued for garbage collection. |
2335 | */ | 2449 | */ |
2336 | if (likely(psock)) { | 2450 | if (likely(psock)) { |
2337 | smap_list_remove(psock, NULL, l); | 2451 | smap_list_hash_remove(psock, l); |
2338 | smap_release_sock(psock, sock); | 2452 | smap_release_sock(psock, sock); |
2339 | } | 2453 | } |
2340 | write_unlock_bh(&sock->sk_callback_lock); | ||
2341 | free_htab_elem(htab, l); | 2454 | free_htab_elem(htab, l); |
2342 | ret = 0; | 2455 | ret = 0; |
2343 | } | 2456 | } |
@@ -2383,6 +2496,7 @@ const struct bpf_map_ops sock_hash_ops = { | |||
2383 | .map_get_next_key = sock_hash_get_next_key, | 2496 | .map_get_next_key = sock_hash_get_next_key, |
2384 | .map_update_elem = sock_hash_update_elem, | 2497 | .map_update_elem = sock_hash_update_elem, |
2385 | .map_delete_elem = sock_hash_delete_elem, | 2498 | .map_delete_elem = sock_hash_delete_elem, |
2499 | .map_release_uref = sock_map_release, | ||
2386 | }; | 2500 | }; |
2387 | 2501 | ||
2388 | BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, | 2502 | BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, |
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 35dc466641f2..d10ecd78105f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -1483,8 +1483,6 @@ out_free_tp: | |||
1483 | return err; | 1483 | return err; |
1484 | } | 1484 | } |
1485 | 1485 | ||
1486 | #ifdef CONFIG_CGROUP_BPF | ||
1487 | |||
1488 | static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, | 1486 | static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, |
1489 | enum bpf_attach_type attach_type) | 1487 | enum bpf_attach_type attach_type) |
1490 | { | 1488 | { |
@@ -1499,40 +1497,6 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, | |||
1499 | 1497 | ||
1500 | #define BPF_PROG_ATTACH_LAST_FIELD attach_flags | 1498 | #define BPF_PROG_ATTACH_LAST_FIELD attach_flags |
1501 | 1499 | ||
1502 | static int sockmap_get_from_fd(const union bpf_attr *attr, | ||
1503 | int type, bool attach) | ||
1504 | { | ||
1505 | struct bpf_prog *prog = NULL; | ||
1506 | int ufd = attr->target_fd; | ||
1507 | struct bpf_map *map; | ||
1508 | struct fd f; | ||
1509 | int err; | ||
1510 | |||
1511 | f = fdget(ufd); | ||
1512 | map = __bpf_map_get(f); | ||
1513 | if (IS_ERR(map)) | ||
1514 | return PTR_ERR(map); | ||
1515 | |||
1516 | if (attach) { | ||
1517 | prog = bpf_prog_get_type(attr->attach_bpf_fd, type); | ||
1518 | if (IS_ERR(prog)) { | ||
1519 | fdput(f); | ||
1520 | return PTR_ERR(prog); | ||
1521 | } | ||
1522 | } | ||
1523 | |||
1524 | err = sock_map_prog(map, prog, attr->attach_type); | ||
1525 | if (err) { | ||
1526 | fdput(f); | ||
1527 | if (prog) | ||
1528 | bpf_prog_put(prog); | ||
1529 | return err; | ||
1530 | } | ||
1531 | |||
1532 | fdput(f); | ||
1533 | return 0; | ||
1534 | } | ||
1535 | |||
1536 | #define BPF_F_ATTACH_MASK \ | 1500 | #define BPF_F_ATTACH_MASK \ |
1537 | (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI) | 1501 | (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI) |
1538 | 1502 | ||
@@ -1540,7 +1504,6 @@ static int bpf_prog_attach(const union bpf_attr *attr) | |||
1540 | { | 1504 | { |
1541 | enum bpf_prog_type ptype; | 1505 | enum bpf_prog_type ptype; |
1542 | struct bpf_prog *prog; | 1506 | struct bpf_prog *prog; |
1543 | struct cgroup *cgrp; | ||
1544 | int ret; | 1507 | int ret; |
1545 | 1508 | ||
1546 | if (!capable(CAP_NET_ADMIN)) | 1509 | if (!capable(CAP_NET_ADMIN)) |
@@ -1577,12 +1540,15 @@ static int bpf_prog_attach(const union bpf_attr *attr) | |||
1577 | ptype = BPF_PROG_TYPE_CGROUP_DEVICE; | 1540 | ptype = BPF_PROG_TYPE_CGROUP_DEVICE; |
1578 | break; | 1541 | break; |
1579 | case BPF_SK_MSG_VERDICT: | 1542 | case BPF_SK_MSG_VERDICT: |
1580 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true); | 1543 | ptype = BPF_PROG_TYPE_SK_MSG; |
1544 | break; | ||
1581 | case BPF_SK_SKB_STREAM_PARSER: | 1545 | case BPF_SK_SKB_STREAM_PARSER: |
1582 | case BPF_SK_SKB_STREAM_VERDICT: | 1546 | case BPF_SK_SKB_STREAM_VERDICT: |
1583 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true); | 1547 | ptype = BPF_PROG_TYPE_SK_SKB; |
1548 | break; | ||
1584 | case BPF_LIRC_MODE2: | 1549 | case BPF_LIRC_MODE2: |
1585 | return lirc_prog_attach(attr); | 1550 | ptype = BPF_PROG_TYPE_LIRC_MODE2; |
1551 | break; | ||
1586 | default: | 1552 | default: |
1587 | return -EINVAL; | 1553 | return -EINVAL; |
1588 | } | 1554 | } |
@@ -1596,18 +1562,20 @@ static int bpf_prog_attach(const union bpf_attr *attr) | |||
1596 | return -EINVAL; | 1562 | return -EINVAL; |
1597 | } | 1563 | } |
1598 | 1564 | ||
1599 | cgrp = cgroup_get_from_fd(attr->target_fd); | 1565 | switch (ptype) { |
1600 | if (IS_ERR(cgrp)) { | 1566 | case BPF_PROG_TYPE_SK_SKB: |
1601 | bpf_prog_put(prog); | 1567 | case BPF_PROG_TYPE_SK_MSG: |
1602 | return PTR_ERR(cgrp); | 1568 | ret = sockmap_get_from_fd(attr, ptype, prog); |
1569 | break; | ||
1570 | case BPF_PROG_TYPE_LIRC_MODE2: | ||
1571 | ret = lirc_prog_attach(attr, prog); | ||
1572 | break; | ||
1573 | default: | ||
1574 | ret = cgroup_bpf_prog_attach(attr, ptype, prog); | ||
1603 | } | 1575 | } |
1604 | 1576 | ||
1605 | ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, | ||
1606 | attr->attach_flags); | ||
1607 | if (ret) | 1577 | if (ret) |
1608 | bpf_prog_put(prog); | 1578 | bpf_prog_put(prog); |
1609 | cgroup_put(cgrp); | ||
1610 | |||
1611 | return ret; | 1579 | return ret; |
1612 | } | 1580 | } |
1613 | 1581 | ||
@@ -1616,9 +1584,6 @@ static int bpf_prog_attach(const union bpf_attr *attr) | |||
1616 | static int bpf_prog_detach(const union bpf_attr *attr) | 1584 | static int bpf_prog_detach(const union bpf_attr *attr) |
1617 | { | 1585 | { |
1618 | enum bpf_prog_type ptype; | 1586 | enum bpf_prog_type ptype; |
1619 | struct bpf_prog *prog; | ||
1620 | struct cgroup *cgrp; | ||
1621 | int ret; | ||
1622 | 1587 | ||
1623 | if (!capable(CAP_NET_ADMIN)) | 1588 | if (!capable(CAP_NET_ADMIN)) |
1624 | return -EPERM; | 1589 | return -EPERM; |
@@ -1651,29 +1616,17 @@ static int bpf_prog_detach(const union bpf_attr *attr) | |||
1651 | ptype = BPF_PROG_TYPE_CGROUP_DEVICE; | 1616 | ptype = BPF_PROG_TYPE_CGROUP_DEVICE; |
1652 | break; | 1617 | break; |
1653 | case BPF_SK_MSG_VERDICT: | 1618 | case BPF_SK_MSG_VERDICT: |
1654 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false); | 1619 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL); |
1655 | case BPF_SK_SKB_STREAM_PARSER: | 1620 | case BPF_SK_SKB_STREAM_PARSER: |
1656 | case BPF_SK_SKB_STREAM_VERDICT: | 1621 | case BPF_SK_SKB_STREAM_VERDICT: |
1657 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false); | 1622 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL); |
1658 | case BPF_LIRC_MODE2: | 1623 | case BPF_LIRC_MODE2: |
1659 | return lirc_prog_detach(attr); | 1624 | return lirc_prog_detach(attr); |
1660 | default: | 1625 | default: |
1661 | return -EINVAL; | 1626 | return -EINVAL; |
1662 | } | 1627 | } |
1663 | 1628 | ||
1664 | cgrp = cgroup_get_from_fd(attr->target_fd); | 1629 | return cgroup_bpf_prog_detach(attr, ptype); |
1665 | if (IS_ERR(cgrp)) | ||
1666 | return PTR_ERR(cgrp); | ||
1667 | |||
1668 | prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); | ||
1669 | if (IS_ERR(prog)) | ||
1670 | prog = NULL; | ||
1671 | |||
1672 | ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); | ||
1673 | if (prog) | ||
1674 | bpf_prog_put(prog); | ||
1675 | cgroup_put(cgrp); | ||
1676 | return ret; | ||
1677 | } | 1630 | } |
1678 | 1631 | ||
1679 | #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt | 1632 | #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt |
@@ -1681,9 +1634,6 @@ static int bpf_prog_detach(const union bpf_attr *attr) | |||
1681 | static int bpf_prog_query(const union bpf_attr *attr, | 1634 | static int bpf_prog_query(const union bpf_attr *attr, |
1682 | union bpf_attr __user *uattr) | 1635 | union bpf_attr __user *uattr) |
1683 | { | 1636 | { |
1684 | struct cgroup *cgrp; | ||
1685 | int ret; | ||
1686 | |||
1687 | if (!capable(CAP_NET_ADMIN)) | 1637 | if (!capable(CAP_NET_ADMIN)) |
1688 | return -EPERM; | 1638 | return -EPERM; |
1689 | if (CHECK_ATTR(BPF_PROG_QUERY)) | 1639 | if (CHECK_ATTR(BPF_PROG_QUERY)) |
@@ -1711,14 +1661,9 @@ static int bpf_prog_query(const union bpf_attr *attr, | |||
1711 | default: | 1661 | default: |
1712 | return -EINVAL; | 1662 | return -EINVAL; |
1713 | } | 1663 | } |
1714 | cgrp = cgroup_get_from_fd(attr->query.target_fd); | 1664 | |
1715 | if (IS_ERR(cgrp)) | 1665 | return cgroup_bpf_prog_query(attr, uattr); |
1716 | return PTR_ERR(cgrp); | ||
1717 | ret = cgroup_bpf_query(cgrp, attr, uattr); | ||
1718 | cgroup_put(cgrp); | ||
1719 | return ret; | ||
1720 | } | 1666 | } |
1721 | #endif /* CONFIG_CGROUP_BPF */ | ||
1722 | 1667 | ||
1723 | #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration | 1668 | #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration |
1724 | 1669 | ||
@@ -2365,7 +2310,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz | |||
2365 | case BPF_OBJ_GET: | 2310 | case BPF_OBJ_GET: |
2366 | err = bpf_obj_get(&attr); | 2311 | err = bpf_obj_get(&attr); |
2367 | break; | 2312 | break; |
2368 | #ifdef CONFIG_CGROUP_BPF | ||
2369 | case BPF_PROG_ATTACH: | 2313 | case BPF_PROG_ATTACH: |
2370 | err = bpf_prog_attach(&attr); | 2314 | err = bpf_prog_attach(&attr); |
2371 | break; | 2315 | break; |
@@ -2375,7 +2319,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz | |||
2375 | case BPF_PROG_QUERY: | 2319 | case BPF_PROG_QUERY: |
2376 | err = bpf_prog_query(&attr, uattr); | 2320 | err = bpf_prog_query(&attr, uattr); |
2377 | break; | 2321 | break; |
2378 | #endif | ||
2379 | case BPF_PROG_TEST_RUN: | 2322 | case BPF_PROG_TEST_RUN: |
2380 | err = bpf_prog_test_run(&attr, uattr); | 2323 | err = bpf_prog_test_run(&attr, uattr); |
2381 | break; | 2324 | break; |
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 60aedc879361..08d3d59dca17 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
@@ -5282,21 +5282,31 @@ static struct bpf_test tests[] = { | |||
5282 | { /* Mainly checking JIT here. */ | 5282 | { /* Mainly checking JIT here. */ |
5283 | "BPF_MAXINSNS: Ctx heavy transformations", | 5283 | "BPF_MAXINSNS: Ctx heavy transformations", |
5284 | { }, | 5284 | { }, |
5285 | #if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) | ||
5286 | CLASSIC | FLAG_EXPECTED_FAIL, | ||
5287 | #else | ||
5285 | CLASSIC, | 5288 | CLASSIC, |
5289 | #endif | ||
5286 | { }, | 5290 | { }, |
5287 | { | 5291 | { |
5288 | { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, | 5292 | { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, |
5289 | { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } | 5293 | { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } |
5290 | }, | 5294 | }, |
5291 | .fill_helper = bpf_fill_maxinsns6, | 5295 | .fill_helper = bpf_fill_maxinsns6, |
5296 | .expected_errcode = -ENOTSUPP, | ||
5292 | }, | 5297 | }, |
5293 | { /* Mainly checking JIT here. */ | 5298 | { /* Mainly checking JIT here. */ |
5294 | "BPF_MAXINSNS: Call heavy transformations", | 5299 | "BPF_MAXINSNS: Call heavy transformations", |
5295 | { }, | 5300 | { }, |
5301 | #if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) | ||
5302 | CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, | ||
5303 | #else | ||
5296 | CLASSIC | FLAG_NO_DATA, | 5304 | CLASSIC | FLAG_NO_DATA, |
5305 | #endif | ||
5297 | { }, | 5306 | { }, |
5298 | { { 1, 0 }, { 10, 0 } }, | 5307 | { { 1, 0 }, { 10, 0 } }, |
5299 | .fill_helper = bpf_fill_maxinsns7, | 5308 | .fill_helper = bpf_fill_maxinsns7, |
5309 | .expected_errcode = -ENOTSUPP, | ||
5300 | }, | 5310 | }, |
5301 | { /* Mainly checking JIT here. */ | 5311 | { /* Mainly checking JIT here. */ |
5302 | "BPF_MAXINSNS: Jump heavy test", | 5312 | "BPF_MAXINSNS: Jump heavy test", |
@@ -5347,18 +5357,28 @@ static struct bpf_test tests[] = { | |||
5347 | { | 5357 | { |
5348 | "BPF_MAXINSNS: exec all MSH", | 5358 | "BPF_MAXINSNS: exec all MSH", |
5349 | { }, | 5359 | { }, |
5360 | #if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) | ||
5361 | CLASSIC | FLAG_EXPECTED_FAIL, | ||
5362 | #else | ||
5350 | CLASSIC, | 5363 | CLASSIC, |
5364 | #endif | ||
5351 | { 0xfa, 0xfb, 0xfc, 0xfd, }, | 5365 | { 0xfa, 0xfb, 0xfc, 0xfd, }, |
5352 | { { 4, 0xababab83 } }, | 5366 | { { 4, 0xababab83 } }, |
5353 | .fill_helper = bpf_fill_maxinsns13, | 5367 | .fill_helper = bpf_fill_maxinsns13, |
5368 | .expected_errcode = -ENOTSUPP, | ||
5354 | }, | 5369 | }, |
5355 | { | 5370 | { |
5356 | "BPF_MAXINSNS: ld_abs+get_processor_id", | 5371 | "BPF_MAXINSNS: ld_abs+get_processor_id", |
5357 | { }, | 5372 | { }, |
5373 | #if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) | ||
5374 | CLASSIC | FLAG_EXPECTED_FAIL, | ||
5375 | #else | ||
5358 | CLASSIC, | 5376 | CLASSIC, |
5377 | #endif | ||
5359 | { }, | 5378 | { }, |
5360 | { { 1, 0xbee } }, | 5379 | { { 1, 0xbee } }, |
5361 | .fill_helper = bpf_fill_ld_abs_get_processor_id, | 5380 | .fill_helper = bpf_fill_ld_abs_get_processor_id, |
5381 | .expected_errcode = -ENOTSUPP, | ||
5362 | }, | 5382 | }, |
5363 | /* | 5383 | /* |
5364 | * LD_IND / LD_ABS on fragmented SKBs | 5384 | * LD_IND / LD_ABS on fragmented SKBs |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 73a65789271b..8ccee3d01822 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -693,7 +693,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head, | |||
693 | out_unlock: | 693 | out_unlock: |
694 | rcu_read_unlock(); | 694 | rcu_read_unlock(); |
695 | out: | 695 | out: |
696 | NAPI_GRO_CB(skb)->flush |= flush; | 696 | skb_gro_flush_final(skb, pp, flush); |
697 | 697 | ||
698 | return pp; | 698 | return pp; |
699 | } | 699 | } |
diff --git a/net/Makefile b/net/Makefile index 13ec0d5415c7..bdaf53925acd 100644 --- a/net/Makefile +++ b/net/Makefile | |||
@@ -20,11 +20,7 @@ obj-$(CONFIG_TLS) += tls/ | |||
20 | obj-$(CONFIG_XFRM) += xfrm/ | 20 | obj-$(CONFIG_XFRM) += xfrm/ |
21 | obj-$(CONFIG_UNIX) += unix/ | 21 | obj-$(CONFIG_UNIX) += unix/ |
22 | obj-$(CONFIG_NET) += ipv6/ | 22 | obj-$(CONFIG_NET) += ipv6/ |
23 | ifneq ($(CC_CAN_LINK),y) | ||
24 | $(warning CC cannot link executables. Skipping bpfilter.) | ||
25 | else | ||
26 | obj-$(CONFIG_BPFILTER) += bpfilter/ | 23 | obj-$(CONFIG_BPFILTER) += bpfilter/ |
27 | endif | ||
28 | obj-$(CONFIG_PACKET) += packet/ | 24 | obj-$(CONFIG_PACKET) += packet/ |
29 | obj-$(CONFIG_NET_KEY) += key/ | 25 | obj-$(CONFIG_NET_KEY) += key/ |
30 | obj-$(CONFIG_BRIDGE) += bridge/ | 26 | obj-$(CONFIG_BRIDGE) += bridge/ |
diff --git a/net/bpfilter/Kconfig b/net/bpfilter/Kconfig index a948b072c28f..76deb6615883 100644 --- a/net/bpfilter/Kconfig +++ b/net/bpfilter/Kconfig | |||
@@ -1,6 +1,5 @@ | |||
1 | menuconfig BPFILTER | 1 | menuconfig BPFILTER |
2 | bool "BPF based packet filtering framework (BPFILTER)" | 2 | bool "BPF based packet filtering framework (BPFILTER)" |
3 | default n | ||
4 | depends on NET && BPF && INET | 3 | depends on NET && BPF && INET |
5 | help | 4 | help |
6 | This builds experimental bpfilter framework that is aiming to | 5 | This builds experimental bpfilter framework that is aiming to |
@@ -9,6 +8,7 @@ menuconfig BPFILTER | |||
9 | if BPFILTER | 8 | if BPFILTER |
10 | config BPFILTER_UMH | 9 | config BPFILTER_UMH |
11 | tristate "bpfilter kernel module with user mode helper" | 10 | tristate "bpfilter kernel module with user mode helper" |
11 | depends on $(success,$(srctree)/scripts/cc-can-link.sh $(CC)) | ||
12 | default m | 12 | default m |
13 | help | 13 | help |
14 | This builds bpfilter kernel module with embedded user mode helper | 14 | This builds bpfilter kernel module with embedded user mode helper |
diff --git a/net/bpfilter/Makefile b/net/bpfilter/Makefile index 051dc18b8ccb..39c6980b5d99 100644 --- a/net/bpfilter/Makefile +++ b/net/bpfilter/Makefile | |||
@@ -15,20 +15,7 @@ ifeq ($(CONFIG_BPFILTER_UMH), y) | |||
15 | HOSTLDFLAGS += -static | 15 | HOSTLDFLAGS += -static |
16 | endif | 16 | endif |
17 | 17 | ||
18 | # a bit of elf magic to convert bpfilter_umh binary into a binary blob | 18 | $(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh |
19 | # inside bpfilter_umh.o elf file referenced by | ||
20 | # _binary_net_bpfilter_bpfilter_umh_start symbol | ||
21 | # which bpfilter_kern.c passes further into umh blob loader at run-time | ||
22 | quiet_cmd_copy_umh = GEN $@ | ||
23 | cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \ | ||
24 | $(OBJCOPY) -I binary \ | ||
25 | `LC_ALL=C $(OBJDUMP) -f net/bpfilter/bpfilter_umh \ | ||
26 | |awk -F' |,' '/file format/{print "-O",$$NF} \ | ||
27 | /^architecture:/{print "-B",$$2}'` \ | ||
28 | --rename-section .data=.init.rodata $< $@ | ||
29 | |||
30 | $(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh | ||
31 | $(call cmd,copy_umh) | ||
32 | 19 | ||
33 | obj-$(CONFIG_BPFILTER_UMH) += bpfilter.o | 20 | obj-$(CONFIG_BPFILTER_UMH) += bpfilter.o |
34 | bpfilter-objs += bpfilter_kern.o bpfilter_umh.o | 21 | bpfilter-objs += bpfilter_kern.o bpfilter_umh_blob.o |
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 09522573f611..f0fc182d3db7 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c | |||
@@ -10,11 +10,8 @@ | |||
10 | #include <linux/file.h> | 10 | #include <linux/file.h> |
11 | #include "msgfmt.h" | 11 | #include "msgfmt.h" |
12 | 12 | ||
13 | #define UMH_start _binary_net_bpfilter_bpfilter_umh_start | 13 | extern char bpfilter_umh_start; |
14 | #define UMH_end _binary_net_bpfilter_bpfilter_umh_end | 14 | extern char bpfilter_umh_end; |
15 | |||
16 | extern char UMH_start; | ||
17 | extern char UMH_end; | ||
18 | 15 | ||
19 | static struct umh_info info; | 16 | static struct umh_info info; |
20 | /* since ip_getsockopt() can run in parallel, serialize access to umh */ | 17 | /* since ip_getsockopt() can run in parallel, serialize access to umh */ |
@@ -93,7 +90,9 @@ static int __init load_umh(void) | |||
93 | int err; | 90 | int err; |
94 | 91 | ||
95 | /* fork usermode process */ | 92 | /* fork usermode process */ |
96 | err = fork_usermode_blob(&UMH_start, &UMH_end - &UMH_start, &info); | 93 | err = fork_usermode_blob(&bpfilter_umh_start, |
94 | &bpfilter_umh_end - &bpfilter_umh_start, | ||
95 | &info); | ||
97 | if (err) | 96 | if (err) |
98 | return err; | 97 | return err; |
99 | pr_info("Loaded bpfilter_umh pid %d\n", info.pid); | 98 | pr_info("Loaded bpfilter_umh pid %d\n", info.pid); |
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S new file mode 100644 index 000000000000..40311d10d2f2 --- /dev/null +++ b/net/bpfilter/bpfilter_umh_blob.S | |||
@@ -0,0 +1,7 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | .section .init.rodata, "a" | ||
3 | .global bpfilter_umh_start | ||
4 | bpfilter_umh_start: | ||
5 | .incbin "net/bpfilter/bpfilter_umh" | ||
6 | .global bpfilter_umh_end | ||
7 | bpfilter_umh_end: | ||
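The assembler stub above replaces the objcopy recipe: .incbin pastes the raw bpfilter_umh binary into .init.rodata between two global labels, and the C side consumes the blob purely through those symbols. A minimal sketch, using the symbol names this patch defines:

    /* Hedged sketch: measuring an .incbin-embedded blob from C. */
    extern char bpfilter_umh_start;
    extern char bpfilter_umh_end;

    static size_t umh_blob_size(void)
    {
            /* The two labels bracket the .incbin data exactly. */
            return &bpfilter_umh_end - &bpfilter_umh_start;
    }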
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index a04e1e88bf3a..50537ff961a7 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c | |||
@@ -285,16 +285,9 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | |||
285 | if (ifr->ifr_qlen < 0) | 285 | if (ifr->ifr_qlen < 0) |
286 | return -EINVAL; | 286 | return -EINVAL; |
287 | if (dev->tx_queue_len ^ ifr->ifr_qlen) { | 287 | if (dev->tx_queue_len ^ ifr->ifr_qlen) { |
288 | unsigned int orig_len = dev->tx_queue_len; | 288 | err = dev_change_tx_queue_len(dev, ifr->ifr_qlen); |
289 | 289 | if (err) | |
290 | dev->tx_queue_len = ifr->ifr_qlen; | ||
291 | err = call_netdevice_notifiers( | ||
292 | NETDEV_CHANGE_TX_QUEUE_LEN, dev); | ||
293 | err = notifier_to_errno(err); | ||
294 | if (err) { | ||
295 | dev->tx_queue_len = orig_len; | ||
296 | return err; | 290 | return err; |
297 | } | ||
298 | } | 291 | } |
299 | return 0; | 292 | return 0; |
300 | 293 | ||
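dev_change_tx_queue_len() centralizes the set/notify/rollback sequence that dev_ifsioc() open-coded above. A hedged sketch of what such a helper has to do; the actual net/core/dev.c implementation may differ in detail:

    int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
    {
            unsigned int orig_len = dev->tx_queue_len;
            int res;

            if (new_len != (unsigned int)new_len)
                    return -ERANGE;

            if (new_len != orig_len) {
                    dev->tx_queue_len = new_len;
                    res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN,
                                                   dev);
                    res = notifier_to_errno(res);
                    if (res) {
                            /* a notifier vetoed the change; roll back */
                            dev->tx_queue_len = orig_len;
                            return res;
                    }
            }
            return 0;
    }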
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 126ffc5bc630..f64aa13811ea 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -416,6 +416,14 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops, | |||
416 | if (rule->mark && r->mark != rule->mark) | 416 | if (rule->mark && r->mark != rule->mark) |
417 | continue; | 417 | continue; |
418 | 418 | ||
419 | if (rule->suppress_ifgroup != -1 && | ||
420 | r->suppress_ifgroup != rule->suppress_ifgroup) | ||
421 | continue; | ||
422 | |||
423 | if (rule->suppress_prefixlen != -1 && | ||
424 | r->suppress_prefixlen != rule->suppress_prefixlen) | ||
425 | continue; | ||
426 | |||
419 | if (rule->mark_mask && r->mark_mask != rule->mark_mask) | 427 | if (rule->mark_mask && r->mark_mask != rule->mark_mask) |
420 | continue; | 428 | continue; |
421 | 429 | ||
@@ -436,6 +444,9 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops, | |||
436 | if (rule->ip_proto && r->ip_proto != rule->ip_proto) | 444 | if (rule->ip_proto && r->ip_proto != rule->ip_proto) |
437 | continue; | 445 | continue; |
438 | 446 | ||
447 | if (rule->proto && r->proto != rule->proto) | ||
448 | continue; | ||
449 | |||
439 | if (fib_rule_port_range_set(&rule->sport_range) && | 450 | if (fib_rule_port_range_set(&rule->sport_range) && |
440 | !fib_rule_port_range_compare(&r->sport_range, | 451 | !fib_rule_port_range_compare(&r->sport_range, |
441 | &rule->sport_range)) | 452 | &rule->sport_range)) |
@@ -645,6 +656,73 @@ errout: | |||
645 | return err; | 656 | return err; |
646 | } | 657 | } |
647 | 658 | ||
659 | static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh, | ||
660 | struct nlattr **tb, struct fib_rule *rule) | ||
661 | { | ||
662 | struct fib_rule *r; | ||
663 | |||
664 | list_for_each_entry(r, &ops->rules_list, list) { | ||
665 | if (r->action != rule->action) | ||
666 | continue; | ||
667 | |||
668 | if (r->table != rule->table) | ||
669 | continue; | ||
670 | |||
671 | if (r->pref != rule->pref) | ||
672 | continue; | ||
673 | |||
674 | if (memcmp(r->iifname, rule->iifname, IFNAMSIZ)) | ||
675 | continue; | ||
676 | |||
677 | if (memcmp(r->oifname, rule->oifname, IFNAMSIZ)) | ||
678 | continue; | ||
679 | |||
680 | if (r->mark != rule->mark) | ||
681 | continue; | ||
682 | |||
683 | if (r->suppress_ifgroup != rule->suppress_ifgroup) | ||
684 | continue; | ||
685 | |||
686 | if (r->suppress_prefixlen != rule->suppress_prefixlen) | ||
687 | continue; | ||
688 | |||
689 | if (r->mark_mask != rule->mark_mask) | ||
690 | continue; | ||
691 | |||
692 | if (r->tun_id != rule->tun_id) | ||
693 | continue; | ||
694 | |||
695 | if (r->fr_net != rule->fr_net) | ||
696 | continue; | ||
697 | |||
698 | if (r->l3mdev != rule->l3mdev) | ||
699 | continue; | ||
700 | |||
701 | if (!uid_eq(r->uid_range.start, rule->uid_range.start) || | ||
702 | !uid_eq(r->uid_range.end, rule->uid_range.end)) | ||
703 | continue; | ||
704 | |||
705 | if (r->ip_proto != rule->ip_proto) | ||
706 | continue; | ||
707 | |||
708 | if (r->proto != rule->proto) | ||
709 | continue; | ||
710 | |||
711 | if (!fib_rule_port_range_compare(&r->sport_range, | ||
712 | &rule->sport_range)) | ||
713 | continue; | ||
714 | |||
715 | if (!fib_rule_port_range_compare(&r->dport_range, | ||
716 | &rule->dport_range)) | ||
717 | continue; | ||
718 | |||
719 | if (!ops->compare(r, frh, tb)) | ||
720 | continue; | ||
721 | return 1; | ||
722 | } | ||
723 | return 0; | ||
724 | } | ||
725 | |||
648 | int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, | 726 | int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, |
649 | struct netlink_ext_ack *extack) | 727 | struct netlink_ext_ack *extack) |
650 | { | 728 | { |
@@ -679,7 +757,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
679 | goto errout; | 757 | goto errout; |
680 | 758 | ||
681 | if ((nlh->nlmsg_flags & NLM_F_EXCL) && | 759 | if ((nlh->nlmsg_flags & NLM_F_EXCL) && |
682 | rule_find(ops, frh, tb, rule, user_priority)) { | 760 | rule_exists(ops, frh, tb, rule)) { |
683 | err = -EEXIST; | 761 | err = -EEXIST; |
684 | goto errout_free; | 762 | goto errout_free; |
685 | } | 763 | } |
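rule_find() treats attributes left unset in the request as wildcards, which is the right semantic for delete lookups, while the NLM_F_EXCL duplicate check needs strict field-by-field equality; hence the dedicated rule_exists() above. The difference in matching policy, reduced to a self-contained toy example:

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_rule { uint32_t mark; };    /* hypothetical one-field rule */

    /* rule_find() style: mark == 0 ("unset") matches any rule */
    static bool match_wildcard(const struct toy_rule *req,
                               const struct toy_rule *r)
    {
            return !req->mark || r->mark == req->mark;
    }

    /* rule_exists() style: fields must be identical, set or not */
    static bool match_exact(const struct toy_rule *req,
                            const struct toy_rule *r)
    {
            return r->mark == req->mark;
    }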
diff --git a/net/core/filter.c b/net/core/filter.c index e7f12e9f598c..0ca6907d7efe 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -4073,8 +4073,9 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, | |||
4073 | memcpy(params->smac, dev->dev_addr, ETH_ALEN); | 4073 | memcpy(params->smac, dev->dev_addr, ETH_ALEN); |
4074 | params->h_vlan_TCI = 0; | 4074 | params->h_vlan_TCI = 0; |
4075 | params->h_vlan_proto = 0; | 4075 | params->h_vlan_proto = 0; |
4076 | params->ifindex = dev->ifindex; | ||
4076 | 4077 | ||
4077 | return dev->ifindex; | 4078 | return 0; |
4078 | } | 4079 | } |
4079 | #endif | 4080 | #endif |
4080 | 4081 | ||
@@ -4098,7 +4099,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4098 | /* verify forwarding is enabled on this interface */ | 4099 | /* verify forwarding is enabled on this interface */ |
4099 | in_dev = __in_dev_get_rcu(dev); | 4100 | in_dev = __in_dev_get_rcu(dev); |
4100 | if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) | 4101 | if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) |
4101 | return 0; | 4102 | return BPF_FIB_LKUP_RET_FWD_DISABLED; |
4102 | 4103 | ||
4103 | if (flags & BPF_FIB_LOOKUP_OUTPUT) { | 4104 | if (flags & BPF_FIB_LOOKUP_OUTPUT) { |
4104 | fl4.flowi4_iif = 1; | 4105 | fl4.flowi4_iif = 1; |
@@ -4123,7 +4124,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4123 | 4124 | ||
4124 | tb = fib_get_table(net, tbid); | 4125 | tb = fib_get_table(net, tbid); |
4125 | if (unlikely(!tb)) | 4126 | if (unlikely(!tb)) |
4126 | return 0; | 4127 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
4127 | 4128 | ||
4128 | err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); | 4129 | err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); |
4129 | } else { | 4130 | } else { |
@@ -4135,8 +4136,20 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4135 | err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); | 4136 | err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); |
4136 | } | 4137 | } |
4137 | 4138 | ||
4138 | if (err || res.type != RTN_UNICAST) | 4139 | if (err) { |
4139 | return 0; | 4140 | /* map fib lookup errors to RTN_ type */ |
4141 | if (err == -EINVAL) | ||
4142 | return BPF_FIB_LKUP_RET_BLACKHOLE; | ||
4143 | if (err == -EHOSTUNREACH) | ||
4144 | return BPF_FIB_LKUP_RET_UNREACHABLE; | ||
4145 | if (err == -EACCES) | ||
4146 | return BPF_FIB_LKUP_RET_PROHIBIT; | ||
4147 | |||
4148 | return BPF_FIB_LKUP_RET_NOT_FWDED; | ||
4149 | } | ||
4150 | |||
4151 | if (res.type != RTN_UNICAST) | ||
4152 | return BPF_FIB_LKUP_RET_NOT_FWDED; | ||
4140 | 4153 | ||
4141 | if (res.fi->fib_nhs > 1) | 4154 | if (res.fi->fib_nhs > 1) |
4142 | fib_select_path(net, &res, &fl4, NULL); | 4155 | fib_select_path(net, &res, &fl4, NULL); |
@@ -4144,19 +4157,16 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4144 | if (check_mtu) { | 4157 | if (check_mtu) { |
4145 | mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst); | 4158 | mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst); |
4146 | if (params->tot_len > mtu) | 4159 | if (params->tot_len > mtu) |
4147 | return 0; | 4160 | return BPF_FIB_LKUP_RET_FRAG_NEEDED; |
4148 | } | 4161 | } |
4149 | 4162 | ||
4150 | nh = &res.fi->fib_nh[res.nh_sel]; | 4163 | nh = &res.fi->fib_nh[res.nh_sel]; |
4151 | 4164 | ||
4152 | /* do not handle lwt encaps right now */ | 4165 | /* do not handle lwt encaps right now */ |
4153 | if (nh->nh_lwtstate) | 4166 | if (nh->nh_lwtstate) |
4154 | return 0; | 4167 | return BPF_FIB_LKUP_RET_UNSUPP_LWT; |
4155 | 4168 | ||
4156 | dev = nh->nh_dev; | 4169 | dev = nh->nh_dev; |
4157 | if (unlikely(!dev)) | ||
4158 | return 0; | ||
4159 | |||
4160 | if (nh->nh_gw) | 4170 | if (nh->nh_gw) |
4161 | params->ipv4_dst = nh->nh_gw; | 4171 | params->ipv4_dst = nh->nh_gw; |
4162 | 4172 | ||
@@ -4166,10 +4176,10 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4166 | * rcu_read_lock_bh is not needed here | 4176 | * rcu_read_lock_bh is not needed here |
4167 | */ | 4177 | */ |
4168 | neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst); | 4178 | neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst); |
4169 | if (neigh) | 4179 | if (!neigh) |
4170 | return bpf_fib_set_fwd_params(params, neigh, dev); | 4180 | return BPF_FIB_LKUP_RET_NO_NEIGH; |
4171 | 4181 | ||
4172 | return 0; | 4182 | return bpf_fib_set_fwd_params(params, neigh, dev); |
4173 | } | 4183 | } |
4174 | #endif | 4184 | #endif |
4175 | 4185 | ||
@@ -4190,7 +4200,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4190 | 4200 | ||
4191 | /* link local addresses are never forwarded */ | 4201 | /* link local addresses are never forwarded */ |
4192 | if (rt6_need_strict(dst) || rt6_need_strict(src)) | 4202 | if (rt6_need_strict(dst) || rt6_need_strict(src)) |
4193 | return 0; | 4203 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
4194 | 4204 | ||
4195 | dev = dev_get_by_index_rcu(net, params->ifindex); | 4205 | dev = dev_get_by_index_rcu(net, params->ifindex); |
4196 | if (unlikely(!dev)) | 4206 | if (unlikely(!dev)) |
@@ -4198,7 +4208,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4198 | 4208 | ||
4199 | idev = __in6_dev_get_safely(dev); | 4209 | idev = __in6_dev_get_safely(dev); |
4200 | if (unlikely(!idev || !net->ipv6.devconf_all->forwarding)) | 4210 | if (unlikely(!idev || !net->ipv6.devconf_all->forwarding)) |
4201 | return 0; | 4211 | return BPF_FIB_LKUP_RET_FWD_DISABLED; |
4202 | 4212 | ||
4203 | if (flags & BPF_FIB_LOOKUP_OUTPUT) { | 4213 | if (flags & BPF_FIB_LOOKUP_OUTPUT) { |
4204 | fl6.flowi6_iif = 1; | 4214 | fl6.flowi6_iif = 1; |
@@ -4225,7 +4235,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4225 | 4235 | ||
4226 | tb = ipv6_stub->fib6_get_table(net, tbid); | 4236 | tb = ipv6_stub->fib6_get_table(net, tbid); |
4227 | if (unlikely(!tb)) | 4237 | if (unlikely(!tb)) |
4228 | return 0; | 4238 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
4229 | 4239 | ||
4230 | f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict); | 4240 | f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict); |
4231 | } else { | 4241 | } else { |
@@ -4238,11 +4248,23 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4238 | } | 4248 | } |
4239 | 4249 | ||
4240 | if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry)) | 4250 | if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry)) |
4241 | return 0; | 4251 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
4252 | |||
4253 | if (unlikely(f6i->fib6_flags & RTF_REJECT)) { | ||
4254 | switch (f6i->fib6_type) { | ||
4255 | case RTN_BLACKHOLE: | ||
4256 | return BPF_FIB_LKUP_RET_BLACKHOLE; | ||
4257 | case RTN_UNREACHABLE: | ||
4258 | return BPF_FIB_LKUP_RET_UNREACHABLE; | ||
4259 | case RTN_PROHIBIT: | ||
4260 | return BPF_FIB_LKUP_RET_PROHIBIT; | ||
4261 | default: | ||
4262 | return BPF_FIB_LKUP_RET_NOT_FWDED; | ||
4263 | } | ||
4264 | } | ||
4242 | 4265 | ||
4243 | if (unlikely(f6i->fib6_flags & RTF_REJECT || | 4266 | if (f6i->fib6_type != RTN_UNICAST) |
4244 | f6i->fib6_type != RTN_UNICAST)) | 4267 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
4245 | return 0; | ||
4246 | 4268 | ||
4247 | if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0) | 4269 | if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0) |
4248 | f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6, | 4270 | f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6, |
@@ -4252,11 +4274,11 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4252 | if (check_mtu) { | 4274 | if (check_mtu) { |
4253 | mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src); | 4275 | mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src); |
4254 | if (params->tot_len > mtu) | 4276 | if (params->tot_len > mtu) |
4255 | return 0; | 4277 | return BPF_FIB_LKUP_RET_FRAG_NEEDED; |
4256 | } | 4278 | } |
4257 | 4279 | ||
4258 | if (f6i->fib6_nh.nh_lwtstate) | 4280 | if (f6i->fib6_nh.nh_lwtstate) |
4259 | return 0; | 4281 | return BPF_FIB_LKUP_RET_UNSUPP_LWT; |
4260 | 4282 | ||
4261 | if (f6i->fib6_flags & RTF_GATEWAY) | 4283 | if (f6i->fib6_flags & RTF_GATEWAY) |
4262 | *dst = f6i->fib6_nh.nh_gw; | 4284 | *dst = f6i->fib6_nh.nh_gw; |
@@ -4270,10 +4292,10 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4270 | */ | 4292 | */ |
4271 | neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128, | 4293 | neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128, |
4272 | ndisc_hashfn, dst, dev); | 4294 | ndisc_hashfn, dst, dev); |
4273 | if (neigh) | 4295 | if (!neigh) |
4274 | return bpf_fib_set_fwd_params(params, neigh, dev); | 4296 | return BPF_FIB_LKUP_RET_NO_NEIGH; |
4275 | 4297 | ||
4276 | return 0; | 4298 | return bpf_fib_set_fwd_params(params, neigh, dev); |
4277 | } | 4299 | } |
4278 | #endif | 4300 | #endif |
4279 | 4301 | ||
@@ -4315,7 +4337,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, | |||
4315 | struct bpf_fib_lookup *, params, int, plen, u32, flags) | 4337 | struct bpf_fib_lookup *, params, int, plen, u32, flags) |
4316 | { | 4338 | { |
4317 | struct net *net = dev_net(skb->dev); | 4339 | struct net *net = dev_net(skb->dev); |
4318 | int index = -EAFNOSUPPORT; | 4340 | int rc = -EAFNOSUPPORT; |
4319 | 4341 | ||
4320 | if (plen < sizeof(*params)) | 4342 | if (plen < sizeof(*params)) |
4321 | return -EINVAL; | 4343 | return -EINVAL; |
@@ -4326,25 +4348,25 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, | |||
4326 | switch (params->family) { | 4348 | switch (params->family) { |
4327 | #if IS_ENABLED(CONFIG_INET) | 4349 | #if IS_ENABLED(CONFIG_INET) |
4328 | case AF_INET: | 4350 | case AF_INET: |
4329 | index = bpf_ipv4_fib_lookup(net, params, flags, false); | 4351 | rc = bpf_ipv4_fib_lookup(net, params, flags, false); |
4330 | break; | 4352 | break; |
4331 | #endif | 4353 | #endif |
4332 | #if IS_ENABLED(CONFIG_IPV6) | 4354 | #if IS_ENABLED(CONFIG_IPV6) |
4333 | case AF_INET6: | 4355 | case AF_INET6: |
4334 | index = bpf_ipv6_fib_lookup(net, params, flags, false); | 4356 | rc = bpf_ipv6_fib_lookup(net, params, flags, false); |
4335 | break; | 4357 | break; |
4336 | #endif | 4358 | #endif |
4337 | } | 4359 | } |
4338 | 4360 | ||
4339 | if (index > 0) { | 4361 | if (!rc) { |
4340 | struct net_device *dev; | 4362 | struct net_device *dev; |
4341 | 4363 | ||
4342 | dev = dev_get_by_index_rcu(net, index); | 4364 | dev = dev_get_by_index_rcu(net, params->ifindex); |
4343 | if (!is_skb_forwardable(dev, skb)) | 4365 | if (!is_skb_forwardable(dev, skb)) |
4344 | index = 0; | 4366 | rc = BPF_FIB_LKUP_RET_FRAG_NEEDED; |
4345 | } | 4367 | } |
4346 | 4368 | ||
4347 | return index; | 4369 | return rc; |
4348 | } | 4370 | } |
4349 | 4371 | ||
4350 | static const struct bpf_func_proto bpf_skb_fib_lookup_proto = { | 4372 | static const struct bpf_func_proto bpf_skb_fib_lookup_proto = { |
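With this change bpf_fib_lookup() reports a BPF_FIB_LKUP_RET_* code instead of an ifindex, and successful lookups leave the egress device in params->ifindex. A hedged fragment showing the consuming side of the new convention, modelled on the samples/bpf/xdp_fwd_kern.c hunk at the end of this series (fib_params is assumed to be filled in beforehand):

    /* Inside an XDP program body. */
    int rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), 0);

    if (rc == BPF_FIB_LKUP_RET_SUCCESS)
            /* forwardable: egress ifindex was written into the params */
            return bpf_redirect(fib_params.ifindex, 0);

    if (rc == BPF_FIB_LKUP_RET_BLACKHOLE ||
        rc == BPF_FIB_LKUP_RET_UNREACHABLE ||
        rc == BPF_FIB_LKUP_RET_PROHIBIT)
            return XDP_DROP;    /* the FIB says this flow must not pass */

    return XDP_PASS;            /* everything else: punt to the stack */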
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c642304f178c..eba8dae22c25 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -5276,8 +5276,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, | |||
5276 | if (npages >= 1 << order) { | 5276 | if (npages >= 1 << order) { |
5277 | page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | | 5277 | page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | |
5278 | __GFP_COMP | | 5278 | __GFP_COMP | |
5279 | __GFP_NOWARN | | 5279 | __GFP_NOWARN, |
5280 | __GFP_NORETRY, | ||
5281 | order); | 5280 | order); |
5282 | if (page) | 5281 | if (page) |
5283 | goto fill_page; | 5282 | goto fill_page; |
diff --git a/net/core/sock.c b/net/core/sock.c index bcc41829a16d..9e8f65585b81 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -3243,7 +3243,8 @@ static int req_prot_init(const struct proto *prot) | |||
3243 | 3243 | ||
3244 | rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, | 3244 | rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, |
3245 | rsk_prot->obj_size, 0, | 3245 | rsk_prot->obj_size, 0, |
3246 | prot->slab_flags, NULL); | 3246 | SLAB_ACCOUNT | prot->slab_flags, |
3247 | NULL); | ||
3247 | 3248 | ||
3248 | if (!rsk_prot->slab) { | 3249 | if (!rsk_prot->slab) { |
3249 | pr_crit("%s: Can't create request sock SLAB cache!\n", | 3250 | pr_crit("%s: Can't create request sock SLAB cache!\n", |
@@ -3258,7 +3259,8 @@ int proto_register(struct proto *prot, int alloc_slab) | |||
3258 | if (alloc_slab) { | 3259 | if (alloc_slab) { |
3259 | prot->slab = kmem_cache_create_usercopy(prot->name, | 3260 | prot->slab = kmem_cache_create_usercopy(prot->name, |
3260 | prot->obj_size, 0, | 3261 | prot->obj_size, 0, |
3261 | SLAB_HWCACHE_ALIGN | prot->slab_flags, | 3262 | SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | |
3263 | prot->slab_flags, | ||
3262 | prot->useroffset, prot->usersize, | 3264 | prot->useroffset, prot->usersize, |
3263 | NULL); | 3265 | NULL); |
3264 | 3266 | ||
@@ -3281,6 +3283,7 @@ int proto_register(struct proto *prot, int alloc_slab) | |||
3281 | kmem_cache_create(prot->twsk_prot->twsk_slab_name, | 3283 | kmem_cache_create(prot->twsk_prot->twsk_slab_name, |
3282 | prot->twsk_prot->twsk_obj_size, | 3284 | prot->twsk_prot->twsk_obj_size, |
3283 | 0, | 3285 | 0, |
3286 | SLAB_ACCOUNT | | ||
3284 | prot->slab_flags, | 3287 | prot->slab_flags, |
3285 | NULL); | 3288 | NULL); |
3286 | if (prot->twsk_prot->twsk_slab == NULL) | 3289 | if (prot->twsk_prot->twsk_slab == NULL) |
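SLAB_ACCOUNT makes objects allocated from these caches chargeable to the memory cgroup of the allocating task, so socket, request-sock, and timewait-sock memory is no longer invisible to memcg limits. The flag is simply OR-ed into the cache flags; a generic hedged illustration (struct example is hypothetical):

    struct kmem_cache *cachep;

    cachep = kmem_cache_create("example_cache", sizeof(struct example), 0,
                               SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
    if (!cachep)
            return -ENOMEM;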
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 1540db65241a..c9ec1603666b 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
@@ -448,9 +448,7 @@ next_proto: | |||
448 | out_unlock: | 448 | out_unlock: |
449 | rcu_read_unlock(); | 449 | rcu_read_unlock(); |
450 | out: | 450 | out: |
451 | NAPI_GRO_CB(skb)->flush |= flush; | 451 | skb_gro_flush_final_remcsum(skb, pp, flush, &grc); |
452 | skb_gro_remcsum_cleanup(skb, &grc); | ||
453 | skb->remcsum_offload = 0; | ||
454 | 452 | ||
455 | return pp; | 453 | return pp; |
456 | } | 454 | } |
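skb_gro_flush_final_remcsum() folds the remote-checksum cleanup into the same guard as the flush update. By analogy with the plain helper sketched earlier, its likely shape is roughly:

    /* Hedged sketch; the authoritative version is in netdevice.h. */
    static inline void
    skb_gro_flush_final_remcsum(struct sk_buff *skb, struct sk_buff **pp,
                                int flush, struct gro_remcsum *grc)
    {
            if (PTR_ERR(pp) != -EINPROGRESS) {
                    NAPI_GRO_CB(skb)->flush |= flush;
                    skb_gro_remcsum_cleanup(skb, grc);
                    skb->remcsum_offload = 0;
            }
    }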
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 1859c473b21a..6a7d980105f6 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c | |||
@@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, | |||
223 | out_unlock: | 223 | out_unlock: |
224 | rcu_read_unlock(); | 224 | rcu_read_unlock(); |
225 | out: | 225 | out: |
226 | NAPI_GRO_CB(skb)->flush |= flush; | 226 | skb_gro_flush_final(skb, pp, flush); |
227 | 227 | ||
228 | return pp; | 228 | return pp; |
229 | } | 229 | } |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index d06247ba08b2..af0a857d8352 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -265,8 +265,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, | |||
265 | ipv4.sysctl_tcp_fastopen); | 265 | ipv4.sysctl_tcp_fastopen); |
266 | struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) }; | 266 | struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) }; |
267 | struct tcp_fastopen_context *ctxt; | 267 | struct tcp_fastopen_context *ctxt; |
268 | int ret; | ||
269 | u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */ | 268 | u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */ |
269 | __le32 key[4]; | ||
270 | int ret, i; | ||
270 | 271 | ||
271 | tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL); | 272 | tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL); |
272 | if (!tbl.data) | 273 | if (!tbl.data) |
@@ -275,11 +276,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, | |||
275 | rcu_read_lock(); | 276 | rcu_read_lock(); |
276 | ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx); | 277 | ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx); |
277 | if (ctxt) | 278 | if (ctxt) |
278 | memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); | 279 | memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); |
279 | else | 280 | else |
280 | memset(user_key, 0, sizeof(user_key)); | 281 | memset(key, 0, sizeof(key)); |
281 | rcu_read_unlock(); | 282 | rcu_read_unlock(); |
282 | 283 | ||
284 | for (i = 0; i < ARRAY_SIZE(key); i++) | ||
285 | user_key[i] = le32_to_cpu(key[i]); | ||
286 | |||
283 | snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", | 287 | snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", |
284 | user_key[0], user_key[1], user_key[2], user_key[3]); | 288 | user_key[0], user_key[1], user_key[2], user_key[3]); |
285 | ret = proc_dostring(&tbl, write, buffer, lenp, ppos); | 289 | ret = proc_dostring(&tbl, write, buffer, lenp, ppos); |
@@ -290,13 +294,17 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, | |||
290 | ret = -EINVAL; | 294 | ret = -EINVAL; |
291 | goto bad_key; | 295 | goto bad_key; |
292 | } | 296 | } |
293 | tcp_fastopen_reset_cipher(net, NULL, user_key, | 297 | |
298 | for (i = 0; i < ARRAY_SIZE(user_key); i++) | ||
299 | key[i] = cpu_to_le32(user_key[i]); | ||
300 | |||
301 | tcp_fastopen_reset_cipher(net, NULL, key, | ||
294 | TCP_FASTOPEN_KEY_LENGTH); | 302 | TCP_FASTOPEN_KEY_LENGTH); |
295 | } | 303 | } |
296 | 304 | ||
297 | bad_key: | 305 | bad_key: |
298 | pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n", | 306 | pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n", |
299 | user_key[0], user_key[1], user_key[2], user_key[3], | 307 | user_key[0], user_key[1], user_key[2], user_key[3], |
300 | (char *)tbl.data, ret); | 308 | (char *)tbl.data, ret); |
301 | kfree(tbl.data); | 309 | kfree(tbl.data); |
302 | return ret; | 310 | return ret; |
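The detour through __le32 fixes the proc interface on big-endian machines: the key is stored as raw little-endian words, and le32_to_cpu()/cpu_to_le32() are no-ops on little-endian hosts, so behaviour there is unchanged. A standalone user-space sketch of the read-side conversion:

    #include <stdint.h>
    #include <stdio.h>

    /* Hedged demo of le32_to_cpu(): reassemble from explicit LE bytes. */
    static uint32_t le32_to_cpu_demo(uint32_t le)
    {
            const uint8_t *b = (const uint8_t *)&le;

            return b[0] | (b[1] << 8) | (b[2] << 16) |
                   ((uint32_t)b[3] << 24);
    }

    int main(void)
    {
            uint32_t key_word = 0x11223344;     /* hypothetical key word */

            printf("%08x\n", le32_to_cpu_demo(key_word));
            return 0;
    }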
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 355d3dffd021..8e5522c6833a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -265,7 +265,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) | |||
265 | * it is probably a retransmit. | 265 | * it is probably a retransmit. |
266 | */ | 266 | */ |
267 | if (tp->ecn_flags & TCP_ECN_SEEN) | 267 | if (tp->ecn_flags & TCP_ECN_SEEN) |
268 | tcp_enter_quickack_mode(sk, 1); | 268 | tcp_enter_quickack_mode(sk, 2); |
269 | break; | 269 | break; |
270 | case INET_ECN_CE: | 270 | case INET_ECN_CE: |
271 | if (tcp_ca_needs_ecn(sk)) | 271 | if (tcp_ca_needs_ecn(sk)) |
@@ -273,7 +273,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) | |||
273 | 273 | ||
274 | if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { | 274 | if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { |
275 | /* Better not delay acks, sender can have a very low cwnd */ | 275 | /* Better not delay acks, sender can have a very low cwnd */ |
276 | tcp_enter_quickack_mode(sk, 1); | 276 | tcp_enter_quickack_mode(sk, 2); |
277 | tp->ecn_flags |= TCP_ECN_DEMAND_CWR; | 277 | tp->ecn_flags |= TCP_ECN_DEMAND_CWR; |
278 | } | 278 | } |
279 | tp->ecn_flags |= TCP_ECN_SEEN; | 279 | tp->ecn_flags |= TCP_ECN_SEEN; |
@@ -3181,6 +3181,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, | |||
3181 | 3181 | ||
3182 | if (tcp_is_reno(tp)) { | 3182 | if (tcp_is_reno(tp)) { |
3183 | tcp_remove_reno_sacks(sk, pkts_acked); | 3183 | tcp_remove_reno_sacks(sk, pkts_acked); |
3184 | |||
3185 | /* If any of the cumulatively ACKed segments was | ||
3186 | * retransmitted, the non-SACK case cannot confirm that | ||
3187 | * progress came from the original transmissions, because | ||
3188 | * TCPCB_SACKED_ACKED bits are unavailable, even if some | ||
3189 | * of the packets were never retransmitted. | ||
3190 | */ | ||
3191 | if (flag & FLAG_RETRANS_DATA_ACKED) | ||
3192 | flag &= ~FLAG_ORIG_SACK_ACKED; | ||
3184 | } else { | 3193 | } else { |
3185 | int delta; | 3194 | int delta; |
3186 | 3195 | ||
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 92dc9e5a7ff3..69c54540d5b4 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c | |||
@@ -394,7 +394,7 @@ unflush: | |||
394 | out_unlock: | 394 | out_unlock: |
395 | rcu_read_unlock(); | 395 | rcu_read_unlock(); |
396 | out: | 396 | out: |
397 | NAPI_GRO_CB(skb)->flush |= flush; | 397 | skb_gro_flush_final(skb, pp, flush); |
398 | return pp; | 398 | return pp; |
399 | } | 399 | } |
400 | EXPORT_SYMBOL(udp_gro_receive); | 400 | EXPORT_SYMBOL(udp_gro_receive); |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c134286d6a41..91580c62bb86 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -4528,6 +4528,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp, | |||
4528 | unsigned long expires, u32 flags) | 4528 | unsigned long expires, u32 flags) |
4529 | { | 4529 | { |
4530 | struct fib6_info *f6i; | 4530 | struct fib6_info *f6i; |
4531 | u32 prio; | ||
4531 | 4532 | ||
4532 | f6i = addrconf_get_prefix_route(&ifp->addr, | 4533 | f6i = addrconf_get_prefix_route(&ifp->addr, |
4533 | ifp->prefix_len, | 4534 | ifp->prefix_len, |
@@ -4536,13 +4537,15 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp, | |||
4536 | if (!f6i) | 4537 | if (!f6i) |
4537 | return -ENOENT; | 4538 | return -ENOENT; |
4538 | 4539 | ||
4539 | if (f6i->fib6_metric != ifp->rt_priority) { | 4540 | prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF; |
4541 | if (f6i->fib6_metric != prio) { | ||
4542 | /* delete old one */ | ||
4543 | ip6_del_rt(dev_net(ifp->idev->dev), f6i); | ||
4544 | |||
4540 | /* add new one */ | 4545 | /* add new one */ |
4541 | addrconf_prefix_route(&ifp->addr, ifp->prefix_len, | 4546 | addrconf_prefix_route(&ifp->addr, ifp->prefix_len, |
4542 | ifp->rt_priority, ifp->idev->dev, | 4547 | ifp->rt_priority, ifp->idev->dev, |
4543 | expires, flags, GFP_KERNEL); | 4548 | expires, flags, GFP_KERNEL); |
4544 | /* delete old one */ | ||
4545 | ip6_del_rt(dev_net(ifp->idev->dev), f6i); | ||
4546 | } else { | 4549 | } else { |
4547 | if (!expires) | 4550 | if (!expires) |
4548 | fib6_clean_expires(f6i); | 4551 | fib6_clean_expires(f6i); |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 5e0332014c17..a452d99c9f52 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -107,7 +107,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net) | |||
107 | if (hdr == NULL) | 107 | if (hdr == NULL) |
108 | goto err_reg; | 108 | goto err_reg; |
109 | 109 | ||
110 | net->nf_frag.sysctl.frags_hdr = hdr; | 110 | net->nf_frag_frags_hdr = hdr; |
111 | return 0; | 111 | return 0; |
112 | 112 | ||
113 | err_reg: | 113 | err_reg: |
@@ -121,8 +121,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) | |||
121 | { | 121 | { |
122 | struct ctl_table *table; | 122 | struct ctl_table *table; |
123 | 123 | ||
124 | table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg; | 124 | table = net->nf_frag_frags_hdr->ctl_table_arg; |
125 | unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr); | 125 | unregister_net_sysctl_table(net->nf_frag_frags_hdr); |
126 | if (!net_eq(net, &init_net)) | 126 | if (!net_eq(net, &init_net)) |
127 | kfree(table); | 127 | kfree(table); |
128 | } | 128 | } |
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 33fb35cbfac1..558fe8cc6d43 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c | |||
@@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void) | |||
373 | return -ENOMEM; | 373 | return -ENOMEM; |
374 | 374 | ||
375 | for_each_possible_cpu(cpu) { | 375 | for_each_possible_cpu(cpu) { |
376 | tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL); | 376 | tfm = crypto_alloc_shash(algo->name, 0, 0); |
377 | if (IS_ERR(tfm)) | 377 | if (IS_ERR(tfm)) |
378 | return PTR_ERR(tfm); | 378 | return PTR_ERR(tfm); |
379 | p_tfm = per_cpu_ptr(algo->tfms, cpu); | 379 | p_tfm = per_cpu_ptr(algo->tfms, cpu); |
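The bug fixed here is a signature mix-up: crypto_alloc_shash(name, type, mask) takes CRYPTO_ALG_* bits, not GFP flags, so passing GFP_KERNEL in the mask slot silently restricted which algorithm implementations could match. The corrected calling pattern, as a hedged generic example:

    struct crypto_shash *tfm;

    tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);  /* type = 0, mask = 0 */
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);
    /* ... use tfm ... */
    crypto_free_shash(tfm);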
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 44b5dfe8727d..fa1f1e63a264 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -4845,7 +4845,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, | |||
4845 | skb_reset_network_header(skb); | 4845 | skb_reset_network_header(skb); |
4846 | skb_reset_mac_header(skb); | 4846 | skb_reset_mac_header(skb); |
4847 | 4847 | ||
4848 | local_bh_disable(); | ||
4848 | __ieee80211_subif_start_xmit(skb, skb->dev, flags); | 4849 | __ieee80211_subif_start_xmit(skb, skb->dev, flags); |
4850 | local_bh_enable(); | ||
4849 | 4851 | ||
4850 | return 0; | 4852 | return 0; |
4851 | } | 4853 | } |
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index d8383609fe28..510039862aa9 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c | |||
@@ -47,6 +47,8 @@ struct nf_conncount_tuple { | |||
47 | struct hlist_node node; | 47 | struct hlist_node node; |
48 | struct nf_conntrack_tuple tuple; | 48 | struct nf_conntrack_tuple tuple; |
49 | struct nf_conntrack_zone zone; | 49 | struct nf_conntrack_zone zone; |
50 | int cpu; | ||
51 | u32 jiffies32; | ||
50 | }; | 52 | }; |
51 | 53 | ||
52 | struct nf_conncount_rb { | 54 | struct nf_conncount_rb { |
@@ -91,11 +93,42 @@ bool nf_conncount_add(struct hlist_head *head, | |||
91 | return false; | 93 | return false; |
92 | conn->tuple = *tuple; | 94 | conn->tuple = *tuple; |
93 | conn->zone = *zone; | 95 | conn->zone = *zone; |
96 | conn->cpu = raw_smp_processor_id(); | ||
97 | conn->jiffies32 = (u32)jiffies; | ||
94 | hlist_add_head(&conn->node, head); | 98 | hlist_add_head(&conn->node, head); |
95 | return true; | 99 | return true; |
96 | } | 100 | } |
97 | EXPORT_SYMBOL_GPL(nf_conncount_add); | 101 | EXPORT_SYMBOL_GPL(nf_conncount_add); |
98 | 102 | ||
103 | static const struct nf_conntrack_tuple_hash * | ||
104 | find_or_evict(struct net *net, struct nf_conncount_tuple *conn) | ||
105 | { | ||
106 | const struct nf_conntrack_tuple_hash *found; | ||
107 | unsigned long a, b; | ||
108 | int cpu = raw_smp_processor_id(); | ||
109 | __s32 age; | ||
110 | |||
111 | found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple); | ||
112 | if (found) | ||
113 | return found; | ||
114 | b = conn->jiffies32; | ||
115 | a = (u32)jiffies; | ||
116 | |||
117 | /* conn might have been added just before by another cpu and | ||
118 | * might still be unconfirmed. In this case, nf_conntrack_find_get() | ||
119 | * returns no result. Thus only evict if this cpu added the | ||
120 | * stale entry or if the entry is older than two jiffies. | ||
121 | */ | ||
122 | age = a - b; | ||
123 | if (conn->cpu == cpu || age >= 2) { | ||
124 | hlist_del(&conn->node); | ||
125 | kmem_cache_free(conncount_conn_cachep, conn); | ||
126 | return ERR_PTR(-ENOENT); | ||
127 | } | ||
128 | |||
129 | return ERR_PTR(-EAGAIN); | ||
130 | } | ||
131 | |||
99 | unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, | 132 | unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, |
100 | const struct nf_conntrack_tuple *tuple, | 133 | const struct nf_conntrack_tuple *tuple, |
101 | const struct nf_conntrack_zone *zone, | 134 | const struct nf_conntrack_zone *zone, |
@@ -103,18 +136,27 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, | |||
103 | { | 136 | { |
104 | const struct nf_conntrack_tuple_hash *found; | 137 | const struct nf_conntrack_tuple_hash *found; |
105 | struct nf_conncount_tuple *conn; | 138 | struct nf_conncount_tuple *conn; |
106 | struct hlist_node *n; | ||
107 | struct nf_conn *found_ct; | 139 | struct nf_conn *found_ct; |
140 | struct hlist_node *n; | ||
108 | unsigned int length = 0; | 141 | unsigned int length = 0; |
109 | 142 | ||
110 | *addit = tuple ? true : false; | 143 | *addit = tuple ? true : false; |
111 | 144 | ||
112 | /* check the saved connections */ | 145 | /* check the saved connections */ |
113 | hlist_for_each_entry_safe(conn, n, head, node) { | 146 | hlist_for_each_entry_safe(conn, n, head, node) { |
114 | found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple); | 147 | found = find_or_evict(net, conn); |
115 | if (found == NULL) { | 148 | if (IS_ERR(found)) { |
116 | hlist_del(&conn->node); | 149 | /* Not found, but might be about to be confirmed */ |
117 | kmem_cache_free(conncount_conn_cachep, conn); | 150 | if (PTR_ERR(found) == -EAGAIN) { |
151 | length++; | ||
152 | if (!tuple) | ||
153 | continue; | ||
154 | |||
155 | if (nf_ct_tuple_equal(&conn->tuple, tuple) && | ||
156 | nf_ct_zone_id(&conn->zone, conn->zone.dir) == | ||
157 | nf_ct_zone_id(zone, zone->dir)) | ||
158 | *addit = false; | ||
159 | } | ||
118 | continue; | 160 | continue; |
119 | } | 161 | } |
120 | 162 | ||
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 551a1eddf0fa..a75b11c39312 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
@@ -465,6 +465,11 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) | |||
465 | 465 | ||
466 | nf_ct_expect_iterate_destroy(expect_iter_me, NULL); | 466 | nf_ct_expect_iterate_destroy(expect_iter_me, NULL); |
467 | nf_ct_iterate_destroy(unhelp, me); | 467 | nf_ct_iterate_destroy(unhelp, me); |
468 | |||
469 | /* Someone may still hold a reference to the helper obtained before | ||
470 | * the unhelp above ran, so wait for those users to finish. | ||
471 | */ | ||
472 | synchronize_rcu(); | ||
468 | } | 473 | } |
469 | EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); | 474 | EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); |
470 | 475 | ||
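The added synchronize_rcu() is the standard unpublish-then-wait idiom: readers that looked the helper up under rcu_read_lock() before unhelp ran may still be using it, and the writer must let those read-side critical sections drain before the caller frees the helper. The generic shape of the idiom, with hypothetical names:

    spin_lock(&registry_lock);
    list_del_rcu(&obj->node);       /* 1. unpublish: no new readers */
    spin_unlock(&registry_lock);

    synchronize_rcu();              /* 2. wait out existing readers */

    kfree(obj);                     /* 3. nobody can still observe it */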
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 426457047578..a61d6df6e5f6 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
@@ -424,6 +424,10 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, | |||
424 | if (write) { | 424 | if (write) { |
425 | struct ctl_table tmp = *table; | 425 | struct ctl_table tmp = *table; |
426 | 426 | ||
427 | /* proc_dostring() can append to existing strings, so we need to | ||
428 | * initialize it as an empty string. | ||
429 | */ | ||
430 | buf[0] = '\0'; | ||
427 | tmp.data = buf; | 431 | tmp.data = buf; |
428 | r = proc_dostring(&tmp, write, buffer, lenp, ppos); | 432 | r = proc_dostring(&tmp, write, buffer, lenp, ppos); |
429 | if (r) | 433 | if (r) |
@@ -442,14 +446,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, | |||
442 | rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); | 446 | rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); |
443 | mutex_unlock(&nf_log_mutex); | 447 | mutex_unlock(&nf_log_mutex); |
444 | } else { | 448 | } else { |
449 | struct ctl_table tmp = *table; | ||
450 | |||
451 | tmp.data = buf; | ||
445 | mutex_lock(&nf_log_mutex); | 452 | mutex_lock(&nf_log_mutex); |
446 | logger = nft_log_dereference(net->nf.nf_loggers[tindex]); | 453 | logger = nft_log_dereference(net->nf.nf_loggers[tindex]); |
447 | if (!logger) | 454 | if (!logger) |
448 | table->data = "NONE"; | 455 | strlcpy(buf, "NONE", sizeof(buf)); |
449 | else | 456 | else |
450 | table->data = logger->name; | 457 | strlcpy(buf, logger->name, sizeof(buf)); |
451 | r = proc_dostring(table, write, buffer, lenp, ppos); | ||
452 | mutex_unlock(&nf_log_mutex); | 458 | mutex_unlock(&nf_log_mutex); |
459 | r = proc_dostring(&tmp, write, buffer, lenp, ppos); | ||
453 | } | 460 | } |
454 | 461 | ||
455 | return r; | 462 | return r; |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 4ccd2988f9db..ea4ba551abb2 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -1243,6 +1243,9 @@ static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl, | |||
1243 | static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { | 1243 | static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { |
1244 | [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, | 1244 | [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, |
1245 | [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, | 1245 | [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, |
1246 | [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 }, | ||
1247 | [NFQA_CFG_MASK] = { .type = NLA_U32 }, | ||
1248 | [NFQA_CFG_FLAGS] = { .type = NLA_U32 }, | ||
1246 | }; | 1249 | }; |
1247 | 1250 | ||
1248 | static const struct nf_queue_handler nfqh = { | 1251 | static const struct nf_queue_handler nfqh = { |
diff --git a/net/rds/connection.c b/net/rds/connection.c index abef75da89a7..cfb05953b0e5 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c | |||
@@ -659,11 +659,19 @@ static void rds_conn_info(struct socket *sock, unsigned int len, | |||
659 | 659 | ||
660 | int rds_conn_init(void) | 660 | int rds_conn_init(void) |
661 | { | 661 | { |
662 | int ret; | ||
663 | |||
664 | ret = rds_loop_net_init(); /* register pernet callback */ | ||
665 | if (ret) | ||
666 | return ret; | ||
667 | |||
662 | rds_conn_slab = kmem_cache_create("rds_connection", | 668 | rds_conn_slab = kmem_cache_create("rds_connection", |
663 | sizeof(struct rds_connection), | 669 | sizeof(struct rds_connection), |
664 | 0, 0, NULL); | 670 | 0, 0, NULL); |
665 | if (!rds_conn_slab) | 671 | if (!rds_conn_slab) { |
672 | rds_loop_net_exit(); | ||
666 | return -ENOMEM; | 673 | return -ENOMEM; |
674 | } | ||
667 | 675 | ||
668 | rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info); | 676 | rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info); |
669 | rds_info_register_func(RDS_INFO_SEND_MESSAGES, | 677 | rds_info_register_func(RDS_INFO_SEND_MESSAGES, |
@@ -676,6 +684,7 @@ int rds_conn_init(void) | |||
676 | 684 | ||
677 | void rds_conn_exit(void) | 685 | void rds_conn_exit(void) |
678 | { | 686 | { |
687 | rds_loop_net_exit(); /* unregister pernet callback */ | ||
679 | rds_loop_exit(); | 688 | rds_loop_exit(); |
680 | 689 | ||
681 | WARN_ON(!hlist_empty(rds_conn_hash)); | 690 | WARN_ON(!hlist_empty(rds_conn_hash)); |
diff --git a/net/rds/loop.c b/net/rds/loop.c index dac6218a460e..feea1f96ee2a 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/in.h> | 35 | #include <linux/in.h> |
36 | #include <net/net_namespace.h> | ||
37 | #include <net/netns/generic.h> | ||
36 | 38 | ||
37 | #include "rds_single_path.h" | 39 | #include "rds_single_path.h" |
38 | #include "rds.h" | 40 | #include "rds.h" |
@@ -40,6 +42,17 @@ | |||
40 | 42 | ||
41 | static DEFINE_SPINLOCK(loop_conns_lock); | 43 | static DEFINE_SPINLOCK(loop_conns_lock); |
42 | static LIST_HEAD(loop_conns); | 44 | static LIST_HEAD(loop_conns); |
45 | static atomic_t rds_loop_unloading = ATOMIC_INIT(0); | ||
46 | |||
47 | static void rds_loop_set_unloading(void) | ||
48 | { | ||
49 | atomic_set(&rds_loop_unloading, 1); | ||
50 | } | ||
51 | |||
52 | static bool rds_loop_is_unloading(struct rds_connection *conn) | ||
53 | { | ||
54 | return atomic_read(&rds_loop_unloading) != 0; | ||
55 | } | ||
43 | 56 | ||
44 | /* | 57 | /* |
45 | * This 'loopback' transport is a special case for flows that originate | 58 | * This 'loopback' transport is a special case for flows that originate |
@@ -165,6 +178,8 @@ void rds_loop_exit(void) | |||
165 | struct rds_loop_connection *lc, *_lc; | 178 | struct rds_loop_connection *lc, *_lc; |
166 | LIST_HEAD(tmp_list); | 179 | LIST_HEAD(tmp_list); |
167 | 180 | ||
181 | rds_loop_set_unloading(); | ||
182 | synchronize_rcu(); | ||
168 | /* avoid calling conn_destroy with irqs off */ | 183 | /* avoid calling conn_destroy with irqs off */ |
169 | spin_lock_irq(&loop_conns_lock); | 184 | spin_lock_irq(&loop_conns_lock); |
170 | list_splice(&loop_conns, &tmp_list); | 185 | list_splice(&loop_conns, &tmp_list); |
@@ -177,6 +192,46 @@ void rds_loop_exit(void) | |||
177 | } | 192 | } |
178 | } | 193 | } |
179 | 194 | ||
195 | static void rds_loop_kill_conns(struct net *net) | ||
196 | { | ||
197 | struct rds_loop_connection *lc, *_lc; | ||
198 | LIST_HEAD(tmp_list); | ||
199 | |||
200 | spin_lock_irq(&loop_conns_lock); | ||
201 | list_for_each_entry_safe(lc, _lc, &loop_conns, loop_node) { | ||
202 | struct net *c_net = read_pnet(&lc->conn->c_net); | ||
203 | |||
204 | if (net != c_net) | ||
205 | continue; | ||
206 | list_move_tail(&lc->loop_node, &tmp_list); | ||
207 | } | ||
208 | spin_unlock_irq(&loop_conns_lock); | ||
209 | |||
210 | list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) { | ||
211 | WARN_ON(lc->conn->c_passive); | ||
212 | rds_conn_destroy(lc->conn); | ||
213 | } | ||
214 | } | ||
215 | |||
216 | static void __net_exit rds_loop_exit_net(struct net *net) | ||
217 | { | ||
218 | rds_loop_kill_conns(net); | ||
219 | } | ||
220 | |||
221 | static struct pernet_operations rds_loop_net_ops = { | ||
222 | .exit = rds_loop_exit_net, | ||
223 | }; | ||
224 | |||
225 | int rds_loop_net_init(void) | ||
226 | { | ||
227 | return register_pernet_device(&rds_loop_net_ops); | ||
228 | } | ||
229 | |||
230 | void rds_loop_net_exit(void) | ||
231 | { | ||
232 | unregister_pernet_device(&rds_loop_net_ops); | ||
233 | } | ||
234 | |||
180 | /* | 235 | /* |
181 | * This is missing .xmit_* because loop doesn't go through generic | 236 | * This is missing .xmit_* because loop doesn't go through generic |
182 | * rds_send_xmit() and doesn't call rds_recv_incoming(). .listen_stop and | 237 | * rds_send_xmit() and doesn't call rds_recv_incoming(). .listen_stop and |
@@ -194,4 +249,5 @@ struct rds_transport rds_loop_transport = { | |||
194 | .inc_free = rds_loop_inc_free, | 249 | .inc_free = rds_loop_inc_free, |
195 | .t_name = "loopback", | 250 | .t_name = "loopback", |
196 | .t_type = RDS_TRANS_LOOP, | 251 | .t_type = RDS_TRANS_LOOP, |
252 | .t_unloading = rds_loop_is_unloading, | ||
197 | }; | 253 | }; |
diff --git a/net/rds/loop.h b/net/rds/loop.h index 469fa4b2da4f..bbc8cdd030df 100644 --- a/net/rds/loop.h +++ b/net/rds/loop.h | |||
@@ -5,6 +5,8 @@ | |||
5 | /* loop.c */ | 5 | /* loop.c */ |
6 | extern struct rds_transport rds_loop_transport; | 6 | extern struct rds_transport rds_loop_transport; |
7 | 7 | ||
8 | int rds_loop_net_init(void); | ||
9 | void rds_loop_net_exit(void); | ||
8 | void rds_loop_exit(void); | 10 | void rds_loop_exit(void); |
9 | 11 | ||
10 | #endif | 12 | #endif |
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 973b4471b532..e017b6a4452b 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
@@ -45,6 +45,7 @@ static DEFINE_MUTEX(smc_create_lgr_pending); /* serialize link group | |||
45 | */ | 45 | */ |
46 | 46 | ||
47 | static void smc_tcp_listen_work(struct work_struct *); | 47 | static void smc_tcp_listen_work(struct work_struct *); |
48 | static void smc_connect_work(struct work_struct *); | ||
48 | 49 | ||
49 | static void smc_set_keepalive(struct sock *sk, int val) | 50 | static void smc_set_keepalive(struct sock *sk, int val) |
50 | { | 51 | { |
@@ -122,6 +123,12 @@ static int smc_release(struct socket *sock) | |||
122 | goto out; | 123 | goto out; |
123 | 124 | ||
124 | smc = smc_sk(sk); | 125 | smc = smc_sk(sk); |
126 | |||
127 | /* cleanup for a dangling non-blocking connect */ | ||
128 | flush_work(&smc->connect_work); | ||
129 | kfree(smc->connect_info); | ||
130 | smc->connect_info = NULL; | ||
131 | |||
125 | if (sk->sk_state == SMC_LISTEN) | 132 | if (sk->sk_state == SMC_LISTEN) |
126 | /* smc_close_non_accepted() is called and acquires | 133 | /* smc_close_non_accepted() is called and acquires |
127 | * sock lock for child sockets again | 134 | * sock lock for child sockets again |
@@ -186,6 +193,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock, | |||
186 | sk->sk_protocol = protocol; | 193 | sk->sk_protocol = protocol; |
187 | smc = smc_sk(sk); | 194 | smc = smc_sk(sk); |
188 | INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); | 195 | INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); |
196 | INIT_WORK(&smc->connect_work, smc_connect_work); | ||
189 | INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); | 197 | INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); |
190 | INIT_LIST_HEAD(&smc->accept_q); | 198 | INIT_LIST_HEAD(&smc->accept_q); |
191 | spin_lock_init(&smc->accept_q_lock); | 199 | spin_lock_init(&smc->accept_q_lock); |
@@ -576,6 +584,35 @@ static int __smc_connect(struct smc_sock *smc) | |||
576 | return 0; | 584 | return 0; |
577 | } | 585 | } |
578 | 586 | ||
587 | static void smc_connect_work(struct work_struct *work) | ||
588 | { | ||
589 | struct smc_sock *smc = container_of(work, struct smc_sock, | ||
590 | connect_work); | ||
591 | int rc; | ||
592 | |||
593 | lock_sock(&smc->sk); | ||
594 | rc = kernel_connect(smc->clcsock, &smc->connect_info->addr, | ||
595 | smc->connect_info->alen, smc->connect_info->flags); | ||
596 | if (smc->clcsock->sk->sk_err) { | ||
597 | smc->sk.sk_err = smc->clcsock->sk->sk_err; | ||
598 | goto out; | ||
599 | } | ||
600 | if (rc < 0) { | ||
601 | smc->sk.sk_err = -rc; | ||
602 | goto out; | ||
603 | } | ||
604 | |||
605 | rc = __smc_connect(smc); | ||
606 | if (rc < 0) | ||
607 | smc->sk.sk_err = -rc; | ||
608 | |||
609 | out: | ||
610 | smc->sk.sk_state_change(&smc->sk); | ||
611 | kfree(smc->connect_info); | ||
612 | smc->connect_info = NULL; | ||
613 | release_sock(&smc->sk); | ||
614 | } | ||
615 | |||
579 | static int smc_connect(struct socket *sock, struct sockaddr *addr, | 616 | static int smc_connect(struct socket *sock, struct sockaddr *addr, |
580 | int alen, int flags) | 617 | int alen, int flags) |
581 | { | 618 | { |
@@ -605,15 +642,32 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr, | |||
605 | 642 | ||
606 | smc_copy_sock_settings_to_clc(smc); | 643 | smc_copy_sock_settings_to_clc(smc); |
607 | tcp_sk(smc->clcsock->sk)->syn_smc = 1; | 644 | tcp_sk(smc->clcsock->sk)->syn_smc = 1; |
608 | rc = kernel_connect(smc->clcsock, addr, alen, flags); | 645 | if (flags & O_NONBLOCK) { |
609 | if (rc) | 646 | if (smc->connect_info) { |
610 | goto out; | 647 | rc = -EALREADY; |
648 | goto out; | ||
649 | } | ||
650 | smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL); | ||
651 | if (!smc->connect_info) { | ||
652 | rc = -ENOMEM; | ||
653 | goto out; | ||
654 | } | ||
655 | smc->connect_info->alen = alen; | ||
656 | smc->connect_info->flags = flags ^ O_NONBLOCK; | ||
657 | memcpy(&smc->connect_info->addr, addr, alen); | ||
658 | schedule_work(&smc->connect_work); | ||
659 | rc = -EINPROGRESS; | ||
660 | } else { | ||
661 | rc = kernel_connect(smc->clcsock, addr, alen, flags); | ||
662 | if (rc) | ||
663 | goto out; | ||
611 | 664 | ||
612 | rc = __smc_connect(smc); | 665 | rc = __smc_connect(smc); |
613 | if (rc < 0) | 666 | if (rc < 0) |
614 | goto out; | 667 | goto out; |
615 | else | 668 | else |
616 | rc = 0; /* success cases including fallback */ | 669 | rc = 0; /* success cases including fallback */ |
670 | } | ||
617 | 671 | ||
618 | out: | 672 | out: |
619 | release_sock(sk); | 673 | release_sock(sk); |
@@ -1279,40 +1333,18 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, | |||
1279 | struct sock *sk = sock->sk; | 1333 | struct sock *sk = sock->sk; |
1280 | __poll_t mask = 0; | 1334 | __poll_t mask = 0; |
1281 | struct smc_sock *smc; | 1335 | struct smc_sock *smc; |
1282 | int rc; | ||
1283 | 1336 | ||
1284 | if (!sk) | 1337 | if (!sk) |
1285 | return EPOLLNVAL; | 1338 | return EPOLLNVAL; |
1286 | 1339 | ||
1287 | smc = smc_sk(sock->sk); | 1340 | smc = smc_sk(sock->sk); |
1288 | sock_hold(sk); | ||
1289 | lock_sock(sk); | ||
1290 | if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { | 1341 | if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { |
1291 | /* delegate to CLC child sock */ | 1342 | /* delegate to CLC child sock */ |
1292 | release_sock(sk); | ||
1293 | mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); | 1343 | mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); |
1294 | lock_sock(sk); | ||
1295 | sk->sk_err = smc->clcsock->sk->sk_err; | 1344 | sk->sk_err = smc->clcsock->sk->sk_err; |
1296 | if (sk->sk_err) { | 1345 | if (sk->sk_err) |
1297 | mask |= EPOLLERR; | 1346 | mask |= EPOLLERR; |
1298 | } else { | ||
1299 | /* if non-blocking connect finished ... */ | ||
1300 | if (sk->sk_state == SMC_INIT && | ||
1301 | mask & EPOLLOUT && | ||
1302 | smc->clcsock->sk->sk_state != TCP_CLOSE) { | ||
1303 | rc = __smc_connect(smc); | ||
1304 | if (rc < 0) | ||
1305 | mask |= EPOLLERR; | ||
1306 | /* success cases including fallback */ | ||
1307 | mask |= EPOLLOUT | EPOLLWRNORM; | ||
1308 | } | ||
1309 | } | ||
1310 | } else { | 1347 | } else { |
1311 | if (sk->sk_state != SMC_CLOSED) { | ||
1312 | release_sock(sk); | ||
1313 | sock_poll_wait(file, sk_sleep(sk), wait); | ||
1314 | lock_sock(sk); | ||
1315 | } | ||
1316 | if (sk->sk_err) | 1348 | if (sk->sk_err) |
1317 | mask |= EPOLLERR; | 1349 | mask |= EPOLLERR; |
1318 | if ((sk->sk_shutdown == SHUTDOWN_MASK) || | 1350 | if ((sk->sk_shutdown == SHUTDOWN_MASK) || |
@@ -1340,8 +1372,6 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, | |||
1340 | mask |= EPOLLPRI; | 1372 | mask |= EPOLLPRI; |
1341 | 1373 | ||
1342 | } | 1374 | } |
1343 | release_sock(sk); | ||
1344 | sock_put(sk); | ||
1345 | 1375 | ||
1346 | return mask; | 1376 | return mask; |
1347 | } | 1377 | } |
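From user space the reworked path behaves like ordinary TCP: a non-blocking connect() on an AF_SMC socket now fails immediately with EINPROGRESS, the CLC handshake runs in connect_work, and completion is signalled through poll()/sk_state_change. A hedged user-space sketch (address setup omitted):

    #include <errno.h>
    #include <fcntl.h>
    #include <poll.h>
    #include <sys/socket.h>

    static int connect_nonblock(int fd, const struct sockaddr *sa,
                                socklen_t len)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLOUT };
            socklen_t elen = sizeof(int);
            int err;

            fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
            if (connect(fd, sa, len) == 0)
                    return 0;               /* connected immediately */
            if (errno != EINPROGRESS)
                    return -1;              /* hard failure */

            if (poll(&pfd, 1, 5000) <= 0)   /* wait for the worker */
                    return -1;

            getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &elen);
            return err ? -1 : 0;
    }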
diff --git a/net/smc/smc.h b/net/smc/smc.h index 51ae1f10d81a..d7ca26570482 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h | |||
@@ -187,11 +187,19 @@ struct smc_connection { | |||
187 | struct work_struct close_work; /* peer sent some closing */ | 187 | struct work_struct close_work; /* peer sent some closing */ |
188 | }; | 188 | }; |
189 | 189 | ||
190 | struct smc_connect_info { | ||
191 | int flags; | ||
192 | int alen; | ||
193 | struct sockaddr addr; | ||
194 | }; | ||
195 | |||
190 | struct smc_sock { /* smc sock container */ | 196 | struct smc_sock { /* smc sock container */ |
191 | struct sock sk; | 197 | struct sock sk; |
192 | struct socket *clcsock; /* internal tcp socket */ | 198 | struct socket *clcsock; /* internal tcp socket */ |
193 | struct smc_connection conn; /* smc connection */ | 199 | struct smc_connection conn; /* smc connection */ |
194 | struct smc_sock *listen_smc; /* listen parent */ | 200 | struct smc_sock *listen_smc; /* listen parent */ |
201 | struct smc_connect_info *connect_info; /* connect address & flags */ | ||
202 | struct work_struct connect_work; /* handle non-blocking connect*/ | ||
195 | struct work_struct tcp_listen_work;/* handle tcp socket accepts */ | 203 | struct work_struct tcp_listen_work;/* handle tcp socket accepts */ |
196 | struct work_struct smc_listen_work;/* prepare new accept socket */ | 204 | struct work_struct smc_listen_work;/* prepare new accept socket */ |
197 | struct list_head accept_q; /* sockets to be accepted */ | 205 | struct list_head accept_q; /* sockets to be accepted */ |
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 373836615c57..625acb27efcc 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c | |||
@@ -35,7 +35,6 @@ struct _strp_msg { | |||
35 | */ | 35 | */ |
36 | struct strp_msg strp; | 36 | struct strp_msg strp; |
37 | int accum_len; | 37 | int accum_len; |
38 | int early_eaten; | ||
39 | }; | 38 | }; |
40 | 39 | ||
41 | static inline struct _strp_msg *_strp_msg(struct sk_buff *skb) | 40 | static inline struct _strp_msg *_strp_msg(struct sk_buff *skb) |
@@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, | |||
115 | head = strp->skb_head; | 114 | head = strp->skb_head; |
116 | if (head) { | 115 | if (head) { |
117 | /* Message already in progress */ | 116 | /* Message already in progress */ |
118 | |||
119 | stm = _strp_msg(head); | ||
120 | if (unlikely(stm->early_eaten)) { | ||
121 | /* Already some number of bytes on the receive sock | ||
122 | * data saved in skb_head, just indicate they | ||
123 | * are consumed. | ||
124 | */ | ||
125 | eaten = orig_len <= stm->early_eaten ? | ||
126 | orig_len : stm->early_eaten; | ||
127 | stm->early_eaten -= eaten; | ||
128 | |||
129 | return eaten; | ||
130 | } | ||
131 | |||
132 | if (unlikely(orig_offset)) { | 117 | if (unlikely(orig_offset)) { |
133 | /* Getting data with a non-zero offset when a message is | 118 | /* Getting data with a non-zero offset when a message is |
134 | * in progress is not expected. If it does happen, we | 119 | * in progress is not expected. If it does happen, we |
@@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, | |||
297 | } | 282 | } |
298 | 283 | ||
299 | stm->accum_len += cand_len; | 284 | stm->accum_len += cand_len; |
285 | eaten += cand_len; | ||
300 | strp->need_bytes = stm->strp.full_len - | 286 | strp->need_bytes = stm->strp.full_len - |
301 | stm->accum_len; | 287 | stm->accum_len; |
302 | stm->early_eaten = cand_len; | ||
303 | STRP_STATS_ADD(strp->stats.bytes, cand_len); | 288 | STRP_STATS_ADD(strp->stats.bytes, cand_len); |
304 | desc->count = 0; /* Stop reading socket */ | 289 | desc->count = 0; /* Stop reading socket */ |
305 | break; | 290 | break; |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c7bbe5f0aae8..4eece06be1e7 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -6231,7 +6231,7 @@ do { \ | |||
6231 | nl80211_check_s32); | 6231 | nl80211_check_s32); |
6232 | /* | 6232 | /* |
6233 | * Check HT operation mode based on | 6233 | * Check HT operation mode based on |
6234 | * IEEE 802.11 2012 8.4.2.59 HT Operation element. | 6234 | * IEEE 802.11-2016 9.4.2.57 HT Operation element. |
6235 | */ | 6235 | */ |
6236 | if (tb[NL80211_MESHCONF_HT_OPMODE]) { | 6236 | if (tb[NL80211_MESHCONF_HT_OPMODE]) { |
6237 | ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]); | 6237 | ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]); |
@@ -6241,22 +6241,9 @@ do { \ | |||
6241 | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) | 6241 | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) |
6242 | return -EINVAL; | 6242 | return -EINVAL; |
6243 | 6243 | ||
6244 | if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) && | 6244 | /* NON_HT_STA bit is reserved, but some programs set it */ |
6245 | (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) | 6245 | ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; |
6246 | return -EINVAL; | ||
6247 | 6246 | ||
6248 | switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) { | ||
6249 | case IEEE80211_HT_OP_MODE_PROTECTION_NONE: | ||
6250 | case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: | ||
6251 | if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT) | ||
6252 | return -EINVAL; | ||
6253 | break; | ||
6254 | case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: | ||
6255 | case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: | ||
6256 | if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) | ||
6257 | return -EINVAL; | ||
6258 | break; | ||
6259 | } | ||
6260 | cfg->ht_opmode = ht_opmode; | 6247 | cfg->ht_opmode = ht_opmode; |
6261 | mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1)); | 6248 | mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1)); |
6262 | } | 6249 | } |
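
The rewritten check drops the protection-mode cross-validation and instead masks off NON_HT_STA_PRSNT, which the new comment notes is reserved here yet set by some existing programs, so those callers keep working rather than getting -EINVAL. For reference, the bit layout these masks carve out of the 16-bit HT operation mode (values as defined in include/linux/ieee80211.h):

    /* bits 0-1: protection mode */
    #define IEEE80211_HT_OP_MODE_PROTECTION             0x0003
    #define IEEE80211_HT_OP_MODE_PROTECTION_NONE             0
    #define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER        1
    #define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ            2
    #define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED      3
    /* bit 2: non-greenfield STAs present */
    #define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT       0x0004
    /* bit 4: non-HT STAs present (reserved in this context) */
    #define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT       0x0010
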
@@ -10962,9 +10949,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) | |||
10962 | rem) { | 10949 | rem) { |
10963 | u8 *mask_pat; | 10950 | u8 *mask_pat; |
10964 | 10951 | ||
10965 | nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, | 10952 | err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, |
10966 | nl80211_packet_pattern_policy, | 10953 | nl80211_packet_pattern_policy, |
10967 | info->extack); | 10954 | info->extack); |
10955 | if (err) | ||
10956 | goto error; | ||
10957 | |||
10968 | err = -EINVAL; | 10958 | err = -EINVAL; |
10969 | if (!pat_tb[NL80211_PKTPAT_MASK] || | 10959 | if (!pat_tb[NL80211_PKTPAT_MASK] || |
10970 | !pat_tb[NL80211_PKTPAT_PATTERN]) | 10960 | !pat_tb[NL80211_PKTPAT_PATTERN]) |
@@ -11213,8 +11203,11 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, | |||
11213 | rem) { | 11203 | rem) { |
11214 | u8 *mask_pat; | 11204 | u8 *mask_pat; |
11215 | 11205 | ||
11216 | nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, | 11206 | err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, |
11217 | nl80211_packet_pattern_policy, NULL); | 11207 | nl80211_packet_pattern_policy, NULL); |
11208 | if (err) | ||
11209 | return err; | ||
11210 | |||
11218 | if (!pat_tb[NL80211_PKTPAT_MASK] || | 11211 | if (!pat_tb[NL80211_PKTPAT_MASK] || |
11219 | !pat_tb[NL80211_PKTPAT_PATTERN]) | 11212 | !pat_tb[NL80211_PKTPAT_PATTERN]) |
11220 | return -EINVAL; | 11213 | return -EINVAL; |
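
Both hunks above make the same fix: nla_parse_nested() can fail (for example on a policy violation), and its return value was previously ignored, leaving pat_tb unparsed while the code went on to read it. The corrected shape as a generic sketch (nla_parse_nested() and its parameters are the real netlink API; MAX_ATTR, REQUIRED_ATTR and the surrounding names are placeholders):

    struct nlattr *tb[MAX_ATTR + 1];
    int err;

    err = nla_parse_nested(tb, MAX_ATTR, nested_attr, policy, extack);
    if (err)
            return err;             /* propagate; tb[] is not valid */

    if (!tb[REQUIRED_ATTR])         /* only then inspect attributes */
            return -EINVAL;
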
diff --git a/samples/bpf/xdp_fwd_kern.c b/samples/bpf/xdp_fwd_kern.c index 6673cdb9f55c..a7e94e7ff87d 100644 --- a/samples/bpf/xdp_fwd_kern.c +++ b/samples/bpf/xdp_fwd_kern.c | |||
@@ -48,9 +48,9 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) | |||
48 | struct ethhdr *eth = data; | 48 | struct ethhdr *eth = data; |
49 | struct ipv6hdr *ip6h; | 49 | struct ipv6hdr *ip6h; |
50 | struct iphdr *iph; | 50 | struct iphdr *iph; |
51 | int out_index; | ||
52 | u16 h_proto; | 51 | u16 h_proto; |
53 | u64 nh_off; | 52 | u64 nh_off; |
53 | int rc; | ||
54 | 54 | ||
55 | nh_off = sizeof(*eth); | 55 | nh_off = sizeof(*eth); |
56 | if (data + nh_off > data_end) | 56 | if (data + nh_off > data_end) |
@@ -101,7 +101,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) | |||
101 | 101 | ||
102 | fib_params.ifindex = ctx->ingress_ifindex; | 102 | fib_params.ifindex = ctx->ingress_ifindex; |
103 | 103 | ||
104 | out_index = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags); | 104 | rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags); |
105 | 105 | ||
106 | /* verify egress index has xdp support | 106 | /* verify egress index has xdp support |
107 | * TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with | 107 | * TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with |
@@ -109,7 +109,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) | |||
109 | * NOTE: without verification that egress index supports XDP | 109 | * NOTE: without verification that egress index supports XDP |
110 | * forwarding packets are dropped. | 110 | * forwarding packets are dropped. |
111 | */ | 111 | */ |
112 | if (out_index > 0) { | 112 | if (rc == 0) { |
113 | if (h_proto == htons(ETH_P_IP)) | 113 | if (h_proto == htons(ETH_P_IP)) |
114 | ip_decrease_ttl(iph); | 114 | ip_decrease_ttl(iph); |
115 | else if (h_proto == htons(ETH_P_IPV6)) | 115 | else if (h_proto == htons(ETH_P_IPV6)) |
@@ -117,7 +117,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) | |||
117 | 117 | ||
118 | memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN); | 118 | memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN); |
119 | memcpy(eth->h_source, fib_params.smac, ETH_ALEN); | 119 | memcpy(eth->h_source, fib_params.smac, ETH_ALEN); |
120 | return bpf_redirect_map(&tx_port, out_index, 0); | 120 | return bpf_redirect_map(&tx_port, fib_params.ifindex, 0); |
121 | } | 121 | } |
122 | 122 | ||
123 | return XDP_PASS; | 123 | return XDP_PASS; |
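
This sample previously treated bpf_fib_lookup()'s return value as an egress ifindex; under the final helper semantics the return is a status (0 for a successful forwardable lookup, positive codes otherwise) and the egress device is written back into fib_params.ifindex, which the redirect now uses. A condensed sketch of the corrected pattern, pulled together from the hunks above (the BPF_FIB_LKUP_RET_* names are from the uapi bpf.h of this era; the sample itself just tests rc == 0):

    rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
    if (rc == BPF_FIB_LKUP_RET_SUCCESS) {           /* == 0 */
            /* fib_params.{dmac,smac,ifindex} are now filled in */
            memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN);
            memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
            return bpf_redirect_map(&tx_port, fib_params.ifindex, 0);
    }
    /* e.g. BPF_FIB_LKUP_RET_NO_NEIGH: let the stack resolve it */
    return XDP_PASS;
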
diff --git a/scripts/cc-can-link.sh b/scripts/cc-can-link.sh index 208eb2825dab..6efcead31989 100755 --- a/scripts/cc-can-link.sh +++ b/scripts/cc-can-link.sh | |||
@@ -1,7 +1,7 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
3 | 3 | ||
4 | cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 && echo "y" | 4 | cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 |
5 | #include <stdio.h> | 5 | #include <stdio.h> |
6 | int main(void) | 6 | int main(void) |
7 | { | 7 | { |
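
With the echo "y" dropped, the script now reports success solely through its exit status, so callers test $? (or use it in a Kconfig $(success,...) expression) rather than comparing output against "y". The heredoc is truncated by the hunk context above; a link test of this shape conventionally finishes with a trivial libc call and a return, roughly as follows (a reconstruction, not the verbatim script body):

    #include <stdio.h>
    int main(void)
    {
            printf("");
            return 0;
    }
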
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 05f42a46d6ed..959aa53ab678 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c | |||
@@ -694,15 +694,19 @@ static int do_load(int argc, char **argv) | |||
694 | return -1; | 694 | return -1; |
695 | } | 695 | } |
696 | 696 | ||
697 | if (do_pin_fd(prog_fd, argv[1])) { | 697 | if (do_pin_fd(prog_fd, argv[1])) |
698 | p_err("failed to pin program"); | 698 | goto err_close_obj; |
699 | return -1; | ||
700 | } | ||
701 | 699 | ||
702 | if (json_output) | 700 | if (json_output) |
703 | jsonw_null(json_wtr); | 701 | jsonw_null(json_wtr); |
704 | 702 | ||
703 | bpf_object__close(obj); | ||
704 | |||
705 | return 0; | 705 | return 0; |
706 | |||
707 | err_close_obj: | ||
708 | bpf_object__close(obj); | ||
709 | return -1; | ||
706 | } | 710 | } |
707 | 711 | ||
708 | static int do_help(int argc, char **argv) | 712 | static int do_help(int argc, char **argv) |
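
The reworked tail of do_load() fixes an object leak: previously a failed pin returned without releasing the loaded object, and the success path never released it either. The p_err() call also goes away, presumably because do_pin_fd() already prints its own diagnostic. The goto-cleanup idiom in isolation (bpf_object__close() is the real libbpf call; the rest is schematic):

    int load_sketch(void)
    {
            struct bpf_object *obj = NULL;

            /* ... open and load obj, obtain prog_fd ... */

            if (pin_step())                 /* stands in for do_pin_fd() */
                    goto err_close_obj;

            bpf_object__close(obj);         /* success path frees obj too */
            return 0;

    err_close_obj:
            bpf_object__close(obj);
            return -1;
    }
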
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 7eb613ffef55..b4994a94968b 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config | |||
@@ -6,6 +6,7 @@ CONFIG_TEST_BPF=m | |||
6 | CONFIG_CGROUP_BPF=y | 6 | CONFIG_CGROUP_BPF=y |
7 | CONFIG_NETDEVSIM=m | 7 | CONFIG_NETDEVSIM=m |
8 | CONFIG_NET_CLS_ACT=y | 8 | CONFIG_NET_CLS_ACT=y |
9 | CONFIG_NET_SCHED=y | ||
9 | CONFIG_NET_SCH_INGRESS=y | 10 | CONFIG_NET_SCH_INGRESS=y |
10 | CONFIG_NET_IPIP=y | 11 | CONFIG_NET_IPIP=y |
11 | CONFIG_IPV6=y | 12 | CONFIG_IPV6=y |
diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh index 35669ccd4d23..9df0d2ac45f8 100755 --- a/tools/testing/selftests/bpf/test_kmod.sh +++ b/tools/testing/selftests/bpf/test_kmod.sh | |||
@@ -1,6 +1,15 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
3 | 3 | ||
4 | # Kselftest framework requirement - SKIP code is 4. | ||
5 | ksft_skip=4 | ||
6 | |||
7 | msg="skip all tests:" | ||
8 | if [ "$(id -u)" != "0" ]; then | ||
9 | echo $msg please run this as root >&2 | ||
10 | exit $ksft_skip | ||
11 | fi | ||
12 | |||
4 | SRC_TREE=../../../../ | 13 | SRC_TREE=../../../../ |
5 | 14 | ||
6 | test_run() | 15 | test_run() |
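
This prologue, repeated below for test_lirc_mode2.sh and test_lwt_seg6local.sh, makes a non-root run exit with the kselftest SKIP code (4) so harnesses report the test as skipped instead of failed. C selftests express the same convention through kselftest.h; a minimal sketch (ksft_exit_skip() is the real helper there; the message text is illustrative):

    #include <unistd.h>
    #include "kselftest.h"          /* defines KSFT_SKIP = 4 */

    int main(void)
    {
            if (geteuid() != 0)
                    ksft_exit_skip("please run this as root\n");

            /* ... actual test body ... */
            return 0;
    }
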
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh index ce2e15e4f976..677686198df3 100755 --- a/tools/testing/selftests/bpf/test_lirc_mode2.sh +++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh | |||
@@ -1,6 +1,15 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
3 | 3 | ||
4 | # Kselftest framework requirement - SKIP code is 4. | ||
5 | ksft_skip=4 | ||
6 | |||
7 | msg="skip all tests:" | ||
8 | if [ $UID != 0 ]; then | ||
9 | echo $msg please run this as root >&2 | ||
10 | exit $ksft_skip | ||
11 | fi | ||
12 | |||
4 | GREEN='\033[0;92m' | 13 | GREEN='\033[0;92m' |
5 | RED='\033[0;31m' | 14 | RED='\033[0;31m' |
6 | NC='\033[0m' # No Color | 15 | NC='\033[0m' # No Color |
diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh index 1c77994b5e71..270fa8f49573 100755 --- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh +++ b/tools/testing/selftests/bpf/test_lwt_seg6local.sh | |||
@@ -21,6 +21,15 @@ | |||
21 | # An UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this | 21 | # An UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this |
22 | # datagram can be read on NS6 when binding to fb00::6. | 22 | # datagram can be read on NS6 when binding to fb00::6. |
23 | 23 | ||
24 | # Kselftest framework requirement - SKIP code is 4. | ||
25 | ksft_skip=4 | ||
26 | |||
27 | msg="skip all tests:" | ||
28 | if [ $UID != 0 ]; then | ||
29 | echo $msg please run this as root >&2 | ||
30 | exit $ksft_skip | ||
31 | fi | ||
32 | |||
24 | TMP_FILE="/tmp/selftest_lwt_seg6local.txt" | 33 | TMP_FILE="/tmp/selftest_lwt_seg6local.txt" |
25 | 34 | ||
26 | cleanup() | 35 | cleanup() |
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 05c8cb71724a..9e78df207919 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c | |||
@@ -1413,18 +1413,12 @@ out: | |||
1413 | 1413 | ||
1414 | int main(int argc, char **argv) | 1414 | int main(int argc, char **argv) |
1415 | { | 1415 | { |
1416 | struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY}; | ||
1417 | int iov_count = 1, length = 1024, rate = 1; | 1416 | int iov_count = 1, length = 1024, rate = 1; |
1418 | struct sockmap_options options = {0}; | 1417 | struct sockmap_options options = {0}; |
1419 | int opt, longindex, err, cg_fd = 0; | 1418 | int opt, longindex, err, cg_fd = 0; |
1420 | char *bpf_file = BPF_SOCKMAP_FILENAME; | 1419 | char *bpf_file = BPF_SOCKMAP_FILENAME; |
1421 | int test = PING_PONG; | 1420 | int test = PING_PONG; |
1422 | 1421 | ||
1423 | if (setrlimit(RLIMIT_MEMLOCK, &r)) { | ||
1424 | perror("setrlimit(RLIMIT_MEMLOCK)"); | ||
1425 | return 1; | ||
1426 | } | ||
1427 | |||
1428 | if (argc < 2) | 1422 | if (argc < 2) |
1429 | return test_suite(); | 1423 | return test_suite(); |
1430 | 1424 | ||
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 78245d60d8bc..78245d60d8bc 100644..100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh | |||