diff options
127 files changed, 1802 insertions, 544 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 8c68de3cfd80..825dc2b7453d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -2848,6 +2848,9 @@ F: include/uapi/linux/if_bonding.h | |||
2848 | BPF (Safe dynamic programs and tools) | 2848 | BPF (Safe dynamic programs and tools) |
2849 | M: Alexei Starovoitov <ast@kernel.org> | 2849 | M: Alexei Starovoitov <ast@kernel.org> |
2850 | M: Daniel Borkmann <daniel@iogearbox.net> | 2850 | M: Daniel Borkmann <daniel@iogearbox.net> |
2851 | R: Martin KaFai Lau <kafai@fb.com> | ||
2852 | R: Song Liu <songliubraving@fb.com> | ||
2853 | R: Yonghong Song <yhs@fb.com> | ||
2851 | L: netdev@vger.kernel.org | 2854 | L: netdev@vger.kernel.org |
2852 | L: linux-kernel@vger.kernel.org | 2855 | L: linux-kernel@vger.kernel.org |
2853 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git | 2856 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git |
@@ -2873,6 +2876,8 @@ F: samples/bpf/ | |||
2873 | F: tools/bpf/ | 2876 | F: tools/bpf/ |
2874 | F: tools/lib/bpf/ | 2877 | F: tools/lib/bpf/ |
2875 | F: tools/testing/selftests/bpf/ | 2878 | F: tools/testing/selftests/bpf/ |
2879 | K: bpf | ||
2880 | N: bpf | ||
2876 | 2881 | ||
2877 | BPF JIT for ARM | 2882 | BPF JIT for ARM |
2878 | M: Shubham Bansal <illusionist.neo@gmail.com> | 2883 | M: Shubham Bansal <illusionist.neo@gmail.com> |
@@ -12868,6 +12873,13 @@ F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt | |||
12868 | F: drivers/net/dsa/realtek-smi* | 12873 | F: drivers/net/dsa/realtek-smi* |
12869 | F: drivers/net/dsa/rtl83* | 12874 | F: drivers/net/dsa/rtl83* |
12870 | 12875 | ||
12876 | REDPINE WIRELESS DRIVER | ||
12877 | M: Amitkumar Karwar <amitkarwar@gmail.com> | ||
12878 | M: Siva Rebbagondla <siva8118@gmail.com> | ||
12879 | L: linux-wireless@vger.kernel.org | ||
12880 | S: Maintained | ||
12881 | F: drivers/net/wireless/rsi/ | ||
12882 | |||
12871 | REGISTER MAP ABSTRACTION | 12883 | REGISTER MAP ABSTRACTION |
12872 | M: Mark Brown <broonie@kernel.org> | 12884 | M: Mark Brown <broonie@kernel.org> |
12873 | L: linux-kernel@vger.kernel.org | 12885 | L: linux-kernel@vger.kernel.org |
@@ -13696,6 +13708,15 @@ L: netdev@vger.kernel.org | |||
13696 | S: Supported | 13708 | S: Supported |
13697 | F: drivers/net/ethernet/sfc/ | 13709 | F: drivers/net/ethernet/sfc/ |
13698 | 13710 | ||
13711 | SFF/SFP/SFP+ MODULE SUPPORT | ||
13712 | M: Russell King <linux@armlinux.org.uk> | ||
13713 | L: netdev@vger.kernel.org | ||
13714 | S: Maintained | ||
13715 | F: drivers/net/phy/phylink.c | ||
13716 | F: drivers/net/phy/sfp* | ||
13717 | F: include/linux/phylink.h | ||
13718 | F: include/linux/sfp.h | ||
13719 | |||
13699 | SGI GRU DRIVER | 13720 | SGI GRU DRIVER |
13700 | M: Dimitri Sivanich <sivanich@sgi.com> | 13721 | M: Dimitri Sivanich <sivanich@sgi.com> |
13701 | S: Maintained | 13722 | S: Maintained |
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index 211ed6cffd10..578978711887 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c | |||
@@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t) | |||
170 | spin_lock_irqsave(&timer->dev->lock, flags); | 170 | spin_lock_irqsave(&timer->dev->lock, flags); |
171 | if (timer->id >= 0) | 171 | if (timer->id >= 0) |
172 | list_move_tail(&timer->list, &timer->dev->expired); | 172 | list_move_tail(&timer->list, &timer->dev->expired); |
173 | spin_unlock_irqrestore(&timer->dev->lock, flags); | ||
174 | wake_up_interruptible(&timer->dev->wait); | 173 | wake_up_interruptible(&timer->dev->wait); |
174 | spin_unlock_irqrestore(&timer->dev->lock, flags); | ||
175 | } | 175 | } |
176 | 176 | ||
177 | static int | 177 | static int |
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index 90f514252987..d9c56a779c08 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c | |||
@@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev) | |||
511 | /* Clear all pending interrupts */ | 511 | /* Clear all pending interrupts */ |
512 | writel(0xffffffff, priv->regs + B53_SRAB_INTR); | 512 | writel(0xffffffff, priv->regs + B53_SRAB_INTR); |
513 | 513 | ||
514 | if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID) | ||
515 | return; | ||
516 | |||
517 | for (i = 0; i < B53_N_PORTS; i++) { | 514 | for (i = 0; i < B53_N_PORTS; i++) { |
518 | port = &priv->port_intrs[i]; | 515 | port = &priv->port_intrs[i]; |
519 | 516 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 5200e4bdce93..ea243840ee0f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c | |||
@@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) | |||
314 | { | 314 | { |
315 | struct mv88e6xxx_chip *chip = dev_id; | 315 | struct mv88e6xxx_chip *chip = dev_id; |
316 | struct mv88e6xxx_atu_entry entry; | 316 | struct mv88e6xxx_atu_entry entry; |
317 | int spid; | ||
317 | int err; | 318 | int err; |
318 | u16 val; | 319 | u16 val; |
319 | 320 | ||
@@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) | |||
336 | if (err) | 337 | if (err) |
337 | goto out; | 338 | goto out; |
338 | 339 | ||
340 | spid = entry.state; | ||
341 | |||
339 | if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { | 342 | if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { |
340 | dev_err_ratelimited(chip->dev, | 343 | dev_err_ratelimited(chip->dev, |
341 | "ATU age out violation for %pM\n", | 344 | "ATU age out violation for %pM\n", |
@@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) | |||
344 | 347 | ||
345 | if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { | 348 | if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { |
346 | dev_err_ratelimited(chip->dev, | 349 | dev_err_ratelimited(chip->dev, |
347 | "ATU member violation for %pM portvec %x\n", | 350 | "ATU member violation for %pM portvec %x spid %d\n", |
348 | entry.mac, entry.portvec); | 351 | entry.mac, entry.portvec, spid); |
349 | chip->ports[entry.portvec].atu_member_violation++; | 352 | chip->ports[spid].atu_member_violation++; |
350 | } | 353 | } |
351 | 354 | ||
352 | if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { | 355 | if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { |
353 | dev_err_ratelimited(chip->dev, | 356 | dev_err_ratelimited(chip->dev, |
354 | "ATU miss violation for %pM portvec %x\n", | 357 | "ATU miss violation for %pM portvec %x spid %d\n", |
355 | entry.mac, entry.portvec); | 358 | entry.mac, entry.portvec, spid); |
356 | chip->ports[entry.portvec].atu_miss_violation++; | 359 | chip->ports[spid].atu_miss_violation++; |
357 | } | 360 | } |
358 | 361 | ||
359 | if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { | 362 | if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { |
360 | dev_err_ratelimited(chip->dev, | 363 | dev_err_ratelimited(chip->dev, |
361 | "ATU full violation for %pM portvec %x\n", | 364 | "ATU full violation for %pM portvec %x spid %d\n", |
362 | entry.mac, entry.portvec); | 365 | entry.mac, entry.portvec, spid); |
363 | chip->ports[entry.portvec].atu_full_violation++; | 366 | chip->ports[spid].atu_full_violation++; |
364 | } | 367 | } |
365 | mutex_unlock(&chip->reg_lock); | 368 | mutex_unlock(&chip->reg_lock); |
366 | 369 | ||
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f9521d0274b7..28c9b0bdf2f6 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -520,7 +520,6 @@ static void bcm_sysport_get_wol(struct net_device *dev, | |||
520 | struct ethtool_wolinfo *wol) | 520 | struct ethtool_wolinfo *wol) |
521 | { | 521 | { |
522 | struct bcm_sysport_priv *priv = netdev_priv(dev); | 522 | struct bcm_sysport_priv *priv = netdev_priv(dev); |
523 | u32 reg; | ||
524 | 523 | ||
525 | wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; | 524 | wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; |
526 | wol->wolopts = priv->wolopts; | 525 | wol->wolopts = priv->wolopts; |
@@ -528,11 +527,7 @@ static void bcm_sysport_get_wol(struct net_device *dev, | |||
528 | if (!(priv->wolopts & WAKE_MAGICSECURE)) | 527 | if (!(priv->wolopts & WAKE_MAGICSECURE)) |
529 | return; | 528 | return; |
530 | 529 | ||
531 | /* Return the programmed SecureOn password */ | 530 | memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); |
532 | reg = umac_readl(priv, UMAC_PSW_MS); | ||
533 | put_unaligned_be16(reg, &wol->sopass[0]); | ||
534 | reg = umac_readl(priv, UMAC_PSW_LS); | ||
535 | put_unaligned_be32(reg, &wol->sopass[2]); | ||
536 | } | 531 | } |
537 | 532 | ||
538 | static int bcm_sysport_set_wol(struct net_device *dev, | 533 | static int bcm_sysport_set_wol(struct net_device *dev, |
@@ -548,13 +543,8 @@ static int bcm_sysport_set_wol(struct net_device *dev, | |||
548 | if (wol->wolopts & ~supported) | 543 | if (wol->wolopts & ~supported) |
549 | return -EINVAL; | 544 | return -EINVAL; |
550 | 545 | ||
551 | /* Program the SecureOn password */ | 546 | if (wol->wolopts & WAKE_MAGICSECURE) |
552 | if (wol->wolopts & WAKE_MAGICSECURE) { | 547 | memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); |
553 | umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), | ||
554 | UMAC_PSW_MS); | ||
555 | umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), | ||
556 | UMAC_PSW_LS); | ||
557 | } | ||
558 | 548 | ||
559 | /* Flag the device and relevant IRQ as wakeup capable */ | 549 | /* Flag the device and relevant IRQ as wakeup capable */ |
560 | if (wol->wolopts) { | 550 | if (wol->wolopts) { |
@@ -2649,13 +2639,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) | |||
2649 | unsigned int index, i = 0; | 2639 | unsigned int index, i = 0; |
2650 | u32 reg; | 2640 | u32 reg; |
2651 | 2641 | ||
2652 | /* Password has already been programmed */ | ||
2653 | reg = umac_readl(priv, UMAC_MPD_CTRL); | 2642 | reg = umac_readl(priv, UMAC_MPD_CTRL); |
2654 | if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) | 2643 | if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) |
2655 | reg |= MPD_EN; | 2644 | reg |= MPD_EN; |
2656 | reg &= ~PSW_EN; | 2645 | reg &= ~PSW_EN; |
2657 | if (priv->wolopts & WAKE_MAGICSECURE) | 2646 | if (priv->wolopts & WAKE_MAGICSECURE) { |
2647 | /* Program the SecureOn password */ | ||
2648 | umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), | ||
2649 | UMAC_PSW_MS); | ||
2650 | umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), | ||
2651 | UMAC_PSW_LS); | ||
2658 | reg |= PSW_EN; | 2652 | reg |= PSW_EN; |
2653 | } | ||
2659 | umac_writel(priv, reg, UMAC_MPD_CTRL); | 2654 | umac_writel(priv, reg, UMAC_MPD_CTRL); |
2660 | 2655 | ||
2661 | if (priv->wolopts & WAKE_FILTER) { | 2656 | if (priv->wolopts & WAKE_FILTER) { |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 0887e6356649..0b192fea9c5d 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #define __BCM_SYSPORT_H | 12 | #define __BCM_SYSPORT_H |
13 | 13 | ||
14 | #include <linux/bitmap.h> | 14 | #include <linux/bitmap.h> |
15 | #include <linux/ethtool.h> | ||
15 | #include <linux/if_vlan.h> | 16 | #include <linux/if_vlan.h> |
16 | #include <linux/net_dim.h> | 17 | #include <linux/net_dim.h> |
17 | 18 | ||
@@ -778,6 +779,7 @@ struct bcm_sysport_priv { | |||
778 | unsigned int crc_fwd:1; | 779 | unsigned int crc_fwd:1; |
779 | u16 rev; | 780 | u16 rev; |
780 | u32 wolopts; | 781 | u32 wolopts; |
782 | u8 sopass[SOPASS_MAX]; | ||
781 | unsigned int wol_irq_disabled:1; | 783 | unsigned int wol_irq_disabled:1; |
782 | 784 | ||
783 | /* MIB related fields */ | 785 | /* MIB related fields */ |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6a512871176b..8bc7e495b027 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -4973,12 +4973,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) | |||
4973 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; | 4973 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
4974 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; | 4974 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; |
4975 | u32 map_idx = ring->map_idx; | 4975 | u32 map_idx = ring->map_idx; |
4976 | unsigned int vector; | ||
4976 | 4977 | ||
4978 | vector = bp->irq_tbl[map_idx].vector; | ||
4979 | disable_irq_nosync(vector); | ||
4977 | rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); | 4980 | rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); |
4978 | if (rc) | 4981 | if (rc) { |
4982 | enable_irq(vector); | ||
4979 | goto err_out; | 4983 | goto err_out; |
4984 | } | ||
4980 | bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); | 4985 | bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); |
4981 | bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); | 4986 | bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); |
4987 | enable_irq(vector); | ||
4982 | bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; | 4988 | bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; |
4983 | 4989 | ||
4984 | if (!i) { | 4990 | if (!i) { |
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 5db9f4158e62..134ae2862efa 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c | |||
@@ -1288,7 +1288,7 @@ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, | |||
1288 | * for transmits, we just free buffers. | 1288 | * for transmits, we just free buffers. |
1289 | */ | 1289 | */ |
1290 | 1290 | ||
1291 | dev_kfree_skb_irq(sb); | 1291 | dev_consume_skb_irq(sb); |
1292 | 1292 | ||
1293 | /* | 1293 | /* |
1294 | * .. and advance to the next buffer. | 1294 | * .. and advance to the next buffer. |
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 5f03199a3acf..05f4a3b21e29 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig | |||
@@ -54,7 +54,6 @@ config CAVIUM_PTP | |||
54 | tristate "Cavium PTP coprocessor as PTP clock" | 54 | tristate "Cavium PTP coprocessor as PTP clock" |
55 | depends on 64BIT && PCI | 55 | depends on 64BIT && PCI |
56 | imply PTP_1588_CLOCK | 56 | imply PTP_1588_CLOCK |
57 | default y | ||
58 | ---help--- | 57 | ---help--- |
59 | This driver adds support for the Precision Time Protocol Clocks and | 58 | This driver adds support for the Precision Time Protocol Clocks and |
60 | Timestamping coprocessor (PTP) found on Cavium processors. | 59 | Timestamping coprocessor (PTP) found on Cavium processors. |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 60641e202534..9a7f70db20c7 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
@@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, | |||
1434 | * csum is correct or is zero. | 1434 | * csum is correct or is zero. |
1435 | */ | 1435 | */ |
1436 | if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && | 1436 | if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && |
1437 | tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { | 1437 | tcp_udp_csum_ok && outer_csum_ok && |
1438 | (ipv4_csum_ok || ipv6)) { | ||
1438 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1439 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1439 | skb->csum_level = encap; | 1440 | skb->csum_level = encap; |
1440 | } | 1441 | } |
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 13430f75496c..f1a2da15dd0a 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c | |||
@@ -585,7 +585,7 @@ static void de_tx (struct de_private *de) | |||
585 | netif_dbg(de, tx_done, de->dev, | 585 | netif_dbg(de, tx_done, de->dev, |
586 | "tx done, slot %d\n", tx_tail); | 586 | "tx done, slot %d\n", tx_tail); |
587 | } | 587 | } |
588 | dev_kfree_skb_irq(skb); | 588 | dev_consume_skb_irq(skb); |
589 | } | 589 | } |
590 | 590 | ||
591 | next: | 591 | next: |
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index b90bab72efdb..c1968b3ecec8 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c | |||
@@ -369,7 +369,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) | |||
369 | dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, | 369 | dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, |
370 | DMA_TO_DEVICE); | 370 | DMA_TO_DEVICE); |
371 | 371 | ||
372 | dev_kfree_skb_irq(skb); | 372 | dev_consume_skb_irq(skb); |
373 | } | 373 | } |
374 | spin_unlock(&priv->lock); | 374 | spin_unlock(&priv->lock); |
375 | 375 | ||
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index c3d539e209ed..eb3e65e8868f 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c | |||
@@ -1879,6 +1879,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) | |||
1879 | u16 i, j; | 1879 | u16 i, j; |
1880 | u8 __iomem *bd; | 1880 | u8 __iomem *bd; |
1881 | 1881 | ||
1882 | netdev_reset_queue(ugeth->ndev); | ||
1883 | |||
1882 | ug_info = ugeth->ug_info; | 1884 | ug_info = ugeth->ug_info; |
1883 | uf_info = &ug_info->uf_info; | 1885 | uf_info = &ug_info->uf_info; |
1884 | 1886 | ||
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 04fd1f135011..654ac534b10e 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c | |||
@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
152 | memset(p, 0, regs->len); | 152 | memset(p, 0, regs->len); |
153 | memcpy_fromio(p, io, B3_RAM_ADDR); | 153 | memcpy_fromio(p, io, B3_RAM_ADDR); |
154 | 154 | ||
155 | memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, | 155 | if (regs->len > B3_RI_WTO_R1) { |
156 | regs->len - B3_RI_WTO_R1); | 156 | memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, |
157 | regs->len - B3_RI_WTO_R1); | ||
158 | } | ||
157 | } | 159 | } |
158 | 160 | ||
159 | /* Wake on Lan only supported on Yukon chips with rev 1 or above */ | 161 | /* Wake on Lan only supported on Yukon chips with rev 1 or above */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 046948ead152..f3c7ab6faea5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | |||
@@ -256,6 +256,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, | |||
256 | e->m_neigh.family = n->ops->family; | 256 | e->m_neigh.family = n->ops->family; |
257 | memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); | 257 | memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); |
258 | e->out_dev = out_dev; | 258 | e->out_dev = out_dev; |
259 | e->route_dev = route_dev; | ||
259 | 260 | ||
260 | /* It's important to add the neigh to the hash table before checking | 261 | /* It's important to add the neigh to the hash table before checking |
261 | * the neigh validity state. So if we'll get a notification, in case the | 262 | * the neigh validity state. So if we'll get a notification, in case the |
@@ -369,6 +370,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, | |||
369 | e->m_neigh.family = n->ops->family; | 370 | e->m_neigh.family = n->ops->family; |
370 | memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); | 371 | memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); |
371 | e->out_dev = out_dev; | 372 | e->out_dev = out_dev; |
373 | e->route_dev = route_dev; | ||
372 | 374 | ||
373 | /* It's importent to add the neigh to the hash table before checking | 375 | /* It's importent to add the neigh to the hash table before checking |
374 | * the neigh validity state. So if we'll get a notification, in case the | 376 | * the neigh validity state. So if we'll get a notification, in case the |
@@ -612,16 +614,18 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, | |||
612 | struct mlx5_flow_spec *spec, | 614 | struct mlx5_flow_spec *spec, |
613 | struct tc_cls_flower_offload *f, | 615 | struct tc_cls_flower_offload *f, |
614 | void *headers_c, | 616 | void *headers_c, |
615 | void *headers_v) | 617 | void *headers_v, u8 *match_level) |
616 | { | 618 | { |
617 | int tunnel_type; | 619 | int tunnel_type; |
618 | int err = 0; | 620 | int err = 0; |
619 | 621 | ||
620 | tunnel_type = mlx5e_tc_tun_get_type(filter_dev); | 622 | tunnel_type = mlx5e_tc_tun_get_type(filter_dev); |
621 | if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { | 623 | if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { |
624 | *match_level = MLX5_MATCH_L4; | ||
622 | err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, | 625 | err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, |
623 | headers_c, headers_v); | 626 | headers_c, headers_v); |
624 | } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { | 627 | } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { |
628 | *match_level = MLX5_MATCH_L3; | ||
625 | err = mlx5e_tc_tun_parse_gretap(priv, spec, f, | 629 | err = mlx5e_tc_tun_parse_gretap(priv, spec, f, |
626 | headers_c, headers_v); | 630 | headers_c, headers_v); |
627 | } else { | 631 | } else { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 706ce7bf15e7..b63f15de899d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | |||
@@ -39,6 +39,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, | |||
39 | struct mlx5_flow_spec *spec, | 39 | struct mlx5_flow_spec *spec, |
40 | struct tc_cls_flower_offload *f, | 40 | struct tc_cls_flower_offload *f, |
41 | void *headers_c, | 41 | void *headers_c, |
42 | void *headers_v); | 42 | void *headers_v, u8 *match_level); |
43 | 43 | ||
44 | #endif //__MLX5_EN_TC_TUNNEL_H__ | 44 | #endif //__MLX5_EN_TC_TUNNEL_H__ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index f2573c2d2b5c..ef9e472daffb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
@@ -596,6 +596,10 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv, | |||
596 | if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { | 596 | if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { |
597 | ether_addr_copy(e->h_dest, ha); | 597 | ether_addr_copy(e->h_dest, ha); |
598 | ether_addr_copy(eth->h_dest, ha); | 598 | ether_addr_copy(eth->h_dest, ha); |
599 | /* Update the encap source mac, in case that we delete | ||
600 | * the flows when encap source mac changed. | ||
601 | */ | ||
602 | ether_addr_copy(eth->h_source, e->route_dev->dev_addr); | ||
599 | 603 | ||
600 | mlx5e_tc_encap_flows_add(priv, e); | 604 | mlx5e_tc_encap_flows_add(priv, e); |
601 | } | 605 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index edd722824697..36eafc877e6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | |||
@@ -148,6 +148,7 @@ struct mlx5e_encap_entry { | |||
148 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ | 148 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ |
149 | 149 | ||
150 | struct net_device *out_dev; | 150 | struct net_device *out_dev; |
151 | struct net_device *route_dev; | ||
151 | int tunnel_type; | 152 | int tunnel_type; |
152 | int tunnel_hlen; | 153 | int tunnel_hlen; |
153 | int reformat_type; | 154 | int reformat_type; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index cae6c6d48984..b5c1b039375a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -128,6 +128,7 @@ struct mlx5e_tc_flow_parse_attr { | |||
128 | struct net_device *filter_dev; | 128 | struct net_device *filter_dev; |
129 | struct mlx5_flow_spec spec; | 129 | struct mlx5_flow_spec spec; |
130 | int num_mod_hdr_actions; | 130 | int num_mod_hdr_actions; |
131 | int max_mod_hdr_actions; | ||
131 | void *mod_hdr_actions; | 132 | void *mod_hdr_actions; |
132 | int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; | 133 | int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; |
133 | }; | 134 | }; |
@@ -1302,7 +1303,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, | |||
1302 | static int parse_tunnel_attr(struct mlx5e_priv *priv, | 1303 | static int parse_tunnel_attr(struct mlx5e_priv *priv, |
1303 | struct mlx5_flow_spec *spec, | 1304 | struct mlx5_flow_spec *spec, |
1304 | struct tc_cls_flower_offload *f, | 1305 | struct tc_cls_flower_offload *f, |
1305 | struct net_device *filter_dev) | 1306 | struct net_device *filter_dev, u8 *match_level) |
1306 | { | 1307 | { |
1307 | struct netlink_ext_ack *extack = f->common.extack; | 1308 | struct netlink_ext_ack *extack = f->common.extack; |
1308 | void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, | 1309 | void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, |
@@ -1317,7 +1318,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, | |||
1317 | int err = 0; | 1318 | int err = 0; |
1318 | 1319 | ||
1319 | err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, | 1320 | err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, |
1320 | headers_c, headers_v); | 1321 | headers_c, headers_v, match_level); |
1321 | if (err) { | 1322 | if (err) { |
1322 | NL_SET_ERR_MSG_MOD(extack, | 1323 | NL_SET_ERR_MSG_MOD(extack, |
1323 | "failed to parse tunnel attributes"); | 1324 | "failed to parse tunnel attributes"); |
@@ -1426,7 +1427,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
1426 | struct mlx5_flow_spec *spec, | 1427 | struct mlx5_flow_spec *spec, |
1427 | struct tc_cls_flower_offload *f, | 1428 | struct tc_cls_flower_offload *f, |
1428 | struct net_device *filter_dev, | 1429 | struct net_device *filter_dev, |
1429 | u8 *match_level) | 1430 | u8 *match_level, u8 *tunnel_match_level) |
1430 | { | 1431 | { |
1431 | struct netlink_ext_ack *extack = f->common.extack; | 1432 | struct netlink_ext_ack *extack = f->common.extack; |
1432 | void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, | 1433 | void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, |
@@ -1477,7 +1478,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
1477 | switch (key->addr_type) { | 1478 | switch (key->addr_type) { |
1478 | case FLOW_DISSECTOR_KEY_IPV4_ADDRS: | 1479 | case FLOW_DISSECTOR_KEY_IPV4_ADDRS: |
1479 | case FLOW_DISSECTOR_KEY_IPV6_ADDRS: | 1480 | case FLOW_DISSECTOR_KEY_IPV6_ADDRS: |
1480 | if (parse_tunnel_attr(priv, spec, f, filter_dev)) | 1481 | if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) |
1481 | return -EOPNOTSUPP; | 1482 | return -EOPNOTSUPP; |
1482 | break; | 1483 | break; |
1483 | default: | 1484 | default: |
@@ -1826,11 +1827,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv, | |||
1826 | struct mlx5_core_dev *dev = priv->mdev; | 1827 | struct mlx5_core_dev *dev = priv->mdev; |
1827 | struct mlx5_eswitch *esw = dev->priv.eswitch; | 1828 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
1828 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | 1829 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
1830 | u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; | ||
1829 | struct mlx5_eswitch_rep *rep; | 1831 | struct mlx5_eswitch_rep *rep; |
1830 | u8 match_level; | ||
1831 | int err; | 1832 | int err; |
1832 | 1833 | ||
1833 | err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level); | 1834 | err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); |
1834 | 1835 | ||
1835 | if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { | 1836 | if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { |
1836 | rep = rpriv->rep; | 1837 | rep = rpriv->rep; |
@@ -1846,10 +1847,12 @@ static int parse_cls_flower(struct mlx5e_priv *priv, | |||
1846 | } | 1847 | } |
1847 | } | 1848 | } |
1848 | 1849 | ||
1849 | if (flow->flags & MLX5E_TC_FLOW_ESWITCH) | 1850 | if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { |
1850 | flow->esw_attr->match_level = match_level; | 1851 | flow->esw_attr->match_level = match_level; |
1851 | else | 1852 | flow->esw_attr->tunnel_match_level = tunnel_match_level; |
1853 | } else { | ||
1852 | flow->nic_attr->match_level = match_level; | 1854 | flow->nic_attr->match_level = match_level; |
1855 | } | ||
1853 | 1856 | ||
1854 | return err; | 1857 | return err; |
1855 | } | 1858 | } |
@@ -1934,9 +1937,9 @@ static struct mlx5_fields fields[] = { | |||
1934 | OFFLOAD(UDP_DPORT, 2, udp.dest, 0), | 1937 | OFFLOAD(UDP_DPORT, 2, udp.dest, 0), |
1935 | }; | 1938 | }; |
1936 | 1939 | ||
1937 | /* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at | 1940 | /* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at |
1938 | * max from the SW pedit action. On success, it says how many HW actions were | 1941 | * max from the SW pedit action. On success, attr->num_mod_hdr_actions |
1939 | * actually parsed. | 1942 | * says how many HW actions were actually parsed. |
1940 | */ | 1943 | */ |
1941 | static int offload_pedit_fields(struct pedit_headers *masks, | 1944 | static int offload_pedit_fields(struct pedit_headers *masks, |
1942 | struct pedit_headers *vals, | 1945 | struct pedit_headers *vals, |
@@ -1960,9 +1963,11 @@ static int offload_pedit_fields(struct pedit_headers *masks, | |||
1960 | add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; | 1963 | add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; |
1961 | 1964 | ||
1962 | action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); | 1965 | action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); |
1963 | action = parse_attr->mod_hdr_actions; | 1966 | action = parse_attr->mod_hdr_actions + |
1964 | max_actions = parse_attr->num_mod_hdr_actions; | 1967 | parse_attr->num_mod_hdr_actions * action_size; |
1965 | nactions = 0; | 1968 | |
1969 | max_actions = parse_attr->max_mod_hdr_actions; | ||
1970 | nactions = parse_attr->num_mod_hdr_actions; | ||
1966 | 1971 | ||
1967 | for (i = 0; i < ARRAY_SIZE(fields); i++) { | 1972 | for (i = 0; i < ARRAY_SIZE(fields); i++) { |
1968 | f = &fields[i]; | 1973 | f = &fields[i]; |
@@ -2073,7 +2078,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, | |||
2073 | if (!parse_attr->mod_hdr_actions) | 2078 | if (!parse_attr->mod_hdr_actions) |
2074 | return -ENOMEM; | 2079 | return -ENOMEM; |
2075 | 2080 | ||
2076 | parse_attr->num_mod_hdr_actions = max_actions; | 2081 | parse_attr->max_mod_hdr_actions = max_actions; |
2077 | return 0; | 2082 | return 0; |
2078 | } | 2083 | } |
2079 | 2084 | ||
@@ -2119,9 +2124,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, | |||
2119 | goto out_err; | 2124 | goto out_err; |
2120 | } | 2125 | } |
2121 | 2126 | ||
2122 | err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); | 2127 | if (!parse_attr->mod_hdr_actions) { |
2123 | if (err) | 2128 | err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); |
2124 | goto out_err; | 2129 | if (err) |
2130 | goto out_err; | ||
2131 | } | ||
2125 | 2132 | ||
2126 | err = offload_pedit_fields(masks, vals, parse_attr, extack); | 2133 | err = offload_pedit_fields(masks, vals, parse_attr, extack); |
2127 | if (err < 0) | 2134 | if (err < 0) |
@@ -2179,6 +2186,7 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, | |||
2179 | 2186 | ||
2180 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | 2187 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, |
2181 | struct tcf_exts *exts, | 2188 | struct tcf_exts *exts, |
2189 | u32 actions, | ||
2182 | struct netlink_ext_ack *extack) | 2190 | struct netlink_ext_ack *extack) |
2183 | { | 2191 | { |
2184 | const struct tc_action *a; | 2192 | const struct tc_action *a; |
@@ -2188,7 +2196,11 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
2188 | u16 ethertype; | 2196 | u16 ethertype; |
2189 | int nkeys, i; | 2197 | int nkeys, i; |
2190 | 2198 | ||
2191 | headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); | 2199 | if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) |
2200 | headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers); | ||
2201 | else | ||
2202 | headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); | ||
2203 | |||
2192 | ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); | 2204 | ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); |
2193 | 2205 | ||
2194 | /* for non-IP we only re-write MACs, so we're okay */ | 2206 | /* for non-IP we only re-write MACs, so we're okay */ |
@@ -2245,7 +2257,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv, | |||
2245 | 2257 | ||
2246 | if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) | 2258 | if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) |
2247 | return modify_header_match_supported(&parse_attr->spec, exts, | 2259 | return modify_header_match_supported(&parse_attr->spec, exts, |
2248 | extack); | 2260 | actions, extack); |
2249 | 2261 | ||
2250 | return true; | 2262 | return true; |
2251 | } | 2263 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 598ad7e4d5c9..0e55cd1f2e98 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
@@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
387 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); | 387 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); |
388 | contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); | 388 | contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); |
389 | if (unlikely(contig_wqebbs_room < num_wqebbs)) { | 389 | if (unlikely(contig_wqebbs_room < num_wqebbs)) { |
390 | #ifdef CONFIG_MLX5_EN_IPSEC | ||
391 | struct mlx5_wqe_eth_seg cur_eth = wqe->eth; | ||
392 | #endif | ||
390 | mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); | 393 | mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); |
391 | mlx5e_sq_fetch_wqe(sq, &wqe, &pi); | 394 | mlx5e_sq_fetch_wqe(sq, &wqe, &pi); |
395 | #ifdef CONFIG_MLX5_EN_IPSEC | ||
396 | wqe->eth = cur_eth; | ||
397 | #endif | ||
392 | } | 398 | } |
393 | 399 | ||
394 | /* fill wqe */ | 400 | /* fill wqe */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 9c89eea9b2c3..748ff178a1d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | |||
@@ -312,6 +312,7 @@ struct mlx5_esw_flow_attr { | |||
312 | } dests[MLX5_MAX_FLOW_FWD_VPORTS]; | 312 | } dests[MLX5_MAX_FLOW_FWD_VPORTS]; |
313 | u32 mod_hdr_id; | 313 | u32 mod_hdr_id; |
314 | u8 match_level; | 314 | u8 match_level; |
315 | u8 tunnel_match_level; | ||
315 | struct mlx5_fc *counter; | 316 | struct mlx5_fc *counter; |
316 | u32 chain; | 317 | u32 chain; |
317 | u16 prio; | 318 | u16 prio; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 53065b6ae593..d4e6fe5b9300 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
@@ -160,14 +160,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, | |||
160 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, | 160 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, |
161 | source_eswitch_owner_vhca_id); | 161 | source_eswitch_owner_vhca_id); |
162 | 162 | ||
163 | if (attr->match_level == MLX5_MATCH_NONE) | 163 | spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; |
164 | spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; | 164 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { |
165 | else | 165 | if (attr->tunnel_match_level != MLX5_MATCH_NONE) |
166 | spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | | 166 | spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; |
167 | MLX5_MATCH_MISC_PARAMETERS; | 167 | if (attr->match_level != MLX5_MATCH_NONE) |
168 | 168 | spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; | |
169 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) | 169 | } else if (attr->match_level != MLX5_MATCH_NONE) { |
170 | spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; | 170 | spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; |
171 | } | ||
171 | 172 | ||
172 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) | 173 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) |
173 | flow_act.modify_id = attr->mod_hdr_id; | 174 | flow_act.modify_id = attr->mod_hdr_id; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 24a90163775e..2d8a77cc156b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h | |||
@@ -53,7 +53,7 @@ | |||
53 | extern const struct qed_common_ops qed_common_ops_pass; | 53 | extern const struct qed_common_ops qed_common_ops_pass; |
54 | 54 | ||
55 | #define QED_MAJOR_VERSION 8 | 55 | #define QED_MAJOR_VERSION 8 |
56 | #define QED_MINOR_VERSION 33 | 56 | #define QED_MINOR_VERSION 37 |
57 | #define QED_REVISION_VERSION 0 | 57 | #define QED_REVISION_VERSION 0 |
58 | #define QED_ENGINEERING_VERSION 20 | 58 | #define QED_ENGINEERING_VERSION 20 |
59 | 59 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index e68ca83ae915..58be1c4c6668 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c | |||
@@ -2216,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, | |||
2216 | u16 num_queues = 0; | 2216 | u16 num_queues = 0; |
2217 | 2217 | ||
2218 | /* Since the feature controls only queue-zones, | 2218 | /* Since the feature controls only queue-zones, |
2219 | * make sure we have the contexts [rx, tx, xdp] to | 2219 | * make sure we have the contexts [rx, xdp, tcs] to |
2220 | * match. | 2220 | * match. |
2221 | */ | 2221 | */ |
2222 | for_each_hwfn(cdev, i) { | 2222 | for_each_hwfn(cdev, i) { |
@@ -2226,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, | |||
2226 | u16 cids; | 2226 | u16 cids; |
2227 | 2227 | ||
2228 | cids = hwfn->pf_params.eth_pf_params.num_cons; | 2228 | cids = hwfn->pf_params.eth_pf_params.num_cons; |
2229 | num_queues += min_t(u16, l2_queues, cids / 3); | 2229 | cids /= (2 + info->num_tc); |
2230 | num_queues += min_t(u16, l2_queues, cids); | ||
2230 | } | 2231 | } |
2231 | 2232 | ||
2232 | /* queues might theoretically be >256, but interrupts' | 2233 | /* queues might theoretically be >256, but interrupts' |
@@ -2870,7 +2871,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) | |||
2870 | p_hwfn = p_cid->p_owner; | 2871 | p_hwfn = p_cid->p_owner; |
2871 | rc = qed_get_queue_coalesce(p_hwfn, coal, handle); | 2872 | rc = qed_get_queue_coalesce(p_hwfn, coal, handle); |
2872 | if (rc) | 2873 | if (rc) |
2873 | DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); | 2874 | DP_VERBOSE(cdev, QED_MSG_DEBUG, |
2875 | "Unable to read queue coalescing\n"); | ||
2874 | 2876 | ||
2875 | return rc; | 2877 | return rc; |
2876 | } | 2878 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 4179c9013fc6..96ab77ae6af5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h | |||
@@ -382,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn); | |||
382 | * @param p_hwfn | 382 | * @param p_hwfn |
383 | */ | 383 | */ |
384 | void qed_consq_free(struct qed_hwfn *p_hwfn); | 384 | void qed_consq_free(struct qed_hwfn *p_hwfn); |
385 | int qed_spq_pend_post(struct qed_hwfn *p_hwfn); | ||
385 | 386 | ||
386 | /** | 387 | /** |
387 | * @file | 388 | * @file |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 888274fa208b..5a495fda9e9d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | |||
@@ -604,6 +604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn) | |||
604 | 604 | ||
605 | p_ent->ramrod.pf_update.update_mf_vlan_flag = true; | 605 | p_ent->ramrod.pf_update.update_mf_vlan_flag = true; |
606 | p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); | 606 | p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); |
607 | if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) | ||
608 | p_ent->ramrod.pf_update.mf_vlan |= | ||
609 | cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); | ||
607 | 610 | ||
608 | return qed_spq_post(p_hwfn, p_ent, NULL); | 611 | return qed_spq_post(p_hwfn, p_ent, NULL); |
609 | } | 612 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index eb88bbc6b193..ba64ff9bedbd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c | |||
@@ -397,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) | |||
397 | 397 | ||
398 | qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); | 398 | qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); |
399 | 399 | ||
400 | /* Attempt to post pending requests */ | ||
401 | spin_lock_bh(&p_hwfn->p_spq->lock); | ||
402 | rc = qed_spq_pend_post(p_hwfn); | ||
403 | spin_unlock_bh(&p_hwfn->p_spq->lock); | ||
404 | |||
400 | return rc; | 405 | return rc; |
401 | } | 406 | } |
402 | 407 | ||
@@ -767,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, | |||
767 | return 0; | 772 | return 0; |
768 | } | 773 | } |
769 | 774 | ||
770 | static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) | 775 | int qed_spq_pend_post(struct qed_hwfn *p_hwfn) |
771 | { | 776 | { |
772 | struct qed_spq *p_spq = p_hwfn->p_spq; | 777 | struct qed_spq *p_spq = p_hwfn->p_spq; |
773 | struct qed_spq_entry *p_ent = NULL; | 778 | struct qed_spq_entry *p_ent = NULL; |
@@ -905,7 +910,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, | |||
905 | struct qed_spq_entry *p_ent = NULL; | 910 | struct qed_spq_entry *p_ent = NULL; |
906 | struct qed_spq_entry *tmp; | 911 | struct qed_spq_entry *tmp; |
907 | struct qed_spq_entry *found = NULL; | 912 | struct qed_spq_entry *found = NULL; |
908 | int rc; | ||
909 | 913 | ||
910 | if (!p_hwfn) | 914 | if (!p_hwfn) |
911 | return -EINVAL; | 915 | return -EINVAL; |
@@ -963,12 +967,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, | |||
963 | */ | 967 | */ |
964 | qed_spq_return_entry(p_hwfn, found); | 968 | qed_spq_return_entry(p_hwfn, found); |
965 | 969 | ||
966 | /* Attempt to post pending requests */ | 970 | return 0; |
967 | spin_lock_bh(&p_spq->lock); | ||
968 | rc = qed_spq_pend_post(p_hwfn); | ||
969 | spin_unlock_bh(&p_spq->lock); | ||
970 | |||
971 | return rc; | ||
972 | } | 971 | } |
973 | 972 | ||
974 | int qed_consq_alloc(struct qed_hwfn *p_hwfn) | 973 | int qed_consq_alloc(struct qed_hwfn *p_hwfn) |
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 613249d1e967..730997b13747 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h | |||
@@ -56,7 +56,7 @@ | |||
56 | #include <net/tc_act/tc_gact.h> | 56 | #include <net/tc_act/tc_gact.h> |
57 | 57 | ||
58 | #define QEDE_MAJOR_VERSION 8 | 58 | #define QEDE_MAJOR_VERSION 8 |
59 | #define QEDE_MINOR_VERSION 33 | 59 | #define QEDE_MINOR_VERSION 37 |
60 | #define QEDE_REVISION_VERSION 0 | 60 | #define QEDE_REVISION_VERSION 0 |
61 | #define QEDE_ENGINEERING_VERSION 20 | 61 | #define QEDE_ENGINEERING_VERSION 20 |
62 | #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ | 62 | #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ |
@@ -494,6 +494,9 @@ struct qede_reload_args { | |||
494 | 494 | ||
495 | /* Datapath functions definition */ | 495 | /* Datapath functions definition */ |
496 | netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); | 496 | netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); |
497 | u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, | ||
498 | struct net_device *sb_dev, | ||
499 | select_queue_fallback_t fallback); | ||
497 | netdev_features_t qede_features_check(struct sk_buff *skb, | 500 | netdev_features_t qede_features_check(struct sk_buff *skb, |
498 | struct net_device *dev, | 501 | struct net_device *dev, |
499 | netdev_features_t features); | 502 | netdev_features_t features); |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index bdf816fe5a16..31b046e24565 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c | |||
@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1695 | return NETDEV_TX_OK; | 1695 | return NETDEV_TX_OK; |
1696 | } | 1696 | } |
1697 | 1697 | ||
1698 | u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, | ||
1699 | struct net_device *sb_dev, | ||
1700 | select_queue_fallback_t fallback) | ||
1701 | { | ||
1702 | struct qede_dev *edev = netdev_priv(dev); | ||
1703 | int total_txq; | ||
1704 | |||
1705 | total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; | ||
1706 | |||
1707 | return QEDE_TSS_COUNT(edev) ? | ||
1708 | fallback(dev, skb, NULL) % total_txq : 0; | ||
1709 | } | ||
1710 | |||
1698 | /* 8B udp header + 8B base tunnel header + 32B option length */ | 1711 | /* 8B udp header + 8B base tunnel header + 32B option length */ |
1699 | #define QEDE_MAX_TUN_HDR_LEN 48 | 1712 | #define QEDE_MAX_TUN_HDR_LEN 48 |
1700 | 1713 | ||
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 5a74fcbdbc2b..9790f26d17c4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
@@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = { | |||
631 | .ndo_open = qede_open, | 631 | .ndo_open = qede_open, |
632 | .ndo_stop = qede_close, | 632 | .ndo_stop = qede_close, |
633 | .ndo_start_xmit = qede_start_xmit, | 633 | .ndo_start_xmit = qede_start_xmit, |
634 | .ndo_select_queue = qede_select_queue, | ||
634 | .ndo_set_rx_mode = qede_set_rx_mode, | 635 | .ndo_set_rx_mode = qede_set_rx_mode, |
635 | .ndo_set_mac_address = qede_set_mac_addr, | 636 | .ndo_set_mac_address = qede_set_mac_addr, |
636 | .ndo_validate_addr = eth_validate_addr, | 637 | .ndo_validate_addr = eth_validate_addr, |
@@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = { | |||
666 | .ndo_open = qede_open, | 667 | .ndo_open = qede_open, |
667 | .ndo_stop = qede_close, | 668 | .ndo_stop = qede_close, |
668 | .ndo_start_xmit = qede_start_xmit, | 669 | .ndo_start_xmit = qede_start_xmit, |
670 | .ndo_select_queue = qede_select_queue, | ||
669 | .ndo_set_rx_mode = qede_set_rx_mode, | 671 | .ndo_set_rx_mode = qede_set_rx_mode, |
670 | .ndo_set_mac_address = qede_set_mac_addr, | 672 | .ndo_set_mac_address = qede_set_mac_addr, |
671 | .ndo_validate_addr = eth_validate_addr, | 673 | .ndo_validate_addr = eth_validate_addr, |
@@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = { | |||
684 | .ndo_open = qede_open, | 686 | .ndo_open = qede_open, |
685 | .ndo_stop = qede_close, | 687 | .ndo_stop = qede_close, |
686 | .ndo_start_xmit = qede_start_xmit, | 688 | .ndo_start_xmit = qede_start_xmit, |
689 | .ndo_select_queue = qede_select_queue, | ||
687 | .ndo_set_rx_mode = qede_set_rx_mode, | 690 | .ndo_set_rx_mode = qede_set_rx_mode, |
688 | .ndo_set_mac_address = qede_set_mac_addr, | 691 | .ndo_set_mac_address = qede_set_mac_addr, |
689 | .ndo_validate_addr = eth_validate_addr, | 692 | .ndo_validate_addr = eth_validate_addr, |
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 15c62c160953..be47d864f8b9 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c | |||
@@ -1037,7 +1037,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep) | |||
1037 | skb = ep->tx_skbuff[entry]; | 1037 | skb = ep->tx_skbuff[entry]; |
1038 | pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, | 1038 | pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, |
1039 | skb->len, PCI_DMA_TODEVICE); | 1039 | skb->len, PCI_DMA_TODEVICE); |
1040 | dev_kfree_skb_irq(skb); | 1040 | dev_consume_skb_irq(skb); |
1041 | ep->tx_skbuff[entry] = NULL; | 1041 | ep->tx_skbuff[entry] = NULL; |
1042 | } | 1042 | } |
1043 | 1043 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index d1f61c25d82b..5d85742a2be0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | |||
@@ -721,8 +721,11 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) | |||
721 | { | 721 | { |
722 | unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); | 722 | unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); |
723 | 723 | ||
724 | if (!clk) | 724 | if (!clk) { |
725 | return 0; | 725 | clk = priv->plat->clk_ref_rate; |
726 | if (!clk) | ||
727 | return 0; | ||
728 | } | ||
726 | 729 | ||
727 | return (usec * (clk / 1000000)) / 256; | 730 | return (usec * (clk / 1000000)) / 256; |
728 | } | 731 | } |
@@ -731,8 +734,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) | |||
731 | { | 734 | { |
732 | unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); | 735 | unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); |
733 | 736 | ||
734 | if (!clk) | 737 | if (!clk) { |
735 | return 0; | 738 | clk = priv->plat->clk_ref_rate; |
739 | if (!clk) | ||
740 | return 0; | ||
741 | } | ||
736 | 742 | ||
737 | return (riwt * 256) / (clk / 1000000); | 743 | return (riwt * 256) / (clk / 1000000); |
738 | } | 744 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 5afba69981cf..685d20472358 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -3023,10 +3023,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3023 | 3023 | ||
3024 | tx_q = &priv->tx_queue[queue]; | 3024 | tx_q = &priv->tx_queue[queue]; |
3025 | 3025 | ||
3026 | if (priv->tx_path_in_lpi_mode) | ||
3027 | stmmac_disable_eee_mode(priv); | ||
3028 | |||
3026 | /* Manage oversized TCP frames for GMAC4 device */ | 3029 | /* Manage oversized TCP frames for GMAC4 device */ |
3027 | if (skb_is_gso(skb) && priv->tso) { | 3030 | if (skb_is_gso(skb) && priv->tso) { |
3028 | if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) | 3031 | if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { |
3032 | /* | ||
3033 | * There is no way to determine the number of TSO | ||
3034 | * capable Queues. Let's use always the Queue 0 | ||
3035 | * because if TSO is supported then at least this | ||
3036 | * one will be capable. | ||
3037 | */ | ||
3038 | skb_set_queue_mapping(skb, 0); | ||
3039 | |||
3029 | return stmmac_tso_xmit(skb, dev); | 3040 | return stmmac_tso_xmit(skb, dev); |
3041 | } | ||
3030 | } | 3042 | } |
3031 | 3043 | ||
3032 | if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { | 3044 | if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { |
@@ -3041,9 +3053,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3041 | return NETDEV_TX_BUSY; | 3053 | return NETDEV_TX_BUSY; |
3042 | } | 3054 | } |
3043 | 3055 | ||
3044 | if (priv->tx_path_in_lpi_mode) | ||
3045 | stmmac_disable_eee_mode(priv); | ||
3046 | |||
3047 | entry = tx_q->cur_tx; | 3056 | entry = tx_q->cur_tx; |
3048 | first_entry = entry; | 3057 | first_entry = entry; |
3049 | WARN_ON(tx_q->tx_skbuff[first_entry]); | 3058 | WARN_ON(tx_q->tx_skbuff[first_entry]); |
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 7ec4eb74fe21..6fc05c106afc 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c | |||
@@ -1898,7 +1898,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) | |||
1898 | cp->net_stats[ring].tx_packets++; | 1898 | cp->net_stats[ring].tx_packets++; |
1899 | cp->net_stats[ring].tx_bytes += skb->len; | 1899 | cp->net_stats[ring].tx_bytes += skb->len; |
1900 | spin_unlock(&cp->stat_lock[ring]); | 1900 | spin_unlock(&cp->stat_lock[ring]); |
1901 | dev_kfree_skb_irq(skb); | 1901 | dev_consume_skb_irq(skb); |
1902 | } | 1902 | } |
1903 | cp->tx_old[ring] = entry; | 1903 | cp->tx_old[ring] = entry; |
1904 | 1904 | ||
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 720b7ac77f3b..e9b757b03b56 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c | |||
@@ -781,7 +781,7 @@ static void bigmac_tx(struct bigmac *bp) | |||
781 | 781 | ||
782 | DTX(("skb(%p) ", skb)); | 782 | DTX(("skb(%p) ", skb)); |
783 | bp->tx_skbs[elem] = NULL; | 783 | bp->tx_skbs[elem] = NULL; |
784 | dev_kfree_skb_irq(skb); | 784 | dev_consume_skb_irq(skb); |
785 | 785 | ||
786 | elem = NEXT_TX(elem); | 786 | elem = NEXT_TX(elem); |
787 | } | 787 | } |
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index ff641cf30a4e..d007dfeba5c3 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c | |||
@@ -1962,7 +1962,7 @@ static void happy_meal_tx(struct happy_meal *hp) | |||
1962 | this = &txbase[elem]; | 1962 | this = &txbase[elem]; |
1963 | } | 1963 | } |
1964 | 1964 | ||
1965 | dev_kfree_skb_irq(skb); | 1965 | dev_consume_skb_irq(skb); |
1966 | dev->stats.tx_packets++; | 1966 | dev->stats.tx_packets++; |
1967 | } | 1967 | } |
1968 | hp->tx_old = elem; | 1968 | hp->tx_old = elem; |
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index dc966ddb6d81..b24c11187017 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c | |||
@@ -1739,7 +1739,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv) | |||
1739 | tx_level -= db->rptr->len; /* '-' koz len is negative */ | 1739 | tx_level -= db->rptr->len; /* '-' koz len is negative */ |
1740 | 1740 | ||
1741 | /* now should come skb pointer - free it */ | 1741 | /* now should come skb pointer - free it */ |
1742 | dev_kfree_skb_irq(db->rptr->addr.skb); | 1742 | dev_consume_skb_irq(db->rptr->addr.skb); |
1743 | bdx_tx_db_inc_rptr(db); | 1743 | bdx_tx_db_inc_rptr(db); |
1744 | } | 1744 | } |
1745 | 1745 | ||
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 82412691ee66..27f6cf140845 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c | |||
@@ -1740,7 +1740,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, | |||
1740 | dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], | 1740 | dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], |
1741 | le16_to_cpu(pktlen), DMA_TO_DEVICE); | 1741 | le16_to_cpu(pktlen), DMA_TO_DEVICE); |
1742 | } | 1742 | } |
1743 | dev_kfree_skb_irq(skb); | 1743 | dev_consume_skb_irq(skb); |
1744 | tdinfo->skb = NULL; | 1744 | tdinfo->skb = NULL; |
1745 | } | 1745 | } |
1746 | 1746 | ||
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 38ac8ef41f5f..56b7791911bf 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c | |||
@@ -3512,7 +3512,7 @@ static int dfx_xmt_done(DFX_board_t *bp) | |||
3512 | bp->descr_block_virt->xmt_data[comp].long_1, | 3512 | bp->descr_block_virt->xmt_data[comp].long_1, |
3513 | p_xmt_drv_descr->p_skb->len, | 3513 | p_xmt_drv_descr->p_skb->len, |
3514 | DMA_TO_DEVICE); | 3514 | DMA_TO_DEVICE); |
3515 | dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); | 3515 | dev_consume_skb_irq(p_xmt_drv_descr->p_skb); |
3516 | 3516 | ||
3517 | /* | 3517 | /* |
3518 | * Move to start of next packet by updating completion index | 3518 | * Move to start of next packet by updating completion index |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 58bbba8582b0..3377ac66a347 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
@@ -1512,9 +1512,13 @@ static void geneve_link_config(struct net_device *dev, | |||
1512 | } | 1512 | } |
1513 | #if IS_ENABLED(CONFIG_IPV6) | 1513 | #if IS_ENABLED(CONFIG_IPV6) |
1514 | case AF_INET6: { | 1514 | case AF_INET6: { |
1515 | struct rt6_info *rt = rt6_lookup(geneve->net, | 1515 | struct rt6_info *rt; |
1516 | &info->key.u.ipv6.dst, NULL, 0, | 1516 | |
1517 | NULL, 0); | 1517 | if (!__in6_dev_get(dev)) |
1518 | break; | ||
1519 | |||
1520 | rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0, | ||
1521 | NULL, 0); | ||
1518 | 1522 | ||
1519 | if (rt && rt->dst.dev) | 1523 | if (rt && rt->dst.dev) |
1520 | ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; | 1524 | ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; |
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 44de81e5f140..c589f5ae75bb 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c | |||
@@ -905,9 +905,9 @@ mcr20a_irq_clean_complete(void *context) | |||
905 | } | 905 | } |
906 | break; | 906 | break; |
907 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): | 907 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): |
908 | /* rx is starting */ | 908 | /* rx is starting */ |
909 | dev_dbg(printdev(lp), "RX is starting\n"); | 909 | dev_dbg(printdev(lp), "RX is starting\n"); |
910 | mcr20a_handle_rx(lp); | 910 | mcr20a_handle_rx(lp); |
911 | break; | 911 | break; |
912 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): | 912 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): |
913 | if (lp->is_tx) { | 913 | if (lp->is_tx) { |
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 19bdde60680c..7cdac77d0c68 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
@@ -100,12 +100,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval, | |||
100 | err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); | 100 | err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); |
101 | if (!err) { | 101 | if (!err) { |
102 | mdev->l3mdev_ops = &ipvl_l3mdev_ops; | 102 | mdev->l3mdev_ops = &ipvl_l3mdev_ops; |
103 | mdev->priv_flags |= IFF_L3MDEV_MASTER; | 103 | mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER; |
104 | } else | 104 | } else |
105 | goto fail; | 105 | goto fail; |
106 | } else if (port->mode == IPVLAN_MODE_L3S) { | 106 | } else if (port->mode == IPVLAN_MODE_L3S) { |
107 | /* Old mode was L3S */ | 107 | /* Old mode was L3S */ |
108 | mdev->priv_flags &= ~IFF_L3MDEV_MASTER; | 108 | mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; |
109 | ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); | 109 | ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); |
110 | mdev->l3mdev_ops = NULL; | 110 | mdev->l3mdev_ops = NULL; |
111 | } | 111 | } |
@@ -167,7 +167,7 @@ static void ipvlan_port_destroy(struct net_device *dev) | |||
167 | struct sk_buff *skb; | 167 | struct sk_buff *skb; |
168 | 168 | ||
169 | if (port->mode == IPVLAN_MODE_L3S) { | 169 | if (port->mode == IPVLAN_MODE_L3S) { |
170 | dev->priv_flags &= ~IFF_L3MDEV_MASTER; | 170 | dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; |
171 | ipvlan_unregister_nf_hook(dev_net(dev)); | 171 | ipvlan_unregister_nf_hook(dev_net(dev)); |
172 | dev->l3mdev_ops = NULL; | 172 | dev->l3mdev_ops = NULL; |
173 | } | 173 | } |
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 18b41bc345ab..6e8807212aa3 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -898,14 +898,14 @@ static void decode_txts(struct dp83640_private *dp83640, | |||
898 | struct phy_txts *phy_txts) | 898 | struct phy_txts *phy_txts) |
899 | { | 899 | { |
900 | struct skb_shared_hwtstamps shhwtstamps; | 900 | struct skb_shared_hwtstamps shhwtstamps; |
901 | struct dp83640_skb_info *skb_info; | ||
901 | struct sk_buff *skb; | 902 | struct sk_buff *skb; |
902 | u64 ns; | ||
903 | u8 overflow; | 903 | u8 overflow; |
904 | u64 ns; | ||
904 | 905 | ||
905 | /* We must already have the skb that triggered this. */ | 906 | /* We must already have the skb that triggered this. */ |
906 | 907 | again: | |
907 | skb = skb_dequeue(&dp83640->tx_queue); | 908 | skb = skb_dequeue(&dp83640->tx_queue); |
908 | |||
909 | if (!skb) { | 909 | if (!skb) { |
910 | pr_debug("have timestamp but tx_queue empty\n"); | 910 | pr_debug("have timestamp but tx_queue empty\n"); |
911 | return; | 911 | return; |
@@ -920,6 +920,11 @@ static void decode_txts(struct dp83640_private *dp83640, | |||
920 | } | 920 | } |
921 | return; | 921 | return; |
922 | } | 922 | } |
923 | skb_info = (struct dp83640_skb_info *)skb->cb; | ||
924 | if (time_after(jiffies, skb_info->tmo)) { | ||
925 | kfree_skb(skb); | ||
926 | goto again; | ||
927 | } | ||
923 | 928 | ||
924 | ns = phy2txts(phy_txts); | 929 | ns = phy2txts(phy_txts); |
925 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); | 930 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
@@ -1472,6 +1477,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev, | |||
1472 | static void dp83640_txtstamp(struct phy_device *phydev, | 1477 | static void dp83640_txtstamp(struct phy_device *phydev, |
1473 | struct sk_buff *skb, int type) | 1478 | struct sk_buff *skb, int type) |
1474 | { | 1479 | { |
1480 | struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb; | ||
1475 | struct dp83640_private *dp83640 = phydev->priv; | 1481 | struct dp83640_private *dp83640 = phydev->priv; |
1476 | 1482 | ||
1477 | switch (dp83640->hwts_tx_en) { | 1483 | switch (dp83640->hwts_tx_en) { |
@@ -1484,6 +1490,7 @@ static void dp83640_txtstamp(struct phy_device *phydev, | |||
1484 | /* fall through */ | 1490 | /* fall through */ |
1485 | case HWTSTAMP_TX_ON: | 1491 | case HWTSTAMP_TX_ON: |
1486 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | 1492 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
1493 | skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; | ||
1487 | skb_queue_tail(&dp83640->tx_queue, skb); | 1494 | skb_queue_tail(&dp83640->tx_queue, skb); |
1488 | break; | 1495 | break; |
1489 | 1496 | ||
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 2e12f982534f..abb7876a8776 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev) | |||
847 | 847 | ||
848 | /* SGMII-to-Copper mode initialization */ | 848 | /* SGMII-to-Copper mode initialization */ |
849 | if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { | 849 | if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { |
850 | |||
851 | /* Select page 18 */ | 850 | /* Select page 18 */ |
852 | err = marvell_set_page(phydev, 18); | 851 | err = marvell_set_page(phydev, 18); |
853 | if (err < 0) | 852 | if (err < 0) |
@@ -870,21 +869,6 @@ static int m88e1510_config_init(struct phy_device *phydev) | |||
870 | err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); | 869 | err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); |
871 | if (err < 0) | 870 | if (err < 0) |
872 | return err; | 871 | return err; |
873 | |||
874 | /* There appears to be a bug in the 88e1512 when used in | ||
875 | * SGMII to copper mode, where the AN advertisement register | ||
876 | * clears the pause bits each time a negotiation occurs. | ||
877 | * This means we can never be truely sure what was advertised, | ||
878 | * so disable Pause support. | ||
879 | */ | ||
880 | linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, | ||
881 | phydev->supported); | ||
882 | linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, | ||
883 | phydev->supported); | ||
884 | linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, | ||
885 | phydev->advertising); | ||
886 | linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, | ||
887 | phydev->advertising); | ||
888 | } | 872 | } |
889 | 873 | ||
890 | return m88e1318_config_init(phydev); | 874 | return m88e1318_config_init(phydev); |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 18656c4094b3..fed298c0cb39 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -866,8 +866,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file, | |||
866 | if (rtnl_dereference(tun->xdp_prog)) | 866 | if (rtnl_dereference(tun->xdp_prog)) |
867 | sock_set_flag(&tfile->sk, SOCK_XDP); | 867 | sock_set_flag(&tfile->sk, SOCK_XDP); |
868 | 868 | ||
869 | tun_set_real_num_queues(tun); | ||
870 | |||
871 | /* device is allowed to go away first, so no need to hold extra | 869 | /* device is allowed to go away first, so no need to hold extra |
872 | * refcnt. | 870 | * refcnt. |
873 | */ | 871 | */ |
@@ -879,6 +877,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, | |||
879 | rcu_assign_pointer(tfile->tun, tun); | 877 | rcu_assign_pointer(tfile->tun, tun); |
880 | rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); | 878 | rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); |
881 | tun->numqueues++; | 879 | tun->numqueues++; |
880 | tun_set_real_num_queues(tun); | ||
882 | out: | 881 | out: |
883 | return err; | 882 | return err; |
884 | } | 883 | } |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8fadd8eaf601..4cfceb789eea 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644); | |||
57 | #define VIRTIO_XDP_TX BIT(0) | 57 | #define VIRTIO_XDP_TX BIT(0) |
58 | #define VIRTIO_XDP_REDIR BIT(1) | 58 | #define VIRTIO_XDP_REDIR BIT(1) |
59 | 59 | ||
60 | #define VIRTIO_XDP_FLAG BIT(0) | ||
61 | |||
60 | /* RX packet size EWMA. The average packet size is used to determine the packet | 62 | /* RX packet size EWMA. The average packet size is used to determine the packet |
61 | * buffer size when refilling RX rings. As the entire RX ring may be refilled | 63 | * buffer size when refilling RX rings. As the entire RX ring may be refilled |
62 | * at once, the weight is chosen so that the EWMA will be insensitive to short- | 64 | * at once, the weight is chosen so that the EWMA will be insensitive to short- |
@@ -252,6 +254,21 @@ struct padded_vnet_hdr { | |||
252 | char padding[4]; | 254 | char padding[4]; |
253 | }; | 255 | }; |
254 | 256 | ||
257 | static bool is_xdp_frame(void *ptr) | ||
258 | { | ||
259 | return (unsigned long)ptr & VIRTIO_XDP_FLAG; | ||
260 | } | ||
261 | |||
262 | static void *xdp_to_ptr(struct xdp_frame *ptr) | ||
263 | { | ||
264 | return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG); | ||
265 | } | ||
266 | |||
267 | static struct xdp_frame *ptr_to_xdp(void *ptr) | ||
268 | { | ||
269 | return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG); | ||
270 | } | ||
271 | |||
255 | /* Converting between virtqueue no. and kernel tx/rx queue no. | 272 | /* Converting between virtqueue no. and kernel tx/rx queue no. |
256 | * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq | 273 | * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq |
257 | */ | 274 | */ |
@@ -462,7 +479,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, | |||
462 | 479 | ||
463 | sg_init_one(sq->sg, xdpf->data, xdpf->len); | 480 | sg_init_one(sq->sg, xdpf->data, xdpf->len); |
464 | 481 | ||
465 | err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); | 482 | err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), |
483 | GFP_ATOMIC); | ||
466 | if (unlikely(err)) | 484 | if (unlikely(err)) |
467 | return -ENOSPC; /* Caller handle free/refcnt */ | 485 | return -ENOSPC; /* Caller handle free/refcnt */ |
468 | 486 | ||
@@ -482,36 +500,47 @@ static int virtnet_xdp_xmit(struct net_device *dev, | |||
482 | { | 500 | { |
483 | struct virtnet_info *vi = netdev_priv(dev); | 501 | struct virtnet_info *vi = netdev_priv(dev); |
484 | struct receive_queue *rq = vi->rq; | 502 | struct receive_queue *rq = vi->rq; |
485 | struct xdp_frame *xdpf_sent; | ||
486 | struct bpf_prog *xdp_prog; | 503 | struct bpf_prog *xdp_prog; |
487 | struct send_queue *sq; | 504 | struct send_queue *sq; |
488 | unsigned int len; | 505 | unsigned int len; |
506 | int packets = 0; | ||
507 | int bytes = 0; | ||
489 | int drops = 0; | 508 | int drops = 0; |
490 | int kicks = 0; | 509 | int kicks = 0; |
491 | int ret, err; | 510 | int ret, err; |
511 | void *ptr; | ||
492 | int i; | 512 | int i; |
493 | 513 | ||
494 | sq = virtnet_xdp_sq(vi); | ||
495 | |||
496 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { | ||
497 | ret = -EINVAL; | ||
498 | drops = n; | ||
499 | goto out; | ||
500 | } | ||
501 | |||
502 | /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this | 514 | /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this |
503 | * indicate XDP resources have been successfully allocated. | 515 | * indicate XDP resources have been successfully allocated. |
504 | */ | 516 | */ |
505 | xdp_prog = rcu_dereference(rq->xdp_prog); | 517 | xdp_prog = rcu_dereference(rq->xdp_prog); |
506 | if (!xdp_prog) { | 518 | if (!xdp_prog) |
507 | ret = -ENXIO; | 519 | return -ENXIO; |
520 | |||
521 | sq = virtnet_xdp_sq(vi); | ||
522 | |||
523 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { | ||
524 | ret = -EINVAL; | ||
508 | drops = n; | 525 | drops = n; |
509 | goto out; | 526 | goto out; |
510 | } | 527 | } |
511 | 528 | ||
512 | /* Free up any pending old buffers before queueing new ones. */ | 529 | /* Free up any pending old buffers before queueing new ones. */ |
513 | while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) | 530 | while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { |
514 | xdp_return_frame(xdpf_sent); | 531 | if (likely(is_xdp_frame(ptr))) { |
532 | struct xdp_frame *frame = ptr_to_xdp(ptr); | ||
533 | |||
534 | bytes += frame->len; | ||
535 | xdp_return_frame(frame); | ||
536 | } else { | ||
537 | struct sk_buff *skb = ptr; | ||
538 | |||
539 | bytes += skb->len; | ||
540 | napi_consume_skb(skb, false); | ||
541 | } | ||
542 | packets++; | ||
543 | } | ||
515 | 544 | ||
516 | for (i = 0; i < n; i++) { | 545 | for (i = 0; i < n; i++) { |
517 | struct xdp_frame *xdpf = frames[i]; | 546 | struct xdp_frame *xdpf = frames[i]; |
@@ -530,6 +559,8 @@ static int virtnet_xdp_xmit(struct net_device *dev, | |||
530 | } | 559 | } |
531 | out: | 560 | out: |
532 | u64_stats_update_begin(&sq->stats.syncp); | 561 | u64_stats_update_begin(&sq->stats.syncp); |
562 | sq->stats.bytes += bytes; | ||
563 | sq->stats.packets += packets; | ||
533 | sq->stats.xdp_tx += n; | 564 | sq->stats.xdp_tx += n; |
534 | sq->stats.xdp_tx_drops += drops; | 565 | sq->stats.xdp_tx_drops += drops; |
535 | sq->stats.kicks += kicks; | 566 | sq->stats.kicks += kicks; |
@@ -1332,18 +1363,26 @@ static int virtnet_receive(struct receive_queue *rq, int budget, | |||
1332 | 1363 | ||
1333 | static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) | 1364 | static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) |
1334 | { | 1365 | { |
1335 | struct sk_buff *skb; | ||
1336 | unsigned int len; | 1366 | unsigned int len; |
1337 | unsigned int packets = 0; | 1367 | unsigned int packets = 0; |
1338 | unsigned int bytes = 0; | 1368 | unsigned int bytes = 0; |
1369 | void *ptr; | ||
1339 | 1370 | ||
1340 | while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { | 1371 | while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { |
1341 | pr_debug("Sent skb %p\n", skb); | 1372 | if (likely(!is_xdp_frame(ptr))) { |
1373 | struct sk_buff *skb = ptr; | ||
1342 | 1374 | ||
1343 | bytes += skb->len; | 1375 | pr_debug("Sent skb %p\n", skb); |
1344 | packets++; | 1376 | |
1377 | bytes += skb->len; | ||
1378 | napi_consume_skb(skb, in_napi); | ||
1379 | } else { | ||
1380 | struct xdp_frame *frame = ptr_to_xdp(ptr); | ||
1345 | 1381 | ||
1346 | napi_consume_skb(skb, in_napi); | 1382 | bytes += frame->len; |
1383 | xdp_return_frame(frame); | ||
1384 | } | ||
1385 | packets++; | ||
1347 | } | 1386 | } |
1348 | 1387 | ||
1349 | /* Avoid overhead when no packets have been processed | 1388 | /* Avoid overhead when no packets have been processed |
@@ -1358,6 +1397,16 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) | |||
1358 | u64_stats_update_end(&sq->stats.syncp); | 1397 | u64_stats_update_end(&sq->stats.syncp); |
1359 | } | 1398 | } |
1360 | 1399 | ||
1400 | static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) | ||
1401 | { | ||
1402 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) | ||
1403 | return false; | ||
1404 | else if (q < vi->curr_queue_pairs) | ||
1405 | return true; | ||
1406 | else | ||
1407 | return false; | ||
1408 | } | ||
1409 | |||
1361 | static void virtnet_poll_cleantx(struct receive_queue *rq) | 1410 | static void virtnet_poll_cleantx(struct receive_queue *rq) |
1362 | { | 1411 | { |
1363 | struct virtnet_info *vi = rq->vq->vdev->priv; | 1412 | struct virtnet_info *vi = rq->vq->vdev->priv; |
@@ -1365,7 +1414,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) | |||
1365 | struct send_queue *sq = &vi->sq[index]; | 1414 | struct send_queue *sq = &vi->sq[index]; |
1366 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); | 1415 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); |
1367 | 1416 | ||
1368 | if (!sq->napi.weight) | 1417 | if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) |
1369 | return; | 1418 | return; |
1370 | 1419 | ||
1371 | if (__netif_tx_trylock(txq)) { | 1420 | if (__netif_tx_trylock(txq)) { |
@@ -1442,8 +1491,16 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) | |||
1442 | { | 1491 | { |
1443 | struct send_queue *sq = container_of(napi, struct send_queue, napi); | 1492 | struct send_queue *sq = container_of(napi, struct send_queue, napi); |
1444 | struct virtnet_info *vi = sq->vq->vdev->priv; | 1493 | struct virtnet_info *vi = sq->vq->vdev->priv; |
1445 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); | 1494 | unsigned int index = vq2txq(sq->vq); |
1495 | struct netdev_queue *txq; | ||
1446 | 1496 | ||
1497 | if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { | ||
1498 | /* We don't need to enable cb for XDP */ | ||
1499 | napi_complete_done(napi, 0); | ||
1500 | return 0; | ||
1501 | } | ||
1502 | |||
1503 | txq = netdev_get_tx_queue(vi->dev, index); | ||
1447 | __netif_tx_lock(txq, raw_smp_processor_id()); | 1504 | __netif_tx_lock(txq, raw_smp_processor_id()); |
1448 | free_old_xmit_skbs(sq, true); | 1505 | free_old_xmit_skbs(sq, true); |
1449 | __netif_tx_unlock(txq); | 1506 | __netif_tx_unlock(txq); |
@@ -2395,6 +2452,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |||
2395 | return -ENOMEM; | 2452 | return -ENOMEM; |
2396 | } | 2453 | } |
2397 | 2454 | ||
2455 | old_prog = rtnl_dereference(vi->rq[0].xdp_prog); | ||
2456 | if (!prog && !old_prog) | ||
2457 | return 0; | ||
2458 | |||
2398 | if (prog) { | 2459 | if (prog) { |
2399 | prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); | 2460 | prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); |
2400 | if (IS_ERR(prog)) | 2461 | if (IS_ERR(prog)) |
@@ -2402,36 +2463,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |||
2402 | } | 2463 | } |
2403 | 2464 | ||
2404 | /* Make sure NAPI is not using any XDP TX queues for RX. */ | 2465 | /* Make sure NAPI is not using any XDP TX queues for RX. */ |
2405 | if (netif_running(dev)) | 2466 | if (netif_running(dev)) { |
2406 | for (i = 0; i < vi->max_queue_pairs; i++) | 2467 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2407 | napi_disable(&vi->rq[i].napi); | 2468 | napi_disable(&vi->rq[i].napi); |
2469 | virtnet_napi_tx_disable(&vi->sq[i].napi); | ||
2470 | } | ||
2471 | } | ||
2472 | |||
2473 | if (!prog) { | ||
2474 | for (i = 0; i < vi->max_queue_pairs; i++) { | ||
2475 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); | ||
2476 | if (i == 0) | ||
2477 | virtnet_restore_guest_offloads(vi); | ||
2478 | } | ||
2479 | synchronize_net(); | ||
2480 | } | ||
2408 | 2481 | ||
2409 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); | ||
2410 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); | 2482 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); |
2411 | if (err) | 2483 | if (err) |
2412 | goto err; | 2484 | goto err; |
2485 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); | ||
2413 | vi->xdp_queue_pairs = xdp_qp; | 2486 | vi->xdp_queue_pairs = xdp_qp; |
2414 | 2487 | ||
2415 | for (i = 0; i < vi->max_queue_pairs; i++) { | 2488 | if (prog) { |
2416 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); | 2489 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2417 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); | 2490 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); |
2418 | if (i == 0) { | 2491 | if (i == 0 && !old_prog) |
2419 | if (!old_prog) | ||
2420 | virtnet_clear_guest_offloads(vi); | 2492 | virtnet_clear_guest_offloads(vi); |
2421 | if (!prog) | ||
2422 | virtnet_restore_guest_offloads(vi); | ||
2423 | } | 2493 | } |
2494 | } | ||
2495 | |||
2496 | for (i = 0; i < vi->max_queue_pairs; i++) { | ||
2424 | if (old_prog) | 2497 | if (old_prog) |
2425 | bpf_prog_put(old_prog); | 2498 | bpf_prog_put(old_prog); |
2426 | if (netif_running(dev)) | 2499 | if (netif_running(dev)) { |
2427 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | 2500 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
2501 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, | ||
2502 | &vi->sq[i].napi); | ||
2503 | } | ||
2428 | } | 2504 | } |
2429 | 2505 | ||
2430 | return 0; | 2506 | return 0; |
2431 | 2507 | ||
2432 | err: | 2508 | err: |
2433 | for (i = 0; i < vi->max_queue_pairs; i++) | 2509 | if (!prog) { |
2434 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | 2510 | virtnet_clear_guest_offloads(vi); |
2511 | for (i = 0; i < vi->max_queue_pairs; i++) | ||
2512 | rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); | ||
2513 | } | ||
2514 | |||
2515 | if (netif_running(dev)) { | ||
2516 | for (i = 0; i < vi->max_queue_pairs; i++) { | ||
2517 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | ||
2518 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, | ||
2519 | &vi->sq[i].napi); | ||
2520 | } | ||
2521 | } | ||
2435 | if (prog) | 2522 | if (prog) |
2436 | bpf_prog_sub(prog, vi->max_queue_pairs - 1); | 2523 | bpf_prog_sub(prog, vi->max_queue_pairs - 1); |
2437 | return err; | 2524 | return err; |
@@ -2613,16 +2700,6 @@ static void free_receive_page_frags(struct virtnet_info *vi) | |||
2613 | put_page(vi->rq[i].alloc_frag.page); | 2700 | put_page(vi->rq[i].alloc_frag.page); |
2614 | } | 2701 | } |
2615 | 2702 | ||
2616 | static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) | ||
2617 | { | ||
2618 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) | ||
2619 | return false; | ||
2620 | else if (q < vi->curr_queue_pairs) | ||
2621 | return true; | ||
2622 | else | ||
2623 | return false; | ||
2624 | } | ||
2625 | |||
2626 | static void free_unused_bufs(struct virtnet_info *vi) | 2703 | static void free_unused_bufs(struct virtnet_info *vi) |
2627 | { | 2704 | { |
2628 | void *buf; | 2705 | void *buf; |
@@ -2631,10 +2708,10 @@ static void free_unused_bufs(struct virtnet_info *vi) | |||
2631 | for (i = 0; i < vi->max_queue_pairs; i++) { | 2708 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2632 | struct virtqueue *vq = vi->sq[i].vq; | 2709 | struct virtqueue *vq = vi->sq[i].vq; |
2633 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { | 2710 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
2634 | if (!is_xdp_raw_buffer_queue(vi, i)) | 2711 | if (!is_xdp_frame(buf)) |
2635 | dev_kfree_skb(buf); | 2712 | dev_kfree_skb(buf); |
2636 | else | 2713 | else |
2637 | put_page(virt_to_head_page(buf)); | 2714 | xdp_return_frame(ptr_to_xdp(buf)); |
2638 | } | 2715 | } |
2639 | } | 2716 | } |
2640 | 2717 | ||
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index c0b0f525c87c..27decf8ae840 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c | |||
@@ -1575,7 +1575,7 @@ try: | |||
1575 | dev->stats.tx_packets++; | 1575 | dev->stats.tx_packets++; |
1576 | dev->stats.tx_bytes += skb->len; | 1576 | dev->stats.tx_bytes += skb->len; |
1577 | } | 1577 | } |
1578 | dev_kfree_skb_irq(skb); | 1578 | dev_consume_skb_irq(skb); |
1579 | dpriv->tx_skbuff[cur] = NULL; | 1579 | dpriv->tx_skbuff[cur] = NULL; |
1580 | ++dpriv->tx_dirty; | 1580 | ++dpriv->tx_dirty; |
1581 | } else { | 1581 | } else { |
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 66d889d54e58..a08f04c3f644 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c | |||
@@ -482,7 +482,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) | |||
482 | memset(priv->tx_buffer + | 482 | memset(priv->tx_buffer + |
483 | (be32_to_cpu(bd->buf) - priv->dma_tx_addr), | 483 | (be32_to_cpu(bd->buf) - priv->dma_tx_addr), |
484 | 0, skb->len); | 484 | 0, skb->len); |
485 | dev_kfree_skb_irq(skb); | 485 | dev_consume_skb_irq(skb); |
486 | 486 | ||
487 | priv->tx_skbuff[priv->skb_dirtytx] = NULL; | 487 | priv->tx_skbuff[priv->skb_dirtytx] = NULL; |
488 | priv->skb_dirtytx = | 488 | priv->skb_dirtytx = |
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 399b501f3c3c..e8891f5fc83a 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c | |||
@@ -548,7 +548,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { | |||
548 | { | 548 | { |
549 | .id = WCN3990_HW_1_0_DEV_VERSION, | 549 | .id = WCN3990_HW_1_0_DEV_VERSION, |
550 | .dev_id = 0, | 550 | .dev_id = 0, |
551 | .bus = ATH10K_BUS_PCI, | 551 | .bus = ATH10K_BUS_SNOC, |
552 | .name = "wcn3990 hw1.0", | 552 | .name = "wcn3990 hw1.0", |
553 | .continuous_frag_desc = true, | 553 | .continuous_frag_desc = true, |
554 | .tx_chain_mask = 0x7, | 554 | .tx_chain_mask = 0x7, |
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 491ca3c8b43c..83d5bceea08f 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config IWLWIFI | 1 | config IWLWIFI |
2 | tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " | 2 | tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " |
3 | depends on PCI && HAS_IOMEM | 3 | depends on PCI && HAS_IOMEM && CFG80211 |
4 | select FW_LOADER | 4 | select FW_LOADER |
5 | ---help--- | 5 | ---help--- |
6 | Select to build the driver supporting the: | 6 | Select to build the driver supporting the: |
@@ -47,6 +47,7 @@ if IWLWIFI | |||
47 | config IWLWIFI_LEDS | 47 | config IWLWIFI_LEDS |
48 | bool | 48 | bool |
49 | depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI | 49 | depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI |
50 | depends on IWLMVM || IWLDVM | ||
50 | select LEDS_TRIGGERS | 51 | select LEDS_TRIGGERS |
51 | select MAC80211_LEDS | 52 | select MAC80211_LEDS |
52 | default y | 53 | default y |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 497e762978cc..b2cabce1d74d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | |||
@@ -212,24 +212,24 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev) | |||
212 | mt76x02_add_rate_power_offset(t, delta); | 212 | mt76x02_add_rate_power_offset(t, delta); |
213 | } | 213 | } |
214 | 214 | ||
215 | void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) | 215 | void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp) |
216 | { | 216 | { |
217 | struct mt76x0_chan_map { | 217 | struct mt76x0_chan_map { |
218 | u8 chan; | 218 | u8 chan; |
219 | u8 offset; | 219 | u8 offset; |
220 | } chan_map[] = { | 220 | } chan_map[] = { |
221 | { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 }, | 221 | { 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 }, |
222 | { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 }, | 222 | { 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 }, |
223 | { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 }, | 223 | { 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 }, |
224 | { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 }, | 224 | { 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 }, |
225 | { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 }, | 225 | { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 }, |
226 | { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 }, | 226 | { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 }, |
227 | { 167, 17 }, { 171, 18 }, { 173, 19 }, | 227 | { 167, 34 }, { 171, 36 }, { 175, 38 }, |
228 | }; | 228 | }; |
229 | struct ieee80211_channel *chan = dev->mt76.chandef.chan; | 229 | struct ieee80211_channel *chan = dev->mt76.chandef.chan; |
230 | u8 offset, addr; | 230 | u8 offset, addr; |
231 | int i, idx = 0; | ||
231 | u16 data; | 232 | u16 data; |
232 | int i; | ||
233 | 233 | ||
234 | if (mt76x0_tssi_enabled(dev)) { | 234 | if (mt76x0_tssi_enabled(dev)) { |
235 | s8 target_power; | 235 | s8 target_power; |
@@ -239,14 +239,14 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) | |||
239 | else | 239 | else |
240 | data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); | 240 | data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); |
241 | target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; | 241 | target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; |
242 | info[0] = target_power + mt76x0_get_delta(dev); | 242 | *tp = target_power + mt76x0_get_delta(dev); |
243 | info[1] = 0; | ||
244 | 243 | ||
245 | return; | 244 | return; |
246 | } | 245 | } |
247 | 246 | ||
248 | for (i = 0; i < ARRAY_SIZE(chan_map); i++) { | 247 | for (i = 0; i < ARRAY_SIZE(chan_map); i++) { |
249 | if (chan_map[i].chan <= chan->hw_value) { | 248 | if (chan->hw_value <= chan_map[i].chan) { |
249 | idx = (chan->hw_value == chan_map[i].chan); | ||
250 | offset = chan_map[i].offset; | 250 | offset = chan_map[i].offset; |
251 | break; | 251 | break; |
252 | } | 252 | } |
@@ -258,13 +258,16 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) | |||
258 | addr = MT_EE_TX_POWER_DELTA_BW80 + offset; | 258 | addr = MT_EE_TX_POWER_DELTA_BW80 + offset; |
259 | } else { | 259 | } else { |
260 | switch (chan->hw_value) { | 260 | switch (chan->hw_value) { |
261 | case 42: | ||
262 | offset = 2; | ||
263 | break; | ||
261 | case 58: | 264 | case 58: |
262 | offset = 8; | 265 | offset = 8; |
263 | break; | 266 | break; |
264 | case 106: | 267 | case 106: |
265 | offset = 14; | 268 | offset = 14; |
266 | break; | 269 | break; |
267 | case 112: | 270 | case 122: |
268 | offset = 20; | 271 | offset = 20; |
269 | break; | 272 | break; |
270 | case 155: | 273 | case 155: |
@@ -277,14 +280,9 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) | |||
277 | } | 280 | } |
278 | 281 | ||
279 | data = mt76x02_eeprom_get(dev, addr); | 282 | data = mt76x02_eeprom_get(dev, addr); |
280 | 283 | *tp = data >> (8 * idx); | |
281 | info[0] = data; | 284 | if (*tp < 0 || *tp > 0x3f) |
282 | if (!info[0] || info[0] > 0x3f) | 285 | *tp = 5; |
283 | info[0] = 5; | ||
284 | |||
285 | info[1] = data >> 8; | ||
286 | if (!info[1] || info[1] > 0x3f) | ||
287 | info[1] = 5; | ||
288 | } | 286 | } |
289 | 287 | ||
290 | static int mt76x0_check_eeprom(struct mt76x02_dev *dev) | 288 | static int mt76x0_check_eeprom(struct mt76x02_dev *dev) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h index ee9ade9f3c8b..42b259f90b6d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h | |||
@@ -26,7 +26,7 @@ struct mt76x02_dev; | |||
26 | int mt76x0_eeprom_init(struct mt76x02_dev *dev); | 26 | int mt76x0_eeprom_init(struct mt76x02_dev *dev); |
27 | void mt76x0_read_rx_gain(struct mt76x02_dev *dev); | 27 | void mt76x0_read_rx_gain(struct mt76x02_dev *dev); |
28 | void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); | 28 | void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); |
29 | void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info); | 29 | void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp); |
30 | 30 | ||
31 | static inline s8 s6_to_s8(u32 val) | 31 | static inline s8 s6_to_s8(u32 val) |
32 | { | 32 | { |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index 1eb1a802ed20..b6166703ad76 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c | |||
@@ -845,17 +845,17 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev) | |||
845 | void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) | 845 | void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) |
846 | { | 846 | { |
847 | struct mt76_rate_power *t = &dev->mt76.rate_power; | 847 | struct mt76_rate_power *t = &dev->mt76.rate_power; |
848 | u8 info[2]; | 848 | s8 info; |
849 | 849 | ||
850 | mt76x0_get_tx_power_per_rate(dev); | 850 | mt76x0_get_tx_power_per_rate(dev); |
851 | mt76x0_get_power_info(dev, info); | 851 | mt76x0_get_power_info(dev, &info); |
852 | 852 | ||
853 | mt76x02_add_rate_power_offset(t, info[0]); | 853 | mt76x02_add_rate_power_offset(t, info); |
854 | mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); | 854 | mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); |
855 | dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); | 855 | dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); |
856 | mt76x02_add_rate_power_offset(t, -info[0]); | 856 | mt76x02_add_rate_power_offset(t, -info); |
857 | 857 | ||
858 | mt76x02_phy_set_txpower(dev, info[0], info[1]); | 858 | mt76x02_phy_set_txpower(dev, info, info); |
859 | } | 859 | } |
860 | 860 | ||
861 | void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) | 861 | void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) |
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index bd10165d7eec..4d4b07701149 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c | |||
@@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) | |||
164 | } | 164 | } |
165 | 165 | ||
166 | sdio_claim_host(func); | 166 | sdio_claim_host(func); |
167 | /* | ||
168 | * To guarantee that the SDIO card is power cycled, as required to make | ||
169 | * the FW programming to succeed, let's do a brute force HW reset. | ||
170 | */ | ||
171 | mmc_hw_reset(card->host); | ||
172 | |||
167 | sdio_enable_func(func); | 173 | sdio_enable_func(func); |
168 | sdio_release_host(func); | 174 | sdio_release_host(func); |
169 | 175 | ||
@@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) | |||
174 | { | 180 | { |
175 | struct sdio_func *func = dev_to_sdio_func(glue->dev); | 181 | struct sdio_func *func = dev_to_sdio_func(glue->dev); |
176 | struct mmc_card *card = func->card; | 182 | struct mmc_card *card = func->card; |
177 | int error; | ||
178 | 183 | ||
179 | sdio_claim_host(func); | 184 | sdio_claim_host(func); |
180 | sdio_disable_func(func); | 185 | sdio_disable_func(func); |
181 | sdio_release_host(func); | 186 | sdio_release_host(func); |
182 | 187 | ||
183 | /* Let runtime PM know the card is powered off */ | 188 | /* Let runtime PM know the card is powered off */ |
184 | error = pm_runtime_put(&card->dev); | 189 | pm_runtime_put(&card->dev); |
185 | if (error < 0 && error != -EBUSY) { | ||
186 | dev_err(&card->dev, "%s failed: %i\n", __func__, error); | ||
187 | |||
188 | return error; | ||
189 | } | ||
190 | |||
191 | return 0; | 190 | return 0; |
192 | } | 191 | } |
193 | 192 | ||
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 0ee026947f20..122059ecad84 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/hashtable.h> | 22 | #include <linux/hashtable.h> |
23 | #include <linux/ip.h> | 23 | #include <linux/ip.h> |
24 | #include <linux/refcount.h> | 24 | #include <linux/refcount.h> |
25 | #include <linux/workqueue.h> | ||
25 | 26 | ||
26 | #include <net/ipv6.h> | 27 | #include <net/ipv6.h> |
27 | #include <net/if_inet6.h> | 28 | #include <net/if_inet6.h> |
@@ -789,6 +790,7 @@ struct qeth_card { | |||
789 | struct qeth_seqno seqno; | 790 | struct qeth_seqno seqno; |
790 | struct qeth_card_options options; | 791 | struct qeth_card_options options; |
791 | 792 | ||
793 | struct workqueue_struct *event_wq; | ||
792 | wait_queue_head_t wait_q; | 794 | wait_queue_head_t wait_q; |
793 | spinlock_t mclock; | 795 | spinlock_t mclock; |
794 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | 796 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; |
@@ -962,7 +964,6 @@ extern const struct attribute_group *qeth_osn_attr_groups[]; | |||
962 | extern const struct attribute_group qeth_device_attr_group; | 964 | extern const struct attribute_group qeth_device_attr_group; |
963 | extern const struct attribute_group qeth_device_blkt_group; | 965 | extern const struct attribute_group qeth_device_blkt_group; |
964 | extern const struct device_type qeth_generic_devtype; | 966 | extern const struct device_type qeth_generic_devtype; |
965 | extern struct workqueue_struct *qeth_wq; | ||
966 | 967 | ||
967 | int qeth_card_hw_is_reachable(struct qeth_card *); | 968 | int qeth_card_hw_is_reachable(struct qeth_card *); |
968 | const char *qeth_get_cardname_short(struct qeth_card *); | 969 | const char *qeth_get_cardname_short(struct qeth_card *); |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e63e03143ca7..89f912213e62 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -74,8 +74,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, | |||
74 | static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); | 74 | static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); |
75 | static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); | 75 | static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); |
76 | 76 | ||
77 | struct workqueue_struct *qeth_wq; | 77 | static struct workqueue_struct *qeth_wq; |
78 | EXPORT_SYMBOL_GPL(qeth_wq); | ||
79 | 78 | ||
80 | int qeth_card_hw_is_reachable(struct qeth_card *card) | 79 | int qeth_card_hw_is_reachable(struct qeth_card *card) |
81 | { | 80 | { |
@@ -566,6 +565,7 @@ static int __qeth_issue_next_read(struct qeth_card *card) | |||
566 | QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", | 565 | QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", |
567 | rc, CARD_DEVID(card)); | 566 | rc, CARD_DEVID(card)); |
568 | atomic_set(&channel->irq_pending, 0); | 567 | atomic_set(&channel->irq_pending, 0); |
568 | qeth_release_buffer(channel, iob); | ||
569 | card->read_or_write_problem = 1; | 569 | card->read_or_write_problem = 1; |
570 | qeth_schedule_recovery(card); | 570 | qeth_schedule_recovery(card); |
571 | wake_up(&card->wait_q); | 571 | wake_up(&card->wait_q); |
@@ -1127,6 +1127,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
1127 | rc = qeth_get_problem(card, cdev, irb); | 1127 | rc = qeth_get_problem(card, cdev, irb); |
1128 | if (rc) { | 1128 | if (rc) { |
1129 | card->read_or_write_problem = 1; | 1129 | card->read_or_write_problem = 1; |
1130 | if (iob) | ||
1131 | qeth_release_buffer(iob->channel, iob); | ||
1130 | qeth_clear_ipacmd_list(card); | 1132 | qeth_clear_ipacmd_list(card); |
1131 | qeth_schedule_recovery(card); | 1133 | qeth_schedule_recovery(card); |
1132 | goto out; | 1134 | goto out; |
@@ -1466,6 +1468,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) | |||
1466 | CARD_RDEV(card) = gdev->cdev[0]; | 1468 | CARD_RDEV(card) = gdev->cdev[0]; |
1467 | CARD_WDEV(card) = gdev->cdev[1]; | 1469 | CARD_WDEV(card) = gdev->cdev[1]; |
1468 | CARD_DDEV(card) = gdev->cdev[2]; | 1470 | CARD_DDEV(card) = gdev->cdev[2]; |
1471 | |||
1472 | card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev)); | ||
1473 | if (!card->event_wq) | ||
1474 | goto out_wq; | ||
1469 | if (qeth_setup_channel(&card->read, true)) | 1475 | if (qeth_setup_channel(&card->read, true)) |
1470 | goto out_ip; | 1476 | goto out_ip; |
1471 | if (qeth_setup_channel(&card->write, true)) | 1477 | if (qeth_setup_channel(&card->write, true)) |
@@ -1481,6 +1487,8 @@ out_data: | |||
1481 | out_channel: | 1487 | out_channel: |
1482 | qeth_clean_channel(&card->read); | 1488 | qeth_clean_channel(&card->read); |
1483 | out_ip: | 1489 | out_ip: |
1490 | destroy_workqueue(card->event_wq); | ||
1491 | out_wq: | ||
1484 | dev_set_drvdata(&gdev->dev, NULL); | 1492 | dev_set_drvdata(&gdev->dev, NULL); |
1485 | kfree(card); | 1493 | kfree(card); |
1486 | out: | 1494 | out: |
@@ -1809,6 +1817,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card, | |||
1809 | QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); | 1817 | QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); |
1810 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 1818 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
1811 | atomic_set(&channel->irq_pending, 0); | 1819 | atomic_set(&channel->irq_pending, 0); |
1820 | qeth_release_buffer(channel, iob); | ||
1812 | wake_up(&card->wait_q); | 1821 | wake_up(&card->wait_q); |
1813 | return rc; | 1822 | return rc; |
1814 | } | 1823 | } |
@@ -1878,6 +1887,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card, | |||
1878 | rc); | 1887 | rc); |
1879 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 1888 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
1880 | atomic_set(&channel->irq_pending, 0); | 1889 | atomic_set(&channel->irq_pending, 0); |
1890 | qeth_release_buffer(channel, iob); | ||
1881 | wake_up(&card->wait_q); | 1891 | wake_up(&card->wait_q); |
1882 | return rc; | 1892 | return rc; |
1883 | } | 1893 | } |
@@ -2058,6 +2068,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
2058 | } | 2068 | } |
2059 | reply = qeth_alloc_reply(card); | 2069 | reply = qeth_alloc_reply(card); |
2060 | if (!reply) { | 2070 | if (!reply) { |
2071 | qeth_release_buffer(channel, iob); | ||
2061 | return -ENOMEM; | 2072 | return -ENOMEM; |
2062 | } | 2073 | } |
2063 | reply->callback = reply_cb; | 2074 | reply->callback = reply_cb; |
@@ -2389,11 +2400,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) | |||
2389 | return 0; | 2400 | return 0; |
2390 | } | 2401 | } |
2391 | 2402 | ||
2392 | static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) | 2403 | static void qeth_free_output_queue(struct qeth_qdio_out_q *q) |
2393 | { | 2404 | { |
2394 | if (!q) | 2405 | if (!q) |
2395 | return; | 2406 | return; |
2396 | 2407 | ||
2408 | qeth_clear_outq_buffers(q, 1); | ||
2397 | qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); | 2409 | qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); |
2398 | kfree(q); | 2410 | kfree(q); |
2399 | } | 2411 | } |
@@ -2467,10 +2479,8 @@ out_freeoutqbufs: | |||
2467 | card->qdio.out_qs[i]->bufs[j] = NULL; | 2479 | card->qdio.out_qs[i]->bufs[j] = NULL; |
2468 | } | 2480 | } |
2469 | out_freeoutq: | 2481 | out_freeoutq: |
2470 | while (i > 0) { | 2482 | while (i > 0) |
2471 | qeth_free_qdio_out_buf(card->qdio.out_qs[--i]); | 2483 | qeth_free_output_queue(card->qdio.out_qs[--i]); |
2472 | qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); | ||
2473 | } | ||
2474 | kfree(card->qdio.out_qs); | 2484 | kfree(card->qdio.out_qs); |
2475 | card->qdio.out_qs = NULL; | 2485 | card->qdio.out_qs = NULL; |
2476 | out_freepool: | 2486 | out_freepool: |
@@ -2503,10 +2513,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) | |||
2503 | qeth_free_buffer_pool(card); | 2513 | qeth_free_buffer_pool(card); |
2504 | /* free outbound qdio_qs */ | 2514 | /* free outbound qdio_qs */ |
2505 | if (card->qdio.out_qs) { | 2515 | if (card->qdio.out_qs) { |
2506 | for (i = 0; i < card->qdio.no_out_queues; ++i) { | 2516 | for (i = 0; i < card->qdio.no_out_queues; i++) |
2507 | qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); | 2517 | qeth_free_output_queue(card->qdio.out_qs[i]); |
2508 | qeth_free_qdio_out_buf(card->qdio.out_qs[i]); | ||
2509 | } | ||
2510 | kfree(card->qdio.out_qs); | 2518 | kfree(card->qdio.out_qs); |
2511 | card->qdio.out_qs = NULL; | 2519 | card->qdio.out_qs = NULL; |
2512 | } | 2520 | } |
@@ -5028,6 +5036,7 @@ static void qeth_core_free_card(struct qeth_card *card) | |||
5028 | qeth_clean_channel(&card->read); | 5036 | qeth_clean_channel(&card->read); |
5029 | qeth_clean_channel(&card->write); | 5037 | qeth_clean_channel(&card->write); |
5030 | qeth_clean_channel(&card->data); | 5038 | qeth_clean_channel(&card->data); |
5039 | destroy_workqueue(card->event_wq); | ||
5031 | qeth_free_qdio_buffers(card); | 5040 | qeth_free_qdio_buffers(card); |
5032 | unregister_service_level(&card->qeth_service_level); | 5041 | unregister_service_level(&card->qeth_service_level); |
5033 | dev_set_drvdata(&card->gdev->dev, NULL); | 5042 | dev_set_drvdata(&card->gdev->dev, NULL); |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index f108d4b44605..a43de2f9bcac 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -369,6 +369,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) | |||
369 | qeth_clear_cmd_buffers(&card->read); | 369 | qeth_clear_cmd_buffers(&card->read); |
370 | qeth_clear_cmd_buffers(&card->write); | 370 | qeth_clear_cmd_buffers(&card->write); |
371 | } | 371 | } |
372 | |||
373 | flush_workqueue(card->event_wq); | ||
372 | } | 374 | } |
373 | 375 | ||
374 | static int qeth_l2_process_inbound_buffer(struct qeth_card *card, | 376 | static int qeth_l2_process_inbound_buffer(struct qeth_card *card, |
@@ -801,6 +803,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) | |||
801 | 803 | ||
802 | if (cgdev->state == CCWGROUP_ONLINE) | 804 | if (cgdev->state == CCWGROUP_ONLINE) |
803 | qeth_l2_set_offline(cgdev); | 805 | qeth_l2_set_offline(cgdev); |
806 | |||
807 | cancel_work_sync(&card->close_dev_work); | ||
804 | if (qeth_netdev_is_registered(card->dev)) | 808 | if (qeth_netdev_is_registered(card->dev)) |
805 | unregister_netdev(card->dev); | 809 | unregister_netdev(card->dev); |
806 | } | 810 | } |
@@ -1434,7 +1438,7 @@ static void qeth_bridge_state_change(struct qeth_card *card, | |||
1434 | data->card = card; | 1438 | data->card = card; |
1435 | memcpy(&data->qports, qports, | 1439 | memcpy(&data->qports, qports, |
1436 | sizeof(struct qeth_sbp_state_change) + extrasize); | 1440 | sizeof(struct qeth_sbp_state_change) + extrasize); |
1437 | queue_work(qeth_wq, &data->worker); | 1441 | queue_work(card->event_wq, &data->worker); |
1438 | } | 1442 | } |
1439 | 1443 | ||
1440 | struct qeth_bridge_host_data { | 1444 | struct qeth_bridge_host_data { |
@@ -1506,7 +1510,7 @@ static void qeth_bridge_host_event(struct qeth_card *card, | |||
1506 | data->card = card; | 1510 | data->card = card; |
1507 | memcpy(&data->hostevs, hostevs, | 1511 | memcpy(&data->hostevs, hostevs, |
1508 | sizeof(struct qeth_ipacmd_addr_change) + extrasize); | 1512 | sizeof(struct qeth_ipacmd_addr_change) + extrasize); |
1509 | queue_work(qeth_wq, &data->worker); | 1513 | queue_work(card->event_wq, &data->worker); |
1510 | } | 1514 | } |
1511 | 1515 | ||
1512 | /* SETBRIDGEPORT support; sending commands */ | 1516 | /* SETBRIDGEPORT support; sending commands */ |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 42a7cdc59b76..df34bff4ac31 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -1433,6 +1433,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) | |||
1433 | qeth_clear_cmd_buffers(&card->read); | 1433 | qeth_clear_cmd_buffers(&card->read); |
1434 | qeth_clear_cmd_buffers(&card->write); | 1434 | qeth_clear_cmd_buffers(&card->write); |
1435 | } | 1435 | } |
1436 | |||
1437 | flush_workqueue(card->event_wq); | ||
1436 | } | 1438 | } |
1437 | 1439 | ||
1438 | /* | 1440 | /* |
@@ -2338,6 +2340,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) | |||
2338 | if (cgdev->state == CCWGROUP_ONLINE) | 2340 | if (cgdev->state == CCWGROUP_ONLINE) |
2339 | qeth_l3_set_offline(cgdev); | 2341 | qeth_l3_set_offline(cgdev); |
2340 | 2342 | ||
2343 | cancel_work_sync(&card->close_dev_work); | ||
2341 | if (qeth_netdev_is_registered(card->dev)) | 2344 | if (qeth_netdev_is_registered(card->dev)) |
2342 | unregister_netdev(card->dev); | 2345 | unregister_netdev(card->dev); |
2343 | qeth_l3_clear_ip_htable(card, 0); | 2346 | qeth_l3_clear_ip_htable(card, 0); |
diff --git a/include/linux/filter.h b/include/linux/filter.h index ad106d845b22..e532fcc6e4b5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb) | |||
591 | return qdisc_skb_cb(skb)->data; | 591 | return qdisc_skb_cb(skb)->data; |
592 | } | 592 | } |
593 | 593 | ||
594 | static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | 594 | static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, |
595 | struct sk_buff *skb) | 595 | struct sk_buff *skb) |
596 | { | 596 | { |
597 | u8 *cb_data = bpf_skb_cb(skb); | 597 | u8 *cb_data = bpf_skb_cb(skb); |
598 | u8 cb_saved[BPF_SKB_CB_LEN]; | 598 | u8 cb_saved[BPF_SKB_CB_LEN]; |
@@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | |||
611 | return res; | 611 | return res; |
612 | } | 612 | } |
613 | 613 | ||
614 | static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | ||
615 | struct sk_buff *skb) | ||
616 | { | ||
617 | u32 res; | ||
618 | |||
619 | preempt_disable(); | ||
620 | res = __bpf_prog_run_save_cb(prog, skb); | ||
621 | preempt_enable(); | ||
622 | return res; | ||
623 | } | ||
624 | |||
614 | static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, | 625 | static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, |
615 | struct sk_buff *skb) | 626 | struct sk_buff *skb) |
616 | { | 627 | { |
617 | u8 *cb_data = bpf_skb_cb(skb); | 628 | u8 *cb_data = bpf_skb_cb(skb); |
629 | u32 res; | ||
618 | 630 | ||
619 | if (unlikely(prog->cb_access)) | 631 | if (unlikely(prog->cb_access)) |
620 | memset(cb_data, 0, BPF_SKB_CB_LEN); | 632 | memset(cb_data, 0, BPF_SKB_CB_LEN); |
621 | 633 | ||
622 | return BPF_PROG_RUN(prog, skb); | 634 | preempt_disable(); |
635 | res = BPF_PROG_RUN(prog, skb); | ||
636 | preempt_enable(); | ||
637 | return res; | ||
623 | } | 638 | } |
624 | 639 | ||
625 | static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, | 640 | static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1377d085ef99..86dbb3e29139 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1483,6 +1483,7 @@ struct net_device_ops { | |||
1483 | * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook | 1483 | * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook |
1484 | * @IFF_FAILOVER: device is a failover master device | 1484 | * @IFF_FAILOVER: device is a failover master device |
1485 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device | 1485 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device |
1486 | * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device | ||
1486 | */ | 1487 | */ |
1487 | enum netdev_priv_flags { | 1488 | enum netdev_priv_flags { |
1488 | IFF_802_1Q_VLAN = 1<<0, | 1489 | IFF_802_1Q_VLAN = 1<<0, |
@@ -1514,6 +1515,7 @@ enum netdev_priv_flags { | |||
1514 | IFF_NO_RX_HANDLER = 1<<26, | 1515 | IFF_NO_RX_HANDLER = 1<<26, |
1515 | IFF_FAILOVER = 1<<27, | 1516 | IFF_FAILOVER = 1<<27, |
1516 | IFF_FAILOVER_SLAVE = 1<<28, | 1517 | IFF_FAILOVER_SLAVE = 1<<28, |
1518 | IFF_L3MDEV_RX_HANDLER = 1<<29, | ||
1517 | }; | 1519 | }; |
1518 | 1520 | ||
1519 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN | 1521 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN |
@@ -1544,6 +1546,7 @@ enum netdev_priv_flags { | |||
1544 | #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER | 1546 | #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER |
1545 | #define IFF_FAILOVER IFF_FAILOVER | 1547 | #define IFF_FAILOVER IFF_FAILOVER |
1546 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE | 1548 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE |
1549 | #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER | ||
1547 | 1550 | ||
1548 | /** | 1551 | /** |
1549 | * struct net_device - The DEVICE structure. | 1552 | * struct net_device - The DEVICE structure. |
@@ -4549,6 +4552,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev) | |||
4549 | return dev->priv_flags & IFF_SUPP_NOFCS; | 4552 | return dev->priv_flags & IFF_SUPP_NOFCS; |
4550 | } | 4553 | } |
4551 | 4554 | ||
4555 | static inline bool netif_has_l3_rx_handler(const struct net_device *dev) | ||
4556 | { | ||
4557 | return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; | ||
4558 | } | ||
4559 | |||
4552 | static inline bool netif_is_l3_master(const struct net_device *dev) | 4560 | static inline bool netif_is_l3_master(const struct net_device *dev) |
4553 | { | 4561 | { |
4554 | return dev->priv_flags & IFF_L3MDEV_MASTER; | 4562 | return dev->priv_flags & IFF_L3MDEV_MASTER; |
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 7ddfc65586b0..4335bd771ce5 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h | |||
@@ -184,6 +184,7 @@ struct plat_stmmacenet_data { | |||
184 | struct clk *pclk; | 184 | struct clk *pclk; |
185 | struct clk *clk_ptp_ref; | 185 | struct clk *clk_ptp_ref; |
186 | unsigned int clk_ptp_rate; | 186 | unsigned int clk_ptp_rate; |
187 | unsigned int clk_ref_rate; | ||
187 | struct reset_control *stmmac_rst; | 188 | struct reset_control *stmmac_rst; |
188 | struct stmmac_axi *axi; | 189 | struct stmmac_axi *axi; |
189 | int has_gmac4; | 190 | int has_gmac4; |
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 78fa0ac4613c..5175fd63cd82 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h | |||
@@ -153,7 +153,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto) | |||
153 | 153 | ||
154 | if (netif_is_l3_slave(skb->dev)) | 154 | if (netif_is_l3_slave(skb->dev)) |
155 | master = netdev_master_upper_dev_get_rcu(skb->dev); | 155 | master = netdev_master_upper_dev_get_rcu(skb->dev); |
156 | else if (netif_is_l3_master(skb->dev)) | 156 | else if (netif_is_l3_master(skb->dev) || |
157 | netif_has_l3_rx_handler(skb->dev)) | ||
157 | master = skb->dev; | 158 | master = skb->dev; |
158 | 159 | ||
159 | if (master && master->l3mdev_ops->l3mdev_l3_rcv) | 160 | if (master && master->l3mdev_ops->l3mdev_l3_rcv) |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 841835a387e1..b4984bbbe157 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
@@ -469,9 +469,7 @@ struct nft_set_binding { | |||
469 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | 469 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, |
470 | struct nft_set_binding *binding); | 470 | struct nft_set_binding *binding); |
471 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, | 471 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, |
472 | struct nft_set_binding *binding); | 472 | struct nft_set_binding *binding, bool commit); |
473 | void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
474 | struct nft_set_binding *binding); | ||
475 | void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set); | 473 | void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set); |
476 | 474 | ||
477 | /** | 475 | /** |
@@ -721,6 +719,13 @@ struct nft_expr_type { | |||
721 | #define NFT_EXPR_STATEFUL 0x1 | 719 | #define NFT_EXPR_STATEFUL 0x1 |
722 | #define NFT_EXPR_GC 0x2 | 720 | #define NFT_EXPR_GC 0x2 |
723 | 721 | ||
722 | enum nft_trans_phase { | ||
723 | NFT_TRANS_PREPARE, | ||
724 | NFT_TRANS_ABORT, | ||
725 | NFT_TRANS_COMMIT, | ||
726 | NFT_TRANS_RELEASE | ||
727 | }; | ||
728 | |||
724 | /** | 729 | /** |
725 | * struct nft_expr_ops - nf_tables expression operations | 730 | * struct nft_expr_ops - nf_tables expression operations |
726 | * | 731 | * |
@@ -750,7 +755,8 @@ struct nft_expr_ops { | |||
750 | void (*activate)(const struct nft_ctx *ctx, | 755 | void (*activate)(const struct nft_ctx *ctx, |
751 | const struct nft_expr *expr); | 756 | const struct nft_expr *expr); |
752 | void (*deactivate)(const struct nft_ctx *ctx, | 757 | void (*deactivate)(const struct nft_ctx *ctx, |
753 | const struct nft_expr *expr); | 758 | const struct nft_expr *expr, |
759 | enum nft_trans_phase phase); | ||
754 | void (*destroy)(const struct nft_ctx *ctx, | 760 | void (*destroy)(const struct nft_ctx *ctx, |
755 | const struct nft_expr *expr); | 761 | const struct nft_expr *expr); |
756 | void (*destroy_clone)(const struct nft_ctx *ctx, | 762 | void (*destroy_clone)(const struct nft_ctx *ctx, |
@@ -1323,12 +1329,15 @@ struct nft_trans_rule { | |||
1323 | struct nft_trans_set { | 1329 | struct nft_trans_set { |
1324 | struct nft_set *set; | 1330 | struct nft_set *set; |
1325 | u32 set_id; | 1331 | u32 set_id; |
1332 | bool bound; | ||
1326 | }; | 1333 | }; |
1327 | 1334 | ||
1328 | #define nft_trans_set(trans) \ | 1335 | #define nft_trans_set(trans) \ |
1329 | (((struct nft_trans_set *)trans->data)->set) | 1336 | (((struct nft_trans_set *)trans->data)->set) |
1330 | #define nft_trans_set_id(trans) \ | 1337 | #define nft_trans_set_id(trans) \ |
1331 | (((struct nft_trans_set *)trans->data)->set_id) | 1338 | (((struct nft_trans_set *)trans->data)->set_id) |
1339 | #define nft_trans_set_bound(trans) \ | ||
1340 | (((struct nft_trans_set *)trans->data)->bound) | ||
1332 | 1341 | ||
1333 | struct nft_trans_chain { | 1342 | struct nft_trans_chain { |
1334 | bool update; | 1343 | bool update; |
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index befe570be5ba..c57bd10340ed 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c | |||
@@ -1459,7 +1459,8 @@ static int btf_modifier_resolve(struct btf_verifier_env *env, | |||
1459 | 1459 | ||
1460 | /* "typedef void new_void", "const void"...etc */ | 1460 | /* "typedef void new_void", "const void"...etc */ |
1461 | if (!btf_type_is_void(next_type) && | 1461 | if (!btf_type_is_void(next_type) && |
1462 | !btf_type_is_fwd(next_type)) { | 1462 | !btf_type_is_fwd(next_type) && |
1463 | !btf_type_is_func_proto(next_type)) { | ||
1463 | btf_verifier_log_type(env, v->t, "Invalid type_id"); | 1464 | btf_verifier_log_type(env, v->t, "Invalid type_id"); |
1464 | return -EINVAL; | 1465 | return -EINVAL; |
1465 | } | 1466 | } |
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index ab612fe9862f..d17d05570a3f 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c | |||
@@ -572,7 +572,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, | |||
572 | bpf_compute_and_save_data_end(skb, &saved_data_end); | 572 | bpf_compute_and_save_data_end(skb, &saved_data_end); |
573 | 573 | ||
574 | ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, | 574 | ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, |
575 | bpf_prog_run_save_cb); | 575 | __bpf_prog_run_save_cb); |
576 | bpf_restore_data_end(skb, saved_data_end); | 576 | bpf_restore_data_end(skb, saved_data_end); |
577 | __skb_pull(skb, offset); | 577 | __skb_pull(skb, offset); |
578 | skb->sk = save_sk; | 578 | skb->sk = save_sk; |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 4b7c76765d9d..f9274114c88d 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
@@ -686,7 +686,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) | |||
686 | } | 686 | } |
687 | 687 | ||
688 | if (htab_is_prealloc(htab)) { | 688 | if (htab_is_prealloc(htab)) { |
689 | pcpu_freelist_push(&htab->freelist, &l->fnode); | 689 | __pcpu_freelist_push(&htab->freelist, &l->fnode); |
690 | } else { | 690 | } else { |
691 | atomic_dec(&htab->count); | 691 | atomic_dec(&htab->count); |
692 | l->htab = htab; | 692 | l->htab = htab; |
@@ -748,7 +748,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, | |||
748 | } else { | 748 | } else { |
749 | struct pcpu_freelist_node *l; | 749 | struct pcpu_freelist_node *l; |
750 | 750 | ||
751 | l = pcpu_freelist_pop(&htab->freelist); | 751 | l = __pcpu_freelist_pop(&htab->freelist); |
752 | if (!l) | 752 | if (!l) |
753 | return ERR_PTR(-E2BIG); | 753 | return ERR_PTR(-E2BIG); |
754 | l_new = container_of(l, struct htab_elem, fnode); | 754 | l_new = container_of(l, struct htab_elem, fnode); |
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c index 673fa6fe2d73..0c1b4ba9e90e 100644 --- a/kernel/bpf/percpu_freelist.c +++ b/kernel/bpf/percpu_freelist.c | |||
@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s) | |||
28 | free_percpu(s->freelist); | 28 | free_percpu(s->freelist); |
29 | } | 29 | } |
30 | 30 | ||
31 | static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head, | 31 | static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head, |
32 | struct pcpu_freelist_node *node) | 32 | struct pcpu_freelist_node *node) |
33 | { | 33 | { |
34 | raw_spin_lock(&head->lock); | 34 | raw_spin_lock(&head->lock); |
35 | node->next = head->first; | 35 | node->next = head->first; |
@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head, | |||
37 | raw_spin_unlock(&head->lock); | 37 | raw_spin_unlock(&head->lock); |
38 | } | 38 | } |
39 | 39 | ||
40 | void pcpu_freelist_push(struct pcpu_freelist *s, | 40 | void __pcpu_freelist_push(struct pcpu_freelist *s, |
41 | struct pcpu_freelist_node *node) | 41 | struct pcpu_freelist_node *node) |
42 | { | 42 | { |
43 | struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); | 43 | struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); |
44 | 44 | ||
45 | __pcpu_freelist_push(head, node); | 45 | ___pcpu_freelist_push(head, node); |
46 | } | ||
47 | |||
48 | void pcpu_freelist_push(struct pcpu_freelist *s, | ||
49 | struct pcpu_freelist_node *node) | ||
50 | { | ||
51 | unsigned long flags; | ||
52 | |||
53 | local_irq_save(flags); | ||
54 | __pcpu_freelist_push(s, node); | ||
55 | local_irq_restore(flags); | ||
46 | } | 56 | } |
47 | 57 | ||
48 | void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, | 58 | void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, |
@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, | |||
63 | for_each_possible_cpu(cpu) { | 73 | for_each_possible_cpu(cpu) { |
64 | again: | 74 | again: |
65 | head = per_cpu_ptr(s->freelist, cpu); | 75 | head = per_cpu_ptr(s->freelist, cpu); |
66 | __pcpu_freelist_push(head, buf); | 76 | ___pcpu_freelist_push(head, buf); |
67 | i++; | 77 | i++; |
68 | buf += elem_size; | 78 | buf += elem_size; |
69 | if (i == nr_elems) | 79 | if (i == nr_elems) |
@@ -74,14 +84,12 @@ again: | |||
74 | local_irq_restore(flags); | 84 | local_irq_restore(flags); |
75 | } | 85 | } |
76 | 86 | ||
77 | struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) | 87 | struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s) |
78 | { | 88 | { |
79 | struct pcpu_freelist_head *head; | 89 | struct pcpu_freelist_head *head; |
80 | struct pcpu_freelist_node *node; | 90 | struct pcpu_freelist_node *node; |
81 | unsigned long flags; | ||
82 | int orig_cpu, cpu; | 91 | int orig_cpu, cpu; |
83 | 92 | ||
84 | local_irq_save(flags); | ||
85 | orig_cpu = cpu = raw_smp_processor_id(); | 93 | orig_cpu = cpu = raw_smp_processor_id(); |
86 | while (1) { | 94 | while (1) { |
87 | head = per_cpu_ptr(s->freelist, cpu); | 95 | head = per_cpu_ptr(s->freelist, cpu); |
@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) | |||
89 | node = head->first; | 97 | node = head->first; |
90 | if (node) { | 98 | if (node) { |
91 | head->first = node->next; | 99 | head->first = node->next; |
92 | raw_spin_unlock_irqrestore(&head->lock, flags); | 100 | raw_spin_unlock(&head->lock); |
93 | return node; | 101 | return node; |
94 | } | 102 | } |
95 | raw_spin_unlock(&head->lock); | 103 | raw_spin_unlock(&head->lock); |
96 | cpu = cpumask_next(cpu, cpu_possible_mask); | 104 | cpu = cpumask_next(cpu, cpu_possible_mask); |
97 | if (cpu >= nr_cpu_ids) | 105 | if (cpu >= nr_cpu_ids) |
98 | cpu = 0; | 106 | cpu = 0; |
99 | if (cpu == orig_cpu) { | 107 | if (cpu == orig_cpu) |
100 | local_irq_restore(flags); | ||
101 | return NULL; | 108 | return NULL; |
102 | } | ||
103 | } | 109 | } |
104 | } | 110 | } |
111 | |||
112 | struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) | ||
113 | { | ||
114 | struct pcpu_freelist_node *ret; | ||
115 | unsigned long flags; | ||
116 | |||
117 | local_irq_save(flags); | ||
118 | ret = __pcpu_freelist_pop(s); | ||
119 | local_irq_restore(flags); | ||
120 | return ret; | ||
121 | } | ||
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h index 3049aae8ea1e..c3960118e617 100644 --- a/kernel/bpf/percpu_freelist.h +++ b/kernel/bpf/percpu_freelist.h | |||
@@ -22,8 +22,12 @@ struct pcpu_freelist_node { | |||
22 | struct pcpu_freelist_node *next; | 22 | struct pcpu_freelist_node *next; |
23 | }; | 23 | }; |
24 | 24 | ||
25 | /* pcpu_freelist_* do spin_lock_irqsave. */ | ||
25 | void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); | 26 | void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); |
26 | struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *); | 27 | struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *); |
28 | /* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */ | ||
29 | void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); | ||
30 | struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *); | ||
27 | void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, | 31 | void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, |
28 | u32 nr_elems); | 32 | u32 nr_elems); |
29 | int pcpu_freelist_init(struct pcpu_freelist *); | 33 | int pcpu_freelist_init(struct pcpu_freelist *); |
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index b155cd17c1bd..8577bb7f8be6 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -713,8 +713,13 @@ static int map_lookup_elem(union bpf_attr *attr) | |||
713 | 713 | ||
714 | if (bpf_map_is_dev_bound(map)) { | 714 | if (bpf_map_is_dev_bound(map)) { |
715 | err = bpf_map_offload_lookup_elem(map, key, value); | 715 | err = bpf_map_offload_lookup_elem(map, key, value); |
716 | } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || | 716 | goto done; |
717 | map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { | 717 | } |
718 | |||
719 | preempt_disable(); | ||
720 | this_cpu_inc(bpf_prog_active); | ||
721 | if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || | ||
722 | map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { | ||
718 | err = bpf_percpu_hash_copy(map, key, value); | 723 | err = bpf_percpu_hash_copy(map, key, value); |
719 | } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { | 724 | } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { |
720 | err = bpf_percpu_array_copy(map, key, value); | 725 | err = bpf_percpu_array_copy(map, key, value); |
@@ -744,7 +749,10 @@ static int map_lookup_elem(union bpf_attr *attr) | |||
744 | } | 749 | } |
745 | rcu_read_unlock(); | 750 | rcu_read_unlock(); |
746 | } | 751 | } |
752 | this_cpu_dec(bpf_prog_active); | ||
753 | preempt_enable(); | ||
747 | 754 | ||
755 | done: | ||
748 | if (err) | 756 | if (err) |
749 | goto free_value; | 757 | goto free_value; |
750 | 758 | ||
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 8b068adb9da1..f1a86a0d881d 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c | |||
@@ -1204,22 +1204,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog * | |||
1204 | 1204 | ||
1205 | int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | 1205 | int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) |
1206 | { | 1206 | { |
1207 | int err; | 1207 | return __bpf_probe_register(btp, prog); |
1208 | |||
1209 | mutex_lock(&bpf_event_mutex); | ||
1210 | err = __bpf_probe_register(btp, prog); | ||
1211 | mutex_unlock(&bpf_event_mutex); | ||
1212 | return err; | ||
1213 | } | 1208 | } |
1214 | 1209 | ||
1215 | int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | 1210 | int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) |
1216 | { | 1211 | { |
1217 | int err; | 1212 | return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); |
1218 | |||
1219 | mutex_lock(&bpf_event_mutex); | ||
1220 | err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); | ||
1221 | mutex_unlock(&bpf_event_mutex); | ||
1222 | return err; | ||
1223 | } | 1213 | } |
1224 | 1214 | ||
1225 | int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, | 1215 | int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, |
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 6a8ac7626797..e52f8cafe227 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c | |||
@@ -541,38 +541,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt) | |||
541 | static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects, | 541 | static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects, |
542 | int cnt, bool slow) | 542 | int cnt, bool slow) |
543 | { | 543 | { |
544 | struct rhltable rhlt; | 544 | struct rhltable *rhlt; |
545 | unsigned int i, ret; | 545 | unsigned int i, ret; |
546 | const char *key; | 546 | const char *key; |
547 | int err = 0; | 547 | int err = 0; |
548 | 548 | ||
549 | err = rhltable_init(&rhlt, &test_rht_params_dup); | 549 | rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL); |
550 | if (WARN_ON(err)) | 550 | if (WARN_ON(!rhlt)) |
551 | return -EINVAL; | ||
552 | |||
553 | err = rhltable_init(rhlt, &test_rht_params_dup); | ||
554 | if (WARN_ON(err)) { | ||
555 | kfree(rhlt); | ||
551 | return err; | 556 | return err; |
557 | } | ||
552 | 558 | ||
553 | for (i = 0; i < cnt; i++) { | 559 | for (i = 0; i < cnt; i++) { |
554 | rhl_test_objects[i].value.tid = i; | 560 | rhl_test_objects[i].value.tid = i; |
555 | key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead); | 561 | key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead); |
556 | key += test_rht_params_dup.key_offset; | 562 | key += test_rht_params_dup.key_offset; |
557 | 563 | ||
558 | if (slow) { | 564 | if (slow) { |
559 | err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key, | 565 | err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key, |
560 | &rhl_test_objects[i].list_node.rhead)); | 566 | &rhl_test_objects[i].list_node.rhead)); |
561 | if (err == -EAGAIN) | 567 | if (err == -EAGAIN) |
562 | err = 0; | 568 | err = 0; |
563 | } else | 569 | } else |
564 | err = rhltable_insert(&rhlt, | 570 | err = rhltable_insert(rhlt, |
565 | &rhl_test_objects[i].list_node, | 571 | &rhl_test_objects[i].list_node, |
566 | test_rht_params_dup); | 572 | test_rht_params_dup); |
567 | if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast")) | 573 | if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast")) |
568 | goto skip_print; | 574 | goto skip_print; |
569 | } | 575 | } |
570 | 576 | ||
571 | ret = print_ht(&rhlt); | 577 | ret = print_ht(rhlt); |
572 | WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast"); | 578 | WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast"); |
573 | 579 | ||
574 | skip_print: | 580 | skip_print: |
575 | rhltable_destroy(&rhlt); | 581 | rhltable_destroy(rhlt); |
582 | kfree(rhlt); | ||
576 | 583 | ||
577 | return 0; | 584 | return 0; |
578 | } | 585 | } |
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index e8090f099eb8..ef0dec20c7d8 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c | |||
@@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) | |||
104 | 104 | ||
105 | ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); | 105 | ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); |
106 | 106 | ||
107 | /* free the TID stats immediately */ | ||
108 | cfg80211_sinfo_release_content(&sinfo); | ||
109 | |||
107 | dev_put(real_netdev); | 110 | dev_put(real_netdev); |
108 | if (ret == -ENOENT) { | 111 | if (ret == -ENOENT) { |
109 | /* Node is not associated anymore! It would be | 112 | /* Node is not associated anymore! It would be |
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 508f4416dfc9..415d494cbe22 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include "main.h" | 20 | #include "main.h" |
21 | 21 | ||
22 | #include <linux/atomic.h> | 22 | #include <linux/atomic.h> |
23 | #include <linux/bug.h> | ||
24 | #include <linux/byteorder/generic.h> | 23 | #include <linux/byteorder/generic.h> |
25 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
26 | #include <linux/gfp.h> | 25 | #include <linux/gfp.h> |
@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) | |||
179 | parent_dev = __dev_get_by_index((struct net *)parent_net, | 178 | parent_dev = __dev_get_by_index((struct net *)parent_net, |
180 | dev_get_iflink(net_dev)); | 179 | dev_get_iflink(net_dev)); |
181 | /* if we got a NULL parent_dev there is something broken.. */ | 180 | /* if we got a NULL parent_dev there is something broken.. */ |
182 | if (WARN(!parent_dev, "Cannot find parent device")) | 181 | if (!parent_dev) { |
182 | pr_err("Cannot find parent device\n"); | ||
183 | return false; | 183 | return false; |
184 | } | ||
184 | 185 | ||
185 | if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net)) | 186 | if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net)) |
186 | return false; | 187 | return false; |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 5db5a0a4c959..b85ca809e509 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -221,6 +221,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, | |||
221 | 221 | ||
222 | netif_trans_update(soft_iface); | 222 | netif_trans_update(soft_iface); |
223 | vid = batadv_get_vid(skb, 0); | 223 | vid = batadv_get_vid(skb, 0); |
224 | |||
225 | skb_reset_mac_header(skb); | ||
224 | ethhdr = eth_hdr(skb); | 226 | ethhdr = eth_hdr(skb); |
225 | 227 | ||
226 | switch (ntohs(ethhdr->h_proto)) { | 228 | switch (ntohs(ethhdr->h_proto)) { |
diff --git a/net/core/filter.c b/net/core/filter.c index 7559d6835ecb..7a54dc11ac2d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -4112,10 +4112,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, | |||
4112 | /* Only some socketops are supported */ | 4112 | /* Only some socketops are supported */ |
4113 | switch (optname) { | 4113 | switch (optname) { |
4114 | case SO_RCVBUF: | 4114 | case SO_RCVBUF: |
4115 | val = min_t(u32, val, sysctl_rmem_max); | ||
4115 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; | 4116 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; |
4116 | sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); | 4117 | sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); |
4117 | break; | 4118 | break; |
4118 | case SO_SNDBUF: | 4119 | case SO_SNDBUF: |
4120 | val = min_t(u32, val, sysctl_wmem_max); | ||
4119 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; | 4121 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; |
4120 | sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); | 4122 | sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); |
4121 | break; | 4123 | break; |
diff --git a/net/core/skmsg.c b/net/core/skmsg.c index d6d5c20d7044..8c826603bf36 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c | |||
@@ -545,8 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc) | |||
545 | struct sk_psock *psock = container_of(gc, struct sk_psock, gc); | 545 | struct sk_psock *psock = container_of(gc, struct sk_psock, gc); |
546 | 546 | ||
547 | /* No sk_callback_lock since already detached. */ | 547 | /* No sk_callback_lock since already detached. */ |
548 | if (psock->parser.enabled) | 548 | strp_done(&psock->parser.strp); |
549 | strp_done(&psock->parser.strp); | ||
550 | 549 | ||
551 | cancel_work_sync(&psock->work); | 550 | cancel_work_sync(&psock->work); |
552 | 551 | ||
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 6eb837a47b5c..baaaeb2b2c42 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h | |||
@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk, | |||
202 | static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, | 202 | static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, |
203 | u8 pkt, u8 opt, u8 *val, u8 len) | 203 | u8 pkt, u8 opt, u8 *val, u8 len) |
204 | { | 204 | { |
205 | if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL) | 205 | if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options) |
206 | return 0; | 206 | return 0; |
207 | return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len); | 207 | return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len); |
208 | } | 208 | } |
@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, | |||
214 | static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk, | 214 | static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk, |
215 | u8 pkt, u8 opt, u8 *val, u8 len) | 215 | u8 pkt, u8 opt, u8 *val, u8 len) |
216 | { | 216 | { |
217 | if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL) | 217 | if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options) |
218 | return 0; | 218 | return 0; |
219 | return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len); | 219 | return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len); |
220 | } | 220 | } |
diff --git a/net/dsa/master.c b/net/dsa/master.c index 71bb15f491c8..54f5551fb799 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c | |||
@@ -205,6 +205,8 @@ static void dsa_master_reset_mtu(struct net_device *dev) | |||
205 | rtnl_unlock(); | 205 | rtnl_unlock(); |
206 | } | 206 | } |
207 | 207 | ||
208 | static struct lock_class_key dsa_master_addr_list_lock_key; | ||
209 | |||
208 | int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) | 210 | int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) |
209 | { | 211 | { |
210 | int ret; | 212 | int ret; |
@@ -218,6 +220,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) | |||
218 | wmb(); | 220 | wmb(); |
219 | 221 | ||
220 | dev->dsa_ptr = cpu_dp; | 222 | dev->dsa_ptr = cpu_dp; |
223 | lockdep_set_class(&dev->addr_list_lock, | ||
224 | &dsa_master_addr_list_lock_key); | ||
221 | 225 | ||
222 | ret = dsa_master_ethtool_setup(dev); | 226 | ret = dsa_master_ethtool_setup(dev); |
223 | if (ret) | 227 | if (ret) |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index a3fcc1d01615..a1c9fe155057 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev) | |||
140 | static void dsa_slave_change_rx_flags(struct net_device *dev, int change) | 140 | static void dsa_slave_change_rx_flags(struct net_device *dev, int change) |
141 | { | 141 | { |
142 | struct net_device *master = dsa_slave_to_master(dev); | 142 | struct net_device *master = dsa_slave_to_master(dev); |
143 | 143 | if (dev->flags & IFF_UP) { | |
144 | if (change & IFF_ALLMULTI) | 144 | if (change & IFF_ALLMULTI) |
145 | dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1); | 145 | dev_set_allmulti(master, |
146 | if (change & IFF_PROMISC) | 146 | dev->flags & IFF_ALLMULTI ? 1 : -1); |
147 | dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1); | 147 | if (change & IFF_PROMISC) |
148 | dev_set_promiscuity(master, | ||
149 | dev->flags & IFF_PROMISC ? 1 : -1); | ||
150 | } | ||
148 | } | 151 | } |
149 | 152 | ||
150 | static void dsa_slave_set_rx_mode(struct net_device *dev) | 153 | static void dsa_slave_set_rx_mode(struct net_device *dev) |
@@ -639,7 +642,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) | |||
639 | int ret; | 642 | int ret; |
640 | 643 | ||
641 | /* Port's PHY and MAC both need to be EEE capable */ | 644 | /* Port's PHY and MAC both need to be EEE capable */ |
642 | if (!dev->phydev && !dp->pl) | 645 | if (!dev->phydev || !dp->pl) |
643 | return -ENODEV; | 646 | return -ENODEV; |
644 | 647 | ||
645 | if (!ds->ops->set_mac_eee) | 648 | if (!ds->ops->set_mac_eee) |
@@ -659,7 +662,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e) | |||
659 | int ret; | 662 | int ret; |
660 | 663 | ||
661 | /* Port's PHY and MAC both need to be EEE capable */ | 664 | /* Port's PHY and MAC both need to be EEE capable */ |
662 | if (!dev->phydev && !dp->pl) | 665 | if (!dev->phydev || !dp->pl) |
663 | return -ENODEV; | 666 | return -ENODEV; |
664 | 667 | ||
665 | if (!ds->ops->get_mac_eee) | 668 | if (!ds->ops->get_mac_eee) |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 20a64fe6254b..3978f807fa8b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -1455,12 +1455,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
1455 | { | 1455 | { |
1456 | struct ip_tunnel *t = netdev_priv(dev); | 1456 | struct ip_tunnel *t = netdev_priv(dev); |
1457 | struct ip_tunnel_parm *p = &t->parms; | 1457 | struct ip_tunnel_parm *p = &t->parms; |
1458 | __be16 o_flags = p->o_flags; | ||
1459 | |||
1460 | if ((t->erspan_ver == 1 || t->erspan_ver == 2) && | ||
1461 | !t->collect_md) | ||
1462 | o_flags |= TUNNEL_KEY; | ||
1458 | 1463 | ||
1459 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || | 1464 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || |
1460 | nla_put_be16(skb, IFLA_GRE_IFLAGS, | 1465 | nla_put_be16(skb, IFLA_GRE_IFLAGS, |
1461 | gre_tnl_flags_to_gre_flags(p->i_flags)) || | 1466 | gre_tnl_flags_to_gre_flags(p->i_flags)) || |
1462 | nla_put_be16(skb, IFLA_GRE_OFLAGS, | 1467 | nla_put_be16(skb, IFLA_GRE_OFLAGS, |
1463 | gre_tnl_flags_to_gre_flags(p->o_flags)) || | 1468 | gre_tnl_flags_to_gre_flags(o_flags)) || |
1464 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || | 1469 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || |
1465 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || | 1470 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || |
1466 | nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) || | 1471 | nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) || |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 4416368dbd49..801a9a0c217e 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -2098,12 +2098,17 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
2098 | { | 2098 | { |
2099 | struct ip6_tnl *t = netdev_priv(dev); | 2099 | struct ip6_tnl *t = netdev_priv(dev); |
2100 | struct __ip6_tnl_parm *p = &t->parms; | 2100 | struct __ip6_tnl_parm *p = &t->parms; |
2101 | __be16 o_flags = p->o_flags; | ||
2102 | |||
2103 | if ((p->erspan_ver == 1 || p->erspan_ver == 2) && | ||
2104 | !p->collect_md) | ||
2105 | o_flags |= TUNNEL_KEY; | ||
2101 | 2106 | ||
2102 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || | 2107 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || |
2103 | nla_put_be16(skb, IFLA_GRE_IFLAGS, | 2108 | nla_put_be16(skb, IFLA_GRE_IFLAGS, |
2104 | gre_tnl_flags_to_gre_flags(p->i_flags)) || | 2109 | gre_tnl_flags_to_gre_flags(p->i_flags)) || |
2105 | nla_put_be16(skb, IFLA_GRE_OFLAGS, | 2110 | nla_put_be16(skb, IFLA_GRE_OFLAGS, |
2106 | gre_tnl_flags_to_gre_flags(p->o_flags)) || | 2111 | gre_tnl_flags_to_gre_flags(o_flags)) || |
2107 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || | 2112 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || |
2108 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || | 2113 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || |
2109 | nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) || | 2114 | nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) || |
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 8b075f0bc351..6d0b1f3e927b 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb) | |||
23 | struct sock *sk = sk_to_full_sk(skb->sk); | 23 | struct sock *sk = sk_to_full_sk(skb->sk); |
24 | unsigned int hh_len; | 24 | unsigned int hh_len; |
25 | struct dst_entry *dst; | 25 | struct dst_entry *dst; |
26 | int strict = (ipv6_addr_type(&iph->daddr) & | ||
27 | (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)); | ||
26 | struct flowi6 fl6 = { | 28 | struct flowi6 fl6 = { |
27 | .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if : | 29 | .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if : |
28 | rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0, | 30 | strict ? skb_dst(skb)->dev->ifindex : 0, |
29 | .flowi6_mark = skb->mark, | 31 | .flowi6_mark = skb->mark, |
30 | .flowi6_uid = sock_net_uid(net, sk), | 32 | .flowi6_uid = sock_net_uid(net, sk), |
31 | .daddr = iph->daddr, | 33 | .daddr = iph->daddr, |
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 8181ee7e1e27..ee5403cbe655 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c | |||
@@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) | |||
146 | } else { | 146 | } else { |
147 | ip6_flow_hdr(hdr, 0, flowlabel); | 147 | ip6_flow_hdr(hdr, 0, flowlabel); |
148 | hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); | 148 | hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); |
149 | |||
150 | memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); | ||
149 | } | 151 | } |
150 | 152 | ||
151 | hdr->nexthdr = NEXTHDR_ROUTING; | 153 | hdr->nexthdr = NEXTHDR_ROUTING; |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1e03305c0549..e8a1dabef803 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
546 | } | 546 | } |
547 | 547 | ||
548 | err = 0; | 548 | err = 0; |
549 | if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) | 549 | if (__in6_dev_get(skb->dev) && |
550 | !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) | ||
550 | goto out; | 551 | goto out; |
551 | 552 | ||
552 | if (t->parms.iph.daddr == 0) | 553 | if (t->parms.iph.daddr == 0) |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 26f1d435696a..fed6becc5daf 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -83,8 +83,7 @@ | |||
83 | #define L2TP_SLFLAG_S 0x40000000 | 83 | #define L2TP_SLFLAG_S 0x40000000 |
84 | #define L2TP_SL_SEQ_MASK 0x00ffffff | 84 | #define L2TP_SL_SEQ_MASK 0x00ffffff |
85 | 85 | ||
86 | #define L2TP_HDR_SIZE_SEQ 10 | 86 | #define L2TP_HDR_SIZE_MAX 14 |
87 | #define L2TP_HDR_SIZE_NOSEQ 6 | ||
88 | 87 | ||
89 | /* Default trace flags */ | 88 | /* Default trace flags */ |
90 | #define L2TP_DEFAULT_DEBUG_FLAGS 0 | 89 | #define L2TP_DEFAULT_DEBUG_FLAGS 0 |
@@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) | |||
808 | __skb_pull(skb, sizeof(struct udphdr)); | 807 | __skb_pull(skb, sizeof(struct udphdr)); |
809 | 808 | ||
810 | /* Short packet? */ | 809 | /* Short packet? */ |
811 | if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { | 810 | if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) { |
812 | l2tp_info(tunnel, L2TP_MSG_DATA, | 811 | l2tp_info(tunnel, L2TP_MSG_DATA, |
813 | "%s: recv short packet (len=%d)\n", | 812 | "%s: recv short packet (len=%d)\n", |
814 | tunnel->name, skb->len); | 813 | tunnel->name, skb->len); |
@@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) | |||
884 | goto error; | 883 | goto error; |
885 | } | 884 | } |
886 | 885 | ||
886 | if (tunnel->version == L2TP_HDR_VER_3 && | ||
887 | l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) | ||
888 | goto error; | ||
889 | |||
887 | l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); | 890 | l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); |
888 | l2tp_session_dec_refcount(session); | 891 | l2tp_session_dec_refcount(session); |
889 | 892 | ||
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 9c9afe94d389..b2ce90260c35 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel) | |||
301 | } | 301 | } |
302 | #endif | 302 | #endif |
303 | 303 | ||
304 | static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb, | ||
305 | unsigned char **ptr, unsigned char **optr) | ||
306 | { | ||
307 | int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session); | ||
308 | |||
309 | if (opt_len > 0) { | ||
310 | int off = *ptr - *optr; | ||
311 | |||
312 | if (!pskb_may_pull(skb, off + opt_len)) | ||
313 | return -1; | ||
314 | |||
315 | if (skb->data != *optr) { | ||
316 | *optr = skb->data; | ||
317 | *ptr = skb->data + off; | ||
318 | } | ||
319 | } | ||
320 | |||
321 | return 0; | ||
322 | } | ||
323 | |||
304 | #define l2tp_printk(ptr, type, func, fmt, ...) \ | 324 | #define l2tp_printk(ptr, type, func, fmt, ...) \ |
305 | do { \ | 325 | do { \ |
306 | if (((ptr)->debug) & (type)) \ | 326 | if (((ptr)->debug) & (type)) \ |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 35f6f86d4dcc..d4c60523c549 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb) | |||
165 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); | 165 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); |
166 | } | 166 | } |
167 | 167 | ||
168 | if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) | ||
169 | goto discard_sess; | ||
170 | |||
168 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); | 171 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); |
169 | l2tp_session_dec_refcount(session); | 172 | l2tp_session_dec_refcount(session); |
170 | 173 | ||
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 237f1a4a0b0c..0ae6899edac0 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb) | |||
178 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); | 178 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); |
179 | } | 179 | } |
180 | 180 | ||
181 | if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) | ||
182 | goto discard_sess; | ||
183 | |||
181 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); | 184 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); |
182 | l2tp_session_dec_refcount(session); | 185 | l2tp_session_dec_refcount(session); |
183 | 186 | ||
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f170d6c6629a..928f13a208b0 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1938,9 +1938,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, | |||
1938 | int head_need, bool may_encrypt) | 1938 | int head_need, bool may_encrypt) |
1939 | { | 1939 | { |
1940 | struct ieee80211_local *local = sdata->local; | 1940 | struct ieee80211_local *local = sdata->local; |
1941 | struct ieee80211_hdr *hdr; | ||
1942 | bool enc_tailroom; | ||
1941 | int tail_need = 0; | 1943 | int tail_need = 0; |
1942 | 1944 | ||
1943 | if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) { | 1945 | hdr = (struct ieee80211_hdr *) skb->data; |
1946 | enc_tailroom = may_encrypt && | ||
1947 | (sdata->crypto_tx_tailroom_needed_cnt || | ||
1948 | ieee80211_is_mgmt(hdr->frame_control)); | ||
1949 | |||
1950 | if (enc_tailroom) { | ||
1944 | tail_need = IEEE80211_ENCRYPT_TAILROOM; | 1951 | tail_need = IEEE80211_ENCRYPT_TAILROOM; |
1945 | tail_need -= skb_tailroom(skb); | 1952 | tail_need -= skb_tailroom(skb); |
1946 | tail_need = max_t(int, tail_need, 0); | 1953 | tail_need = max_t(int, tail_need, 0); |
@@ -1948,8 +1955,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, | |||
1948 | 1955 | ||
1949 | if (skb_cloned(skb) && | 1956 | if (skb_cloned(skb) && |
1950 | (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || | 1957 | (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || |
1951 | !skb_clone_writable(skb, ETH_HLEN) || | 1958 | !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom)) |
1952 | (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt))) | ||
1953 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); | 1959 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); |
1954 | else if (head_need || tail_need) | 1960 | else if (head_need || tail_need) |
1955 | I802_DEBUG_INC(local->tx_expand_skb_head); | 1961 | I802_DEBUG_INC(local->tx_expand_skb_head); |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 741b533148ba..db4d46332e86 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -1007,6 +1007,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, | |||
1007 | } | 1007 | } |
1008 | 1008 | ||
1009 | if (nf_ct_key_equal(h, tuple, zone, net)) { | 1009 | if (nf_ct_key_equal(h, tuple, zone, net)) { |
1010 | /* Tuple is taken already, so caller will need to find | ||
1011 | * a new source port to use. | ||
1012 | * | ||
1013 | * Only exception: | ||
1014 | * If the *original tuples* are identical, then both | ||
1015 | * conntracks refer to the same flow. | ||
1016 | * This is a rare situation, it can occur e.g. when | ||
1017 | * more than one UDP packet is sent from same socket | ||
1018 | * in different threads. | ||
1019 | * | ||
1020 | * Let nf_ct_resolve_clash() deal with this later. | ||
1021 | */ | ||
1022 | if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | ||
1023 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) | ||
1024 | continue; | ||
1025 | |||
1010 | NF_CT_STAT_INC_ATOMIC(net, found); | 1026 | NF_CT_STAT_INC_ATOMIC(net, found); |
1011 | rcu_read_unlock(); | 1027 | rcu_read_unlock(); |
1012 | return 1; | 1028 | return 1; |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index fb07f6cfc719..5a92f23f179f 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -116,6 +116,23 @@ static void nft_trans_destroy(struct nft_trans *trans) | |||
116 | kfree(trans); | 116 | kfree(trans); |
117 | } | 117 | } |
118 | 118 | ||
119 | static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set) | ||
120 | { | ||
121 | struct net *net = ctx->net; | ||
122 | struct nft_trans *trans; | ||
123 | |||
124 | if (!nft_set_is_anonymous(set)) | ||
125 | return; | ||
126 | |||
127 | list_for_each_entry_reverse(trans, &net->nft.commit_list, list) { | ||
128 | if (trans->msg_type == NFT_MSG_NEWSET && | ||
129 | nft_trans_set(trans) == set) { | ||
130 | nft_trans_set_bound(trans) = true; | ||
131 | break; | ||
132 | } | ||
133 | } | ||
134 | } | ||
135 | |||
119 | static int nf_tables_register_hook(struct net *net, | 136 | static int nf_tables_register_hook(struct net *net, |
120 | const struct nft_table *table, | 137 | const struct nft_table *table, |
121 | struct nft_chain *chain) | 138 | struct nft_chain *chain) |
@@ -211,18 +228,6 @@ static int nft_delchain(struct nft_ctx *ctx) | |||
211 | return err; | 228 | return err; |
212 | } | 229 | } |
213 | 230 | ||
214 | /* either expr ops provide both activate/deactivate, or neither */ | ||
215 | static bool nft_expr_check_ops(const struct nft_expr_ops *ops) | ||
216 | { | ||
217 | if (!ops) | ||
218 | return true; | ||
219 | |||
220 | if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate))) | ||
221 | return false; | ||
222 | |||
223 | return true; | ||
224 | } | ||
225 | |||
226 | static void nft_rule_expr_activate(const struct nft_ctx *ctx, | 231 | static void nft_rule_expr_activate(const struct nft_ctx *ctx, |
227 | struct nft_rule *rule) | 232 | struct nft_rule *rule) |
228 | { | 233 | { |
@@ -238,14 +243,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx, | |||
238 | } | 243 | } |
239 | 244 | ||
240 | static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, | 245 | static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, |
241 | struct nft_rule *rule) | 246 | struct nft_rule *rule, |
247 | enum nft_trans_phase phase) | ||
242 | { | 248 | { |
243 | struct nft_expr *expr; | 249 | struct nft_expr *expr; |
244 | 250 | ||
245 | expr = nft_expr_first(rule); | 251 | expr = nft_expr_first(rule); |
246 | while (expr != nft_expr_last(rule) && expr->ops) { | 252 | while (expr != nft_expr_last(rule) && expr->ops) { |
247 | if (expr->ops->deactivate) | 253 | if (expr->ops->deactivate) |
248 | expr->ops->deactivate(ctx, expr); | 254 | expr->ops->deactivate(ctx, expr, phase); |
249 | 255 | ||
250 | expr = nft_expr_next(expr); | 256 | expr = nft_expr_next(expr); |
251 | } | 257 | } |
@@ -296,7 +302,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule) | |||
296 | nft_trans_destroy(trans); | 302 | nft_trans_destroy(trans); |
297 | return err; | 303 | return err; |
298 | } | 304 | } |
299 | nft_rule_expr_deactivate(ctx, rule); | 305 | nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE); |
300 | 306 | ||
301 | return 0; | 307 | return 0; |
302 | } | 308 | } |
@@ -1929,9 +1935,6 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk, | |||
1929 | */ | 1935 | */ |
1930 | int nft_register_expr(struct nft_expr_type *type) | 1936 | int nft_register_expr(struct nft_expr_type *type) |
1931 | { | 1937 | { |
1932 | if (!nft_expr_check_ops(type->ops)) | ||
1933 | return -EINVAL; | ||
1934 | |||
1935 | nfnl_lock(NFNL_SUBSYS_NFTABLES); | 1938 | nfnl_lock(NFNL_SUBSYS_NFTABLES); |
1936 | if (type->family == NFPROTO_UNSPEC) | 1939 | if (type->family == NFPROTO_UNSPEC) |
1937 | list_add_tail_rcu(&type->list, &nf_tables_expressions); | 1940 | list_add_tail_rcu(&type->list, &nf_tables_expressions); |
@@ -2079,10 +2082,6 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx, | |||
2079 | err = PTR_ERR(ops); | 2082 | err = PTR_ERR(ops); |
2080 | goto err1; | 2083 | goto err1; |
2081 | } | 2084 | } |
2082 | if (!nft_expr_check_ops(ops)) { | ||
2083 | err = -EINVAL; | ||
2084 | goto err1; | ||
2085 | } | ||
2086 | } else | 2085 | } else |
2087 | ops = type->ops; | 2086 | ops = type->ops; |
2088 | 2087 | ||
@@ -2511,7 +2510,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, | |||
2511 | static void nf_tables_rule_release(const struct nft_ctx *ctx, | 2510 | static void nf_tables_rule_release(const struct nft_ctx *ctx, |
2512 | struct nft_rule *rule) | 2511 | struct nft_rule *rule) |
2513 | { | 2512 | { |
2514 | nft_rule_expr_deactivate(ctx, rule); | 2513 | nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE); |
2515 | nf_tables_rule_destroy(ctx, rule); | 2514 | nf_tables_rule_destroy(ctx, rule); |
2516 | } | 2515 | } |
2517 | 2516 | ||
@@ -3708,39 +3707,30 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | |||
3708 | bind: | 3707 | bind: |
3709 | binding->chain = ctx->chain; | 3708 | binding->chain = ctx->chain; |
3710 | list_add_tail_rcu(&binding->list, &set->bindings); | 3709 | list_add_tail_rcu(&binding->list, &set->bindings); |
3710 | nft_set_trans_bind(ctx, set); | ||
3711 | |||
3711 | return 0; | 3712 | return 0; |
3712 | } | 3713 | } |
3713 | EXPORT_SYMBOL_GPL(nf_tables_bind_set); | 3714 | EXPORT_SYMBOL_GPL(nf_tables_bind_set); |
3714 | 3715 | ||
3715 | void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
3716 | struct nft_set_binding *binding) | ||
3717 | { | ||
3718 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && | ||
3719 | nft_is_active(ctx->net, set)) | ||
3720 | list_add_tail_rcu(&set->list, &ctx->table->sets); | ||
3721 | |||
3722 | list_add_tail_rcu(&binding->list, &set->bindings); | ||
3723 | } | ||
3724 | EXPORT_SYMBOL_GPL(nf_tables_rebind_set); | ||
3725 | |||
3726 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, | 3716 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, |
3727 | struct nft_set_binding *binding) | 3717 | struct nft_set_binding *binding, bool event) |
3728 | { | 3718 | { |
3729 | list_del_rcu(&binding->list); | 3719 | list_del_rcu(&binding->list); |
3730 | 3720 | ||
3731 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && | 3721 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) { |
3732 | nft_is_active(ctx->net, set)) | ||
3733 | list_del_rcu(&set->list); | 3722 | list_del_rcu(&set->list); |
3723 | if (event) | ||
3724 | nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, | ||
3725 | GFP_KERNEL); | ||
3726 | } | ||
3734 | } | 3727 | } |
3735 | EXPORT_SYMBOL_GPL(nf_tables_unbind_set); | 3728 | EXPORT_SYMBOL_GPL(nf_tables_unbind_set); |
3736 | 3729 | ||
3737 | void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set) | 3730 | void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set) |
3738 | { | 3731 | { |
3739 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && | 3732 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) |
3740 | nft_is_active(ctx->net, set)) { | ||
3741 | nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC); | ||
3742 | nft_set_destroy(set); | 3733 | nft_set_destroy(set); |
3743 | } | ||
3744 | } | 3734 | } |
3745 | EXPORT_SYMBOL_GPL(nf_tables_destroy_set); | 3735 | EXPORT_SYMBOL_GPL(nf_tables_destroy_set); |
3746 | 3736 | ||
@@ -6535,6 +6525,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) | |||
6535 | nf_tables_rule_notify(&trans->ctx, | 6525 | nf_tables_rule_notify(&trans->ctx, |
6536 | nft_trans_rule(trans), | 6526 | nft_trans_rule(trans), |
6537 | NFT_MSG_DELRULE); | 6527 | NFT_MSG_DELRULE); |
6528 | nft_rule_expr_deactivate(&trans->ctx, | ||
6529 | nft_trans_rule(trans), | ||
6530 | NFT_TRANS_COMMIT); | ||
6538 | break; | 6531 | break; |
6539 | case NFT_MSG_NEWSET: | 6532 | case NFT_MSG_NEWSET: |
6540 | nft_clear(net, nft_trans_set(trans)); | 6533 | nft_clear(net, nft_trans_set(trans)); |
@@ -6621,7 +6614,8 @@ static void nf_tables_abort_release(struct nft_trans *trans) | |||
6621 | nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); | 6614 | nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); |
6622 | break; | 6615 | break; |
6623 | case NFT_MSG_NEWSET: | 6616 | case NFT_MSG_NEWSET: |
6624 | nft_set_destroy(nft_trans_set(trans)); | 6617 | if (!nft_trans_set_bound(trans)) |
6618 | nft_set_destroy(nft_trans_set(trans)); | ||
6625 | break; | 6619 | break; |
6626 | case NFT_MSG_NEWSETELEM: | 6620 | case NFT_MSG_NEWSETELEM: |
6627 | nft_set_elem_destroy(nft_trans_elem_set(trans), | 6621 | nft_set_elem_destroy(nft_trans_elem_set(trans), |
@@ -6682,7 +6676,9 @@ static int __nf_tables_abort(struct net *net) | |||
6682 | case NFT_MSG_NEWRULE: | 6676 | case NFT_MSG_NEWRULE: |
6683 | trans->ctx.chain->use--; | 6677 | trans->ctx.chain->use--; |
6684 | list_del_rcu(&nft_trans_rule(trans)->list); | 6678 | list_del_rcu(&nft_trans_rule(trans)->list); |
6685 | nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans)); | 6679 | nft_rule_expr_deactivate(&trans->ctx, |
6680 | nft_trans_rule(trans), | ||
6681 | NFT_TRANS_ABORT); | ||
6686 | break; | 6682 | break; |
6687 | case NFT_MSG_DELRULE: | 6683 | case NFT_MSG_DELRULE: |
6688 | trans->ctx.chain->use++; | 6684 | trans->ctx.chain->use++; |
@@ -6692,7 +6688,8 @@ static int __nf_tables_abort(struct net *net) | |||
6692 | break; | 6688 | break; |
6693 | case NFT_MSG_NEWSET: | 6689 | case NFT_MSG_NEWSET: |
6694 | trans->ctx.table->use--; | 6690 | trans->ctx.table->use--; |
6695 | list_del_rcu(&nft_trans_set(trans)->list); | 6691 | if (!nft_trans_set_bound(trans)) |
6692 | list_del_rcu(&nft_trans_set(trans)->list); | ||
6696 | break; | 6693 | break; |
6697 | case NFT_MSG_DELSET: | 6694 | case NFT_MSG_DELSET: |
6698 | trans->ctx.table->use++; | 6695 | trans->ctx.table->use++; |
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 5eb269428832..fe64df848365 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -61,6 +61,21 @@ static struct nft_compat_net *nft_compat_pernet(struct net *net) | |||
61 | return net_generic(net, nft_compat_net_id); | 61 | return net_generic(net, nft_compat_net_id); |
62 | } | 62 | } |
63 | 63 | ||
64 | static void nft_xt_get(struct nft_xt *xt) | ||
65 | { | ||
66 | /* refcount_inc() warns on 0 -> 1 transition, but we can't | ||
67 | * init the reference count to 1 in .select_ops -- we can't | ||
68 | * undo such an increase when another expression inside the same | ||
69 | * rule fails afterwards. | ||
70 | */ | ||
71 | if (xt->listcnt == 0) | ||
72 | refcount_set(&xt->refcnt, 1); | ||
73 | else | ||
74 | refcount_inc(&xt->refcnt); | ||
75 | |||
76 | xt->listcnt++; | ||
77 | } | ||
78 | |||
64 | static bool nft_xt_put(struct nft_xt *xt) | 79 | static bool nft_xt_put(struct nft_xt *xt) |
65 | { | 80 | { |
66 | if (refcount_dec_and_test(&xt->refcnt)) { | 81 | if (refcount_dec_and_test(&xt->refcnt)) { |
@@ -291,7 +306,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
291 | return -EINVAL; | 306 | return -EINVAL; |
292 | 307 | ||
293 | nft_xt = container_of(expr->ops, struct nft_xt, ops); | 308 | nft_xt = container_of(expr->ops, struct nft_xt, ops); |
294 | refcount_inc(&nft_xt->refcnt); | 309 | nft_xt_get(nft_xt); |
295 | return 0; | 310 | return 0; |
296 | } | 311 | } |
297 | 312 | ||
@@ -504,7 +519,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
504 | return ret; | 519 | return ret; |
505 | 520 | ||
506 | nft_xt = container_of(expr->ops, struct nft_xt, ops); | 521 | nft_xt = container_of(expr->ops, struct nft_xt, ops); |
507 | refcount_inc(&nft_xt->refcnt); | 522 | nft_xt_get(nft_xt); |
508 | return 0; | 523 | return 0; |
509 | } | 524 | } |
510 | 525 | ||
@@ -558,41 +573,16 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | |||
558 | __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); | 573 | __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); |
559 | } | 574 | } |
560 | 575 | ||
561 | static void nft_compat_activate(const struct nft_ctx *ctx, | ||
562 | const struct nft_expr *expr, | ||
563 | struct list_head *h) | ||
564 | { | ||
565 | struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops); | ||
566 | |||
567 | if (xt->listcnt == 0) | ||
568 | list_add(&xt->head, h); | ||
569 | |||
570 | xt->listcnt++; | ||
571 | } | ||
572 | |||
573 | static void nft_compat_activate_mt(const struct nft_ctx *ctx, | ||
574 | const struct nft_expr *expr) | ||
575 | { | ||
576 | struct nft_compat_net *cn = nft_compat_pernet(ctx->net); | ||
577 | |||
578 | nft_compat_activate(ctx, expr, &cn->nft_match_list); | ||
579 | } | ||
580 | |||
581 | static void nft_compat_activate_tg(const struct nft_ctx *ctx, | ||
582 | const struct nft_expr *expr) | ||
583 | { | ||
584 | struct nft_compat_net *cn = nft_compat_pernet(ctx->net); | ||
585 | |||
586 | nft_compat_activate(ctx, expr, &cn->nft_target_list); | ||
587 | } | ||
588 | |||
589 | static void nft_compat_deactivate(const struct nft_ctx *ctx, | 576 | static void nft_compat_deactivate(const struct nft_ctx *ctx, |
590 | const struct nft_expr *expr) | 577 | const struct nft_expr *expr, |
578 | enum nft_trans_phase phase) | ||
591 | { | 579 | { |
592 | struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops); | 580 | struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops); |
593 | 581 | ||
594 | if (--xt->listcnt == 0) | 582 | if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) { |
595 | list_del_init(&xt->head); | 583 | if (--xt->listcnt == 0) |
584 | list_del_init(&xt->head); | ||
585 | } | ||
596 | } | 586 | } |
597 | 587 | ||
598 | static void | 588 | static void |
@@ -848,7 +838,6 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
848 | nft_match->ops.eval = nft_match_eval; | 838 | nft_match->ops.eval = nft_match_eval; |
849 | nft_match->ops.init = nft_match_init; | 839 | nft_match->ops.init = nft_match_init; |
850 | nft_match->ops.destroy = nft_match_destroy; | 840 | nft_match->ops.destroy = nft_match_destroy; |
851 | nft_match->ops.activate = nft_compat_activate_mt; | ||
852 | nft_match->ops.deactivate = nft_compat_deactivate; | 841 | nft_match->ops.deactivate = nft_compat_deactivate; |
853 | nft_match->ops.dump = nft_match_dump; | 842 | nft_match->ops.dump = nft_match_dump; |
854 | nft_match->ops.validate = nft_match_validate; | 843 | nft_match->ops.validate = nft_match_validate; |
@@ -866,7 +855,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
866 | 855 | ||
867 | nft_match->ops.size = matchsize; | 856 | nft_match->ops.size = matchsize; |
868 | 857 | ||
869 | nft_match->listcnt = 1; | 858 | nft_match->listcnt = 0; |
870 | list_add(&nft_match->head, &cn->nft_match_list); | 859 | list_add(&nft_match->head, &cn->nft_match_list); |
871 | 860 | ||
872 | return &nft_match->ops; | 861 | return &nft_match->ops; |
@@ -953,7 +942,6 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
953 | nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); | 942 | nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); |
954 | nft_target->ops.init = nft_target_init; | 943 | nft_target->ops.init = nft_target_init; |
955 | nft_target->ops.destroy = nft_target_destroy; | 944 | nft_target->ops.destroy = nft_target_destroy; |
956 | nft_target->ops.activate = nft_compat_activate_tg; | ||
957 | nft_target->ops.deactivate = nft_compat_deactivate; | 945 | nft_target->ops.deactivate = nft_compat_deactivate; |
958 | nft_target->ops.dump = nft_target_dump; | 946 | nft_target->ops.dump = nft_target_dump; |
959 | nft_target->ops.validate = nft_target_validate; | 947 | nft_target->ops.validate = nft_target_validate; |
@@ -964,7 +952,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
964 | else | 952 | else |
965 | nft_target->ops.eval = nft_target_eval_xt; | 953 | nft_target->ops.eval = nft_target_eval_xt; |
966 | 954 | ||
967 | nft_target->listcnt = 1; | 955 | nft_target->listcnt = 0; |
968 | list_add(&nft_target->head, &cn->nft_target_list); | 956 | list_add(&nft_target->head, &cn->nft_target_list); |
969 | 957 | ||
970 | return &nft_target->ops; | 958 | return &nft_target->ops; |
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 07d4efd3d851..f1172f99752b 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c | |||
@@ -235,20 +235,17 @@ err1: | |||
235 | return err; | 235 | return err; |
236 | } | 236 | } |
237 | 237 | ||
238 | static void nft_dynset_activate(const struct nft_ctx *ctx, | ||
239 | const struct nft_expr *expr) | ||
240 | { | ||
241 | struct nft_dynset *priv = nft_expr_priv(expr); | ||
242 | |||
243 | nf_tables_rebind_set(ctx, priv->set, &priv->binding); | ||
244 | } | ||
245 | |||
246 | static void nft_dynset_deactivate(const struct nft_ctx *ctx, | 238 | static void nft_dynset_deactivate(const struct nft_ctx *ctx, |
247 | const struct nft_expr *expr) | 239 | const struct nft_expr *expr, |
240 | enum nft_trans_phase phase) | ||
248 | { | 241 | { |
249 | struct nft_dynset *priv = nft_expr_priv(expr); | 242 | struct nft_dynset *priv = nft_expr_priv(expr); |
250 | 243 | ||
251 | nf_tables_unbind_set(ctx, priv->set, &priv->binding); | 244 | if (phase == NFT_TRANS_PREPARE) |
245 | return; | ||
246 | |||
247 | nf_tables_unbind_set(ctx, priv->set, &priv->binding, | ||
248 | phase == NFT_TRANS_COMMIT); | ||
252 | } | 249 | } |
253 | 250 | ||
254 | static void nft_dynset_destroy(const struct nft_ctx *ctx, | 251 | static void nft_dynset_destroy(const struct nft_ctx *ctx, |
@@ -296,7 +293,6 @@ static const struct nft_expr_ops nft_dynset_ops = { | |||
296 | .eval = nft_dynset_eval, | 293 | .eval = nft_dynset_eval, |
297 | .init = nft_dynset_init, | 294 | .init = nft_dynset_init, |
298 | .destroy = nft_dynset_destroy, | 295 | .destroy = nft_dynset_destroy, |
299 | .activate = nft_dynset_activate, | ||
300 | .deactivate = nft_dynset_deactivate, | 296 | .deactivate = nft_dynset_deactivate, |
301 | .dump = nft_dynset_dump, | 297 | .dump = nft_dynset_dump, |
302 | }; | 298 | }; |
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 0777a93211e2..3f6d1d2a6281 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c | |||
@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx, | |||
72 | } | 72 | } |
73 | 73 | ||
74 | static void nft_immediate_deactivate(const struct nft_ctx *ctx, | 74 | static void nft_immediate_deactivate(const struct nft_ctx *ctx, |
75 | const struct nft_expr *expr) | 75 | const struct nft_expr *expr, |
76 | enum nft_trans_phase phase) | ||
76 | { | 77 | { |
77 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); | 78 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); |
78 | 79 | ||
80 | if (phase == NFT_TRANS_COMMIT) | ||
81 | return; | ||
82 | |||
79 | return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); | 83 | return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); |
80 | } | 84 | } |
81 | 85 | ||
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index 227b2b15a19c..14496da5141d 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c | |||
@@ -121,20 +121,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx, | |||
121 | return 0; | 121 | return 0; |
122 | } | 122 | } |
123 | 123 | ||
124 | static void nft_lookup_activate(const struct nft_ctx *ctx, | ||
125 | const struct nft_expr *expr) | ||
126 | { | ||
127 | struct nft_lookup *priv = nft_expr_priv(expr); | ||
128 | |||
129 | nf_tables_rebind_set(ctx, priv->set, &priv->binding); | ||
130 | } | ||
131 | |||
132 | static void nft_lookup_deactivate(const struct nft_ctx *ctx, | 124 | static void nft_lookup_deactivate(const struct nft_ctx *ctx, |
133 | const struct nft_expr *expr) | 125 | const struct nft_expr *expr, |
126 | enum nft_trans_phase phase) | ||
134 | { | 127 | { |
135 | struct nft_lookup *priv = nft_expr_priv(expr); | 128 | struct nft_lookup *priv = nft_expr_priv(expr); |
136 | 129 | ||
137 | nf_tables_unbind_set(ctx, priv->set, &priv->binding); | 130 | if (phase == NFT_TRANS_PREPARE) |
131 | return; | ||
132 | |||
133 | nf_tables_unbind_set(ctx, priv->set, &priv->binding, | ||
134 | phase == NFT_TRANS_COMMIT); | ||
138 | } | 135 | } |
139 | 136 | ||
140 | static void nft_lookup_destroy(const struct nft_ctx *ctx, | 137 | static void nft_lookup_destroy(const struct nft_ctx *ctx, |
@@ -225,7 +222,6 @@ static const struct nft_expr_ops nft_lookup_ops = { | |||
225 | .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)), | 222 | .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)), |
226 | .eval = nft_lookup_eval, | 223 | .eval = nft_lookup_eval, |
227 | .init = nft_lookup_init, | 224 | .init = nft_lookup_init, |
228 | .activate = nft_lookup_activate, | ||
229 | .deactivate = nft_lookup_deactivate, | 225 | .deactivate = nft_lookup_deactivate, |
230 | .destroy = nft_lookup_destroy, | 226 | .destroy = nft_lookup_destroy, |
231 | .dump = nft_lookup_dump, | 227 | .dump = nft_lookup_dump, |
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index a3185ca2a3a9..ae178e914486 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c | |||
@@ -155,20 +155,17 @@ nla_put_failure: | |||
155 | return -1; | 155 | return -1; |
156 | } | 156 | } |
157 | 157 | ||
158 | static void nft_objref_map_activate(const struct nft_ctx *ctx, | ||
159 | const struct nft_expr *expr) | ||
160 | { | ||
161 | struct nft_objref_map *priv = nft_expr_priv(expr); | ||
162 | |||
163 | nf_tables_rebind_set(ctx, priv->set, &priv->binding); | ||
164 | } | ||
165 | |||
166 | static void nft_objref_map_deactivate(const struct nft_ctx *ctx, | 158 | static void nft_objref_map_deactivate(const struct nft_ctx *ctx, |
167 | const struct nft_expr *expr) | 159 | const struct nft_expr *expr, |
160 | enum nft_trans_phase phase) | ||
168 | { | 161 | { |
169 | struct nft_objref_map *priv = nft_expr_priv(expr); | 162 | struct nft_objref_map *priv = nft_expr_priv(expr); |
170 | 163 | ||
171 | nf_tables_unbind_set(ctx, priv->set, &priv->binding); | 164 | if (phase == NFT_TRANS_PREPARE) |
165 | return; | ||
166 | |||
167 | nf_tables_unbind_set(ctx, priv->set, &priv->binding, | ||
168 | phase == NFT_TRANS_COMMIT); | ||
172 | } | 169 | } |
173 | 170 | ||
174 | static void nft_objref_map_destroy(const struct nft_ctx *ctx, | 171 | static void nft_objref_map_destroy(const struct nft_ctx *ctx, |
@@ -185,7 +182,6 @@ static const struct nft_expr_ops nft_objref_map_ops = { | |||
185 | .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)), | 182 | .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)), |
186 | .eval = nft_objref_map_eval, | 183 | .eval = nft_objref_map_eval, |
187 | .init = nft_objref_map_init, | 184 | .init = nft_objref_map_init, |
188 | .activate = nft_objref_map_activate, | ||
189 | .deactivate = nft_objref_map_deactivate, | 185 | .deactivate = nft_objref_map_deactivate, |
190 | .destroy = nft_objref_map_destroy, | 186 | .destroy = nft_objref_map_destroy, |
191 | .dump = nft_objref_map_dump, | 187 | .dump = nft_objref_map_dump, |
diff --git a/net/rds/bind.c b/net/rds/bind.c index 762d2c6788a3..17c9d9f0c848 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c | |||
@@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port, | |||
78 | __rds_create_bind_key(key, addr, port, scope_id); | 78 | __rds_create_bind_key(key, addr, port, scope_id); |
79 | rcu_read_lock(); | 79 | rcu_read_lock(); |
80 | rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); | 80 | rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); |
81 | if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) | 81 | if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) || |
82 | rds_sock_addref(rs); | 82 | !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt))) |
83 | else | ||
84 | rs = NULL; | 83 | rs = NULL; |
84 | |||
85 | rcu_read_unlock(); | 85 | rcu_read_unlock(); |
86 | 86 | ||
87 | rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, | 87 | rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, |
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index eaf19ebaa964..3f7bb11f3290 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c | |||
@@ -596,6 +596,7 @@ error_requeue_call: | |||
596 | } | 596 | } |
597 | error_no_call: | 597 | error_no_call: |
598 | release_sock(&rx->sk); | 598 | release_sock(&rx->sk); |
599 | error_trace: | ||
599 | trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); | 600 | trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); |
600 | return ret; | 601 | return ret; |
601 | 602 | ||
@@ -604,7 +605,7 @@ wait_interrupted: | |||
604 | wait_error: | 605 | wait_error: |
605 | finish_wait(sk_sleep(&rx->sk), &wait); | 606 | finish_wait(sk_sleep(&rx->sk), &wait); |
606 | call = NULL; | 607 | call = NULL; |
607 | goto error_no_call; | 608 | goto error_trace; |
608 | } | 609 | } |
609 | 610 | ||
610 | /** | 611 | /** |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index f6aa57fbbbaf..12ca9d13db83 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
@@ -1371,7 +1371,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
1371 | if (!tc_skip_hw(fnew->flags)) { | 1371 | if (!tc_skip_hw(fnew->flags)) { |
1372 | err = fl_hw_replace_filter(tp, fnew, extack); | 1372 | err = fl_hw_replace_filter(tp, fnew, extack); |
1373 | if (err) | 1373 | if (err) |
1374 | goto errout_mask; | 1374 | goto errout_mask_ht; |
1375 | } | 1375 | } |
1376 | 1376 | ||
1377 | if (!tc_in_hw(fnew->flags)) | 1377 | if (!tc_in_hw(fnew->flags)) |
@@ -1401,6 +1401,10 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
1401 | kfree(mask); | 1401 | kfree(mask); |
1402 | return 0; | 1402 | return 0; |
1403 | 1403 | ||
1404 | errout_mask_ht: | ||
1405 | rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node, | ||
1406 | fnew->mask->filter_ht_params); | ||
1407 | |||
1404 | errout_mask: | 1408 | errout_mask: |
1405 | fl_mask_put(head, fnew->mask, false); | 1409 | fl_mask_put(head, fnew->mask, false); |
1406 | 1410 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f93c3cf9e567..65d6d04546ae 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -2027,7 +2027,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
2027 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; | 2027 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
2028 | struct sctp_transport *transport = NULL; | 2028 | struct sctp_transport *transport = NULL; |
2029 | struct sctp_sndrcvinfo _sinfo, *sinfo; | 2029 | struct sctp_sndrcvinfo _sinfo, *sinfo; |
2030 | struct sctp_association *asoc; | 2030 | struct sctp_association *asoc, *tmp; |
2031 | struct sctp_cmsgs cmsgs; | 2031 | struct sctp_cmsgs cmsgs; |
2032 | union sctp_addr *daddr; | 2032 | union sctp_addr *daddr; |
2033 | bool new = false; | 2033 | bool new = false; |
@@ -2053,7 +2053,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
2053 | 2053 | ||
2054 | /* SCTP_SENDALL process */ | 2054 | /* SCTP_SENDALL process */ |
2055 | if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) { | 2055 | if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) { |
2056 | list_for_each_entry(asoc, &ep->asocs, asocs) { | 2056 | list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) { |
2057 | err = sctp_sendmsg_check_sflags(asoc, sflags, msg, | 2057 | err = sctp_sendmsg_check_sflags(asoc, sflags, msg, |
2058 | msg_len); | 2058 | msg_len); |
2059 | if (err == 0) | 2059 | if (err == 0) |
diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 80e0ae5534ec..f24633114dfd 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c | |||
@@ -84,6 +84,19 @@ static void fa_zero(struct flex_array *fa, size_t index, size_t count) | |||
84 | } | 84 | } |
85 | } | 85 | } |
86 | 86 | ||
87 | static size_t fa_index(struct flex_array *fa, void *elem, size_t count) | ||
88 | { | ||
89 | size_t index = 0; | ||
90 | |||
91 | while (count--) { | ||
92 | if (elem == flex_array_get(fa, index)) | ||
93 | break; | ||
94 | index++; | ||
95 | } | ||
96 | |||
97 | return index; | ||
98 | } | ||
99 | |||
87 | /* Migrates chunks from stream queues to new stream queues if needed, | 100 | /* Migrates chunks from stream queues to new stream queues if needed, |
88 | * but not across associations. Also, removes those chunks to streams | 101 | * but not across associations. Also, removes those chunks to streams |
89 | * higher than the new max. | 102 | * higher than the new max. |
@@ -147,6 +160,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, | |||
147 | 160 | ||
148 | if (stream->out) { | 161 | if (stream->out) { |
149 | fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt)); | 162 | fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt)); |
163 | if (stream->out_curr) { | ||
164 | size_t index = fa_index(stream->out, stream->out_curr, | ||
165 | stream->outcnt); | ||
166 | |||
167 | BUG_ON(index == stream->outcnt); | ||
168 | stream->out_curr = flex_array_get(out, index); | ||
169 | } | ||
150 | fa_free(stream->out); | 170 | fa_free(stream->out); |
151 | } | 171 | } |
152 | 172 | ||
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index c4e56602e0c6..b04a813fc865 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
@@ -1505,6 +1505,11 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |||
1505 | 1505 | ||
1506 | smc = smc_sk(sk); | 1506 | smc = smc_sk(sk); |
1507 | lock_sock(sk); | 1507 | lock_sock(sk); |
1508 | if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) { | ||
1509 | /* socket was connected before, no more data to read */ | ||
1510 | rc = 0; | ||
1511 | goto out; | ||
1512 | } | ||
1508 | if ((sk->sk_state == SMC_INIT) || | 1513 | if ((sk->sk_state == SMC_INIT) || |
1509 | (sk->sk_state == SMC_LISTEN) || | 1514 | (sk->sk_state == SMC_LISTEN) || |
1510 | (sk->sk_state == SMC_CLOSED)) | 1515 | (sk->sk_state == SMC_CLOSED)) |
@@ -1840,7 +1845,11 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos, | |||
1840 | 1845 | ||
1841 | smc = smc_sk(sk); | 1846 | smc = smc_sk(sk); |
1842 | lock_sock(sk); | 1847 | lock_sock(sk); |
1843 | 1848 | if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) { | |
1849 | /* socket was connected before, no more data to read */ | ||
1850 | rc = 0; | ||
1851 | goto out; | ||
1852 | } | ||
1844 | if (sk->sk_state == SMC_INIT || | 1853 | if (sk->sk_state == SMC_INIT || |
1845 | sk->sk_state == SMC_LISTEN || | 1854 | sk->sk_state == SMC_LISTEN || |
1846 | sk->sk_state == SMC_CLOSED) | 1855 | sk->sk_state == SMC_CLOSED) |
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index db83332ac1c8..a712c9f8699b 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c | |||
@@ -21,13 +21,6 @@ | |||
21 | 21 | ||
22 | /********************************** send *************************************/ | 22 | /********************************** send *************************************/ |
23 | 23 | ||
24 | struct smc_cdc_tx_pend { | ||
25 | struct smc_connection *conn; /* socket connection */ | ||
26 | union smc_host_cursor cursor; /* tx sndbuf cursor sent */ | ||
27 | union smc_host_cursor p_cursor; /* rx RMBE cursor produced */ | ||
28 | u16 ctrl_seq; /* conn. tx sequence # */ | ||
29 | }; | ||
30 | |||
31 | /* handler for send/transmission completion of a CDC msg */ | 24 | /* handler for send/transmission completion of a CDC msg */ |
32 | static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, | 25 | static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, |
33 | struct smc_link *link, | 26 | struct smc_link *link, |
@@ -61,12 +54,14 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, | |||
61 | 54 | ||
62 | int smc_cdc_get_free_slot(struct smc_connection *conn, | 55 | int smc_cdc_get_free_slot(struct smc_connection *conn, |
63 | struct smc_wr_buf **wr_buf, | 56 | struct smc_wr_buf **wr_buf, |
57 | struct smc_rdma_wr **wr_rdma_buf, | ||
64 | struct smc_cdc_tx_pend **pend) | 58 | struct smc_cdc_tx_pend **pend) |
65 | { | 59 | { |
66 | struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; | 60 | struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; |
67 | int rc; | 61 | int rc; |
68 | 62 | ||
69 | rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, | 63 | rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, |
64 | wr_rdma_buf, | ||
70 | (struct smc_wr_tx_pend_priv **)pend); | 65 | (struct smc_wr_tx_pend_priv **)pend); |
71 | if (!conn->alert_token_local) | 66 | if (!conn->alert_token_local) |
72 | /* abnormal termination */ | 67 | /* abnormal termination */ |
@@ -96,6 +91,7 @@ int smc_cdc_msg_send(struct smc_connection *conn, | |||
96 | struct smc_wr_buf *wr_buf, | 91 | struct smc_wr_buf *wr_buf, |
97 | struct smc_cdc_tx_pend *pend) | 92 | struct smc_cdc_tx_pend *pend) |
98 | { | 93 | { |
94 | union smc_host_cursor cfed; | ||
99 | struct smc_link *link; | 95 | struct smc_link *link; |
100 | int rc; | 96 | int rc; |
101 | 97 | ||
@@ -107,10 +103,10 @@ int smc_cdc_msg_send(struct smc_connection *conn, | |||
107 | conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; | 103 | conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; |
108 | smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, | 104 | smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, |
109 | &conn->local_tx_ctrl, conn); | 105 | &conn->local_tx_ctrl, conn); |
106 | smc_curs_copy(&cfed, &((struct smc_host_cdc_msg *)wr_buf)->cons, conn); | ||
110 | rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); | 107 | rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); |
111 | if (!rc) | 108 | if (!rc) |
112 | smc_curs_copy(&conn->rx_curs_confirmed, | 109 | smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn); |
113 | &conn->local_tx_ctrl.cons, conn); | ||
114 | 110 | ||
115 | return rc; | 111 | return rc; |
116 | } | 112 | } |
@@ -121,11 +117,14 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn) | |||
121 | struct smc_wr_buf *wr_buf; | 117 | struct smc_wr_buf *wr_buf; |
122 | int rc; | 118 | int rc; |
123 | 119 | ||
124 | rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend); | 120 | rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend); |
125 | if (rc) | 121 | if (rc) |
126 | return rc; | 122 | return rc; |
127 | 123 | ||
128 | return smc_cdc_msg_send(conn, wr_buf, pend); | 124 | spin_lock_bh(&conn->send_lock); |
125 | rc = smc_cdc_msg_send(conn, wr_buf, pend); | ||
126 | spin_unlock_bh(&conn->send_lock); | ||
127 | return rc; | ||
129 | } | 128 | } |
130 | 129 | ||
131 | int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) | 130 | int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) |
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h index b5bfe38c7f9b..271e2524dc8f 100644 --- a/net/smc/smc_cdc.h +++ b/net/smc/smc_cdc.h | |||
@@ -160,7 +160,9 @@ static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt, | |||
160 | #endif | 160 | #endif |
161 | } | 161 | } |
162 | 162 | ||
163 | /* calculate cursor difference between old and new, where old <= new */ | 163 | /* calculate cursor difference between old and new, where old <= new and |
164 | * difference cannot exceed size | ||
165 | */ | ||
164 | static inline int smc_curs_diff(unsigned int size, | 166 | static inline int smc_curs_diff(unsigned int size, |
165 | union smc_host_cursor *old, | 167 | union smc_host_cursor *old, |
166 | union smc_host_cursor *new) | 168 | union smc_host_cursor *new) |
@@ -185,6 +187,28 @@ static inline int smc_curs_comp(unsigned int size, | |||
185 | return smc_curs_diff(size, old, new); | 187 | return smc_curs_diff(size, old, new); |
186 | } | 188 | } |
187 | 189 | ||
190 | /* calculate cursor difference between old and new, where old <= new and | ||
191 | * difference may exceed size | ||
192 | */ | ||
193 | static inline int smc_curs_diff_large(unsigned int size, | ||
194 | union smc_host_cursor *old, | ||
195 | union smc_host_cursor *new) | ||
196 | { | ||
197 | if (old->wrap < new->wrap) | ||
198 | return min_t(int, | ||
199 | (size - old->count) + new->count + | ||
200 | (new->wrap - old->wrap - 1) * size, | ||
201 | size); | ||
202 | |||
203 | if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */ | ||
204 | return min_t(int, | ||
205 | (size - old->count) + new->count + | ||
206 | (new->wrap + 0xffff - old->wrap) * size, | ||
207 | size); | ||
208 | |||
209 | return max_t(int, 0, (new->count - old->count)); | ||
210 | } | ||
211 | |||
188 | static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, | 212 | static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, |
189 | union smc_host_cursor *local, | 213 | union smc_host_cursor *local, |
190 | struct smc_connection *conn) | 214 | struct smc_connection *conn) |
@@ -270,10 +294,16 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, | |||
270 | smcr_cdc_msg_to_host(local, peer, conn); | 294 | smcr_cdc_msg_to_host(local, peer, conn); |
271 | } | 295 | } |
272 | 296 | ||
273 | struct smc_cdc_tx_pend; | 297 | struct smc_cdc_tx_pend { |
298 | struct smc_connection *conn; /* socket connection */ | ||
299 | union smc_host_cursor cursor; /* tx sndbuf cursor sent */ | ||
300 | union smc_host_cursor p_cursor; /* rx RMBE cursor produced */ | ||
301 | u16 ctrl_seq; /* conn. tx sequence # */ | ||
302 | }; | ||
274 | 303 | ||
275 | int smc_cdc_get_free_slot(struct smc_connection *conn, | 304 | int smc_cdc_get_free_slot(struct smc_connection *conn, |
276 | struct smc_wr_buf **wr_buf, | 305 | struct smc_wr_buf **wr_buf, |
306 | struct smc_rdma_wr **wr_rdma_buf, | ||
277 | struct smc_cdc_tx_pend **pend); | 307 | struct smc_cdc_tx_pend **pend); |
278 | void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); | 308 | void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); |
279 | int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, | 309 | int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, |
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 776e9dfc915d..d53fd588d1f5 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c | |||
@@ -378,7 +378,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info) | |||
378 | vec.iov_len = sizeof(struct smc_clc_msg_decline); | 378 | vec.iov_len = sizeof(struct smc_clc_msg_decline); |
379 | len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, | 379 | len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, |
380 | sizeof(struct smc_clc_msg_decline)); | 380 | sizeof(struct smc_clc_msg_decline)); |
381 | if (len < sizeof(struct smc_clc_msg_decline)) | 381 | if (len < 0 || len < sizeof(struct smc_clc_msg_decline)) |
382 | len = -EPROTO; | 382 | len = -EPROTO; |
383 | return len > 0 ? 0 : len; | 383 | return len > 0 ? 0 : len; |
384 | } | 384 | } |
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index ea2b87f29469..e39cadda1bf5 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c | |||
@@ -345,14 +345,7 @@ static void smc_close_passive_work(struct work_struct *work) | |||
345 | 345 | ||
346 | switch (sk->sk_state) { | 346 | switch (sk->sk_state) { |
347 | case SMC_INIT: | 347 | case SMC_INIT: |
348 | if (atomic_read(&conn->bytes_to_rcv) || | 348 | sk->sk_state = SMC_APPCLOSEWAIT1; |
349 | (rxflags->peer_done_writing && | ||
350 | !smc_cdc_rxed_any_close(conn))) { | ||
351 | sk->sk_state = SMC_APPCLOSEWAIT1; | ||
352 | } else { | ||
353 | sk->sk_state = SMC_CLOSED; | ||
354 | sock_put(sk); /* passive closing */ | ||
355 | } | ||
356 | break; | 349 | break; |
357 | case SMC_ACTIVE: | 350 | case SMC_ACTIVE: |
358 | sk->sk_state = SMC_APPCLOSEWAIT1; | 351 | sk->sk_state = SMC_APPCLOSEWAIT1; |
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 35c1cdc93e1c..aa1c551cee81 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c | |||
@@ -128,6 +128,8 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn) | |||
128 | { | 128 | { |
129 | struct smc_link_group *lgr = conn->lgr; | 129 | struct smc_link_group *lgr = conn->lgr; |
130 | 130 | ||
131 | if (!lgr) | ||
132 | return; | ||
131 | write_lock_bh(&lgr->conns_lock); | 133 | write_lock_bh(&lgr->conns_lock); |
132 | if (conn->alert_token_local) { | 134 | if (conn->alert_token_local) { |
133 | __smc_lgr_unregister_conn(conn); | 135 | __smc_lgr_unregister_conn(conn); |
@@ -300,13 +302,13 @@ static void smc_buf_unuse(struct smc_connection *conn, | |||
300 | conn->sndbuf_desc->used = 0; | 302 | conn->sndbuf_desc->used = 0; |
301 | if (conn->rmb_desc) { | 303 | if (conn->rmb_desc) { |
302 | if (!conn->rmb_desc->regerr) { | 304 | if (!conn->rmb_desc->regerr) { |
303 | conn->rmb_desc->used = 0; | ||
304 | if (!lgr->is_smcd) { | 305 | if (!lgr->is_smcd) { |
305 | /* unregister rmb with peer */ | 306 | /* unregister rmb with peer */ |
306 | smc_llc_do_delete_rkey( | 307 | smc_llc_do_delete_rkey( |
307 | &lgr->lnk[SMC_SINGLE_LINK], | 308 | &lgr->lnk[SMC_SINGLE_LINK], |
308 | conn->rmb_desc); | 309 | conn->rmb_desc); |
309 | } | 310 | } |
311 | conn->rmb_desc->used = 0; | ||
310 | } else { | 312 | } else { |
311 | /* buf registration failed, reuse not possible */ | 313 | /* buf registration failed, reuse not possible */ |
312 | write_lock_bh(&lgr->rmbs_lock); | 314 | write_lock_bh(&lgr->rmbs_lock); |
@@ -628,6 +630,8 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, | |||
628 | local_contact = SMC_REUSE_CONTACT; | 630 | local_contact = SMC_REUSE_CONTACT; |
629 | conn->lgr = lgr; | 631 | conn->lgr = lgr; |
630 | smc_lgr_register_conn(conn); /* add smc conn to lgr */ | 632 | smc_lgr_register_conn(conn); /* add smc conn to lgr */ |
633 | if (delayed_work_pending(&lgr->free_work)) | ||
634 | cancel_delayed_work(&lgr->free_work); | ||
631 | write_unlock_bh(&lgr->conns_lock); | 635 | write_unlock_bh(&lgr->conns_lock); |
632 | break; | 636 | break; |
633 | } | 637 | } |
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index b00287989a3d..8806d2afa6ed 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h | |||
@@ -52,6 +52,24 @@ enum smc_wr_reg_state { | |||
52 | FAILED /* ib_wr_reg_mr response: failure */ | 52 | FAILED /* ib_wr_reg_mr response: failure */ |
53 | }; | 53 | }; |
54 | 54 | ||
55 | struct smc_rdma_sge { /* sges for RDMA writes */ | ||
56 | struct ib_sge wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE]; | ||
57 | }; | ||
58 | |||
59 | #define SMC_MAX_RDMA_WRITES 2 /* max. # of RDMA writes per | ||
60 | * message send | ||
61 | */ | ||
62 | |||
63 | struct smc_rdma_sges { /* sges per message send */ | ||
64 | struct smc_rdma_sge tx_rdma_sge[SMC_MAX_RDMA_WRITES]; | ||
65 | }; | ||
66 | |||
67 | struct smc_rdma_wr { /* work requests per message | ||
68 | * send | ||
69 | */ | ||
70 | struct ib_rdma_wr wr_tx_rdma[SMC_MAX_RDMA_WRITES]; | ||
71 | }; | ||
72 | |||
55 | struct smc_link { | 73 | struct smc_link { |
56 | struct smc_ib_device *smcibdev; /* ib-device */ | 74 | struct smc_ib_device *smcibdev; /* ib-device */ |
57 | u8 ibport; /* port - values 1 | 2 */ | 75 | u8 ibport; /* port - values 1 | 2 */ |
@@ -64,6 +82,8 @@ struct smc_link { | |||
64 | struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ | 82 | struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ |
65 | struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ | 83 | struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ |
66 | struct ib_sge *wr_tx_sges; /* WR send gather meta data */ | 84 | struct ib_sge *wr_tx_sges; /* WR send gather meta data */ |
85 | struct smc_rdma_sges *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/ | ||
86 | struct smc_rdma_wr *wr_tx_rdmas; /* WR RDMA WRITE */ | ||
67 | struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ | 87 | struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ |
68 | /* above four vectors have wr_tx_cnt elements and use the same index */ | 88 | /* above four vectors have wr_tx_cnt elements and use the same index */ |
69 | dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ | 89 | dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ |
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index e519ef29c0ff..76487a16934e 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c | |||
@@ -289,8 +289,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk) | |||
289 | 289 | ||
290 | static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) | 290 | static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) |
291 | { | 291 | { |
292 | struct smc_ib_device *smcibdev = | 292 | struct smc_link *lnk = (struct smc_link *)priv; |
293 | (struct smc_ib_device *)ibevent->device; | 293 | struct smc_ib_device *smcibdev = lnk->smcibdev; |
294 | u8 port_idx; | 294 | u8 port_idx; |
295 | 295 | ||
296 | switch (ibevent->event) { | 296 | switch (ibevent->event) { |
@@ -298,7 +298,7 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) | |||
298 | case IB_EVENT_GID_CHANGE: | 298 | case IB_EVENT_GID_CHANGE: |
299 | case IB_EVENT_PORT_ERR: | 299 | case IB_EVENT_PORT_ERR: |
300 | case IB_EVENT_QP_ACCESS_ERR: | 300 | case IB_EVENT_QP_ACCESS_ERR: |
301 | port_idx = ibevent->element.port_num - 1; | 301 | port_idx = ibevent->element.qp->port - 1; |
302 | set_bit(port_idx, &smcibdev->port_event_mask); | 302 | set_bit(port_idx, &smcibdev->port_event_mask); |
303 | schedule_work(&smcibdev->port_event_work); | 303 | schedule_work(&smcibdev->port_event_work); |
304 | break; | 304 | break; |
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index a6d3623d06f4..4fd60c522802 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c | |||
@@ -166,7 +166,8 @@ static int smc_llc_add_pending_send(struct smc_link *link, | |||
166 | { | 166 | { |
167 | int rc; | 167 | int rc; |
168 | 168 | ||
169 | rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, pend); | 169 | rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL, |
170 | pend); | ||
170 | if (rc < 0) | 171 | if (rc < 0) |
171 | return rc; | 172 | return rc; |
172 | BUILD_BUG_ON_MSG( | 173 | BUILD_BUG_ON_MSG( |
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 7cb3e4f07c10..632c3109dee5 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c | |||
@@ -27,7 +27,7 @@ | |||
27 | static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { | 27 | static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { |
28 | [SMC_PNETID_NAME] = { | 28 | [SMC_PNETID_NAME] = { |
29 | .type = NLA_NUL_STRING, | 29 | .type = NLA_NUL_STRING, |
30 | .len = SMC_MAX_PNETID_LEN - 1 | 30 | .len = SMC_MAX_PNETID_LEN |
31 | }, | 31 | }, |
32 | [SMC_PNETID_ETHNAME] = { | 32 | [SMC_PNETID_ETHNAME] = { |
33 | .type = NLA_NUL_STRING, | 33 | .type = NLA_NUL_STRING, |
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index d8366ed51757..f93f3580c100 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c | |||
@@ -165,12 +165,11 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) | |||
165 | conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; | 165 | conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; |
166 | 166 | ||
167 | if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { | 167 | if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { |
168 | if (send_done) | ||
169 | return send_done; | ||
168 | rc = smc_tx_wait(smc, msg->msg_flags); | 170 | rc = smc_tx_wait(smc, msg->msg_flags); |
169 | if (rc) { | 171 | if (rc) |
170 | if (send_done) | ||
171 | return send_done; | ||
172 | goto out_err; | 172 | goto out_err; |
173 | } | ||
174 | continue; | 173 | continue; |
175 | } | 174 | } |
176 | 175 | ||
@@ -267,27 +266,23 @@ int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len, | |||
267 | 266 | ||
268 | /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ | 267 | /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ |
269 | static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, | 268 | static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, |
270 | int num_sges, struct ib_sge sges[]) | 269 | int num_sges, struct ib_rdma_wr *rdma_wr) |
271 | { | 270 | { |
272 | struct smc_link_group *lgr = conn->lgr; | 271 | struct smc_link_group *lgr = conn->lgr; |
273 | struct ib_rdma_wr rdma_wr; | ||
274 | struct smc_link *link; | 272 | struct smc_link *link; |
275 | int rc; | 273 | int rc; |
276 | 274 | ||
277 | memset(&rdma_wr, 0, sizeof(rdma_wr)); | ||
278 | link = &lgr->lnk[SMC_SINGLE_LINK]; | 275 | link = &lgr->lnk[SMC_SINGLE_LINK]; |
279 | rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link); | 276 | rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link); |
280 | rdma_wr.wr.sg_list = sges; | 277 | rdma_wr->wr.num_sge = num_sges; |
281 | rdma_wr.wr.num_sge = num_sges; | 278 | rdma_wr->remote_addr = |
282 | rdma_wr.wr.opcode = IB_WR_RDMA_WRITE; | ||
283 | rdma_wr.remote_addr = | ||
284 | lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr + | 279 | lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr + |
285 | /* RMBE within RMB */ | 280 | /* RMBE within RMB */ |
286 | conn->tx_off + | 281 | conn->tx_off + |
287 | /* offset within RMBE */ | 282 | /* offset within RMBE */ |
288 | peer_rmbe_offset; | 283 | peer_rmbe_offset; |
289 | rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; | 284 | rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; |
290 | rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL); | 285 | rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL); |
291 | if (rc) { | 286 | if (rc) { |
292 | conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; | 287 | conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; |
293 | smc_lgr_terminate(lgr); | 288 | smc_lgr_terminate(lgr); |
@@ -314,24 +309,25 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn, | |||
314 | /* SMC-R helper for smc_tx_rdma_writes() */ | 309 | /* SMC-R helper for smc_tx_rdma_writes() */ |
315 | static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, | 310 | static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, |
316 | size_t src_off, size_t src_len, | 311 | size_t src_off, size_t src_len, |
317 | size_t dst_off, size_t dst_len) | 312 | size_t dst_off, size_t dst_len, |
313 | struct smc_rdma_wr *wr_rdma_buf) | ||
318 | { | 314 | { |
319 | dma_addr_t dma_addr = | 315 | dma_addr_t dma_addr = |
320 | sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); | 316 | sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); |
321 | struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; | ||
322 | int src_len_sum = src_len, dst_len_sum = dst_len; | 317 | int src_len_sum = src_len, dst_len_sum = dst_len; |
323 | struct ib_sge sges[SMC_IB_MAX_SEND_SGE]; | ||
324 | int sent_count = src_off; | 318 | int sent_count = src_off; |
325 | int srcchunk, dstchunk; | 319 | int srcchunk, dstchunk; |
326 | int num_sges; | 320 | int num_sges; |
327 | int rc; | 321 | int rc; |
328 | 322 | ||
329 | for (dstchunk = 0; dstchunk < 2; dstchunk++) { | 323 | for (dstchunk = 0; dstchunk < 2; dstchunk++) { |
324 | struct ib_sge *sge = | ||
325 | wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list; | ||
326 | |||
330 | num_sges = 0; | 327 | num_sges = 0; |
331 | for (srcchunk = 0; srcchunk < 2; srcchunk++) { | 328 | for (srcchunk = 0; srcchunk < 2; srcchunk++) { |
332 | sges[srcchunk].addr = dma_addr + src_off; | 329 | sge[srcchunk].addr = dma_addr + src_off; |
333 | sges[srcchunk].length = src_len; | 330 | sge[srcchunk].length = src_len; |
334 | sges[srcchunk].lkey = link->roce_pd->local_dma_lkey; | ||
335 | num_sges++; | 331 | num_sges++; |
336 | 332 | ||
337 | src_off += src_len; | 333 | src_off += src_len; |
@@ -344,7 +340,8 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, | |||
344 | src_len = dst_len - src_len; /* remainder */ | 340 | src_len = dst_len - src_len; /* remainder */ |
345 | src_len_sum += src_len; | 341 | src_len_sum += src_len; |
346 | } | 342 | } |
347 | rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); | 343 | rc = smc_tx_rdma_write(conn, dst_off, num_sges, |
344 | &wr_rdma_buf->wr_tx_rdma[dstchunk]); | ||
348 | if (rc) | 345 | if (rc) |
349 | return rc; | 346 | return rc; |
350 | if (dst_len_sum == len) | 347 | if (dst_len_sum == len) |
@@ -403,7 +400,8 @@ static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len, | |||
403 | /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; | 400 | /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; |
404 | * usable snd_wnd as max transmit | 401 | * usable snd_wnd as max transmit |
405 | */ | 402 | */ |
406 | static int smc_tx_rdma_writes(struct smc_connection *conn) | 403 | static int smc_tx_rdma_writes(struct smc_connection *conn, |
404 | struct smc_rdma_wr *wr_rdma_buf) | ||
407 | { | 405 | { |
408 | size_t len, src_len, dst_off, dst_len; /* current chunk values */ | 406 | size_t len, src_len, dst_off, dst_len; /* current chunk values */ |
409 | union smc_host_cursor sent, prep, prod, cons; | 407 | union smc_host_cursor sent, prep, prod, cons; |
@@ -464,7 +462,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) | |||
464 | dst_off, dst_len); | 462 | dst_off, dst_len); |
465 | else | 463 | else |
466 | rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, | 464 | rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, |
467 | dst_off, dst_len); | 465 | dst_off, dst_len, wr_rdma_buf); |
468 | if (rc) | 466 | if (rc) |
469 | return rc; | 467 | return rc; |
470 | 468 | ||
@@ -485,31 +483,30 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) | |||
485 | static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) | 483 | static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) |
486 | { | 484 | { |
487 | struct smc_cdc_producer_flags *pflags; | 485 | struct smc_cdc_producer_flags *pflags; |
486 | struct smc_rdma_wr *wr_rdma_buf; | ||
488 | struct smc_cdc_tx_pend *pend; | 487 | struct smc_cdc_tx_pend *pend; |
489 | struct smc_wr_buf *wr_buf; | 488 | struct smc_wr_buf *wr_buf; |
490 | int rc; | 489 | int rc; |
491 | 490 | ||
492 | spin_lock_bh(&conn->send_lock); | 491 | rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend); |
493 | rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend); | ||
494 | if (rc < 0) { | 492 | if (rc < 0) { |
495 | if (rc == -EBUSY) { | 493 | if (rc == -EBUSY) { |
496 | struct smc_sock *smc = | 494 | struct smc_sock *smc = |
497 | container_of(conn, struct smc_sock, conn); | 495 | container_of(conn, struct smc_sock, conn); |
498 | 496 | ||
499 | if (smc->sk.sk_err == ECONNABORTED) { | 497 | if (smc->sk.sk_err == ECONNABORTED) |
500 | rc = sock_error(&smc->sk); | 498 | return sock_error(&smc->sk); |
501 | goto out_unlock; | ||
502 | } | ||
503 | rc = 0; | 499 | rc = 0; |
504 | if (conn->alert_token_local) /* connection healthy */ | 500 | if (conn->alert_token_local) /* connection healthy */ |
505 | mod_delayed_work(system_wq, &conn->tx_work, | 501 | mod_delayed_work(system_wq, &conn->tx_work, |
506 | SMC_TX_WORK_DELAY); | 502 | SMC_TX_WORK_DELAY); |
507 | } | 503 | } |
508 | goto out_unlock; | 504 | return rc; |
509 | } | 505 | } |
510 | 506 | ||
507 | spin_lock_bh(&conn->send_lock); | ||
511 | if (!conn->local_tx_ctrl.prod_flags.urg_data_present) { | 508 | if (!conn->local_tx_ctrl.prod_flags.urg_data_present) { |
512 | rc = smc_tx_rdma_writes(conn); | 509 | rc = smc_tx_rdma_writes(conn, wr_rdma_buf); |
513 | if (rc) { | 510 | if (rc) { |
514 | smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], | 511 | smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], |
515 | (struct smc_wr_tx_pend_priv *)pend); | 512 | (struct smc_wr_tx_pend_priv *)pend); |
@@ -536,7 +533,7 @@ static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn) | |||
536 | 533 | ||
537 | spin_lock_bh(&conn->send_lock); | 534 | spin_lock_bh(&conn->send_lock); |
538 | if (!pflags->urg_data_present) | 535 | if (!pflags->urg_data_present) |
539 | rc = smc_tx_rdma_writes(conn); | 536 | rc = smc_tx_rdma_writes(conn, NULL); |
540 | if (!rc) | 537 | if (!rc) |
541 | rc = smcd_cdc_msg_send(conn); | 538 | rc = smcd_cdc_msg_send(conn); |
542 | 539 | ||
@@ -598,7 +595,8 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force) | |||
598 | if (to_confirm > conn->rmbe_update_limit) { | 595 | if (to_confirm > conn->rmbe_update_limit) { |
599 | smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); | 596 | smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); |
600 | sender_free = conn->rmb_desc->len - | 597 | sender_free = conn->rmb_desc->len - |
601 | smc_curs_diff(conn->rmb_desc->len, &prod, &cfed); | 598 | smc_curs_diff_large(conn->rmb_desc->len, |
599 | &cfed, &prod); | ||
602 | } | 600 | } |
603 | 601 | ||
604 | if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || | 602 | if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || |
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index c2694750a6a8..253aa75dc2b6 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c | |||
@@ -160,6 +160,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) | |||
160 | * @link: Pointer to smc_link used to later send the message. | 160 | * @link: Pointer to smc_link used to later send the message. |
161 | * @handler: Send completion handler function pointer. | 161 | * @handler: Send completion handler function pointer. |
162 | * @wr_buf: Out value returns pointer to message buffer. | 162 | * @wr_buf: Out value returns pointer to message buffer. |
163 | * @wr_rdma_buf: Out value returns pointer to rdma work request. | ||
163 | * @wr_pend_priv: Out value returns pointer serving as handler context. | 164 | * @wr_pend_priv: Out value returns pointer serving as handler context. |
164 | * | 165 | * |
165 | * Return: 0 on success, or -errno on error. | 166 | * Return: 0 on success, or -errno on error. |
@@ -167,6 +168,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) | |||
167 | int smc_wr_tx_get_free_slot(struct smc_link *link, | 168 | int smc_wr_tx_get_free_slot(struct smc_link *link, |
168 | smc_wr_tx_handler handler, | 169 | smc_wr_tx_handler handler, |
169 | struct smc_wr_buf **wr_buf, | 170 | struct smc_wr_buf **wr_buf, |
171 | struct smc_rdma_wr **wr_rdma_buf, | ||
170 | struct smc_wr_tx_pend_priv **wr_pend_priv) | 172 | struct smc_wr_tx_pend_priv **wr_pend_priv) |
171 | { | 173 | { |
172 | struct smc_wr_tx_pend *wr_pend; | 174 | struct smc_wr_tx_pend *wr_pend; |
@@ -204,6 +206,8 @@ int smc_wr_tx_get_free_slot(struct smc_link *link, | |||
204 | wr_ib = &link->wr_tx_ibs[idx]; | 206 | wr_ib = &link->wr_tx_ibs[idx]; |
205 | wr_ib->wr_id = wr_id; | 207 | wr_ib->wr_id = wr_id; |
206 | *wr_buf = &link->wr_tx_bufs[idx]; | 208 | *wr_buf = &link->wr_tx_bufs[idx]; |
209 | if (wr_rdma_buf) | ||
210 | *wr_rdma_buf = &link->wr_tx_rdmas[idx]; | ||
207 | *wr_pend_priv = &wr_pend->priv; | 211 | *wr_pend_priv = &wr_pend->priv; |
208 | return 0; | 212 | return 0; |
209 | } | 213 | } |
@@ -218,10 +222,10 @@ int smc_wr_tx_put_slot(struct smc_link *link, | |||
218 | u32 idx = pend->idx; | 222 | u32 idx = pend->idx; |
219 | 223 | ||
220 | /* clear the full struct smc_wr_tx_pend including .priv */ | 224 | /* clear the full struct smc_wr_tx_pend including .priv */ |
221 | memset(&link->wr_tx_pends[pend->idx], 0, | 225 | memset(&link->wr_tx_pends[idx], 0, |
222 | sizeof(link->wr_tx_pends[pend->idx])); | 226 | sizeof(link->wr_tx_pends[idx])); |
223 | memset(&link->wr_tx_bufs[pend->idx], 0, | 227 | memset(&link->wr_tx_bufs[idx], 0, |
224 | sizeof(link->wr_tx_bufs[pend->idx])); | 228 | sizeof(link->wr_tx_bufs[idx])); |
225 | test_and_clear_bit(idx, link->wr_tx_mask); | 229 | test_and_clear_bit(idx, link->wr_tx_mask); |
226 | return 1; | 230 | return 1; |
227 | } | 231 | } |
@@ -465,12 +469,26 @@ static void smc_wr_init_sge(struct smc_link *lnk) | |||
465 | lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; | 469 | lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; |
466 | lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; | 470 | lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; |
467 | lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; | 471 | lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; |
472 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey = | ||
473 | lnk->roce_pd->local_dma_lkey; | ||
474 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey = | ||
475 | lnk->roce_pd->local_dma_lkey; | ||
476 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey = | ||
477 | lnk->roce_pd->local_dma_lkey; | ||
478 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey = | ||
479 | lnk->roce_pd->local_dma_lkey; | ||
468 | lnk->wr_tx_ibs[i].next = NULL; | 480 | lnk->wr_tx_ibs[i].next = NULL; |
469 | lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; | 481 | lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; |
470 | lnk->wr_tx_ibs[i].num_sge = 1; | 482 | lnk->wr_tx_ibs[i].num_sge = 1; |
471 | lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; | 483 | lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; |
472 | lnk->wr_tx_ibs[i].send_flags = | 484 | lnk->wr_tx_ibs[i].send_flags = |
473 | IB_SEND_SIGNALED | IB_SEND_SOLICITED; | 485 | IB_SEND_SIGNALED | IB_SEND_SOLICITED; |
486 | lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE; | ||
487 | lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE; | ||
488 | lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list = | ||
489 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge; | ||
490 | lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list = | ||
491 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge; | ||
474 | } | 492 | } |
475 | for (i = 0; i < lnk->wr_rx_cnt; i++) { | 493 | for (i = 0; i < lnk->wr_rx_cnt; i++) { |
476 | lnk->wr_rx_sges[i].addr = | 494 | lnk->wr_rx_sges[i].addr = |
@@ -521,8 +539,12 @@ void smc_wr_free_link_mem(struct smc_link *lnk) | |||
521 | lnk->wr_tx_mask = NULL; | 539 | lnk->wr_tx_mask = NULL; |
522 | kfree(lnk->wr_tx_sges); | 540 | kfree(lnk->wr_tx_sges); |
523 | lnk->wr_tx_sges = NULL; | 541 | lnk->wr_tx_sges = NULL; |
542 | kfree(lnk->wr_tx_rdma_sges); | ||
543 | lnk->wr_tx_rdma_sges = NULL; | ||
524 | kfree(lnk->wr_rx_sges); | 544 | kfree(lnk->wr_rx_sges); |
525 | lnk->wr_rx_sges = NULL; | 545 | lnk->wr_rx_sges = NULL; |
546 | kfree(lnk->wr_tx_rdmas); | ||
547 | lnk->wr_tx_rdmas = NULL; | ||
526 | kfree(lnk->wr_rx_ibs); | 548 | kfree(lnk->wr_rx_ibs); |
527 | lnk->wr_rx_ibs = NULL; | 549 | lnk->wr_rx_ibs = NULL; |
528 | kfree(lnk->wr_tx_ibs); | 550 | kfree(lnk->wr_tx_ibs); |
@@ -552,10 +574,20 @@ int smc_wr_alloc_link_mem(struct smc_link *link) | |||
552 | GFP_KERNEL); | 574 | GFP_KERNEL); |
553 | if (!link->wr_rx_ibs) | 575 | if (!link->wr_rx_ibs) |
554 | goto no_mem_wr_tx_ibs; | 576 | goto no_mem_wr_tx_ibs; |
577 | link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT, | ||
578 | sizeof(link->wr_tx_rdmas[0]), | ||
579 | GFP_KERNEL); | ||
580 | if (!link->wr_tx_rdmas) | ||
581 | goto no_mem_wr_rx_ibs; | ||
582 | link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT, | ||
583 | sizeof(link->wr_tx_rdma_sges[0]), | ||
584 | GFP_KERNEL); | ||
585 | if (!link->wr_tx_rdma_sges) | ||
586 | goto no_mem_wr_tx_rdmas; | ||
555 | link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), | 587 | link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), |
556 | GFP_KERNEL); | 588 | GFP_KERNEL); |
557 | if (!link->wr_tx_sges) | 589 | if (!link->wr_tx_sges) |
558 | goto no_mem_wr_rx_ibs; | 590 | goto no_mem_wr_tx_rdma_sges; |
559 | link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, | 591 | link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, |
560 | sizeof(link->wr_rx_sges[0]), | 592 | sizeof(link->wr_rx_sges[0]), |
561 | GFP_KERNEL); | 593 | GFP_KERNEL); |
@@ -579,6 +611,10 @@ no_mem_wr_rx_sges: | |||
579 | kfree(link->wr_rx_sges); | 611 | kfree(link->wr_rx_sges); |
580 | no_mem_wr_tx_sges: | 612 | no_mem_wr_tx_sges: |
581 | kfree(link->wr_tx_sges); | 613 | kfree(link->wr_tx_sges); |
614 | no_mem_wr_tx_rdma_sges: | ||
615 | kfree(link->wr_tx_rdma_sges); | ||
616 | no_mem_wr_tx_rdmas: | ||
617 | kfree(link->wr_tx_rdmas); | ||
582 | no_mem_wr_rx_ibs: | 618 | no_mem_wr_rx_ibs: |
583 | kfree(link->wr_rx_ibs); | 619 | kfree(link->wr_rx_ibs); |
584 | no_mem_wr_tx_ibs: | 620 | no_mem_wr_tx_ibs: |
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index 1d85bb14fd6f..09bf32fd3959 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h | |||
@@ -85,6 +85,7 @@ void smc_wr_add_dev(struct smc_ib_device *smcibdev); | |||
85 | 85 | ||
86 | int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, | 86 | int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, |
87 | struct smc_wr_buf **wr_buf, | 87 | struct smc_wr_buf **wr_buf, |
88 | struct smc_rdma_wr **wrs, | ||
88 | struct smc_wr_tx_pend_priv **wr_pend_priv); | 89 | struct smc_wr_tx_pend_priv **wr_pend_priv); |
89 | int smc_wr_tx_put_slot(struct smc_link *link, | 90 | int smc_wr_tx_put_slot(struct smc_link *link, |
90 | struct smc_wr_tx_pend_priv *wr_pend_priv); | 91 | struct smc_wr_tx_pend_priv *wr_pend_priv); |
diff --git a/net/socket.c b/net/socket.c index e89884e2197b..d80d87a395ea 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) | |||
941 | EXPORT_SYMBOL(dlci_ioctl_set); | 941 | EXPORT_SYMBOL(dlci_ioctl_set); |
942 | 942 | ||
943 | static long sock_do_ioctl(struct net *net, struct socket *sock, | 943 | static long sock_do_ioctl(struct net *net, struct socket *sock, |
944 | unsigned int cmd, unsigned long arg, | 944 | unsigned int cmd, unsigned long arg) |
945 | unsigned int ifreq_size) | ||
946 | { | 945 | { |
947 | int err; | 946 | int err; |
948 | void __user *argp = (void __user *)arg; | 947 | void __user *argp = (void __user *)arg; |
@@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock, | |||
968 | } else { | 967 | } else { |
969 | struct ifreq ifr; | 968 | struct ifreq ifr; |
970 | bool need_copyout; | 969 | bool need_copyout; |
971 | if (copy_from_user(&ifr, argp, ifreq_size)) | 970 | if (copy_from_user(&ifr, argp, sizeof(struct ifreq))) |
972 | return -EFAULT; | 971 | return -EFAULT; |
973 | err = dev_ioctl(net, cmd, &ifr, &need_copyout); | 972 | err = dev_ioctl(net, cmd, &ifr, &need_copyout); |
974 | if (!err && need_copyout) | 973 | if (!err && need_copyout) |
975 | if (copy_to_user(argp, &ifr, ifreq_size)) | 974 | if (copy_to_user(argp, &ifr, sizeof(struct ifreq))) |
976 | return -EFAULT; | 975 | return -EFAULT; |
977 | } | 976 | } |
978 | return err; | 977 | return err; |
@@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
1071 | err = open_related_ns(&net->ns, get_net_ns); | 1070 | err = open_related_ns(&net->ns, get_net_ns); |
1072 | break; | 1071 | break; |
1073 | default: | 1072 | default: |
1074 | err = sock_do_ioctl(net, sock, cmd, arg, | 1073 | err = sock_do_ioctl(net, sock, cmd, arg); |
1075 | sizeof(struct ifreq)); | ||
1076 | break; | 1074 | break; |
1077 | } | 1075 | } |
1078 | return err; | 1076 | return err; |
@@ -2780,8 +2778,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock, | |||
2780 | int err; | 2778 | int err; |
2781 | 2779 | ||
2782 | set_fs(KERNEL_DS); | 2780 | set_fs(KERNEL_DS); |
2783 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv, | 2781 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); |
2784 | sizeof(struct compat_ifreq)); | ||
2785 | set_fs(old_fs); | 2782 | set_fs(old_fs); |
2786 | if (!err) | 2783 | if (!err) |
2787 | err = compat_put_timeval(&ktv, up); | 2784 | err = compat_put_timeval(&ktv, up); |
@@ -2797,8 +2794,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock, | |||
2797 | int err; | 2794 | int err; |
2798 | 2795 | ||
2799 | set_fs(KERNEL_DS); | 2796 | set_fs(KERNEL_DS); |
2800 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts, | 2797 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); |
2801 | sizeof(struct compat_ifreq)); | ||
2802 | set_fs(old_fs); | 2798 | set_fs(old_fs); |
2803 | if (!err) | 2799 | if (!err) |
2804 | err = compat_put_timespec(&kts, up); | 2800 | err = compat_put_timespec(&kts, up); |
@@ -2994,6 +2990,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd, | |||
2994 | return dev_ioctl(net, cmd, &ifreq, NULL); | 2990 | return dev_ioctl(net, cmd, &ifreq, NULL); |
2995 | } | 2991 | } |
2996 | 2992 | ||
2993 | static int compat_ifreq_ioctl(struct net *net, struct socket *sock, | ||
2994 | unsigned int cmd, | ||
2995 | struct compat_ifreq __user *uifr32) | ||
2996 | { | ||
2997 | struct ifreq __user *uifr; | ||
2998 | int err; | ||
2999 | |||
3000 | /* Handle the fact that while struct ifreq has the same *layout* on | ||
3001 | * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data, | ||
3002 | * which are handled elsewhere, it still has different *size* due to | ||
3003 | * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit, | ||
3004 | * resulting in struct ifreq being 32 and 40 bytes respectively). | ||
3005 | * As a result, if the struct happens to be at the end of a page and | ||
3006 | * the next page isn't readable/writable, we get a fault. To prevent | ||
3007 | * that, copy back and forth to the full size. | ||
3008 | */ | ||
3009 | |||
3010 | uifr = compat_alloc_user_space(sizeof(*uifr)); | ||
3011 | if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) | ||
3012 | return -EFAULT; | ||
3013 | |||
3014 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); | ||
3015 | |||
3016 | if (!err) { | ||
3017 | switch (cmd) { | ||
3018 | case SIOCGIFFLAGS: | ||
3019 | case SIOCGIFMETRIC: | ||
3020 | case SIOCGIFMTU: | ||
3021 | case SIOCGIFMEM: | ||
3022 | case SIOCGIFHWADDR: | ||
3023 | case SIOCGIFINDEX: | ||
3024 | case SIOCGIFADDR: | ||
3025 | case SIOCGIFBRDADDR: | ||
3026 | case SIOCGIFDSTADDR: | ||
3027 | case SIOCGIFNETMASK: | ||
3028 | case SIOCGIFPFLAGS: | ||
3029 | case SIOCGIFTXQLEN: | ||
3030 | case SIOCGMIIPHY: | ||
3031 | case SIOCGMIIREG: | ||
3032 | case SIOCGIFNAME: | ||
3033 | if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) | ||
3034 | err = -EFAULT; | ||
3035 | break; | ||
3036 | } | ||
3037 | } | ||
3038 | return err; | ||
3039 | } | ||
3040 | |||
2997 | static int compat_sioc_ifmap(struct net *net, unsigned int cmd, | 3041 | static int compat_sioc_ifmap(struct net *net, unsigned int cmd, |
2998 | struct compat_ifreq __user *uifr32) | 3042 | struct compat_ifreq __user *uifr32) |
2999 | { | 3043 | { |
@@ -3109,8 +3153,7 @@ static int routing_ioctl(struct net *net, struct socket *sock, | |||
3109 | } | 3153 | } |
3110 | 3154 | ||
3111 | set_fs(KERNEL_DS); | 3155 | set_fs(KERNEL_DS); |
3112 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r, | 3156 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); |
3113 | sizeof(struct compat_ifreq)); | ||
3114 | set_fs(old_fs); | 3157 | set_fs(old_fs); |
3115 | 3158 | ||
3116 | out: | 3159 | out: |
@@ -3210,21 +3253,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, | |||
3210 | case SIOCSIFTXQLEN: | 3253 | case SIOCSIFTXQLEN: |
3211 | case SIOCBRADDIF: | 3254 | case SIOCBRADDIF: |
3212 | case SIOCBRDELIF: | 3255 | case SIOCBRDELIF: |
3256 | case SIOCGIFNAME: | ||
3213 | case SIOCSIFNAME: | 3257 | case SIOCSIFNAME: |
3214 | case SIOCGMIIPHY: | 3258 | case SIOCGMIIPHY: |
3215 | case SIOCGMIIREG: | 3259 | case SIOCGMIIREG: |
3216 | case SIOCSMIIREG: | 3260 | case SIOCSMIIREG: |
3217 | case SIOCSARP: | ||
3218 | case SIOCGARP: | ||
3219 | case SIOCDARP: | ||
3220 | case SIOCATMARK: | ||
3221 | case SIOCBONDENSLAVE: | 3261 | case SIOCBONDENSLAVE: |
3222 | case SIOCBONDRELEASE: | 3262 | case SIOCBONDRELEASE: |
3223 | case SIOCBONDSETHWADDR: | 3263 | case SIOCBONDSETHWADDR: |
3224 | case SIOCBONDCHANGEACTIVE: | 3264 | case SIOCBONDCHANGEACTIVE: |
3225 | case SIOCGIFNAME: | 3265 | return compat_ifreq_ioctl(net, sock, cmd, argp); |
3226 | return sock_do_ioctl(net, sock, cmd, arg, | 3266 | |
3227 | sizeof(struct compat_ifreq)); | 3267 | case SIOCSARP: |
3268 | case SIOCGARP: | ||
3269 | case SIOCDARP: | ||
3270 | case SIOCATMARK: | ||
3271 | return sock_do_ioctl(net, sock, cmd, arg); | ||
3228 | } | 3272 | } |
3229 | 3273 | ||
3230 | return -ENOIOCTLCMD; | 3274 | return -ENOIOCTLCMD; |
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 5d3cce9e8744..15eb5d3d4750 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c | |||
@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void) | |||
75 | { | 75 | { |
76 | struct virtio_vsock *vsock = virtio_vsock_get(); | 76 | struct virtio_vsock *vsock = virtio_vsock_get(); |
77 | 77 | ||
78 | if (!vsock) | ||
79 | return VMADDR_CID_ANY; | ||
80 | |||
78 | return vsock->guest_cid; | 81 | return vsock->guest_cid; |
79 | } | 82 | } |
80 | 83 | ||
@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev) | |||
584 | 587 | ||
585 | virtio_vsock_update_guest_cid(vsock); | 588 | virtio_vsock_update_guest_cid(vsock); |
586 | 589 | ||
587 | ret = vsock_core_init(&virtio_transport.transport); | ||
588 | if (ret < 0) | ||
589 | goto out_vqs; | ||
590 | |||
591 | vsock->rx_buf_nr = 0; | 590 | vsock->rx_buf_nr = 0; |
592 | vsock->rx_buf_max_nr = 0; | 591 | vsock->rx_buf_max_nr = 0; |
593 | atomic_set(&vsock->queued_replies, 0); | 592 | atomic_set(&vsock->queued_replies, 0); |
@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev) | |||
618 | mutex_unlock(&the_virtio_vsock_mutex); | 617 | mutex_unlock(&the_virtio_vsock_mutex); |
619 | return 0; | 618 | return 0; |
620 | 619 | ||
621 | out_vqs: | ||
622 | vsock->vdev->config->del_vqs(vsock->vdev); | ||
623 | out: | 620 | out: |
624 | kfree(vsock); | 621 | kfree(vsock); |
625 | mutex_unlock(&the_virtio_vsock_mutex); | 622 | mutex_unlock(&the_virtio_vsock_mutex); |
@@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev) | |||
637 | flush_work(&vsock->event_work); | 634 | flush_work(&vsock->event_work); |
638 | flush_work(&vsock->send_pkt_work); | 635 | flush_work(&vsock->send_pkt_work); |
639 | 636 | ||
637 | /* Reset all connected sockets when the device disappear */ | ||
638 | vsock_for_each_connected_socket(virtio_vsock_reset_sock); | ||
639 | |||
640 | vdev->config->reset(vdev); | 640 | vdev->config->reset(vdev); |
641 | 641 | ||
642 | mutex_lock(&vsock->rx_lock); | 642 | mutex_lock(&vsock->rx_lock); |
@@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev) | |||
669 | 669 | ||
670 | mutex_lock(&the_virtio_vsock_mutex); | 670 | mutex_lock(&the_virtio_vsock_mutex); |
671 | the_virtio_vsock = NULL; | 671 | the_virtio_vsock = NULL; |
672 | vsock_core_exit(); | ||
673 | mutex_unlock(&the_virtio_vsock_mutex); | 672 | mutex_unlock(&the_virtio_vsock_mutex); |
674 | 673 | ||
675 | vdev->config->del_vqs(vdev); | 674 | vdev->config->del_vqs(vdev); |
@@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void) | |||
702 | virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0); | 701 | virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0); |
703 | if (!virtio_vsock_workqueue) | 702 | if (!virtio_vsock_workqueue) |
704 | return -ENOMEM; | 703 | return -ENOMEM; |
704 | |||
705 | ret = register_virtio_driver(&virtio_vsock_driver); | 705 | ret = register_virtio_driver(&virtio_vsock_driver); |
706 | if (ret) | 706 | if (ret) |
707 | destroy_workqueue(virtio_vsock_workqueue); | 707 | goto out_wq; |
708 | |||
709 | ret = vsock_core_init(&virtio_transport.transport); | ||
710 | if (ret) | ||
711 | goto out_vdr; | ||
712 | |||
713 | return 0; | ||
714 | |||
715 | out_vdr: | ||
716 | unregister_virtio_driver(&virtio_vsock_driver); | ||
717 | out_wq: | ||
718 | destroy_workqueue(virtio_vsock_workqueue); | ||
708 | return ret; | 719 | return ret; |
720 | |||
709 | } | 721 | } |
710 | 722 | ||
711 | static void __exit virtio_vsock_exit(void) | 723 | static void __exit virtio_vsock_exit(void) |
712 | { | 724 | { |
725 | vsock_core_exit(); | ||
713 | unregister_virtio_driver(&virtio_vsock_driver); | 726 | unregister_virtio_driver(&virtio_vsock_driver); |
714 | destroy_workqueue(virtio_vsock_workqueue); | 727 | destroy_workqueue(virtio_vsock_workqueue); |
715 | } | 728 | } |
diff --git a/net/wireless/ap.c b/net/wireless/ap.c index 882d97bdc6bf..550ac9d827fe 100644 --- a/net/wireless/ap.c +++ b/net/wireless/ap.c | |||
@@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, | |||
41 | cfg80211_sched_dfs_chan_update(rdev); | 41 | cfg80211_sched_dfs_chan_update(rdev); |
42 | } | 42 | } |
43 | 43 | ||
44 | schedule_work(&cfg80211_disconnect_work); | ||
45 | |||
44 | return err; | 46 | return err; |
45 | } | 47 | } |
46 | 48 | ||
diff --git a/net/wireless/core.h b/net/wireless/core.h index c5d6f3418601..f6b40563dc63 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -445,6 +445,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev); | |||
445 | bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, | 445 | bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, |
446 | u32 center_freq_khz, u32 bw_khz); | 446 | u32 center_freq_khz, u32 bw_khz); |
447 | 447 | ||
448 | extern struct work_struct cfg80211_disconnect_work; | ||
449 | |||
448 | /** | 450 | /** |
449 | * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable | 451 | * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable |
450 | * @wiphy: the wiphy to validate against | 452 | * @wiphy: the wiphy to validate against |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f741d8376a46..7d34cb884840 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -667,7 +667,7 @@ static void disconnect_work(struct work_struct *work) | |||
667 | rtnl_unlock(); | 667 | rtnl_unlock(); |
668 | } | 668 | } |
669 | 669 | ||
670 | static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); | 670 | DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); |
671 | 671 | ||
672 | 672 | ||
673 | /* | 673 | /* |
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 897483457bf0..f7261fad45c1 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c | |||
@@ -297,10 +297,8 @@ char *get_fdinfo(int fd, const char *key) | |||
297 | snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd); | 297 | snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd); |
298 | 298 | ||
299 | fdi = fopen(path, "r"); | 299 | fdi = fopen(path, "r"); |
300 | if (!fdi) { | 300 | if (!fdi) |
301 | p_err("can't open fdinfo: %s", strerror(errno)); | ||
302 | return NULL; | 301 | return NULL; |
303 | } | ||
304 | 302 | ||
305 | while ((n = getline(&line, &line_n, fdi)) > 0) { | 303 | while ((n = getline(&line, &line_n, fdi)) > 0) { |
306 | char *value; | 304 | char *value; |
@@ -313,7 +311,6 @@ char *get_fdinfo(int fd, const char *key) | |||
313 | 311 | ||
314 | value = strchr(line, '\t'); | 312 | value = strchr(line, '\t'); |
315 | if (!value || !value[1]) { | 313 | if (!value || !value[1]) { |
316 | p_err("malformed fdinfo!?"); | ||
317 | free(line); | 314 | free(line); |
318 | return NULL; | 315 | return NULL; |
319 | } | 316 | } |
@@ -326,7 +323,6 @@ char *get_fdinfo(int fd, const char *key) | |||
326 | return line; | 323 | return line; |
327 | } | 324 | } |
328 | 325 | ||
329 | p_err("key '%s' not found in fdinfo", key); | ||
330 | free(line); | 326 | free(line); |
331 | fclose(fdi); | 327 | fclose(fdi); |
332 | return NULL; | 328 | return NULL; |
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 2037e3dc864b..1ef1ee2280a2 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c | |||
@@ -347,6 +347,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val, | |||
347 | return argv + i; | 347 | return argv + i; |
348 | } | 348 | } |
349 | 349 | ||
350 | /* on per cpu maps we must copy the provided value on all value instances */ | ||
351 | static void fill_per_cpu_value(struct bpf_map_info *info, void *value) | ||
352 | { | ||
353 | unsigned int i, n, step; | ||
354 | |||
355 | if (!map_is_per_cpu(info->type)) | ||
356 | return; | ||
357 | |||
358 | n = get_possible_cpus(); | ||
359 | step = round_up(info->value_size, 8); | ||
360 | for (i = 1; i < n; i++) | ||
361 | memcpy(value + i * step, value, info->value_size); | ||
362 | } | ||
363 | |||
350 | static int parse_elem(char **argv, struct bpf_map_info *info, | 364 | static int parse_elem(char **argv, struct bpf_map_info *info, |
351 | void *key, void *value, __u32 key_size, __u32 value_size, | 365 | void *key, void *value, __u32 key_size, __u32 value_size, |
352 | __u32 *flags, __u32 **value_fd) | 366 | __u32 *flags, __u32 **value_fd) |
@@ -426,6 +440,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info, | |||
426 | argv = parse_bytes(argv, "value", value, value_size); | 440 | argv = parse_bytes(argv, "value", value, value_size); |
427 | if (!argv) | 441 | if (!argv) |
428 | return -1; | 442 | return -1; |
443 | |||
444 | fill_per_cpu_value(info, value); | ||
429 | } | 445 | } |
430 | 446 | ||
431 | return parse_elem(argv, info, key, NULL, key_size, value_size, | 447 | return parse_elem(argv, info, key, NULL, key_size, value_size, |
@@ -497,10 +513,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info) | |||
497 | jsonw_uint_field(json_wtr, "owner_prog_type", | 513 | jsonw_uint_field(json_wtr, "owner_prog_type", |
498 | prog_type); | 514 | prog_type); |
499 | } | 515 | } |
500 | if (atoi(owner_jited)) | 516 | if (owner_jited) |
501 | jsonw_bool_field(json_wtr, "owner_jited", true); | 517 | jsonw_bool_field(json_wtr, "owner_jited", |
502 | else | 518 | !!atoi(owner_jited)); |
503 | jsonw_bool_field(json_wtr, "owner_jited", false); | ||
504 | 519 | ||
505 | free(owner_prog_type); | 520 | free(owner_prog_type); |
506 | free(owner_jited); | 521 | free(owner_jited); |
@@ -553,7 +568,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) | |||
553 | char *owner_prog_type = get_fdinfo(fd, "owner_prog_type"); | 568 | char *owner_prog_type = get_fdinfo(fd, "owner_prog_type"); |
554 | char *owner_jited = get_fdinfo(fd, "owner_jited"); | 569 | char *owner_jited = get_fdinfo(fd, "owner_jited"); |
555 | 570 | ||
556 | printf("\n\t"); | 571 | if (owner_prog_type || owner_jited) |
572 | printf("\n\t"); | ||
557 | if (owner_prog_type) { | 573 | if (owner_prog_type) { |
558 | unsigned int prog_type = atoi(owner_prog_type); | 574 | unsigned int prog_type = atoi(owner_prog_type); |
559 | 575 | ||
@@ -563,10 +579,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) | |||
563 | else | 579 | else |
564 | printf("owner_prog_type %d ", prog_type); | 580 | printf("owner_prog_type %d ", prog_type); |
565 | } | 581 | } |
566 | if (atoi(owner_jited)) | 582 | if (owner_jited) |
567 | printf("owner jited"); | 583 | printf("owner%s jited", |
568 | else | 584 | atoi(owner_jited) ? "" : " not"); |
569 | printf("owner not jited"); | ||
570 | 585 | ||
571 | free(owner_prog_type); | 586 | free(owner_prog_type); |
572 | free(owner_jited); | 587 | free(owner_jited); |
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 2d1bb7d6ff51..b54ed82b9589 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c | |||
@@ -78,13 +78,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size) | |||
78 | 78 | ||
79 | static int prog_fd_by_tag(unsigned char *tag) | 79 | static int prog_fd_by_tag(unsigned char *tag) |
80 | { | 80 | { |
81 | struct bpf_prog_info info = {}; | ||
82 | __u32 len = sizeof(info); | ||
83 | unsigned int id = 0; | 81 | unsigned int id = 0; |
84 | int err; | 82 | int err; |
85 | int fd; | 83 | int fd; |
86 | 84 | ||
87 | while (true) { | 85 | while (true) { |
86 | struct bpf_prog_info info = {}; | ||
87 | __u32 len = sizeof(info); | ||
88 | |||
88 | err = bpf_prog_get_next_id(id, &id); | 89 | err = bpf_prog_get_next_id(id, &id); |
89 | if (err) { | 90 | if (err) { |
90 | p_err("%s", strerror(errno)); | 91 | p_err("%s", strerror(errno)); |
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index 315a44fa32af..84fd6f1bf33e 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h | |||
@@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void) | |||
13 | unsigned int start, end, possible_cpus = 0; | 13 | unsigned int start, end, possible_cpus = 0; |
14 | char buff[128]; | 14 | char buff[128]; |
15 | FILE *fp; | 15 | FILE *fp; |
16 | int n; | 16 | int len, n, i, j = 0; |
17 | 17 | ||
18 | fp = fopen(fcpu, "r"); | 18 | fp = fopen(fcpu, "r"); |
19 | if (!fp) { | 19 | if (!fp) { |
@@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void) | |||
21 | exit(1); | 21 | exit(1); |
22 | } | 22 | } |
23 | 23 | ||
24 | while (fgets(buff, sizeof(buff), fp)) { | 24 | if (!fgets(buff, sizeof(buff), fp)) { |
25 | n = sscanf(buff, "%u-%u", &start, &end); | 25 | printf("Failed to read %s!\n", fcpu); |
26 | if (n == 0) { | 26 | exit(1); |
27 | printf("Failed to retrieve # possible CPUs!\n"); | 27 | } |
28 | exit(1); | 28 | |
29 | } else if (n == 1) { | 29 | len = strlen(buff); |
30 | end = start; | 30 | for (i = 0; i <= len; i++) { |
31 | if (buff[i] == ',' || buff[i] == '\0') { | ||
32 | buff[i] = '\0'; | ||
33 | n = sscanf(&buff[j], "%u-%u", &start, &end); | ||
34 | if (n <= 0) { | ||
35 | printf("Failed to retrieve # possible CPUs!\n"); | ||
36 | exit(1); | ||
37 | } else if (n == 1) { | ||
38 | end = start; | ||
39 | } | ||
40 | possible_cpus += end - start + 1; | ||
41 | j = i + 1; | ||
31 | } | 42 | } |
32 | possible_cpus = start == 0 ? end + 1 : 0; | ||
33 | break; | ||
34 | } | 43 | } |
44 | |||
35 | fclose(fp); | 45 | fclose(fp); |
36 | 46 | ||
37 | return possible_cpus; | 47 | return possible_cpus; |
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index a0bd04befe87..91420fa83b08 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c | |||
@@ -1881,13 +1881,12 @@ static struct btf_raw_test raw_tests[] = { | |||
1881 | }, | 1881 | }, |
1882 | 1882 | ||
1883 | { | 1883 | { |
1884 | .descr = "func proto (CONST=>TYPEDEF=>FUNC_PROTO)", | 1884 | .descr = "func proto (TYPEDEF=>FUNC_PROTO)", |
1885 | .raw_types = { | 1885 | .raw_types = { |
1886 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | 1886 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ |
1887 | BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ | 1887 | BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ |
1888 | BTF_CONST_ENC(4), /* [3] */ | 1888 | BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */ |
1889 | BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [4] */ | 1889 | BTF_FUNC_PROTO_ENC(0, 2), /* [4] */ |
1890 | BTF_FUNC_PROTO_ENC(0, 2), /* [5] */ | ||
1891 | BTF_FUNC_PROTO_ARG_ENC(0, 1), | 1890 | BTF_FUNC_PROTO_ARG_ENC(0, 1), |
1892 | BTF_FUNC_PROTO_ARG_ENC(0, 2), | 1891 | BTF_FUNC_PROTO_ARG_ENC(0, 2), |
1893 | BTF_END_RAW, | 1892 | BTF_END_RAW, |
@@ -1901,8 +1900,6 @@ static struct btf_raw_test raw_tests[] = { | |||
1901 | .key_type_id = 1, | 1900 | .key_type_id = 1, |
1902 | .value_type_id = 1, | 1901 | .value_type_id = 1, |
1903 | .max_entries = 4, | 1902 | .max_entries = 4, |
1904 | .btf_load_err = true, | ||
1905 | .err_str = "Invalid type_id", | ||
1906 | }, | 1903 | }, |
1907 | 1904 | ||
1908 | { | 1905 | { |
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile index 47ed6cef93fb..c9ff2b47bd1c 100644 --- a/tools/testing/selftests/netfilter/Makefile +++ b/tools/testing/selftests/netfilter/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | # Makefile for netfilter selftests | 2 | # Makefile for netfilter selftests |
3 | 3 | ||
4 | TEST_PROGS := nft_trans_stress.sh | 4 | TEST_PROGS := nft_trans_stress.sh nft_nat.sh |
5 | 5 | ||
6 | include ../lib.mk | 6 | include ../lib.mk |
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config index 1017313e41a8..59caa8f71cd8 100644 --- a/tools/testing/selftests/netfilter/config +++ b/tools/testing/selftests/netfilter/config | |||
@@ -1,2 +1,2 @@ | |||
1 | CONFIG_NET_NS=y | 1 | CONFIG_NET_NS=y |
2 | NF_TABLES_INET=y | 2 | CONFIG_NF_TABLES_INET=y |
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh new file mode 100755 index 000000000000..8ec76681605c --- /dev/null +++ b/tools/testing/selftests/netfilter/nft_nat.sh | |||
@@ -0,0 +1,762 @@ | |||
1 | #!/bin/bash | ||
2 | # | ||
3 | # This test is for basic NAT functionality: snat, dnat, redirect, masquerade. | ||
4 | # | ||
5 | |||
6 | # Kselftest framework requirement - SKIP code is 4. | ||
7 | ksft_skip=4 | ||
8 | ret=0 | ||
9 | |||
10 | nft --version > /dev/null 2>&1 | ||
11 | if [ $? -ne 0 ];then | ||
12 | echo "SKIP: Could not run test without nft tool" | ||
13 | exit $ksft_skip | ||
14 | fi | ||
15 | |||
16 | ip -Version > /dev/null 2>&1 | ||
17 | if [ $? -ne 0 ];then | ||
18 | echo "SKIP: Could not run test without ip tool" | ||
19 | exit $ksft_skip | ||
20 | fi | ||
21 | |||
22 | ip netns add ns0 | ||
23 | ip netns add ns1 | ||
24 | ip netns add ns2 | ||
25 | |||
26 | ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 | ||
27 | ip link add veth1 netns ns0 type veth peer name eth0 netns ns2 | ||
28 | |||
29 | ip -net ns0 link set lo up | ||
30 | ip -net ns0 link set veth0 up | ||
31 | ip -net ns0 addr add 10.0.1.1/24 dev veth0 | ||
32 | ip -net ns0 addr add dead:1::1/64 dev veth0 | ||
33 | |||
34 | ip -net ns0 link set veth1 up | ||
35 | ip -net ns0 addr add 10.0.2.1/24 dev veth1 | ||
36 | ip -net ns0 addr add dead:2::1/64 dev veth1 | ||
37 | |||
38 | for i in 1 2; do | ||
39 | ip -net ns$i link set lo up | ||
40 | ip -net ns$i link set eth0 up | ||
41 | ip -net ns$i addr add 10.0.$i.99/24 dev eth0 | ||
42 | ip -net ns$i route add default via 10.0.$i.1 | ||
43 | ip -net ns$i addr add dead:$i::99/64 dev eth0 | ||
44 | ip -net ns$i route add default via dead:$i::1 | ||
45 | done | ||
46 | |||
47 | bad_counter() | ||
48 | { | ||
49 | local ns=$1 | ||
50 | local counter=$2 | ||
51 | local expect=$3 | ||
52 | |||
53 | echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2 | ||
54 | ip netns exec $ns nft list counter inet filter $counter 1>&2 | ||
55 | } | ||
56 | |||
57 | check_counters() | ||
58 | { | ||
59 | ns=$1 | ||
60 | local lret=0 | ||
61 | |||
62 | cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84") | ||
63 | if [ $? -ne 0 ]; then | ||
64 | bad_counter $ns ns0in "packets 1 bytes 84" | ||
65 | lret=1 | ||
66 | fi | ||
67 | cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84") | ||
68 | if [ $? -ne 0 ]; then | ||
69 | bad_counter $ns ns0out "packets 1 bytes 84" | ||
70 | lret=1 | ||
71 | fi | ||
72 | |||
73 | expect="packets 1 bytes 104" | ||
74 | cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect") | ||
75 | if [ $? -ne 0 ]; then | ||
76 | bad_counter $ns ns0in6 "$expect" | ||
77 | lret=1 | ||
78 | fi | ||
79 | cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect") | ||
80 | if [ $? -ne 0 ]; then | ||
81 | bad_counter $ns ns0out6 "$expect" | ||
82 | lret=1 | ||
83 | fi | ||
84 | |||
85 | return $lret | ||
86 | } | ||
87 | |||
88 | check_ns0_counters() | ||
89 | { | ||
90 | local ns=$1 | ||
91 | local lret=0 | ||
92 | |||
93 | cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0") | ||
94 | if [ $? -ne 0 ]; then | ||
95 | bad_counter ns0 ns0in "packets 0 bytes 0" | ||
96 | lret=1 | ||
97 | fi | ||
98 | |||
99 | cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0") | ||
100 | if [ $? -ne 0 ]; then | ||
101 | bad_counter ns0 ns0in6 "packets 0 bytes 0" | ||
102 | lret=1 | ||
103 | fi | ||
104 | |||
105 | cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0") | ||
106 | if [ $? -ne 0 ]; then | ||
107 | bad_counter ns0 ns0out "packets 0 bytes 0" | ||
108 | lret=1 | ||
109 | fi | ||
110 | cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0") | ||
111 | if [ $? -ne 0 ]; then | ||
112 | bad_counter ns0 ns0out6 "packets 0 bytes 0" | ||
113 | lret=1 | ||
114 | fi | ||
115 | |||
116 | for dir in "in" "out" ; do | ||
117 | expect="packets 1 bytes 84" | ||
118 | cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect") | ||
119 | if [ $? -ne 0 ]; then | ||
120 | bad_counter ns0 $ns$dir "$expect" | ||
121 | lret=1 | ||
122 | fi | ||
123 | |||
124 | expect="packets 1 bytes 104" | ||
125 | cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect") | ||
126 | if [ $? -ne 0 ]; then | ||
127 | bad_counter ns0 $ns$dir6 "$expect" | ||
128 | lret=1 | ||
129 | fi | ||
130 | done | ||
131 | |||
132 | return $lret | ||
133 | } | ||
134 | |||
135 | reset_counters() | ||
136 | { | ||
137 | for i in 0 1 2;do | ||
138 | ip netns exec ns$i nft reset counters inet > /dev/null | ||
139 | done | ||
140 | } | ||
141 | |||
142 | test_local_dnat6() | ||
143 | { | ||
144 | local lret=0 | ||
145 | ip netns exec ns0 nft -f - <<EOF | ||
146 | table ip6 nat { | ||
147 | chain output { | ||
148 | type nat hook output priority 0; policy accept; | ||
149 | ip6 daddr dead:1::99 dnat to dead:2::99 | ||
150 | } | ||
151 | } | ||
152 | EOF | ||
153 | if [ $? -ne 0 ]; then | ||
154 | echo "SKIP: Could not add add ip6 dnat hook" | ||
155 | return $ksft_skip | ||
156 | fi | ||
157 | |||
158 | # ping netns1, expect rewrite to netns2 | ||
159 | ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null | ||
160 | if [ $? -ne 0 ]; then | ||
161 | lret=1 | ||
162 | echo "ERROR: ping6 failed" | ||
163 | return $lret | ||
164 | fi | ||
165 | |||
166 | expect="packets 0 bytes 0" | ||
167 | for dir in "in6" "out6" ; do | ||
168 | cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
169 | if [ $? -ne 0 ]; then | ||
170 | bad_counter ns0 ns1$dir "$expect" | ||
171 | lret=1 | ||
172 | fi | ||
173 | done | ||
174 | |||
175 | expect="packets 1 bytes 104" | ||
176 | for dir in "in6" "out6" ; do | ||
177 | cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
178 | if [ $? -ne 0 ]; then | ||
179 | bad_counter ns0 ns2$dir "$expect" | ||
180 | lret=1 | ||
181 | fi | ||
182 | done | ||
183 | |||
184 | # expect 0 count in ns1 | ||
185 | expect="packets 0 bytes 0" | ||
186 | for dir in "in6" "out6" ; do | ||
187 | cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
188 | if [ $? -ne 0 ]; then | ||
189 | bad_counter ns1 ns0$dir "$expect" | ||
190 | lret=1 | ||
191 | fi | ||
192 | done | ||
193 | |||
194 | # expect 1 packet in ns2 | ||
195 | expect="packets 1 bytes 104" | ||
196 | for dir in "in6" "out6" ; do | ||
197 | cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
198 | if [ $? -ne 0 ]; then | ||
199 | bad_counter ns2 ns0$dir "$expect" | ||
200 | lret=1 | ||
201 | fi | ||
202 | done | ||
203 | |||
204 | test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2" | ||
205 | ip netns exec ns0 nft flush chain ip6 nat output | ||
206 | |||
207 | return $lret | ||
208 | } | ||
209 | |||
210 | test_local_dnat() | ||
211 | { | ||
212 | local lret=0 | ||
213 | ip netns exec ns0 nft -f - <<EOF | ||
214 | table ip nat { | ||
215 | chain output { | ||
216 | type nat hook output priority 0; policy accept; | ||
217 | ip daddr 10.0.1.99 dnat to 10.0.2.99 | ||
218 | } | ||
219 | } | ||
220 | EOF | ||
221 | # ping netns1, expect rewrite to netns2 | ||
222 | ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null | ||
223 | if [ $? -ne 0 ]; then | ||
224 | lret=1 | ||
225 | echo "ERROR: ping failed" | ||
226 | return $lret | ||
227 | fi | ||
228 | |||
229 | expect="packets 0 bytes 0" | ||
230 | for dir in "in" "out" ; do | ||
231 | cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
232 | if [ $? -ne 0 ]; then | ||
233 | bad_counter ns0 ns1$dir "$expect" | ||
234 | lret=1 | ||
235 | fi | ||
236 | done | ||
237 | |||
238 | expect="packets 1 bytes 84" | ||
239 | for dir in "in" "out" ; do | ||
240 | cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
241 | if [ $? -ne 0 ]; then | ||
242 | bad_counter ns0 ns2$dir "$expect" | ||
243 | lret=1 | ||
244 | fi | ||
245 | done | ||
246 | |||
247 | # expect 0 count in ns1 | ||
248 | expect="packets 0 bytes 0" | ||
249 | for dir in "in" "out" ; do | ||
250 | cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
251 | if [ $? -ne 0 ]; then | ||
252 | bad_counter ns1 ns0$dir "$expect" | ||
253 | lret=1 | ||
254 | fi | ||
255 | done | ||
256 | |||
257 | # expect 1 packet in ns2 | ||
258 | expect="packets 1 bytes 84" | ||
259 | for dir in "in" "out" ; do | ||
260 | cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
261 | if [ $? -ne 0 ]; then | ||
262 | bad_counter ns2 ns0$dir "$expect" | ||
263 | lret=1 | ||
264 | fi | ||
265 | done | ||
266 | |||
267 | test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2" | ||
268 | |||
269 | ip netns exec ns0 nft flush chain ip nat output | ||
270 | |||
271 | reset_counters | ||
272 | ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null | ||
273 | if [ $? -ne 0 ]; then | ||
274 | lret=1 | ||
275 | echo "ERROR: ping failed" | ||
276 | return $lret | ||
277 | fi | ||
278 | |||
279 | expect="packets 1 bytes 84" | ||
280 | for dir in "in" "out" ; do | ||
281 | cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
282 | if [ $? -ne 0 ]; then | ||
283 | bad_counter ns1 ns1$dir "$expect" | ||
284 | lret=1 | ||
285 | fi | ||
286 | done | ||
287 | expect="packets 0 bytes 0" | ||
288 | for dir in "in" "out" ; do | ||
289 | cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
290 | if [ $? -ne 0 ]; then | ||
291 | bad_counter ns0 ns2$dir "$expect" | ||
292 | lret=1 | ||
293 | fi | ||
294 | done | ||
295 | |||
296 | # expect 1 count in ns1 | ||
297 | expect="packets 1 bytes 84" | ||
298 | for dir in "in" "out" ; do | ||
299 | cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
300 | if [ $? -ne 0 ]; then | ||
301 | bad_counter ns0 ns0$dir "$expect" | ||
302 | lret=1 | ||
303 | fi | ||
304 | done | ||
305 | |||
306 | # expect 0 packet in ns2 | ||
307 | expect="packets 0 bytes 0" | ||
308 | for dir in "in" "out" ; do | ||
309 | cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
310 | if [ $? -ne 0 ]; then | ||
311 | bad_counter ns2 ns2$dir "$expect" | ||
312 | lret=1 | ||
313 | fi | ||
314 | done | ||
315 | |||
316 | test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush" | ||
317 | |||
318 | return $lret | ||
319 | } | ||
320 | |||
321 | |||
322 | test_masquerade6() | ||
323 | { | ||
324 | local lret=0 | ||
325 | |||
326 | ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null | ||
327 | |||
328 | ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 | ||
329 | if [ $? -ne 0 ] ; then | ||
330 | echo "ERROR: cannot ping ns1 from ns2 via ipv6" | ||
331 | return 1 | ||
332 | lret=1 | ||
333 | fi | ||
334 | |||
335 | expect="packets 1 bytes 104" | ||
336 | for dir in "in6" "out6" ; do | ||
337 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
338 | if [ $? -ne 0 ]; then | ||
339 | bad_counter ns1 ns2$dir "$expect" | ||
340 | lret=1 | ||
341 | fi | ||
342 | |||
343 | cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
344 | if [ $? -ne 0 ]; then | ||
345 | bad_counter ns2 ns1$dir "$expect" | ||
346 | lret=1 | ||
347 | fi | ||
348 | done | ||
349 | |||
350 | reset_counters | ||
351 | |||
352 | # add masquerading rule | ||
353 | ip netns exec ns0 nft -f - <<EOF | ||
354 | table ip6 nat { | ||
355 | chain postrouting { | ||
356 | type nat hook postrouting priority 0; policy accept; | ||
357 | meta oif veth0 masquerade | ||
358 | } | ||
359 | } | ||
360 | EOF | ||
361 | ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 | ||
362 | if [ $? -ne 0 ] ; then | ||
363 | echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading" | ||
364 | lret=1 | ||
365 | fi | ||
366 | |||
367 | # ns1 should have seen packets from ns0, due to masquerade | ||
368 | expect="packets 1 bytes 104" | ||
369 | for dir in "in6" "out6" ; do | ||
370 | |||
371 | cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
372 | if [ $? -ne 0 ]; then | ||
373 | bad_counter ns1 ns0$dir "$expect" | ||
374 | lret=1 | ||
375 | fi | ||
376 | |||
377 | cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
378 | if [ $? -ne 0 ]; then | ||
379 | bad_counter ns2 ns1$dir "$expect" | ||
380 | lret=1 | ||
381 | fi | ||
382 | done | ||
383 | |||
384 | # ns1 should not have seen packets from ns2, due to masquerade | ||
385 | expect="packets 0 bytes 0" | ||
386 | for dir in "in6" "out6" ; do | ||
387 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
388 | if [ $? -ne 0 ]; then | ||
389 | bad_counter ns1 ns0$dir "$expect" | ||
390 | lret=1 | ||
391 | fi | ||
392 | |||
393 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
394 | if [ $? -ne 0 ]; then | ||
395 | bad_counter ns2 ns1$dir "$expect" | ||
396 | lret=1 | ||
397 | fi | ||
398 | done | ||
399 | |||
400 | ip netns exec ns0 nft flush chain ip6 nat postrouting | ||
401 | if [ $? -ne 0 ]; then | ||
402 | echo "ERROR: Could not flush ip6 nat postrouting" 1>&2 | ||
403 | lret=1 | ||
404 | fi | ||
405 | |||
406 | test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2" | ||
407 | |||
408 | return $lret | ||
409 | } | ||
410 | |||
# Verify IPv4 masquerading: after adding a masquerade rule on ns0's veth0,
# packets routed ns2->ns1 must appear in ns1 as coming from ns0, and ns1's
# ns2 counters must stay at zero.  Returns 0 on success, 1 on any failure.
test_masquerade()
{
	local lret=0

	# ns0 routes between ns1 (veth0) and ns2 (veth1).
	ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
	ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null

	# Baseline: plain routed ping must work before NAT is added.
	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2"
		lret=1
	fi

	# One icmp echo request/reply: 84 bytes in each direction.
	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	reset_counters

	# add masquerading rule
	ip netns exec ns0 nft -f - <<EOF
table ip nat {
	chain postrouting {
		type nat hook postrouting priority 0; policy accept;
		meta oif veth0 masquerade
	}
}
EOF
	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
		lret=1
	fi

	# ns1 should have seen packets from ns0, due to masquerade
	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns0$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	# ns1 should not have seen packets from ns2, due to masquerade
	expect="packets 0 bytes 0"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		# NOTE(review): this repeats the ns1/ns2${dir} check above verbatim;
		# looks like a copy-paste leftover — confirm intended target counter.
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi
	done

	ip netns exec ns0 nft flush chain ip nat postrouting
	if [ $? -ne 0 ]; then
		echo "ERROR: Could not flush nat postrouting" 1>&2
		lret=1
	fi

	test $lret -eq 0 && echo "PASS: IP masquerade for ns2"

	return $lret
}
498 | |||
# Verify IPv6 redirect: after adding a redirect rule on ns0, icmpv6 echo
# from ns2 towards ns1 must be answered by ns0 itself, so ns1 sees nothing
# while ns0's ns2 counters increment.  Returns 0 on success, 1 on failure.
test_redirect6()
{
	local lret=0

	ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null

	# Baseline: plain routed ping must work before the redirect is added.
	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 via ipv6"
		lret=1
	fi

	# One icmpv6 echo request/reply: 104 bytes in each direction.
	expect="packets 1 bytes 104"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	reset_counters

	# add redirect rule
	ip netns exec ns0 nft -f - <<EOF
table ip6 nat {
	chain prerouting {
		type nat hook prerouting priority 0; policy accept;
		meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
	}
}
EOF
	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
		lret=1
	fi

	# ns1 should have seen no packets from ns2, due to redirection
	expect="packets 0 bytes 0"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi
	done

	# ns0 should have seen packets from ns2, due to redirection
	expect="packets 1 bytes 104"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ns2$dir "$expect"
			lret=1
		fi
	done

	ip netns exec ns0 nft delete table ip6 nat
	if [ $? -ne 0 ]; then
		echo "ERROR: Could not delete ip6 nat table" 1>&2
		lret=1
	fi

	test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"

	return $lret
}
573 | |||
# Verify IPv4 redirect: after adding a redirect rule on ns0, icmp echo
# from ns2 towards ns1 must be answered by ns0 itself, so ns1 sees nothing
# while ns0's ns2 counters increment.  Returns 0 on success, 1 on failure.
test_redirect()
{
	local lret=0

	# ns0 routes between ns1 (veth0) and ns2 (veth1).
	ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
	ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null

	# Baseline: plain routed ping must work before the redirect is added.
	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2"
		lret=1
	fi

	# One icmp echo request/reply: 84 bytes in each direction.
	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	reset_counters

	# add redirect rule
	ip netns exec ns0 nft -f - <<EOF
table ip nat {
	chain prerouting {
		type nat hook prerouting priority 0; policy accept;
		meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
	}
}
EOF
	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
		lret=1
	fi

	# ns1 should have seen no packets from ns2, due to redirection
	expect="packets 0 bytes 0"
	for dir in "in" "out" ; do

		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi
	done

	# ns0 should have seen packets from ns2, due to redirection
	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ns2$dir "$expect"
			lret=1
		fi
	done

	ip netns exec ns0 nft delete table ip nat
	if [ $? -ne 0 ]; then
		echo "ERROR: Could not delete nat table" 1>&2
		lret=1
	fi

	test $lret -eq 0 && echo "PASS: IP redirection for ns2"

	return $lret
}
650 | |||
651 | |||
# Install an identical "inet filter" accounting ruleset in each namespace
# (ns0, ns1, ns2).  For every peer there is a v4/v6 in/out named counter;
# the input chain picks a counter from the source address (who sent this),
# the output chain from the destination address (who we are sending to).
# The NAT tests above read these counters to tell which namespace a packet
# appeared to originate from after masquerade/redirect rewriting.
# Example probe the counters account for: ip netns exec ns0 ping -c 1 -q 10.0.$i.99
for i in 0 1 2; do
ip netns exec ns$i nft -f - <<EOF
table inet filter {
	counter ns0in {}
	counter ns1in {}
	counter ns2in {}

	counter ns0out {}
	counter ns1out {}
	counter ns2out {}

	counter ns0in6 {}
	counter ns1in6 {}
	counter ns2in6 {}

	counter ns0out6 {}
	counter ns1out6 {}
	counter ns2out6 {}

	map nsincounter {
		type ipv4_addr : counter
		elements = { 10.0.1.1 : "ns0in",
			     10.0.2.1 : "ns0in",
			     10.0.1.99 : "ns1in",
			     10.0.2.99 : "ns2in" }
	}

	map nsincounter6 {
		type ipv6_addr : counter
		elements = { dead:1::1 : "ns0in6",
			     dead:2::1 : "ns0in6",
			     dead:1::99 : "ns1in6",
			     dead:2::99 : "ns2in6" }
	}

	map nsoutcounter {
		type ipv4_addr : counter
		elements = { 10.0.1.1 : "ns0out",
			     10.0.2.1 : "ns0out",
			     10.0.1.99: "ns1out",
			     10.0.2.99: "ns2out" }
	}

	map nsoutcounter6 {
		type ipv6_addr : counter
		elements = { dead:1::1 : "ns0out6",
			     dead:2::1 : "ns0out6",
			     dead:1::99 : "ns1out6",
			     dead:2::99 : "ns2out6" }
	}

	chain input {
		type filter hook input priority 0; policy accept;
		counter name ip saddr map @nsincounter
		icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
	}
	chain output {
		type filter hook output priority 0; policy accept;
		counter name ip daddr map @nsoutcounter
		icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
	}
}
EOF
done
717 | |||
# Give interfaces/addresses a moment to settle before probing.
sleep 3

# Basic connectivity: ns0 must reach ns1 and ns2 over both IPv4 and IPv6,
# and every ping must be reflected in the expected per-namespace counters.
for peer in 1 2; do
	if ! ip netns exec ns0 ping -c 1 -q 10.0.$peer.99 > /dev/null; then
		echo "ERROR: Could not reach other namespace(s)" 1>&2
		ret=1
	fi

	if ! ip netns exec ns0 ping -c 1 -q dead:$peer::99 > /dev/null; then
		echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
		ret=1
	fi

	# Remote side must have accounted the echo request/reply pair.
	if ! check_counters ns$peer; then
		ret=1
	fi

	# ns0's own counters must agree as well.
	if ! check_ns0_counters ns$peer; then
		ret=1
	fi

	reset_counters
done

[ $ret -eq 0 ] && echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
747 | |||
# Run each NAT scenario with freshly zeroed counters.  The test_* functions
# return non-zero on failure; propagate that into $ret so the script's exit
# code reflects test failures, not just the earlier connectivity check
# (previously the return values were silently discarded).
reset_counters
test_local_dnat || ret=1
test_local_dnat6 || ret=1

reset_counters
test_masquerade || ret=1
test_masquerade6 || ret=1

reset_counters
test_redirect || ret=1
test_redirect6 || ret=1

# Tear down all namespaces created by the test.
for i in 0 1 2; do ip netns del ns$i;done

exit $ret