67 files changed, 537 insertions, 335 deletions
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index 3ceeb8de1196..35694c0c376b 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -7,7 +7,7 @@ limitations.
 Current Binding
 ---------------
 
-Switches are true Linux devices and can be probes by any means. Once
+Switches are true Linux devices and can be probed by any means. Once
 probed, they register to the DSA framework, passing a node
 pointer. This node is expected to fulfil the following binding, and
 may contain additional properties as required by the device it is
diff --git a/MAINTAINERS b/MAINTAINERS
index eac2bf1abec9..03c46f483143 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -180,6 +180,7 @@ F:	drivers/net/hamradio/6pack.c
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
 M:	Realtek linux nic maintainers <nic_swsd@realtek.com>
+M:	Heiner Kallweit <hkallweit1@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/realtek/r8169.c
@@ -5534,6 +5535,7 @@ F:	net/bridge/
 ETHERNET PHY LIBRARY
 M:	Andrew Lunn <andrew@lunn.ch>
 M:	Florian Fainelli <f.fainelli@gmail.com>
+M:	Heiner Kallweit <hkallweit1@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/ABI/testing/sysfs-bus-mdio
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 18956e7604a3..a70bb1bb90e7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1848,6 +1848,8 @@ static void ena_down(struct ena_adapter *adapter)
 		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
 		if (rc)
 			dev_err(&adapter->pdev->dev, "Device reset failed\n");
+		/* stop submitting admin commands on a device that was reset */
+		ena_com_set_admin_running_state(adapter->ena_dev, false);
 	}
 
 	ena_destroy_all_io_queues(adapter);
@@ -1914,6 +1916,9 @@ static int ena_close(struct net_device *netdev)
 
 	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
 
+	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+		return 0;
+
 	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
 		ena_down(adapter);
 
@@ -2613,9 +2618,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
 		ena_down(adapter);
 
 	/* Stop the device from sending AENQ events (in case reset flag is set
-	 * and device is up, ena_close already reset the device
-	 * In case the reset flag is set and the device is up, ena_down()
-	 * already perform the reset, so it can be skipped.
+	 * and device is up, ena_down() already reset the device.
 	 */
 	if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
 		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
@@ -2694,8 +2697,8 @@ err_device_destroy:
 	ena_com_abort_admin_commands(ena_dev);
 	ena_com_wait_for_abort_completion(ena_dev);
 	ena_com_admin_destroy(ena_dev);
-	ena_com_mmio_reg_read_request_destroy(ena_dev);
 	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+	ena_com_mmio_reg_read_request_destroy(ena_dev);
 err:
 	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
 	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -3452,6 +3455,8 @@ err_rss:
 	ena_com_rss_destroy(ena_dev);
 err_free_msix:
 	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
+	/* stop submitting admin commands on a device that was reset */
+	ena_com_set_admin_running_state(ena_dev, false);
 	ena_free_mgmnt_irq(adapter);
 	ena_disable_msix(adapter);
 err_worker_destroy:
@@ -3498,18 +3503,12 @@ static void ena_remove(struct pci_dev *pdev)
 
 	cancel_work_sync(&adapter->reset_task);
 
-	unregister_netdev(netdev);
-
-	/* If the device is running then we want to make sure the device will be
-	 * reset to make sure no more events will be issued by the device.
-	 */
-	if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
-		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-
 	rtnl_lock();
 	ena_destroy_device(adapter, true);
 	rtnl_unlock();
 
+	unregister_netdev(netdev);
+
 	free_netdev(netdev);
 
 	ena_com_rss_destroy(ena_dev);
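The ena_close() hunk above guards the stop path with a device-state flag so a close racing with a failed reset never touches dead hardware. Below is a minimal standalone sketch of that guard pattern; the names (my_dev, device_running, dev_up) are hypothetical and no real ENA APIs are used.

    #include <stdbool.h>
    #include <stdio.h>

    struct my_dev {
        bool device_running;    /* hardware is alive and reachable */
        bool dev_up;            /* data path (queues, NAPI) is started */
    };

    /* Stop the data path; only meaningful while the device is running. */
    static void my_dev_down(struct my_dev *d)
    {
        d->dev_up = false;
        printf("queues stopped, device reset issued\n");
    }

    /* .ndo_stop-style callback: bail out early if the device already died. */
    static int my_dev_close(struct my_dev *d)
    {
        if (!d->device_running)
            return 0;           /* nothing to do, hardware is already gone */
        if (d->dev_up)
            my_dev_down(d);
        return 0;
    }

    int main(void)
    {
        struct my_dev d = { .device_running = false, .dev_up = true };
        return my_dev_close(&d);    /* returns 0 without touching hardware */
    }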
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 521873642339..dc8b6173d8d8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
 
 #define DRV_MODULE_VER_MAJOR 2
 #define DRV_MODULE_VER_MINOR 0
-#define DRV_MODULE_VER_SUBMINOR 1
+#define DRV_MODULE_VER_SUBMINOR 2
 
 #define DRV_MODULE_NAME "ena"
 #ifndef DRV_MODULE_VERSION
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index b4fc0ed5bce8..9d4899826823 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
 
 		prop = of_get_property(nd, "tpe-link-test?", NULL);
 		if (!prop)
-			goto no_link_test;
+			goto node_put;
 
 		if (strcmp(prop, "true")) {
 			printk(KERN_NOTICE "SunLance: warning: overriding option "
@@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
 			       "to ecd@skynet.be\n");
 			auxio_set_lte(AUXIO_LTE_ON);
 		}
+node_put:
+		of_node_put(nd);
 no_link_test:
 		lp->auto_select = 1;
 		lp->tpe = 0;
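The sunlance hunks add an of_node_put() so the reference taken on the OF node is dropped on the early-exit path as well. A hedged, generic sketch of the same goto-unwind pattern follows; the refcount helpers are made up and are not the OF API.

    #include <stdio.h>

    struct node { int refcount; };

    static void node_get(struct node *n) { n->refcount++; }
    static void node_put(struct node *n) { n->refcount--; }

    static int probe_one(struct node *nd, int have_prop)
    {
        int rc = 0;

        node_get(nd);               /* reference taken up front */
        if (!have_prop) {
            rc = -1;
            goto out_put;           /* early exit must still drop it */
        }
        printf("property found, configuring link test\n");
    out_put:
        node_put(nd);               /* single place that releases the ref */
        return rc;
    }

    int main(void)
    {
        struct node nd = { .refcount = 1 };

        probe_one(&nd, 0);
        printf("refcount back to %d\n", nd.refcount);   /* prints 1 */
        return 0;
    }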
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 89295306f161..432c3b867084 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12422,6 +12422,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 {
 	struct tg3 *tp = netdev_priv(dev);
 	int i, irq_sync = 0, err = 0;
+	bool reset_phy = false;
 
 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
@@ -12453,7 +12454,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 
 	if (netif_running(dev)) {
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-		err = tg3_restart_hw(tp, false);
+		/* Reset PHY to avoid PHY lock up */
+		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5720)
+			reset_phy = true;
+
+		err = tg3_restart_hw(tp, reset_phy);
 		if (!err)
 			tg3_netif_start(tp);
 	}
@@ -12487,6 +12494,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 {
 	struct tg3 *tp = netdev_priv(dev);
 	int err = 0;
+	bool reset_phy = false;
 
 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
 		tg3_warn_mgmt_link_flap(tp);
@@ -12556,7 +12564,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 
 	if (netif_running(dev)) {
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-		err = tg3_restart_hw(tp, false);
+		/* Reset PHY to avoid PHY lock up */
+		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5720)
+			reset_phy = true;
+
+		err = tg3_restart_hw(tp, reset_phy);
 		if (!err)
 			tg3_netif_start(tp);
 	}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 768f584f8392..88f8a8fa93cd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1784,6 +1784,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
 	bool if_up = netif_running(nic->netdev);
 	struct bpf_prog *old_prog;
 	bool bpf_attached = false;
+	int ret = 0;
 
 	/* For now just support only the usual MTU sized frames */
 	if (prog && (dev->mtu > 1500)) {
@@ -1817,8 +1818,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
 	if (nic->xdp_prog) {
 		/* Attach BPF program */
 		nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
-		if (!IS_ERR(nic->xdp_prog))
+		if (!IS_ERR(nic->xdp_prog)) {
 			bpf_attached = true;
+		} else {
+			ret = PTR_ERR(nic->xdp_prog);
+			nic->xdp_prog = NULL;
+		}
 	}
 
 	/* Calculate Tx queues needed for XDP and network stack */
@@ -1830,7 +1835,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
 		netif_trans_update(nic->netdev);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
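The nicvf_xdp_setup() change stops reporting success when bpf_prog_add() fails: the error is captured, the stale pointer is cleared, and the error is returned. A small sketch of that propagate-don't-swallow pattern, with hypothetical helper names:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct prog { int users; };

    /* Hypothetical stand-in: fails by returning NULL and reporting an errno. */
    static struct prog *prog_add(struct prog *p, int *err)
    {
        (void)p;
        *err = -ENOMEM;         /* simulate a failure */
        return NULL;
    }

    static int setup(struct prog **slot, struct prog *candidate)
    {
        int ret = 0;

        if (candidate) {
            int err;
            struct prog *held = prog_add(candidate, &err);

            if (held) {
                *slot = held;   /* attach succeeded */
            } else {
                ret = err;      /* remember the failure ... */
                *slot = NULL;   /* ... and drop the stale pointer */
            }
        }
        /* queue reconfiguration would continue here either way */
        return ret;             /* 0 on success, negative errno otherwise */
    }

    int main(void)
    {
        struct prog p = { 0 };
        struct prog *slot = &p;

        printf("setup() = %d\n", setup(&slot, &p)); /* prints a negative errno */
        return 0;
    }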
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 187a249ff2d1..fcaf18fa3904 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -585,10 +585,12 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 	if (!sq->dmem.base)
 		return;
 
-	if (sq->tso_hdrs)
+	if (sq->tso_hdrs) {
 		dma_free_coherent(&nic->pdev->dev,
 				  sq->dmem.q_len * TSO_HEADER_SIZE,
 				  sq->tso_hdrs, sq->tso_hdrs_phys);
+		sq->tso_hdrs = NULL;
+	}
 
 	/* Free pending skbs in the queue */
 	smp_rmb();
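The nicvf_free_snd_queue() hunk clears sq->tso_hdrs after freeing it so a second teardown pass cannot free the same buffer twice. The same idea in plain C (illustrative only, not the driver's DMA API):

    #include <stdlib.h>

    struct snd_queue { void *tso_hdrs; };

    static void free_snd_queue(struct snd_queue *sq)
    {
        if (sq->tso_hdrs) {
            free(sq->tso_hdrs);
            sq->tso_hdrs = NULL;    /* makes the free idempotent */
        }
    }

    int main(void)
    {
        struct snd_queue sq = { .tso_hdrs = malloc(64) };

        free_snd_queue(&sq);
        free_snd_queue(&sq);    /* safe: second call sees NULL and does nothing */
        return 0;
    }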
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index ceec467f590d..949103db8a8a 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -660,7 +660,7 @@ static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
 
 			u64_stats_update_begin(&port->tx_stats_syncp);
 			port->tx_frag_stats[nfrags]++;
-			u64_stats_update_end(&port->ir_stats_syncp);
+			u64_stats_update_end(&port->tx_stats_syncp);
 		}
 	}
 
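The gemini fix pairs u64_stats_update_end() with the same syncp that was passed to u64_stats_update_begin(); ending a different one leaves the first sequence counter in an odd state and readers can spin forever. A generic sketch of keeping the begin/end pair on one object, using plain mutexes rather than the u64_stats API:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tx_stats {
        pthread_mutex_t lock;
        uint64_t frags[16];
    };

    static void account_frags(struct tx_stats *tx, unsigned int nfrags)
    {
        pthread_mutex_lock(&tx->lock);      /* begin on tx->lock ...            */
        tx->frags[nfrags]++;
        pthread_mutex_unlock(&tx->lock);    /* ... end on the very same object  */
    }

    int main(void)
    {
        struct tx_stats tx = { .lock = PTHREAD_MUTEX_INITIALIZER };

        account_frags(&tx, 3);
        printf("frags[3] = %llu\n", (unsigned long long)tx.frags[3]);
        return 0;
    }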
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 570caeb8ee9e..084f24daf2b5 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -872,11 +872,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
 	struct net_device *netdev = dev_id;
 	struct ftmac100 *priv = netdev_priv(netdev);
 
-	if (likely(netif_running(netdev))) {
-		/* Disable interrupts for polling */
-		ftmac100_disable_all_int(priv);
+	/* Disable interrupts for polling */
+	ftmac100_disable_all_int(priv);
+	if (likely(netif_running(netdev)))
 		napi_schedule(&priv->napi);
-	}
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c9d5d0a7fbf1..c0203a0d5e3b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -485,8 +485,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 
 		for (j = 0; j < rx_pool->size; j++) {
 			if (rx_pool->rx_buff[j].skb) {
-				dev_kfree_skb_any(rx_pool->rx_buff[i].skb);
-				rx_pool->rx_buff[i].skb = NULL;
+				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+				rx_pool->rx_buff[j].skb = NULL;
 			}
 		}
 
@@ -1103,20 +1103,15 @@ static int ibmvnic_open(struct net_device *netdev)
 		return 0;
 	}
 
-	mutex_lock(&adapter->reset_lock);
-
 	if (adapter->state != VNIC_CLOSED) {
 		rc = ibmvnic_login(netdev);
-		if (rc) {
-			mutex_unlock(&adapter->reset_lock);
+		if (rc)
 			return rc;
-		}
 
 		rc = init_resources(adapter);
 		if (rc) {
 			netdev_err(netdev, "failed to initialize resources\n");
 			release_resources(adapter);
-			mutex_unlock(&adapter->reset_lock);
 			return rc;
 		}
 	}
@@ -1124,8 +1119,6 @@ static int ibmvnic_open(struct net_device *netdev)
 	rc = __ibmvnic_open(netdev);
 	netif_carrier_on(netdev);
 
-	mutex_unlock(&adapter->reset_lock);
-
 	return rc;
 }
 
@@ -1269,10 +1262,8 @@ static int ibmvnic_close(struct net_device *netdev)
 		return 0;
 	}
 
-	mutex_lock(&adapter->reset_lock);
 	rc = __ibmvnic_close(netdev);
 	ibmvnic_cleanup(netdev);
-	mutex_unlock(&adapter->reset_lock);
 
 	return rc;
 }
@@ -1746,6 +1737,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 		    struct ibmvnic_rwi *rwi, u32 reset_state)
 {
 	u64 old_num_rx_queues, old_num_tx_queues;
+	u64 old_num_rx_slots, old_num_tx_slots;
 	struct net_device *netdev = adapter->netdev;
 	int i, rc;
 
@@ -1757,6 +1749,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
 	old_num_rx_queues = adapter->req_rx_queues;
 	old_num_tx_queues = adapter->req_tx_queues;
+	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
+	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
 
 	ibmvnic_cleanup(netdev);
 
@@ -1819,21 +1813,20 @@
 			if (rc)
 				return rc;
 		} else if (adapter->req_rx_queues != old_num_rx_queues ||
-			   adapter->req_tx_queues != old_num_tx_queues) {
-			adapter->map_id = 1;
+			   adapter->req_tx_queues != old_num_tx_queues ||
+			   adapter->req_rx_add_entries_per_subcrq !=
+			   old_num_rx_slots ||
+			   adapter->req_tx_entries_per_subcrq !=
+			   old_num_tx_slots) {
 			release_rx_pools(adapter);
 			release_tx_pools(adapter);
-			rc = init_rx_pools(netdev);
-			if (rc)
-				return rc;
-			rc = init_tx_pools(netdev);
-			if (rc)
-				return rc;
-
 			release_napi(adapter);
-			rc = init_napi(adapter);
+			release_vpd_data(adapter);
+
+			rc = init_resources(adapter);
 			if (rc)
 				return rc;
+
 		} else {
 			rc = reset_tx_pools(adapter);
 			if (rc)
@@ -1917,17 +1910,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
 		adapter->state = VNIC_PROBED;
 		return 0;
 	}
-	/* netif_set_real_num_xx_queues needs to take rtnl lock here
-	 * unless wait_for_reset is set, in which case the rtnl lock
-	 * has already been taken before initializing the reset
-	 */
-	if (!adapter->wait_for_reset) {
-		rtnl_lock();
-		rc = init_resources(adapter);
-		rtnl_unlock();
-	} else {
-		rc = init_resources(adapter);
-	}
+
+	rc = init_resources(adapter);
 	if (rc)
 		return rc;
 
@@ -1986,13 +1970,21 @@ static void __ibmvnic_reset(struct work_struct *work)
 	struct ibmvnic_rwi *rwi;
 	struct ibmvnic_adapter *adapter;
 	struct net_device *netdev;
+	bool we_lock_rtnl = false;
 	u32 reset_state;
 	int rc = 0;
 
 	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
 	netdev = adapter->netdev;
 
-	mutex_lock(&adapter->reset_lock);
+	/* netif_set_real_num_xx_queues needs to take rtnl lock here
+	 * unless wait_for_reset is set, in which case the rtnl lock
+	 * has already been taken before initializing the reset
+	 */
+	if (!adapter->wait_for_reset) {
+		rtnl_lock();
+		we_lock_rtnl = true;
+	}
 	reset_state = adapter->state;
 
 	rwi = get_next_rwi(adapter);
@@ -2020,12 +2012,11 @@ static void __ibmvnic_reset(struct work_struct *work)
 	if (rc) {
 		netdev_dbg(adapter->netdev, "Reset failed\n");
 		free_all_rwi(adapter);
-		mutex_unlock(&adapter->reset_lock);
-		return;
 	}
 
 	adapter->resetting = false;
-	mutex_unlock(&adapter->reset_lock);
+	if (we_lock_rtnl)
+		rtnl_unlock();
 }
 
 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
@@ -4768,7 +4759,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
 	INIT_LIST_HEAD(&adapter->rwi_list);
-	mutex_init(&adapter->reset_lock);
 	mutex_init(&adapter->rwi_lock);
 	adapter->resetting = false;
 
@@ -4840,8 +4830,8 @@ static int ibmvnic_remove(struct vio_dev *dev)
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
 	adapter->state = VNIC_REMOVING;
-	unregister_netdev(netdev);
-	mutex_lock(&adapter->reset_lock);
+	rtnl_lock();
+	unregister_netdevice(netdev);
 
 	release_resources(adapter);
 	release_sub_crqs(adapter, 1);
@@ -4852,7 +4842,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
 
 	adapter->state = VNIC_REMOVED;
 
-	mutex_unlock(&adapter->reset_lock);
+	rtnl_unlock();
 	device_remove_file(&dev->dev, &dev_attr_failover);
 	free_netdev(netdev);
 	dev_set_drvdata(&dev->dev, NULL);
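The __ibmvnic_reset() rework drops the driver-private reset_lock and serializes against the rtnl lock instead, taking it only when the caller does not already hold it; a local flag remembers whether this path did the locking so only that case unlocks. A hedged pthread sketch of that conditional-lock pattern (big_lock stands in for the rtnl lock):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static void do_reset(bool caller_holds_lock)
    {
        bool we_locked = false;

        if (!caller_holds_lock) {
            pthread_mutex_lock(&big_lock);
            we_locked = true;       /* remember that *we* took it */
        }

        printf("reset work runs under the lock\n");

        if (we_locked)
            pthread_mutex_unlock(&big_lock);    /* only release what we acquired */
    }

    int main(void)
    {
        do_reset(false);                /* worker path: lock taken here */

        pthread_mutex_lock(&big_lock);  /* wait_for_reset path: caller already holds it */
        do_reset(true);
        pthread_mutex_unlock(&big_lock);
        return 0;
    }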
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 18103b811d4d..99c4f8d331ce 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
 	struct tasklet_struct tasklet;
 	enum vnic_state state;
 	enum ibmvnic_reset_reason reset_reason;
-	struct mutex reset_lock, rwi_lock;
+	struct mutex rwi_lock;
 	struct list_head rwi_list;
 	struct work_struct ibmvnic_reset;
 	bool resetting;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index d7fbd5b6ac95..118324802926 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -569,6 +569,7 @@ struct mlx5e_rq {
 
 	unsigned long state;
 	int ix;
+	unsigned int hw_mtu;
 
 	struct net_dim dim; /* Dynamic Interrupt Moderation */
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 023dc4bccd28..4a37713023be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
 
 	eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
 	*speed = mlx5e_port_ptys2speed(eth_proto_oper);
-	if (!(*speed)) {
-		mlx5_core_warn(mdev, "cannot get port speed\n");
+	if (!(*speed))
 		err = -EINVAL;
-	}
 
 	return err;
 }
@@ -258,7 +256,7 @@ static int mlx5e_fec_admin_field(u32 *pplm,
 	case 40000:
 		if (!write)
 			*fec_policy = MLX5_GET(pplm_reg, pplm,
-					       fec_override_cap_10g_40g);
+					       fec_override_admin_10g_40g);
 		else
 			MLX5_SET(pplm_reg, pplm,
 				 fec_override_admin_10g_40g, *fec_policy);
@@ -310,7 +308,7 @@ static int mlx5e_get_fec_cap_field(u32 *pplm,
 	case 10000:
 	case 40000:
 		*fec_cap = MLX5_GET(pplm_reg, pplm,
-				    fec_override_admin_10g_40g);
+				    fec_override_cap_10g_40g);
 		break;
 	case 25000:
 		*fec_cap = MLX5_GET(pplm_reg, pplm,
@@ -394,12 +392,12 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
 
 int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
 {
+	u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC);
 	bool fec_mode_not_supp_in_speed = false;
-	u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC);
 	u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
 	u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
 	int sz = MLX5_ST_SZ_BYTES(pplm_reg);
-	u32 current_fec_speed;
+	u8 fec_policy_auto = 0;
 	u8 fec_caps = 0;
 	int err;
 	int i;
@@ -415,23 +413,19 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
 	if (err)
 		return err;
 
-	err = mlx5e_port_linkspeed(dev, &current_fec_speed);
-	if (err)
-		return err;
+	MLX5_SET(pplm_reg, out, local_port, 1);
 
-	memset(in, 0, sz);
-	MLX5_SET(pplm_reg, in, local_port, 1);
-	for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) {
+	for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) {
 		mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
-		/* policy supported for link speed */
-		if (!!(fec_caps & fec_policy)) {
-			mlx5e_fec_admin_field(in, &fec_policy, 1,
+		/* policy supported for link speed, or policy is auto */
+		if (fec_caps & fec_policy || fec_policy == fec_policy_auto) {
+			mlx5e_fec_admin_field(out, &fec_policy, 1,
 					      fec_supported_speeds[i]);
 		} else {
-			if (fec_supported_speeds[i] == current_fec_speed)
-				return -EOPNOTSUPP;
-			mlx5e_fec_admin_field(in, &no_fec_policy, 1,
+			/* turn off FEC if supported. Else, leave it the same */
+			if (fec_caps & fec_policy_nofec)
+				mlx5e_fec_admin_field(out, &fec_policy_nofec, 1,
 					      fec_supported_speeds[i]);
 			fec_mode_not_supp_in_speed = true;
 		}
 	}
@@ -441,5 +435,5 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
 		"FEC policy 0x%x is not supported for some speeds",
 		fec_policy);
 
-	return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1);
+	return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index c047da8752da..eac245a93f91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 	int err;
 
 	err = mlx5e_port_linkspeed(priv->mdev, &speed);
-	if (err)
+	if (err) {
+		mlx5_core_warn(priv->mdev, "cannot get port speed\n");
 		return 0;
+	}
 
 	xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3e770abfd802..25c1c4f96841 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -843,8 +843,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
 	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
 					     Autoneg);
 
-	err = get_fec_supported_advertised(mdev, link_ksettings);
-	if (err)
+	if (get_fec_supported_advertised(mdev, link_ksettings))
 		netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
 			   __func__, err);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1243edbedc9e..871313d6b34d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -502,6 +502,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	rq->channel = c;
 	rq->ix = c->ix;
 	rq->mdev = mdev;
+	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	rq->stats = &c->priv->channel_stats[c->ix].rq;
 
 	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -1623,13 +1624,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
 	int err;
 	u32 i;
 
+	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+	if (err)
+		return err;
+
 	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
 			       &cq->wq_ctrl);
 	if (err)
 		return err;
 
-	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-
 	mcq->cqe_sz = 64;
 	mcq->set_ci_db = cq->wq_ctrl.db.db;
 	mcq->arm_db = cq->wq_ctrl.db.db + 1;
@@ -1687,6 +1690,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	int eqn;
 	int err;
 
+	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+	if (err)
+		return err;
+
 	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 		sizeof(u64) * cq->wq_ctrl.buf.npages;
 	in = kvzalloc(inlen, GFP_KERNEL);
@@ -1700,8 +1707,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
 				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
-	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
-
 	MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
 	MLX5_SET(cqc, cqc, c_eqn, eqn);
 	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
@@ -1921,6 +1926,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	int err;
 	int eqn;
 
+	err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+	if (err)
+		return err;
+
 	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
 	if (!c)
 		return -ENOMEM;
@@ -1937,7 +1946,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->xdp = !!params->xdp_prog;
 	c->stats = &priv->channel_stats[ix].ch;
 
-	mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
 	c->irq_desc = irq_to_desc(irq);
 
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -3574,6 +3582,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
 	return 0;
 }
 
+#ifdef CONFIG_MLX5_ESWITCH
 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3586,6 +3595,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
 
 	return 0;
 }
+#endif
 
 static int set_feature_rx_all(struct net_device *netdev, bool enable)
 {
@@ -3684,7 +3694,9 @@ static int mlx5e_set_features(struct net_device *netdev,
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
 				    set_feature_cvlan_filter);
+#ifdef CONFIG_MLX5_ESWITCH
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
+#endif
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -3755,10 +3767,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 	}
 
 	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+		bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
 		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
 		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
 
-		reset = reset && (ppw_old != ppw_new);
+		reset = reset && (is_linear || (ppw_old != ppw_new));
 	}
 
 	if (!reset) {
@@ -4678,7 +4691,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	    FT_CAP(modify_root) &&
 	    FT_CAP(identified_miss_table_mode) &&
 	    FT_CAP(flow_table_modify)) {
+#ifdef CONFIG_MLX5_ESWITCH
 		netdev->hw_features |= NETIF_F_HW_TC;
+#endif
 #ifdef CONFIG_MLX5_EN_ARFS
 		netdev->hw_features |= NETIF_F_NTUPLE;
 #endif
@@ -5004,11 +5019,21 @@ err_free_netdev:
 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 {
 	const struct mlx5e_profile *profile;
+	int max_nch;
 	int err;
 
 	profile = priv->profile;
 	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
+	/* max number of channels may have changed */
+	max_nch = mlx5e_get_max_num_channels(priv->mdev);
+	if (priv->channels.params.num_channels > max_nch) {
+		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
+		priv->channels.params.num_channels = max_nch;
+		mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
+					      MLX5E_INDIR_RQT_SIZE, max_nch);
+	}
+
 	err = profile->init_tx(priv);
 	if (err)
 		goto out;
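Several en_main.c hunks move the mlx5_vector2eqn() call ahead of the allocations and start checking its return value, so a failure is caught before there is anything to unwind. A generic check-before-allocate sketch with hypothetical helpers:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical lookup that can fail, e.g. translating an index to an EQ. */
    static int lookup_vector(int ix, int *eqn)
    {
        if (ix < 0 || ix >= 4)
            return -EINVAL;
        *eqn = 100 + ix;
        return 0;
    }

    static int open_channel(int ix)
    {
        int eqn, err;

        err = lookup_vector(ix, &eqn);  /* validate first ...                  */
        if (err)
            return err;                 /* ... nothing allocated yet, no unwind */

        void *chan = calloc(1, 256);    /* only now commit resources */
        if (!chan)
            return -ENOMEM;

        printf("channel %d bound to eqn %d\n", ix, eqn);
        free(chan);
        return 0;
    }

    int main(void)
    {
        printf("open_channel(9) = %d\n", open_channel(9));  /* takes the -EINVAL path */
        return open_channel(1) ? 1 : 0;
    }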
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 79638dcbae78..16985ca3248d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1104,6 +1104,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	u32 frag_size;
 	bool consumed;
 
+	/* Check packet size. Note LRO doesn't use linear SKB */
+	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
+		rq->stats->oversize_pkts_sw_drop++;
+		return NULL;
+	}
+
 	va = page_address(di->page) + head_offset;
 	data = va + rx_headroom;
 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
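The en_rx.c hunk drops any completion whose byte count exceeds the MTU the RQ was sized for and bumps a dedicated counter instead of building an SKB. A small sketch of that drop-and-count guard with made-up names:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rq {
        unsigned int hw_mtu;        /* largest frame the queue was sized for */
        uint64_t oversize_drops;    /* counter exposed via ethtool-style stats */
    };

    /* Returns a fake "skb" pointer, or NULL when the frame must be dropped. */
    static void *build_skb_checked(struct rq *rq, unsigned int byte_cnt, void *frame)
    {
        if (byte_cnt > rq->hw_mtu) {
            rq->oversize_drops++;
            return NULL;            /* caller treats NULL as "dropped" */
        }
        return frame;
    }

    int main(void)
    {
        struct rq rq = { .hw_mtu = 1514 };
        char frame[2048];

        build_skb_checked(&rq, 9000, frame);
        printf("oversize drops: %llu\n", (unsigned long long)rq.oversize_drops);
        return 0;
    }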
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 35ded91203f5..4382ef85488c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
 	return 1;
 }
 
-#ifdef CONFIG_INET
-/* loopback test */
-#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
-static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
-#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
-
 struct mlx5ehdr {
 	__be32 version;
 	__be64 magic;
-	char text[ETH_GSTRING_LEN];
 };
 
+#ifdef CONFIG_INET
+/* loopback test */
+#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
+			     sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
+#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
+
 static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
 {
 	struct sk_buff *skb = NULL;
@@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
 	struct ethhdr *ethh;
 	struct udphdr *udph;
 	struct iphdr *iph;
-	int datalen, iplen;
-
-	datalen = MLX5E_TEST_PKT_SIZE -
-		  (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
+	int iplen;
 
 	skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
 	if (!skb) {
@@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
 	/* Fill UDP header */
 	udph->source = htons(9);
 	udph->dest = htons(9); /* Discard Protocol */
-	udph->len = htons(datalen + sizeof(struct udphdr));
+	udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
 	udph->check = 0;
 
 	/* Fill IP header */
@@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
 	iph->ttl = 32;
 	iph->version = 4;
 	iph->protocol = IPPROTO_UDP;
-	iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
+	iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
+		sizeof(struct mlx5ehdr);
 	iph->tot_len = htons(iplen);
 	iph->frag_off = 0;
 	iph->saddr = 0;
@@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
 	mlxh = skb_put(skb, sizeof(*mlxh));
 	mlxh->version = 0;
 	mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
-	strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
-	datalen -= sizeof(*mlxh);
-	skb_put_zero(skb, datalen);
 
 	skb->csum = 0;
 	skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1e55b9c27ffc..3e99d0728b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -83,6 +83,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -161,6 +162,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_wqe_err += rq_stats->wqe_err;
 		s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
 		s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
+		s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
 		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
 		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
 		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -1189,6 +1191,7 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 77f74ce11280..3f8e870ef4c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -96,6 +96,7 @@ struct mlx5e_sw_stats {
 	u64 rx_wqe_err;
 	u64 rx_mpwqe_filler_cqes;
 	u64 rx_mpwqe_filler_strides;
+	u64 rx_oversize_pkts_sw_drop;
 	u64 rx_buff_alloc_err;
 	u64 rx_cqe_compress_blks;
 	u64 rx_cqe_compress_pkts;
@@ -193,6 +194,7 @@ struct mlx5e_rq_stats {
 	u64 wqe_err;
 	u64 mpwqe_filler_cqes;
 	u64 mpwqe_filler_strides;
+	u64 oversize_pkts_sw_drop;
 	u64 buff_alloc_err;
 	u64 cqe_compress_blks;
 	u64 cqe_compress_pkts;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 608025ca5c04..fca6f4132c91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1447,31 +1447,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 					 inner_headers);
 	}
 
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-		struct flow_dissector_key_eth_addrs *key =
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_dissector_key_basic *key =
 			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
+						  FLOW_DISSECTOR_KEY_BASIC,
 						  f->key);
-		struct flow_dissector_key_eth_addrs *mask =
+		struct flow_dissector_key_basic *mask =
 			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
+						  FLOW_DISSECTOR_KEY_BASIC,
 						  f->mask);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+			 ntohs(mask->n_proto));
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+			 ntohs(key->n_proto));
 
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-					     dmac_47_16),
-				mask->dst);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-					     dmac_47_16),
-				key->dst);
-
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-					     smac_47_16),
-				mask->src);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-					     smac_47_16),
-				key->src);
-
-		if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+		if (mask->n_proto)
 			*match_level = MLX5_MATCH_L2;
 	}
 
@@ -1505,9 +1495,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
 			*match_level = MLX5_MATCH_L2;
 		}
-	} else {
+	} else if (*match_level != MLX5_MATCH_NONE) {
 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+		*match_level = MLX5_MATCH_L2;
 	}
 
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
| @@ -1545,21 +1536,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
| 1545 | } | 1536 | } |
| 1546 | } | 1537 | } |
| 1547 | 1538 | ||
| 1548 | if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { | 1539 | if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
| 1549 | struct flow_dissector_key_basic *key = | 1540 | struct flow_dissector_key_eth_addrs *key = |
| 1550 | skb_flow_dissector_target(f->dissector, | 1541 | skb_flow_dissector_target(f->dissector, |
| 1551 | FLOW_DISSECTOR_KEY_BASIC, | 1542 | FLOW_DISSECTOR_KEY_ETH_ADDRS, |
| 1552 | f->key); | 1543 | f->key); |
| 1553 | struct flow_dissector_key_basic *mask = | 1544 | struct flow_dissector_key_eth_addrs *mask = |
| 1554 | skb_flow_dissector_target(f->dissector, | 1545 | skb_flow_dissector_target(f->dissector, |
| 1555 | FLOW_DISSECTOR_KEY_BASIC, | 1546 | FLOW_DISSECTOR_KEY_ETH_ADDRS, |
| 1556 | f->mask); | 1547 | f->mask); |
| 1557 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, | ||
| 1558 | ntohs(mask->n_proto)); | ||
| 1559 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, | ||
| 1560 | ntohs(key->n_proto)); | ||
| 1561 | 1548 | ||
| 1562 | if (mask->n_proto) | 1549 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, |
| 1550 | dmac_47_16), | ||
| 1551 | mask->dst); | ||
| 1552 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, | ||
| 1553 | dmac_47_16), | ||
| 1554 | key->dst); | ||
| 1555 | |||
| 1556 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, | ||
| 1557 | smac_47_16), | ||
| 1558 | mask->src); | ||
| 1559 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, | ||
| 1560 | smac_47_16), | ||
| 1561 | key->src); | ||
| 1562 | |||
| 1563 | if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst)) | ||
| 1563 | *match_level = MLX5_MATCH_L2; | 1564 | *match_level = MLX5_MATCH_L2; |
| 1564 | } | 1565 | } |
| 1565 | 1566 | ||
| @@ -1586,10 +1587,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
| 1586 | 1587 | ||
| 1587 | /* the HW doesn't need L3 inline to match on frag=no */ | 1588 | /* the HW doesn't need L3 inline to match on frag=no */ |
| 1588 | if (!(key->flags & FLOW_DIS_IS_FRAGMENT)) | 1589 | if (!(key->flags & FLOW_DIS_IS_FRAGMENT)) |
| 1589 | *match_level = MLX5_INLINE_MODE_L2; | 1590 | *match_level = MLX5_MATCH_L2; |
| 1590 | /* *** L2 attributes parsing up to here *** */ | 1591 | /* *** L2 attributes parsing up to here *** */ |
| 1591 | else | 1592 | else |
| 1592 | *match_level = MLX5_INLINE_MODE_IP; | 1593 | *match_level = MLX5_MATCH_L3; |
| 1593 | } | 1594 | } |
| 1594 | } | 1595 | } |
| 1595 | 1596 | ||
| @@ -2979,7 +2980,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
| 2979 | if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) | 2980 | if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) |
| 2980 | return -EOPNOTSUPP; | 2981 | return -EOPNOTSUPP; |
| 2981 | 2982 | ||
| 2982 | if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { | 2983 | if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { |
| 2983 | NL_SET_ERR_MSG_MOD(extack, | 2984 | NL_SET_ERR_MSG_MOD(extack, |
| 2984 | "current firmware doesn't support split rule for port mirroring"); | 2985 | "current firmware doesn't support split rule for port mirroring"); |
| 2985 | netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); | 2986 | netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 515e3d6de051..5a22c5874f3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | |||
| @@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule { | |||
| 83 | }; | 83 | }; |
| 84 | 84 | ||
| 85 | static const struct rhashtable_params rhash_sa = { | 85 | static const struct rhashtable_params rhash_sa = { |
| 86 | .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), | 86 | /* Keep out "cmd" field from the key as its |
| 87 | .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), | 87 | * value is not constant during the lifetime |
| 88 | * of the key object. | ||
| 89 | */ | ||
| 90 | .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - | ||
| 91 | FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), | ||
| 92 | .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) + | ||
| 93 | FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), | ||
| 88 | .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), | 94 | .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), |
| 89 | .automatic_shrinking = true, | 95 | .automatic_shrinking = true, |
| 90 | .min_size = 1, | 96 | .min_size = 1, |
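
The rhashtable tweak above shrinks the lookup key so the leading "cmd" word of hw_sa, which changes over the object's lifetime, is no longer hashed. A minimal user-space sketch of the same offset/size arithmetic; the struct layouts below are simplified stand-ins, not the real mlx5 definitions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's FIELD_SIZEOF() */
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

/* simplified stand-ins for mlx5_ifc_fpga_ipsec_sa_v1 / mlx5_fpga_ipsec_sa_ctx */
struct sa_v1 {
        uint32_t cmd;                   /* mutable over the object's lifetime */
        uint8_t  key_material[32];      /* stable part used for lookups */
};

struct sa_ctx {
        void *hash_node;                /* placeholder for the rhash_head */
        struct sa_v1 hw_sa;
};

int main(void)
{
        /* hash only the stable tail of hw_sa: skip the leading cmd word */
        size_t key_offset = offsetof(struct sa_ctx, hw_sa) +
                            FIELD_SIZEOF(struct sa_v1, cmd);
        size_t key_len = FIELD_SIZEOF(struct sa_ctx, hw_sa) -
                         FIELD_SIZEOF(struct sa_v1, cmd);

        printf("key_offset=%zu key_len=%zu (hw_sa at %zu, size %zu)\n",
               key_offset, key_len,
               offsetof(struct sa_ctx, hw_sa), sizeof(struct sa_v1));
        return 0;
}

The point is that key_offset skips past the mutable field while key_len shrinks by the same amount, so the hashed window stays aligned with the stable tail of hw_sa.
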
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index b59953daf8b4..11dabd62e2c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | |||
| @@ -560,9 +560,9 @@ static int mlx5i_close(struct net_device *netdev) | |||
| 560 | 560 | ||
| 561 | netif_carrier_off(epriv->netdev); | 561 | netif_carrier_off(epriv->netdev); |
| 562 | mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); | 562 | mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); |
| 563 | mlx5i_uninit_underlay_qp(epriv); | ||
| 564 | mlx5e_deactivate_priv_channels(epriv); | 563 | mlx5e_deactivate_priv_channels(epriv); |
| 565 | mlx5e_close_channels(&epriv->channels); | 564 | mlx5e_close_channels(&epriv->channels); |
| 565 | mlx5i_uninit_underlay_qp(epriv); | ||
| 566 | unlock: | 566 | unlock: |
| 567 | mutex_unlock(&epriv->state_lock); | 567 | mutex_unlock(&epriv->state_lock); |
| 568 | return 0; | 568 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index cff141077558..88a8576ca9ce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
| @@ -485,8 +485,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, | |||
| 485 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; | 485 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; |
| 486 | 486 | ||
| 487 | /* Can't have multiple flags set here */ | 487 | /* Can't have multiple flags set here */ |
| 488 | if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) | 488 | if (bitmap_weight((unsigned long *)&pq_flags, |
| 489 | sizeof(pq_flags) * BITS_PER_BYTE) > 1) { | ||
| 490 | DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags); | ||
| 489 | goto err; | 491 | goto err; |
| 492 | } | ||
| 493 | |||
| 494 | if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { | ||
| 495 | DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags); | ||
| 496 | goto err; | ||
| 497 | } | ||
| 490 | 498 | ||
| 491 | switch (pq_flags) { | 499 | switch (pq_flags) { |
| 492 | case PQ_FLAGS_RLS: | 500 | case PQ_FLAGS_RLS: |
| @@ -510,8 +518,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, | |||
| 510 | } | 518 | } |
| 511 | 519 | ||
| 512 | err: | 520 | err: |
| 513 | DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); | 521 | return &qm_info->start_pq; |
| 514 | return NULL; | ||
| 515 | } | 522 | } |
| 516 | 523 | ||
| 517 | /* save pq index in qm info */ | 524 | /* save pq index in qm info */ |
| @@ -535,20 +542,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc) | |||
| 535 | { | 542 | { |
| 536 | u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); | 543 | u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); |
| 537 | 544 | ||
| 545 | if (max_tc == 0) { | ||
| 546 | DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n", | ||
| 547 | PQ_FLAGS_MCOS); | ||
| 548 | return p_hwfn->qm_info.start_pq; | ||
| 549 | } | ||
| 550 | |||
| 538 | if (tc > max_tc) | 551 | if (tc > max_tc) |
| 539 | DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); | 552 | DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); |
| 540 | 553 | ||
| 541 | return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; | 554 | return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc); |
| 542 | } | 555 | } |
| 543 | 556 | ||
| 544 | u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) | 557 | u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) |
| 545 | { | 558 | { |
| 546 | u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); | 559 | u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); |
| 547 | 560 | ||
| 561 | if (max_vf == 0) { | ||
| 562 | DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n", | ||
| 563 | PQ_FLAGS_VFS); | ||
| 564 | return p_hwfn->qm_info.start_pq; | ||
| 565 | } | ||
| 566 | |||
| 548 | if (vf > max_vf) | 567 | if (vf > max_vf) |
| 549 | DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); | 568 | DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); |
| 550 | 569 | ||
| 551 | return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; | 570 | return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf); |
| 552 | } | 571 | } |
| 553 | 572 | ||
| 554 | u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc) | 573 | u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc) |
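
The bitmap_weight() fix above is a bits-versus-bytes issue: the helper takes a bit count, so passing sizeof(pq_flags) made it inspect only the first four bits. A stand-alone sketch of the difference, using a local popcount rather than the kernel helper:

#include <stdio.h>

/* count set bits among the first nbits of val, mimicking bitmap_weight()'s
 * contract of taking a *bit* count, not a byte count */
static int weight(unsigned int val, unsigned int nbits)
{
        int w = 0;
        unsigned int i;

        for (i = 0; i < nbits && i < sizeof(val) * 8; i++)
                w += (val >> i) & 1;
        return w;
}

int main(void)
{
        unsigned int pq_flags = 0x30;   /* two flags set, both above bit 3 */

        /* buggy call: sizeof() is 4, so only bits 0..3 are examined */
        printf("weight over sizeof() bits   = %d\n",
               weight(pq_flags, sizeof(pq_flags)));

        /* fixed call: sizeof() * 8 covers the whole value */
        printf("weight over sizeof()*8 bits = %d\n",
               weight(pq_flags, sizeof(pq_flags) * 8));
        return 0;
}

With flags parked above bit 3, the first call reports zero set bits and the multiple-flags check silently passes; multiplying by BITS_PER_BYTE restores the intended behaviour.
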
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index a2e59f4f6f01..7cae17517744 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c | |||
| @@ -810,17 +810,13 @@ static int vsc85xx_default_config(struct phy_device *phydev) | |||
| 810 | 810 | ||
| 811 | phydev->mdix_ctrl = ETH_TP_MDI_AUTO; | 811 | phydev->mdix_ctrl = ETH_TP_MDI_AUTO; |
| 812 | mutex_lock(&phydev->lock); | 812 | mutex_lock(&phydev->lock); |
| 813 | rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); | ||
| 814 | if (rc < 0) | ||
| 815 | goto out_unlock; | ||
| 816 | 813 | ||
| 817 | reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL); | 814 | reg_val = RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS; |
| 818 | reg_val &= ~(RGMII_RX_CLK_DELAY_MASK); | 815 | |
| 819 | reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS); | 816 | rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2, |
| 820 | phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val); | 817 | MSCC_PHY_RGMII_CNTL, RGMII_RX_CLK_DELAY_MASK, |
| 818 | reg_val); | ||
| 821 | 819 | ||
| 822 | out_unlock: | ||
| 823 | rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc); | ||
| 824 | mutex_unlock(&phydev->lock); | 820 | mutex_unlock(&phydev->lock); |
| 825 | 821 | ||
| 826 | return rc; | 822 | return rc; |
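
The mscc change folds a read/clear/set/write sequence into phy_modify_paged(), which selects the page and performs a masked read-modify-write in one call. The underlying mask/set semantics, sketched in user space with made-up register values (the real MSCC field layout is not reproduced here):

#include <stdint.h>
#include <stdio.h>

/* read-modify-write: clear the bits covered by mask, then OR in set;
 * callers pass a set value that lies within mask */
static uint16_t reg_modify(uint16_t old, uint16_t mask, uint16_t set)
{
        return (old & ~mask) | set;
}

int main(void)
{
        uint16_t rgmii_cntl = 0x00f3;   /* pretend current register value */
        uint16_t delay_mask = 0x0070;   /* illustrative RX clock delay field */
        uint16_t delay_val  = 0x0020;   /* illustrative 1.1 ns delay code */

        printf("before: 0x%04x after: 0x%04x\n",
               (unsigned)rgmii_cntl,
               (unsigned)reg_modify(rgmii_cntl, delay_mask, delay_val));
        return 0;
}
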
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index db633ae9f784..364f514d56d8 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
| @@ -985,8 +985,6 @@ static void team_port_disable(struct team *team, | |||
| 985 | team->en_port_count--; | 985 | team->en_port_count--; |
| 986 | team_queue_override_port_del(team, port); | 986 | team_queue_override_port_del(team, port); |
| 987 | team_adjust_ops(team); | 987 | team_adjust_ops(team); |
| 988 | team_notify_peers(team); | ||
| 989 | team_mcast_rejoin(team); | ||
| 990 | team_lower_state_changed(port); | 988 | team_lower_state_changed(port); |
| 991 | } | 989 | } |
| 992 | 990 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 3e2c041d76ac..cecfd77c9f3c 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -70,7 +70,8 @@ static const unsigned long guest_offloads[] = { | |||
| 70 | VIRTIO_NET_F_GUEST_TSO4, | 70 | VIRTIO_NET_F_GUEST_TSO4, |
| 71 | VIRTIO_NET_F_GUEST_TSO6, | 71 | VIRTIO_NET_F_GUEST_TSO6, |
| 72 | VIRTIO_NET_F_GUEST_ECN, | 72 | VIRTIO_NET_F_GUEST_ECN, |
| 73 | VIRTIO_NET_F_GUEST_UFO | 73 | VIRTIO_NET_F_GUEST_UFO, |
| 74 | VIRTIO_NET_F_GUEST_CSUM | ||
| 74 | }; | 75 | }; |
| 75 | 76 | ||
| 76 | struct virtnet_stat_desc { | 77 | struct virtnet_stat_desc { |
| @@ -2334,9 +2335,6 @@ static int virtnet_clear_guest_offloads(struct virtnet_info *vi) | |||
| 2334 | if (!vi->guest_offloads) | 2335 | if (!vi->guest_offloads) |
| 2335 | return 0; | 2336 | return 0; |
| 2336 | 2337 | ||
| 2337 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) | ||
| 2338 | offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM; | ||
| 2339 | |||
| 2340 | return virtnet_set_guest_offloads(vi, offloads); | 2338 | return virtnet_set_guest_offloads(vi, offloads); |
| 2341 | } | 2339 | } |
| 2342 | 2340 | ||
| @@ -2346,8 +2344,6 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi) | |||
| 2346 | 2344 | ||
| 2347 | if (!vi->guest_offloads) | 2345 | if (!vi->guest_offloads) |
| 2348 | return 0; | 2346 | return 0; |
| 2349 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) | ||
| 2350 | offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM; | ||
| 2351 | 2347 | ||
| 2352 | return virtnet_set_guest_offloads(vi, offloads); | 2348 | return virtnet_set_guest_offloads(vi, offloads); |
| 2353 | } | 2349 | } |
| @@ -2365,8 +2361,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |||
| 2365 | && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || | 2361 | && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 2366 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || | 2362 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || |
| 2367 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || | 2363 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || |
| 2368 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) { | 2364 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || |
| 2369 | NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first"); | 2365 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { |
| 2366 | NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first"); | ||
| 2370 | return -EOPNOTSUPP; | 2367 | return -EOPNOTSUPP; |
| 2371 | } | 2368 | } |
| 2372 | 2369 | ||
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index a1c2801ded10..7e49342bae38 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c | |||
| @@ -6867,7 +6867,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | |||
| 6867 | u32 bitmap; | 6867 | u32 bitmap; |
| 6868 | 6868 | ||
| 6869 | if (drop) { | 6869 | if (drop) { |
| 6870 | if (vif->type == NL80211_IFTYPE_STATION) { | 6870 | if (vif && vif->type == NL80211_IFTYPE_STATION) { |
| 6871 | bitmap = ~(1 << WMI_MGMT_TID); | 6871 | bitmap = ~(1 << WMI_MGMT_TID); |
| 6872 | list_for_each_entry(arvif, &ar->arvifs, list) { | 6872 | list_for_each_entry(arvif, &ar->arvifs, list) { |
| 6873 | if (arvif->vdev_type == WMI_VDEV_TYPE_STA) | 6873 | if (arvif->vdev_type == WMI_VDEV_TYPE_STA) |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 1e3b5f4a4cf9..f23cb2f3d296 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
| @@ -1251,6 +1251,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, | |||
| 1251 | struct ath_vif *avp = (void *)vif->drv_priv; | 1251 | struct ath_vif *avp = (void *)vif->drv_priv; |
| 1252 | struct ath_node *an = &avp->mcast_node; | 1252 | struct ath_node *an = &avp->mcast_node; |
| 1253 | 1253 | ||
| 1254 | mutex_lock(&sc->mutex); | ||
| 1254 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) { | 1255 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) { |
| 1255 | if (sc->cur_chan->nvifs >= 1) { | 1256 | if (sc->cur_chan->nvifs >= 1) { |
| 1256 | mutex_unlock(&sc->mutex); | 1257 | mutex_unlock(&sc->mutex); |
| @@ -1259,8 +1260,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, | |||
| 1259 | sc->tx99_vif = vif; | 1260 | sc->tx99_vif = vif; |
| 1260 | } | 1261 | } |
| 1261 | 1262 | ||
| 1262 | mutex_lock(&sc->mutex); | ||
| 1263 | |||
| 1264 | ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); | 1263 | ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); |
| 1265 | sc->cur_chan->nvifs++; | 1264 | sc->cur_chan->nvifs++; |
| 1266 | 1265 | ||
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 230a378c26fc..7f0a5bade70a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
| @@ -6005,7 +6005,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, | |||
| 6005 | * for subsequent chanspecs. | 6005 | * for subsequent chanspecs. |
| 6006 | */ | 6006 | */ |
| 6007 | channel->flags = IEEE80211_CHAN_NO_HT40 | | 6007 | channel->flags = IEEE80211_CHAN_NO_HT40 | |
| 6008 | IEEE80211_CHAN_NO_80MHZ; | 6008 | IEEE80211_CHAN_NO_80MHZ | |
| 6009 | IEEE80211_CHAN_NO_160MHZ; | ||
| 6009 | ch.bw = BRCMU_CHAN_BW_20; | 6010 | ch.bw = BRCMU_CHAN_BW_20; |
| 6010 | cfg->d11inf.encchspec(&ch); | 6011 | cfg->d11inf.encchspec(&ch); |
| 6011 | chaninfo = ch.chspec; | 6012 | chaninfo = ch.chspec; |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c index e7584b842dce..eb5db94f5745 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c | |||
| @@ -193,6 +193,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) | |||
| 193 | } | 193 | } |
| 194 | break; | 194 | break; |
| 195 | case BRCMU_CHSPEC_D11AC_BW_160: | 195 | case BRCMU_CHSPEC_D11AC_BW_160: |
| 196 | ch->bw = BRCMU_CHAN_BW_160; | ||
| 197 | ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK, | ||
| 198 | BRCMU_CHSPEC_D11AC_SB_SHIFT); | ||
| 196 | switch (ch->sb) { | 199 | switch (ch->sb) { |
| 197 | case BRCMU_CHAN_SB_LLL: | 200 | case BRCMU_CHAN_SB_LLL: |
| 198 | ch->control_ch_num -= CH_70MHZ_APART; | 201 | ch->control_ch_num -= CH_70MHZ_APART; |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 2439e98431ee..7492dfb6729b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | * GPL LICENSE SUMMARY | 6 | * GPL LICENSE SUMMARY |
| 7 | * | 7 | * |
| 8 | * Copyright(c) 2017 Intel Deutschland GmbH | 8 | * Copyright(c) 2017 Intel Deutschland GmbH |
| 9 | * Copyright(c) 2018 Intel Corporation | ||
| 9 | * | 10 | * |
| 10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
| 11 | * it under the terms of version 2 of the GNU General Public License as | 12 | * it under the terms of version 2 of the GNU General Public License as |
| @@ -26,6 +27,7 @@ | |||
| 26 | * BSD LICENSE | 27 | * BSD LICENSE |
| 27 | * | 28 | * |
| 28 | * Copyright(c) 2017 Intel Deutschland GmbH | 29 | * Copyright(c) 2017 Intel Deutschland GmbH |
| 30 | * Copyright(c) 2018 Intel Corporation | ||
| 29 | * All rights reserved. | 31 | * All rights reserved. |
| 30 | * | 32 | * |
| 31 | * Redistribution and use in source and binary forms, with or without | 33 | * Redistribution and use in source and binary forms, with or without |
| @@ -81,7 +83,7 @@ | |||
| 81 | #define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) | 83 | #define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) |
| 82 | #define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ | 84 | #define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ |
| 83 | ACPI_SAR_TABLE_SIZE + 3) | 85 | ACPI_SAR_TABLE_SIZE + 3) |
| 84 | #define ACPI_WGDS_WIFI_DATA_SIZE 18 | 86 | #define ACPI_WGDS_WIFI_DATA_SIZE 19 |
| 85 | #define ACPI_WRDD_WIFI_DATA_SIZE 2 | 87 | #define ACPI_WRDD_WIFI_DATA_SIZE 2 |
| 86 | #define ACPI_SPLC_WIFI_DATA_SIZE 2 | 88 | #define ACPI_SPLC_WIFI_DATA_SIZE 2 |
| 87 | 89 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 6b95d0e75889..2b8b50a77990 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h | |||
| @@ -154,7 +154,11 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, | |||
| 154 | const struct iwl_fw_runtime_ops *ops, void *ops_ctx, | 154 | const struct iwl_fw_runtime_ops *ops, void *ops_ctx, |
| 155 | struct dentry *dbgfs_dir); | 155 | struct dentry *dbgfs_dir); |
| 156 | 156 | ||
| 157 | void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt); | 157 | static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt) |
| 158 | { | ||
| 159 | kfree(fwrt->dump.d3_debug_data); | ||
| 160 | fwrt->dump.d3_debug_data = NULL; | ||
| 161 | } | ||
| 158 | 162 | ||
| 159 | void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); | 163 | void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); |
| 160 | 164 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index dade206d5511..2ba890445c35 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c | |||
| @@ -893,7 +893,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) | |||
| 893 | IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); | 893 | IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); |
| 894 | 894 | ||
| 895 | BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * | 895 | BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * |
| 896 | ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE); | 896 | ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE); |
| 897 | 897 | ||
| 898 | BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES); | 898 | BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES); |
| 899 | 899 | ||
| @@ -928,6 +928,11 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) | |||
| 928 | return -ENOENT; | 928 | return -ENOENT; |
| 929 | } | 929 | } |
| 930 | 930 | ||
| 931 | static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm) | ||
| 932 | { | ||
| 933 | return -ENOENT; | ||
| 934 | } | ||
| 935 | |||
| 931 | static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) | 936 | static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) |
| 932 | { | 937 | { |
| 933 | return 0; | 938 | return 0; |
| @@ -954,8 +959,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) | |||
| 954 | IWL_DEBUG_RADIO(mvm, | 959 | IWL_DEBUG_RADIO(mvm, |
| 955 | "WRDS SAR BIOS table invalid or unavailable. (%d)\n", | 960 | "WRDS SAR BIOS table invalid or unavailable. (%d)\n", |
| 956 | ret); | 961 | ret); |
| 957 | /* if not available, don't fail and don't bother with EWRD */ | 962 | /* |
| 958 | return 0; | 963 | * If not available, don't fail and don't bother with EWRD. |
| 964 | * Return 1 to tell that we can't use WGDS either. | ||
| 965 | */ | ||
| 966 | return 1; | ||
| 959 | } | 967 | } |
| 960 | 968 | ||
| 961 | ret = iwl_mvm_sar_get_ewrd_table(mvm); | 969 | ret = iwl_mvm_sar_get_ewrd_table(mvm); |
| @@ -968,9 +976,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) | |||
| 968 | /* choose profile 1 (WRDS) as default for both chains */ | 976 | /* choose profile 1 (WRDS) as default for both chains */ |
| 969 | ret = iwl_mvm_sar_select_profile(mvm, 1, 1); | 977 | ret = iwl_mvm_sar_select_profile(mvm, 1, 1); |
| 970 | 978 | ||
| 971 | /* if we don't have profile 0 from BIOS, just skip it */ | 979 | /* |
| 980 | * If we don't have profile 0 from BIOS, just skip it. This | ||
| 981 | * means that SAR Geo will not be enabled either, even if we | ||
| 982 | * have other valid profiles. | ||
| 983 | */ | ||
| 972 | if (ret == -ENOENT) | 984 | if (ret == -ENOENT) |
| 973 | return 0; | 985 | return 1; |
| 974 | 986 | ||
| 975 | return ret; | 987 | return ret; |
| 976 | } | 988 | } |
| @@ -1168,11 +1180,19 @@ int iwl_mvm_up(struct iwl_mvm *mvm) | |||
| 1168 | iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); | 1180 | iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); |
| 1169 | 1181 | ||
| 1170 | ret = iwl_mvm_sar_init(mvm); | 1182 | ret = iwl_mvm_sar_init(mvm); |
| 1171 | if (ret) | 1183 | if (ret == 0) { |
| 1172 | goto error; | 1184 | ret = iwl_mvm_sar_geo_init(mvm); |
| 1185 | } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) { | ||
| 1186 | /* | ||
| 1187 | * If basic SAR is not available, we check for WGDS, | ||
| 1188 | * which should *not* be available either. If it is | ||
| 1189 | * available, issue an error, because we can't use SAR | ||
| 1190 | * Geo without basic SAR. | ||
| 1191 | */ | ||
| 1192 | IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); | ||
| 1193 | } | ||
| 1173 | 1194 | ||
| 1174 | ret = iwl_mvm_sar_geo_init(mvm); | 1195 | if (ret < 0) |
| 1175 | if (ret) | ||
| 1176 | goto error; | 1196 | goto error; |
| 1177 | 1197 | ||
| 1178 | iwl_mvm_leds_sync(mvm); | 1198 | iwl_mvm_leds_sync(mvm); |
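
The SAR init path above now distinguishes three outcomes: a negative value aborts bring-up, zero means the WRDS table was applied, and a positive value means the BIOS simply has no SAR tables, which is tolerated but disables the geo (WGDS) step. A hedged sketch of consuming that convention; the function names are stand-ins, not the iwlwifi API:

#include <stdio.h>

/* illustrative stand-ins: <0 = hard error, 0 = SAR configured from WRDS,
 * >0 = BIOS tables absent (not an error) */
static int sar_init(int have_wrds)
{
        return have_wrds ? 0 : 1;
}

static int wgds_present(int have_wgds)
{
        return have_wgds ? 0 : -1;      /* 0 on success, like the real getter */
}

static int bring_up(int have_wrds, int have_wgds)
{
        int ret = sar_init(have_wrds);

        if (ret == 0)
                printf("basic SAR ok, geo profiles may be applied\n");
        else if (ret > 0 && !wgds_present(have_wgds))
                /* geo table without the basic one: complain but keep going */
                printf("warning: WGDS present but no WRDS, ignoring it\n");
        else if (ret > 0)
                printf("no SAR tables in BIOS, continuing without SAR\n");

        return ret < 0 ? ret : 0;       /* only negative values abort bring-up */
}

int main(void)
{
        bring_up(1, 1);
        bring_up(0, 1);
        bring_up(0, 0);
        return 0;
}
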
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 505b0385d800..00f831d88366 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
| @@ -301,8 +301,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, | |||
| 301 | goto out; | 301 | goto out; |
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | if (changed) | 304 | if (changed) { |
| 305 | *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE); | 305 | u32 status = le32_to_cpu(resp->status); |
| 306 | |||
| 307 | *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || | ||
| 308 | status == MCC_RESP_ILLEGAL); | ||
| 309 | } | ||
| 306 | 310 | ||
| 307 | regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, | 311 | regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, |
| 308 | __le32_to_cpu(resp->n_channels), | 312 | __le32_to_cpu(resp->n_channels), |
| @@ -4444,10 +4448,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, | |||
| 4444 | sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); | 4448 | sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); |
| 4445 | } | 4449 | } |
| 4446 | 4450 | ||
| 4447 | if (!fw_has_capa(&mvm->fw->ucode_capa, | ||
| 4448 | IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) | ||
| 4449 | return; | ||
| 4450 | |||
| 4451 | /* if beacon filtering isn't on mac80211 does it anyway */ | 4451 | /* if beacon filtering isn't on mac80211 does it anyway */ |
| 4452 | if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) | 4452 | if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) |
| 4453 | return; | 4453 | return; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 3633f27d048a..6fc5cc1f2b5b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | |||
| @@ -539,9 +539,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, | |||
| 539 | } | 539 | } |
| 540 | 540 | ||
| 541 | IWL_DEBUG_LAR(mvm, | 541 | IWL_DEBUG_LAR(mvm, |
| 542 | "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n", | 542 | "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n", |
| 543 | status, mcc, mcc >> 8, mcc & 0xff, | 543 | status, mcc, mcc >> 8, mcc & 0xff, n_channels); |
| 544 | !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels); | ||
| 545 | 544 | ||
| 546 | exit: | 545 | exit: |
| 547 | iwl_free_resp(&cmd); | 546 | iwl_free_resp(&cmd); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 0e2092526fae..af3fba10abc1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
| @@ -858,6 +858,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, | |||
| 858 | iwl_mvm_thermal_exit(mvm); | 858 | iwl_mvm_thermal_exit(mvm); |
| 859 | out_free: | 859 | out_free: |
| 860 | iwl_fw_flush_dump(&mvm->fwrt); | 860 | iwl_fw_flush_dump(&mvm->fwrt); |
| 861 | iwl_fw_runtime_free(&mvm->fwrt); | ||
| 861 | 862 | ||
| 862 | if (iwlmvm_mod_params.init_dbg) | 863 | if (iwlmvm_mod_params.init_dbg) |
| 863 | return op_mode; | 864 | return op_mode; |
| @@ -910,6 +911,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) | |||
| 910 | 911 | ||
| 911 | iwl_mvm_tof_clean(mvm); | 912 | iwl_mvm_tof_clean(mvm); |
| 912 | 913 | ||
| 914 | iwl_fw_runtime_free(&mvm->fwrt); | ||
| 913 | mutex_destroy(&mvm->mutex); | 915 | mutex_destroy(&mvm->mutex); |
| 914 | mutex_destroy(&mvm->d0i3_suspend_mutex); | 916 | mutex_destroy(&mvm->d0i3_suspend_mutex); |
| 915 | 917 | ||
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index 0ccbcd7e887d..c30d8f5bbf2a 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig | |||
| @@ -1,6 +1,12 @@ | |||
| 1 | config MT76_CORE | 1 | config MT76_CORE |
| 2 | tristate | 2 | tristate |
| 3 | 3 | ||
| 4 | config MT76_LEDS | ||
| 5 | bool | ||
| 6 | depends on MT76_CORE | ||
| 7 | depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS | ||
| 8 | default y | ||
| 9 | |||
| 4 | config MT76_USB | 10 | config MT76_USB |
| 5 | tristate | 11 | tristate |
| 6 | depends on MT76_CORE | 12 | depends on MT76_CORE |
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 2a699e8b79bf..7d219ff2d480 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c | |||
| @@ -345,9 +345,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, | |||
| 345 | mt76_check_sband(dev, NL80211_BAND_2GHZ); | 345 | mt76_check_sband(dev, NL80211_BAND_2GHZ); |
| 346 | mt76_check_sband(dev, NL80211_BAND_5GHZ); | 346 | mt76_check_sband(dev, NL80211_BAND_5GHZ); |
| 347 | 347 | ||
| 348 | ret = mt76_led_init(dev); | 348 | if (IS_ENABLED(CONFIG_MT76_LEDS)) { |
| 349 | if (ret) | 349 | ret = mt76_led_init(dev); |
| 350 | return ret; | 350 | if (ret) |
| 351 | return ret; | ||
| 352 | } | ||
| 351 | 353 | ||
| 352 | return ieee80211_register_hw(hw); | 354 | return ieee80211_register_hw(hw); |
| 353 | } | 355 | } |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index 47c42c607964..7806963b1905 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h | |||
| @@ -71,7 +71,6 @@ struct mt76x02_dev { | |||
| 71 | struct mac_address macaddr_list[8]; | 71 | struct mac_address macaddr_list[8]; |
| 72 | 72 | ||
| 73 | struct mutex phy_mutex; | 73 | struct mutex phy_mutex; |
| 74 | struct mutex mutex; | ||
| 75 | 74 | ||
| 76 | u8 txdone_seq; | 75 | u8 txdone_seq; |
| 77 | DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status); | 76 | DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status); |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index 3824290b219d..fd125722d1fb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c | |||
| @@ -507,8 +507,10 @@ int mt76x2_register_device(struct mt76x02_dev *dev) | |||
| 507 | mt76x2_dfs_init_detector(dev); | 507 | mt76x2_dfs_init_detector(dev); |
| 508 | 508 | ||
| 509 | /* init led callbacks */ | 509 | /* init led callbacks */ |
| 510 | dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; | 510 | if (IS_ENABLED(CONFIG_MT76_LEDS)) { |
| 511 | dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; | 511 | dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; |
| 512 | dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; | ||
| 513 | } | ||
| 512 | 514 | ||
| 513 | ret = mt76_register_device(&dev->mt76, true, mt76x02_rates, | 515 | ret = mt76_register_device(&dev->mt76, true, mt76x02_rates, |
| 514 | ARRAY_SIZE(mt76x02_rates)); | 516 | ARRAY_SIZE(mt76x02_rates)); |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c index 034a06295668..3f001bd6806c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c | |||
| @@ -272,9 +272,9 @@ mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val) | |||
| 272 | if (val != ~0 && val > 0xffff) | 272 | if (val != ~0 && val > 0xffff) |
| 273 | return -EINVAL; | 273 | return -EINVAL; |
| 274 | 274 | ||
| 275 | mutex_lock(&dev->mutex); | 275 | mutex_lock(&dev->mt76.mutex); |
| 276 | mt76x2_mac_set_tx_protection(dev, val); | 276 | mt76x2_mac_set_tx_protection(dev, val); |
| 277 | mutex_unlock(&dev->mutex); | 277 | mutex_unlock(&dev->mt76.mutex); |
| 278 | 278 | ||
| 279 | return 0; | 279 | return 0; |
| 280 | } | 280 | } |
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 4c2154b9e6a3..bd10165d7eec 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c | |||
| @@ -285,7 +285,7 @@ static int wl1271_probe(struct sdio_func *func, | |||
| 285 | struct resource res[2]; | 285 | struct resource res[2]; |
| 286 | mmc_pm_flag_t mmcflags; | 286 | mmc_pm_flag_t mmcflags; |
| 287 | int ret = -ENOMEM; | 287 | int ret = -ENOMEM; |
| 288 | int irq, wakeirq; | 288 | int irq, wakeirq, num_irqs; |
| 289 | const char *chip_family; | 289 | const char *chip_family; |
| 290 | 290 | ||
| 291 | /* We are only able to handle the wlan function */ | 291 | /* We are only able to handle the wlan function */ |
| @@ -353,12 +353,17 @@ static int wl1271_probe(struct sdio_func *func, | |||
| 353 | irqd_get_trigger_type(irq_get_irq_data(irq)); | 353 | irqd_get_trigger_type(irq_get_irq_data(irq)); |
| 354 | res[0].name = "irq"; | 354 | res[0].name = "irq"; |
| 355 | 355 | ||
| 356 | res[1].start = wakeirq; | ||
| 357 | res[1].flags = IORESOURCE_IRQ | | ||
| 358 | irqd_get_trigger_type(irq_get_irq_data(wakeirq)); | ||
| 359 | res[1].name = "wakeirq"; | ||
| 360 | 356 | ||
| 361 | ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); | 357 | if (wakeirq > 0) { |
| 358 | res[1].start = wakeirq; | ||
| 359 | res[1].flags = IORESOURCE_IRQ | | ||
| 360 | irqd_get_trigger_type(irq_get_irq_data(wakeirq)); | ||
| 361 | res[1].name = "wakeirq"; | ||
| 362 | num_irqs = 2; | ||
| 363 | } else { | ||
| 364 | num_irqs = 1; | ||
| 365 | } | ||
| 366 | ret = platform_device_add_resources(glue->core, res, num_irqs); | ||
| 362 | if (ret) { | 367 | if (ret) { |
| 363 | dev_err(glue->dev, "can't add resources\n"); | 368 | dev_err(glue->dev, "can't add resources\n"); |
| 364 | goto out_dev_put; | 369 | goto out_dev_put; |
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h index c79e859408e6..fd458389f7d1 100644 --- a/include/linux/net_dim.h +++ b/include/linux/net_dim.h | |||
| @@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim, | |||
| 406 | } | 406 | } |
| 407 | /* fall through */ | 407 | /* fall through */ |
| 408 | case NET_DIM_START_MEASURE: | 408 | case NET_DIM_START_MEASURE: |
| 409 | net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr, | ||
| 410 | &dim->start_sample); | ||
| 409 | dim->state = NET_DIM_MEASURE_IN_PROGRESS; | 411 | dim->state = NET_DIM_MEASURE_IN_PROGRESS; |
| 410 | break; | 412 | break; |
| 411 | case NET_DIM_APPLY_NEW_PROFILE: | 413 | case NET_DIM_APPLY_NEW_PROFILE: |
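
The net_dim hunk re-primes dim->start_sample whenever a new measurement window opens, so the next comparison is taken against fresh counters instead of whatever the previous window left behind. The windowing idea in isolation, with plain structs rather than the kernel's net_dim types:

#include <stdint.h>
#include <stdio.h>

struct sample { uint64_t events, pkts, bytes; };

/* snapshot the current counters as the start of a measurement window */
static void start_measure(const struct sample *now, struct sample *start)
{
        *start = *now;
}

/* rates over the window are deltas against the recorded start sample */
static void end_measure(const struct sample *now, const struct sample *start)
{
        printf("window: %llu events, %llu pkts, %llu bytes\n",
               (unsigned long long)(now->events - start->events),
               (unsigned long long)(now->pkts - start->pkts),
               (unsigned long long)(now->bytes - start->bytes));
}

int main(void)
{
        struct sample start, now = { 100, 5000, 4000000 };

        start_measure(&now, &start);    /* without this, deltas accumulate
                                         * across windows and skew the
                                         * profile decision */
        now.events += 10;
        now.pkts += 800;
        now.bytes += 600000;
        end_measure(&now, &start);
        return 0;
}
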
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0ba687454267..0d1b2c3f127b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -1326,6 +1326,22 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg) | |||
| 1326 | } | 1326 | } |
| 1327 | } | 1327 | } |
| 1328 | 1328 | ||
| 1329 | static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val) | ||
| 1330 | { | ||
| 1331 | skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); | ||
| 1332 | skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; | ||
| 1333 | } | ||
| 1334 | |||
| 1335 | static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb) | ||
| 1336 | { | ||
| 1337 | return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL; | ||
| 1338 | } | ||
| 1339 | |||
| 1340 | static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) | ||
| 1341 | { | ||
| 1342 | return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); | ||
| 1343 | } | ||
| 1344 | |||
| 1329 | /* Release a reference on a zerocopy structure */ | 1345 | /* Release a reference on a zerocopy structure */ |
| 1330 | static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) | 1346 | static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) |
| 1331 | { | 1347 | { |
| @@ -1335,7 +1351,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) | |||
| 1335 | if (uarg->callback == sock_zerocopy_callback) { | 1351 | if (uarg->callback == sock_zerocopy_callback) { |
| 1336 | uarg->zerocopy = uarg->zerocopy && zerocopy; | 1352 | uarg->zerocopy = uarg->zerocopy && zerocopy; |
| 1337 | sock_zerocopy_put(uarg); | 1353 | sock_zerocopy_put(uarg); |
| 1338 | } else { | 1354 | } else if (!skb_zcopy_is_nouarg(skb)) { |
| 1339 | uarg->callback(uarg, zerocopy); | 1355 | uarg->callback(uarg, zerocopy); |
| 1340 | } | 1356 | } |
| 1341 | 1357 | ||
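
skb_zcopy_set_nouarg() reuses shinfo->destructor_arg to carry an opaque cookie, tagging it with the pointer's low bit so skb_zcopy_clear() can tell it apart from a real ubuf_info (the af_packet hunk later in this section stores its tx-ring slot this way). The tagging trick on its own, as a small user-space sketch that assumes stored pointers are at least 2-byte aligned:

#include <stdint.h>
#include <stdio.h>

/* store an opaque value with bit 0 set as a "no ubuf_info here" marker */
static void *tag_nouarg(void *val)
{
        return (void *)((uintptr_t)val | 0x1UL);
}

static int is_nouarg(void *stored)
{
        return (uintptr_t)stored & 0x1UL;
}

static void *untag_nouarg(void *stored)
{
        return (void *)((uintptr_t)stored & ~0x1UL);
}

int main(void)
{
        static int frame;               /* stand-in for a tx-ring slot */
        void *stored = tag_nouarg(&frame);

        printf("tagged: %d, round-trips: %d\n",
               is_nouarg(stored), untag_nouarg(stored) == &frame);
        return 0;
}
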
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 8ed77bb4ed86..a9b0280687d5 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
| @@ -196,6 +196,7 @@ struct tcp_sock { | |||
| 196 | u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ | 196 | u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ |
| 197 | u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ | 197 | u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ |
| 198 | u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */ | 198 | u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */ |
| 199 | u32 compressed_ack_rcv_nxt; | ||
| 199 | 200 | ||
| 200 | u32 tsoffset; /* timestamp offset */ | 201 | u32 tsoffset; /* timestamp offset */ |
| 201 | 202 | ||
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 8c2caa370e0f..ab9242e51d9e 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
| @@ -608,4 +608,16 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst) | |||
| 608 | SCTP_DEFAULT_MINSEGMENT)); | 608 | SCTP_DEFAULT_MINSEGMENT)); |
| 609 | } | 609 | } |
| 610 | 610 | ||
| 611 | static inline bool sctp_transport_pmtu_check(struct sctp_transport *t) | ||
| 612 | { | ||
| 613 | __u32 pmtu = sctp_dst_mtu(t->dst); | ||
| 614 | |||
| 615 | if (t->pathmtu == pmtu) | ||
| 616 | return true; | ||
| 617 | |||
| 618 | t->pathmtu = pmtu; | ||
| 619 | |||
| 620 | return false; | ||
| 621 | } | ||
| 622 | |||
| 611 | #endif /* __net_sctp_h__ */ | 623 | #endif /* __net_sctp_h__ */ |
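
sctp_transport_pmtu_check() is a small cache-staleness test: it returns true while the cached path MTU still matches the dst, and otherwise refreshes the cache and returns false so the caller knows to resync dependent state. The shape of that pattern as a stand-alone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct transport { uint32_t pathmtu; };

/* returns true when the cached value still matches; otherwise updates the
 * cache and returns false so the caller can resync dependent state */
static bool pmtu_check(struct transport *t, uint32_t dst_mtu)
{
        if (t->pathmtu == dst_mtu)
                return true;
        t->pathmtu = dst_mtu;
        return false;
}

int main(void)
{
        struct transport t = { .pathmtu = 1500 };

        printf("unchanged: %d\n", pmtu_check(&t, 1500));  /* 1 */
        printf("changed:   %d\n", pmtu_check(&t, 1400));  /* 0, cache now 1400 */
        printf("unchanged: %d\n", pmtu_check(&t, 1400));  /* 1 */
        return 0;
}
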
diff --git a/net/core/dev.c b/net/core/dev.c index 066aa902d85c..ddc551f24ba2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -5970,11 +5970,14 @@ bool napi_complete_done(struct napi_struct *n, int work_done) | |||
| 5970 | if (work_done) | 5970 | if (work_done) |
| 5971 | timeout = n->dev->gro_flush_timeout; | 5971 | timeout = n->dev->gro_flush_timeout; |
| 5972 | 5972 | ||
| 5973 | /* When the NAPI instance uses a timeout and keeps postponing | ||
| 5974 | * it, we need to bound somehow the time packets are kept in | ||
| 5975 | * the GRO layer | ||
| 5976 | */ | ||
| 5977 | napi_gro_flush(n, !!timeout); | ||
| 5973 | if (timeout) | 5978 | if (timeout) |
| 5974 | hrtimer_start(&n->timer, ns_to_ktime(timeout), | 5979 | hrtimer_start(&n->timer, ns_to_ktime(timeout), |
| 5975 | HRTIMER_MODE_REL_PINNED); | 5980 | HRTIMER_MODE_REL_PINNED); |
| 5976 | else | ||
| 5977 | napi_gro_flush(n, false); | ||
| 5978 | } | 5981 | } |
| 5979 | if (unlikely(!list_empty(&n->poll_list))) { | 5982 | if (unlikely(!list_empty(&n->poll_list))) { |
| 5980 | /* If n->poll_list is not empty, we need to mask irqs */ | 5983 | /* If n->poll_list is not empty, we need to mask irqs */ |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b4ee5c8b928f..a8217e221e19 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -4854,6 +4854,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) | |||
| 4854 | nf_reset(skb); | 4854 | nf_reset(skb); |
| 4855 | nf_reset_trace(skb); | 4855 | nf_reset_trace(skb); |
| 4856 | 4856 | ||
| 4857 | #ifdef CONFIG_NET_SWITCHDEV | ||
| 4858 | skb->offload_fwd_mark = 0; | ||
| 4859 | skb->offload_mr_fwd_mark = 0; | ||
| 4860 | #endif | ||
| 4861 | |||
| 4857 | if (!xnet) | 4862 | if (!xnet) |
| 4858 | return; | 4863 | return; |
| 4859 | 4864 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2868ef28ce52..1e37c1388189 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -4268,7 +4268,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) | |||
| 4268 | * If the sack array is full, forget about the last one. | 4268 | * If the sack array is full, forget about the last one. |
| 4269 | */ | 4269 | */ |
| 4270 | if (this_sack >= TCP_NUM_SACKS) { | 4270 | if (this_sack >= TCP_NUM_SACKS) { |
| 4271 | if (tp->compressed_ack) | 4271 | if (tp->compressed_ack > TCP_FASTRETRANS_THRESH) |
| 4272 | tcp_send_ack(sk); | 4272 | tcp_send_ack(sk); |
| 4273 | this_sack--; | 4273 | this_sack--; |
| 4274 | tp->rx_opt.num_sacks--; | 4274 | tp->rx_opt.num_sacks--; |
| @@ -4363,6 +4363,7 @@ static bool tcp_try_coalesce(struct sock *sk, | |||
| 4363 | if (TCP_SKB_CB(from)->has_rxtstamp) { | 4363 | if (TCP_SKB_CB(from)->has_rxtstamp) { |
| 4364 | TCP_SKB_CB(to)->has_rxtstamp = true; | 4364 | TCP_SKB_CB(to)->has_rxtstamp = true; |
| 4365 | to->tstamp = from->tstamp; | 4365 | to->tstamp = from->tstamp; |
| 4366 | skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp; | ||
| 4366 | } | 4367 | } |
| 4367 | 4368 | ||
| 4368 | return true; | 4369 | return true; |
| @@ -5188,7 +5189,17 @@ send_now: | |||
| 5188 | if (!tcp_is_sack(tp) || | 5189 | if (!tcp_is_sack(tp) || |
| 5189 | tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr) | 5190 | tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr) |
| 5190 | goto send_now; | 5191 | goto send_now; |
| 5191 | tp->compressed_ack++; | 5192 | |
| 5193 | if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) { | ||
| 5194 | tp->compressed_ack_rcv_nxt = tp->rcv_nxt; | ||
| 5195 | if (tp->compressed_ack > TCP_FASTRETRANS_THRESH) | ||
| 5196 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, | ||
| 5197 | tp->compressed_ack - TCP_FASTRETRANS_THRESH); | ||
| 5198 | tp->compressed_ack = 0; | ||
| 5199 | } | ||
| 5200 | |||
| 5201 | if (++tp->compressed_ack <= TCP_FASTRETRANS_THRESH) | ||
| 5202 | goto send_now; | ||
| 5192 | 5203 | ||
| 5193 | if (hrtimer_is_queued(&tp->compressed_ack_timer)) | 5204 | if (hrtimer_is_queued(&tp->compressed_ack_timer)) |
| 5194 | return; | 5205 | return; |
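
The compressed-ACK counter is now biased by TCP_FASTRETRANS_THRESH: for each new rcv_nxt the first few ACKs are sent immediately and only the surplus is deferred to the compression timer, which keeps SACK feedback prompt. The counting rule in isolation; the threshold of 3 is the value the patch appears to rely on and is treated here as an assumption:

#include <stdio.h>

#define FASTRETRANS_THRESH 3    /* assumed value of TCP_FASTRETRANS_THRESH */

int main(void)
{
        int compressed_ack = 0;
        int acks_sent_now = 0, acks_compressed = 0;
        int i;

        for (i = 0; i < 10; i++) {
                /* first THRESH ACKs per window go out immediately;
                 * anything beyond that is deferred to the compression timer */
                if (++compressed_ack <= FASTRETRANS_THRESH)
                        acks_sent_now++;
                else
                        acks_compressed++;
        }
        printf("sent immediately: %d, compressed: %d\n",
               acks_sent_now, acks_compressed);
        return 0;
}
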
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 9c34b97d365d..3f510cad0b3e 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -180,10 +180,10 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, | |||
| 180 | { | 180 | { |
| 181 | struct tcp_sock *tp = tcp_sk(sk); | 181 | struct tcp_sock *tp = tcp_sk(sk); |
| 182 | 182 | ||
| 183 | if (unlikely(tp->compressed_ack)) { | 183 | if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) { |
| 184 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, | 184 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, |
| 185 | tp->compressed_ack); | 185 | tp->compressed_ack - TCP_FASTRETRANS_THRESH); |
| 186 | tp->compressed_ack = 0; | 186 | tp->compressed_ack = TCP_FASTRETRANS_THRESH; |
| 187 | if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) | 187 | if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) |
| 188 | __sock_put(sk); | 188 | __sock_put(sk); |
| 189 | } | 189 | } |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 676020663ce8..5f8b6d3cd855 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
| @@ -740,7 +740,7 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer) | |||
| 740 | 740 | ||
| 741 | bh_lock_sock(sk); | 741 | bh_lock_sock(sk); |
| 742 | if (!sock_owned_by_user(sk)) { | 742 | if (!sock_owned_by_user(sk)) { |
| 743 | if (tp->compressed_ack) | 743 | if (tp->compressed_ack > TCP_FASTRETRANS_THRESH) |
| 744 | tcp_send_ack(sk); | 744 | tcp_send_ack(sk); |
| 745 | } else { | 745 | } else { |
| 746 | if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, | 746 | if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 63a808d5af15..045597b9a7c0 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -179,7 +179,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp); | |||
| 179 | static void addrconf_dad_work(struct work_struct *w); | 179 | static void addrconf_dad_work(struct work_struct *w); |
| 180 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, | 180 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, |
| 181 | bool send_na); | 181 | bool send_na); |
| 182 | static void addrconf_dad_run(struct inet6_dev *idev); | 182 | static void addrconf_dad_run(struct inet6_dev *idev, bool restart); |
| 183 | static void addrconf_rs_timer(struct timer_list *t); | 183 | static void addrconf_rs_timer(struct timer_list *t); |
| 184 | static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); | 184 | static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); |
| 185 | static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); | 185 | static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); |
| @@ -3439,6 +3439,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
| 3439 | void *ptr) | 3439 | void *ptr) |
| 3440 | { | 3440 | { |
| 3441 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); | 3441 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
| 3442 | struct netdev_notifier_change_info *change_info; | ||
| 3442 | struct netdev_notifier_changeupper_info *info; | 3443 | struct netdev_notifier_changeupper_info *info; |
| 3443 | struct inet6_dev *idev = __in6_dev_get(dev); | 3444 | struct inet6_dev *idev = __in6_dev_get(dev); |
| 3444 | struct net *net = dev_net(dev); | 3445 | struct net *net = dev_net(dev); |
| @@ -3513,7 +3514,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
| 3513 | break; | 3514 | break; |
| 3514 | } | 3515 | } |
| 3515 | 3516 | ||
| 3516 | if (idev) { | 3517 | if (!IS_ERR_OR_NULL(idev)) { |
| 3517 | if (idev->if_flags & IF_READY) { | 3518 | if (idev->if_flags & IF_READY) { |
| 3518 | /* device is already configured - | 3519 | /* device is already configured - |
| 3519 | * but resend MLD reports, we might | 3520 | * but resend MLD reports, we might |
| @@ -3521,6 +3522,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
| 3521 | * multicast snooping switches | 3522 | * multicast snooping switches |
| 3522 | */ | 3523 | */ |
| 3523 | ipv6_mc_up(idev); | 3524 | ipv6_mc_up(idev); |
| 3525 | change_info = ptr; | ||
| 3526 | if (change_info->flags_changed & IFF_NOARP) | ||
| 3527 | addrconf_dad_run(idev, true); | ||
| 3524 | rt6_sync_up(dev, RTNH_F_LINKDOWN); | 3528 | rt6_sync_up(dev, RTNH_F_LINKDOWN); |
| 3525 | break; | 3529 | break; |
| 3526 | } | 3530 | } |
| @@ -3555,7 +3559,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
| 3555 | 3559 | ||
| 3556 | if (!IS_ERR_OR_NULL(idev)) { | 3560 | if (!IS_ERR_OR_NULL(idev)) { |
| 3557 | if (run_pending) | 3561 | if (run_pending) |
| 3558 | addrconf_dad_run(idev); | 3562 | addrconf_dad_run(idev, false); |
| 3559 | 3563 | ||
| 3560 | /* Device has an address by now */ | 3564 | /* Device has an address by now */ |
| 3561 | rt6_sync_up(dev, RTNH_F_DEAD); | 3565 | rt6_sync_up(dev, RTNH_F_DEAD); |
| @@ -4173,16 +4177,19 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, | |||
| 4173 | addrconf_verify_rtnl(); | 4177 | addrconf_verify_rtnl(); |
| 4174 | } | 4178 | } |
| 4175 | 4179 | ||
| 4176 | static void addrconf_dad_run(struct inet6_dev *idev) | 4180 | static void addrconf_dad_run(struct inet6_dev *idev, bool restart) |
| 4177 | { | 4181 | { |
| 4178 | struct inet6_ifaddr *ifp; | 4182 | struct inet6_ifaddr *ifp; |
| 4179 | 4183 | ||
| 4180 | read_lock_bh(&idev->lock); | 4184 | read_lock_bh(&idev->lock); |
| 4181 | list_for_each_entry(ifp, &idev->addr_list, if_list) { | 4185 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
| 4182 | spin_lock(&ifp->lock); | 4186 | spin_lock(&ifp->lock); |
| 4183 | if (ifp->flags & IFA_F_TENTATIVE && | 4187 | if ((ifp->flags & IFA_F_TENTATIVE && |
| 4184 | ifp->state == INET6_IFADDR_STATE_DAD) | 4188 | ifp->state == INET6_IFADDR_STATE_DAD) || restart) { |
| 4189 | if (restart) | ||
| 4190 | ifp->state = INET6_IFADDR_STATE_PREDAD; | ||
| 4185 | addrconf_dad_kick(ifp); | 4191 | addrconf_dad_kick(ifp); |
| 4192 | } | ||
| 4186 | spin_unlock(&ifp->lock); | 4193 | spin_unlock(&ifp->lock); |
| 4187 | } | 4194 | } |
| 4188 | read_unlock_bh(&idev->lock); | 4195 | read_unlock_bh(&idev->lock); |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ec3095f13aae..a74650e98f42 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -2394,7 +2394,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb) | |||
| 2394 | void *ph; | 2394 | void *ph; |
| 2395 | __u32 ts; | 2395 | __u32 ts; |
| 2396 | 2396 | ||
| 2397 | ph = skb_shinfo(skb)->destructor_arg; | 2397 | ph = skb_zcopy_get_nouarg(skb); |
| 2398 | packet_dec_pending(&po->tx_ring); | 2398 | packet_dec_pending(&po->tx_ring); |
| 2399 | 2399 | ||
| 2400 | ts = __packet_set_timestamp(po, ph, skb); | 2400 | ts = __packet_set_timestamp(po, ph, skb); |
| @@ -2461,7 +2461,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, | |||
| 2461 | skb->mark = po->sk.sk_mark; | 2461 | skb->mark = po->sk.sk_mark; |
| 2462 | skb->tstamp = sockc->transmit_time; | 2462 | skb->tstamp = sockc->transmit_time; |
| 2463 | sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); | 2463 | sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); |
| 2464 | skb_shinfo(skb)->destructor_arg = ph.raw; | 2464 | skb_zcopy_set_nouarg(skb, ph.raw); |
| 2465 | 2465 | ||
| 2466 | skb_reserve(skb, hlen); | 2466 | skb_reserve(skb, hlen); |
| 2467 | skb_reset_network_header(skb); | 2467 | skb_reset_network_header(skb); |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 052855d47354..37c9b8f0e10f 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
| @@ -27,10 +27,7 @@ struct tcf_police_params { | |||
| 27 | u32 tcfp_ewma_rate; | 27 | u32 tcfp_ewma_rate; |
| 28 | s64 tcfp_burst; | 28 | s64 tcfp_burst; |
| 29 | u32 tcfp_mtu; | 29 | u32 tcfp_mtu; |
| 30 | s64 tcfp_toks; | ||
| 31 | s64 tcfp_ptoks; | ||
| 32 | s64 tcfp_mtu_ptoks; | 30 | s64 tcfp_mtu_ptoks; |
| 33 | s64 tcfp_t_c; | ||
| 34 | struct psched_ratecfg rate; | 31 | struct psched_ratecfg rate; |
| 35 | bool rate_present; | 32 | bool rate_present; |
| 36 | struct psched_ratecfg peak; | 33 | struct psched_ratecfg peak; |
| @@ -41,6 +38,11 @@ struct tcf_police_params { | |||
| 41 | struct tcf_police { | 38 | struct tcf_police { |
| 42 | struct tc_action common; | 39 | struct tc_action common; |
| 43 | struct tcf_police_params __rcu *params; | 40 | struct tcf_police_params __rcu *params; |
| 41 | |||
| 42 | spinlock_t tcfp_lock ____cacheline_aligned_in_smp; | ||
| 43 | s64 tcfp_toks; | ||
| 44 | s64 tcfp_ptoks; | ||
| 45 | s64 tcfp_t_c; | ||
| 44 | }; | 46 | }; |
| 45 | 47 | ||
| 46 | #define to_police(pc) ((struct tcf_police *)pc) | 48 | #define to_police(pc) ((struct tcf_police *)pc) |
| @@ -122,6 +124,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 122 | return ret; | 124 | return ret; |
| 123 | } | 125 | } |
| 124 | ret = ACT_P_CREATED; | 126 | ret = ACT_P_CREATED; |
| 127 | spin_lock_init(&(to_police(*a)->tcfp_lock)); | ||
| 125 | } else if (!ovr) { | 128 | } else if (!ovr) { |
| 126 | tcf_idr_release(*a, bind); | 129 | tcf_idr_release(*a, bind); |
| 127 | return -EEXIST; | 130 | return -EEXIST; |
| @@ -186,12 +189,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 186 | } | 189 | } |
| 187 | 190 | ||
| 188 | new->tcfp_burst = PSCHED_TICKS2NS(parm->burst); | 191 | new->tcfp_burst = PSCHED_TICKS2NS(parm->burst); |
| 189 | new->tcfp_toks = new->tcfp_burst; | 192 | if (new->peak_present) |
| 190 | if (new->peak_present) { | ||
| 191 | new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak, | 193 | new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak, |
| 192 | new->tcfp_mtu); | 194 | new->tcfp_mtu); |
| 193 | new->tcfp_ptoks = new->tcfp_mtu_ptoks; | ||
| 194 | } | ||
| 195 | 195 | ||
| 196 | if (tb[TCA_POLICE_AVRATE]) | 196 | if (tb[TCA_POLICE_AVRATE]) |
| 197 | new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); | 197 | new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); |
| @@ -207,7 +207,12 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | spin_lock_bh(&police->tcf_lock); | 209 | spin_lock_bh(&police->tcf_lock); |
| 210 | new->tcfp_t_c = ktime_get_ns(); | 210 | spin_lock_bh(&police->tcfp_lock); |
| 211 | police->tcfp_t_c = ktime_get_ns(); | ||
| 212 | police->tcfp_toks = new->tcfp_burst; | ||
| 213 | if (new->peak_present) | ||
| 214 | police->tcfp_ptoks = new->tcfp_mtu_ptoks; | ||
| 215 | spin_unlock_bh(&police->tcfp_lock); | ||
| 211 | police->tcf_action = parm->action; | 216 | police->tcf_action = parm->action; |
| 212 | rcu_swap_protected(police->params, | 217 | rcu_swap_protected(police->params, |
| 213 | new, | 218 | new, |
| @@ -257,25 +262,28 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a, | |||
| 257 | } | 262 | } |
| 258 | 263 | ||
| 259 | now = ktime_get_ns(); | 264 | now = ktime_get_ns(); |
| 260 | toks = min_t(s64, now - p->tcfp_t_c, p->tcfp_burst); | 265 | spin_lock_bh(&police->tcfp_lock); |
| 266 | toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst); | ||
| 261 | if (p->peak_present) { | 267 | if (p->peak_present) { |
| 262 | ptoks = toks + p->tcfp_ptoks; | 268 | ptoks = toks + police->tcfp_ptoks; |
| 263 | if (ptoks > p->tcfp_mtu_ptoks) | 269 | if (ptoks > p->tcfp_mtu_ptoks) |
| 264 | ptoks = p->tcfp_mtu_ptoks; | 270 | ptoks = p->tcfp_mtu_ptoks; |
| 265 | ptoks -= (s64)psched_l2t_ns(&p->peak, | 271 | ptoks -= (s64)psched_l2t_ns(&p->peak, |
| 266 | qdisc_pkt_len(skb)); | 272 | qdisc_pkt_len(skb)); |
| 267 | } | 273 | } |
| 268 | toks += p->tcfp_toks; | 274 | toks += police->tcfp_toks; |
| 269 | if (toks > p->tcfp_burst) | 275 | if (toks > p->tcfp_burst) |
| 270 | toks = p->tcfp_burst; | 276 | toks = p->tcfp_burst; |
| 271 | toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb)); | 277 | toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb)); |
| 272 | if ((toks|ptoks) >= 0) { | 278 | if ((toks|ptoks) >= 0) { |
| 273 | p->tcfp_t_c = now; | 279 | police->tcfp_t_c = now; |
| 274 | p->tcfp_toks = toks; | 280 | police->tcfp_toks = toks; |
| 275 | p->tcfp_ptoks = ptoks; | 281 | police->tcfp_ptoks = ptoks; |
| 282 | spin_unlock_bh(&police->tcfp_lock); | ||
| 276 | ret = p->tcfp_result; | 283 | ret = p->tcfp_result; |
| 277 | goto inc_drops; | 284 | goto inc_drops; |
| 278 | } | 285 | } |
| 286 | spin_unlock_bh(&police->tcfp_lock); | ||
| 279 | } | 287 | } |
| 280 | 288 | ||
| 281 | inc_overlimits: | 289 | inc_overlimits: |
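
The act_police rework keeps the mutable bucket state (tcfp_t_c, tcfp_toks, tcfp_ptoks) in the action itself under a dedicated spinlock, while the immutable configuration stays in the RCU-managed params. The bucket arithmetic is a classic single-rate policer; a user-space sketch with nanosecond token accounting, where the rate-to-time conversion is simplified compared to psched_l2t_ns():

#include <stdint.h>
#include <stdio.h>

struct bucket {
        int64_t burst;          /* bucket depth, in ns worth of tokens */
        int64_t toks;           /* current tokens, ns */
        int64_t t_c;            /* last update time, ns */
        uint64_t rate_bps;      /* bytes per second */
};

/* ns of transmission time a packet of len bytes costs at the configured rate */
static int64_t pkt_cost_ns(const struct bucket *b, uint32_t len)
{
        return (int64_t)len * 1000000000LL / (int64_t)b->rate_bps;
}

/* returns 1 if the packet conforms (and charges it), 0 if it exceeds */
static int police(struct bucket *b, int64_t now, uint32_t len)
{
        int64_t toks = now - b->t_c;

        if (toks > b->burst)
                toks = b->burst;        /* idle credit is capped at the burst */
        toks += b->toks;
        if (toks > b->burst)
                toks = b->burst;
        toks -= pkt_cost_ns(b, len);
        if (toks < 0)
                return 0;               /* not enough tokens: out of profile */
        b->t_c = now;
        b->toks = toks;
        return 1;
}

int main(void)
{
        /* 1 MB/s with a 10 ms burst allowance */
        struct bucket b = { .burst = 10000000, .toks = 10000000,
                            .t_c = 0, .rate_bps = 1000000 };
        int64_t now = 0;
        int i;

        for (i = 0; i < 15; i++)        /* back-to-back 1500 byte packets */
                printf("pkt %2d: %s\n", i,
                       police(&b, now, 1500) ? "conform" : "exceed");
        return 0;
}

Six back-to-back 1500-byte packets drain the 10 ms burst allowance, after which everything is reported out of profile until time advances and the bucket refills.
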
diff --git a/net/sctp/output.c b/net/sctp/output.c index 67939ad99c01..b0e74a3e77ec 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
| @@ -118,6 +118,9 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, | |||
| 118 | sctp_transport_route(tp, NULL, sp); | 118 | sctp_transport_route(tp, NULL, sp); |
| 119 | if (asoc->param_flags & SPP_PMTUD_ENABLE) | 119 | if (asoc->param_flags & SPP_PMTUD_ENABLE) |
| 120 | sctp_assoc_sync_pmtu(asoc); | 120 | sctp_assoc_sync_pmtu(asoc); |
| 121 | } else if (!sctp_transport_pmtu_check(tp)) { | ||
| 122 | if (asoc->param_flags & SPP_PMTUD_ENABLE) | ||
| 123 | sctp_assoc_sync_pmtu(asoc); | ||
| 121 | } | 124 | } |
| 122 | 125 | ||
| 123 | if (asoc->pmtu_pending) { | 126 | if (asoc->pmtu_pending) { |
| @@ -396,25 +399,6 @@ finish: | |||
| 396 | return retval; | 399 | return retval; |
| 397 | } | 400 | } |
| 398 | 401 | ||
| 399 | static void sctp_packet_release_owner(struct sk_buff *skb) | ||
| 400 | { | ||
| 401 | sk_free(skb->sk); | ||
| 402 | } | ||
| 403 | |||
| 404 | static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk) | ||
| 405 | { | ||
| 406 | skb_orphan(skb); | ||
| 407 | skb->sk = sk; | ||
| 408 | skb->destructor = sctp_packet_release_owner; | ||
| 409 | |||
| 410 | /* | ||
| 411 | * The data chunks have already been accounted for in sctp_sendmsg(), | ||
| 412 | * therefore only reserve a single byte to keep socket around until | ||
| 413 | * the packet has been transmitted. | ||
| 414 | */ | ||
| 415 | refcount_inc(&sk->sk_wmem_alloc); | ||
| 416 | } | ||
| 417 | |||
| 418 | static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb) | 402 | static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb) |
| 419 | { | 403 | { |
| 420 | if (SCTP_OUTPUT_CB(head)->last == head) | 404 | if (SCTP_OUTPUT_CB(head)->last == head) |
| @@ -601,7 +585,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) | |||
| 601 | if (!head) | 585 | if (!head) |
| 602 | goto out; | 586 | goto out; |
| 603 | skb_reserve(head, packet->overhead + MAX_HEADER); | 587 | skb_reserve(head, packet->overhead + MAX_HEADER); |
| 604 | sctp_packet_set_owner_w(head, sk); | 588 | skb_set_owner_w(head, sk); |
| 605 | 589 | ||
| 606 | /* set sctp header */ | 590 | /* set sctp header */ |
| 607 | sh = skb_push(head, sizeof(struct sctphdr)); | 591 | sh = skb_push(head, sizeof(struct sctphdr)); |
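
In the sctp/output.c hunks, sctp_packet_config() now also resyncs the association PMTU when sctp_transport_pmtu_check() reports that the cached route's MTU no longer matches the transport's pathmtu, and the private sctp_packet_set_owner_w() helper (which pinned the socket with a one-byte wmem charge) is replaced by the generic skb_set_owner_w(), which accounts the skb's truesize. A rough sketch of the "re-validate the cached PMTU before sizing a packet" shape, with simplified stand-in types rather than the SCTP ones:

#include <stdbool.h>
#include <stdint.h>

struct cached_route {
	uint32_t mtu;			/* what the routing layer currently reports */
};

struct transport {
	struct cached_route *dst;	/* cached route, may go stale */
	uint32_t pathmtu;		/* PMTU the last packets were sized with */
};

static bool transport_pmtu_ok(const struct transport *t)
{
	return t->dst && t->dst->mtu == t->pathmtu;
}

static void sync_pmtu(struct transport *t)
{
	if (t->dst)
		t->pathmtu = t->dst->mtu;	/* re-derive fragmentation point here */
}

/* called before building a packet, mirroring the shape of the hunk above */
static uint32_t packet_config(struct transport *t)
{
	if (!t->dst) {
		/* no route yet: a real stack would look one up, then sync */
	} else if (!transport_pmtu_ok(t)) {
		/* route's MTU changed underneath the cached value: resync first */
		sync_pmtu(t);
	}
	return t->pathmtu;
}
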
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 739f3e50120d..bf618d1b41fd 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -3940,32 +3940,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk, | |||
| 3940 | unsigned int optlen) | 3940 | unsigned int optlen) |
| 3941 | { | 3941 | { |
| 3942 | struct sctp_assoc_value params; | 3942 | struct sctp_assoc_value params; |
| 3943 | struct sctp_association *asoc; | ||
| 3944 | int retval = -EINVAL; | ||
| 3945 | 3943 | ||
| 3946 | if (optlen != sizeof(params)) | 3944 | if (optlen != sizeof(params)) |
| 3947 | goto out; | 3945 | return -EINVAL; |
| 3948 | |||
| 3949 | if (copy_from_user(¶ms, optval, optlen)) { | ||
| 3950 | retval = -EFAULT; | ||
| 3951 | goto out; | ||
| 3952 | } | ||
| 3953 | |||
| 3954 | asoc = sctp_id2assoc(sk, params.assoc_id); | ||
| 3955 | if (asoc) { | ||
| 3956 | asoc->prsctp_enable = !!params.assoc_value; | ||
| 3957 | } else if (!params.assoc_id) { | ||
| 3958 | struct sctp_sock *sp = sctp_sk(sk); | ||
| 3959 | 3946 | ||
| 3960 | sp->ep->prsctp_enable = !!params.assoc_value; | 3947 | if (copy_from_user(¶ms, optval, optlen)) |
| 3961 | } else { | 3948 | return -EFAULT; |
| 3962 | goto out; | ||
| 3963 | } | ||
| 3964 | 3949 | ||
| 3965 | retval = 0; | 3950 | sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value; |
| 3966 | 3951 | ||
| 3967 | out: | 3952 | return 0; |
| 3968 | return retval; | ||
| 3969 | } | 3953 | } |
| 3970 | 3954 | ||
| 3971 | static int sctp_setsockopt_default_prinfo(struct sock *sk, | 3955 | static int sctp_setsockopt_default_prinfo(struct sock *sk, |
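
The sctp/socket.c hunk reduces sctp_setsockopt_pr_supported() to an endpoint-level setting: validate the option length, copy the value in, flip ep->prsctp_enable, and return directly instead of threading a retval through a goto out label that no longer has anything to unwind. A compact sketch of that control-flow shape, with placeholder types and memcpy standing in for copy_from_user:

#include <errno.h>
#include <stdbool.h>
#include <string.h>

struct opt_value { int assoc_id; int value; };
struct endpoint { bool prsctp_enable; };

static int set_pr_supported(struct endpoint *ep,
			    const void *optval, size_t optlen)
{
	struct opt_value params;

	if (optlen != sizeof(params))
		return -EINVAL;		/* was: retval = -EINVAL; goto out */

	memcpy(&params, optval, sizeof(params));	/* copy_from_user() stand-in */

	ep->prsctp_enable = !!params.value;
	return 0;
}
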
diff --git a/net/sctp/stream.c b/net/sctp/stream.c index ffb940d3b57c..3892e7630f3a 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c | |||
| @@ -535,7 +535,6 @@ int sctp_send_add_streams(struct sctp_association *asoc, | |||
| 535 | goto out; | 535 | goto out; |
| 536 | } | 536 | } |
| 537 | 537 | ||
| 538 | stream->incnt = incnt; | ||
| 539 | stream->outcnt = outcnt; | 538 | stream->outcnt = outcnt; |
| 540 | 539 | ||
| 541 | asoc->strreset_outstanding = !!out + !!in; | 540 | asoc->strreset_outstanding = !!out + !!in; |
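
The sctp/stream.c hunk stops bumping stream->incnt at the point where the add-streams request is merely queued; only the outgoing count is still updated in the request path, and the incoming count should grow once the peer's answer arrives. A small sketch of that request/ack split (the state names and the ack hook are illustrative):

struct stream_state {
	unsigned int outcnt;		/* streams we send on */
	unsigned int incnt;		/* streams the peer may send on */
	unsigned int pending_in;	/* requested from the peer, not yet acked */
};

static void send_add_streams(struct stream_state *s, unsigned int in,
			     unsigned int out)
{
	s->outcnt += out;	/* mirrors the hunk: local count updated on send */
	s->pending_in = in;	/* peer-facing count is only remembered here */
	/* ...build and queue the reconfiguration request... */
}

static void add_streams_acked(struct stream_state *s)
{
	s->incnt += s->pending_in;	/* commit only once the peer answers */
	s->pending_in = 0;
}
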
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 80e2119f1c70..5fbaf1901571 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
| @@ -127,6 +127,8 @@ static int smc_release(struct socket *sock) | |||
| 127 | smc = smc_sk(sk); | 127 | smc = smc_sk(sk); |
| 128 | 128 | ||
| 129 | /* cleanup for a dangling non-blocking connect */ | 129 | /* cleanup for a dangling non-blocking connect */ |
| 130 | if (smc->connect_info && sk->sk_state == SMC_INIT) | ||
| 131 | tcp_abort(smc->clcsock->sk, ECONNABORTED); | ||
| 130 | flush_work(&smc->connect_work); | 132 | flush_work(&smc->connect_work); |
| 131 | kfree(smc->connect_info); | 133 | kfree(smc->connect_info); |
| 132 | smc->connect_info = NULL; | 134 | smc->connect_info = NULL; |
| @@ -547,7 +549,8 @@ static int smc_connect_rdma(struct smc_sock *smc, | |||
| 547 | 549 | ||
| 548 | mutex_lock(&smc_create_lgr_pending); | 550 | mutex_lock(&smc_create_lgr_pending); |
| 549 | local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev, | 551 | local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev, |
| 550 | ibport, &aclc->lcl, NULL, 0); | 552 | ibport, ntoh24(aclc->qpn), &aclc->lcl, |
| 553 | NULL, 0); | ||
| 551 | if (local_contact < 0) { | 554 | if (local_contact < 0) { |
| 552 | if (local_contact == -ENOMEM) | 555 | if (local_contact == -ENOMEM) |
| 553 | reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/ | 556 | reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/ |
| @@ -618,7 +621,7 @@ static int smc_connect_ism(struct smc_sock *smc, | |||
| 618 | int rc = 0; | 621 | int rc = 0; |
| 619 | 622 | ||
| 620 | mutex_lock(&smc_create_lgr_pending); | 623 | mutex_lock(&smc_create_lgr_pending); |
| 621 | local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, | 624 | local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0, |
| 622 | NULL, ismdev, aclc->gid); | 625 | NULL, ismdev, aclc->gid); |
| 623 | if (local_contact < 0) | 626 | if (local_contact < 0) |
| 624 | return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0); | 627 | return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0); |
| @@ -1083,7 +1086,7 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc, | |||
| 1083 | int *local_contact) | 1086 | int *local_contact) |
| 1084 | { | 1087 | { |
| 1085 | /* allocate connection / link group */ | 1088 | /* allocate connection / link group */ |
| 1086 | *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, | 1089 | *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 0, |
| 1087 | &pclc->lcl, NULL, 0); | 1090 | &pclc->lcl, NULL, 0); |
| 1088 | if (*local_contact < 0) { | 1091 | if (*local_contact < 0) { |
| 1089 | if (*local_contact == -ENOMEM) | 1092 | if (*local_contact == -ENOMEM) |
| @@ -1107,7 +1110,7 @@ static int smc_listen_ism_init(struct smc_sock *new_smc, | |||
| 1107 | struct smc_clc_msg_smcd *pclc_smcd; | 1110 | struct smc_clc_msg_smcd *pclc_smcd; |
| 1108 | 1111 | ||
| 1109 | pclc_smcd = smc_get_clc_msg_smcd(pclc); | 1112 | pclc_smcd = smc_get_clc_msg_smcd(pclc); |
| 1110 | *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL, | 1113 | *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, 0, NULL, |
| 1111 | ismdev, pclc_smcd->gid); | 1114 | ismdev, pclc_smcd->gid); |
| 1112 | if (*local_contact < 0) { | 1115 | if (*local_contact < 0) { |
| 1113 | if (*local_contact == -ENOMEM) | 1116 | if (*local_contact == -ENOMEM) |
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index ed5dcf03fe0b..db83332ac1c8 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c | |||
| @@ -81,7 +81,7 @@ static inline void smc_cdc_add_pending_send(struct smc_connection *conn, | |||
| 81 | sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE, | 81 | sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE, |
| 82 | "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)"); | 82 | "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)"); |
| 83 | BUILD_BUG_ON_MSG( | 83 | BUILD_BUG_ON_MSG( |
| 84 | sizeof(struct smc_cdc_msg) != SMC_WR_TX_SIZE, | 84 | offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE, |
| 85 | "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()"); | 85 | "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()"); |
| 86 | BUILD_BUG_ON_MSG( | 86 | BUILD_BUG_ON_MSG( |
| 87 | sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE, | 87 | sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE, |
| @@ -177,23 +177,24 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn) | |||
| 177 | int smcd_cdc_msg_send(struct smc_connection *conn) | 177 | int smcd_cdc_msg_send(struct smc_connection *conn) |
| 178 | { | 178 | { |
| 179 | struct smc_sock *smc = container_of(conn, struct smc_sock, conn); | 179 | struct smc_sock *smc = container_of(conn, struct smc_sock, conn); |
| 180 | union smc_host_cursor curs; | ||
| 180 | struct smcd_cdc_msg cdc; | 181 | struct smcd_cdc_msg cdc; |
| 181 | int rc, diff; | 182 | int rc, diff; |
| 182 | 183 | ||
| 183 | memset(&cdc, 0, sizeof(cdc)); | 184 | memset(&cdc, 0, sizeof(cdc)); |
| 184 | cdc.common.type = SMC_CDC_MSG_TYPE; | 185 | cdc.common.type = SMC_CDC_MSG_TYPE; |
| 185 | cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap; | 186 | curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs); |
| 186 | cdc.prod_count = conn->local_tx_ctrl.prod.count; | 187 | cdc.prod.wrap = curs.wrap; |
| 187 | 188 | cdc.prod.count = curs.count; | |
| 188 | cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap; | 189 | curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs); |
| 189 | cdc.cons_count = conn->local_tx_ctrl.cons.count; | 190 | cdc.cons.wrap = curs.wrap; |
| 190 | cdc.prod_flags = conn->local_tx_ctrl.prod_flags; | 191 | cdc.cons.count = curs.count; |
| 191 | cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags; | 192 | cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags; |
| 193 | cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags; | ||
| 192 | rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1); | 194 | rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1); |
| 193 | if (rc) | 195 | if (rc) |
| 194 | return rc; | 196 | return rc; |
| 195 | smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons, | 197 | smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn); |
| 196 | conn); | ||
| 197 | /* Calculate transmitted data and increment free send buffer space */ | 198 | /* Calculate transmitted data and increment free send buffer space */ |
| 198 | diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin, | 199 | diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin, |
| 199 | &conn->tx_curs_sent); | 200 | &conn->tx_curs_sent); |
| @@ -331,13 +332,16 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc) | |||
| 331 | static void smcd_cdc_rx_tsklet(unsigned long data) | 332 | static void smcd_cdc_rx_tsklet(unsigned long data) |
| 332 | { | 333 | { |
| 333 | struct smc_connection *conn = (struct smc_connection *)data; | 334 | struct smc_connection *conn = (struct smc_connection *)data; |
| 335 | struct smcd_cdc_msg *data_cdc; | ||
| 334 | struct smcd_cdc_msg cdc; | 336 | struct smcd_cdc_msg cdc; |
| 335 | struct smc_sock *smc; | 337 | struct smc_sock *smc; |
| 336 | 338 | ||
| 337 | if (!conn) | 339 | if (!conn) |
| 338 | return; | 340 | return; |
| 339 | 341 | ||
| 340 | memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc)); | 342 | data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr; |
| 343 | smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn); | ||
| 344 | smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn); | ||
| 341 | smc = container_of(conn, struct smc_sock, conn); | 345 | smc = container_of(conn, struct smc_sock, conn); |
| 342 | smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc); | 346 | smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc); |
| 343 | } | 347 | } |
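
The smc_cdc.c hunks make the SMC-D send and receive paths treat each cursor as one 64-bit quantity: smcd_cdc_msg_send() builds the message from atomic64_read() snapshots of the local producer/consumer cursors and records rx_curs_confirmed from the very snapshot it sent, while smcd_cdc_rx_tsklet() copies the cursors out of the DMB with smcd_curs_copy() instead of a raw memcpy. A C11 userspace sketch of that snapshot-once idea (the packed cursor layout and all names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

struct conn {
	_Atomic uint64_t local_cons;	/* consumer cursor, packed into one word */
	uint64_t rx_curs_confirmed;	/* last cursor value reported to the peer */
};

struct cdc_msg {
	uint64_t cons;			/* consumer cursor carried to the peer */
};

/* receive path: publish a whole new cursor with one atomic store */
static void cons_update(struct conn *c, uint16_t wrap, uint32_t count)
{
	uint64_t packed = ((uint64_t)wrap << 48) | ((uint64_t)count << 16);

	atomic_store_explicit(&c->local_cons, packed, memory_order_release);
}

/* send path: snapshot once, put the same snapshot in the message and in
 * rx_curs_confirmed, so the two can never disagree */
static int cdc_msg_send(struct conn *c, struct cdc_msg *msg,
			int (*xmit)(const struct cdc_msg *))
{
	uint64_t snap = atomic_load_explicit(&c->local_cons, memory_order_acquire);
	int rc;

	msg->cons = snap;
	rc = xmit(msg);
	if (rc)
		return rc;
	c->rx_curs_confirmed = snap;
	return 0;
}

Because wrap and count always travel together in one atomic word, a reader can never pair the wrap of one update with the count of another.
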
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h index 934df4473a7c..b5bfe38c7f9b 100644 --- a/net/smc/smc_cdc.h +++ b/net/smc/smc_cdc.h | |||
| @@ -48,21 +48,31 @@ struct smc_cdc_msg { | |||
| 48 | struct smc_cdc_producer_flags prod_flags; | 48 | struct smc_cdc_producer_flags prod_flags; |
| 49 | struct smc_cdc_conn_state_flags conn_state_flags; | 49 | struct smc_cdc_conn_state_flags conn_state_flags; |
| 50 | u8 reserved[18]; | 50 | u8 reserved[18]; |
| 51 | } __packed; /* format defined in RFC7609 */ | 51 | }; |
| 52 | |||
| 53 | /* SMC-D cursor format */ | ||
| 54 | union smcd_cdc_cursor { | ||
| 55 | struct { | ||
| 56 | u16 wrap; | ||
| 57 | u32 count; | ||
| 58 | struct smc_cdc_producer_flags prod_flags; | ||
| 59 | struct smc_cdc_conn_state_flags conn_state_flags; | ||
| 60 | } __packed; | ||
| 61 | #ifdef KERNEL_HAS_ATOMIC64 | ||
| 62 | atomic64_t acurs; /* for atomic processing */ | ||
| 63 | #else | ||
| 64 | u64 acurs; /* for atomic processing */ | ||
| 65 | #endif | ||
| 66 | } __aligned(8); | ||
| 52 | 67 | ||
| 53 | /* CDC message for SMC-D */ | 68 | /* CDC message for SMC-D */ |
| 54 | struct smcd_cdc_msg { | 69 | struct smcd_cdc_msg { |
| 55 | struct smc_wr_rx_hdr common; /* Type = 0xFE */ | 70 | struct smc_wr_rx_hdr common; /* Type = 0xFE */ |
| 56 | u8 res1[7]; | 71 | u8 res1[7]; |
| 57 | u16 prod_wrap; | 72 | union smcd_cdc_cursor prod; |
| 58 | u32 prod_count; | 73 | union smcd_cdc_cursor cons; |
| 59 | u8 res2[2]; | ||
| 60 | u16 cons_wrap; | ||
| 61 | u32 cons_count; | ||
| 62 | struct smc_cdc_producer_flags prod_flags; | ||
| 63 | struct smc_cdc_conn_state_flags conn_state_flags; | ||
| 64 | u8 res3[8]; | 74 | u8 res3[8]; |
| 65 | } __packed; | 75 | } __aligned(8); |
| 66 | 76 | ||
| 67 | static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn) | 77 | static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn) |
| 68 | { | 78 | { |
| @@ -135,6 +145,21 @@ static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt, | |||
| 135 | #endif | 145 | #endif |
| 136 | } | 146 | } |
| 137 | 147 | ||
| 148 | static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt, | ||
| 149 | union smcd_cdc_cursor *src, | ||
| 150 | struct smc_connection *conn) | ||
| 151 | { | ||
| 152 | #ifndef KERNEL_HAS_ATOMIC64 | ||
| 153 | unsigned long flags; | ||
| 154 | |||
| 155 | spin_lock_irqsave(&conn->acurs_lock, flags); | ||
| 156 | tgt->acurs = src->acurs; | ||
| 157 | spin_unlock_irqrestore(&conn->acurs_lock, flags); | ||
| 158 | #else | ||
| 159 | atomic64_set(&tgt->acurs, atomic64_read(&src->acurs)); | ||
| 160 | #endif | ||
| 161 | } | ||
| 162 | |||
| 138 | /* calculate cursor difference between old and new, where old <= new */ | 163 | /* calculate cursor difference between old and new, where old <= new */ |
| 139 | static inline int smc_curs_diff(unsigned int size, | 164 | static inline int smc_curs_diff(unsigned int size, |
| 140 | union smc_host_cursor *old, | 165 | union smc_host_cursor *old, |
| @@ -222,12 +247,17 @@ static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local, | |||
| 222 | static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local, | 247 | static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local, |
| 223 | struct smcd_cdc_msg *peer) | 248 | struct smcd_cdc_msg *peer) |
| 224 | { | 249 | { |
| 225 | local->prod.wrap = peer->prod_wrap; | 250 | union smc_host_cursor temp; |
| 226 | local->prod.count = peer->prod_count; | 251 | |
| 227 | local->cons.wrap = peer->cons_wrap; | 252 | temp.wrap = peer->prod.wrap; |
| 228 | local->cons.count = peer->cons_count; | 253 | temp.count = peer->prod.count; |
| 229 | local->prod_flags = peer->prod_flags; | 254 | atomic64_set(&local->prod.acurs, atomic64_read(&temp.acurs)); |
| 230 | local->conn_state_flags = peer->conn_state_flags; | 255 | |
| 256 | temp.wrap = peer->cons.wrap; | ||
| 257 | temp.count = peer->cons.count; | ||
| 258 | atomic64_set(&local->cons.acurs, atomic64_read(&temp.acurs)); | ||
| 259 | local->prod_flags = peer->cons.prod_flags; | ||
| 260 | local->conn_state_flags = peer->cons.conn_state_flags; | ||
| 231 | } | 261 | } |
| 232 | 262 | ||
| 233 | static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, | 263 | static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, |
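
The smc_cdc.h hunk introduces union smcd_cdc_cursor: a packed 8-byte field view (wrap, count, flags) overlaid with an atomic64_t (or a plain u64 plus conn->acurs_lock where KERNEL_HAS_ATOMIC64 is not available), and it relaxes the SMC-D message from __packed to 8-byte alignment so the cursors can be accessed atomically in place. A userspace GCC/Clang sketch of that packed-view-plus-atomic-view union, including a compile-time check that the field view really fits in the 64-bit word:

#include <stdint.h>

union cdc_cursor {
	struct __attribute__((packed)) {
		uint16_t wrap;
		uint32_t count;
		uint8_t prod_flags;
		uint8_t conn_state_flags;
	} f;			/* wire/field view of the cursor */
	uint64_t acurs;		/* view used for 64-bit atomic accesses */
} __attribute__((aligned(8)));

/* if the field view ever outgrows the atomic word, fail the build */
_Static_assert(sizeof(union cdc_cursor) == 8,
	       "cursor must stay a single 64-bit word");

/* copy one cursor to another as a single 64-bit atomic load and store,
 * mirroring what smcd_curs_copy() does with atomic64_read/atomic64_set */
static void cdc_cursor_copy(union cdc_cursor *dst, union cdc_cursor *src)
{
	__atomic_store_n(&dst->acurs,
			 __atomic_load_n(&src->acurs, __ATOMIC_ACQUIRE),
			 __ATOMIC_RELEASE);
}
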
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 18daebcef181..1c9fa7f0261a 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c | |||
| @@ -184,6 +184,8 @@ free: | |||
| 184 | 184 | ||
| 185 | if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE) | 185 | if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE) |
| 186 | smc_llc_link_inactive(lnk); | 186 | smc_llc_link_inactive(lnk); |
| 187 | if (lgr->is_smcd) | ||
| 188 | smc_ism_signal_shutdown(lgr); | ||
| 187 | smc_lgr_free(lgr); | 189 | smc_lgr_free(lgr); |
| 188 | } | 190 | } |
| 189 | } | 191 | } |
| @@ -485,7 +487,7 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport) | |||
| 485 | } | 487 | } |
| 486 | 488 | ||
| 487 | /* Called when SMC-D device is terminated or peer is lost */ | 489 | /* Called when SMC-D device is terminated or peer is lost */ |
| 488 | void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid) | 490 | void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan) |
| 489 | { | 491 | { |
| 490 | struct smc_link_group *lgr, *l; | 492 | struct smc_link_group *lgr, *l; |
| 491 | LIST_HEAD(lgr_free_list); | 493 | LIST_HEAD(lgr_free_list); |
| @@ -495,7 +497,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid) | |||
| 495 | list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) { | 497 | list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) { |
| 496 | if (lgr->is_smcd && lgr->smcd == dev && | 498 | if (lgr->is_smcd && lgr->smcd == dev && |
| 497 | (!peer_gid || lgr->peer_gid == peer_gid) && | 499 | (!peer_gid || lgr->peer_gid == peer_gid) && |
| 498 | !list_empty(&lgr->list)) { | 500 | (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) { |
| 499 | __smc_lgr_terminate(lgr); | 501 | __smc_lgr_terminate(lgr); |
| 500 | list_move(&lgr->list, &lgr_free_list); | 502 | list_move(&lgr->list, &lgr_free_list); |
| 501 | } | 503 | } |
| @@ -506,6 +508,8 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid) | |||
| 506 | list_for_each_entry_safe(lgr, l, &lgr_free_list, list) { | 508 | list_for_each_entry_safe(lgr, l, &lgr_free_list, list) { |
| 507 | list_del_init(&lgr->list); | 509 | list_del_init(&lgr->list); |
| 508 | cancel_delayed_work_sync(&lgr->free_work); | 510 | cancel_delayed_work_sync(&lgr->free_work); |
| 511 | if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */ | ||
| 512 | smc_ism_signal_shutdown(lgr); | ||
| 509 | smc_lgr_free(lgr); | 513 | smc_lgr_free(lgr); |
| 510 | } | 514 | } |
| 511 | } | 515 | } |
| @@ -559,7 +563,7 @@ out: | |||
| 559 | 563 | ||
| 560 | static bool smcr_lgr_match(struct smc_link_group *lgr, | 564 | static bool smcr_lgr_match(struct smc_link_group *lgr, |
| 561 | struct smc_clc_msg_local *lcl, | 565 | struct smc_clc_msg_local *lcl, |
| 562 | enum smc_lgr_role role) | 566 | enum smc_lgr_role role, u32 clcqpn) |
| 563 | { | 567 | { |
| 564 | return !memcmp(lgr->peer_systemid, lcl->id_for_peer, | 568 | return !memcmp(lgr->peer_systemid, lcl->id_for_peer, |
| 565 | SMC_SYSTEMID_LEN) && | 569 | SMC_SYSTEMID_LEN) && |
| @@ -567,7 +571,9 @@ static bool smcr_lgr_match(struct smc_link_group *lgr, | |||
| 567 | SMC_GID_SIZE) && | 571 | SMC_GID_SIZE) && |
| 568 | !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac, | 572 | !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac, |
| 569 | sizeof(lcl->mac)) && | 573 | sizeof(lcl->mac)) && |
| 570 | lgr->role == role; | 574 | lgr->role == role && |
| 575 | (lgr->role == SMC_SERV || | ||
| 576 | lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn); | ||
| 571 | } | 577 | } |
| 572 | 578 | ||
| 573 | static bool smcd_lgr_match(struct smc_link_group *lgr, | 579 | static bool smcd_lgr_match(struct smc_link_group *lgr, |
| @@ -578,7 +584,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr, | |||
| 578 | 584 | ||
| 579 | /* create a new SMC connection (and a new link group if necessary) */ | 585 | /* create a new SMC connection (and a new link group if necessary) */ |
| 580 | int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, | 586 | int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, |
| 581 | struct smc_ib_device *smcibdev, u8 ibport, | 587 | struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn, |
| 582 | struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, | 588 | struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, |
| 583 | u64 peer_gid) | 589 | u64 peer_gid) |
| 584 | { | 590 | { |
| @@ -603,7 +609,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, | |||
| 603 | list_for_each_entry(lgr, &smc_lgr_list.list, list) { | 609 | list_for_each_entry(lgr, &smc_lgr_list.list, list) { |
| 604 | write_lock_bh(&lgr->conns_lock); | 610 | write_lock_bh(&lgr->conns_lock); |
| 605 | if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) : | 611 | if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) : |
| 606 | smcr_lgr_match(lgr, lcl, role)) && | 612 | smcr_lgr_match(lgr, lcl, role, clcqpn)) && |
| 607 | !lgr->sync_err && | 613 | !lgr->sync_err && |
| 608 | lgr->vlan_id == vlan_id && | 614 | lgr->vlan_id == vlan_id && |
| 609 | (role == SMC_CLNT || | 615 | (role == SMC_CLNT || |
| @@ -1024,6 +1030,8 @@ void smc_core_exit(void) | |||
| 1024 | smc_llc_link_inactive(lnk); | 1030 | smc_llc_link_inactive(lnk); |
| 1025 | } | 1031 | } |
| 1026 | cancel_delayed_work_sync(&lgr->free_work); | 1032 | cancel_delayed_work_sync(&lgr->free_work); |
| 1033 | if (lgr->is_smcd) | ||
| 1034 | smc_ism_signal_shutdown(lgr); | ||
| 1027 | smc_lgr_free(lgr); /* free link group */ | 1035 | smc_lgr_free(lgr); /* free link group */ |
| 1028 | } | 1036 | } |
| 1029 | } | 1037 | } |
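
In smc_core.c, SMC-D link groups now signal a shutdown to the peer (smc_ism_signal_shutdown()) before being freed in the delayed-free worker, in smc_core_exit(), and in smc_smcd_terminate() when the device itself was not the trigger; smc_smcd_terminate() also gains a vlan argument, with VLAN_VID_MASK acting as a wildcard. On the SMC-R side, smcr_lgr_match() only lets a client reuse an existing link group when the link's peer_qpn matches the QPN from the CLC accept. A simplified sketch of that extended match predicate (the types are cut down and the field sizes illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

enum lgr_role { ROLE_CLNT, ROLE_SERV };

struct link {
	uint8_t peer_gid[16];
	uint8_t peer_mac[6];
	uint32_t peer_qpn;	/* queue pair number learned from the peer */
};

struct link_group {
	uint8_t peer_systemid[8];
	enum lgr_role role;
	struct link lnk;
};

struct clc_peer {		/* identity carried in the CLC handshake */
	uint8_t systemid[8];
	uint8_t gid[16];
	uint8_t mac[6];
};

static bool lgr_match(const struct link_group *lgr, const struct clc_peer *peer,
		      enum lgr_role role, uint32_t clcqpn)
{
	return !memcmp(lgr->peer_systemid, peer->systemid, sizeof(peer->systemid)) &&
	       !memcmp(lgr->lnk.peer_gid, peer->gid, sizeof(peer->gid)) &&
	       !memcmp(lgr->lnk.peer_mac, peer->mac, sizeof(peer->mac)) &&
	       lgr->role == role &&
	       /* the server learns the client's QP later, so only the client
	        * insists that the existing link talks to this exact QP */
	       (lgr->role == ROLE_SERV || lgr->lnk.peer_qpn == clcqpn);
}
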
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index c156674733c9..cf98f4d6093e 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h | |||
| @@ -247,7 +247,8 @@ void smc_lgr_free(struct smc_link_group *lgr); | |||
| 247 | void smc_lgr_forget(struct smc_link_group *lgr); | 247 | void smc_lgr_forget(struct smc_link_group *lgr); |
| 248 | void smc_lgr_terminate(struct smc_link_group *lgr); | 248 | void smc_lgr_terminate(struct smc_link_group *lgr); |
| 249 | void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport); | 249 | void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport); |
| 250 | void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid); | 250 | void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, |
| 251 | unsigned short vlan); | ||
| 251 | int smc_buf_create(struct smc_sock *smc, bool is_smcd); | 252 | int smc_buf_create(struct smc_sock *smc, bool is_smcd); |
| 252 | int smc_uncompress_bufsize(u8 compressed); | 253 | int smc_uncompress_bufsize(u8 compressed); |
| 253 | int smc_rmb_rtoken_handling(struct smc_connection *conn, | 254 | int smc_rmb_rtoken_handling(struct smc_connection *conn, |
| @@ -262,7 +263,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id); | |||
| 262 | 263 | ||
| 263 | void smc_conn_free(struct smc_connection *conn); | 264 | void smc_conn_free(struct smc_connection *conn); |
| 264 | int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, | 265 | int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, |
| 265 | struct smc_ib_device *smcibdev, u8 ibport, | 266 | struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn, |
| 266 | struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, | 267 | struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, |
| 267 | u64 peer_gid); | 268 | u64 peer_gid); |
| 268 | void smcd_conn_free(struct smc_connection *conn); | 269 | void smcd_conn_free(struct smc_connection *conn); |
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c index e36f21ce7252..2fff79db1a59 100644 --- a/net/smc/smc_ism.c +++ b/net/smc/smc_ism.c | |||
| @@ -187,22 +187,28 @@ struct smc_ism_event_work { | |||
| 187 | #define ISM_EVENT_REQUEST 0x0001 | 187 | #define ISM_EVENT_REQUEST 0x0001 |
| 188 | #define ISM_EVENT_RESPONSE 0x0002 | 188 | #define ISM_EVENT_RESPONSE 0x0002 |
| 189 | #define ISM_EVENT_REQUEST_IR 0x00000001 | 189 | #define ISM_EVENT_REQUEST_IR 0x00000001 |
| 190 | #define ISM_EVENT_CODE_SHUTDOWN 0x80 | ||
| 190 | #define ISM_EVENT_CODE_TESTLINK 0x83 | 191 | #define ISM_EVENT_CODE_TESTLINK 0x83 |
| 191 | 192 | ||
| 193 | union smcd_sw_event_info { | ||
| 194 | u64 info; | ||
| 195 | struct { | ||
| 196 | u8 uid[SMC_LGR_ID_SIZE]; | ||
| 197 | unsigned short vlan_id; | ||
| 198 | u16 code; | ||
| 199 | }; | ||
| 200 | }; | ||
| 201 | |||
| 192 | static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) | 202 | static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) |
| 193 | { | 203 | { |
| 194 | union { | 204 | union smcd_sw_event_info ev_info; |
| 195 | u64 info; | ||
| 196 | struct { | ||
| 197 | u32 uid; | ||
| 198 | unsigned short vlanid; | ||
| 199 | u16 code; | ||
| 200 | }; | ||
| 201 | } ev_info; | ||
| 202 | 205 | ||
| 206 | ev_info.info = wrk->event.info; | ||
| 203 | switch (wrk->event.code) { | 207 | switch (wrk->event.code) { |
| 208 | case ISM_EVENT_CODE_SHUTDOWN: /* Peer shut down DMBs */ | ||
| 209 | smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id); | ||
| 210 | break; | ||
| 204 | case ISM_EVENT_CODE_TESTLINK: /* Activity timer */ | 211 | case ISM_EVENT_CODE_TESTLINK: /* Activity timer */ |
| 205 | ev_info.info = wrk->event.info; | ||
| 206 | if (ev_info.code == ISM_EVENT_REQUEST) { | 212 | if (ev_info.code == ISM_EVENT_REQUEST) { |
| 207 | ev_info.code = ISM_EVENT_RESPONSE; | 213 | ev_info.code = ISM_EVENT_RESPONSE; |
| 208 | wrk->smcd->ops->signal_event(wrk->smcd, | 214 | wrk->smcd->ops->signal_event(wrk->smcd, |
| @@ -215,6 +221,21 @@ static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) | |||
| 215 | } | 221 | } |
| 216 | } | 222 | } |
| 217 | 223 | ||
| 224 | int smc_ism_signal_shutdown(struct smc_link_group *lgr) | ||
| 225 | { | ||
| 226 | int rc; | ||
| 227 | union smcd_sw_event_info ev_info; | ||
| 228 | |||
| 229 | memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE); | ||
| 230 | ev_info.vlan_id = lgr->vlan_id; | ||
| 231 | ev_info.code = ISM_EVENT_REQUEST; | ||
| 232 | rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid, | ||
| 233 | ISM_EVENT_REQUEST_IR, | ||
| 234 | ISM_EVENT_CODE_SHUTDOWN, | ||
| 235 | ev_info.info); | ||
| 236 | return rc; | ||
| 237 | } | ||
| 238 | |||
| 218 | /* worker for SMC-D events */ | 239 | /* worker for SMC-D events */ |
| 219 | static void smc_ism_event_work(struct work_struct *work) | 240 | static void smc_ism_event_work(struct work_struct *work) |
| 220 | { | 241 | { |
| @@ -223,7 +244,7 @@ static void smc_ism_event_work(struct work_struct *work) | |||
| 223 | 244 | ||
| 224 | switch (wrk->event.type) { | 245 | switch (wrk->event.type) { |
| 225 | case ISM_EVENT_GID: /* GID event, token is peer GID */ | 246 | case ISM_EVENT_GID: /* GID event, token is peer GID */ |
| 226 | smc_smcd_terminate(wrk->smcd, wrk->event.tok); | 247 | smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK); |
| 227 | break; | 248 | break; |
| 228 | case ISM_EVENT_DMB: | 249 | case ISM_EVENT_DMB: |
| 229 | break; | 250 | break; |
| @@ -289,7 +310,7 @@ void smcd_unregister_dev(struct smcd_dev *smcd) | |||
| 289 | spin_unlock(&smcd_dev_list.lock); | 310 | spin_unlock(&smcd_dev_list.lock); |
| 290 | flush_workqueue(smcd->event_wq); | 311 | flush_workqueue(smcd->event_wq); |
| 291 | destroy_workqueue(smcd->event_wq); | 312 | destroy_workqueue(smcd->event_wq); |
| 292 | smc_smcd_terminate(smcd, 0); | 313 | smc_smcd_terminate(smcd, 0, VLAN_VID_MASK); |
| 293 | 314 | ||
| 294 | device_del(&smcd->dev); | 315 | device_del(&smcd->dev); |
| 295 | } | 316 | } |
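
The smc_ism.c hunks decode the 64-bit event payload through the new union smcd_sw_event_info, handle ISM_EVENT_CODE_SHUTDOWN by terminating exactly the link groups of that peer GID and VLAN, and add smc_ism_signal_shutdown(), which packs the link-group id and VLAN into the same payload for the outgoing request. A sketch of that pack/unpack-through-a-union pattern (field sizes and names are illustrative, and a real layout has to be agreed with the peer):

#include <stdint.h>
#include <string.h>

union sw_event_info {
	uint64_t info;			/* what the device interface carries */
	struct {
		uint8_t uid[4];		/* link-group id */
		uint16_t vlan_id;
		uint16_t code;		/* request/response discriminator */
	};
};

static uint64_t encode_shutdown(const uint8_t lgr_id[4], uint16_t vlan,
				uint16_t code)
{
	union sw_event_info ev = { 0 };

	memcpy(ev.uid, lgr_id, sizeof(ev.uid));
	ev.vlan_id = vlan;
	ev.code = code;
	return ev.info;		/* hand this u64 to the signalling call */
}

static uint16_t decode_vlan(uint64_t info)
{
	union sw_event_info ev;

	ev.info = info;
	return ev.vlan_id;	/* receiver reads the same layout back */
}
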
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h index aee45b860b79..4da946cbfa29 100644 --- a/net/smc/smc_ism.h +++ b/net/smc/smc_ism.h | |||
| @@ -45,4 +45,5 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size, | |||
| 45 | int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc); | 45 | int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc); |
| 46 | int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos, | 46 | int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos, |
| 47 | void *data, size_t len); | 47 | void *data, size_t len); |
| 48 | int smc_ism_signal_shutdown(struct smc_link_group *lgr); | ||
| 48 | #endif | 49 | #endif |
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index 3c458d279855..c2694750a6a8 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c | |||
| @@ -215,12 +215,14 @@ int smc_wr_tx_put_slot(struct smc_link *link, | |||
| 215 | 215 | ||
| 216 | pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv); | 216 | pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv); |
| 217 | if (pend->idx < link->wr_tx_cnt) { | 217 | if (pend->idx < link->wr_tx_cnt) { |
| 218 | u32 idx = pend->idx; | ||
| 219 | |||
| 218 | /* clear the full struct smc_wr_tx_pend including .priv */ | 220 | /* clear the full struct smc_wr_tx_pend including .priv */ |
| 219 | memset(&link->wr_tx_pends[pend->idx], 0, | 221 | memset(&link->wr_tx_pends[pend->idx], 0, |
| 220 | sizeof(link->wr_tx_pends[pend->idx])); | 222 | sizeof(link->wr_tx_pends[pend->idx])); |
| 221 | memset(&link->wr_tx_bufs[pend->idx], 0, | 223 | memset(&link->wr_tx_bufs[pend->idx], 0, |
| 222 | sizeof(link->wr_tx_bufs[pend->idx])); | 224 | sizeof(link->wr_tx_bufs[pend->idx])); |
| 223 | test_and_clear_bit(pend->idx, link->wr_tx_mask); | 225 | test_and_clear_bit(idx, link->wr_tx_mask); |
| 224 | return 1; | 226 | return 1; |
| 225 | } | 227 | } |
| 226 | 228 | ||
