125 files changed, 1118 insertions, 741 deletions
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 2df5894353d6..cd7303d7fa25 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1009,16 +1009,18 @@ The kernel interface functions are as follows: | |||
1009 | 1009 | ||
1010 | (*) Check call still alive. | 1010 | (*) Check call still alive. |
1011 | 1011 | ||
1012 | u32 rxrpc_kernel_check_life(struct socket *sock, | 1012 | bool rxrpc_kernel_check_life(struct socket *sock, |
1013 | struct rxrpc_call *call); | 1013 | struct rxrpc_call *call, |
1014 | u32 *_life); | ||
1014 | void rxrpc_kernel_probe_life(struct socket *sock, | 1015 | void rxrpc_kernel_probe_life(struct socket *sock, |
1015 | struct rxrpc_call *call); | 1016 | struct rxrpc_call *call); |
1016 | 1017 | ||
1017 | The first function returns a number that is updated when ACKs are received | 1018 | The first function passes back in *_life a number that is updated when |
1018 | from the peer (notably including PING RESPONSE ACKs which we can elicit by | 1019 | ACKs are received from the peer (notably including PING RESPONSE ACKs |
1019 | sending PING ACKs to see if the call still exists on the server). The | 1020 | which we can elicit by sending PING ACKs to see if the call still exists |
1020 | caller should compare the numbers of two calls to see if the call is still | 1021 | on the server). The caller should compare the numbers of two calls to see |
1021 | alive after waiting for a suitable interval. | 1022 | if the call is still alive after waiting for a suitable interval. It also |
1023 | returns true as long as the call hasn't yet reached the completed state. | ||
1022 | 1024 | ||
1023 | This allows the caller to work out if the server is still contactable and | 1025 | This allows the caller to work out if the server is still contactable and |
1024 | if the call is still alive on the server while waiting for the server to | 1026 | if the call is still alive on the server while waiting for the server to |
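To make the updated interface concrete, here is a minimal usage sketch of how a kernel caller might poll liveness with the new signature. The helper name, the one-second wait and the surrounding logic are illustrative assumptions, not part of the patch; only rxrpc_kernel_check_life() and rxrpc_kernel_probe_life() come from the documentation above.

    #include <linux/delay.h>
    #include <net/af_rxrpc.h>

    /* Illustrative helper: false once the call has completed, or when the
     * life counter did not advance across a probe plus a short wait.
     */
    static bool example_call_seems_alive(struct socket *sock,
                                         struct rxrpc_call *call)
    {
            u32 life_before, life_after;

            if (!rxrpc_kernel_check_life(sock, call, &life_before))
                    return false;                   /* call already completed */

            rxrpc_kernel_probe_life(sock, call);    /* elicit a PING RESPONSE ACK */
            msleep(1000);                           /* "a suitable interval" (assumed) */

            if (!rxrpc_kernel_check_life(sock, call, &life_after))
                    return false;

            return life_after != life_before;       /* counter moved: peer answered */
    }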
diff --git a/MAINTAINERS b/MAINTAINERS
index 3671fdea5010..d18b4d62cc21 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10145,7 +10145,7 @@ F: drivers/spi/spi-at91-usart.c | |||
10145 | F: Documentation/devicetree/bindings/mfd/atmel-usart.txt | 10145 | F: Documentation/devicetree/bindings/mfd/atmel-usart.txt |
10146 | 10146 | ||
10147 | MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER | 10147 | MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER |
10148 | M: Woojung Huh <Woojung.Huh@microchip.com> | 10148 | M: Woojung Huh <woojung.huh@microchip.com> |
10149 | M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com> | 10149 | M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com> |
10150 | L: netdev@vger.kernel.org | 10150 | L: netdev@vger.kernel.org |
10151 | S: Maintained | 10151 | S: Maintained |
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 4ab8b1b6608f..a14e35d40538 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -710,10 +710,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) | |||
710 | struct sock *sk = sock->sk; | 710 | struct sock *sk = sock->sk; |
711 | int err = 0; | 711 | int err = 0; |
712 | 712 | ||
713 | if (!maddr || maddr->family != AF_ISDN) | 713 | if (addr_len < sizeof(struct sockaddr_mISDN)) |
714 | return -EINVAL; | 714 | return -EINVAL; |
715 | 715 | ||
716 | if (addr_len < sizeof(struct sockaddr_mISDN)) | 716 | if (!maddr || maddr->family != AF_ISDN) |
717 | return -EINVAL; | 717 | return -EINVAL; |
718 | 718 | ||
719 | lock_sock(sk); | 719 | lock_sock(sk); |
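The hunk above only swaps two checks, but the order is the whole point: addr_len comes from userspace, so the length must be validated before any field of the sockaddr is read. A generic sketch of that shape (the function name is illustrative; the types are the real mISDN ones):

    #include <linux/errno.h>
    #include <linux/mISDNif.h>
    #include <linux/socket.h>

    static int example_bind_sanity(struct sockaddr *addr, int addr_len)
    {
            struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *)addr;

            /* Length first: reading maddr->family before this check could
             * touch bytes the caller never actually supplied.
             */
            if (addr_len < sizeof(struct sockaddr_mISDN))
                    return -EINVAL;

            if (!maddr || maddr->family != AF_ISDN)
                    return -EINVAL;

            return 0;
    }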
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b59708c35faf..ee610721098e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this, | |||
3213 | return NOTIFY_DONE; | 3213 | return NOTIFY_DONE; |
3214 | 3214 | ||
3215 | if (event_dev->flags & IFF_MASTER) { | 3215 | if (event_dev->flags & IFF_MASTER) { |
3216 | int ret; | ||
3217 | |||
3216 | netdev_dbg(event_dev, "IFF_MASTER\n"); | 3218 | netdev_dbg(event_dev, "IFF_MASTER\n"); |
3217 | return bond_master_netdev_event(event, event_dev); | 3219 | ret = bond_master_netdev_event(event, event_dev); |
3220 | if (ret != NOTIFY_DONE) | ||
3221 | return ret; | ||
3218 | } | 3222 | } |
3219 | 3223 | ||
3220 | if (event_dev->flags & IFF_SLAVE) { | 3224 | if (event_dev->flags & IFF_SLAVE) { |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index a9bdc21873d3..10ff37d6dc78 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) | |||
957 | bnx2x_sample_bulletin(bp); | 957 | bnx2x_sample_bulletin(bp); |
958 | 958 | ||
959 | if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) { | 959 | if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) { |
960 | BNX2X_ERR("Hypervisor will dicline the request, avoiding\n"); | 960 | BNX2X_ERR("Hypervisor will decline the request, avoiding\n"); |
961 | rc = -EINVAL; | 961 | rc = -EINVAL; |
962 | goto out; | 962 | goto out; |
963 | } | 963 | } |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 28eac9056211..c032bef1b776 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -32,6 +32,13 @@ | |||
32 | #define DRV_NAME "nicvf" | 32 | #define DRV_NAME "nicvf" |
33 | #define DRV_VERSION "1.0" | 33 | #define DRV_VERSION "1.0" |
34 | 34 | ||
35 | /* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs | ||
36 | * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed | ||
37 | * this value, keeping headroom for the 14 byte Ethernet header and two | ||
38 | * VLAN tags (for QinQ) | ||
39 | */ | ||
40 | #define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2) | ||
41 | |||
35 | /* Supported devices */ | 42 | /* Supported devices */ |
36 | static const struct pci_device_id nicvf_id_table[] = { | 43 | static const struct pci_device_id nicvf_id_table[] = { |
37 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, | 44 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, |
@@ -1582,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) | |||
1582 | struct nicvf *nic = netdev_priv(netdev); | 1589 | struct nicvf *nic = netdev_priv(netdev); |
1583 | int orig_mtu = netdev->mtu; | 1590 | int orig_mtu = netdev->mtu; |
1584 | 1591 | ||
1592 | /* For now just support only the usual MTU sized frames, | ||
1593 | * plus some headroom for VLAN, QinQ. | ||
1594 | */ | ||
1595 | if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) { | ||
1596 | netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", | ||
1597 | netdev->mtu); | ||
1598 | return -EINVAL; | ||
1599 | } | ||
1600 | |||
1585 | netdev->mtu = new_mtu; | 1601 | netdev->mtu = new_mtu; |
1586 | 1602 | ||
1587 | if (!netif_running(netdev)) | 1603 | if (!netif_running(netdev)) |
@@ -1830,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) | |||
1830 | bool bpf_attached = false; | 1846 | bool bpf_attached = false; |
1831 | int ret = 0; | 1847 | int ret = 0; |
1832 | 1848 | ||
1833 | /* For now just support only the usual MTU sized frames */ | 1849 | /* For now just support only the usual MTU sized frames, |
1834 | if (prog && (dev->mtu > 1500)) { | 1850 | * plus some headroom for VLAN, QinQ. |
1851 | */ | ||
1852 | if (prog && dev->mtu > MAX_XDP_MTU) { | ||
1835 | netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", | 1853 | netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", |
1836 | dev->mtu); | 1854 | dev->mtu); |
1837 | return -EOPNOTSUPP; | 1855 | return -EOPNOTSUPP; |
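The new comment pins down where the limit comes from: XDP needs the frame in one contiguous buffer and the hardware splits anything above 1530 bytes, so with ETH_HLEN = 14 and two 4-byte VLAN tags the largest usable MTU is 1530 - 14 - 8 = 1508. A stand-alone sketch of the same arithmetic (the macro mirrors the patch; the helper is illustrative):

    #include <linux/if_ether.h>     /* ETH_HLEN == 14 */
    #include <linux/if_vlan.h>      /* VLAN_HLEN == 4 */
    #include <linux/types.h>

    /* Largest MTU whose worst-case frame (L2 header plus two VLAN tags for
     * QinQ) still fits in the 1530-byte contiguous buffer: 1508.
     */
    #define MAX_XDP_MTU     (1530 - ETH_HLEN - VLAN_HLEN * 2)

    /* The same check guards both paths touched by the patch: changing the MTU
     * while an XDP program is attached, and attaching a program to a
     * large-MTU device.
     */
    static bool example_xdp_mtu_ok(int mtu)
    {
            return mtu <= MAX_XDP_MTU;
    }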
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 697c2427f2b7..a96ad20ee484 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) | |||
1840 | int ret; | 1840 | int ret; |
1841 | 1841 | ||
1842 | if (enable) { | 1842 | if (enable) { |
1843 | ret = clk_prepare_enable(fep->clk_ahb); | ||
1844 | if (ret) | ||
1845 | return ret; | ||
1846 | |||
1847 | ret = clk_prepare_enable(fep->clk_enet_out); | 1843 | ret = clk_prepare_enable(fep->clk_enet_out); |
1848 | if (ret) | 1844 | if (ret) |
1849 | goto failed_clk_enet_out; | 1845 | return ret; |
1850 | 1846 | ||
1851 | if (fep->clk_ptp) { | 1847 | if (fep->clk_ptp) { |
1852 | mutex_lock(&fep->ptp_clk_mutex); | 1848 | mutex_lock(&fep->ptp_clk_mutex); |
@@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) | |||
1866 | 1862 | ||
1867 | phy_reset_after_clk_enable(ndev->phydev); | 1863 | phy_reset_after_clk_enable(ndev->phydev); |
1868 | } else { | 1864 | } else { |
1869 | clk_disable_unprepare(fep->clk_ahb); | ||
1870 | clk_disable_unprepare(fep->clk_enet_out); | 1865 | clk_disable_unprepare(fep->clk_enet_out); |
1871 | if (fep->clk_ptp) { | 1866 | if (fep->clk_ptp) { |
1872 | mutex_lock(&fep->ptp_clk_mutex); | 1867 | mutex_lock(&fep->ptp_clk_mutex); |
@@ -1885,8 +1880,6 @@ failed_clk_ref: | |||
1885 | failed_clk_ptp: | 1880 | failed_clk_ptp: |
1886 | if (fep->clk_enet_out) | 1881 | if (fep->clk_enet_out) |
1887 | clk_disable_unprepare(fep->clk_enet_out); | 1882 | clk_disable_unprepare(fep->clk_enet_out); |
1888 | failed_clk_enet_out: | ||
1889 | clk_disable_unprepare(fep->clk_ahb); | ||
1890 | 1883 | ||
1891 | return ret; | 1884 | return ret; |
1892 | } | 1885 | } |
@@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev) | |||
3470 | ret = clk_prepare_enable(fep->clk_ipg); | 3463 | ret = clk_prepare_enable(fep->clk_ipg); |
3471 | if (ret) | 3464 | if (ret) |
3472 | goto failed_clk_ipg; | 3465 | goto failed_clk_ipg; |
3466 | ret = clk_prepare_enable(fep->clk_ahb); | ||
3467 | if (ret) | ||
3468 | goto failed_clk_ahb; | ||
3473 | 3469 | ||
3474 | fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); | 3470 | fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); |
3475 | if (!IS_ERR(fep->reg_phy)) { | 3471 | if (!IS_ERR(fep->reg_phy)) { |
@@ -3563,6 +3559,9 @@ failed_reset: | |||
3563 | pm_runtime_put(&pdev->dev); | 3559 | pm_runtime_put(&pdev->dev); |
3564 | pm_runtime_disable(&pdev->dev); | 3560 | pm_runtime_disable(&pdev->dev); |
3565 | failed_regulator: | 3561 | failed_regulator: |
3562 | clk_disable_unprepare(fep->clk_ahb); | ||
3563 | failed_clk_ahb: | ||
3564 | clk_disable_unprepare(fep->clk_ipg); | ||
3566 | failed_clk_ipg: | 3565 | failed_clk_ipg: |
3567 | fec_enet_clk_enable(ndev, false); | 3566 | fec_enet_clk_enable(ndev, false); |
3568 | failed_clk: | 3567 | failed_clk: |
@@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev) | |||
3686 | struct net_device *ndev = dev_get_drvdata(dev); | 3685 | struct net_device *ndev = dev_get_drvdata(dev); |
3687 | struct fec_enet_private *fep = netdev_priv(ndev); | 3686 | struct fec_enet_private *fep = netdev_priv(ndev); |
3688 | 3687 | ||
3688 | clk_disable_unprepare(fep->clk_ahb); | ||
3689 | clk_disable_unprepare(fep->clk_ipg); | 3689 | clk_disable_unprepare(fep->clk_ipg); |
3690 | 3690 | ||
3691 | return 0; | 3691 | return 0; |
@@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev) | |||
3695 | { | 3695 | { |
3696 | struct net_device *ndev = dev_get_drvdata(dev); | 3696 | struct net_device *ndev = dev_get_drvdata(dev); |
3697 | struct fec_enet_private *fep = netdev_priv(ndev); | 3697 | struct fec_enet_private *fep = netdev_priv(ndev); |
3698 | int ret; | ||
3698 | 3699 | ||
3699 | return clk_prepare_enable(fep->clk_ipg); | 3700 | ret = clk_prepare_enable(fep->clk_ahb); |
3701 | if (ret) | ||
3702 | return ret; | ||
3703 | ret = clk_prepare_enable(fep->clk_ipg); | ||
3704 | if (ret) | ||
3705 | goto failed_clk_ipg; | ||
3706 | |||
3707 | return 0; | ||
3708 | |||
3709 | failed_clk_ipg: | ||
3710 | clk_disable_unprepare(fep->clk_ahb); | ||
3711 | return ret; | ||
3700 | } | 3712 | } |
3701 | 3713 | ||
3702 | static const struct dev_pm_ops fec_pm_ops = { | 3714 | static const struct dev_pm_ops fec_pm_ops = { |
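These hunks move clk_ahb out of fec_enet_clk_enable() and pair it with clk_ipg in probe and in the runtime PM callbacks, so the bus clock is gated whenever the MAC is runtime suspended. The resume path uses the usual enable-with-rollback shape; a generic sketch of that pattern (names are illustrative, not the driver's):

    #include <linux/clk.h>

    /* Enable two clocks in order; if the second fails, undo the first so the
     * caller sees either both on or both off.
     */
    static int example_enable_bus_clocks(struct clk *ahb, struct clk *ipg)
    {
            int ret;

            ret = clk_prepare_enable(ahb);
            if (ret)
                    return ret;

            ret = clk_prepare_enable(ipg);
            if (ret) {
                    clk_disable_unprepare(ahb);
                    return ret;
            }

            return 0;
    }

The matching suspend path disables the clocks in reverse order, as fec_runtime_suspend() now does.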
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 51cfe95f3e24..3dfb2d131eb7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -3762,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) | |||
3762 | { | 3762 | { |
3763 | struct device *dev = &adapter->vdev->dev; | 3763 | struct device *dev = &adapter->vdev->dev; |
3764 | struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; | 3764 | struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; |
3765 | netdev_features_t old_hw_features = 0; | ||
3765 | union ibmvnic_crq crq; | 3766 | union ibmvnic_crq crq; |
3766 | int i; | 3767 | int i; |
3767 | 3768 | ||
@@ -3837,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) | |||
3837 | adapter->ip_offload_ctrl.large_rx_ipv4 = 0; | 3838 | adapter->ip_offload_ctrl.large_rx_ipv4 = 0; |
3838 | adapter->ip_offload_ctrl.large_rx_ipv6 = 0; | 3839 | adapter->ip_offload_ctrl.large_rx_ipv6 = 0; |
3839 | 3840 | ||
3840 | adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO; | 3841 | if (adapter->state != VNIC_PROBING) { |
3842 | old_hw_features = adapter->netdev->hw_features; | ||
3843 | adapter->netdev->hw_features = 0; | ||
3844 | } | ||
3845 | |||
3846 | adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; | ||
3841 | 3847 | ||
3842 | if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) | 3848 | if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) |
3843 | adapter->netdev->features |= NETIF_F_IP_CSUM; | 3849 | adapter->netdev->hw_features |= NETIF_F_IP_CSUM; |
3844 | 3850 | ||
3845 | if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) | 3851 | if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) |
3846 | adapter->netdev->features |= NETIF_F_IPV6_CSUM; | 3852 | adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; |
3847 | 3853 | ||
3848 | if ((adapter->netdev->features & | 3854 | if ((adapter->netdev->features & |
3849 | (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) | 3855 | (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) |
3850 | adapter->netdev->features |= NETIF_F_RXCSUM; | 3856 | adapter->netdev->hw_features |= NETIF_F_RXCSUM; |
3851 | 3857 | ||
3852 | if (buf->large_tx_ipv4) | 3858 | if (buf->large_tx_ipv4) |
3853 | adapter->netdev->features |= NETIF_F_TSO; | 3859 | adapter->netdev->hw_features |= NETIF_F_TSO; |
3854 | if (buf->large_tx_ipv6) | 3860 | if (buf->large_tx_ipv6) |
3855 | adapter->netdev->features |= NETIF_F_TSO6; | 3861 | adapter->netdev->hw_features |= NETIF_F_TSO6; |
3856 | 3862 | ||
3857 | adapter->netdev->hw_features |= adapter->netdev->features; | 3863 | if (adapter->state == VNIC_PROBING) { |
3864 | adapter->netdev->features |= adapter->netdev->hw_features; | ||
3865 | } else if (old_hw_features != adapter->netdev->hw_features) { | ||
3866 | netdev_features_t tmp = 0; | ||
3867 | |||
3868 | /* disable features no longer supported */ | ||
3869 | adapter->netdev->features &= adapter->netdev->hw_features; | ||
3870 | /* turn on features now supported if previously enabled */ | ||
3871 | tmp = (old_hw_features ^ adapter->netdev->hw_features) & | ||
3872 | adapter->netdev->hw_features; | ||
3873 | adapter->netdev->features |= | ||
3874 | tmp & adapter->netdev->wanted_features; | ||
3875 | } | ||
3858 | 3876 | ||
3859 | memset(&crq, 0, sizeof(crq)); | 3877 | memset(&crq, 0, sizeof(crq)); |
3860 | crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; | 3878 | crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; |
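The two comments in the hunk above describe a small piece of mask arithmetic: after a reset the freshly reported hw_features may differ from what the adapter offered before, so currently enabled features that disappeared must be dropped, and features that came back should only be re-enabled if the user had wanted them. A self-contained restatement of that computation (function and parameter names are illustrative):

    #include <linux/netdevice.h>

    static netdev_features_t example_reconcile_features(netdev_features_t features,
                                                        netdev_features_t old_hw,
                                                        netdev_features_t new_hw,
                                                        netdev_features_t wanted)
    {
            netdev_features_t newly_supported;

            /* Drop anything the adapter can no longer offload. */
            features &= new_hw;

            /* Bits supported now but not before the reset... */
            newly_supported = (old_hw ^ new_hw) & new_hw;

            /* ...come back only if the user had asked for them. */
            features |= newly_supported & wanted;

            return features;
    }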
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 71c65cc17904..d3eaf2ceaa39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -858,6 +858,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs); | |||
858 | * switching channels | 858 | * switching channels |
859 | */ | 859 | */ |
860 | typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); | 860 | typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); |
861 | int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); | ||
861 | int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, | 862 | int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, |
862 | struct mlx5e_channels *new_chs, | 863 | struct mlx5e_channels *new_chs, |
863 | mlx5e_fp_hw_modify hw_modify); | 864 | mlx5e_fp_hw_modify hw_modify); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 9d38e62cdf24..476dd97f7f2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx) | |||
186 | 186 | ||
187 | static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv) | 187 | static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv) |
188 | { | 188 | { |
189 | int err; | 189 | int err = 0; |
190 | 190 | ||
191 | rtnl_lock(); | 191 | rtnl_lock(); |
192 | mutex_lock(&priv->state_lock); | 192 | mutex_lock(&priv->state_lock); |
193 | mlx5e_close_locked(priv->netdev); | 193 | |
194 | err = mlx5e_open_locked(priv->netdev); | 194 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) |
195 | goto out; | ||
196 | |||
197 | err = mlx5e_safe_reopen_channels(priv); | ||
198 | |||
199 | out: | ||
195 | mutex_unlock(&priv->state_lock); | 200 | mutex_unlock(&priv->state_lock); |
196 | rtnl_unlock(); | 201 | rtnl_unlock(); |
197 | 202 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index fa2a3c444cdc..eec07b34b4ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, | |||
39 | return -EOPNOTSUPP; | 39 | return -EOPNOTSUPP; |
40 | } | 40 | } |
41 | 41 | ||
42 | if (!(mlx5e_eswitch_rep(*out_dev) && | ||
43 | mlx5e_is_uplink_rep(netdev_priv(*out_dev)))) | ||
44 | return -EOPNOTSUPP; | ||
45 | |||
42 | return 0; | 46 | return 0; |
43 | } | 47 | } |
44 | 48 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 5efce4a3ff79..76a3d01a489e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1768,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) | |||
1768 | struct mlx5e_channel *c; | 1768 | struct mlx5e_channel *c; |
1769 | int i; | 1769 | int i; |
1770 | 1770 | ||
1771 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) | 1771 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || |
1772 | priv->channels.params.xdp_prog) | ||
1772 | return 0; | 1773 | return 0; |
1773 | 1774 | ||
1774 | for (i = 0; i < channels->num; i++) { | 1775 | for (i = 0; i < channels->num; i++) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b5fdbd3190d9..f7eb521db580 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -951,7 +951,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, | |||
951 | if (params->rx_dim_enabled) | 951 | if (params->rx_dim_enabled) |
952 | __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); | 952 | __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); |
953 | 953 | ||
954 | if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)) | 954 | /* We disable csum_complete when XDP is enabled since |
955 | * XDP programs might manipulate packets which will render | ||
956 | * skb->checksum incorrect. | ||
957 | */ | ||
958 | if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp) | ||
955 | __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); | 959 | __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); |
956 | 960 | ||
957 | return 0; | 961 | return 0; |
@@ -2937,6 +2941,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, | |||
2937 | return 0; | 2941 | return 0; |
2938 | } | 2942 | } |
2939 | 2943 | ||
2944 | int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) | ||
2945 | { | ||
2946 | struct mlx5e_channels new_channels = {}; | ||
2947 | |||
2948 | new_channels.params = priv->channels.params; | ||
2949 | return mlx5e_safe_switch_channels(priv, &new_channels, NULL); | ||
2950 | } | ||
2951 | |||
2940 | void mlx5e_timestamp_init(struct mlx5e_priv *priv) | 2952 | void mlx5e_timestamp_init(struct mlx5e_priv *priv) |
2941 | { | 2953 | { |
2942 | priv->tstamp.tx_type = HWTSTAMP_TX_OFF; | 2954 | priv->tstamp.tx_type = HWTSTAMP_TX_OFF; |
@@ -4161,11 +4173,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) | |||
4161 | if (!report_failed) | 4173 | if (!report_failed) |
4162 | goto unlock; | 4174 | goto unlock; |
4163 | 4175 | ||
4164 | mlx5e_close_locked(priv->netdev); | 4176 | err = mlx5e_safe_reopen_channels(priv); |
4165 | err = mlx5e_open_locked(priv->netdev); | ||
4166 | if (err) | 4177 | if (err) |
4167 | netdev_err(priv->netdev, | 4178 | netdev_err(priv->netdev, |
4168 | "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", | 4179 | "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n", |
4169 | err); | 4180 | err); |
4170 | 4181 | ||
4171 | unlock: | 4182 | unlock: |
@@ -4553,7 +4564,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, | |||
4553 | { | 4564 | { |
4554 | enum mlx5e_traffic_types tt; | 4565 | enum mlx5e_traffic_types tt; |
4555 | 4566 | ||
4556 | rss_params->hfunc = ETH_RSS_HASH_XOR; | 4567 | rss_params->hfunc = ETH_RSS_HASH_TOP; |
4557 | netdev_rss_key_fill(rss_params->toeplitz_hash_key, | 4568 | netdev_rss_key_fill(rss_params->toeplitz_hash_key, |
4558 | sizeof(rss_params->toeplitz_hash_key)); | 4569 | sizeof(rss_params->toeplitz_hash_key)); |
4559 | mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, | 4570 | mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, |
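The comment added in mlx5e_open_rq() carries the reasoning behind both this file and the earlier ethtool hunk: CHECKSUM_COMPLETE is a checksum the NIC computed over the bytes it received, so once an XDP program is allowed to rewrite the packet that value can no longer be trusted and the RQ is flagged to skip csum_complete. A minimal software illustration of why a stale hardware checksum is useless (illustrative only, not driver code):

    #include <net/checksum.h>

    /* If hw_csum was computed before an XDP program modified the buffer,
     * recomputing over the current bytes will no longer match it; the driver
     * therefore stops reporting CHECKSUM_COMPLETE rather than lying.
     */
    static bool example_csum_still_valid(const void *data, int len, __wsum hw_csum)
    {
            return csum_partial(data, len, 0) == hw_csum;
    }

The other hunks introduce mlx5e_safe_reopen_channels(), which rebuilds the channels from the current parameters via mlx5e_safe_switch_channels(); since the new set is created before the old one is torn down, a failed tx-timeout recovery no longer risks leaving the interface closed the way a close/open sequence could.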
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3dde5c7e0739..c3b3002ff62f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth, | |||
692 | { | 692 | { |
693 | *proto = ((struct ethhdr *)skb->data)->h_proto; | 693 | *proto = ((struct ethhdr *)skb->data)->h_proto; |
694 | *proto = __vlan_get_protocol(skb, *proto, network_depth); | 694 | *proto = __vlan_get_protocol(skb, *proto, network_depth); |
695 | return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6)); | 695 | |
696 | if (*proto == htons(ETH_P_IP)) | ||
697 | return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr)); | ||
698 | |||
699 | if (*proto == htons(ETH_P_IPV6)) | ||
700 | return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr)); | ||
701 | |||
702 | return false; | ||
696 | } | 703 | } |
697 | 704 | ||
698 | static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) | 705 | static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) |
@@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) | |||
712 | rq->stats->ecn_mark += !!rc; | 719 | rq->stats->ecn_mark += !!rc; |
713 | } | 720 | } |
714 | 721 | ||
715 | static u32 mlx5e_get_fcs(const struct sk_buff *skb) | ||
716 | { | ||
717 | const void *fcs_bytes; | ||
718 | u32 _fcs_bytes; | ||
719 | |||
720 | fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN, | ||
721 | ETH_FCS_LEN, &_fcs_bytes); | ||
722 | |||
723 | return __get_unaligned_cpu32(fcs_bytes); | ||
724 | } | ||
725 | |||
726 | static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) | 722 | static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) |
727 | { | 723 | { |
728 | void *ip_p = skb->data + network_depth; | 724 | void *ip_p = skb->data + network_depth; |
@@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) | |||
733 | 729 | ||
734 | #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) | 730 | #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) |
735 | 731 | ||
732 | #define MAX_PADDING 8 | ||
733 | |||
734 | static void | ||
735 | tail_padding_csum_slow(struct sk_buff *skb, int offset, int len, | ||
736 | struct mlx5e_rq_stats *stats) | ||
737 | { | ||
738 | stats->csum_complete_tail_slow++; | ||
739 | skb->csum = csum_block_add(skb->csum, | ||
740 | skb_checksum(skb, offset, len, 0), | ||
741 | offset); | ||
742 | } | ||
743 | |||
744 | static void | ||
745 | tail_padding_csum(struct sk_buff *skb, int offset, | ||
746 | struct mlx5e_rq_stats *stats) | ||
747 | { | ||
748 | u8 tail_padding[MAX_PADDING]; | ||
749 | int len = skb->len - offset; | ||
750 | void *tail; | ||
751 | |||
752 | if (unlikely(len > MAX_PADDING)) { | ||
753 | tail_padding_csum_slow(skb, offset, len, stats); | ||
754 | return; | ||
755 | } | ||
756 | |||
757 | tail = skb_header_pointer(skb, offset, len, tail_padding); | ||
758 | if (unlikely(!tail)) { | ||
759 | tail_padding_csum_slow(skb, offset, len, stats); | ||
760 | return; | ||
761 | } | ||
762 | |||
763 | stats->csum_complete_tail++; | ||
764 | skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset); | ||
765 | } | ||
766 | |||
767 | static void | ||
768 | mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto, | ||
769 | struct mlx5e_rq_stats *stats) | ||
770 | { | ||
771 | struct ipv6hdr *ip6; | ||
772 | struct iphdr *ip4; | ||
773 | int pkt_len; | ||
774 | |||
775 | switch (proto) { | ||
776 | case htons(ETH_P_IP): | ||
777 | ip4 = (struct iphdr *)(skb->data + network_depth); | ||
778 | pkt_len = network_depth + ntohs(ip4->tot_len); | ||
779 | break; | ||
780 | case htons(ETH_P_IPV6): | ||
781 | ip6 = (struct ipv6hdr *)(skb->data + network_depth); | ||
782 | pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len); | ||
783 | break; | ||
784 | default: | ||
785 | return; | ||
786 | } | ||
787 | |||
788 | if (likely(pkt_len >= skb->len)) | ||
789 | return; | ||
790 | |||
791 | tail_padding_csum(skb, pkt_len, stats); | ||
792 | } | ||
793 | |||
736 | static inline void mlx5e_handle_csum(struct net_device *netdev, | 794 | static inline void mlx5e_handle_csum(struct net_device *netdev, |
737 | struct mlx5_cqe64 *cqe, | 795 | struct mlx5_cqe64 *cqe, |
738 | struct mlx5e_rq *rq, | 796 | struct mlx5e_rq *rq, |
@@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, | |||
752 | return; | 810 | return; |
753 | } | 811 | } |
754 | 812 | ||
755 | if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) | 813 | /* True when explicitly set via priv flag, or XDP prog is loaded */ |
814 | if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) | ||
756 | goto csum_unnecessary; | 815 | goto csum_unnecessary; |
757 | 816 | ||
758 | /* CQE csum doesn't cover padding octets in short ethernet | 817 | /* CQE csum doesn't cover padding octets in short ethernet |
@@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, | |||
780 | skb->csum = csum_partial(skb->data + ETH_HLEN, | 839 | skb->csum = csum_partial(skb->data + ETH_HLEN, |
781 | network_depth - ETH_HLEN, | 840 | network_depth - ETH_HLEN, |
782 | skb->csum); | 841 | skb->csum); |
783 | if (unlikely(netdev->features & NETIF_F_RXFCS)) | 842 | |
784 | skb->csum = csum_block_add(skb->csum, | 843 | mlx5e_skb_padding_csum(skb, network_depth, proto, stats); |
785 | (__force __wsum)mlx5e_get_fcs(skb), | ||
786 | skb->len - ETH_FCS_LEN); | ||
787 | stats->csum_complete++; | 844 | stats->csum_complete++; |
788 | return; | 845 | return; |
789 | } | 846 | } |
790 | 847 | ||
791 | csum_unnecessary: | 848 | csum_unnecessary: |
792 | if (likely((cqe->hds_ip_ext & CQE_L3_OK) && | 849 | if (likely((cqe->hds_ip_ext & CQE_L3_OK) && |
793 | ((cqe->hds_ip_ext & CQE_L4_OK) || | 850 | (cqe->hds_ip_ext & CQE_L4_OK))) { |
794 | (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) { | ||
795 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 851 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
796 | if (cqe_is_tunneled(cqe)) { | 852 | if (cqe_is_tunneled(cqe)) { |
797 | skb->csum_level = 1; | 853 | skb->csum_level = 1; |
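The new helpers enforce one invariant: with CHECKSUM_COMPLETE, skb->csum must cover every byte the skb carries, but the hardware checksum stops at the end of the IP datagram, so any tail padding on short frames has to be folded in by the driver (a fast path for up to 8 bytes of padding, skb_checksum() as the fallback, counted by the two new csum_complete_tail statistics). The earlier pskb_may_pull() change guarantees the IP header is in the linear area before its length fields are read. A side sketch of the length calculation that decides whether padding is present (illustrative only; the driver's version is in the hunk above):

    #include <linux/if_ether.h>
    #include <linux/ip.h>
    #include <linux/ipv6.h>
    #include <linux/skbuff.h>

    /* How many trailing bytes of this skb are padding beyond the IP datagram.
     * network_depth is the offset of the IP header, as in the driver code.
     */
    static int example_tail_padding_len(const struct sk_buff *skb,
                                        int network_depth, __be16 proto)
    {
            int pkt_len;

            if (proto == htons(ETH_P_IP)) {
                    const struct iphdr *ip4 = (void *)(skb->data + network_depth);

                    pkt_len = network_depth + ntohs(ip4->tot_len);
            } else if (proto == htons(ETH_P_IPV6)) {
                    const struct ipv6hdr *ip6 = (void *)(skb->data + network_depth);

                    pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
            } else {
                    return 0;
            }

            return skb->len > pkt_len ? skb->len - pkt_len : 0;
    }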
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1a78e05cbba8..b75aa8b8bf04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = { | |||
59 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, | 59 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, |
60 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, | 60 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, |
61 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, | 61 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, |
62 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) }, | ||
63 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) }, | ||
62 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, | 64 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, |
63 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, | 65 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, |
64 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, | 66 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, |
@@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) | |||
151 | s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; | 153 | s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; |
152 | s->rx_csum_none += rq_stats->csum_none; | 154 | s->rx_csum_none += rq_stats->csum_none; |
153 | s->rx_csum_complete += rq_stats->csum_complete; | 155 | s->rx_csum_complete += rq_stats->csum_complete; |
156 | s->rx_csum_complete_tail += rq_stats->csum_complete_tail; | ||
157 | s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow; | ||
154 | s->rx_csum_unnecessary += rq_stats->csum_unnecessary; | 158 | s->rx_csum_unnecessary += rq_stats->csum_unnecessary; |
155 | s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; | 159 | s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; |
156 | s->rx_xdp_drop += rq_stats->xdp_drop; | 160 | s->rx_xdp_drop += rq_stats->xdp_drop; |
@@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = { | |||
1190 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, | 1194 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, |
1191 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, | 1195 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, |
1192 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, | 1196 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, |
1197 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) }, | ||
1198 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) }, | ||
1193 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, | 1199 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, |
1194 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, | 1200 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, |
1195 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, | 1201 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 4640d4f986f8..16c3b785f282 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -71,6 +71,8 @@ struct mlx5e_sw_stats { | |||
71 | u64 rx_csum_unnecessary; | 71 | u64 rx_csum_unnecessary; |
72 | u64 rx_csum_none; | 72 | u64 rx_csum_none; |
73 | u64 rx_csum_complete; | 73 | u64 rx_csum_complete; |
74 | u64 rx_csum_complete_tail; | ||
75 | u64 rx_csum_complete_tail_slow; | ||
74 | u64 rx_csum_unnecessary_inner; | 76 | u64 rx_csum_unnecessary_inner; |
75 | u64 rx_xdp_drop; | 77 | u64 rx_xdp_drop; |
76 | u64 rx_xdp_redirect; | 78 | u64 rx_xdp_redirect; |
@@ -181,6 +183,8 @@ struct mlx5e_rq_stats { | |||
181 | u64 packets; | 183 | u64 packets; |
182 | u64 bytes; | 184 | u64 bytes; |
183 | u64 csum_complete; | 185 | u64 csum_complete; |
186 | u64 csum_complete_tail; | ||
187 | u64 csum_complete_tail_slow; | ||
184 | u64 csum_unnecessary; | 188 | u64 csum_unnecessary; |
185 | u64 csum_unnecessary_inner; | 189 | u64 csum_unnecessary_inner; |
186 | u64 csum_none; | 190 | u64 csum_none; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 8de64e88c670..22a2ef111514 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, | |||
148 | return ret; | 148 | return ret; |
149 | } | 149 | } |
150 | 150 | ||
151 | static void mlx5_fpga_tls_release_swid(struct idr *idr, | 151 | static void *mlx5_fpga_tls_release_swid(struct idr *idr, |
152 | spinlock_t *idr_spinlock, u32 swid) | 152 | spinlock_t *idr_spinlock, u32 swid) |
153 | { | 153 | { |
154 | unsigned long flags; | 154 | unsigned long flags; |
155 | void *ptr; | ||
155 | 156 | ||
156 | spin_lock_irqsave(idr_spinlock, flags); | 157 | spin_lock_irqsave(idr_spinlock, flags); |
157 | idr_remove(idr, swid); | 158 | ptr = idr_remove(idr, swid); |
158 | spin_unlock_irqrestore(idr_spinlock, flags); | 159 | spin_unlock_irqrestore(idr_spinlock, flags); |
160 | return ptr; | ||
159 | } | 161 | } |
160 | 162 | ||
161 | static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, | 163 | static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, |
@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, | |||
165 | kfree(buf); | 167 | kfree(buf); |
166 | } | 168 | } |
167 | 169 | ||
168 | struct mlx5_teardown_stream_context { | ||
169 | struct mlx5_fpga_tls_command_context cmd; | ||
170 | u32 swid; | ||
171 | }; | ||
172 | |||
173 | static void | 170 | static void |
174 | mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, | 171 | mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, |
175 | struct mlx5_fpga_device *fdev, | 172 | struct mlx5_fpga_device *fdev, |
176 | struct mlx5_fpga_tls_command_context *cmd, | 173 | struct mlx5_fpga_tls_command_context *cmd, |
177 | struct mlx5_fpga_dma_buf *resp) | 174 | struct mlx5_fpga_dma_buf *resp) |
178 | { | 175 | { |
179 | struct mlx5_teardown_stream_context *ctx = | ||
180 | container_of(cmd, struct mlx5_teardown_stream_context, cmd); | ||
181 | |||
182 | if (resp) { | 176 | if (resp) { |
183 | u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); | 177 | u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); |
184 | 178 | ||
@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, | |||
186 | mlx5_fpga_err(fdev, | 180 | mlx5_fpga_err(fdev, |
187 | "Teardown stream failed with syndrome = %d", | 181 | "Teardown stream failed with syndrome = %d", |
188 | syndrome); | 182 | syndrome); |
189 | else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx)) | ||
190 | mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, | ||
191 | &fdev->tls->tx_idr_spinlock, | ||
192 | ctx->swid); | ||
193 | else | ||
194 | mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr, | ||
195 | &fdev->tls->rx_idr_spinlock, | ||
196 | ctx->swid); | ||
197 | } | 183 | } |
198 | mlx5_fpga_tls_put_command_ctx(cmd); | 184 | mlx5_fpga_tls_put_command_ctx(cmd); |
199 | } | 185 | } |
@@ -217,22 +203,22 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, | |||
217 | void *cmd; | 203 | void *cmd; |
218 | int ret; | 204 | int ret; |
219 | 205 | ||
220 | rcu_read_lock(); | ||
221 | flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); | ||
222 | rcu_read_unlock(); | ||
223 | |||
224 | if (!flow) { | ||
225 | WARN_ONCE(1, "Received NULL pointer for handle\n"); | ||
226 | return -EINVAL; | ||
227 | } | ||
228 | |||
229 | buf = kzalloc(size, GFP_ATOMIC); | 206 | buf = kzalloc(size, GFP_ATOMIC); |
230 | if (!buf) | 207 | if (!buf) |
231 | return -ENOMEM; | 208 | return -ENOMEM; |
232 | 209 | ||
233 | cmd = (buf + 1); | 210 | cmd = (buf + 1); |
234 | 211 | ||
212 | rcu_read_lock(); | ||
213 | flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); | ||
214 | if (unlikely(!flow)) { | ||
215 | rcu_read_unlock(); | ||
216 | WARN_ONCE(1, "Received NULL pointer for handle\n"); | ||
217 | kfree(buf); | ||
218 | return -EINVAL; | ||
219 | } | ||
235 | mlx5_fpga_tls_flow_to_cmd(flow, cmd); | 220 | mlx5_fpga_tls_flow_to_cmd(flow, cmd); |
221 | rcu_read_unlock(); | ||
236 | 222 | ||
237 | MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); | 223 | MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); |
238 | MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); | 224 | MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); |
@@ -253,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, | |||
253 | static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, | 239 | static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, |
254 | void *flow, u32 swid, gfp_t flags) | 240 | void *flow, u32 swid, gfp_t flags) |
255 | { | 241 | { |
256 | struct mlx5_teardown_stream_context *ctx; | 242 | struct mlx5_fpga_tls_command_context *ctx; |
257 | struct mlx5_fpga_dma_buf *buf; | 243 | struct mlx5_fpga_dma_buf *buf; |
258 | void *cmd; | 244 | void *cmd; |
259 | 245 | ||
@@ -261,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, | |||
261 | if (!ctx) | 247 | if (!ctx) |
262 | return; | 248 | return; |
263 | 249 | ||
264 | buf = &ctx->cmd.buf; | 250 | buf = &ctx->buf; |
265 | cmd = (ctx + 1); | 251 | cmd = (ctx + 1); |
266 | MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); | 252 | MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); |
267 | MLX5_SET(tls_cmd, cmd, swid, swid); | 253 | MLX5_SET(tls_cmd, cmd, swid, swid); |
@@ -272,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, | |||
272 | buf->sg[0].data = cmd; | 258 | buf->sg[0].data = cmd; |
273 | buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; | 259 | buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; |
274 | 260 | ||
275 | ctx->swid = swid; | 261 | mlx5_fpga_tls_cmd_send(mdev->fpga, ctx, |
276 | mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, | ||
277 | mlx5_fpga_tls_teardown_completion); | 262 | mlx5_fpga_tls_teardown_completion); |
278 | } | 263 | } |
279 | 264 | ||
@@ -283,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, | |||
283 | struct mlx5_fpga_tls *tls = mdev->fpga->tls; | 268 | struct mlx5_fpga_tls *tls = mdev->fpga->tls; |
284 | void *flow; | 269 | void *flow; |
285 | 270 | ||
286 | rcu_read_lock(); | ||
287 | if (direction_sx) | 271 | if (direction_sx) |
288 | flow = idr_find(&tls->tx_idr, swid); | 272 | flow = mlx5_fpga_tls_release_swid(&tls->tx_idr, |
273 | &tls->tx_idr_spinlock, | ||
274 | swid); | ||
289 | else | 275 | else |
290 | flow = idr_find(&tls->rx_idr, swid); | 276 | flow = mlx5_fpga_tls_release_swid(&tls->rx_idr, |
291 | 277 | &tls->rx_idr_spinlock, | |
292 | rcu_read_unlock(); | 278 | swid); |
293 | 279 | ||
294 | if (!flow) { | 280 | if (!flow) { |
295 | mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n", | 281 | mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n", |
@@ -297,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, | |||
297 | return; | 283 | return; |
298 | } | 284 | } |
299 | 285 | ||
286 | synchronize_rcu(); /* before kfree(flow) */ | ||
300 | mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); | 287 | mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); |
301 | } | 288 | } |
302 | 289 | ||
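The rework above closes a use-after-free window: the flow is now unpublished from the IDR at delete time (idr_remove() returns the pointer that was stored), and synchronize_rcu() guarantees that any concurrent resync still dereferencing it under rcu_read_lock() has finished before the flow can be freed; the resync path, in turn, now copies what it needs from the flow before leaving the RCU section. A generic sketch of that lifetime rule, with illustrative types and locking (the driver uses spin_lock_irqsave and its own structures):

    #include <linux/errno.h>
    #include <linux/idr.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct example_flow { int data; };

    /* Writer: unpublish first, wait for readers, then free. */
    static void example_del_flow(struct idr *idr, spinlock_t *lock, u32 id)
    {
            struct example_flow *flow;

            spin_lock_bh(lock);
            flow = idr_remove(idr, id);     /* returns the removed pointer */
            spin_unlock_bh(lock);

            if (!flow)
                    return;

            synchronize_rcu();              /* let idr_find() readers drain */
            kfree(flow);
    }

    /* Reader: everything taken from the flow happens inside the RCU section. */
    static int example_read_flow(struct idr *idr, u32 id)
    {
            struct example_flow *flow;
            int data;

            rcu_read_lock();
            flow = idr_find(idr, id);
            if (!flow) {
                    rcu_read_unlock();
                    return -ENOENT;
            }
            data = flow->data;
            rcu_read_unlock();

            return data;
    }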
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index d23d53c0e284..f26a4ca29363 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) | |||
568 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) | 568 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) |
569 | return 0; | 569 | return 0; |
570 | 570 | ||
571 | emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0); | 571 | emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0); |
572 | if (!emad_wq) | 572 | if (!emad_wq) |
573 | return -ENOMEM; | 573 | return -ENOMEM; |
574 | mlxsw_core->emad_wq = emad_wq; | 574 | mlxsw_core->emad_wq = emad_wq; |
@@ -1958,10 +1958,10 @@ static int __init mlxsw_core_module_init(void) | |||
1958 | { | 1958 | { |
1959 | int err; | 1959 | int err; |
1960 | 1960 | ||
1961 | mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0); | 1961 | mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0); |
1962 | if (!mlxsw_wq) | 1962 | if (!mlxsw_wq) |
1963 | return -ENOMEM; | 1963 | return -ENOMEM; |
1964 | mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM, | 1964 | mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0, |
1965 | mlxsw_core_driver_name); | 1965 | mlxsw_core_driver_name); |
1966 | if (!mlxsw_owq) { | 1966 | if (!mlxsw_owq) { |
1967 | err = -ENOMEM; | 1967 | err = -ENOMEM; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 9a79b5e11597..d633bef5f105 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = { | |||
70 | {MLXSW_REG_SBXX_DIR_EGRESS, 1}, | 70 | {MLXSW_REG_SBXX_DIR_EGRESS, 1}, |
71 | {MLXSW_REG_SBXX_DIR_EGRESS, 2}, | 71 | {MLXSW_REG_SBXX_DIR_EGRESS, 2}, |
72 | {MLXSW_REG_SBXX_DIR_EGRESS, 3}, | 72 | {MLXSW_REG_SBXX_DIR_EGRESS, 3}, |
73 | {MLXSW_REG_SBXX_DIR_EGRESS, 15}, | ||
73 | }; | 74 | }; |
74 | 75 | ||
75 | #define MLXSW_SP_SB_ING_TC_COUNT 8 | 76 | #define MLXSW_SP_SB_ING_TC_COUNT 8 |
@@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = { | |||
428 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), | 429 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), |
429 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), | 430 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), |
430 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), | 431 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), |
432 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI), | ||
431 | }; | 433 | }; |
432 | 434 | ||
433 | static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, | 435 | static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, |
@@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = { | |||
517 | MLXSW_SP_SB_CM(0, 7, 4), | 519 | MLXSW_SP_SB_CM(0, 7, 4), |
518 | MLXSW_SP_SB_CM(0, 7, 4), | 520 | MLXSW_SP_SB_CM(0, 7, 4), |
519 | MLXSW_SP_SB_CM(0, 7, 4), | 521 | MLXSW_SP_SB_CM(0, 7, 4), |
520 | MLXSW_SP_SB_CM(0, 7, 4), | 522 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
521 | MLXSW_SP_SB_CM(0, 7, 4), | 523 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
522 | MLXSW_SP_SB_CM(0, 7, 4), | 524 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
523 | MLXSW_SP_SB_CM(0, 7, 4), | 525 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
524 | MLXSW_SP_SB_CM(0, 7, 4), | 526 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
525 | MLXSW_SP_SB_CM(0, 7, 4), | 527 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
526 | MLXSW_SP_SB_CM(0, 7, 4), | 528 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
527 | MLXSW_SP_SB_CM(0, 7, 4), | 529 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
528 | MLXSW_SP_SB_CM(1, 0xff, 4), | 530 | MLXSW_SP_SB_CM(1, 0xff, 4), |
529 | }; | 531 | }; |
530 | 532 | ||
@@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = { | |||
671 | MLXSW_SP_SB_PM(0, 0), | 673 | MLXSW_SP_SB_PM(0, 0), |
672 | MLXSW_SP_SB_PM(0, 0), | 674 | MLXSW_SP_SB_PM(0, 0), |
673 | MLXSW_SP_SB_PM(0, 0), | 675 | MLXSW_SP_SB_PM(0, 0), |
676 | MLXSW_SP_SB_PM(10000, 90000), | ||
674 | }; | 677 | }; |
675 | 678 | ||
676 | static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) | 679 | static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 52fed8c7bf1e..902e766a8ed3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -6781,7 +6781,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, | |||
6781 | /* A RIF is not created for macvlan netdevs. Their MAC is used to | 6781 | /* A RIF is not created for macvlan netdevs. Their MAC is used to |
6782 | * populate the FDB | 6782 | * populate the FDB |
6783 | */ | 6783 | */ |
6784 | if (netif_is_macvlan(dev)) | 6784 | if (netif_is_macvlan(dev) || netif_is_l3_master(dev)) |
6785 | return 0; | 6785 | return 0; |
6786 | 6786 | ||
6787 | for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { | 6787 | for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index f6ce386c3036..50111f228d77 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1630 | u16 fid_index; | 1630 | u16 fid_index; |
1631 | int err = 0; | 1631 | int err = 0; |
1632 | 1632 | ||
1633 | if (switchdev_trans_ph_prepare(trans)) | 1633 | if (switchdev_trans_ph_commit(trans)) |
1634 | return 0; | 1634 | return 0; |
1635 | 1635 | ||
1636 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); | 1636 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); |
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index a1d0d6e42533..d715ef4fc92f 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port, | |||
613 | struct netdev_hw_addr *hw_addr) | 613 | struct netdev_hw_addr *hw_addr) |
614 | { | 614 | { |
615 | struct ocelot *ocelot = port->ocelot; | 615 | struct ocelot *ocelot = port->ocelot; |
616 | struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL); | 616 | struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC); |
617 | 617 | ||
618 | if (!ha) | 618 | if (!ha) |
619 | return -ENOMEM; | 619 | return -ENOMEM; |
@@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) | |||
959 | ETH_GSTRING_LEN); | 959 | ETH_GSTRING_LEN); |
960 | } | 960 | } |
961 | 961 | ||
962 | static void ocelot_check_stats(struct work_struct *work) | 962 | static void ocelot_update_stats(struct ocelot *ocelot) |
963 | { | 963 | { |
964 | struct delayed_work *del_work = to_delayed_work(work); | ||
965 | struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work); | ||
966 | int i, j; | 964 | int i, j; |
967 | 965 | ||
968 | mutex_lock(&ocelot->stats_lock); | 966 | mutex_lock(&ocelot->stats_lock); |
@@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work) | |||
986 | } | 984 | } |
987 | } | 985 | } |
988 | 986 | ||
989 | cancel_delayed_work(&ocelot->stats_work); | 987 | mutex_unlock(&ocelot->stats_lock); |
988 | } | ||
989 | |||
990 | static void ocelot_check_stats_work(struct work_struct *work) | ||
991 | { | ||
992 | struct delayed_work *del_work = to_delayed_work(work); | ||
993 | struct ocelot *ocelot = container_of(del_work, struct ocelot, | ||
994 | stats_work); | ||
995 | |||
996 | ocelot_update_stats(ocelot); | ||
997 | |||
990 | queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, | 998 | queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, |
991 | OCELOT_STATS_CHECK_DELAY); | 999 | OCELOT_STATS_CHECK_DELAY); |
992 | |||
993 | mutex_unlock(&ocelot->stats_lock); | ||
994 | } | 1000 | } |
995 | 1001 | ||
996 | static void ocelot_get_ethtool_stats(struct net_device *dev, | 1002 | static void ocelot_get_ethtool_stats(struct net_device *dev, |
@@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev, | |||
1001 | int i; | 1007 | int i; |
1002 | 1008 | ||
1003 | /* check and update now */ | 1009 | /* check and update now */ |
1004 | ocelot_check_stats(&ocelot->stats_work.work); | 1010 | ocelot_update_stats(ocelot); |
1005 | 1011 | ||
1006 | /* Copy all counters */ | 1012 | /* Copy all counters */ |
1007 | for (i = 0; i < ocelot->num_stats; i++) | 1013 | for (i = 0; i < ocelot->num_stats; i++) |
@@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot) | |||
1809 | ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), | 1815 | ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), |
1810 | ANA_CPUQ_8021_CFG, i); | 1816 | ANA_CPUQ_8021_CFG, i); |
1811 | 1817 | ||
1812 | INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats); | 1818 | INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work); |
1813 | queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, | 1819 | queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, |
1814 | OCELOT_STATS_CHECK_DELAY); | 1820 | OCELOT_STATS_CHECK_DELAY); |
1815 | return 0; | 1821 | return 0; |
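Two independent fixes above: ocelot_mact_mc_add() switches to GFP_ATOMIC because the multicast sync callbacks run in atomic context where sleeping allocations are not allowed, and the stats refresh is split so the locked update helper can be called directly from the ethtool path while only the periodic work re-queues itself, after the mutex has been dropped. A reduced sketch of that second shape (structure and names are illustrative, not the driver's):

    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct example_stats {
            struct mutex lock;
            struct delayed_work work;
            struct workqueue_struct *wq;
            u64 counter;
    };

    /* Shared by the periodic work and the ethtool read path. */
    static void example_update_stats(struct example_stats *s)
    {
            mutex_lock(&s->lock);
            s->counter++;                   /* read hardware counters here */
            mutex_unlock(&s->lock);
    }

    /* Only the work function rearms itself, and it does so with the lock
     * already released.
     */
    static void example_stats_work(struct work_struct *work)
    {
            struct example_stats *s = container_of(to_delayed_work(work),
                                                   struct example_stats, work);

            example_update_stats(s);
            queue_delayed_work(s->wq, &s->work, msecs_to_jiffies(2000));
    }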
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 7cde387e5ec6..51cd57ab3d95 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, | |||
2366 | dma_object->addr))) { | 2366 | dma_object->addr))) { |
2367 | vxge_os_dma_free(devh->pdev, memblock, | 2367 | vxge_os_dma_free(devh->pdev, memblock, |
2368 | &dma_object->acc_handle); | 2368 | &dma_object->acc_handle); |
2369 | memblock = NULL; | ||
2369 | goto exit; | 2370 | goto exit; |
2370 | } | 2371 | } |
2371 | 2372 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 43a57ec296fd..127c89b22ef0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -431,12 +431,16 @@ struct qed_qm_info { | |||
431 | u8 num_pf_rls; | 431 | u8 num_pf_rls; |
432 | }; | 432 | }; |
433 | 433 | ||
434 | #define QED_OVERFLOW_BIT 1 | ||
435 | |||
434 | struct qed_db_recovery_info { | 436 | struct qed_db_recovery_info { |
435 | struct list_head list; | 437 | struct list_head list; |
436 | 438 | ||
437 | /* Lock to protect the doorbell recovery mechanism list */ | 439 | /* Lock to protect the doorbell recovery mechanism list */ |
438 | spinlock_t lock; | 440 | spinlock_t lock; |
441 | bool dorq_attn; | ||
439 | u32 db_recovery_counter; | 442 | u32 db_recovery_counter; |
443 | unsigned long overflow; | ||
440 | }; | 444 | }; |
441 | 445 | ||
442 | struct storm_stats { | 446 | struct storm_stats { |
@@ -920,8 +924,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); | |||
920 | 924 | ||
921 | /* doorbell recovery mechanism */ | 925 | /* doorbell recovery mechanism */ |
922 | void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); | 926 | void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); |
923 | void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, | 927 | void qed_db_recovery_execute(struct qed_hwfn *p_hwfn); |
924 | enum qed_db_rec_exec db_exec); | ||
925 | bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); | 928 | bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); |
926 | 929 | ||
927 | /* Other Linux specific common definitions */ | 930 | /* Other Linux specific common definitions */ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 9df8c4b3b54e..866cdc86a3f2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn, | |||
102 | 102 | ||
103 | /* Doorbell address sanity (address within doorbell bar range) */ | 103 | /* Doorbell address sanity (address within doorbell bar range) */ |
104 | static bool qed_db_rec_sanity(struct qed_dev *cdev, | 104 | static bool qed_db_rec_sanity(struct qed_dev *cdev, |
105 | void __iomem *db_addr, void *db_data) | 105 | void __iomem *db_addr, |
106 | enum qed_db_rec_width db_width, | ||
107 | void *db_data) | ||
106 | { | 108 | { |
109 | u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64; | ||
110 | |||
107 | /* Make sure doorbell address is within the doorbell bar */ | 111 | /* Make sure doorbell address is within the doorbell bar */ |
108 | if (db_addr < cdev->doorbells || | 112 | if (db_addr < cdev->doorbells || |
109 | (u8 __iomem *)db_addr > | 113 | (u8 __iomem *)db_addr + width > |
110 | (u8 __iomem *)cdev->doorbells + cdev->db_size) { | 114 | (u8 __iomem *)cdev->doorbells + cdev->db_size) { |
111 | WARN(true, | 115 | WARN(true, |
112 | "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", | 116 | "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", |
@@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev, | |||
159 | } | 163 | } |
160 | 164 | ||
161 | /* Sanitize doorbell address */ | 165 | /* Sanitize doorbell address */ |
162 | if (!qed_db_rec_sanity(cdev, db_addr, db_data)) | 166 | if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data)) |
163 | return -EINVAL; | 167 | return -EINVAL; |
164 | 168 | ||
165 | /* Obtain hwfn from doorbell address */ | 169 | /* Obtain hwfn from doorbell address */ |
@@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev, | |||
205 | return 0; | 209 | return 0; |
206 | } | 210 | } |
207 | 211 | ||
208 | /* Sanitize doorbell address */ | ||
209 | if (!qed_db_rec_sanity(cdev, db_addr, db_data)) | ||
210 | return -EINVAL; | ||
211 | |||
212 | /* Obtain hwfn from doorbell address */ | 212 | /* Obtain hwfn from doorbell address */ |
213 | p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); | 213 | p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); |
214 | 214 | ||
@@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn) | |||
300 | 300 | ||
301 | /* Ring the doorbell of a single doorbell recovery entry */ | 301 | /* Ring the doorbell of a single doorbell recovery entry */ |
302 | static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, | 302 | static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, |
303 | struct qed_db_recovery_entry *db_entry, | 303 | struct qed_db_recovery_entry *db_entry) |
304 | enum qed_db_rec_exec db_exec) | 304 | { |
305 | { | 305 | /* Print according to width */ |
306 | if (db_exec != DB_REC_ONCE) { | 306 | if (db_entry->db_width == DB_REC_WIDTH_32B) { |
307 | /* Print according to width */ | 307 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, |
308 | if (db_entry->db_width == DB_REC_WIDTH_32B) { | 308 | "ringing doorbell address %p data %x\n", |
309 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, | 309 | db_entry->db_addr, |
310 | "%s doorbell address %p data %x\n", | 310 | *(u32 *)db_entry->db_data); |
311 | db_exec == DB_REC_DRY_RUN ? | 311 | } else { |
312 | "would have rung" : "ringing", | 312 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, |
313 | db_entry->db_addr, | 313 | "ringing doorbell address %p data %llx\n", |
314 | *(u32 *)db_entry->db_data); | 314 | db_entry->db_addr, |
315 | } else { | 315 | *(u64 *)(db_entry->db_data)); |
316 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, | ||
317 | "%s doorbell address %p data %llx\n", | ||
318 | db_exec == DB_REC_DRY_RUN ? | ||
319 | "would have rung" : "ringing", | ||
320 | db_entry->db_addr, | ||
321 | *(u64 *)(db_entry->db_data)); | ||
322 | } | ||
323 | } | 316 | } |
324 | 317 | ||
325 | /* Sanity */ | 318 | /* Sanity */ |
326 | if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr, | 319 | if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr, |
327 | db_entry->db_data)) | 320 | db_entry->db_width, db_entry->db_data)) |
328 | return; | 321 | return; |
329 | 322 | ||
330 | /* Flush the write combined buffer. Since there are multiple doorbelling | 323 | /* Flush the write combined buffer. Since there are multiple doorbelling |
@@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, | |||
334 | wmb(); | 327 | wmb(); |
335 | 328 | ||
336 | /* Ring the doorbell */ | 329 | /* Ring the doorbell */ |
337 | if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) { | 330 | if (db_entry->db_width == DB_REC_WIDTH_32B) |
338 | if (db_entry->db_width == DB_REC_WIDTH_32B) | 331 | DIRECT_REG_WR(db_entry->db_addr, |
339 | DIRECT_REG_WR(db_entry->db_addr, | 332 | *(u32 *)(db_entry->db_data)); |
340 | *(u32 *)(db_entry->db_data)); | 333 | else |
341 | else | 334 | DIRECT_REG_WR64(db_entry->db_addr, |
342 | DIRECT_REG_WR64(db_entry->db_addr, | 335 | *(u64 *)(db_entry->db_data)); |
343 | *(u64 *)(db_entry->db_data)); | ||
344 | } | ||
345 | 336 | ||
346 | /* Flush the write combined buffer. Next doorbell may come from a | 337 | /* Flush the write combined buffer. Next doorbell may come from a |
347 | * different entity to the same address... | 338 | * different entity to the same address... |
@@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, | |||
350 | } | 341 | } |
351 | 342 | ||
352 | /* Traverse the doorbell recovery entry list and ring all the doorbells */ | 343 | /* Traverse the doorbell recovery entry list and ring all the doorbells */ |
353 | void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, | 344 | void qed_db_recovery_execute(struct qed_hwfn *p_hwfn) |
354 | enum qed_db_rec_exec db_exec) | ||
355 | { | 345 | { |
356 | struct qed_db_recovery_entry *db_entry = NULL; | 346 | struct qed_db_recovery_entry *db_entry = NULL; |
357 | 347 | ||
358 | if (db_exec != DB_REC_ONCE) { | 348 | DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n", |
359 | DP_NOTICE(p_hwfn, | 349 | p_hwfn->db_recovery_info.db_recovery_counter); |
360 | "Executing doorbell recovery. Counter was %d\n", | ||
361 | p_hwfn->db_recovery_info.db_recovery_counter); | ||
362 | 350 | ||
363 | /* Track amount of times recovery was executed */ | 351 | /* Track amount of times recovery was executed */ |
364 | p_hwfn->db_recovery_info.db_recovery_counter++; | 352 | p_hwfn->db_recovery_info.db_recovery_counter++; |
365 | } | ||
366 | 353 | ||
367 | /* Protect the list */ | 354 | /* Protect the list */ |
368 | spin_lock_bh(&p_hwfn->db_recovery_info.lock); | 355 | spin_lock_bh(&p_hwfn->db_recovery_info.lock); |
369 | list_for_each_entry(db_entry, | 356 | list_for_each_entry(db_entry, |
370 | &p_hwfn->db_recovery_info.list, list_entry) { | 357 | &p_hwfn->db_recovery_info.list, list_entry) |
371 | qed_db_recovery_ring(p_hwfn, db_entry, db_exec); | 358 | qed_db_recovery_ring(p_hwfn, db_entry); |
372 | if (db_exec == DB_REC_ONCE) | ||
373 | break; | ||
374 | } | ||
375 | |||
376 | spin_unlock_bh(&p_hwfn->db_recovery_info.lock); | 359 | spin_unlock_bh(&p_hwfn->db_recovery_info.lock); |
377 | } | 360 | } |
378 | 361 | ||
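
With the dry-run and run-once execution modes removed above, qed_db_recovery_execute unconditionally replays every saved doorbell. A compilable toy model of that "replay everything" loop, with an array standing in for the kernel's locked list and invented struct and field names:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy doorbell record: where to write and what to write (32-bit only). */
    struct db_entry {
            volatile uint32_t *addr;
            uint32_t           data;
    };

    struct db_recovery {
            struct db_entry entries[8];
            int             count;
            unsigned int    recovery_counter;   /* how many times recovery ran */
    };

    /* Replay every saved doorbell unconditionally. */
    static void db_recovery_execute(struct db_recovery *rec)
    {
            printf("executing doorbell recovery, counter was %u\n",
                   rec->recovery_counter);
            rec->recovery_counter++;

            for (int i = 0; i < rec->count; i++)
                    *rec->entries[i].addr = rec->entries[i].data;   /* "ring" it */
    }

    int main(void)
    {
            static volatile uint32_t regs[2];
            struct db_recovery rec = {
                    .entries = { { &regs[0], 0x11 }, { &regs[1], 0x22 } },
                    .count = 2,
            };

            db_recovery_execute(&rec);
            printf("regs: %x %x\n", (unsigned)regs[0], (unsigned)regs[1]);
            return 0;
    }
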
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index e23980e301b6..8848d5bed6e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c | |||
@@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, | |||
378 | u32 count = QED_DB_REC_COUNT; | 378 | u32 count = QED_DB_REC_COUNT; |
379 | u32 usage = 1; | 379 | u32 usage = 1; |
380 | 380 | ||
381 | /* Flush any pending (e)dpms as they may never arrive */ | ||
382 | qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); | ||
383 | |||
381 | /* wait for usage to zero or count to run out. This is necessary since | 384 | /* wait for usage to zero or count to run out. This is necessary since |
382 | * EDPM doorbell transactions can take multiple 64b cycles, and as such | 385 | * EDPM doorbell transactions can take multiple 64b cycles, and as such |
383 | * can "split" over the pci. Possibly, the doorbell drop can happen with | 386 | * can "split" over the pci. Possibly, the doorbell drop can happen with |
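
The hunk above force-aborts pending (e)dpms before the existing wait loop, which polls a usage counter until it drains or a retry budget runs out. A small standalone sketch of that bounded-poll pattern (the usage counter and its decay are invented for the demo; a real driver would read a DORQ register and delay between polls):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical usage counter that "drains" by one entry per poll. */
    static unsigned int fake_usage = 3;

    static unsigned int read_usage(void)
    {
            return fake_usage ? fake_usage-- : 0;
    }

    /* Poll until the queue drains or the retry budget runs out. */
    static bool flush_queue(unsigned int max_polls)
    {
            while (max_polls--) {
                    if (read_usage() == 0)
                            return true;      /* drained */
                    /* real code: wait a short while here before re-reading */
            }
            return false;                     /* budget exhausted: timeout */
    }

    int main(void)
    {
            printf("flushed: %d\n", flush_queue(10));
            return 0;
    }
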
@@ -406,51 +409,74 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, | |||
406 | 409 | ||
407 | int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 410 | int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
408 | { | 411 | { |
409 | u32 overflow; | 412 | u32 attn_ovfl, cur_ovfl; |
410 | int rc; | 413 | int rc; |
411 | 414 | ||
412 | overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); | 415 | attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT, |
413 | DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow); | 416 | &p_hwfn->db_recovery_info.overflow); |
414 | if (!overflow) { | 417 | cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); |
415 | qed_db_recovery_execute(p_hwfn, DB_REC_ONCE); | 418 | if (!cur_ovfl && !attn_ovfl) |
416 | return 0; | 419 | return 0; |
417 | } | ||
418 | 420 | ||
419 | if (qed_edpm_enabled(p_hwfn)) { | 421 | DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n", |
422 | attn_ovfl, cur_ovfl); | ||
423 | |||
424 | if (cur_ovfl && !p_hwfn->db_bar_no_edpm) { | ||
420 | rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); | 425 | rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); |
421 | if (rc) | 426 | if (rc) |
422 | return rc; | 427 | return rc; |
423 | } | 428 | } |
424 | 429 | ||
425 | /* Flush any pending (e)dpm as they may never arrive */ | ||
426 | qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); | ||
427 | |||
428 | /* Release overflow sticky indication (stop silently dropping everything) */ | 430 | /* Release overflow sticky indication (stop silently dropping everything) */ |
429 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); | 431 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); |
430 | 432 | ||
431 | /* Repeat all last doorbells (doorbell drop recovery) */ | 433 | /* Repeat all last doorbells (doorbell drop recovery) */ |
432 | qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); | 434 | qed_db_recovery_execute(p_hwfn); |
433 | 435 | ||
434 | return 0; | 436 | return 0; |
435 | } | 437 | } |
436 | 438 | ||
437 | static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) | 439 | static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn) |
438 | { | 440 | { |
439 | u32 int_sts, first_drop_reason, details, address, all_drops_reason; | ||
440 | struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; | 441 | struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; |
442 | u32 overflow; | ||
441 | int rc; | 443 | int rc; |
442 | 444 | ||
443 | int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); | 445 | overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); |
444 | DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); | 446 | if (!overflow) |
447 | goto out; | ||
448 | |||
449 | /* Run PF doorbell recovery in next periodic handler */ | ||
450 | set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow); | ||
451 | |||
452 | if (!p_hwfn->db_bar_no_edpm) { | ||
453 | rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); | ||
454 | if (rc) | ||
455 | goto out; | ||
456 | } | ||
457 | |||
458 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); | ||
459 | out: | ||
460 | /* Schedule the handler even if overflow was not detected */ | ||
461 | qed_periodic_db_rec_start(p_hwfn); | ||
462 | } | ||
463 | |||
464 | static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) | ||
465 | { | ||
466 | u32 int_sts, first_drop_reason, details, address, all_drops_reason; | ||
467 | struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; | ||
445 | 468 | ||
446 | /* int_sts may be zero since all PFs were interrupted for doorbell | 469 | /* int_sts may be zero since all PFs were interrupted for doorbell |
447 | * overflow but another one already handled it. Can abort here. If | 470 | * overflow but another one already handled it. Can abort here. If |
448 | * This PF also requires overflow recovery we will be interrupted again. | 471 | * This PF also requires overflow recovery we will be interrupted again. |
449 | * The masked almost full indication may also be set. Ignoring. | 472 | * The masked almost full indication may also be set. Ignoring. |
450 | */ | 473 | */ |
474 | int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); | ||
451 | if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) | 475 | if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) |
452 | return 0; | 476 | return 0; |
453 | 477 | ||
478 | DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); | ||
479 | |||
454 | /* check if db_drop or overflow happened */ | 480 | /* check if db_drop or overflow happened */ |
455 | if (int_sts & (DORQ_REG_INT_STS_DB_DROP | | 481 | if (int_sts & (DORQ_REG_INT_STS_DB_DROP | |
456 | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { | 482 | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { |
@@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) | |||
477 | GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, | 503 | GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, |
478 | first_drop_reason, all_drops_reason); | 504 | first_drop_reason, all_drops_reason); |
479 | 505 | ||
480 | rc = qed_db_rec_handler(p_hwfn, p_ptt); | ||
481 | qed_periodic_db_rec_start(p_hwfn); | ||
482 | if (rc) | ||
483 | return rc; | ||
484 | |||
485 | /* Clear the doorbell drop details and prepare for next drop */ | 506 | /* Clear the doorbell drop details and prepare for next drop */ |
486 | qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); | 507 | qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); |
487 | 508 | ||
@@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) | |||
507 | return -EINVAL; | 528 | return -EINVAL; |
508 | } | 529 | } |
509 | 530 | ||
531 | static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) | ||
532 | { | ||
533 | p_hwfn->db_recovery_info.dorq_attn = true; | ||
534 | qed_dorq_attn_overflow(p_hwfn); | ||
535 | |||
536 | return qed_dorq_attn_int_sts(p_hwfn); | ||
537 | } | ||
538 | |||
539 | static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn) | ||
540 | { | ||
541 | if (p_hwfn->db_recovery_info.dorq_attn) | ||
542 | goto out; | ||
543 | |||
544 | /* Call DORQ callback if the attention was missed */ | ||
545 | qed_dorq_attn_cb(p_hwfn); | ||
546 | out: | ||
547 | p_hwfn->db_recovery_info.dorq_attn = false; | ||
548 | } | ||
549 | |||
510 | /* Instead of major changes to the data-structure, we have some 'special' | 550 | /* Instead of major changes to the data-structure, we have some 'special' |
511 | * identifiers for sources that changed meaning between adapters. | 551 | * identifiers for sources that changed meaning between adapters. |
512 | */ | 552 | */ |
@@ -1080,6 +1120,9 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, | |||
1080 | } | 1120 | } |
1081 | } | 1121 | } |
1082 | 1122 | ||
1123 | /* Handle missed DORQ attention */ | ||
1124 | qed_dorq_attn_handler(p_hwfn); | ||
1125 | |||
1083 | /* Clear IGU indication for the deasserted bits */ | 1126 | /* Clear IGU indication for the deasserted bits */ |
1084 | DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + | 1127 | DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + |
1085 | GTT_BAR0_MAP_REG_IGU_CMD + | 1128 | GTT_BAR0_MAP_REG_IGU_CMD + |
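
The qed_int.c changes above split the DORQ callback into overflow handling and status decoding, and add a catch-all invoked at the end of the deassertion sweep in case the per-cause attention bit was aggregated away. A compilable toy of that "missed attention" flag pattern (names are invented; the real code also checks the overflow sticky and schedules recovery):

    #include <stdbool.h>
    #include <stdio.h>

    struct db_recovery_info {
            bool dorq_attn;    /* set whenever the DORQ callback actually ran */
    };

    static struct db_recovery_info info;

    static void dorq_attn_cb(void)
    {
            info.dorq_attn = true;
            printf("DORQ attention handled\n");
    }

    /* Called once per deassertion sweep: invoke the callback if it was missed. */
    static void dorq_attn_handler(void)
    {
            if (!info.dorq_attn)
                    dorq_attn_cb();          /* attention bit never reached us */
            info.dorq_attn = false;          /* re-arm for the next sweep */
    }

    int main(void)
    {
            /* Sweep 1: the per-cause callback fired normally. */
            dorq_attn_cb();
            dorq_attn_handler();

            /* Sweep 2: nothing fired, so the fallback runs it. */
            dorq_attn_handler();
            return 0;
    }
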
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 1f356ed4f761..d473b522afc5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h | |||
@@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev); | |||
192 | 192 | ||
193 | /** | 193 | /** |
194 | * @brief - Doorbell Recovery handler. | 194 | * @brief - Doorbell Recovery handler. |
195 | * Run DB_REAL_DEAL doorbell recovery in case of PF overflow | 195 | * Run doorbell recovery in case of PF overflow (and flush DORQ if |
196 | * (and flush DORQ if needed), otherwise run DB_REC_ONCE. | 196 | * needed). |
197 | * | 197 | * |
198 | * @param p_hwfn | 198 | * @param p_hwfn |
199 | * @param p_ptt | 199 | * @param p_ptt |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index f164d4acebcb..6de23b56b294 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
@@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, | |||
970 | } | 970 | } |
971 | } | 971 | } |
972 | 972 | ||
973 | #define QED_PERIODIC_DB_REC_COUNT 100 | 973 | #define QED_PERIODIC_DB_REC_COUNT 10 |
974 | #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 | 974 | #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 |
975 | #define QED_PERIODIC_DB_REC_INTERVAL \ | 975 | #define QED_PERIODIC_DB_REC_INTERVAL \ |
976 | msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) | 976 | msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) |
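
The constants above cut the periodic doorbell-recovery count from 100 to 10 while keeping the 100 ms interval. Assuming the count bounds how many times the periodic handler reschedules itself (an editorial reading, not stated in the hunk), the implied budget works out as follows:

    #include <stdio.h>

    /* Values as in the hunk above; the total-budget reading is an assumption. */
    #define QED_PERIODIC_DB_REC_COUNT       10
    #define QED_PERIODIC_DB_REC_INTERVAL_MS 100

    int main(void)
    {
            /* 10 attempts * 100 ms = 1000 ms of periodic recovery polling,
             * down from roughly 10 s with the previous count of 100.
             */
            printf("budget: %d ms\n",
                   QED_PERIODIC_DB_REC_COUNT * QED_PERIODIC_DB_REC_INTERVAL_MS);
            return 0;
    }
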
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 9faaa6df78ed..2f318aaf2b05 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c | |||
@@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, | |||
1591 | p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; | 1591 | p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; |
1592 | } else { | 1592 | } else { |
1593 | DP_INFO(p_hwfn, | 1593 | DP_INFO(p_hwfn, |
1594 | "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", | 1594 | "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n", |
1595 | vf->abs_vf_id, | 1595 | vf->abs_vf_id, |
1596 | req->vfdev_info.eth_fp_hsi_major, | 1596 | req->vfdev_info.eth_fp_hsi_major, |
1597 | req->vfdev_info.eth_fp_hsi_minor, | 1597 | req->vfdev_info.eth_fp_hsi_minor, |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 5f3f42a25361..bddb2b5982dc 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c | |||
@@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc) | |||
490 | 490 | ||
491 | ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); | 491 | ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); |
492 | if (IS_ERR(ptp->clock)) { | 492 | if (IS_ERR(ptp->clock)) { |
493 | rc = -EINVAL; | ||
494 | DP_ERR(edev, "PTP clock registration failed\n"); | 493 | DP_ERR(edev, "PTP clock registration failed\n"); |
494 | qede_ptp_disable(edev); | ||
495 | rc = -EINVAL; | ||
495 | goto err2; | 496 | goto err2; |
496 | } | 497 | } |
497 | 498 | ||
498 | return 0; | 499 | return 0; |
499 | 500 | ||
500 | err2: | ||
501 | qede_ptp_disable(edev); | ||
502 | ptp->clock = NULL; | ||
503 | err1: | 501 | err1: |
504 | kfree(ptp); | 502 | kfree(ptp); |
503 | err2: | ||
505 | edev->ptp = NULL; | 504 | edev->ptp = NULL; |
506 | 505 | ||
507 | return rc; | 506 | return rc; |
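
The qede_ptp hunk above reworks the error path so the full teardown helper runs on clock-registration failure and the goto labels unwind only what has not already been torn down, avoiding a double free. A standalone sketch of that error-label discipline (all types, helpers, and the failure knob are invented; the real helper also unregisters hardware state):

    #include <stdio.h>
    #include <stdlib.h>

    struct clock { int dummy; };
    struct ptp   { struct clock *clock; };
    struct edev  { struct ptp *ptp; };

    /* Hypothetical failure knob so the example can exercise the error path. */
    static struct clock *clock_register(int fail)
    {
            return fail ? NULL : calloc(1, sizeof(struct clock));
    }

    /* Full teardown: frees everything and clears the back-pointer. */
    static void ptp_disable(struct edev *edev)
    {
            struct ptp *ptp = edev->ptp;

            if (!ptp)
                    return;
            free(ptp->clock);            /* free(NULL) is a no-op */
            free(ptp);
            edev->ptp = NULL;
    }

    static int ptp_enable(struct edev *edev, int fail_register)
    {
            struct ptp *ptp;
            int rc;

            ptp = calloc(1, sizeof(*ptp));
            if (!ptp) {
                    rc = -1;
                    goto err1;
            }
            edev->ptp = ptp;

            ptp->clock = clock_register(fail_register);
            if (!ptp->clock) {
                    fprintf(stderr, "clock registration failed\n");
                    ptp_disable(edev);   /* full teardown already frees ptp ... */
                    rc = -1;
                    goto err2;           /* ... so skip the partial-cleanup label */
            }
            return 0;

    err1:
            free(ptp);                   /* only when ptp was never published */
    err2:
            edev->ptp = NULL;
            return rc;
    }

    int main(void)
    {
            struct edev edev = { 0 };

            printf("%d\n", ptp_enable(&edev, 1));   /* exercise the error path */
            return 0;
    }
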
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6ed96fdfd96d..9ce61b019aad 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -1246,6 +1246,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev, | |||
1246 | goto err_option_port_add; | 1246 | goto err_option_port_add; |
1247 | } | 1247 | } |
1248 | 1248 | ||
1249 | /* set promiscuity level to new slave */ | ||
1250 | if (dev->flags & IFF_PROMISC) { | ||
1251 | err = dev_set_promiscuity(port_dev, 1); | ||
1252 | if (err) | ||
1253 | goto err_set_slave_promisc; | ||
1254 | } | ||
1255 | |||
1256 | /* set allmulti level to new slave */ | ||
1257 | if (dev->flags & IFF_ALLMULTI) { | ||
1258 | err = dev_set_allmulti(port_dev, 1); | ||
1259 | if (err) { | ||
1260 | if (dev->flags & IFF_PROMISC) | ||
1261 | dev_set_promiscuity(port_dev, -1); | ||
1262 | goto err_set_slave_promisc; | ||
1263 | } | ||
1264 | } | ||
1265 | |||
1249 | netif_addr_lock_bh(dev); | 1266 | netif_addr_lock_bh(dev); |
1250 | dev_uc_sync_multiple(port_dev, dev); | 1267 | dev_uc_sync_multiple(port_dev, dev); |
1251 | dev_mc_sync_multiple(port_dev, dev); | 1268 | dev_mc_sync_multiple(port_dev, dev); |
@@ -1262,6 +1279,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev, | |||
1262 | 1279 | ||
1263 | return 0; | 1280 | return 0; |
1264 | 1281 | ||
1282 | err_set_slave_promisc: | ||
1283 | __team_option_inst_del_port(team, port); | ||
1284 | |||
1265 | err_option_port_add: | 1285 | err_option_port_add: |
1266 | team_upper_dev_unlink(team, port); | 1286 | team_upper_dev_unlink(team, port); |
1267 | 1287 | ||
@@ -1307,6 +1327,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev) | |||
1307 | 1327 | ||
1308 | team_port_disable(team, port); | 1328 | team_port_disable(team, port); |
1309 | list_del_rcu(&port->list); | 1329 | list_del_rcu(&port->list); |
1330 | |||
1331 | if (dev->flags & IFF_PROMISC) | ||
1332 | dev_set_promiscuity(port_dev, -1); | ||
1333 | if (dev->flags & IFF_ALLMULTI) | ||
1334 | dev_set_allmulti(port_dev, -1); | ||
1335 | |||
1310 | team_upper_dev_unlink(team, port); | 1336 | team_upper_dev_unlink(team, port); |
1311 | netdev_rx_handler_unregister(port_dev); | 1337 | netdev_rx_handler_unregister(port_dev); |
1312 | team_port_disable_netpoll(port); | 1338 | team_port_disable_netpoll(port); |
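
The team changes above propagate the master's promiscuous and allmulti levels to a newly added slave, roll back the first step if the second fails, and drop both on port removal. A compilable toy of that apply-then-rollback pattern (device struct, helpers, and the failure-injection flag are all invented for the demo):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy "device" with reference-counted promisc/allmulti levels. */
    struct netdev {
            const char *name;
            int promisc;
            int allmulti;
            bool fail_allmulti;   /* hypothetical failure injection */
    };

    static int set_promisc(struct netdev *dev, int inc)
    {
            dev->promisc += inc;
            return 0;
    }

    static int set_allmulti(struct netdev *dev, int inc)
    {
            if (inc > 0 && dev->fail_allmulti)
                    return -1;
            dev->allmulti += inc;
            return 0;
    }

    /* Propagate the master's flags to a new slave, rolling back on failure. */
    static int port_add(const struct netdev *master, struct netdev *slave)
    {
            int err;

            if (master->promisc) {
                    err = set_promisc(slave, 1);
                    if (err)
                            return err;
            }
            if (master->allmulti) {
                    err = set_allmulti(slave, 1);
                    if (err) {
                            if (master->promisc)
                                    set_promisc(slave, -1);   /* undo step 1 */
                            return err;
                    }
            }
            return 0;
    }

    int main(void)
    {
            struct netdev master = { "team0", 1, 1, false };
            struct netdev slave  = { "eth0", 0, 0, .fail_allmulti = true };

            printf("add: %d, slave promisc now %d\n",
                   port_add(&master, &slave), slave.promisc);
            return 0;
    }
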
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index a20ea270d519..1acc622d2183 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c | |||
@@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) | |||
2728 | num_msdus++; | 2728 | num_msdus++; |
2729 | num_bytes += ret; | 2729 | num_bytes += ret; |
2730 | } | 2730 | } |
2731 | ieee80211_return_txq(hw, txq); | 2731 | ieee80211_return_txq(hw, txq, false); |
2732 | ieee80211_txq_schedule_end(hw, txq->ac); | 2732 | ieee80211_txq_schedule_end(hw, txq->ac); |
2733 | 2733 | ||
2734 | record->num_msdus = cpu_to_le16(num_msdus); | 2734 | record->num_msdus = cpu_to_le16(num_msdus); |
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b73c23d4ce86..41e89db244d2 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c | |||
@@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac) | |||
4089 | if (ret < 0) | 4089 | if (ret < 0) |
4090 | break; | 4090 | break; |
4091 | } | 4091 | } |
4092 | ieee80211_return_txq(hw, txq); | 4092 | ieee80211_return_txq(hw, txq, false); |
4093 | ath10k_htt_tx_txq_update(hw, txq); | 4093 | ath10k_htt_tx_txq_update(hw, txq); |
4094 | if (ret == -EBUSY) | 4094 | if (ret == -EBUSY) |
4095 | break; | 4095 | break; |
@@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, | |||
4374 | if (ret < 0) | 4374 | if (ret < 0) |
4375 | break; | 4375 | break; |
4376 | } | 4376 | } |
4377 | ieee80211_return_txq(hw, txq); | 4377 | ieee80211_return_txq(hw, txq, false); |
4378 | ath10k_htt_tx_txq_update(hw, txq); | 4378 | ath10k_htt_tx_txq_update(hw, txq); |
4379 | out: | 4379 | out: |
4380 | ieee80211_txq_schedule_end(hw, ac); | 4380 | ieee80211_txq_schedule_end(hw, ac); |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 773d428ff1b0..b17e1ca40995 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) | |||
1938 | goto out; | 1938 | goto out; |
1939 | 1939 | ||
1940 | while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) { | 1940 | while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) { |
1941 | bool force; | ||
1942 | |||
1941 | tid = (struct ath_atx_tid *)queue->drv_priv; | 1943 | tid = (struct ath_atx_tid *)queue->drv_priv; |
1942 | 1944 | ||
1943 | ret = ath_tx_sched_aggr(sc, txq, tid); | 1945 | ret = ath_tx_sched_aggr(sc, txq, tid); |
1944 | ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret); | 1946 | ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret); |
1945 | 1947 | ||
1946 | ieee80211_return_txq(hw, queue); | 1948 | force = !skb_queue_empty(&tid->retry_q); |
1949 | ieee80211_return_txq(hw, queue, force); | ||
1947 | } | 1950 | } |
1948 | 1951 | ||
1949 | out: | 1952 | out: |
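
The ath9k hunk above returns the queue to the scheduler with force set whenever the TID's retry queue is non-empty, so a queue that yielded nothing this round but still holds software retries stays schedulable. A tiny abstract model of that decision (the txq struct and helper are invented; the real API keys off the driver's retry queue):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy TX queue: a count of pending retry frames and a scheduled flag. */
    struct txq {
            int  retry_frames;
            bool scheduled;
    };

    /* Keep the queue scheduled if it has more work, or if 'force' says so. */
    static void return_txq(struct txq *q, bool has_more, bool force)
    {
            q->scheduled = has_more || force;
    }

    int main(void)
    {
            struct txq q = { .retry_frames = 3 };

            /* Nothing was pulled this round, but retries exist: keep it queued. */
            return_txq(&q, false, q.retry_frames > 0);
            printf("still scheduled: %d\n", q.scheduled);
            return 0;
    }
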
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index fdc56f821b5a..eb6defb6d0cd 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c | |||
@@ -82,6 +82,7 @@ | |||
82 | #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" | 82 | #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" |
83 | #define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" | 83 | #define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" |
84 | #define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" | 84 | #define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" |
85 | #define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-" | ||
85 | #define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" | 86 | #define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" |
86 | #define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" | 87 | #define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" |
87 | #define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" | 88 | #define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" |
@@ -105,8 +106,8 @@ | |||
105 | IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" | 106 | IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" |
106 | #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \ | 107 | #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \ |
107 | IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" | 108 | IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" |
108 | #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ | 109 | #define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \ |
109 | IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" | 110 | IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode" |
110 | #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ | 111 | #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ |
111 | IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" | 112 | IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" |
112 | #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ | 113 | #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ |
@@ -235,8 +236,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = { | |||
235 | .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, | 236 | .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, |
236 | }; | 237 | }; |
237 | 238 | ||
238 | const struct iwl_cfg iwl22260_2ax_cfg = { | 239 | const struct iwl_cfg iwl_ax101_cfg_quz_hr = { |
239 | .name = "Intel(R) Wireless-AX 22260", | 240 | .name = "Intel(R) Wi-Fi 6 AX101", |
241 | .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, | ||
242 | IWL_DEVICE_22500, | ||
243 | /* | ||
244 | * This device doesn't support receiving BlockAck with a large bitmap | ||
245 | * so we need to restrict the size of transmitted aggregation to the | ||
246 | * HT size; mac80211 would otherwise pick the HE max (256) by default. | ||
247 | */ | ||
248 | .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, | ||
249 | }; | ||
250 | |||
251 | const struct iwl_cfg iwl_ax200_cfg_cc = { | ||
252 | .name = "Intel(R) Wi-Fi 6 AX200 160MHz", | ||
240 | .fw_name_pre = IWL_CC_A_FW_PRE, | 253 | .fw_name_pre = IWL_CC_A_FW_PRE, |
241 | IWL_DEVICE_22500, | 254 | IWL_DEVICE_22500, |
242 | /* | 255 | /* |
@@ -249,7 +262,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = { | |||
249 | }; | 262 | }; |
250 | 263 | ||
251 | const struct iwl_cfg killer1650x_2ax_cfg = { | 264 | const struct iwl_cfg killer1650x_2ax_cfg = { |
252 | .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)", | 265 | .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)", |
253 | .fw_name_pre = IWL_CC_A_FW_PRE, | 266 | .fw_name_pre = IWL_CC_A_FW_PRE, |
254 | IWL_DEVICE_22500, | 267 | IWL_DEVICE_22500, |
255 | /* | 268 | /* |
@@ -262,7 +275,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = { | |||
262 | }; | 275 | }; |
263 | 276 | ||
264 | const struct iwl_cfg killer1650w_2ax_cfg = { | 277 | const struct iwl_cfg killer1650w_2ax_cfg = { |
265 | .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)", | 278 | .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)", |
266 | .fw_name_pre = IWL_CC_A_FW_PRE, | 279 | .fw_name_pre = IWL_CC_A_FW_PRE, |
267 | IWL_DEVICE_22500, | 280 | IWL_DEVICE_22500, |
268 | /* | 281 | /* |
@@ -328,7 +341,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = { | |||
328 | }; | 341 | }; |
329 | 342 | ||
330 | const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { | 343 | const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { |
331 | .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)", | 344 | .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", |
332 | .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, | 345 | .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, |
333 | IWL_DEVICE_22500, | 346 | IWL_DEVICE_22500, |
334 | /* | 347 | /* |
@@ -340,7 +353,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { | |||
340 | }; | 353 | }; |
341 | 354 | ||
342 | const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { | 355 | const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { |
343 | .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)", | 356 | .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", |
344 | .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, | 357 | .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, |
345 | IWL_DEVICE_22500, | 358 | IWL_DEVICE_22500, |
346 | /* | 359 | /* |
@@ -444,6 +457,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | |||
444 | MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | 457 | MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); |
445 | MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | 458 | MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); |
446 | MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | 459 | MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); |
460 | MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | ||
447 | MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | 461 | MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); |
448 | MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | 462 | MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); |
449 | MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | 463 | MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); |
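
The 22000.c hunk above replaces what was a duplicated IWL_QU_B_JF_B_MODULE_FIRMWARE definition with the new QuZ macro and registers the extra firmware image. A standalone sketch of the stringify-based firmware-name construction the driver relies on (only the QuZ prefix is copied from the diff; STR() stands in for the kernel's __stringify()):

    #include <stdio.h>

    /* Two-level stringify, same idea as the kernel's __stringify(). */
    #define STR_(x) #x
    #define STR(x)  STR_(x)

    /* Prefix as in the hunk above. */
    #define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"

    /* Paste prefix + API number + ".ucode" at compile time. */
    #define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
            IWL_QUZ_A_HR_B_FW_PRE STR(api) ".ucode"

    int main(void)
    {
            /* With API 46 this expands to "iwlwifi-QuZ-a0-hr-b0-46.ucode". */
            puts(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(46));
            return 0;
    }
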
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index f119c49cd39c..d7380016f1c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c | |||
@@ -1614,6 +1614,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, | |||
1614 | if (!range) { | 1614 | if (!range) { |
1615 | IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n", | 1615 | IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n", |
1616 | le32_to_cpu(reg->region_id), type); | 1616 | le32_to_cpu(reg->region_id), type); |
1617 | memset(*data, 0, le32_to_cpu((*data)->len)); | ||
1617 | return; | 1618 | return; |
1618 | } | 1619 | } |
1619 | 1620 | ||
@@ -1623,6 +1624,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, | |||
1623 | if (range_size < 0) { | 1624 | if (range_size < 0) { |
1624 | IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n", | 1625 | IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n", |
1625 | le32_to_cpu(reg->region_id), type); | 1626 | le32_to_cpu(reg->region_id), type); |
1627 | memset(*data, 0, le32_to_cpu((*data)->len)); | ||
1626 | return; | 1628 | return; |
1627 | } | 1629 | } |
1628 | range = range + range_size; | 1630 | range = range + range_size; |
@@ -1807,12 +1809,12 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, | |||
1807 | 1809 | ||
1808 | trigger = fwrt->dump.active_trigs[id].trig; | 1810 | trigger = fwrt->dump.active_trigs[id].trig; |
1809 | 1811 | ||
1810 | size = sizeof(*dump_file); | 1812 | size = iwl_fw_ini_get_trigger_len(fwrt, trigger); |
1811 | size += iwl_fw_ini_get_trigger_len(fwrt, trigger); | ||
1812 | |||
1813 | if (!size) | 1813 | if (!size) |
1814 | return NULL; | 1814 | return NULL; |
1815 | 1815 | ||
1816 | size += sizeof(*dump_file); | ||
1817 | |||
1816 | dump_file = vzalloc(size); | 1818 | dump_file = vzalloc(size); |
1817 | if (!dump_file) | 1819 | if (!dump_file) |
1818 | return NULL; | 1820 | return NULL; |
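
The hunk above fixes an ordering bug: the old code added the constant header size before testing for an empty trigger, so the zero check could never fire. A small sketch of the corrected order, check the variable part first, then add the fixed header (the header struct and payload helper are invented):

    #include <stddef.h>
    #include <stdio.h>

    struct dump_file_hdr { unsigned int magic, len; };

    /* Hypothetical payload-size helper standing in for the trigger query. */
    static size_t trigger_len(int have_regions)
    {
            return have_regions ? 128 : 0;
    }

    /* Bail out on an empty payload *before* adding the constant header. */
    static size_t dump_size(int have_regions)
    {
            size_t size = trigger_len(have_regions);

            if (!size)
                    return 0;              /* nothing to dump: skip allocation */

            return size + sizeof(struct dump_file_hdr);
    }

    int main(void)
    {
            printf("empty:     %zu\n", dump_size(0));   /* 0 -> no allocation */
            printf("non-empty: %zu\n", dump_size(1));   /* payload + header   */
            return 0;
    }
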
@@ -1942,14 +1944,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, | |||
1942 | iwl_dump_error_desc->len = 0; | 1944 | iwl_dump_error_desc->len = 0; |
1943 | 1945 | ||
1944 | ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0); | 1946 | ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0); |
1945 | if (ret) { | 1947 | if (ret) |
1946 | kfree(iwl_dump_error_desc); | 1948 | kfree(iwl_dump_error_desc); |
1947 | } else { | 1949 | else |
1948 | set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status); | 1950 | iwl_trans_sync_nmi(fwrt->trans); |
1949 | |||
1950 | /* trigger nmi to halt the fw */ | ||
1951 | iwl_force_nmi(fwrt->trans); | ||
1952 | } | ||
1953 | 1951 | ||
1954 | return ret; | 1952 | return ret; |
1955 | } | 1953 | } |
@@ -2489,22 +2487,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point); | |||
2489 | 2487 | ||
2490 | void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt) | 2488 | void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt) |
2491 | { | 2489 | { |
2492 | /* if the wait event timeout elapses instead of wake up then | ||
2493 | * the driver did not receive NMI interrupt and can not assume the FW | ||
2494 | * is halted | ||
2495 | */ | ||
2496 | int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq, | ||
2497 | !test_bit(STATUS_FW_WAIT_DUMP, | ||
2498 | &fwrt->trans->status), | ||
2499 | msecs_to_jiffies(2000)); | ||
2500 | if (!ret) { | ||
2501 | /* failed to receive NMI interrupt, assuming the FW is stuck */ | ||
2502 | set_bit(STATUS_FW_ERROR, &fwrt->trans->status); | ||
2503 | |||
2504 | clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status); | ||
2505 | } | ||
2506 | |||
2507 | /* Assuming the op mode mutex is held at this point */ | ||
2508 | iwl_fw_dbg_collect_sync(fwrt); | 2490 | iwl_fw_dbg_collect_sync(fwrt); |
2509 | 2491 | ||
2510 | iwl_trans_stop_device(fwrt->trans); | 2492 | iwl_trans_stop_device(fwrt->trans); |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index 7adf4e4e841a..12310e3d2fc5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c | |||
@@ -76,7 +76,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, | |||
76 | fwrt->ops_ctx = ops_ctx; | 76 | fwrt->ops_ctx = ops_ctx; |
77 | INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk); | 77 | INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk); |
78 | iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir); | 78 | iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir); |
79 | init_waitqueue_head(&fwrt->trans->fw_halt_waitq); | ||
80 | } | 79 | } |
81 | IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); | 80 | IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); |
82 | 81 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index f5f87773667b..93070848280a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h | |||
@@ -549,8 +549,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr; | |||
549 | extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; | 549 | extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; |
550 | extern const struct iwl_cfg iwl22000_2ac_cfg_jf; | 550 | extern const struct iwl_cfg iwl22000_2ac_cfg_jf; |
551 | extern const struct iwl_cfg iwl_ax101_cfg_qu_hr; | 551 | extern const struct iwl_cfg iwl_ax101_cfg_qu_hr; |
552 | extern const struct iwl_cfg iwl_ax101_cfg_quz_hr; | ||
552 | extern const struct iwl_cfg iwl22000_2ax_cfg_hr; | 553 | extern const struct iwl_cfg iwl22000_2ax_cfg_hr; |
553 | extern const struct iwl_cfg iwl22260_2ax_cfg; | 554 | extern const struct iwl_cfg iwl_ax200_cfg_cc; |
554 | extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; | 555 | extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; |
555 | extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; | 556 | extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; |
556 | extern const struct iwl_cfg killer1650x_2ax_cfg; | 557 | extern const struct iwl_cfg killer1650x_2ax_cfg; |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index aea6d03e545a..e539bc94eff7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h | |||
@@ -327,6 +327,7 @@ enum { | |||
327 | #define CSR_HW_REV_TYPE_NONE (0x00001F0) | 327 | #define CSR_HW_REV_TYPE_NONE (0x00001F0) |
328 | #define CSR_HW_REV_TYPE_QNJ (0x0000360) | 328 | #define CSR_HW_REV_TYPE_QNJ (0x0000360) |
329 | #define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364) | 329 | #define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364) |
330 | #define CSR_HW_REV_TYPE_QUZ (0x0000354) | ||
330 | #define CSR_HW_REV_TYPE_HR_CDB (0x0000340) | 331 | #define CSR_HW_REV_TYPE_HR_CDB (0x0000340) |
331 | #define CSR_HW_REV_TYPE_SO (0x0000370) | 332 | #define CSR_HW_REV_TYPE_SO (0x0000370) |
332 | #define CSR_HW_REV_TYPE_TY (0x0000420) | 333 | #define CSR_HW_REV_TYPE_TY (0x0000420) |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index bbebbf3efd57..d8690acee40c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h | |||
@@ -338,7 +338,6 @@ enum iwl_d3_status { | |||
338 | * are sent | 338 | * are sent |
339 | * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent | 339 | * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent |
340 | * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation | 340 | * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation |
341 | * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump | ||
342 | */ | 341 | */ |
343 | enum iwl_trans_status { | 342 | enum iwl_trans_status { |
344 | STATUS_SYNC_HCMD_ACTIVE, | 343 | STATUS_SYNC_HCMD_ACTIVE, |
@@ -351,7 +350,6 @@ enum iwl_trans_status { | |||
351 | STATUS_TRANS_GOING_IDLE, | 350 | STATUS_TRANS_GOING_IDLE, |
352 | STATUS_TRANS_IDLE, | 351 | STATUS_TRANS_IDLE, |
353 | STATUS_TRANS_DEAD, | 352 | STATUS_TRANS_DEAD, |
354 | STATUS_FW_WAIT_DUMP, | ||
355 | }; | 353 | }; |
356 | 354 | ||
357 | static inline int | 355 | static inline int |
@@ -618,6 +616,7 @@ struct iwl_trans_ops { | |||
618 | struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, | 616 | struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, |
619 | u32 dump_mask); | 617 | u32 dump_mask); |
620 | void (*debugfs_cleanup)(struct iwl_trans *trans); | 618 | void (*debugfs_cleanup)(struct iwl_trans *trans); |
619 | void (*sync_nmi)(struct iwl_trans *trans); | ||
621 | }; | 620 | }; |
622 | 621 | ||
623 | /** | 622 | /** |
@@ -831,7 +830,6 @@ struct iwl_trans { | |||
831 | u32 lmac_error_event_table[2]; | 830 | u32 lmac_error_event_table[2]; |
832 | u32 umac_error_event_table; | 831 | u32 umac_error_event_table; |
833 | unsigned int error_event_table_tlv_status; | 832 | unsigned int error_event_table_tlv_status; |
834 | wait_queue_head_t fw_halt_waitq; | ||
835 | 833 | ||
836 | /* pointer to trans specific struct */ | 834 | /* pointer to trans specific struct */ |
837 | /*Ensure that this pointer will always be aligned to sizeof pointer */ | 835 | /*Ensure that this pointer will always be aligned to sizeof pointer */ |
@@ -1239,10 +1237,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans) | |||
1239 | /* prevent double restarts due to the same erroneous FW */ | 1237 | /* prevent double restarts due to the same erroneous FW */ |
1240 | if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) | 1238 | if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) |
1241 | iwl_op_mode_nic_error(trans->op_mode); | 1239 | iwl_op_mode_nic_error(trans->op_mode); |
1240 | } | ||
1242 | 1241 | ||
1243 | if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status)) | 1242 | static inline void iwl_trans_sync_nmi(struct iwl_trans *trans) |
1244 | wake_up(&trans->fw_halt_waitq); | 1243 | { |
1245 | 1244 | if (trans->ops->sync_nmi) | |
1245 | trans->ops->sync_nmi(trans); | ||
1246 | } | 1246 | } |
1247 | 1247 | ||
1248 | /***************************************************** | 1248 | /***************************************************** |
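
The iwl-trans.h change above drops the waitqueue handshake in favour of an optional sync_nmi hook in the transport ops table, called only when the backend provides it. A compilable sketch of that NULL-checked optional-op dispatch (struct names and the print statements are illustrative, not the driver's API):

    #include <stdio.h>

    struct trans;

    /* Transport ops table with an optional hook; NULL means "not supported". */
    struct trans_ops {
            void (*sync_nmi)(struct trans *trans);
    };

    struct trans {
            const struct trans_ops *ops;
            const char *name;
    };

    /* Thin wrapper: call the hook only if the backend implements it. */
    static void trans_sync_nmi(struct trans *trans)
    {
            if (trans->ops->sync_nmi)
                    trans->ops->sync_nmi(trans);
    }

    static void pcie_sync_nmi(struct trans *trans)
    {
            printf("%s: firing NMI and waiting for it synchronously\n",
                   trans->name);
    }

    static const struct trans_ops pcie_ops  = { .sync_nmi = pcie_sync_nmi };
    static const struct trans_ops other_ops = { 0 };   /* hook not implemented */

    int main(void)
    {
            struct trans pcie  = { &pcie_ops,  "pcie"  };
            struct trans other = { &other_ops, "other" };

            trans_sync_nmi(&pcie);    /* dispatches to the PCIe implementation */
            trans_sync_nmi(&other);   /* silently does nothing */
            return 0;
    }
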
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 3a92c09d4692..6a3b11dd2edf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
@@ -2714,9 +2714,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, | |||
2714 | 2714 | ||
2715 | iwl_mvm_mac_ctxt_remove(mvm, vif); | 2715 | iwl_mvm_mac_ctxt_remove(mvm, vif); |
2716 | 2716 | ||
2717 | kfree(mvmvif->ap_wep_key); | ||
2718 | mvmvif->ap_wep_key = NULL; | ||
2719 | |||
2720 | mutex_unlock(&mvm->mutex); | 2717 | mutex_unlock(&mvm->mutex); |
2721 | } | 2718 | } |
2722 | 2719 | ||
@@ -3183,24 +3180,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, | |||
3183 | ret = iwl_mvm_update_sta(mvm, vif, sta); | 3180 | ret = iwl_mvm_update_sta(mvm, vif, sta); |
3184 | } else if (old_state == IEEE80211_STA_ASSOC && | 3181 | } else if (old_state == IEEE80211_STA_ASSOC && |
3185 | new_state == IEEE80211_STA_AUTHORIZED) { | 3182 | new_state == IEEE80211_STA_AUTHORIZED) { |
3186 | /* if wep is used, need to set the key for the station now */ | 3183 | ret = 0; |
3187 | if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) { | ||
3188 | mvm_sta->wep_key = | ||
3189 | kmemdup(mvmvif->ap_wep_key, | ||
3190 | sizeof(*mvmvif->ap_wep_key) + | ||
3191 | mvmvif->ap_wep_key->keylen, | ||
3192 | GFP_KERNEL); | ||
3193 | if (!mvm_sta->wep_key) { | ||
3194 | ret = -ENOMEM; | ||
3195 | goto out_unlock; | ||
3196 | } | ||
3197 | |||
3198 | ret = iwl_mvm_set_sta_key(mvm, vif, sta, | ||
3199 | mvm_sta->wep_key, | ||
3200 | STA_KEY_IDX_INVALID); | ||
3201 | } else { | ||
3202 | ret = 0; | ||
3203 | } | ||
3204 | 3184 | ||
3205 | /* we don't support TDLS during DCM */ | 3185 | /* we don't support TDLS during DCM */ |
3206 | if (iwl_mvm_phy_ctx_count(mvm) > 1) | 3186 | if (iwl_mvm_phy_ctx_count(mvm) > 1) |
@@ -3242,17 +3222,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, | |||
3242 | NL80211_TDLS_DISABLE_LINK); | 3222 | NL80211_TDLS_DISABLE_LINK); |
3243 | } | 3223 | } |
3244 | 3224 | ||
3245 | /* Remove STA key if this is an AP using WEP */ | ||
3246 | if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) { | ||
3247 | int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta, | ||
3248 | mvm_sta->wep_key); | ||
3249 | |||
3250 | if (!ret) | ||
3251 | ret = rm_ret; | ||
3252 | kfree(mvm_sta->wep_key); | ||
3253 | mvm_sta->wep_key = NULL; | ||
3254 | } | ||
3255 | |||
3256 | if (unlikely(ret && | 3225 | if (unlikely(ret && |
3257 | test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, | 3226 | test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, |
3258 | &mvm->status))) | 3227 | &mvm->status))) |
@@ -3289,6 +3258,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, | |||
3289 | struct ieee80211_sta *sta, u32 changed) | 3258 | struct ieee80211_sta *sta, u32 changed) |
3290 | { | 3259 | { |
3291 | struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); | 3260 | struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); |
3261 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); | ||
3262 | |||
3263 | if (changed & (IEEE80211_RC_BW_CHANGED | | ||
3264 | IEEE80211_RC_SUPP_RATES_CHANGED | | ||
3265 | IEEE80211_RC_NSS_CHANGED)) | ||
3266 | iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, | ||
3267 | true); | ||
3292 | 3268 | ||
3293 | if (vif->type == NL80211_IFTYPE_STATION && | 3269 | if (vif->type == NL80211_IFTYPE_STATION && |
3294 | changed & IEEE80211_RC_NSS_CHANGED) | 3270 | changed & IEEE80211_RC_NSS_CHANGED) |
@@ -3439,20 +3415,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | |||
3439 | break; | 3415 | break; |
3440 | case WLAN_CIPHER_SUITE_WEP40: | 3416 | case WLAN_CIPHER_SUITE_WEP40: |
3441 | case WLAN_CIPHER_SUITE_WEP104: | 3417 | case WLAN_CIPHER_SUITE_WEP104: |
3442 | if (vif->type == NL80211_IFTYPE_AP) { | 3418 | if (vif->type == NL80211_IFTYPE_STATION) |
3443 | struct iwl_mvm_vif *mvmvif = | 3419 | break; |
3444 | iwl_mvm_vif_from_mac80211(vif); | 3420 | if (iwl_mvm_has_new_tx_api(mvm)) |
3445 | 3421 | return -EOPNOTSUPP; | |
3446 | mvmvif->ap_wep_key = kmemdup(key, | 3422 | /* support HW crypto on TX */ |
3447 | sizeof(*key) + key->keylen, | 3423 | return 0; |
3448 | GFP_KERNEL); | ||
3449 | if (!mvmvif->ap_wep_key) | ||
3450 | return -ENOMEM; | ||
3451 | } | ||
3452 | |||
3453 | if (vif->type != NL80211_IFTYPE_STATION) | ||
3454 | return 0; | ||
3455 | break; | ||
3456 | default: | 3424 | default: |
3457 | /* currently FW supports only one optional cipher scheme */ | 3425 | /* currently FW supports only one optional cipher scheme */ |
3458 | if (hw->n_cipher_schemes && | 3426 | if (hw->n_cipher_schemes && |
@@ -3540,12 +3508,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | |||
3540 | ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); | 3508 | ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); |
3541 | if (ret) { | 3509 | if (ret) { |
3542 | IWL_WARN(mvm, "set key failed\n"); | 3510 | IWL_WARN(mvm, "set key failed\n"); |
3511 | key->hw_key_idx = STA_KEY_IDX_INVALID; | ||
3543 | /* | 3512 | /* |
3544 | * can't add key for RX, but we don't need it | 3513 | * can't add key for RX, but we don't need it |
3545 | * in the device for TX so still return 0 | 3514 | * in the device for TX so still return 0, |
3515 | * unless we have new TX API where we cannot | ||
3516 | * put key material into the TX_CMD | ||
3546 | */ | 3517 | */ |
3547 | key->hw_key_idx = STA_KEY_IDX_INVALID; | 3518 | if (iwl_mvm_has_new_tx_api(mvm)) |
3548 | ret = 0; | 3519 | ret = -EOPNOTSUPP; |
3520 | else | ||
3521 | ret = 0; | ||
3549 | } | 3522 | } |
3550 | 3523 | ||
3551 | break; | 3524 | break; |
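
The set_key hunk above simplifies the WEP handling: station interfaces continue down the normal key path, AP interfaces fall back to TX-only hardware crypto, and the new TX API (which cannot carry key material in the TX command) returns -EOPNOTSUPP. A toy of that decision logic; the return value 1 for "continue with the normal key path" is invented for the demo, where the real function simply falls through via break:

    #include <errno.h>
    #include <stdio.h>

    enum vif_type { VIF_STATION, VIF_AP };

    static int wep_set_key(enum vif_type type, int has_new_tx_api)
    {
            if (type == VIF_STATION)
                    return 1;                 /* keep going: program the key */
            if (has_new_tx_api)
                    return -EOPNOTSUPP;       /* no TX-only crypto possible  */
            return 0;                         /* accept: HW crypto on TX only */
    }

    int main(void)
    {
            printf("station:        %d\n", wep_set_key(VIF_STATION, 0));
            printf("AP, old TX API: %d\n", wep_set_key(VIF_AP, 0));
            printf("AP, new TX API: %d\n", wep_set_key(VIF_AP, 1));
            return 0;
    }
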
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index bca6f6b536d9..a50dc53df086 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | |||
@@ -498,7 +498,6 @@ struct iwl_mvm_vif { | |||
498 | netdev_features_t features; | 498 | netdev_features_t features; |
499 | 499 | ||
500 | struct iwl_probe_resp_data __rcu *probe_resp_data; | 500 | struct iwl_probe_resp_data __rcu *probe_resp_data; |
501 | struct ieee80211_key_conf *ap_wep_key; | ||
502 | }; | 501 | }; |
503 | 502 | ||
504 | static inline struct iwl_mvm_vif * | 503 | static inline struct iwl_mvm_vif * |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 498c315291cf..98d123dd7177 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. |
9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
11 | * Copyright(c) 2018 Intel Corporation | 11 | * Copyright(c) 2018 - 2019 Intel Corporation |
12 | * | 12 | * |
13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of version 2 of the GNU General Public License as | 14 | * it under the terms of version 2 of the GNU General Public License as |
@@ -31,7 +31,7 @@ | |||
31 | * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. | 31 | * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. |
32 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 32 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
33 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 33 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
34 | * Copyright(c) 2018 Intel Corporation | 34 | * Copyright(c) 2018 - 2019 Intel Corporation |
35 | * All rights reserved. | 35 | * All rights reserved. |
36 | * | 36 | * |
37 | * Redistribution and use in source and binary forms, with or without | 37 | * Redistribution and use in source and binary forms, with or without |
@@ -1399,7 +1399,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) | |||
1399 | 1399 | ||
1400 | iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid); | 1400 | iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid); |
1401 | list_del_init(&mvmtxq->list); | 1401 | list_del_init(&mvmtxq->list); |
1402 | local_bh_disable(); | ||
1402 | iwl_mvm_mac_itxq_xmit(mvm->hw, txq); | 1403 | iwl_mvm_mac_itxq_xmit(mvm->hw, txq); |
1404 | local_bh_enable(); | ||
1403 | } | 1405 | } |
1404 | 1406 | ||
1405 | mutex_unlock(&mvm->mutex); | 1407 | mutex_unlock(&mvm->mutex); |
@@ -2333,21 +2335,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
2333 | iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, | 2335 | iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, |
2334 | timeout); | 2336 | timeout); |
2335 | 2337 | ||
2336 | if (mvmvif->ap_wep_key) { | ||
2337 | u8 key_offset = iwl_mvm_set_fw_key_idx(mvm); | ||
2338 | |||
2339 | __set_bit(key_offset, mvm->fw_key_table); | ||
2340 | |||
2341 | if (key_offset == STA_KEY_IDX_INVALID) | ||
2342 | return -ENOSPC; | ||
2343 | |||
2344 | ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id, | ||
2345 | mvmvif->ap_wep_key, true, 0, NULL, 0, | ||
2346 | key_offset, 0); | ||
2347 | if (ret) | ||
2348 | return ret; | ||
2349 | } | ||
2350 | |||
2351 | return 0; | 2338 | return 0; |
2352 | } | 2339 | } |
2353 | 2340 | ||
@@ -2419,28 +2406,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
2419 | 2406 | ||
2420 | iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0); | 2407 | iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0); |
2421 | 2408 | ||
2422 | if (mvmvif->ap_wep_key) { | ||
2423 | int i; | ||
2424 | |||
2425 | if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx, | ||
2426 | mvm->fw_key_table)) { | ||
2427 | IWL_ERR(mvm, "offset %d not used in fw key table.\n", | ||
2428 | mvmvif->ap_wep_key->hw_key_idx); | ||
2429 | return -ENOENT; | ||
2430 | } | ||
2431 | |||
2432 | /* track which key was deleted last */ | ||
2433 | for (i = 0; i < STA_KEY_MAX_NUM; i++) { | ||
2434 | if (mvm->fw_key_deleted[i] < U8_MAX) | ||
2435 | mvm->fw_key_deleted[i]++; | ||
2436 | } | ||
2437 | mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0; | ||
2438 | ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id, | ||
2439 | mvmvif->ap_wep_key, true); | ||
2440 | if (ret) | ||
2441 | return ret; | ||
2442 | } | ||
2443 | |||
2444 | ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); | 2409 | ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); |
2445 | if (ret) | 2410 | if (ret) |
2446 | IWL_WARN(mvm, "Failed sending remove station\n"); | 2411 | IWL_WARN(mvm, "Failed sending remove station\n"); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 79700c7310a1..b4d4071b865d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
9 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH |
10 | * Copyright(c) 2015 - 2016 Intel Deutschland GmbH | 10 | * Copyright(c) 2015 - 2016 Intel Deutschland GmbH |
11 | * Copyright(c) 2018 Intel Corporation | 11 | * Copyright(c) 2018 - 2019 Intel Corporation |
12 | * | 12 | * |
13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of version 2 of the GNU General Public License as | 14 | * it under the terms of version 2 of the GNU General Public License as |
@@ -31,7 +31,7 @@ | |||
31 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 31 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
32 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 32 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH |
33 | * Copyright(c) 2015 - 2016 Intel Deutschland GmbH | 33 | * Copyright(c) 2015 - 2016 Intel Deutschland GmbH |
34 | * Copyright(c) 2018 Intel Corporation | 34 | * Copyright(c) 2018 - 2019 Intel Corporation |
35 | * All rights reserved. | 35 | * All rights reserved. |
36 | * | 36 | * |
37 | * Redistribution and use in source and binary forms, with or without | 37 | * Redistribution and use in source and binary forms, with or without |
@@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data { | |||
394 | * the BA window. To be used for UAPSD only. | 394 | * the BA window. To be used for UAPSD only. |
395 | * @ptk_pn: per-queue PTK PN data structures | 395 | * @ptk_pn: per-queue PTK PN data structures |
396 | * @dup_data: per queue duplicate packet detection data | 396 | * @dup_data: per queue duplicate packet detection data |
397 | * @wep_key: used in AP mode. Is a duplicate of the WEP key. | ||
398 | * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID | 397 | * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID |
399 | * @tx_ant: the index of the antenna to use for data tx to this station. Only | 398 | * @tx_ant: the index of the antenna to use for data tx to this station. Only |
400 | * used during connection establishment (e.g. for the 4 way handshake | 399 | * used during connection establishment (e.g. for the 4 way handshake |
@@ -426,8 +425,6 @@ struct iwl_mvm_sta { | |||
426 | struct iwl_mvm_key_pn __rcu *ptk_pn[4]; | 425 | struct iwl_mvm_key_pn __rcu *ptk_pn[4]; |
427 | struct iwl_mvm_rxq_dup_data *dup_data; | 426 | struct iwl_mvm_rxq_dup_data *dup_data; |
428 | 427 | ||
429 | struct ieee80211_key_conf *wep_key; | ||
430 | |||
431 | u8 reserved_queue; | 428 | u8 reserved_queue; |
432 | 429 | ||
433 | /* Temporary, until the new TLC will control the Tx protection */ | 430 | /* Temporary, until the new TLC will control the Tx protection */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 2b94e4cef56c..9f1af8da9dc1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c | |||
@@ -953,14 +953,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
953 | {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, | 953 | {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, |
954 | {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)}, | 954 | {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)}, |
955 | 955 | ||
956 | {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)}, | 956 | {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)}, |
957 | {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)}, | 957 | {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)}, |
958 | {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)}, | 958 | {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)}, |
959 | {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)}, | 959 | {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)}, |
960 | {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)}, | 960 | {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)}, |
961 | {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)}, | 961 | {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)}, |
962 | {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)}, | 962 | {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)}, |
963 | {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)}, | 963 | {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)}, |
964 | {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)}, | ||
964 | 965 | ||
965 | {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)}, | 966 | {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)}, |
966 | {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)}, | 967 | {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)}, |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index bf8b61a476c5..59213164f35e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h | |||
@@ -1043,7 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) | |||
1043 | 1043 | ||
1044 | void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); | 1044 | void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); |
1045 | void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); | 1045 | void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); |
1046 | void iwl_trans_sync_nmi(struct iwl_trans *trans); | 1046 | void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans); |
1047 | 1047 | ||
1048 | #ifdef CONFIG_IWLWIFI_DEBUGFS | 1048 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
1049 | int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); | 1049 | int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index fe8269d023de..79c1dc05f948 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c | |||
@@ -3318,7 +3318,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans) | |||
3318 | .unref = iwl_trans_pcie_unref, \ | 3318 | .unref = iwl_trans_pcie_unref, \ |
3319 | .dump_data = iwl_trans_pcie_dump_data, \ | 3319 | .dump_data = iwl_trans_pcie_dump_data, \ |
3320 | .d3_suspend = iwl_trans_pcie_d3_suspend, \ | 3320 | .d3_suspend = iwl_trans_pcie_d3_suspend, \ |
3321 | .d3_resume = iwl_trans_pcie_d3_resume | 3321 | .d3_resume = iwl_trans_pcie_d3_resume, \ |
3322 | .sync_nmi = iwl_trans_pcie_sync_nmi | ||
3322 | 3323 | ||
3323 | #ifdef CONFIG_PM_SLEEP | 3324 | #ifdef CONFIG_PM_SLEEP |
3324 | #define IWL_TRANS_PM_OPS \ | 3325 | #define IWL_TRANS_PM_OPS \ |
@@ -3542,6 +3543,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
3542 | } | 3543 | } |
3543 | } else if (cfg == &iwl_ax101_cfg_qu_hr) { | 3544 | } else if (cfg == &iwl_ax101_cfg_qu_hr) { |
3544 | if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == | 3545 | if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == |
3546 | CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && | ||
3547 | trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) { | ||
3548 | trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0; | ||
3549 | } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == | ||
3545 | CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { | 3550 | CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { |
3546 | trans->cfg = &iwl_ax101_cfg_qu_hr; | 3551 | trans->cfg = &iwl_ax101_cfg_qu_hr; |
3547 | } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == | 3552 | } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == |
@@ -3560,7 +3565,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
3560 | } | 3565 | } |
3561 | } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == | 3566 | } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == |
3562 | CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && | 3567 | CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && |
3563 | (trans->cfg != &iwl22260_2ax_cfg || | 3568 | (trans->cfg != &iwl_ax200_cfg_cc || |
3564 | trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { | 3569 | trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { |
3565 | u32 hw_status; | 3570 | u32 hw_status; |
3566 | 3571 | ||
@@ -3637,7 +3642,7 @@ out_no_pci: | |||
3637 | return ERR_PTR(ret); | 3642 | return ERR_PTR(ret); |
3638 | } | 3643 | } |
3639 | 3644 | ||
3640 | void iwl_trans_sync_nmi(struct iwl_trans *trans) | 3645 | void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) |
3641 | { | 3646 | { |
3642 | unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT; | 3647 | unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT; |
3643 | 3648 | ||
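The trans.c hunk above adds a .sync_nmi entry to a macro that expands into designated initializers for the transport ops table, which is why the previous member now ends in a comma plus line continuation. A minimal user-space sketch of that pattern; all names here are illustrative, not the real iwlwifi macro or ops:

#include <stdio.h>

struct demo_trans_ops {
	int (*d3_suspend)(void);
	int (*d3_resume)(void);
	void (*sync_nmi)(void);
};

static int demo_suspend(void) { return 0; }
static int demo_resume(void) { return 0; }
static void demo_sync_nmi(void) { }

/* Shared members: every line but the last ends in ", \" so another
 * member (here .sync_nmi) can be appended to the expansion. */
#define DEMO_COMMON_OPS			\
	.d3_suspend = demo_suspend,	\
	.d3_resume = demo_resume,	\
	.sync_nmi = demo_sync_nmi

static const struct demo_trans_ops demo_pcie_trans_ops = {
	DEMO_COMMON_OPS
};

int main(void)
{
	demo_pcie_trans_ops.sync_nmi();
	printf("resume -> %d\n", demo_pcie_trans_ops.d3_resume());
	return 0;
}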
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 88530d9f4a54..38d110338987 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | |||
@@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, | |||
965 | cmd_str); | 965 | cmd_str); |
966 | ret = -ETIMEDOUT; | 966 | ret = -ETIMEDOUT; |
967 | 967 | ||
968 | iwl_trans_sync_nmi(trans); | 968 | iwl_trans_pcie_sync_nmi(trans); |
969 | goto cancel; | 969 | goto cancel; |
970 | } | 970 | } |
971 | 971 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 9fbd37d23e85..7be73e2c4681 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c | |||
@@ -1960,7 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, | |||
1960 | iwl_get_cmd_string(trans, cmd->id)); | 1960 | iwl_get_cmd_string(trans, cmd->id)); |
1961 | ret = -ETIMEDOUT; | 1961 | ret = -ETIMEDOUT; |
1962 | 1962 | ||
1963 | iwl_trans_sync_nmi(trans); | 1963 | iwl_trans_pcie_sync_nmi(trans); |
1964 | goto cancel; | 1964 | goto cancel; |
1965 | } | 1965 | } |
1966 | 1966 | ||
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0838af04d681..524eb5805995 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -2644,7 +2644,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
2644 | enum nl80211_band band; | 2644 | enum nl80211_band band; |
2645 | const struct ieee80211_ops *ops = &mac80211_hwsim_ops; | 2645 | const struct ieee80211_ops *ops = &mac80211_hwsim_ops; |
2646 | struct net *net; | 2646 | struct net *net; |
2647 | int idx; | 2647 | int idx, i; |
2648 | int n_limits = 0; | 2648 | int n_limits = 0; |
2649 | 2649 | ||
2650 | if (WARN_ON(param->channels > 1 && !param->use_chanctx)) | 2650 | if (WARN_ON(param->channels > 1 && !param->use_chanctx)) |
@@ -2768,12 +2768,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
2768 | goto failed_hw; | 2768 | goto failed_hw; |
2769 | } | 2769 | } |
2770 | 2770 | ||
2771 | data->if_combination.max_interfaces = 0; | ||
2772 | for (i = 0; i < n_limits; i++) | ||
2773 | data->if_combination.max_interfaces += | ||
2774 | data->if_limits[i].max; | ||
2775 | |||
2771 | data->if_combination.n_limits = n_limits; | 2776 | data->if_combination.n_limits = n_limits; |
2772 | data->if_combination.max_interfaces = 2048; | ||
2773 | data->if_combination.limits = data->if_limits; | 2777 | data->if_combination.limits = data->if_limits; |
2774 | 2778 | ||
2775 | hw->wiphy->iface_combinations = &data->if_combination; | 2779 | /* |
2776 | hw->wiphy->n_iface_combinations = 1; | 2780 | * If we actually were asked to support combinations, |
2781 | * advertise them - if there's only a single thing like | ||
2782 | * only IBSS then don't advertise it as combinations. | ||
2783 | */ | ||
2784 | if (data->if_combination.max_interfaces > 1) { | ||
2785 | hw->wiphy->iface_combinations = &data->if_combination; | ||
2786 | hw->wiphy->n_iface_combinations = 1; | ||
2787 | } | ||
2777 | 2788 | ||
2778 | if (param->ciphers) { | 2789 | if (param->ciphers) { |
2779 | memcpy(data->ciphers, param->ciphers, | 2790 | memcpy(data->ciphers, param->ciphers, |
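The hwsim hunk above stops hard-coding max_interfaces = 2048, sums the per-limit maxima instead, and only advertises interface combinations when more than one interface is actually possible. A stand-alone sketch of that computation; the structs are simplified stand-ins, not the real ieee80211_iface_limit/ieee80211_iface_combination:

#include <stdio.h>

struct demo_iface_limit {
	unsigned short max;	/* how many interfaces of these types */
	unsigned short types;	/* interface-type bitmap (unused here) */
};

struct demo_iface_combination {
	const struct demo_iface_limit *limits;
	int n_limits;
	int max_interfaces;
};

int main(void)
{
	struct demo_iface_limit limits[] = { { .max = 2048 }, { .max = 1 } };
	struct demo_iface_combination comb = {
		.limits = limits,
		.n_limits = 2,
	};
	int advertise;

	comb.max_interfaces = 0;
	for (int i = 0; i < comb.n_limits; i++)
		comb.max_interfaces += comb.limits[i].max;

	/* Only advertise combinations if more than a single interface fits. */
	advertise = comb.max_interfaces > 1;
	printf("max_interfaces=%d advertise=%d\n", comb.max_interfaces, advertise);
	return 0;
}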
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index d54dda67d036..3af45949e868 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c | |||
@@ -510,6 +510,8 @@ int mt7603_register_device(struct mt7603_dev *dev) | |||
510 | bus_ops->rmw = mt7603_rmw; | 510 | bus_ops->rmw = mt7603_rmw; |
511 | dev->mt76.bus = bus_ops; | 511 | dev->mt76.bus = bus_ops; |
512 | 512 | ||
513 | spin_lock_init(&dev->ps_lock); | ||
514 | |||
513 | INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work); | 515 | INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work); |
514 | tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet, | 516 | tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet, |
515 | (unsigned long)dev); | 517 | (unsigned long)dev); |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 5e31d7da96fc..5abc02b57818 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c | |||
@@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid) | |||
343 | MT_BA_CONTROL_1_RESET)); | 343 | MT_BA_CONTROL_1_RESET)); |
344 | } | 344 | } |
345 | 345 | ||
346 | void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, | 346 | void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, |
347 | int ba_size) | 347 | int ba_size) |
348 | { | 348 | { |
349 | u32 addr = mt7603_wtbl2_addr(wcid); | 349 | u32 addr = mt7603_wtbl2_addr(wcid); |
@@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, | |||
358 | mt76_clear(dev, addr + (15 * 4), tid_mask); | 358 | mt76_clear(dev, addr + (15 * 4), tid_mask); |
359 | return; | 359 | return; |
360 | } | 360 | } |
361 | mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); | ||
362 | |||
363 | mt7603_mac_stop(dev); | ||
364 | switch (tid) { | ||
365 | case 0: | ||
366 | mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn); | ||
367 | break; | ||
368 | case 1: | ||
369 | mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn); | ||
370 | break; | ||
371 | case 2: | ||
372 | mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO, | ||
373 | ssn); | ||
374 | mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI, | ||
375 | ssn >> 8); | ||
376 | break; | ||
377 | case 3: | ||
378 | mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn); | ||
379 | break; | ||
380 | case 4: | ||
381 | mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn); | ||
382 | break; | ||
383 | case 5: | ||
384 | mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO, | ||
385 | ssn); | ||
386 | mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI, | ||
387 | ssn >> 4); | ||
388 | break; | ||
389 | case 6: | ||
390 | mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn); | ||
391 | break; | ||
392 | case 7: | ||
393 | mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn); | ||
394 | break; | ||
395 | } | ||
396 | mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2); | ||
397 | mt7603_mac_start(dev); | ||
398 | 361 | ||
399 | for (i = 7; i > 0; i--) { | 362 | for (i = 7; i > 0; i--) { |
400 | if (ba_size >= MT_AGG_SIZE_LIMIT(i)) | 363 | if (ba_size >= MT_AGG_SIZE_LIMIT(i)) |
@@ -827,6 +790,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, | |||
827 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 790 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
828 | struct ieee80211_tx_rate *rate = &info->control.rates[0]; | 791 | struct ieee80211_tx_rate *rate = &info->control.rates[0]; |
829 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 792 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
793 | struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; | ||
830 | struct ieee80211_vif *vif = info->control.vif; | 794 | struct ieee80211_vif *vif = info->control.vif; |
831 | struct mt7603_vif *mvif; | 795 | struct mt7603_vif *mvif; |
832 | int wlan_idx; | 796 | int wlan_idx; |
@@ -834,6 +798,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, | |||
834 | int tx_count = 8; | 798 | int tx_count = 8; |
835 | u8 frame_type, frame_subtype; | 799 | u8 frame_type, frame_subtype; |
836 | u16 fc = le16_to_cpu(hdr->frame_control); | 800 | u16 fc = le16_to_cpu(hdr->frame_control); |
801 | u16 seqno = 0; | ||
837 | u8 vif_idx = 0; | 802 | u8 vif_idx = 0; |
838 | u32 val; | 803 | u32 val; |
839 | u8 bw; | 804 | u8 bw; |
@@ -919,7 +884,17 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, | |||
919 | tx_count = 0x1f; | 884 | tx_count = 0x1f; |
920 | 885 | ||
921 | val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) | | 886 | val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) | |
922 | FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl)); | 887 | MT_TXD3_SN_VALID; |
888 | |||
889 | if (ieee80211_is_data_qos(hdr->frame_control)) | ||
890 | seqno = le16_to_cpu(hdr->seq_ctrl); | ||
891 | else if (ieee80211_is_back_req(hdr->frame_control)) | ||
892 | seqno = le16_to_cpu(bar->start_seq_num); | ||
893 | else | ||
894 | val &= ~MT_TXD3_SN_VALID; | ||
895 | |||
896 | val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4); | ||
897 | |||
923 | txwi[3] = cpu_to_le32(val); | 898 | txwi[3] = cpu_to_le32(val); |
924 | 899 | ||
925 | if (key) { | 900 | if (key) { |
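The mac.c hunk above takes the sequence number from the QoS header for data frames, from start_seq_num for BlockAckReq frames, and otherwise clears the SN-valid flag so the hardware assigns one; the descriptor stores seqno >> 4 because the low four bits of a sequence-control field carry the fragment number. A simplified user-space sketch; the bit positions below are assumptions for illustration, not the real MT7603 TXD3 layout:

#include <stdint.h>
#include <stdio.h>

#define DEMO_TXD3_SN_VALID	(1u << 27)	/* assumed flag position */
#define DEMO_TXD3_SEQ_SHIFT	16		/* assumed field position */

static uint32_t demo_build_txd3(int is_qos_data, int is_bar,
				uint16_t seq_ctrl, uint16_t bar_ssn)
{
	uint32_t val = DEMO_TXD3_SN_VALID;
	uint16_t seqno = 0;

	if (is_qos_data)
		seqno = seq_ctrl;		/* reuse the frame's own sequence */
	else if (is_bar)
		seqno = bar_ssn;		/* BAR carries its start SSN */
	else
		val &= ~DEMO_TXD3_SN_VALID;	/* let the hardware assign it */

	/* drop the 4 fragment-number bits, keep the 12-bit sequence number */
	return val | ((uint32_t)(seqno >> 4) << DEMO_TXD3_SEQ_SHIFT);
}

int main(void)
{
	printf("qos  txd3 = 0x%08x\n", demo_build_txd3(1, 0, 0x1230, 0));
	printf("mgmt txd3 = 0x%08x\n", demo_build_txd3(0, 0, 0, 0));
	return 0;
}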
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index cc0fe0933b2d..a3c4ef198bfe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c | |||
@@ -372,7 +372,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) | |||
372 | struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; | 372 | struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; |
373 | struct sk_buff_head list; | 373 | struct sk_buff_head list; |
374 | 374 | ||
375 | mt76_stop_tx_queues(&dev->mt76, sta, false); | 375 | mt76_stop_tx_queues(&dev->mt76, sta, true); |
376 | mt7603_wtbl_set_ps(dev, msta, ps); | 376 | mt7603_wtbl_set_ps(dev, msta, ps); |
377 | if (ps) | 377 | if (ps) |
378 | return; | 378 | return; |
@@ -584,13 +584,13 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | |||
584 | case IEEE80211_AMPDU_TX_OPERATIONAL: | 584 | case IEEE80211_AMPDU_TX_OPERATIONAL: |
585 | mtxq->aggr = true; | 585 | mtxq->aggr = true; |
586 | mtxq->send_bar = false; | 586 | mtxq->send_bar = false; |
587 | mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size); | 587 | mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size); |
588 | break; | 588 | break; |
589 | case IEEE80211_AMPDU_TX_STOP_FLUSH: | 589 | case IEEE80211_AMPDU_TX_STOP_FLUSH: |
590 | case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: | 590 | case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: |
591 | mtxq->aggr = false; | 591 | mtxq->aggr = false; |
592 | ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); | 592 | ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); |
593 | mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1); | 593 | mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1); |
594 | break; | 594 | break; |
595 | case IEEE80211_AMPDU_TX_START: | 595 | case IEEE80211_AMPDU_TX_START: |
596 | mtxq->agg_ssn = *ssn << 4; | 596 | mtxq->agg_ssn = *ssn << 4; |
@@ -598,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | |||
598 | break; | 598 | break; |
599 | case IEEE80211_AMPDU_TX_STOP_CONT: | 599 | case IEEE80211_AMPDU_TX_STOP_CONT: |
600 | mtxq->aggr = false; | 600 | mtxq->aggr = false; |
601 | mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1); | 601 | mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1); |
602 | ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); | 602 | ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); |
603 | break; | 603 | break; |
604 | } | 604 | } |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h index 79f332429432..6049f3b7c8fe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h | |||
@@ -200,7 +200,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval); | |||
200 | int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb); | 200 | int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb); |
201 | void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data); | 201 | void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data); |
202 | void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid); | 202 | void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid); |
203 | void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, | 203 | void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, |
204 | int ba_size); | 204 | int ba_size); |
205 | 205 | ||
206 | void mt7603_pse_client_reset(struct mt7603_dev *dev); | 206 | void mt7603_pse_client_reset(struct mt7603_dev *dev); |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 9ed231abe916..4fe5a83ca5a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c | |||
@@ -466,7 +466,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, | |||
466 | return; | 466 | return; |
467 | 467 | ||
468 | rcu_read_lock(); | 468 | rcu_read_lock(); |
469 | mt76_tx_status_lock(mdev, &list); | ||
470 | 469 | ||
471 | if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid)) | 470 | if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid)) |
472 | wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); | 471 | wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); |
@@ -479,6 +478,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, | |||
479 | drv_priv); | 478 | drv_priv); |
480 | } | 479 | } |
481 | 480 | ||
481 | mt76_tx_status_lock(mdev, &list); | ||
482 | |||
482 | if (wcid) { | 483 | if (wcid) { |
483 | if (stat->pktid >= MT_PACKET_ID_FIRST) | 484 | if (stat->pktid >= MT_PACKET_ID_FIRST) |
484 | status.skb = mt76_tx_status_skb_get(mdev, wcid, | 485 | status.skb = mt76_tx_status_skb_get(mdev, wcid, |
@@ -498,7 +499,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, | |||
498 | if (*update == 0 && stat_val == stat_cache && | 499 | if (*update == 0 && stat_val == stat_cache && |
499 | stat->wcid == msta->status.wcid && msta->n_frames < 32) { | 500 | stat->wcid == msta->status.wcid && msta->n_frames < 32) { |
500 | msta->n_frames++; | 501 | msta->n_frames++; |
501 | goto out; | 502 | mt76_tx_status_unlock(mdev, &list); |
503 | rcu_read_unlock(); | ||
504 | return; | ||
502 | } | 505 | } |
503 | 506 | ||
504 | mt76x02_mac_fill_tx_status(dev, status.info, &msta->status, | 507 | mt76x02_mac_fill_tx_status(dev, status.info, &msta->status, |
@@ -514,11 +517,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, | |||
514 | 517 | ||
515 | if (status.skb) | 518 | if (status.skb) |
516 | mt76_tx_status_skb_done(mdev, status.skb, &list); | 519 | mt76_tx_status_skb_done(mdev, status.skb, &list); |
517 | else | ||
518 | ieee80211_tx_status_ext(mt76_hw(dev), &status); | ||
519 | |||
520 | out: | ||
521 | mt76_tx_status_unlock(mdev, &list); | 520 | mt76_tx_status_unlock(mdev, &list); |
521 | |||
522 | if (!status.skb) | ||
523 | ieee80211_tx_status_ext(mt76_hw(dev), &status); | ||
522 | rcu_read_unlock(); | 524 | rcu_read_unlock(); |
523 | } | 525 | } |
524 | 526 | ||
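The mt76x02_mac.c hunk above moves the status-lock acquisition below the RCU lookups and invokes ieee80211_tx_status_ext() only after the lock has been released, so the callback never runs inside the list-lock critical section. A minimal sketch of that ordering with a plain mutex; names are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_status_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_report_status(void)
{
	/* in a real driver this may take further locks; keep it lock-free here */
	printf("tx status reported\n");
}

static void demo_send_tx_status(int have_skb)
{
	int need_report;

	pthread_mutex_lock(&demo_status_lock);
	/* ... fill in the status / match the skb under the lock ... */
	need_report = !have_skb;
	pthread_mutex_unlock(&demo_status_lock);

	/* the callback runs only after the lock has been dropped */
	if (need_report)
		demo_report_status();
}

int main(void)
{
	demo_send_tx_status(0);
	return 0;
}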
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 4b1744e9fb78..50b92ca92bd7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h | |||
@@ -673,7 +673,6 @@ enum rt2x00_state_flags { | |||
673 | CONFIG_CHANNEL_HT40, | 673 | CONFIG_CHANNEL_HT40, |
674 | CONFIG_POWERSAVING, | 674 | CONFIG_POWERSAVING, |
675 | CONFIG_HT_DISABLED, | 675 | CONFIG_HT_DISABLED, |
676 | CONFIG_QOS_DISABLED, | ||
677 | CONFIG_MONITORING, | 676 | CONFIG_MONITORING, |
678 | 677 | ||
679 | /* | 678 | /* |
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 2825560e2424..e8462f25d252 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | |||
@@ -642,19 +642,9 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, | |||
642 | rt2x00dev->intf_associated--; | 642 | rt2x00dev->intf_associated--; |
643 | 643 | ||
644 | rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); | 644 | rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); |
645 | |||
646 | clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); | ||
647 | } | 645 | } |
648 | 646 | ||
649 | /* | 647 | /* |
650 | * Check for access point which do not support 802.11e . We have to | ||
651 | * generate data frames sequence number in S/W for such AP, because | ||
652 | * of H/W bug. | ||
653 | */ | ||
654 | if (changes & BSS_CHANGED_QOS && !bss_conf->qos) | ||
655 | set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); | ||
656 | |||
657 | /* | ||
658 | * When the erp information has changed, we should perform | 648 | * When the erp information has changed, we should perform |
659 | * additional configuration steps. For all other changes we are done. | 649 | * additional configuration steps. For all other changes we are done. |
660 | */ | 650 | */ |
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 92ddc19e7bf7..4834b4eb0206 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | |||
@@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, | |||
201 | if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) { | 201 | if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) { |
202 | /* | 202 | /* |
203 | * rt2800 has a H/W (or F/W) bug, device incorrectly increase | 203 | * rt2800 has a H/W (or F/W) bug, device incorrectly increase |
204 | * seqno on retransmited data (non-QOS) frames. To workaround | 204 | * seqno on retransmitted data (non-QOS) and management frames. |
205 | * the problem let's generate seqno in software if QOS is | 205 | * To workaround the problem let's generate seqno in software. |
206 | * disabled. | 206 | * Except for beacons which are transmitted periodically by H/W |
207 | * hence hardware has to assign seqno for them. | ||
207 | */ | 208 | */ |
208 | if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags)) | 209 | if (ieee80211_is_beacon(hdr->frame_control)) { |
209 | __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); | 210 | __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); |
210 | else | ||
211 | /* H/W will generate sequence number */ | 211 | /* H/W will generate sequence number */ |
212 | return; | 212 | return; |
213 | } | ||
214 | |||
215 | __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); | ||
213 | } | 216 | } |
214 | 217 | ||
215 | /* | 218 | /* |
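The rt2x00queue.c hunk above now generates sequence numbers in software for every frame except beacons, which the hardware transmits periodically on its own and must therefore number itself. A user-space sketch of that decision; the constants reproduce the frame-control type/subtype test that ieee80211_is_beacon() performs:

#include <stdint.h>
#include <stdio.h>

#define DEMO_FC_TYPE_SUBTYPE_MASK	0x00fc
#define DEMO_FC_BEACON			0x0080	/* mgmt type 0, subtype 8 */

static int demo_sw_seqno_needed(uint16_t frame_control)
{
	if ((frame_control & DEMO_FC_TYPE_SUBTYPE_MASK) == DEMO_FC_BEACON)
		return 0;	/* beacon: hardware assigns the sequence number */
	return 1;		/* everything else: generate the seqno in software */
}

int main(void)
{
	printf("beacon -> %d\n", demo_sw_seqno_needed(0x0080));
	printf("data   -> %d\n", demo_sw_seqno_needed(0x0008));
	return 0;
}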
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 5ace833de746..351af88231ad 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem, | |||
911 | u64 start, u64 size, u64 end, | 911 | u64 start, u64 size, u64 end, |
912 | u64 userspace_addr, int perm) | 912 | u64 userspace_addr, int perm) |
913 | { | 913 | { |
914 | struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC); | 914 | struct vhost_umem_node *tmp, *node; |
915 | 915 | ||
916 | if (!size) | ||
917 | return -EFAULT; | ||
918 | |||
919 | node = kmalloc(sizeof(*node), GFP_ATOMIC); | ||
916 | if (!node) | 920 | if (!node) |
917 | return -ENOMEM; | 921 | return -ENOMEM; |
918 | 922 | ||
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 2c588f9bbbda..c14001b42d20 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
@@ -610,6 +610,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call, | |||
610 | bool stalled = false; | 610 | bool stalled = false; |
611 | u64 rtt; | 611 | u64 rtt; |
612 | u32 life, last_life; | 612 | u32 life, last_life; |
613 | bool rxrpc_complete = false; | ||
613 | 614 | ||
614 | DECLARE_WAITQUEUE(myself, current); | 615 | DECLARE_WAITQUEUE(myself, current); |
615 | 616 | ||
@@ -621,7 +622,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call, | |||
621 | rtt2 = 2; | 622 | rtt2 = 2; |
622 | 623 | ||
623 | timeout = rtt2; | 624 | timeout = rtt2; |
624 | last_life = rxrpc_kernel_check_life(call->net->socket, call->rxcall); | 625 | rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life); |
625 | 626 | ||
626 | add_wait_queue(&call->waitq, &myself); | 627 | add_wait_queue(&call->waitq, &myself); |
627 | for (;;) { | 628 | for (;;) { |
@@ -639,7 +640,12 @@ static long afs_wait_for_call_to_complete(struct afs_call *call, | |||
639 | if (afs_check_call_state(call, AFS_CALL_COMPLETE)) | 640 | if (afs_check_call_state(call, AFS_CALL_COMPLETE)) |
640 | break; | 641 | break; |
641 | 642 | ||
642 | life = rxrpc_kernel_check_life(call->net->socket, call->rxcall); | 643 | if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) { |
644 | /* rxrpc terminated the call. */ | ||
645 | rxrpc_complete = true; | ||
646 | break; | ||
647 | } | ||
648 | |||
643 | if (timeout == 0 && | 649 | if (timeout == 0 && |
644 | life == last_life && signal_pending(current)) { | 650 | life == last_life && signal_pending(current)) { |
645 | if (stalled) | 651 | if (stalled) |
@@ -663,12 +669,16 @@ static long afs_wait_for_call_to_complete(struct afs_call *call, | |||
663 | remove_wait_queue(&call->waitq, &myself); | 669 | remove_wait_queue(&call->waitq, &myself); |
664 | __set_current_state(TASK_RUNNING); | 670 | __set_current_state(TASK_RUNNING); |
665 | 671 | ||
666 | /* Kill off the call if it's still live. */ | ||
667 | if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) { | 672 | if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) { |
668 | _debug("call interrupted"); | 673 | if (rxrpc_complete) { |
669 | if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall, | 674 | afs_set_call_complete(call, call->error, call->abort_code); |
670 | RX_USER_ABORT, -EINTR, "KWI")) | 675 | } else { |
671 | afs_set_call_complete(call, -EINTR, 0); | 676 | /* Kill off the call if it's still live. */ |
677 | _debug("call interrupted"); | ||
678 | if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall, | ||
679 | RX_USER_ABORT, -EINTR, "KWI")) | ||
680 | afs_set_call_complete(call, -EINTR, 0); | ||
681 | } | ||
672 | } | 682 | } |
673 | 683 | ||
674 | spin_lock_bh(&call->state_lock); | 684 | spin_lock_bh(&call->state_lock); |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 26f69cf763f4..324e872c91d1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1500,6 +1500,7 @@ struct net_device_ops { | |||
1500 | * @IFF_FAILOVER: device is a failover master device | 1500 | * @IFF_FAILOVER: device is a failover master device |
1501 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device | 1501 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device |
1502 | * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device | 1502 | * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device |
1503 | * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running | ||
1503 | */ | 1504 | */ |
1504 | enum netdev_priv_flags { | 1505 | enum netdev_priv_flags { |
1505 | IFF_802_1Q_VLAN = 1<<0, | 1506 | IFF_802_1Q_VLAN = 1<<0, |
@@ -1532,6 +1533,7 @@ enum netdev_priv_flags { | |||
1532 | IFF_FAILOVER = 1<<27, | 1533 | IFF_FAILOVER = 1<<27, |
1533 | IFF_FAILOVER_SLAVE = 1<<28, | 1534 | IFF_FAILOVER_SLAVE = 1<<28, |
1534 | IFF_L3MDEV_RX_HANDLER = 1<<29, | 1535 | IFF_L3MDEV_RX_HANDLER = 1<<29, |
1536 | IFF_LIVE_RENAME_OK = 1<<30, | ||
1535 | }; | 1537 | }; |
1536 | 1538 | ||
1537 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN | 1539 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN |
@@ -1563,6 +1565,7 @@ enum netdev_priv_flags { | |||
1563 | #define IFF_FAILOVER IFF_FAILOVER | 1565 | #define IFF_FAILOVER IFF_FAILOVER |
1564 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE | 1566 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE |
1565 | #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER | 1567 | #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER |
1568 | #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK | ||
1566 | 1569 | ||
1567 | /** | 1570 | /** |
1568 | * struct net_device - The DEVICE structure. | 1571 | * struct net_device - The DEVICE structure. |
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 2bfb87eb98ce..78c856cba4f5 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h | |||
@@ -61,10 +61,12 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, | |||
61 | rxrpc_user_attach_call_t, unsigned long, gfp_t, | 61 | rxrpc_user_attach_call_t, unsigned long, gfp_t, |
62 | unsigned int); | 62 | unsigned int); |
63 | void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); | 63 | void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); |
64 | u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); | 64 | bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *, |
65 | u32 *); | ||
65 | void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); | 66 | void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); |
66 | u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); | 67 | u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); |
67 | bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, | 68 | bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, |
68 | ktime_t *); | 69 | ktime_t *); |
70 | bool rxrpc_kernel_call_is_complete(struct rxrpc_call *); | ||
69 | 71 | ||
70 | #endif /* _NET_RXRPC_H */ | 72 | #endif /* _NET_RXRPC_H */ |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index bb307a11ee63..13bfeb712d36 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
@@ -7183,6 +7183,11 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev, | |||
7183 | #define wiphy_info(wiphy, format, args...) \ | 7183 | #define wiphy_info(wiphy, format, args...) \ |
7184 | dev_info(&(wiphy)->dev, format, ##args) | 7184 | dev_info(&(wiphy)->dev, format, ##args) |
7185 | 7185 | ||
7186 | #define wiphy_err_ratelimited(wiphy, format, args...) \ | ||
7187 | dev_err_ratelimited(&(wiphy)->dev, format, ##args) | ||
7188 | #define wiphy_warn_ratelimited(wiphy, format, args...) \ | ||
7189 | dev_warn_ratelimited(&(wiphy)->dev, format, ##args) | ||
7190 | |||
7186 | #define wiphy_debug(wiphy, format, args...) \ | 7191 | #define wiphy_debug(wiphy, format, args...) \ |
7187 | wiphy_printk(KERN_DEBUG, wiphy, format, ##args) | 7192 | wiphy_printk(KERN_DEBUG, wiphy, format, ##args) |
7188 | 7193 | ||
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index ac2ed8ec662b..112dc18c658f 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -6231,8 +6231,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, | |||
6231 | * @hw: pointer as obtained from ieee80211_alloc_hw() | 6231 | * @hw: pointer as obtained from ieee80211_alloc_hw() |
6232 | * @ac: AC number to return packets from. | 6232 | * @ac: AC number to return packets from. |
6233 | * | 6233 | * |
6234 | * Should only be called between calls to ieee80211_txq_schedule_start() | ||
6235 | * and ieee80211_txq_schedule_end(). | ||
6236 | * Returns the next txq if successful, %NULL if no queue is eligible. If a txq | 6234 | * Returns the next txq if successful, %NULL if no queue is eligible. If a txq |
6237 | * is returned, it should be returned with ieee80211_return_txq() after the | 6235 | * is returned, it should be returned with ieee80211_return_txq() after the |
6238 | * driver has finished scheduling it. | 6236 | * driver has finished scheduling it. |
@@ -6240,51 +6238,58 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, | |||
6240 | struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac); | 6238 | struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac); |
6241 | 6239 | ||
6242 | /** | 6240 | /** |
6243 | * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq() | 6241 | * ieee80211_txq_schedule_start - start new scheduling round for TXQs |
6244 | * | ||
6245 | * @hw: pointer as obtained from ieee80211_alloc_hw() | ||
6246 | * @txq: pointer obtained from station or virtual interface | ||
6247 | * | ||
6248 | * Should only be called between calls to ieee80211_txq_schedule_start() | ||
6249 | * and ieee80211_txq_schedule_end(). | ||
6250 | */ | ||
6251 | void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq); | ||
6252 | |||
6253 | /** | ||
6254 | * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC | ||
6255 | * | 6242 | * |
6256 | * @hw: pointer as obtained from ieee80211_alloc_hw() | 6243 | * @hw: pointer as obtained from ieee80211_alloc_hw() |
6257 | * @ac: AC number to acquire locks for | 6244 | * @ac: AC number to acquire locks for |
6258 | * | 6245 | * |
6259 | * Acquire locks needed to schedule TXQs from the given AC. Should be called | 6246 | * Should be called before ieee80211_next_txq() or ieee80211_return_txq(). |
6260 | * before ieee80211_next_txq() or ieee80211_return_txq(). | 6247 | * The driver must not call multiple TXQ scheduling rounds concurrently. |
6261 | */ | 6248 | */ |
6262 | void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac) | 6249 | void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac); |
6263 | __acquires(txq_lock); | 6250 | |
6251 | /* (deprecated) */ | ||
6252 | static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac) | ||
6253 | { | ||
6254 | } | ||
6255 | |||
6256 | void __ieee80211_schedule_txq(struct ieee80211_hw *hw, | ||
6257 | struct ieee80211_txq *txq, bool force); | ||
6264 | 6258 | ||
6265 | /** | 6259 | /** |
6266 | * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC | 6260 | * ieee80211_schedule_txq - schedule a TXQ for transmission |
6267 | * | 6261 | * |
6268 | * @hw: pointer as obtained from ieee80211_alloc_hw() | 6262 | * @hw: pointer as obtained from ieee80211_alloc_hw() |
6269 | * @ac: AC number to acquire locks for | 6263 | * @txq: pointer obtained from station or virtual interface |
6270 | * | 6264 | * |
6271 | * Release locks previously acquired by ieee80211_txq_schedule_end(). | 6265 | * Schedules a TXQ for transmission if it is not already scheduled, |
6266 | * even if mac80211 does not have any packets buffered. | ||
6267 | * | ||
6268 | * The driver may call this function if it has buffered packets for | ||
6269 | * this TXQ internally. | ||
6272 | */ | 6270 | */ |
6273 | void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac) | 6271 | static inline void |
6274 | __releases(txq_lock); | 6272 | ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) |
6273 | { | ||
6274 | __ieee80211_schedule_txq(hw, txq, true); | ||
6275 | } | ||
6275 | 6276 | ||
6276 | /** | 6277 | /** |
6277 | * ieee80211_schedule_txq - schedule a TXQ for transmission | 6278 | * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq() |
6278 | * | 6279 | * |
6279 | * @hw: pointer as obtained from ieee80211_alloc_hw() | 6280 | * @hw: pointer as obtained from ieee80211_alloc_hw() |
6280 | * @txq: pointer obtained from station or virtual interface | 6281 | * @txq: pointer obtained from station or virtual interface |
6282 | * @force: schedule txq even if mac80211 does not have any buffered packets. | ||
6281 | * | 6283 | * |
6282 | * Schedules a TXQ for transmission if it is not already scheduled. Takes a | 6284 | * The driver may set force=true if it has buffered packets for this TXQ |
6283 | * lock, which means it must *not* be called between | 6285 | * internally. |
6284 | * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end() | ||
6285 | */ | 6286 | */ |
6286 | void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) | 6287 | static inline void |
6287 | __acquires(txq_lock) __releases(txq_lock); | 6288 | ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, |
6289 | bool force) | ||
6290 | { | ||
6291 | __ieee80211_schedule_txq(hw, txq, force); | ||
6292 | } | ||
6288 | 6293 | ||
6289 | /** | 6294 | /** |
6290 | * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit | 6295 | * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit |
diff --git a/include/net/netrom.h b/include/net/netrom.h index 5a0714ff500f..80f15b1c1a48 100644 --- a/include/net/netrom.h +++ b/include/net/netrom.h | |||
@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *); | |||
266 | int nr_t1timer_running(struct sock *); | 266 | int nr_t1timer_running(struct sock *); |
267 | 267 | ||
268 | /* sysctl_net_netrom.c */ | 268 | /* sysctl_net_netrom.c */ |
269 | void nr_register_sysctl(void); | 269 | int nr_register_sysctl(void); |
270 | void nr_unregister_sysctl(void); | 270 | void nr_unregister_sysctl(void); |
271 | 271 | ||
272 | #endif | 272 | #endif |
diff --git a/include/net/sock.h b/include/net/sock.h index 8de5ee258b93..341f8bafa0cf 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -2084,12 +2084,6 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq) | |||
2084 | * @p: poll_table | 2084 | * @p: poll_table |
2085 | * | 2085 | * |
2086 | * See the comments in the wq_has_sleeper function. | 2086 | * See the comments in the wq_has_sleeper function. |
2087 | * | ||
2088 | * Do not derive sock from filp->private_data here. An SMC socket establishes | ||
2089 | * an internal TCP socket that is used in the fallback case. All socket | ||
2090 | * operations on the SMC socket are then forwarded to the TCP socket. In case of | ||
2091 | * poll, the filp->private_data pointer references the SMC socket because the | ||
2092 | * TCP socket has no file assigned. | ||
2093 | */ | 2087 | */ |
2094 | static inline void sock_poll_wait(struct file *filp, struct socket *sock, | 2088 | static inline void sock_poll_wait(struct file *filp, struct socket *sock, |
2095 | poll_table *p) | 2089 | poll_table *p) |
diff --git a/include/net/tls.h b/include/net/tls.h index a5a938583295..5934246b2c6f 100644 --- a/include/net/tls.h +++ b/include/net/tls.h | |||
@@ -307,6 +307,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); | |||
307 | int tls_device_sendpage(struct sock *sk, struct page *page, | 307 | int tls_device_sendpage(struct sock *sk, struct page *page, |
308 | int offset, size_t size, int flags); | 308 | int offset, size_t size, int flags); |
309 | void tls_device_sk_destruct(struct sock *sk); | 309 | void tls_device_sk_destruct(struct sock *sk); |
310 | void tls_device_free_resources_tx(struct sock *sk); | ||
310 | void tls_device_init(void); | 311 | void tls_device_init(void); |
311 | void tls_device_cleanup(void); | 312 | void tls_device_cleanup(void); |
312 | int tls_tx_records(struct sock *sk, int flags); | 313 | int tls_tx_records(struct sock *sk, int flags); |
@@ -330,6 +331,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx, | |||
330 | int flags); | 331 | int flags); |
331 | int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, | 332 | int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, |
332 | int flags); | 333 | int flags); |
334 | bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx); | ||
333 | 335 | ||
334 | static inline struct tls_msg *tls_msg(struct sk_buff *skb) | 336 | static inline struct tls_msg *tls_msg(struct sk_buff *skb) |
335 | { | 337 | { |
@@ -379,7 +381,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, | |||
379 | static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) | 381 | static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) |
380 | { | 382 | { |
381 | #ifdef CONFIG_SOCK_VALIDATE_XMIT | 383 | #ifdef CONFIG_SOCK_VALIDATE_XMIT |
382 | return sk_fullsock(sk) & | 384 | return sk_fullsock(sk) && |
383 | (smp_load_acquire(&sk->sk_validate_xmit_skb) == | 385 | (smp_load_acquire(&sk->sk_validate_xmit_skb) == |
384 | &tls_validate_xmit_skb); | 386 | &tls_validate_xmit_skb); |
385 | #else | 387 | #else |
diff --git a/net/atm/lec.c b/net/atm/lec.c index d7f5cf5b7594..ad4f829193f0 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg) | |||
710 | 710 | ||
711 | static int lec_mcast_attach(struct atm_vcc *vcc, int arg) | 711 | static int lec_mcast_attach(struct atm_vcc *vcc, int arg) |
712 | { | 712 | { |
713 | if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg]) | 713 | if (arg < 0 || arg >= MAX_LEC_ITF) |
714 | return -EINVAL; | ||
715 | arg = array_index_nospec(arg, MAX_LEC_ITF); | ||
716 | if (!dev_lec[arg]) | ||
714 | return -EINVAL; | 717 | return -EINVAL; |
715 | vcc->proto_data = dev_lec[arg]; | 718 | vcc->proto_data = dev_lec[arg]; |
716 | return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc); | 719 | return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc); |
@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) | |||
728 | i = arg; | 731 | i = arg; |
729 | if (arg >= MAX_LEC_ITF) | 732 | if (arg >= MAX_LEC_ITF) |
730 | return -EINVAL; | 733 | return -EINVAL; |
734 | i = array_index_nospec(arg, MAX_LEC_ITF); | ||
731 | if (!dev_lec[i]) { | 735 | if (!dev_lec[i]) { |
732 | int size; | 736 | int size; |
733 | 737 | ||
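Both lec.c hunks above follow the usual Spectre-v1 pattern: bounds-check the user-supplied index, sanitise it with array_index_nospec() so speculation cannot run past the check, and only then index the table. A user-space sketch of the same shape; the clamp helper is only a stand-in for the real <linux/nospec.h> primitive:

#include <stdio.h>

#define DEMO_MAX_ITF 48

static void *demo_dev_table[DEMO_MAX_ITF];

/* stand-in for array_index_nospec(): force out-of-range indices to 0 */
static int demo_index_nospec(int index, int size)
{
	return (index >= 0 && index < size) ? index : 0;
}

static int demo_mcast_attach(int arg)
{
	if (arg < 0 || arg >= DEMO_MAX_ITF)		/* 1. architectural bounds check */
		return -1;
	arg = demo_index_nospec(arg, DEMO_MAX_ITF);	/* 2. keep speculation in bounds */
	if (!demo_dev_table[arg])			/* 3. only now touch the table */
		return -1;
	return 0;
}

int main(void)
{
	printf("attach(100) -> %d\n", demo_mcast_attach(100));
	printf("attach(0)   -> %d\n", demo_mcast_attach(0));
	return 0;
}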
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 9a580999ca57..d892b7c3cc42 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -523,12 +523,12 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, | |||
523 | struct sock *sk = sock->sk; | 523 | struct sock *sk = sock->sk; |
524 | int err = 0; | 524 | int err = 0; |
525 | 525 | ||
526 | BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr); | ||
527 | |||
528 | if (!addr || addr_len < sizeof(struct sockaddr_sco) || | 526 | if (!addr || addr_len < sizeof(struct sockaddr_sco) || |
529 | addr->sa_family != AF_BLUETOOTH) | 527 | addr->sa_family != AF_BLUETOOTH) |
530 | return -EINVAL; | 528 | return -EINVAL; |
531 | 529 | ||
530 | BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr); | ||
531 | |||
532 | lock_sock(sk); | 532 | lock_sock(sk); |
533 | 533 | ||
534 | if (sk->sk_state != BT_OPEN) { | 534 | if (sk->sk_state != BT_OPEN) { |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 5ea7e56119c1..ba303ee99b9b 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -197,13 +197,10 @@ static void __br_handle_local_finish(struct sk_buff *skb) | |||
197 | /* note: already called with rcu_read_lock */ | 197 | /* note: already called with rcu_read_lock */ |
198 | static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | 198 | static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb) |
199 | { | 199 | { |
200 | struct net_bridge_port *p = br_port_get_rcu(skb->dev); | ||
201 | |||
202 | __br_handle_local_finish(skb); | 200 | __br_handle_local_finish(skb); |
203 | 201 | ||
204 | BR_INPUT_SKB_CB(skb)->brdev = p->br->dev; | 202 | /* return 1 to signal the okfn() was called so it's ok to use the skb */ |
205 | br_pass_frame_up(skb); | 203 | return 1; |
206 | return 0; | ||
207 | } | 204 | } |
208 | 205 | ||
209 | /* | 206 | /* |
@@ -280,10 +277,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb) | |||
280 | goto forward; | 277 | goto forward; |
281 | } | 278 | } |
282 | 279 | ||
283 | /* Deliver packet to local host only */ | 280 | /* The else clause should be hit when nf_hook(): |
284 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev), | 281 | * - returns < 0 (drop/error) |
285 | NULL, skb, skb->dev, NULL, br_handle_local_finish); | 282 | * - returns = 0 (stolen/nf_queue) |
286 | return RX_HANDLER_CONSUMED; | 283 | * Thus return 1 from the okfn() to signal the skb is ok to pass |
284 | */ | ||
285 | if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, | ||
286 | dev_net(skb->dev), NULL, skb, skb->dev, NULL, | ||
287 | br_handle_local_finish) == 1) { | ||
288 | return RX_HANDLER_PASS; | ||
289 | } else { | ||
290 | return RX_HANDLER_CONSUMED; | ||
291 | } | ||
287 | } | 292 | } |
288 | 293 | ||
289 | forward: | 294 | forward: |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 02da21d771c9..45e7f4173bba 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -2031,7 +2031,8 @@ static void br_multicast_start_querier(struct net_bridge *br, | |||
2031 | 2031 | ||
2032 | __br_multicast_open(br, query); | 2032 | __br_multicast_open(br, query); |
2033 | 2033 | ||
2034 | list_for_each_entry(port, &br->port_list, list) { | 2034 | rcu_read_lock(); |
2035 | list_for_each_entry_rcu(port, &br->port_list, list) { | ||
2035 | if (port->state == BR_STATE_DISABLED || | 2036 | if (port->state == BR_STATE_DISABLED || |
2036 | port->state == BR_STATE_BLOCKING) | 2037 | port->state == BR_STATE_BLOCKING) |
2037 | continue; | 2038 | continue; |
@@ -2043,6 +2044,7 @@ static void br_multicast_start_querier(struct net_bridge *br, | |||
2043 | br_multicast_enable(&port->ip6_own_query); | 2044 | br_multicast_enable(&port->ip6_own_query); |
2044 | #endif | 2045 | #endif |
2045 | } | 2046 | } |
2047 | rcu_read_unlock(); | ||
2046 | } | 2048 | } |
2047 | 2049 | ||
2048 | int br_multicast_toggle(struct net_bridge *br, unsigned long val) | 2050 | int br_multicast_toggle(struct net_bridge *br, unsigned long val) |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 9c07591b0232..7104cf13da84 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -1441,7 +1441,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) | |||
1441 | nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, | 1441 | nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, |
1442 | br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) || | 1442 | br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) || |
1443 | nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT, | 1443 | nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT, |
1444 | br_opt_get(br, IFLA_BR_VLAN_STATS_PER_PORT))) | 1444 | br_opt_get(br, BROPT_VLAN_STATS_PER_PORT))) |
1445 | return -EMSGSIZE; | 1445 | return -EMSGSIZE; |
1446 | #endif | 1446 | #endif |
1447 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | 1447 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING |
diff --git a/net/core/dev.c b/net/core/dev.c index fdcff29df915..f409406254dd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1184,7 +1184,21 @@ int dev_change_name(struct net_device *dev, const char *newname) | |||
1184 | BUG_ON(!dev_net(dev)); | 1184 | BUG_ON(!dev_net(dev)); |
1185 | 1185 | ||
1186 | net = dev_net(dev); | 1186 | net = dev_net(dev); |
1187 | if (dev->flags & IFF_UP) | 1187 | |
1188 | /* Some auto-enslaved devices e.g. failover slaves are | ||
1189 | * special, as userspace might rename the device after | ||
1190 | * the interface had been brought up and running since | ||
1191 | * the point kernel initiated auto-enslavement. Allow | ||
1192 | * live name change even when these slave devices are | ||
1193 | * up and running. | ||
1194 | * | ||
1195 | * Typically, users of these auto-enslaving devices | ||
1196 | * don't actually care about slave name change, as | ||
1197 | * they are supposed to operate on master interface | ||
1198 | * directly. | ||
1199 | */ | ||
1200 | if (dev->flags & IFF_UP && | ||
1201 | likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) | ||
1188 | return -EBUSY; | 1202 | return -EBUSY; |
1189 | 1203 | ||
1190 | write_seqcount_begin(&devnet_rename_seq); | 1204 | write_seqcount_begin(&devnet_rename_seq); |
diff --git a/net/core/failover.c b/net/core/failover.c index 4a92a98ccce9..b5cd3c727285 100644 --- a/net/core/failover.c +++ b/net/core/failover.c | |||
@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev) | |||
80 | goto err_upper_link; | 80 | goto err_upper_link; |
81 | } | 81 | } |
82 | 82 | ||
83 | slave_dev->priv_flags |= IFF_FAILOVER_SLAVE; | 83 | slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK); |
84 | 84 | ||
85 | if (fops && fops->slave_register && | 85 | if (fops && fops->slave_register && |
86 | !fops->slave_register(slave_dev, failover_dev)) | 86 | !fops->slave_register(slave_dev, failover_dev)) |
87 | return NOTIFY_OK; | 87 | return NOTIFY_OK; |
88 | 88 | ||
89 | netdev_upper_dev_unlink(slave_dev, failover_dev); | 89 | netdev_upper_dev_unlink(slave_dev, failover_dev); |
90 | slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE; | 90 | slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK); |
91 | err_upper_link: | 91 | err_upper_link: |
92 | netdev_rx_handler_unregister(slave_dev); | 92 | netdev_rx_handler_unregister(slave_dev); |
93 | done: | 93 | done: |
@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev) | |||
121 | 121 | ||
122 | netdev_rx_handler_unregister(slave_dev); | 122 | netdev_rx_handler_unregister(slave_dev); |
123 | netdev_upper_dev_unlink(slave_dev, failover_dev); | 123 | netdev_upper_dev_unlink(slave_dev, failover_dev); |
124 | slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE; | 124 | slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK); |
125 | 125 | ||
126 | if (fops && fops->slave_unregister && | 126 | if (fops && fops->slave_unregister && |
127 | !fops->slave_unregister(slave_dev, failover_dev)) | 127 | !fops->slave_unregister(slave_dev, failover_dev)) |
diff --git a/net/core/filter.c b/net/core/filter.c index fc92ebc4e200..27e61ffd9039 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -4383,6 +4383,8 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr, | |||
4383 | * Only binding to IP is supported. | 4383 | * Only binding to IP is supported. |
4384 | */ | 4384 | */ |
4385 | err = -EINVAL; | 4385 | err = -EINVAL; |
4386 | if (addr_len < offsetofend(struct sockaddr, sa_family)) | ||
4387 | return err; | ||
4386 | if (addr->sa_family == AF_INET) { | 4388 | if (addr->sa_family == AF_INET) { |
4387 | if (addr_len < sizeof(struct sockaddr_in)) | 4389 | if (addr_len < sizeof(struct sockaddr_in)) |
4388 | return err; | 4390 | return err; |
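The filter.c hunk above refuses to read sa_family unless addr_len actually covers it; offsetofend() is the kernel's "offset of the end of a member" helper. A user-space sketch of the check, with the macro reproduced so the example is self-contained:

#include <stddef.h>
#include <stdio.h>
#include <sys/socket.h>

#define demo_offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

static int demo_validate_addr_len(size_t addr_len)
{
	/* sa_family may only be dereferenced once addr_len reaches its end */
	if (addr_len < demo_offsetofend(struct sockaddr, sa_family))
		return -1;
	return 0;
}

int main(void)
{
	printf("len 1  -> %d\n", demo_validate_addr_len(1));
	printf("len 16 -> %d\n", demo_validate_addr_len(16));
	return 0;
}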
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index f8f94303a1f5..8f8b7b6c2945 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -1747,20 +1747,16 @@ int netdev_register_kobject(struct net_device *ndev) | |||
1747 | 1747 | ||
1748 | error = device_add(dev); | 1748 | error = device_add(dev); |
1749 | if (error) | 1749 | if (error) |
1750 | goto error_put_device; | 1750 | return error; |
1751 | 1751 | ||
1752 | error = register_queue_kobjects(ndev); | 1752 | error = register_queue_kobjects(ndev); |
1753 | if (error) | 1753 | if (error) { |
1754 | goto error_device_del; | 1754 | device_del(dev); |
1755 | return error; | ||
1756 | } | ||
1755 | 1757 | ||
1756 | pm_runtime_set_memalloc_noio(dev, true); | 1758 | pm_runtime_set_memalloc_noio(dev, true); |
1757 | 1759 | ||
1758 | return 0; | ||
1759 | |||
1760 | error_device_del: | ||
1761 | device_del(dev); | ||
1762 | error_put_device: | ||
1763 | put_device(dev); | ||
1764 | return error; | 1760 | return error; |
1765 | } | 1761 | } |
1766 | 1762 | ||
diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c index 703cf76aa7c2..7109c168b5e0 100644 --- a/net/core/ptp_classifier.c +++ b/net/core/ptp_classifier.c | |||
@@ -185,9 +185,10 @@ void __init ptp_classifier_init(void) | |||
185 | { 0x16, 0, 0, 0x00000000 }, | 185 | { 0x16, 0, 0, 0x00000000 }, |
186 | { 0x06, 0, 0, 0x00000000 }, | 186 | { 0x06, 0, 0, 0x00000000 }, |
187 | }; | 187 | }; |
188 | struct sock_fprog_kern ptp_prog = { | 188 | struct sock_fprog_kern ptp_prog; |
189 | .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter, | 189 | |
190 | }; | 190 | ptp_prog.len = ARRAY_SIZE(ptp_filter); |
191 | ptp_prog.filter = ptp_filter; | ||
191 | 192 | ||
192 | BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog)); | 193 | BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog)); |
193 | } | 194 | } |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a51cab95ba64..220c56e93659 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -4948,7 +4948,7 @@ static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, | |||
4948 | { | 4948 | { |
4949 | struct if_stats_msg *ifsm; | 4949 | struct if_stats_msg *ifsm; |
4950 | 4950 | ||
4951 | if (nlh->nlmsg_len < sizeof(*ifsm)) { | 4951 | if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) { |
4952 | NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); | 4952 | NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); |
4953 | return -EINVAL; | 4953 | return -EINVAL; |
4954 | } | 4954 | } |
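The rtnetlink.c hunk above tightens the minimum-length check: the if_stats_msg header sits after the netlink header, so the smallest valid nlmsg_len is NLMSG_HDRLEN plus the payload size, which is what nlmsg_msg_size() returns. A small user-space illustration; the struct is a simplified stand-in for if_stats_msg:

#include <stdio.h>
#include <linux/netlink.h>

struct demo_if_stats_msg {	/* stand-in: family, padding, ifindex, filter_mask */
	unsigned char  family;
	unsigned char  pad1;
	unsigned short pad2;
	unsigned int   ifindex;
	unsigned int   filter_mask;
};

int main(void)
{
	size_t payload = sizeof(struct demo_if_stats_msg);

	printf("too-weak minimum : %zu bytes\n", payload);
	printf("correct minimum  : %zu bytes (NLMSG_HDRLEN + payload)\n",
	       (size_t)NLMSG_HDRLEN + payload);
	return 0;
}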
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ef2cd5712098..40796b8bf820 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -5083,7 +5083,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); | |||
5083 | 5083 | ||
5084 | static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) | 5084 | static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) |
5085 | { | 5085 | { |
5086 | int mac_len; | 5086 | int mac_len, meta_len; |
5087 | void *meta; | ||
5087 | 5088 | ||
5088 | if (skb_cow(skb, skb_headroom(skb)) < 0) { | 5089 | if (skb_cow(skb, skb_headroom(skb)) < 0) { |
5089 | kfree_skb(skb); | 5090 | kfree_skb(skb); |
@@ -5095,6 +5096,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) | |||
5095 | memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), | 5096 | memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), |
5096 | mac_len - VLAN_HLEN - ETH_TLEN); | 5097 | mac_len - VLAN_HLEN - ETH_TLEN); |
5097 | } | 5098 | } |
5099 | |||
5100 | meta_len = skb_metadata_len(skb); | ||
5101 | if (meta_len) { | ||
5102 | meta = skb_metadata_end(skb) - meta_len; | ||
5103 | memmove(meta + VLAN_HLEN, meta, meta_len); | ||
5104 | } | ||
5105 | |||
5098 | skb->mac_header += VLAN_HLEN; | 5106 | skb->mac_header += VLAN_HLEN; |
5099 | return skb; | 5107 | return skb; |
5100 | } | 5108 | } |
diff --git a/net/core/sock.c b/net/core/sock.c index 782343bb925b..067878a1e4c5 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -348,7 +348,7 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval) | |||
348 | tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ; | 348 | tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ; |
349 | } | 349 | } |
350 | 350 | ||
351 | if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { | 351 | if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { |
352 | struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec }; | 352 | struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec }; |
353 | *(struct old_timeval32 *)optval = tv32; | 353 | *(struct old_timeval32 *)optval = tv32; |
354 | return sizeof(tv32); | 354 | return sizeof(tv32); |
@@ -372,7 +372,7 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool | |||
372 | { | 372 | { |
373 | struct __kernel_sock_timeval tv; | 373 | struct __kernel_sock_timeval tv; |
374 | 374 | ||
375 | if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { | 375 | if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { |
376 | struct old_timeval32 tv32; | 376 | struct old_timeval32 tv32; |
377 | 377 | ||
378 | if (optlen < sizeof(tv32)) | 378 | if (optlen < sizeof(tv32)) |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 79e98e21cdd7..12ce6c526d72 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
@@ -121,6 +121,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) | |||
121 | struct guehdr *guehdr; | 121 | struct guehdr *guehdr; |
122 | void *data; | 122 | void *data; |
123 | u16 doffset = 0; | 123 | u16 doffset = 0; |
124 | u8 proto_ctype; | ||
124 | 125 | ||
125 | if (!fou) | 126 | if (!fou) |
126 | return 1; | 127 | return 1; |
@@ -212,13 +213,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) | |||
212 | if (unlikely(guehdr->control)) | 213 | if (unlikely(guehdr->control)) |
213 | return gue_control_message(skb, guehdr); | 214 | return gue_control_message(skb, guehdr); |
214 | 215 | ||
216 | proto_ctype = guehdr->proto_ctype; | ||
215 | __skb_pull(skb, sizeof(struct udphdr) + hdrlen); | 217 | __skb_pull(skb, sizeof(struct udphdr) + hdrlen); |
216 | skb_reset_transport_header(skb); | 218 | skb_reset_transport_header(skb); |
217 | 219 | ||
218 | if (iptunnel_pull_offloads(skb)) | 220 | if (iptunnel_pull_offloads(skb)) |
219 | goto drop; | 221 | goto drop; |
220 | 222 | ||
221 | return -guehdr->proto_ctype; | 223 | return -proto_ctype; |
222 | 224 | ||
223 | drop: | 225 | drop: |
224 | kfree_skb(skb); | 226 | kfree_skb(skb); |
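The fou.c hunk above copies proto_ctype out of the GUE header before __skb_pull(), since the pull can leave guehdr pointing at stale data. The general pattern, sketched in user space with an illustrative "pull" that reallocates the buffer to show why the old header pointer must not be reused:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_hdr {
	uint8_t proto_ctype;
	uint8_t hlen;
};

/* illustrative pull: may move the data, invalidating old header pointers */
static void demo_pull(uint8_t **buf, size_t *len, size_t n)
{
	uint8_t *nbuf = malloc(*len - n);

	memcpy(nbuf, *buf + n, *len - n);
	free(*buf);
	*buf = nbuf;
	*len -= n;
}

int main(void)
{
	size_t len = 64;
	uint8_t *buf = calloc(1, len);
	struct demo_hdr *hdr = (struct demo_hdr *)buf;
	uint8_t proto_ctype;

	hdr->proto_ctype = 17;
	proto_ctype = hdr->proto_ctype;		/* save before the pull */
	demo_pull(&buf, &len, sizeof(*hdr));	/* hdr is dangling from here on */
	printf("proto %u\n", proto_ctype);	/* safe: uses the saved copy */
	free(buf);
	return 0;
}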
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a5da63e5faa2..88ce038dd495 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1185,9 +1185,23 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) | |||
1185 | 1185 | ||
1186 | static void ipv4_link_failure(struct sk_buff *skb) | 1186 | static void ipv4_link_failure(struct sk_buff *skb) |
1187 | { | 1187 | { |
1188 | struct ip_options opt; | ||
1188 | struct rtable *rt; | 1189 | struct rtable *rt; |
1190 | int res; | ||
1189 | 1191 | ||
1190 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); | 1192 | /* Recompile ip options since IPCB may not be valid anymore. |
1193 | */ | ||
1194 | memset(&opt, 0, sizeof(opt)); | ||
1195 | opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr); | ||
1196 | |||
1197 | rcu_read_lock(); | ||
1198 | res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL); | ||
1199 | rcu_read_unlock(); | ||
1200 | |||
1201 | if (res) | ||
1202 | return; | ||
1203 | |||
1204 | __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt); | ||
1191 | 1205 | ||
1192 | rt = skb_rtable(skb); | 1206 | rt = skb_rtable(skb); |
1193 | if (rt) | 1207 | if (rt) |
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 359da68d7c06..477cb4aa456c 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c | |||
@@ -49,9 +49,8 @@ | |||
49 | #define DCTCP_MAX_ALPHA 1024U | 49 | #define DCTCP_MAX_ALPHA 1024U |
50 | 50 | ||
51 | struct dctcp { | 51 | struct dctcp { |
52 | u32 acked_bytes_ecn; | 52 | u32 old_delivered; |
53 | u32 acked_bytes_total; | 53 | u32 old_delivered_ce; |
54 | u32 prior_snd_una; | ||
55 | u32 prior_rcv_nxt; | 54 | u32 prior_rcv_nxt; |
56 | u32 dctcp_alpha; | 55 | u32 dctcp_alpha; |
57 | u32 next_seq; | 56 | u32 next_seq; |
@@ -73,8 +72,8 @@ static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) | |||
73 | { | 72 | { |
74 | ca->next_seq = tp->snd_nxt; | 73 | ca->next_seq = tp->snd_nxt; |
75 | 74 | ||
76 | ca->acked_bytes_ecn = 0; | 75 | ca->old_delivered = tp->delivered; |
77 | ca->acked_bytes_total = 0; | 76 | ca->old_delivered_ce = tp->delivered_ce; |
78 | } | 77 | } |
79 | 78 | ||
80 | static void dctcp_init(struct sock *sk) | 79 | static void dctcp_init(struct sock *sk) |
@@ -86,7 +85,6 @@ static void dctcp_init(struct sock *sk) | |||
86 | sk->sk_state == TCP_CLOSE)) { | 85 | sk->sk_state == TCP_CLOSE)) { |
87 | struct dctcp *ca = inet_csk_ca(sk); | 86 | struct dctcp *ca = inet_csk_ca(sk); |
88 | 87 | ||
89 | ca->prior_snd_una = tp->snd_una; | ||
90 | ca->prior_rcv_nxt = tp->rcv_nxt; | 88 | ca->prior_rcv_nxt = tp->rcv_nxt; |
91 | 89 | ||
92 | ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); | 90 | ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); |
@@ -118,37 +116,25 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags) | |||
118 | { | 116 | { |
119 | const struct tcp_sock *tp = tcp_sk(sk); | 117 | const struct tcp_sock *tp = tcp_sk(sk); |
120 | struct dctcp *ca = inet_csk_ca(sk); | 118 | struct dctcp *ca = inet_csk_ca(sk); |
121 | u32 acked_bytes = tp->snd_una - ca->prior_snd_una; | ||
122 | |||
123 | /* If ack did not advance snd_una, count dupack as MSS size. | ||
124 | * If ack did update window, do not count it at all. | ||
125 | */ | ||
126 | if (acked_bytes == 0 && !(flags & CA_ACK_WIN_UPDATE)) | ||
127 | acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss; | ||
128 | if (acked_bytes) { | ||
129 | ca->acked_bytes_total += acked_bytes; | ||
130 | ca->prior_snd_una = tp->snd_una; | ||
131 | |||
132 | if (flags & CA_ACK_ECE) | ||
133 | ca->acked_bytes_ecn += acked_bytes; | ||
134 | } | ||
135 | 119 | ||
136 | /* Expired RTT */ | 120 | /* Expired RTT */ |
137 | if (!before(tp->snd_una, ca->next_seq)) { | 121 | if (!before(tp->snd_una, ca->next_seq)) { |
138 | u64 bytes_ecn = ca->acked_bytes_ecn; | 122 | u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce; |
139 | u32 alpha = ca->dctcp_alpha; | 123 | u32 alpha = ca->dctcp_alpha; |
140 | 124 | ||
141 | /* alpha = (1 - g) * alpha + g * F */ | 125 | /* alpha = (1 - g) * alpha + g * F */ |
142 | 126 | ||
143 | alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g); | 127 | alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g); |
144 | if (bytes_ecn) { | 128 | if (delivered_ce) { |
129 | u32 delivered = tp->delivered - ca->old_delivered; | ||
130 | |||
145 | /* If dctcp_shift_g == 1, a 32bit value would overflow | 131 | /* If dctcp_shift_g == 1, a 32bit value would overflow |
146 | * after 8 Mbytes. | 132 | * after 8 M packets. |
147 | */ | 133 | */ |
148 | bytes_ecn <<= (10 - dctcp_shift_g); | 134 | delivered_ce <<= (10 - dctcp_shift_g); |
149 | do_div(bytes_ecn, max(1U, ca->acked_bytes_total)); | 135 | delivered_ce /= max(1U, delivered); |
150 | 136 | ||
151 | alpha = min(alpha + (u32)bytes_ecn, DCTCP_MAX_ALPHA); | 137 | alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA); |
152 | } | 138 | } |
153 | /* dctcp_alpha can be read from dctcp_get_info() without | 139 | /* dctcp_alpha can be read from dctcp_get_info() without |
154 | * synchro, so we ask compiler to not use dctcp_alpha | 140 | * synchro, so we ask compiler to not use dctcp_alpha |
@@ -200,6 +186,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr, | |||
200 | union tcp_cc_info *info) | 186 | union tcp_cc_info *info) |
201 | { | 187 | { |
202 | const struct dctcp *ca = inet_csk_ca(sk); | 188 | const struct dctcp *ca = inet_csk_ca(sk); |
189 | const struct tcp_sock *tp = tcp_sk(sk); | ||
203 | 190 | ||
204 | /* Fill it also in case of VEGASINFO due to req struct limits. | 191 | /* Fill it also in case of VEGASINFO due to req struct limits. |
205 | * We can still correctly retrieve it later. | 192 | * We can still correctly retrieve it later. |
@@ -211,8 +198,10 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr, | |||
211 | info->dctcp.dctcp_enabled = 1; | 198 | info->dctcp.dctcp_enabled = 1; |
212 | info->dctcp.dctcp_ce_state = (u16) ca->ce_state; | 199 | info->dctcp.dctcp_ce_state = (u16) ca->ce_state; |
213 | info->dctcp.dctcp_alpha = ca->dctcp_alpha; | 200 | info->dctcp.dctcp_alpha = ca->dctcp_alpha; |
214 | info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn; | 201 | info->dctcp.dctcp_ab_ecn = tp->mss_cache * |
215 | info->dctcp.dctcp_ab_tot = ca->acked_bytes_total; | 202 | (tp->delivered_ce - ca->old_delivered_ce); |
203 | info->dctcp.dctcp_ab_tot = tp->mss_cache * | ||
204 | (tp->delivered - ca->old_delivered); | ||
216 | } | 205 | } |
217 | 206 | ||
218 | *attr = INET_DIAG_DCTCPINFO; | 207 | *attr = INET_DIAG_DCTCPINFO; |
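The dctcp rewrite above keeps the same EWMA, alpha = (1 - g) * alpha + g * F, but estimates F from the per-RTT packet counters (delivered_ce over delivered) instead of acked bytes. alpha stays in 10-bit fixed point, with DCTCP_MAX_ALPHA == 1024 standing for 1.0 and g == 1/2^dctcp_shift_g, which is why the CE count is shifted by (10 - dctcp_shift_g) before the division. A standalone userspace sketch of that fixed-point update; shift_g = 4 and the 25% CE-mark rate are example values only:

#include <stdio.h>

#define DCTCP_MAX_ALPHA 1024U

static unsigned int alpha_update(unsigned int alpha, unsigned int shift_g,
				 unsigned int delivered, unsigned int delivered_ce)
{
	unsigned int dec = alpha >> shift_g;		/* g * alpha */

	if (!dec)					/* mirrors min_not_zero() */
		dec = alpha;
	alpha -= dec;					/* (1 - g) * alpha */

	if (delivered_ce) {
		/* g * F in fixed point: (ce / delivered) * 1024 / 2^shift_g */
		unsigned int add = (delivered_ce << (10 - shift_g)) /
				   (delivered ? delivered : 1);

		alpha += add;
		if (alpha > DCTCP_MAX_ALPHA)
			alpha = DCTCP_MAX_ALPHA;
	}
	return alpha;
}

int main(void)
{
	unsigned int alpha = 0;
	int i;

	for (i = 0; i < 8; i++) {		/* 25% CE marks every RTT */
		alpha = alpha_update(alpha, 4, 100, 25);
		printf("RTT %d: alpha = %4u/1024 (%.3f)\n", i, alpha, alpha / 1024.0);
	}
	return 0;				/* converges towards 256/1024 = 0.25 */
}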
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 5def3c48870e..731d3045b50a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) | |||
402 | static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) | 402 | static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) |
403 | { | 403 | { |
404 | struct tcp_sock *tp = tcp_sk(sk); | 404 | struct tcp_sock *tp = tcp_sk(sk); |
405 | int room; | ||
406 | |||
407 | room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh; | ||
405 | 408 | ||
406 | /* Check #1 */ | 409 | /* Check #1 */ |
407 | if (tp->rcv_ssthresh < tp->window_clamp && | 410 | if (room > 0 && !tcp_under_memory_pressure(sk)) { |
408 | (int)tp->rcv_ssthresh < tcp_space(sk) && | ||
409 | !tcp_under_memory_pressure(sk)) { | ||
410 | int incr; | 411 | int incr; |
411 | 412 | ||
412 | /* Check #2. Increase window, if skb with such overhead | 413 | /* Check #2. Increase window, if skb with such overhead |
@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) | |||
419 | 420 | ||
420 | if (incr) { | 421 | if (incr) { |
421 | incr = max_t(int, incr, 2 * skb->len); | 422 | incr = max_t(int, incr, 2 * skb->len); |
422 | tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, | 423 | tp->rcv_ssthresh += min(room, incr); |
423 | tp->window_clamp); | ||
424 | inet_csk(sk)->icsk_ack.quick |= 1; | 424 | inet_csk(sk)->icsk_ack.quick |= 1; |
425 | } | 425 | } |
426 | } | 426 | } |
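The tcp_grow_window() change folds the two separate bound checks into one headroom value: room is how much rcv_ssthresh may still grow before hitting either the window clamp or the currently available receive space, and the per-ACK increment is then capped by that room up front. A quick numeric sketch of the new bound, with made-up values:

#include <stdio.h>

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int window_clamp = 65535;
	int space = 50000;		/* tcp_space(sk): free receive buffer right now */
	int rcv_ssthresh = 48000;
	int incr = 4000;		/* candidate growth for this ACK */
	int room = min_int(window_clamp, space) - rcv_ssthresh;

	if (room > 0)
		rcv_ssthresh += min_int(room, incr);	/* grows by 2000, not 4000 */

	printf("rcv_ssthresh = %d (room was %d)\n", rcv_ssthresh, room);
	return 0;
}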
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 0302e0eb07af..7178e32eb15d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -2330,6 +2330,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, | |||
2330 | 2330 | ||
2331 | rcu_read_lock(); | 2331 | rcu_read_lock(); |
2332 | from = rcu_dereference(rt6->from); | 2332 | from = rcu_dereference(rt6->from); |
2333 | if (!from) { | ||
2334 | rcu_read_unlock(); | ||
2335 | return; | ||
2336 | } | ||
2333 | nrt6 = ip6_rt_cache_alloc(from, daddr, saddr); | 2337 | nrt6 = ip6_rt_cache_alloc(from, daddr, saddr); |
2334 | if (nrt6) { | 2338 | if (nrt6) { |
2335 | rt6_do_update_pmtu(nrt6, mtu); | 2339 | rt6_do_update_pmtu(nrt6, mtu); |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index b444483cdb2b..622eeaf5732b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -1047,6 +1047,8 @@ static void udp_v6_flush_pending_frames(struct sock *sk) | |||
1047 | static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, | 1047 | static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, |
1048 | int addr_len) | 1048 | int addr_len) |
1049 | { | 1049 | { |
1050 | if (addr_len < offsetofend(struct sockaddr, sa_family)) | ||
1051 | return -EINVAL; | ||
1050 | /* The following checks are replicated from __ip6_datagram_connect() | 1052 | /* The following checks are replicated from __ip6_datagram_connect() |
1051 | * and intended to prevent BPF program called below from accessing | 1053 | * and intended to prevent BPF program called below from accessing |
1052 | * bytes that are out of the bound specified by user in addr_len. | 1054 | * bytes that are out of the bound specified by user in addr_len. |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index b99e73a7e7e0..2017b7d780f5 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -320,14 +320,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) | |||
320 | struct llc_sap *sap; | 320 | struct llc_sap *sap; |
321 | int rc = -EINVAL; | 321 | int rc = -EINVAL; |
322 | 322 | ||
323 | dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); | ||
324 | |||
325 | lock_sock(sk); | 323 | lock_sock(sk); |
326 | if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) | 324 | if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) |
327 | goto out; | 325 | goto out; |
328 | rc = -EAFNOSUPPORT; | 326 | rc = -EAFNOSUPPORT; |
329 | if (unlikely(addr->sllc_family != AF_LLC)) | 327 | if (unlikely(addr->sllc_family != AF_LLC)) |
330 | goto out; | 328 | goto out; |
329 | dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); | ||
331 | rc = -ENODEV; | 330 | rc = -ENODEV; |
332 | rcu_read_lock(); | 331 | rcu_read_lock(); |
333 | if (sk->sk_bound_dev_if) { | 332 | if (sk->sk_bound_dev_if) { |
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 28d022a3eee3..ae4f0be3b393 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -1195,6 +1195,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local, | |||
1195 | { | 1195 | { |
1196 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); | 1196 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); |
1197 | 1197 | ||
1198 | if (local->in_reconfig) | ||
1199 | return; | ||
1200 | |||
1198 | if (!check_sdata_in_driver(sdata)) | 1201 | if (!check_sdata_in_driver(sdata)) |
1199 | return; | 1202 | return; |
1200 | 1203 | ||
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 4700718e010f..37e372896230 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
167 | * The driver doesn't know anything about VLAN interfaces. | 167 | * The driver doesn't know anything about VLAN interfaces. |
168 | * Hence, don't send GTKs for VLAN interfaces to the driver. | 168 | * Hence, don't send GTKs for VLAN interfaces to the driver. |
169 | */ | 169 | */ |
170 | if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) | 170 | if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { |
171 | ret = 1; | ||
171 | goto out_unsupported; | 172 | goto out_unsupported; |
173 | } | ||
172 | } | 174 | } |
173 | 175 | ||
174 | ret = drv_set_key(key->local, SET_KEY, sdata, | 176 | ret = drv_set_key(key->local, SET_KEY, sdata, |
@@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
213 | /* all of these we can do in software - if driver can */ | 215 | /* all of these we can do in software - if driver can */ |
214 | if (ret == 1) | 216 | if (ret == 1) |
215 | return 0; | 217 | return 0; |
216 | if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) { | 218 | if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) |
217 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | ||
218 | return 0; | ||
219 | return -EINVAL; | 219 | return -EINVAL; |
220 | } | ||
221 | return 0; | 220 | return 0; |
222 | default: | 221 | default: |
223 | return -EINVAL; | 222 | return -EINVAL; |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 95eb5064fa91..b76a2aefa9ec 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath); | |||
23 | static u32 mesh_table_hash(const void *addr, u32 len, u32 seed) | 23 | static u32 mesh_table_hash(const void *addr, u32 len, u32 seed) |
24 | { | 24 | { |
25 | /* Use last four bytes of hw addr as hash index */ | 25 | /* Use last four bytes of hw addr as hash index */ |
26 | return jhash_1word(*(u32 *)(addr+2), seed); | 26 | return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed); |
27 | } | 27 | } |
28 | 28 | ||
29 | static const struct rhashtable_params mesh_rht_params = { | 29 | static const struct rhashtable_params mesh_rht_params = { |
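The mesh hash fix reads the last four bytes of the six-byte MAC address (addr + 2), an offset with no 4-byte alignment guarantee, so the direct *(u32 *) load is replaced with an unaligned-safe accessor. A userspace analogue of the safe pattern, using memcpy in place of the in-kernel __get_unaligned_cpu32() helper:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Read 32 bits from a possibly misaligned address; memcpy lets the compiler
 * pick an access sequence that is legal on strict-alignment CPUs.
 */
static uint32_t read_unaligned_u32(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t key = read_unaligned_u32(mac + 2);	/* last four bytes */

	printf("hash input: 0x%08x\n", (unsigned int)key);
	return 0;
}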
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 7f8d93401ce0..bf0b187f994e 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1568,7 +1568,15 @@ static void sta_ps_start(struct sta_info *sta) | |||
1568 | return; | 1568 | return; |
1569 | 1569 | ||
1570 | for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) { | 1570 | for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) { |
1571 | if (txq_has_queue(sta->sta.txq[tid])) | 1571 | struct ieee80211_txq *txq = sta->sta.txq[tid]; |
1572 | struct txq_info *txqi = to_txq_info(txq); | ||
1573 | |||
1574 | spin_lock(&local->active_txq_lock[txq->ac]); | ||
1575 | if (!list_empty(&txqi->schedule_order)) | ||
1576 | list_del_init(&txqi->schedule_order); | ||
1577 | spin_unlock(&local->active_txq_lock[txq->ac]); | ||
1578 | |||
1579 | if (txq_has_queue(txq)) | ||
1572 | set_bit(tid, &sta->txq_buffered_tids); | 1580 | set_bit(tid, &sta->txq_buffered_tids); |
1573 | else | 1581 | else |
1574 | clear_bit(tid, &sta->txq_buffered_tids); | 1582 | clear_bit(tid, &sta->txq_buffered_tids); |
diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h index 366b9e6f043e..40141df09f25 100644 --- a/net/mac80211/trace_msg.h +++ b/net/mac80211/trace_msg.h | |||
@@ -1,4 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* | ||
3 | * Portions of this file | ||
4 | * Copyright (C) 2019 Intel Corporation | ||
5 | */ | ||
6 | |||
2 | #ifdef CONFIG_MAC80211_MESSAGE_TRACING | 7 | #ifdef CONFIG_MAC80211_MESSAGE_TRACING |
3 | 8 | ||
4 | #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) | 9 | #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) |
@@ -11,7 +16,7 @@ | |||
11 | #undef TRACE_SYSTEM | 16 | #undef TRACE_SYSTEM |
12 | #define TRACE_SYSTEM mac80211_msg | 17 | #define TRACE_SYSTEM mac80211_msg |
13 | 18 | ||
14 | #define MAX_MSG_LEN 100 | 19 | #define MAX_MSG_LEN 120 |
15 | 20 | ||
16 | DECLARE_EVENT_CLASS(mac80211_msg_event, | 21 | DECLARE_EVENT_CLASS(mac80211_msg_event, |
17 | TP_PROTO(struct va_format *vaf), | 22 | TP_PROTO(struct va_format *vaf), |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 8a49a74c0a37..2e816dd67be7 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -3221,6 +3221,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3221 | u8 max_subframes = sta->sta.max_amsdu_subframes; | 3221 | u8 max_subframes = sta->sta.max_amsdu_subframes; |
3222 | int max_frags = local->hw.max_tx_fragments; | 3222 | int max_frags = local->hw.max_tx_fragments; |
3223 | int max_amsdu_len = sta->sta.max_amsdu_len; | 3223 | int max_amsdu_len = sta->sta.max_amsdu_len; |
3224 | int orig_truesize; | ||
3224 | __be16 len; | 3225 | __be16 len; |
3225 | void *data; | 3226 | void *data; |
3226 | bool ret = false; | 3227 | bool ret = false; |
@@ -3261,6 +3262,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3261 | if (!head || skb_is_gso(head)) | 3262 | if (!head || skb_is_gso(head)) |
3262 | goto out; | 3263 | goto out; |
3263 | 3264 | ||
3265 | orig_truesize = head->truesize; | ||
3264 | orig_len = head->len; | 3266 | orig_len = head->len; |
3265 | 3267 | ||
3266 | if (skb->len + head->len > max_amsdu_len) | 3268 | if (skb->len + head->len > max_amsdu_len) |
@@ -3318,6 +3320,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3318 | *frag_tail = skb; | 3320 | *frag_tail = skb; |
3319 | 3321 | ||
3320 | out_recalc: | 3322 | out_recalc: |
3323 | fq->memory_usage += head->truesize - orig_truesize; | ||
3321 | if (head->len != orig_len) { | 3324 | if (head->len != orig_len) { |
3322 | flow->backlog += head->len - orig_len; | 3325 | flow->backlog += head->len - orig_len; |
3323 | tin->backlog_bytes += head->len - orig_len; | 3326 | tin->backlog_bytes += head->len - orig_len; |
@@ -3646,16 +3649,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue); | |||
3646 | struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) | 3649 | struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) |
3647 | { | 3650 | { |
3648 | struct ieee80211_local *local = hw_to_local(hw); | 3651 | struct ieee80211_local *local = hw_to_local(hw); |
3652 | struct ieee80211_txq *ret = NULL; | ||
3649 | struct txq_info *txqi = NULL; | 3653 | struct txq_info *txqi = NULL; |
3650 | 3654 | ||
3651 | lockdep_assert_held(&local->active_txq_lock[ac]); | 3655 | spin_lock_bh(&local->active_txq_lock[ac]); |
3652 | 3656 | ||
3653 | begin: | 3657 | begin: |
3654 | txqi = list_first_entry_or_null(&local->active_txqs[ac], | 3658 | txqi = list_first_entry_or_null(&local->active_txqs[ac], |
3655 | struct txq_info, | 3659 | struct txq_info, |
3656 | schedule_order); | 3660 | schedule_order); |
3657 | if (!txqi) | 3661 | if (!txqi) |
3658 | return NULL; | 3662 | goto out; |
3659 | 3663 | ||
3660 | if (txqi->txq.sta) { | 3664 | if (txqi->txq.sta) { |
3661 | struct sta_info *sta = container_of(txqi->txq.sta, | 3665 | struct sta_info *sta = container_of(txqi->txq.sta, |
@@ -3672,24 +3676,30 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) | |||
3672 | 3676 | ||
3673 | 3677 | ||
3674 | if (txqi->schedule_round == local->schedule_round[ac]) | 3678 | if (txqi->schedule_round == local->schedule_round[ac]) |
3675 | return NULL; | 3679 | goto out; |
3676 | 3680 | ||
3677 | list_del_init(&txqi->schedule_order); | 3681 | list_del_init(&txqi->schedule_order); |
3678 | txqi->schedule_round = local->schedule_round[ac]; | 3682 | txqi->schedule_round = local->schedule_round[ac]; |
3679 | return &txqi->txq; | 3683 | ret = &txqi->txq; |
3684 | |||
3685 | out: | ||
3686 | spin_unlock_bh(&local->active_txq_lock[ac]); | ||
3687 | return ret; | ||
3680 | } | 3688 | } |
3681 | EXPORT_SYMBOL(ieee80211_next_txq); | 3689 | EXPORT_SYMBOL(ieee80211_next_txq); |
3682 | 3690 | ||
3683 | void ieee80211_return_txq(struct ieee80211_hw *hw, | 3691 | void __ieee80211_schedule_txq(struct ieee80211_hw *hw, |
3684 | struct ieee80211_txq *txq) | 3692 | struct ieee80211_txq *txq, |
3693 | bool force) | ||
3685 | { | 3694 | { |
3686 | struct ieee80211_local *local = hw_to_local(hw); | 3695 | struct ieee80211_local *local = hw_to_local(hw); |
3687 | struct txq_info *txqi = to_txq_info(txq); | 3696 | struct txq_info *txqi = to_txq_info(txq); |
3688 | 3697 | ||
3689 | lockdep_assert_held(&local->active_txq_lock[txq->ac]); | 3698 | spin_lock_bh(&local->active_txq_lock[txq->ac]); |
3690 | 3699 | ||
3691 | if (list_empty(&txqi->schedule_order) && | 3700 | if (list_empty(&txqi->schedule_order) && |
3692 | (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) { | 3701 | (force || !skb_queue_empty(&txqi->frags) || |
3702 | txqi->tin.backlog_packets)) { | ||
3693 | /* If airtime accounting is active, always enqueue STAs at the | 3703 | /* If airtime accounting is active, always enqueue STAs at the |
3694 | * head of the list to ensure that they only get moved to the | 3704 | * head of the list to ensure that they only get moved to the |
3695 | * back by the airtime DRR scheduler once they have a negative | 3705 | * back by the airtime DRR scheduler once they have a negative |
@@ -3706,20 +3716,10 @@ void ieee80211_return_txq(struct ieee80211_hw *hw, | |||
3706 | list_add_tail(&txqi->schedule_order, | 3716 | list_add_tail(&txqi->schedule_order, |
3707 | &local->active_txqs[txq->ac]); | 3717 | &local->active_txqs[txq->ac]); |
3708 | } | 3718 | } |
3709 | } | ||
3710 | EXPORT_SYMBOL(ieee80211_return_txq); | ||
3711 | 3719 | ||
3712 | void ieee80211_schedule_txq(struct ieee80211_hw *hw, | ||
3713 | struct ieee80211_txq *txq) | ||
3714 | __acquires(txq_lock) __releases(txq_lock) | ||
3715 | { | ||
3716 | struct ieee80211_local *local = hw_to_local(hw); | ||
3717 | |||
3718 | spin_lock_bh(&local->active_txq_lock[txq->ac]); | ||
3719 | ieee80211_return_txq(hw, txq); | ||
3720 | spin_unlock_bh(&local->active_txq_lock[txq->ac]); | 3720 | spin_unlock_bh(&local->active_txq_lock[txq->ac]); |
3721 | } | 3721 | } |
3722 | EXPORT_SYMBOL(ieee80211_schedule_txq); | 3722 | EXPORT_SYMBOL(__ieee80211_schedule_txq); |
3723 | 3723 | ||
3724 | bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, | 3724 | bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, |
3725 | struct ieee80211_txq *txq) | 3725 | struct ieee80211_txq *txq) |
@@ -3729,7 +3729,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, | |||
3729 | struct sta_info *sta; | 3729 | struct sta_info *sta; |
3730 | u8 ac = txq->ac; | 3730 | u8 ac = txq->ac; |
3731 | 3731 | ||
3732 | lockdep_assert_held(&local->active_txq_lock[ac]); | 3732 | spin_lock_bh(&local->active_txq_lock[ac]); |
3733 | 3733 | ||
3734 | if (!txqi->txq.sta) | 3734 | if (!txqi->txq.sta) |
3735 | goto out; | 3735 | goto out; |
@@ -3759,34 +3759,27 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, | |||
3759 | 3759 | ||
3760 | sta->airtime[ac].deficit += sta->airtime_weight; | 3760 | sta->airtime[ac].deficit += sta->airtime_weight; |
3761 | list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]); | 3761 | list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]); |
3762 | spin_unlock_bh(&local->active_txq_lock[ac]); | ||
3762 | 3763 | ||
3763 | return false; | 3764 | return false; |
3764 | out: | 3765 | out: |
3765 | if (!list_empty(&txqi->schedule_order)) | 3766 | if (!list_empty(&txqi->schedule_order)) |
3766 | list_del_init(&txqi->schedule_order); | 3767 | list_del_init(&txqi->schedule_order); |
3768 | spin_unlock_bh(&local->active_txq_lock[ac]); | ||
3767 | 3769 | ||
3768 | return true; | 3770 | return true; |
3769 | } | 3771 | } |
3770 | EXPORT_SYMBOL(ieee80211_txq_may_transmit); | 3772 | EXPORT_SYMBOL(ieee80211_txq_may_transmit); |
3771 | 3773 | ||
3772 | void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac) | 3774 | void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac) |
3773 | __acquires(txq_lock) | ||
3774 | { | 3775 | { |
3775 | struct ieee80211_local *local = hw_to_local(hw); | 3776 | struct ieee80211_local *local = hw_to_local(hw); |
3776 | 3777 | ||
3777 | spin_lock_bh(&local->active_txq_lock[ac]); | 3778 | spin_lock_bh(&local->active_txq_lock[ac]); |
3778 | local->schedule_round[ac]++; | 3779 | local->schedule_round[ac]++; |
3779 | } | ||
3780 | EXPORT_SYMBOL(ieee80211_txq_schedule_start); | ||
3781 | |||
3782 | void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac) | ||
3783 | __releases(txq_lock) | ||
3784 | { | ||
3785 | struct ieee80211_local *local = hw_to_local(hw); | ||
3786 | |||
3787 | spin_unlock_bh(&local->active_txq_lock[ac]); | 3780 | spin_unlock_bh(&local->active_txq_lock[ac]); |
3788 | } | 3781 | } |
3789 | EXPORT_SYMBOL(ieee80211_txq_schedule_end); | 3782 | EXPORT_SYMBOL(ieee80211_txq_schedule_start); |
3790 | 3783 | ||
3791 | void __ieee80211_subif_start_xmit(struct sk_buff *skb, | 3784 | void __ieee80211_subif_start_xmit(struct sk_buff *skb, |
3792 | struct net_device *dev, | 3785 | struct net_device *dev, |
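With this rework the active_txq_lock is taken and dropped inside ieee80211_txq_schedule_start(), ieee80211_next_txq(), ieee80211_txq_may_transmit() and __ieee80211_schedule_txq() themselves, so a driver no longer brackets its dequeue loop with a held lock. A hypothetical driver-side sketch of the calling pattern, using only symbols exported in this hunk; the example_* names are invented, and a real driver would hand frames to hardware instead of freeing them:

#include <net/mac80211.h>

/* Invented placeholder for the driver's real "push one frame to hardware". */
static void example_hw_queue_frame(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	ieee80211_free_txskb(hw, skb);
}

static void example_drv_schedule_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	ieee80211_txq_schedule_start(hw, ac);	/* bumps schedule_round under the lock */

	while ((txq = ieee80211_next_txq(hw, ac)) != NULL) {
		struct sk_buff *skb;

		while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL)
			example_hw_queue_frame(hw, skb);

		/* Put the txq back on the schedule if it still has traffic;
		 * force == false, and locking happens inside the call.
		 */
		__ieee80211_schedule_txq(hw, txq, false);
	}
}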
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index f28e937320a3..216ab915dd54 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -988,7 +988,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
988 | struct netlink_sock *nlk = nlk_sk(sk); | 988 | struct netlink_sock *nlk = nlk_sk(sk); |
989 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; | 989 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; |
990 | int err = 0; | 990 | int err = 0; |
991 | unsigned long groups = nladdr->nl_groups; | 991 | unsigned long groups; |
992 | bool bound; | 992 | bool bound; |
993 | 993 | ||
994 | if (addr_len < sizeof(struct sockaddr_nl)) | 994 | if (addr_len < sizeof(struct sockaddr_nl)) |
@@ -996,6 +996,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
996 | 996 | ||
997 | if (nladdr->nl_family != AF_NETLINK) | 997 | if (nladdr->nl_family != AF_NETLINK) |
998 | return -EINVAL; | 998 | return -EINVAL; |
999 | groups = nladdr->nl_groups; | ||
999 | 1000 | ||
1000 | /* Only superuser is allowed to listen multicasts */ | 1001 | /* Only superuser is allowed to listen multicasts */ |
1001 | if (groups) { | 1002 | if (groups) { |
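The netlink_bind() change is a validate-before-read fix: nl_groups sits past sa_family in sockaddr_nl, so it must not be read until addr_len is known to cover the whole structure. The rds, udpv6 and sctp hunks in this series apply the same rule with offsetofend(). A userspace sketch of the idiom, with an invented function name:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

/* Only touch fields that addr_len proves are present. */
static int example_parse_nl_addr(const struct sockaddr *addr, socklen_t addr_len,
				 unsigned long *groups)
{
	const struct sockaddr_nl *nladdr = (const struct sockaddr_nl *)addr;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -1;			/* too short: nl_groups may be garbage */
	if (nladdr->nl_family != AF_NETLINK)
		return -1;

	*groups = nladdr->nl_groups;		/* safe only after the length check */
	return 0;
}

int main(void)
{
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK, .nl_groups = 0x5 };
	unsigned long groups = 0;

	if (!example_parse_nl_addr((struct sockaddr *)&sa, sizeof(sa), &groups))
		printf("groups = 0x%lx\n", groups);
	return 0;
}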
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 1d3144d19903..71ffd1a6dc7c 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void) | |||
1392 | int i; | 1392 | int i; |
1393 | int rc = proto_register(&nr_proto, 0); | 1393 | int rc = proto_register(&nr_proto, 0); |
1394 | 1394 | ||
1395 | if (rc != 0) | 1395 | if (rc) |
1396 | goto out; | 1396 | return rc; |
1397 | 1397 | ||
1398 | if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) { | 1398 | if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) { |
1399 | printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n"); | 1399 | pr_err("NET/ROM: %s - nr_ndevs parameter too large\n", |
1400 | return -1; | 1400 | __func__); |
1401 | rc = -EINVAL; | ||
1402 | goto unregister_proto; | ||
1401 | } | 1403 | } |
1402 | 1404 | ||
1403 | dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL); | 1405 | dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL); |
1404 | if (dev_nr == NULL) { | 1406 | if (!dev_nr) { |
1405 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); | 1407 | pr_err("NET/ROM: %s - unable to allocate device array\n", |
1406 | return -1; | 1408 | __func__); |
1409 | rc = -ENOMEM; | ||
1410 | goto unregister_proto; | ||
1407 | } | 1411 | } |
1408 | 1412 | ||
1409 | for (i = 0; i < nr_ndevs; i++) { | 1413 | for (i = 0; i < nr_ndevs; i++) { |
@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void) | |||
1413 | sprintf(name, "nr%d", i); | 1417 | sprintf(name, "nr%d", i); |
1414 | dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup); | 1418 | dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup); |
1415 | if (!dev) { | 1419 | if (!dev) { |
1416 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n"); | 1420 | rc = -ENOMEM; |
1417 | goto fail; | 1421 | goto fail; |
1418 | } | 1422 | } |
1419 | 1423 | ||
1420 | dev->base_addr = i; | 1424 | dev->base_addr = i; |
1421 | if (register_netdev(dev)) { | 1425 | rc = register_netdev(dev); |
1422 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n"); | 1426 | if (rc) { |
1423 | free_netdev(dev); | 1427 | free_netdev(dev); |
1424 | goto fail; | 1428 | goto fail; |
1425 | } | 1429 | } |
@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void) | |||
1427 | dev_nr[i] = dev; | 1431 | dev_nr[i] = dev; |
1428 | } | 1432 | } |
1429 | 1433 | ||
1430 | if (sock_register(&nr_family_ops)) { | 1434 | rc = sock_register(&nr_family_ops); |
1431 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n"); | 1435 | if (rc) |
1432 | goto fail; | 1436 | goto fail; |
1433 | } | ||
1434 | 1437 | ||
1435 | register_netdevice_notifier(&nr_dev_notifier); | 1438 | rc = register_netdevice_notifier(&nr_dev_notifier); |
1439 | if (rc) | ||
1440 | goto out_sock; | ||
1436 | 1441 | ||
1437 | ax25_register_pid(&nr_pid); | 1442 | ax25_register_pid(&nr_pid); |
1438 | ax25_linkfail_register(&nr_linkfail_notifier); | 1443 | ax25_linkfail_register(&nr_linkfail_notifier); |
1439 | 1444 | ||
1440 | #ifdef CONFIG_SYSCTL | 1445 | #ifdef CONFIG_SYSCTL |
1441 | nr_register_sysctl(); | 1446 | rc = nr_register_sysctl(); |
1447 | if (rc) | ||
1448 | goto out_sysctl; | ||
1442 | #endif | 1449 | #endif |
1443 | 1450 | ||
1444 | nr_loopback_init(); | 1451 | nr_loopback_init(); |
1445 | 1452 | ||
1446 | proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops); | 1453 | rc = -ENOMEM; |
1447 | proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops); | 1454 | if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops)) |
1448 | proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops); | 1455 | goto proc_remove1; |
1449 | out: | 1456 | if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net, |
1450 | return rc; | 1457 | &nr_neigh_seqops)) |
1458 | goto proc_remove2; | ||
1459 | if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net, | ||
1460 | &nr_node_seqops)) | ||
1461 | goto proc_remove3; | ||
1462 | |||
1463 | return 0; | ||
1464 | |||
1465 | proc_remove3: | ||
1466 | remove_proc_entry("nr_neigh", init_net.proc_net); | ||
1467 | proc_remove2: | ||
1468 | remove_proc_entry("nr", init_net.proc_net); | ||
1469 | proc_remove1: | ||
1470 | |||
1471 | nr_loopback_clear(); | ||
1472 | nr_rt_free(); | ||
1473 | |||
1474 | #ifdef CONFIG_SYSCTL | ||
1475 | nr_unregister_sysctl(); | ||
1476 | out_sysctl: | ||
1477 | #endif | ||
1478 | ax25_linkfail_release(&nr_linkfail_notifier); | ||
1479 | ax25_protocol_release(AX25_P_NETROM); | ||
1480 | unregister_netdevice_notifier(&nr_dev_notifier); | ||
1481 | out_sock: | ||
1482 | sock_unregister(PF_NETROM); | ||
1451 | fail: | 1483 | fail: |
1452 | while (--i >= 0) { | 1484 | while (--i >= 0) { |
1453 | unregister_netdev(dev_nr[i]); | 1485 | unregister_netdev(dev_nr[i]); |
1454 | free_netdev(dev_nr[i]); | 1486 | free_netdev(dev_nr[i]); |
1455 | } | 1487 | } |
1456 | kfree(dev_nr); | 1488 | kfree(dev_nr); |
1489 | unregister_proto: | ||
1457 | proto_unregister(&nr_proto); | 1490 | proto_unregister(&nr_proto); |
1458 | rc = -1; | 1491 | return rc; |
1459 | goto out; | ||
1460 | } | 1492 | } |
1461 | 1493 | ||
1462 | module_init(nr_proto_init); | 1494 | module_init(nr_proto_init); |
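nr_proto_init() now returns real error codes and unwinds every earlier registration in reverse order through a ladder of labels, instead of returning -1 and leaking what had already been set up. A generic standalone sketch of that unwind idiom; the step_*/undo_* functions are invented for illustration:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* pretend this registration fails */

static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int example_init(void)
{
	int rc;

	rc = step_a();
	if (rc)
		return rc;
	rc = step_b();
	if (rc)
		goto out_a;
	rc = step_c();
	if (rc)
		goto out_b;
	return 0;

out_b:
	undo_b();
out_a:
	undo_a();
	return rc;
}

int main(void)
{
	printf("example_init() = %d\n", example_init());	/* undoes b, then a */
	return 0;
}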
diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c index 215ad22a9647..93d13f019981 100644 --- a/net/netrom/nr_loopback.c +++ b/net/netrom/nr_loopback.c | |||
@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused) | |||
70 | } | 70 | } |
71 | } | 71 | } |
72 | 72 | ||
73 | void __exit nr_loopback_clear(void) | 73 | void nr_loopback_clear(void) |
74 | { | 74 | { |
75 | del_timer_sync(&loopback_timer); | 75 | del_timer_sync(&loopback_timer); |
76 | skb_queue_purge(&loopback_queue); | 76 | skb_queue_purge(&loopback_queue); |
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index 6485f593e2f0..b76aa668a94b 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c | |||
@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = { | |||
953 | /* | 953 | /* |
954 | * Free all memory associated with the nodes and routes lists. | 954 | * Free all memory associated with the nodes and routes lists. |
955 | */ | 955 | */ |
956 | void __exit nr_rt_free(void) | 956 | void nr_rt_free(void) |
957 | { | 957 | { |
958 | struct nr_neigh *s = NULL; | 958 | struct nr_neigh *s = NULL; |
959 | struct nr_node *t = NULL; | 959 | struct nr_node *t = NULL; |
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c index ba1c368b3f18..771011b84270 100644 --- a/net/netrom/sysctl_net_netrom.c +++ b/net/netrom/sysctl_net_netrom.c | |||
@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = { | |||
146 | { } | 146 | { } |
147 | }; | 147 | }; |
148 | 148 | ||
149 | void __init nr_register_sysctl(void) | 149 | int __init nr_register_sysctl(void) |
150 | { | 150 | { |
151 | nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table); | 151 | nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table); |
152 | if (!nr_table_header) | ||
153 | return -ENOMEM; | ||
154 | return 0; | ||
152 | } | 155 | } |
153 | 156 | ||
154 | void nr_unregister_sysctl(void) | 157 | void nr_unregister_sysctl(void) |
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index d6cc97fbbbb0..2b969f99ef13 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c | |||
@@ -543,6 +543,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr, | |||
543 | struct rds_sock *rs = rds_sk_to_rs(sk); | 543 | struct rds_sock *rs = rds_sk_to_rs(sk); |
544 | int ret = 0; | 544 | int ret = 0; |
545 | 545 | ||
546 | if (addr_len < offsetofend(struct sockaddr, sa_family)) | ||
547 | return -EINVAL; | ||
548 | |||
546 | lock_sock(sk); | 549 | lock_sock(sk); |
547 | 550 | ||
548 | switch (uaddr->sa_family) { | 551 | switch (uaddr->sa_family) { |
diff --git a/net/rds/bind.c b/net/rds/bind.c index 17c9d9f0c848..0f4398e7f2a7 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c | |||
@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
173 | /* We allow an RDS socket to be bound to either IPv4 or IPv6 | 173 | /* We allow an RDS socket to be bound to either IPv4 or IPv6 |
174 | * address. | 174 | * address. |
175 | */ | 175 | */ |
176 | if (addr_len < offsetofend(struct sockaddr, sa_family)) | ||
177 | return -EINVAL; | ||
176 | if (uaddr->sa_family == AF_INET) { | 178 | if (uaddr->sa_family == AF_INET) { |
177 | struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; | 179 | struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; |
178 | 180 | ||
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 96f2952bbdfd..ae8c5d7f3bf1 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
@@ -135,7 +135,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len) | |||
135 | struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr; | 135 | struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr; |
136 | struct rxrpc_local *local; | 136 | struct rxrpc_local *local; |
137 | struct rxrpc_sock *rx = rxrpc_sk(sock->sk); | 137 | struct rxrpc_sock *rx = rxrpc_sk(sock->sk); |
138 | u16 service_id = srx->srx_service; | 138 | u16 service_id; |
139 | int ret; | 139 | int ret; |
140 | 140 | ||
141 | _enter("%p,%p,%d", rx, saddr, len); | 141 | _enter("%p,%p,%d", rx, saddr, len); |
@@ -143,6 +143,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len) | |||
143 | ret = rxrpc_validate_address(rx, srx, len); | 143 | ret = rxrpc_validate_address(rx, srx, len); |
144 | if (ret < 0) | 144 | if (ret < 0) |
145 | goto error; | 145 | goto error; |
146 | service_id = srx->srx_service; | ||
146 | 147 | ||
147 | lock_sock(&rx->sk); | 148 | lock_sock(&rx->sk); |
148 | 149 | ||
@@ -370,18 +371,22 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call); | |||
370 | * rxrpc_kernel_check_life - Check to see whether a call is still alive | 371 | * rxrpc_kernel_check_life - Check to see whether a call is still alive |
371 | * @sock: The socket the call is on | 372 | * @sock: The socket the call is on |
372 | * @call: The call to check | 373 | * @call: The call to check |
374 | * @_life: Where to store the life value | ||
373 | * | 375 | * |
374 | * Allow a kernel service to find out whether a call is still alive - ie. we're | 376 | * Allow a kernel service to find out whether a call is still alive - ie. we're |
375 | * getting ACKs from the server. Returns a number representing the life state | 377 | * getting ACKs from the server. Passes back in *_life a number representing |
376 | * which can be compared to that returned by a previous call. | 378 | * the life state which can be compared to that returned by a previous call and |
379 | * return true if the call is still alive. | ||
377 | * | 380 | * |
378 | * If the life state stalls, rxrpc_kernel_probe_life() should be called and | 381 | * If the life state stalls, rxrpc_kernel_probe_life() should be called and |
379 | * then 2RTT waited. | 382 | * then 2RTT waited. |
380 | */ | 383 | */ |
381 | u32 rxrpc_kernel_check_life(const struct socket *sock, | 384 | bool rxrpc_kernel_check_life(const struct socket *sock, |
382 | const struct rxrpc_call *call) | 385 | const struct rxrpc_call *call, |
386 | u32 *_life) | ||
383 | { | 387 | { |
384 | return call->acks_latest; | 388 | *_life = call->acks_latest; |
389 | return call->state != RXRPC_CALL_COMPLETE; | ||
385 | } | 390 | } |
386 | EXPORT_SYMBOL(rxrpc_kernel_check_life); | 391 | EXPORT_SYMBOL(rxrpc_kernel_check_life); |
387 | 392 | ||
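rxrpc_kernel_check_life() now reports completion through its bool return value and the ACK-driven counter through *_life, so a kernel-service caller keeps its own previous sample and checks both. A hypothetical caller-side sketch; the wrapper name and the cached last_life policy are not from this patch:

#include <linux/types.h>
#include <net/af_rxrpc.h>

static bool example_call_still_alive(struct socket *sock, struct rxrpc_call *call,
				     u32 *last_life)
{
	u32 life;

	if (!rxrpc_kernel_check_life(sock, call, &life))
		return false;		/* the call has already completed */

	if (life == *last_life)
		return false;		/* no ACK progress since the last sample */

	*last_life = life;
	return true;
}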
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 4b1a534d290a..062ca9dc29b8 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h | |||
@@ -654,6 +654,7 @@ struct rxrpc_call { | |||
654 | u8 ackr_reason; /* reason to ACK */ | 654 | u8 ackr_reason; /* reason to ACK */ |
655 | u16 ackr_skew; /* skew on packet being ACK'd */ | 655 | u16 ackr_skew; /* skew on packet being ACK'd */ |
656 | rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */ | 656 | rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */ |
657 | rxrpc_serial_t ackr_first_seq; /* first sequence number received */ | ||
657 | rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */ | 658 | rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */ |
658 | rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */ | 659 | rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */ |
659 | rxrpc_seq_t ackr_seen; /* Highest packet shown seen */ | 660 | rxrpc_seq_t ackr_seen; /* Highest packet shown seen */ |
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index b6fca8ebb117..8d31fb4c51e1 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c | |||
@@ -153,7 +153,8 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, | |||
153 | * pass a connection-level abort onto all calls on that connection | 153 | * pass a connection-level abort onto all calls on that connection |
154 | */ | 154 | */ |
155 | static void rxrpc_abort_calls(struct rxrpc_connection *conn, | 155 | static void rxrpc_abort_calls(struct rxrpc_connection *conn, |
156 | enum rxrpc_call_completion compl) | 156 | enum rxrpc_call_completion compl, |
157 | rxrpc_serial_t serial) | ||
157 | { | 158 | { |
158 | struct rxrpc_call *call; | 159 | struct rxrpc_call *call; |
159 | int i; | 160 | int i; |
@@ -173,6 +174,9 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, | |||
173 | call->call_id, 0, | 174 | call->call_id, 0, |
174 | conn->abort_code, | 175 | conn->abort_code, |
175 | conn->error); | 176 | conn->error); |
177 | else | ||
178 | trace_rxrpc_rx_abort(call, serial, | ||
179 | conn->abort_code); | ||
176 | if (rxrpc_set_call_completion(call, compl, | 180 | if (rxrpc_set_call_completion(call, compl, |
177 | conn->abort_code, | 181 | conn->abort_code, |
178 | conn->error)) | 182 | conn->error)) |
@@ -213,8 +217,6 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, | |||
213 | conn->state = RXRPC_CONN_LOCALLY_ABORTED; | 217 | conn->state = RXRPC_CONN_LOCALLY_ABORTED; |
214 | spin_unlock_bh(&conn->state_lock); | 218 | spin_unlock_bh(&conn->state_lock); |
215 | 219 | ||
216 | rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED); | ||
217 | |||
218 | msg.msg_name = &conn->params.peer->srx.transport; | 220 | msg.msg_name = &conn->params.peer->srx.transport; |
219 | msg.msg_namelen = conn->params.peer->srx.transport_len; | 221 | msg.msg_namelen = conn->params.peer->srx.transport_len; |
220 | msg.msg_control = NULL; | 222 | msg.msg_control = NULL; |
@@ -242,6 +244,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, | |||
242 | len = iov[0].iov_len + iov[1].iov_len; | 244 | len = iov[0].iov_len + iov[1].iov_len; |
243 | 245 | ||
244 | serial = atomic_inc_return(&conn->serial); | 246 | serial = atomic_inc_return(&conn->serial); |
247 | rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial); | ||
245 | whdr.serial = htonl(serial); | 248 | whdr.serial = htonl(serial); |
246 | _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code); | 249 | _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code); |
247 | 250 | ||
@@ -321,7 +324,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, | |||
321 | conn->error = -ECONNABORTED; | 324 | conn->error = -ECONNABORTED; |
322 | conn->abort_code = abort_code; | 325 | conn->abort_code = abort_code; |
323 | conn->state = RXRPC_CONN_REMOTELY_ABORTED; | 326 | conn->state = RXRPC_CONN_REMOTELY_ABORTED; |
324 | rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED); | 327 | rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial); |
325 | return -ECONNABORTED; | 328 | return -ECONNABORTED; |
326 | 329 | ||
327 | case RXRPC_PACKET_TYPE_CHALLENGE: | 330 | case RXRPC_PACKET_TYPE_CHALLENGE: |
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 9128aa0e40aa..4c6f9d0a00e7 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c | |||
@@ -837,7 +837,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
837 | u8 acks[RXRPC_MAXACKS]; | 837 | u8 acks[RXRPC_MAXACKS]; |
838 | } buf; | 838 | } buf; |
839 | rxrpc_serial_t acked_serial; | 839 | rxrpc_serial_t acked_serial; |
840 | rxrpc_seq_t first_soft_ack, hard_ack; | 840 | rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt; |
841 | int nr_acks, offset, ioffset; | 841 | int nr_acks, offset, ioffset; |
842 | 842 | ||
843 | _enter(""); | 843 | _enter(""); |
@@ -851,13 +851,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
851 | 851 | ||
852 | acked_serial = ntohl(buf.ack.serial); | 852 | acked_serial = ntohl(buf.ack.serial); |
853 | first_soft_ack = ntohl(buf.ack.firstPacket); | 853 | first_soft_ack = ntohl(buf.ack.firstPacket); |
854 | prev_pkt = ntohl(buf.ack.previousPacket); | ||
854 | hard_ack = first_soft_ack - 1; | 855 | hard_ack = first_soft_ack - 1; |
855 | nr_acks = buf.ack.nAcks; | 856 | nr_acks = buf.ack.nAcks; |
856 | summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ? | 857 | summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ? |
857 | buf.ack.reason : RXRPC_ACK__INVALID); | 858 | buf.ack.reason : RXRPC_ACK__INVALID); |
858 | 859 | ||
859 | trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial, | 860 | trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial, |
860 | first_soft_ack, ntohl(buf.ack.previousPacket), | 861 | first_soft_ack, prev_pkt, |
861 | summary.ack_reason, nr_acks); | 862 | summary.ack_reason, nr_acks); |
862 | 863 | ||
863 | if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE) | 864 | if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE) |
@@ -878,8 +879,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
878 | rxrpc_propose_ack_respond_to_ack); | 879 | rxrpc_propose_ack_respond_to_ack); |
879 | } | 880 | } |
880 | 881 | ||
881 | /* Discard any out-of-order or duplicate ACKs. */ | 882 | /* Discard any out-of-order or duplicate ACKs (outside lock). */ |
882 | if (before_eq(sp->hdr.serial, call->acks_latest)) | 883 | if (before(first_soft_ack, call->ackr_first_seq) || |
884 | before(prev_pkt, call->ackr_prev_seq)) | ||
883 | return; | 885 | return; |
884 | 886 | ||
885 | buf.info.rxMTU = 0; | 887 | buf.info.rxMTU = 0; |
@@ -890,12 +892,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
890 | 892 | ||
891 | spin_lock(&call->input_lock); | 893 | spin_lock(&call->input_lock); |
892 | 894 | ||
893 | /* Discard any out-of-order or duplicate ACKs. */ | 895 | /* Discard any out-of-order or duplicate ACKs (inside lock). */ |
894 | if (before_eq(sp->hdr.serial, call->acks_latest)) | 896 | if (before(first_soft_ack, call->ackr_first_seq) || |
897 | before(prev_pkt, call->ackr_prev_seq)) | ||
895 | goto out; | 898 | goto out; |
896 | call->acks_latest_ts = skb->tstamp; | 899 | call->acks_latest_ts = skb->tstamp; |
897 | call->acks_latest = sp->hdr.serial; | 900 | call->acks_latest = sp->hdr.serial; |
898 | 901 | ||
902 | call->ackr_first_seq = first_soft_ack; | ||
903 | call->ackr_prev_seq = prev_pkt; | ||
904 | |||
899 | /* Parse rwind and mtu sizes if provided. */ | 905 | /* Parse rwind and mtu sizes if provided. */ |
900 | if (buf.info.rxMTU) | 906 | if (buf.info.rxMTU) |
901 | rxrpc_input_ackinfo(call, skb, &buf.info); | 907 | rxrpc_input_ackinfo(call, skb, &buf.info); |
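The replacement duplicate-ACK test compares first_soft_ack and prev_pkt against the values cached from the previous ACK, using the wrap-safe before() comparison instead of the packet serial. That comparison is the usual signed-subtraction idiom on 32-bit sequence numbers, shown standalone here:

#include <stdio.h>
#include <stdint.h>

/* Wrap-safe "seq1 < seq2" for 32-bit sequence numbers. */
static int seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	/* A value just past the wrap point still counts as "after". */
	printf("%d\n", seq_before(0xfffffffeU, 0x00000003U));	/* 1: before */
	printf("%d\n", seq_before(0x00000003U, 0xfffffffeU));	/* 0: not before */
	return 0;
}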
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index bc05af89fc38..6e84d878053c 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c | |||
@@ -157,6 +157,11 @@ void rxrpc_error_report(struct sock *sk) | |||
157 | 157 | ||
158 | _enter("%p{%d}", sk, local->debug_id); | 158 | _enter("%p{%d}", sk, local->debug_id); |
159 | 159 | ||
160 | /* Clear the outstanding error value on the socket so that it doesn't | ||
161 | * cause kernel_sendmsg() to return it later. | ||
162 | */ | ||
163 | sock_error(sk); | ||
164 | |||
160 | skb = sock_dequeue_err_skb(sk); | 165 | skb = sock_dequeue_err_skb(sk); |
161 | if (!skb) { | 166 | if (!skb) { |
162 | _leave("UDP socket errqueue empty"); | 167 | _leave("UDP socket errqueue empty"); |
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 46c9312085b1..bec64deb7b0a 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c | |||
@@ -152,12 +152,13 @@ static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call, | |||
152 | } | 152 | } |
153 | 153 | ||
154 | /* | 154 | /* |
155 | * Queue a DATA packet for transmission, set the resend timeout and send the | 155 | * Queue a DATA packet for transmission, set the resend timeout and send |
156 | * packet immediately | 156 | * the packet immediately. Returns the error from rxrpc_send_data_packet() |
157 | * in case the caller wants to do something with it. | ||
157 | */ | 158 | */ |
158 | static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, | 159 | static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, |
159 | struct sk_buff *skb, bool last, | 160 | struct sk_buff *skb, bool last, |
160 | rxrpc_notify_end_tx_t notify_end_tx) | 161 | rxrpc_notify_end_tx_t notify_end_tx) |
161 | { | 162 | { |
162 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | 163 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
163 | unsigned long now; | 164 | unsigned long now; |
@@ -250,7 +251,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, | |||
250 | 251 | ||
251 | out: | 252 | out: |
252 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); | 253 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); |
253 | _leave(""); | 254 | _leave(" = %d", ret); |
255 | return ret; | ||
254 | } | 256 | } |
255 | 257 | ||
256 | /* | 258 | /* |
@@ -423,9 +425,10 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, | |||
423 | if (ret < 0) | 425 | if (ret < 0) |
424 | goto out; | 426 | goto out; |
425 | 427 | ||
426 | rxrpc_queue_packet(rx, call, skb, | 428 | ret = rxrpc_queue_packet(rx, call, skb, |
427 | !msg_data_left(msg) && !more, | 429 | !msg_data_left(msg) && !more, |
428 | notify_end_tx); | 430 | notify_end_tx); |
431 | /* Should check for failure here */ | ||
429 | skb = NULL; | 432 | skb = NULL; |
430 | } | 433 | } |
431 | } while (msg_data_left(msg) > 0); | 434 | } while (msg_data_left(msg) > 0); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9874e60c9b0d..4583fa914e62 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -4847,7 +4847,8 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr, | |||
4847 | } | 4847 | } |
4848 | 4848 | ||
4849 | /* Validate addr_len before calling common connect/connectx routine. */ | 4849 | /* Validate addr_len before calling common connect/connectx routine. */ |
4850 | af = sctp_get_af_specific(addr->sa_family); | 4850 | af = addr_len < offsetofend(struct sockaddr, sa_family) ? NULL : |
4851 | sctp_get_af_specific(addr->sa_family); | ||
4851 | if (!af || addr_len < af->sockaddr_len) { | 4852 | if (!af || addr_len < af->sockaddr_len) { |
4852 | err = -EINVAL; | 4853 | err = -EINVAL; |
4853 | } else { | 4854 | } else { |
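The sctp_connect() hunk, like the rds and udpv6 ones earlier, refuses to dispatch on sa_family unless addr_len at least reaches the end of that field, expressed as offsetofend(struct sockaddr, sa_family). A standalone illustration of what that bound evaluates to; offsetofend is redefined locally here, equivalent to the kernel helper from linux/stddef.h:

#include <stdio.h>
#include <stddef.h>
#include <sys/socket.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

int main(void)
{
	size_t need = offsetofend(struct sockaddr, sa_family);

	/* Any addr_len below this cannot safely be dispatched on family. */
	printf("minimum addr_len to read sa_family: %zu bytes\n", need);
	return 0;
}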
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 77ef53596d18..6f869ef49b32 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
@@ -167,10 +167,9 @@ static int smc_release(struct socket *sock) | |||
167 | 167 | ||
168 | if (sk->sk_state == SMC_CLOSED) { | 168 | if (sk->sk_state == SMC_CLOSED) { |
169 | if (smc->clcsock) { | 169 | if (smc->clcsock) { |
170 | mutex_lock(&smc->clcsock_release_lock); | 170 | release_sock(sk); |
171 | sock_release(smc->clcsock); | 171 | smc_clcsock_release(smc); |
172 | smc->clcsock = NULL; | 172 | lock_sock(sk); |
173 | mutex_unlock(&smc->clcsock_release_lock); | ||
174 | } | 173 | } |
175 | if (!smc->use_fallback) | 174 | if (!smc->use_fallback) |
176 | smc_conn_free(&smc->conn); | 175 | smc_conn_free(&smc->conn); |
@@ -446,10 +445,19 @@ static void smc_link_save_peer_info(struct smc_link *link, | |||
446 | link->peer_mtu = clc->qp_mtu; | 445 | link->peer_mtu = clc->qp_mtu; |
447 | } | 446 | } |
448 | 447 | ||
448 | static void smc_switch_to_fallback(struct smc_sock *smc) | ||
449 | { | ||
450 | smc->use_fallback = true; | ||
451 | if (smc->sk.sk_socket && smc->sk.sk_socket->file) { | ||
452 | smc->clcsock->file = smc->sk.sk_socket->file; | ||
453 | smc->clcsock->file->private_data = smc->clcsock; | ||
454 | } | ||
455 | } | ||
456 | |||
449 | /* fall back during connect */ | 457 | /* fall back during connect */ |
450 | static int smc_connect_fallback(struct smc_sock *smc, int reason_code) | 458 | static int smc_connect_fallback(struct smc_sock *smc, int reason_code) |
451 | { | 459 | { |
452 | smc->use_fallback = true; | 460 | smc_switch_to_fallback(smc); |
453 | smc->fallback_rsn = reason_code; | 461 | smc->fallback_rsn = reason_code; |
454 | smc_copy_sock_settings_to_clc(smc); | 462 | smc_copy_sock_settings_to_clc(smc); |
455 | if (smc->sk.sk_state == SMC_INIT) | 463 | if (smc->sk.sk_state == SMC_INIT) |
@@ -775,10 +783,14 @@ static void smc_connect_work(struct work_struct *work) | |||
775 | smc->sk.sk_err = -rc; | 783 | smc->sk.sk_err = -rc; |
776 | 784 | ||
777 | out: | 785 | out: |
778 | if (smc->sk.sk_err) | 786 | if (!sock_flag(&smc->sk, SOCK_DEAD)) { |
779 | smc->sk.sk_state_change(&smc->sk); | 787 | if (smc->sk.sk_err) { |
780 | else | 788 | smc->sk.sk_state_change(&smc->sk); |
781 | smc->sk.sk_write_space(&smc->sk); | 789 | } else { /* allow polling before and after fallback decision */ |
790 | smc->clcsock->sk->sk_write_space(smc->clcsock->sk); | ||
791 | smc->sk.sk_write_space(&smc->sk); | ||
792 | } | ||
793 | } | ||
782 | kfree(smc->connect_info); | 794 | kfree(smc->connect_info); |
783 | smc->connect_info = NULL; | 795 | smc->connect_info = NULL; |
784 | release_sock(&smc->sk); | 796 | release_sock(&smc->sk); |
@@ -872,11 +884,11 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc) | |||
872 | if (rc < 0) | 884 | if (rc < 0) |
873 | lsk->sk_err = -rc; | 885 | lsk->sk_err = -rc; |
874 | if (rc < 0 || lsk->sk_state == SMC_CLOSED) { | 886 | if (rc < 0 || lsk->sk_state == SMC_CLOSED) { |
887 | new_sk->sk_prot->unhash(new_sk); | ||
875 | if (new_clcsock) | 888 | if (new_clcsock) |
876 | sock_release(new_clcsock); | 889 | sock_release(new_clcsock); |
877 | new_sk->sk_state = SMC_CLOSED; | 890 | new_sk->sk_state = SMC_CLOSED; |
878 | sock_set_flag(new_sk, SOCK_DEAD); | 891 | sock_set_flag(new_sk, SOCK_DEAD); |
879 | new_sk->sk_prot->unhash(new_sk); | ||
880 | sock_put(new_sk); /* final */ | 892 | sock_put(new_sk); /* final */ |
881 | *new_smc = NULL; | 893 | *new_smc = NULL; |
882 | goto out; | 894 | goto out; |
@@ -927,16 +939,21 @@ struct sock *smc_accept_dequeue(struct sock *parent, | |||
927 | 939 | ||
928 | smc_accept_unlink(new_sk); | 940 | smc_accept_unlink(new_sk); |
929 | if (new_sk->sk_state == SMC_CLOSED) { | 941 | if (new_sk->sk_state == SMC_CLOSED) { |
942 | new_sk->sk_prot->unhash(new_sk); | ||
930 | if (isk->clcsock) { | 943 | if (isk->clcsock) { |
931 | sock_release(isk->clcsock); | 944 | sock_release(isk->clcsock); |
932 | isk->clcsock = NULL; | 945 | isk->clcsock = NULL; |
933 | } | 946 | } |
934 | new_sk->sk_prot->unhash(new_sk); | ||
935 | sock_put(new_sk); /* final */ | 947 | sock_put(new_sk); /* final */ |
936 | continue; | 948 | continue; |
937 | } | 949 | } |
938 | if (new_sock) | 950 | if (new_sock) { |
939 | sock_graft(new_sk, new_sock); | 951 | sock_graft(new_sk, new_sock); |
952 | if (isk->use_fallback) { | ||
953 | smc_sk(new_sk)->clcsock->file = new_sock->file; | ||
954 | isk->clcsock->file->private_data = isk->clcsock; | ||
955 | } | ||
956 | } | ||
940 | return new_sk; | 957 | return new_sk; |
941 | } | 958 | } |
942 | return NULL; | 959 | return NULL; |
@@ -956,6 +973,7 @@ void smc_close_non_accepted(struct sock *sk) | |||
956 | sock_set_flag(sk, SOCK_DEAD); | 973 | sock_set_flag(sk, SOCK_DEAD); |
957 | sk->sk_shutdown |= SHUTDOWN_MASK; | 974 | sk->sk_shutdown |= SHUTDOWN_MASK; |
958 | } | 975 | } |
976 | sk->sk_prot->unhash(sk); | ||
959 | if (smc->clcsock) { | 977 | if (smc->clcsock) { |
960 | struct socket *tcp; | 978 | struct socket *tcp; |
961 | 979 | ||
@@ -971,7 +989,6 @@ void smc_close_non_accepted(struct sock *sk) | |||
971 | smc_conn_free(&smc->conn); | 989 | smc_conn_free(&smc->conn); |
972 | } | 990 | } |
973 | release_sock(sk); | 991 | release_sock(sk); |
974 | sk->sk_prot->unhash(sk); | ||
975 | sock_put(sk); /* final sock_put */ | 992 | sock_put(sk); /* final sock_put */ |
976 | } | 993 | } |
977 | 994 | ||
@@ -1037,13 +1054,13 @@ static void smc_listen_out(struct smc_sock *new_smc) | |||
1037 | struct smc_sock *lsmc = new_smc->listen_smc; | 1054 | struct smc_sock *lsmc = new_smc->listen_smc; |
1038 | struct sock *newsmcsk = &new_smc->sk; | 1055 | struct sock *newsmcsk = &new_smc->sk; |
1039 | 1056 | ||
1040 | lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING); | ||
1041 | if (lsmc->sk.sk_state == SMC_LISTEN) { | 1057 | if (lsmc->sk.sk_state == SMC_LISTEN) { |
1058 | lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING); | ||
1042 | smc_accept_enqueue(&lsmc->sk, newsmcsk); | 1059 | smc_accept_enqueue(&lsmc->sk, newsmcsk); |
1060 | release_sock(&lsmc->sk); | ||
1043 | } else { /* no longer listening */ | 1061 | } else { /* no longer listening */ |
1044 | smc_close_non_accepted(newsmcsk); | 1062 | smc_close_non_accepted(newsmcsk); |
1045 | } | 1063 | } |
1046 | release_sock(&lsmc->sk); | ||
1047 | 1064 | ||
1048 | /* Wake up accept */ | 1065 | /* Wake up accept */ |
1049 | lsmc->sk.sk_data_ready(&lsmc->sk); | 1066 | lsmc->sk.sk_data_ready(&lsmc->sk); |
@@ -1087,7 +1104,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code, | |||
1087 | return; | 1104 | return; |
1088 | } | 1105 | } |
1089 | smc_conn_free(&new_smc->conn); | 1106 | smc_conn_free(&new_smc->conn); |
1090 | new_smc->use_fallback = true; | 1107 | smc_switch_to_fallback(new_smc); |
1091 | new_smc->fallback_rsn = reason_code; | 1108 | new_smc->fallback_rsn = reason_code; |
1092 | if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) { | 1109 | if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) { |
1093 | if (smc_clc_send_decline(new_smc, reason_code) < 0) { | 1110 | if (smc_clc_send_decline(new_smc, reason_code) < 0) { |
@@ -1237,6 +1254,9 @@ static void smc_listen_work(struct work_struct *work) | |||
1237 | int rc = 0; | 1254 | int rc = 0; |
1238 | u8 ibport; | 1255 | u8 ibport; |
1239 | 1256 | ||
1257 | if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN) | ||
1258 | return smc_listen_out_err(new_smc); | ||
1259 | |||
1240 | if (new_smc->use_fallback) { | 1260 | if (new_smc->use_fallback) { |
1241 | smc_listen_out_connected(new_smc); | 1261 | smc_listen_out_connected(new_smc); |
1242 | return; | 1262 | return; |
@@ -1244,7 +1264,7 @@ static void smc_listen_work(struct work_struct *work) | |||
1244 | 1264 | ||
1245 | /* check if peer is smc capable */ | 1265 | /* check if peer is smc capable */ |
1246 | if (!tcp_sk(newclcsock->sk)->syn_smc) { | 1266 | if (!tcp_sk(newclcsock->sk)->syn_smc) { |
1247 | new_smc->use_fallback = true; | 1267 | smc_switch_to_fallback(new_smc); |
1248 | new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC; | 1268 | new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC; |
1249 | smc_listen_out_connected(new_smc); | 1269 | smc_listen_out_connected(new_smc); |
1250 | return; | 1270 | return; |
@@ -1501,7 +1521,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) | |||
1501 | 1521 | ||
1502 | if (msg->msg_flags & MSG_FASTOPEN) { | 1522 | if (msg->msg_flags & MSG_FASTOPEN) { |
1503 | if (sk->sk_state == SMC_INIT) { | 1523 | if (sk->sk_state == SMC_INIT) { |
1504 | smc->use_fallback = true; | 1524 | smc_switch_to_fallback(smc); |
1505 | smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; | 1525 | smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; |
1506 | } else { | 1526 | } else { |
1507 | rc = -EINVAL; | 1527 | rc = -EINVAL; |
@@ -1703,7 +1723,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, | |||
1703 | case TCP_FASTOPEN_NO_COOKIE: | 1723 | case TCP_FASTOPEN_NO_COOKIE: |
1704 | /* option not supported by SMC */ | 1724 | /* option not supported by SMC */ |
1705 | if (sk->sk_state == SMC_INIT) { | 1725 | if (sk->sk_state == SMC_INIT) { |
1706 | smc->use_fallback = true; | 1726 | smc_switch_to_fallback(smc); |
1707 | smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; | 1727 | smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; |
1708 | } else { | 1728 | } else { |
1709 | if (!smc->use_fallback) | 1729 | if (!smc->use_fallback) |
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index 2ad37e998509..fc06720b53c1 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c | |||
@@ -21,6 +21,22 @@ | |||
21 | 21 | ||
22 | #define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME (5 * HZ) | 22 | #define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME (5 * HZ) |
23 | 23 | ||
24 | /* release the clcsock that is assigned to the smc_sock */ | ||
25 | void smc_clcsock_release(struct smc_sock *smc) | ||
26 | { | ||
27 | struct socket *tcp; | ||
28 | |||
29 | if (smc->listen_smc && current_work() != &smc->smc_listen_work) | ||
30 | cancel_work_sync(&smc->smc_listen_work); | ||
31 | mutex_lock(&smc->clcsock_release_lock); | ||
32 | if (smc->clcsock) { | ||
33 | tcp = smc->clcsock; | ||
34 | smc->clcsock = NULL; | ||
35 | sock_release(tcp); | ||
36 | } | ||
37 | mutex_unlock(&smc->clcsock_release_lock); | ||
38 | } | ||
39 | |||
24 | static void smc_close_cleanup_listen(struct sock *parent) | 40 | static void smc_close_cleanup_listen(struct sock *parent) |
25 | { | 41 | { |
26 | struct sock *sk; | 42 | struct sock *sk; |
@@ -321,6 +337,7 @@ static void smc_close_passive_work(struct work_struct *work) | |||
321 | close_work); | 337 | close_work); |
322 | struct smc_sock *smc = container_of(conn, struct smc_sock, conn); | 338 | struct smc_sock *smc = container_of(conn, struct smc_sock, conn); |
323 | struct smc_cdc_conn_state_flags *rxflags; | 339 | struct smc_cdc_conn_state_flags *rxflags; |
340 | bool release_clcsock = false; | ||
324 | struct sock *sk = &smc->sk; | 341 | struct sock *sk = &smc->sk; |
325 | int old_state; | 342 | int old_state; |
326 | 343 | ||
@@ -400,13 +417,13 @@ wakeup: | |||
400 | if ((sk->sk_state == SMC_CLOSED) && | 417 | if ((sk->sk_state == SMC_CLOSED) && |
401 | (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) { | 418 | (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) { |
402 | smc_conn_free(conn); | 419 | smc_conn_free(conn); |
403 | if (smc->clcsock) { | 420 | if (smc->clcsock) |
404 | sock_release(smc->clcsock); | 421 | release_clcsock = true; |
405 | smc->clcsock = NULL; | ||
406 | } | ||
407 | } | 422 | } |
408 | } | 423 | } |
409 | release_sock(sk); | 424 | release_sock(sk); |
425 | if (release_clcsock) | ||
426 | smc_clcsock_release(smc); | ||
410 | sock_put(sk); /* sock_hold done by schedulers of close_work */ | 427 | sock_put(sk); /* sock_hold done by schedulers of close_work */ |
411 | } | 428 | } |
412 | 429 | ||
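The smc_close.c changes introduce smc_clcsock_release(), which waits for a pending smc_listen_work and tears down the clcsock under clcsock_release_lock, and they make smc_close_passive_work() defer that teardown until after release_sock(sk), presumably to avoid ordering problems with work items that themselves take the socket lock. A condensed sketch of the resulting calling pattern (kernel context assumed, the surrounding state-machine code elided):

	bool release_clcsock = false;

	lock_sock(sk);
	/* ... passive close state machine ... */
	if (sk->sk_state == SMC_CLOSED &&
	    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
		smc_conn_free(conn);
		if (smc->clcsock)
			release_clcsock = true;	/* defer, do not free it here */
	}
	release_sock(sk);

	if (release_clcsock)
		smc_clcsock_release(smc);	/* may flush smc_listen_work and sleep */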
diff --git a/net/smc/smc_close.h b/net/smc/smc_close.h index 19eb6a211c23..e0e3b5df25d2 100644 --- a/net/smc/smc_close.h +++ b/net/smc/smc_close.h | |||
@@ -23,5 +23,6 @@ void smc_close_wake_tx_prepared(struct smc_sock *smc); | |||
23 | int smc_close_active(struct smc_sock *smc); | 23 | int smc_close_active(struct smc_sock *smc); |
24 | int smc_close_shutdown_write(struct smc_sock *smc); | 24 | int smc_close_shutdown_write(struct smc_sock *smc); |
25 | void smc_close_init(struct smc_sock *smc); | 25 | void smc_close_init(struct smc_sock *smc); |
26 | void smc_clcsock_release(struct smc_sock *smc); | ||
26 | 27 | ||
27 | #endif /* SMC_CLOSE_H */ | 28 | #endif /* SMC_CLOSE_H */ |
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c index 2fff79db1a59..e89e918b88e0 100644 --- a/net/smc/smc_ism.c +++ b/net/smc/smc_ism.c | |||
@@ -289,6 +289,11 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name, | |||
289 | INIT_LIST_HEAD(&smcd->vlan); | 289 | INIT_LIST_HEAD(&smcd->vlan); |
290 | smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)", | 290 | smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)", |
291 | WQ_MEM_RECLAIM, name); | 291 | WQ_MEM_RECLAIM, name); |
292 | if (!smcd->event_wq) { | ||
293 | kfree(smcd->conn); | ||
294 | kfree(smcd); | ||
295 | return NULL; | ||
296 | } | ||
292 | return smcd; | 297 | return smcd; |
293 | } | 298 | } |
294 | EXPORT_SYMBOL_GPL(smcd_alloc_dev); | 299 | EXPORT_SYMBOL_GPL(smcd_alloc_dev); |
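The smc_ism.c hunk frees the allocations made earlier in smcd_alloc_dev() when alloc_ordered_workqueue() fails instead of returning a half-initialized device. An equivalent goto-based unwind is the more common kernel idiom once several allocations are involved; the sketch below assumes the earlier kzalloc()/kcalloc() calls and abbreviates the real function's signature, so treat it as illustrative only.

struct smcd_dev *smcd_alloc_dev_sketch(const char *name, int max_dmbs)
{
	struct smcd_dev *smcd;

	smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);		/* assumed */
	if (!smcd)
		return NULL;
	smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
			     GFP_KERNEL);			/* assumed */
	if (!smcd->conn)
		goto free_dev;
	smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
						 WQ_MEM_RECLAIM, name);
	if (!smcd->event_wq)
		goto free_conn;		/* unwind in reverse allocation order */
	return smcd;

free_conn:
	kfree(smcd->conn);
free_dev:
	kfree(smcd);
	return NULL;
}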
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 8d2f6296279c..0285c7f9e79b 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c | |||
@@ -603,7 +603,8 @@ static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info) | |||
603 | { | 603 | { |
604 | struct net *net = genl_info_net(info); | 604 | struct net *net = genl_info_net(info); |
605 | 605 | ||
606 | return smc_pnet_remove_by_pnetid(net, NULL); | 606 | smc_pnet_remove_by_pnetid(net, NULL); |
607 | return 0; | ||
607 | } | 608 | } |
608 | 609 | ||
609 | /* SMC_PNETID generic netlink operation definition */ | 610 | /* SMC_PNETID generic netlink operation definition */ |
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 860dcfb95ee4..fa6c977b4c41 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c | |||
@@ -140,13 +140,11 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, | |||
140 | /* We are going to append to the frags_list of head. | 140 | /* We are going to append to the frags_list of head. |
141 | * Need to unshare the frag_list. | 141 | * Need to unshare the frag_list. |
142 | */ | 142 | */ |
143 | if (skb_has_frag_list(head)) { | 143 | err = skb_unclone(head, GFP_ATOMIC); |
144 | err = skb_unclone(head, GFP_ATOMIC); | 144 | if (err) { |
145 | if (err) { | 145 | STRP_STATS_INCR(strp->stats.mem_fail); |
146 | STRP_STATS_INCR(strp->stats.mem_fail); | 146 | desc->error = err; |
147 | desc->error = err; | 147 | return 0; |
148 | return 0; | ||
149 | } | ||
150 | } | 148 | } |
151 | 149 | ||
152 | if (unlikely(skb_shinfo(head)->frag_list)) { | 150 | if (unlikely(skb_shinfo(head)->frag_list)) { |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 341ecd796aa4..131aa2f0fd27 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -869,6 +869,8 @@ void tipc_link_reset(struct tipc_link *l) | |||
869 | __skb_queue_head_init(&list); | 869 | __skb_queue_head_init(&list); |
870 | 870 | ||
871 | l->in_session = false; | 871 | l->in_session = false; |
872 | /* Force re-synch of peer session number before establishing */ | ||
873 | l->peer_session--; | ||
872 | l->session++; | 874 | l->session++; |
873 | l->mtu = l->advertised_mtu; | 875 | l->mtu = l->advertised_mtu; |
874 | 876 | ||
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index bff241f03525..89993afe0fbd 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -909,7 +909,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg, | |||
909 | for (; i < TIPC_NAMETBL_SIZE; i++) { | 909 | for (; i < TIPC_NAMETBL_SIZE; i++) { |
910 | head = &tn->nametbl->services[i]; | 910 | head = &tn->nametbl->services[i]; |
911 | 911 | ||
912 | if (*last_type) { | 912 | if (*last_type || |
913 | (!i && *last_key && (*last_lower == *last_key))) { | ||
913 | service = tipc_service_find(net, *last_type); | 914 | service = tipc_service_find(net, *last_type); |
914 | if (!service) | 915 | if (!service) |
915 | return -EPIPE; | 916 | return -EPIPE; |
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c index 3481e4906bd6..9df82a573aa7 100644 --- a/net/tipc/sysctl.c +++ b/net/tipc/sysctl.c | |||
@@ -38,6 +38,8 @@ | |||
38 | 38 | ||
39 | #include <linux/sysctl.h> | 39 | #include <linux/sysctl.h> |
40 | 40 | ||
41 | static int zero; | ||
42 | static int one = 1; | ||
41 | static struct ctl_table_header *tipc_ctl_hdr; | 43 | static struct ctl_table_header *tipc_ctl_hdr; |
42 | 44 | ||
43 | static struct ctl_table tipc_table[] = { | 45 | static struct ctl_table tipc_table[] = { |
@@ -46,14 +48,16 @@ static struct ctl_table tipc_table[] = { | |||
46 | .data = &sysctl_tipc_rmem, | 48 | .data = &sysctl_tipc_rmem, |
47 | .maxlen = sizeof(sysctl_tipc_rmem), | 49 | .maxlen = sizeof(sysctl_tipc_rmem), |
48 | .mode = 0644, | 50 | .mode = 0644, |
49 | .proc_handler = proc_dointvec, | 51 | .proc_handler = proc_dointvec_minmax, |
52 | .extra1 = &one, | ||
50 | }, | 53 | }, |
51 | { | 54 | { |
52 | .procname = "named_timeout", | 55 | .procname = "named_timeout", |
53 | .data = &sysctl_tipc_named_timeout, | 56 | .data = &sysctl_tipc_named_timeout, |
54 | .maxlen = sizeof(sysctl_tipc_named_timeout), | 57 | .maxlen = sizeof(sysctl_tipc_named_timeout), |
55 | .mode = 0644, | 58 | .mode = 0644, |
56 | .proc_handler = proc_dointvec, | 59 | .proc_handler = proc_dointvec_minmax, |
60 | .extra1 = &zero, | ||
57 | }, | 61 | }, |
58 | { | 62 | { |
59 | .procname = "sk_filter", | 63 | .procname = "sk_filter", |
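The tipc sysctl hunks switch the handlers from proc_dointvec to proc_dointvec_minmax and attach a lower bound through .extra1. proc_dointvec_minmax rejects writes outside the bounds supplied via .extra1/.extra2 and ignores a bound whose pointer is NULL, so these entries gain only a lower bound. A minimal sketch of an entry bounded on both ends (the variable names and the upper-bound value are illustrative, not taken from the patch):

static int example_min = 1;
static int example_max = 3600000;	/* illustrative upper bound */

static struct ctl_table example_table[] = {
	{
		.procname	= "named_timeout",
		.data		= &sysctl_tipc_named_timeout,
		.maxlen		= sizeof(sysctl_tipc_named_timeout),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &example_min,		/* reject values < 1 */
		.extra2		= &example_max,		/* reject values > max */
	},
	{ }
};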
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 135a7ee9db03..9f3bdbc1e593 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c | |||
@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock); | |||
52 | 52 | ||
53 | static void tls_device_free_ctx(struct tls_context *ctx) | 53 | static void tls_device_free_ctx(struct tls_context *ctx) |
54 | { | 54 | { |
55 | if (ctx->tx_conf == TLS_HW) | 55 | if (ctx->tx_conf == TLS_HW) { |
56 | kfree(tls_offload_ctx_tx(ctx)); | 56 | kfree(tls_offload_ctx_tx(ctx)); |
57 | kfree(ctx->tx.rec_seq); | ||
58 | kfree(ctx->tx.iv); | ||
59 | } | ||
57 | 60 | ||
58 | if (ctx->rx_conf == TLS_HW) | 61 | if (ctx->rx_conf == TLS_HW) |
59 | kfree(tls_offload_ctx_rx(ctx)); | 62 | kfree(tls_offload_ctx_rx(ctx)); |
@@ -216,6 +219,13 @@ void tls_device_sk_destruct(struct sock *sk) | |||
216 | } | 219 | } |
217 | EXPORT_SYMBOL(tls_device_sk_destruct); | 220 | EXPORT_SYMBOL(tls_device_sk_destruct); |
218 | 221 | ||
222 | void tls_device_free_resources_tx(struct sock *sk) | ||
223 | { | ||
224 | struct tls_context *tls_ctx = tls_get_ctx(sk); | ||
225 | |||
226 | tls_free_partial_record(sk, tls_ctx); | ||
227 | } | ||
228 | |||
219 | static void tls_append_frag(struct tls_record_info *record, | 229 | static void tls_append_frag(struct tls_record_info *record, |
220 | struct page_frag *pfrag, | 230 | struct page_frag *pfrag, |
221 | int size) | 231 | int size) |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index df921a2904b9..9547cea0ce3b 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
@@ -208,6 +208,26 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, | |||
208 | return tls_push_sg(sk, ctx, sg, offset, flags); | 208 | return tls_push_sg(sk, ctx, sg, offset, flags); |
209 | } | 209 | } |
210 | 210 | ||
211 | bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx) | ||
212 | { | ||
213 | struct scatterlist *sg; | ||
214 | |||
215 | sg = ctx->partially_sent_record; | ||
216 | if (!sg) | ||
217 | return false; | ||
218 | |||
219 | while (1) { | ||
220 | put_page(sg_page(sg)); | ||
221 | sk_mem_uncharge(sk, sg->length); | ||
222 | |||
223 | if (sg_is_last(sg)) | ||
224 | break; | ||
225 | sg++; | ||
226 | } | ||
227 | ctx->partially_sent_record = NULL; | ||
228 | return true; | ||
229 | } | ||
230 | |||
211 | static void tls_write_space(struct sock *sk) | 231 | static void tls_write_space(struct sock *sk) |
212 | { | 232 | { |
213 | struct tls_context *ctx = tls_get_ctx(sk); | 233 | struct tls_context *ctx = tls_get_ctx(sk); |
@@ -267,6 +287,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) | |||
267 | kfree(ctx->tx.rec_seq); | 287 | kfree(ctx->tx.rec_seq); |
268 | kfree(ctx->tx.iv); | 288 | kfree(ctx->tx.iv); |
269 | tls_sw_free_resources_tx(sk); | 289 | tls_sw_free_resources_tx(sk); |
290 | #ifdef CONFIG_TLS_DEVICE | ||
291 | } else if (ctx->tx_conf == TLS_HW) { | ||
292 | tls_device_free_resources_tx(sk); | ||
293 | #endif | ||
270 | } | 294 | } |
271 | 295 | ||
272 | if (ctx->rx_conf == TLS_SW) { | 296 | if (ctx->rx_conf == TLS_SW) { |
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 20b191227969..b50ced862f6f 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
@@ -2052,20 +2052,7 @@ void tls_sw_free_resources_tx(struct sock *sk) | |||
2052 | /* Free up un-sent records in tx_list. First, free | 2052 | /* Free up un-sent records in tx_list. First, free |
2053 | * the partially sent record if any at head of tx_list. | 2053 | * the partially sent record if any at head of tx_list. |
2054 | */ | 2054 | */ |
2055 | if (tls_ctx->partially_sent_record) { | 2055 | if (tls_free_partial_record(sk, tls_ctx)) { |
2056 | struct scatterlist *sg = tls_ctx->partially_sent_record; | ||
2057 | |||
2058 | while (1) { | ||
2059 | put_page(sg_page(sg)); | ||
2060 | sk_mem_uncharge(sk, sg->length); | ||
2061 | |||
2062 | if (sg_is_last(sg)) | ||
2063 | break; | ||
2064 | sg++; | ||
2065 | } | ||
2066 | |||
2067 | tls_ctx->partially_sent_record = NULL; | ||
2068 | |||
2069 | rec = list_first_entry(&ctx->tx_list, | 2056 | rec = list_first_entry(&ctx->tx_list, |
2070 | struct tls_rec, list); | 2057 | struct tls_rec, list); |
2071 | list_del(&rec->list); | 2058 | list_del(&rec->list); |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 25a9e3b5c154..47e30a58566c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -13650,7 +13650,8 @@ static const struct genl_ops nl80211_ops[] = { | |||
13650 | .policy = nl80211_policy, | 13650 | .policy = nl80211_policy, |
13651 | .flags = GENL_UNS_ADMIN_PERM, | 13651 | .flags = GENL_UNS_ADMIN_PERM, |
13652 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | | 13652 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | |
13653 | NL80211_FLAG_NEED_RTNL, | 13653 | NL80211_FLAG_NEED_RTNL | |
13654 | NL80211_FLAG_CLEAR_SKB, | ||
13654 | }, | 13655 | }, |
13655 | { | 13656 | { |
13656 | .cmd = NL80211_CMD_DEAUTHENTICATE, | 13657 | .cmd = NL80211_CMD_DEAUTHENTICATE, |
@@ -13701,7 +13702,8 @@ static const struct genl_ops nl80211_ops[] = { | |||
13701 | .policy = nl80211_policy, | 13702 | .policy = nl80211_policy, |
13702 | .flags = GENL_UNS_ADMIN_PERM, | 13703 | .flags = GENL_UNS_ADMIN_PERM, |
13703 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | | 13704 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | |
13704 | NL80211_FLAG_NEED_RTNL, | 13705 | NL80211_FLAG_NEED_RTNL | |
13706 | NL80211_FLAG_CLEAR_SKB, | ||
13705 | }, | 13707 | }, |
13706 | { | 13708 | { |
13707 | .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS, | 13709 | .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS, |
@@ -13709,7 +13711,8 @@ static const struct genl_ops nl80211_ops[] = { | |||
13709 | .policy = nl80211_policy, | 13711 | .policy = nl80211_policy, |
13710 | .flags = GENL_ADMIN_PERM, | 13712 | .flags = GENL_ADMIN_PERM, |
13711 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | | 13713 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | |
13712 | NL80211_FLAG_NEED_RTNL, | 13714 | NL80211_FLAG_NEED_RTNL | |
13715 | NL80211_FLAG_CLEAR_SKB, | ||
13713 | }, | 13716 | }, |
13714 | { | 13717 | { |
13715 | .cmd = NL80211_CMD_DISCONNECT, | 13718 | .cmd = NL80211_CMD_DISCONNECT, |
@@ -13738,7 +13741,8 @@ static const struct genl_ops nl80211_ops[] = { | |||
13738 | .policy = nl80211_policy, | 13741 | .policy = nl80211_policy, |
13739 | .flags = GENL_UNS_ADMIN_PERM, | 13742 | .flags = GENL_UNS_ADMIN_PERM, |
13740 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | | 13743 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | |
13741 | NL80211_FLAG_NEED_RTNL, | 13744 | NL80211_FLAG_NEED_RTNL | |
13745 | NL80211_FLAG_CLEAR_SKB, | ||
13742 | }, | 13746 | }, |
13743 | { | 13747 | { |
13744 | .cmd = NL80211_CMD_DEL_PMKSA, | 13748 | .cmd = NL80211_CMD_DEL_PMKSA, |
@@ -14090,7 +14094,8 @@ static const struct genl_ops nl80211_ops[] = { | |||
14090 | .policy = nl80211_policy, | 14094 | .policy = nl80211_policy, |
14091 | .flags = GENL_UNS_ADMIN_PERM, | 14095 | .flags = GENL_UNS_ADMIN_PERM, |
14092 | .internal_flags = NL80211_FLAG_NEED_WIPHY | | 14096 | .internal_flags = NL80211_FLAG_NEED_WIPHY | |
14093 | NL80211_FLAG_NEED_RTNL, | 14097 | NL80211_FLAG_NEED_RTNL | |
14098 | NL80211_FLAG_CLEAR_SKB, | ||
14094 | }, | 14099 | }, |
14095 | { | 14100 | { |
14096 | .cmd = NL80211_CMD_SET_QOS_MAP, | 14101 | .cmd = NL80211_CMD_SET_QOS_MAP, |
@@ -14145,7 +14150,8 @@ static const struct genl_ops nl80211_ops[] = { | |||
14145 | .doit = nl80211_set_pmk, | 14150 | .doit = nl80211_set_pmk, |
14146 | .policy = nl80211_policy, | 14151 | .policy = nl80211_policy, |
14147 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | | 14152 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | |
14148 | NL80211_FLAG_NEED_RTNL, | 14153 | NL80211_FLAG_NEED_RTNL | |
14154 | NL80211_FLAG_CLEAR_SKB, | ||
14149 | }, | 14155 | }, |
14150 | { | 14156 | { |
14151 | .cmd = NL80211_CMD_DEL_PMK, | 14157 | .cmd = NL80211_CMD_DEL_PMK, |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 2f1bf91eb226..0ba778f371cb 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -1309,6 +1309,16 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1, | |||
1309 | return dfs_region1; | 1309 | return dfs_region1; |
1310 | } | 1310 | } |
1311 | 1311 | ||
1312 | static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1, | ||
1313 | const struct ieee80211_wmm_ac *wmm_ac2, | ||
1314 | struct ieee80211_wmm_ac *intersect) | ||
1315 | { | ||
1316 | intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min); | ||
1317 | intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max); | ||
1318 | intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot); | ||
1319 | intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn); | ||
1320 | } | ||
1321 | |||
1312 | /* | 1322 | /* |
1313 | * Helper for regdom_intersect(), this does the real | 1323 | * Helper for regdom_intersect(), this does the real |
1314 | * mathematical intersection fun | 1324 | * mathematical intersection fun |
@@ -1323,6 +1333,8 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1, | |||
1323 | struct ieee80211_freq_range *freq_range; | 1333 | struct ieee80211_freq_range *freq_range; |
1324 | const struct ieee80211_power_rule *power_rule1, *power_rule2; | 1334 | const struct ieee80211_power_rule *power_rule1, *power_rule2; |
1325 | struct ieee80211_power_rule *power_rule; | 1335 | struct ieee80211_power_rule *power_rule; |
1336 | const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2; | ||
1337 | struct ieee80211_wmm_rule *wmm_rule; | ||
1326 | u32 freq_diff, max_bandwidth1, max_bandwidth2; | 1338 | u32 freq_diff, max_bandwidth1, max_bandwidth2; |
1327 | 1339 | ||
1328 | freq_range1 = &rule1->freq_range; | 1340 | freq_range1 = &rule1->freq_range; |
@@ -1333,6 +1345,10 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1, | |||
1333 | power_rule2 = &rule2->power_rule; | 1345 | power_rule2 = &rule2->power_rule; |
1334 | power_rule = &intersected_rule->power_rule; | 1346 | power_rule = &intersected_rule->power_rule; |
1335 | 1347 | ||
1348 | wmm_rule1 = &rule1->wmm_rule; | ||
1349 | wmm_rule2 = &rule2->wmm_rule; | ||
1350 | wmm_rule = &intersected_rule->wmm_rule; | ||
1351 | |||
1336 | freq_range->start_freq_khz = max(freq_range1->start_freq_khz, | 1352 | freq_range->start_freq_khz = max(freq_range1->start_freq_khz, |
1337 | freq_range2->start_freq_khz); | 1353 | freq_range2->start_freq_khz); |
1338 | freq_range->end_freq_khz = min(freq_range1->end_freq_khz, | 1354 | freq_range->end_freq_khz = min(freq_range1->end_freq_khz, |
@@ -1376,6 +1392,29 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1, | |||
1376 | intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms, | 1392 | intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms, |
1377 | rule2->dfs_cac_ms); | 1393 | rule2->dfs_cac_ms); |
1378 | 1394 | ||
1395 | if (rule1->has_wmm && rule2->has_wmm) { | ||
1396 | u8 ac; | ||
1397 | |||
1398 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { | ||
1399 | reg_wmm_rules_intersect(&wmm_rule1->client[ac], | ||
1400 | &wmm_rule2->client[ac], | ||
1401 | &wmm_rule->client[ac]); | ||
1402 | reg_wmm_rules_intersect(&wmm_rule1->ap[ac], | ||
1403 | &wmm_rule2->ap[ac], | ||
1404 | &wmm_rule->ap[ac]); | ||
1405 | } | ||
1406 | |||
1407 | intersected_rule->has_wmm = true; | ||
1408 | } else if (rule1->has_wmm) { | ||
1409 | *wmm_rule = *wmm_rule1; | ||
1410 | intersected_rule->has_wmm = true; | ||
1411 | } else if (rule2->has_wmm) { | ||
1412 | *wmm_rule = *wmm_rule2; | ||
1413 | intersected_rule->has_wmm = true; | ||
1414 | } else { | ||
1415 | intersected_rule->has_wmm = false; | ||
1416 | } | ||
1417 | |||
1379 | if (!is_valid_reg_rule(intersected_rule)) | 1418 | if (!is_valid_reg_rule(intersected_rule)) |
1380 | return -EINVAL; | 1419 | return -EINVAL; |
1381 | 1420 | ||
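reg_wmm_rules_intersect() resolves each WMM access-category parameter to the more conservative of the two regulatory domains: the larger cw_min, cw_max and aifsn, and the smaller cot. A short illustrative usage (the values are made up, and the function itself is static to reg.c, so this is only a sketch of its arithmetic):

	struct ieee80211_wmm_ac ac1 = { .cw_min = 4, .cw_max = 10,
					.cot = 2000, .aifsn = 2 };
	struct ieee80211_wmm_ac ac2 = { .cw_min = 7, .cw_max = 15,
					.cot = 1000, .aifsn = 3 };
	struct ieee80211_wmm_ac out;

	reg_wmm_rules_intersect(&ac1, &ac2, &out);
	/* out = { .cw_min = 7, .cw_max = 15, .cot = 1000, .aifsn = 3 }:
	 * the stricter value wins for every field.
	 */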
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 287518c6caa4..04d888628f29 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -190,10 +190,9 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, | |||
190 | /* copy subelement as we need to change its content to | 190 | /* copy subelement as we need to change its content to |
191 | * mark an ie after it is processed. | 191 | * mark an ie after it is processed. |
192 | */ | 192 | */ |
193 | sub_copy = kmalloc(subie_len, gfp); | 193 | sub_copy = kmemdup(subelement, subie_len, gfp); |
194 | if (!sub_copy) | 194 | if (!sub_copy) |
195 | return 0; | 195 | return 0; |
196 | memcpy(sub_copy, subelement, subie_len); | ||
197 | 196 | ||
198 | pos = &new_ie[0]; | 197 | pos = &new_ie[0]; |
199 | 198 | ||
diff --git a/net/wireless/util.c b/net/wireless/util.c index e4b8db5e81ec..75899b62bdc9 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -1220,9 +1220,11 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate) | |||
1220 | else if (rate->bw == RATE_INFO_BW_HE_RU && | 1220 | else if (rate->bw == RATE_INFO_BW_HE_RU && |
1221 | rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26) | 1221 | rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26) |
1222 | result = rates_26[rate->he_gi]; | 1222 | result = rates_26[rate->he_gi]; |
1223 | else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n", | 1223 | else { |
1224 | rate->bw, rate->he_ru_alloc)) | 1224 | WARN(1, "invalid HE MCS: bw:%d, ru:%d\n", |
1225 | rate->bw, rate->he_ru_alloc); | ||
1225 | return 0; | 1226 | return 0; |
1227 | } | ||
1226 | 1228 | ||
1227 | /* now scale to the appropriate MCS */ | 1229 | /* now scale to the appropriate MCS */ |
1228 | tmp = result; | 1230 | tmp = result; |
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh index c4cf6e6d800e..a6c196c8534c 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh | |||
@@ -11,6 +11,7 @@ lib_dir=$(dirname $0)/../../../net/forwarding | |||
11 | 11 | ||
12 | ALL_TESTS=" | 12 | ALL_TESTS=" |
13 | rif_set_addr_test | 13 | rif_set_addr_test |
14 | rif_vrf_set_addr_test | ||
14 | rif_inherit_bridge_addr_test | 15 | rif_inherit_bridge_addr_test |
15 | rif_non_inherit_bridge_addr_test | 16 | rif_non_inherit_bridge_addr_test |
16 | vlan_interface_deletion_test | 17 | vlan_interface_deletion_test |
@@ -98,6 +99,25 @@ rif_set_addr_test() | |||
98 | ip link set dev $swp1 addr $swp1_mac | 99 | ip link set dev $swp1 addr $swp1_mac |
99 | } | 100 | } |
100 | 101 | ||
102 | rif_vrf_set_addr_test() | ||
103 | { | ||
104 | # Test that it is possible to set an IP address on a VRF upper despite | ||
105 | # its random MAC address. | ||
106 | RET=0 | ||
107 | |||
108 | ip link add name vrf-test type vrf table 10 | ||
109 | ip link set dev $swp1 master vrf-test | ||
110 | |||
111 | ip -4 address add 192.0.2.1/24 dev vrf-test | ||
112 | check_err $? "failed to set IPv4 address on VRF" | ||
113 | ip -6 address add 2001:db8:1::1/64 dev vrf-test | ||
114 | check_err $? "failed to set IPv6 address on VRF" | ||
115 | |||
116 | log_test "RIF - setting IP address on VRF" | ||
117 | |||
118 | ip link del dev vrf-test | ||
119 | } | ||
120 | |||
101 | rif_inherit_bridge_addr_test() | 121 | rif_inherit_bridge_addr_test() |
102 | { | 122 | { |
103 | RET=0 | 123 | RET=0 |
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 1080ff55a788..0d2a5f4f1e63 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh | |||
@@ -605,6 +605,39 @@ run_cmd() | |||
605 | return $rc | 605 | return $rc |
606 | } | 606 | } |
607 | 607 | ||
608 | check_expected() | ||
609 | { | ||
610 | local out="$1" | ||
611 | local expected="$2" | ||
612 | local rc=0 | ||
613 | |||
614 | [ "${out}" = "${expected}" ] && return 0 | ||
615 | |||
616 | if [ -z "${out}" ]; then | ||
617 | if [ "$VERBOSE" = "1" ]; then | ||
618 | printf "\nNo route entry found\n" | ||
619 | printf "Expected:\n" | ||
620 | printf " ${expected}\n" | ||
621 | fi | ||
622 | return 1 | ||
623 | fi | ||
624 | |||
625 | # tricky way to convert output to 1-line without ip's | ||
626 | # messy '\'; this drops all extra white space | ||
627 | out=$(echo ${out}) | ||
628 | if [ "${out}" != "${expected}" ]; then | ||
629 | rc=1 | ||
630 | if [ "${VERBOSE}" = "1" ]; then | ||
631 | printf " Unexpected route entry. Have:\n" | ||
632 | printf " ${out}\n" | ||
633 | printf " Expected:\n" | ||
634 | printf " ${expected}\n\n" | ||
635 | fi | ||
636 | fi | ||
637 | |||
638 | return $rc | ||
639 | } | ||
640 | |||
608 | # add route for a prefix, flushing any existing routes first | 641 | # add route for a prefix, flushing any existing routes first |
609 | # expected to be the first step of a test | 642 | # expected to be the first step of a test |
610 | add_route6() | 643 | add_route6() |
@@ -652,31 +685,7 @@ check_route6() | |||
652 | pfx=$1 | 685 | pfx=$1 |
653 | 686 | ||
654 | out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//') | 687 | out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//') |
655 | [ "${out}" = "${expected}" ] && return 0 | 688 | check_expected "${out}" "${expected}" |
656 | |||
657 | if [ -z "${out}" ]; then | ||
658 | if [ "$VERBOSE" = "1" ]; then | ||
659 | printf "\nNo route entry found\n" | ||
660 | printf "Expected:\n" | ||
661 | printf " ${expected}\n" | ||
662 | fi | ||
663 | return 1 | ||
664 | fi | ||
665 | |||
666 | # tricky way to convert output to 1-line without ip's | ||
667 | # messy '\'; this drops all extra white space | ||
668 | out=$(echo ${out}) | ||
669 | if [ "${out}" != "${expected}" ]; then | ||
670 | rc=1 | ||
671 | if [ "${VERBOSE}" = "1" ]; then | ||
672 | printf " Unexpected route entry. Have:\n" | ||
673 | printf " ${out}\n" | ||
674 | printf " Expected:\n" | ||
675 | printf " ${expected}\n\n" | ||
676 | fi | ||
677 | fi | ||
678 | |||
679 | return $rc | ||
680 | } | 689 | } |
681 | 690 | ||
682 | route_cleanup() | 691 | route_cleanup() |
@@ -725,7 +734,7 @@ route_setup() | |||
725 | ip -netns ns2 addr add 172.16.103.2/24 dev veth4 | 734 | ip -netns ns2 addr add 172.16.103.2/24 dev veth4 |
726 | ip -netns ns2 addr add 172.16.104.1/24 dev dummy1 | 735 | ip -netns ns2 addr add 172.16.104.1/24 dev dummy1 |
727 | 736 | ||
728 | set +ex | 737 | set +e |
729 | } | 738 | } |
730 | 739 | ||
731 | # assumption is that basic add of a single path route works | 740 | # assumption is that basic add of a single path route works |
@@ -960,7 +969,8 @@ ipv6_addr_metric_test() | |||
960 | run_cmd "$IP li set dev dummy2 down" | 969 | run_cmd "$IP li set dev dummy2 down" |
961 | rc=$? | 970 | rc=$? |
962 | if [ $rc -eq 0 ]; then | 971 | if [ $rc -eq 0 ]; then |
963 | check_route6 "" | 972 | out=$($IP -6 ro ls match 2001:db8:104::/64) |
973 | check_expected "${out}" "" | ||
964 | rc=$? | 974 | rc=$? |
965 | fi | 975 | fi |
966 | log_test $rc 0 "Prefix route removed on link down" | 976 | log_test $rc 0 "Prefix route removed on link down" |
@@ -1091,38 +1101,13 @@ check_route() | |||
1091 | local pfx | 1101 | local pfx |
1092 | local expected="$1" | 1102 | local expected="$1" |
1093 | local out | 1103 | local out |
1094 | local rc=0 | ||
1095 | 1104 | ||
1096 | set -- $expected | 1105 | set -- $expected |
1097 | pfx=$1 | 1106 | pfx=$1 |
1098 | [ "${pfx}" = "unreachable" ] && pfx=$2 | 1107 | [ "${pfx}" = "unreachable" ] && pfx=$2 |
1099 | 1108 | ||
1100 | out=$($IP ro ls match ${pfx}) | 1109 | out=$($IP ro ls match ${pfx}) |
1101 | [ "${out}" = "${expected}" ] && return 0 | 1110 | check_expected "${out}" "${expected}" |
1102 | |||
1103 | if [ -z "${out}" ]; then | ||
1104 | if [ "$VERBOSE" = "1" ]; then | ||
1105 | printf "\nNo route entry found\n" | ||
1106 | printf "Expected:\n" | ||
1107 | printf " ${expected}\n" | ||
1108 | fi | ||
1109 | return 1 | ||
1110 | fi | ||
1111 | |||
1112 | # tricky way to convert output to 1-line without ip's | ||
1113 | # messy '\'; this drops all extra white space | ||
1114 | out=$(echo ${out}) | ||
1115 | if [ "${out}" != "${expected}" ]; then | ||
1116 | rc=1 | ||
1117 | if [ "${VERBOSE}" = "1" ]; then | ||
1118 | printf " Unexpected route entry. Have:\n" | ||
1119 | printf " ${out}\n" | ||
1120 | printf " Expected:\n" | ||
1121 | printf " ${expected}\n\n" | ||
1122 | fi | ||
1123 | fi | ||
1124 | |||
1125 | return $rc | ||
1126 | } | 1111 | } |
1127 | 1112 | ||
1128 | # assumption is that basic add of a single path route works | 1113 | # assumption is that basic add of a single path route works |
@@ -1387,7 +1372,8 @@ ipv4_addr_metric_test() | |||
1387 | run_cmd "$IP li set dev dummy2 down" | 1372 | run_cmd "$IP li set dev dummy2 down" |
1388 | rc=$? | 1373 | rc=$? |
1389 | if [ $rc -eq 0 ]; then | 1374 | if [ $rc -eq 0 ]; then |
1390 | check_route "" | 1375 | out=$($IP ro ls match 172.16.104.0/24) |
1376 | check_expected "${out}" "" | ||
1391 | rc=$? | 1377 | rc=$? |
1392 | fi | 1378 | fi |
1393 | log_test $rc 0 "Prefix route removed on link down" | 1379 | log_test $rc 0 "Prefix route removed on link down" |