70 files changed, 602 insertions, 316 deletions
diff --git a/Documentation/bpf/bpf_devel_QA.txt b/Documentation/bpf/bpf_devel_QA.txt index 1a0b704e1a38..da57601153a0 100644 --- a/Documentation/bpf/bpf_devel_QA.txt +++ b/Documentation/bpf/bpf_devel_QA.txt | |||
| @@ -557,6 +557,14 @@ A: Although LLVM IR generation and optimization try to stay architecture | |||
| 557 | pulls in some header files containing file scope host assembly codes. | 557 | pulls in some header files containing file scope host assembly codes. |
| 558 | - You can add "-fno-jump-tables" to work around the switch table issue. | 558 | - You can add "-fno-jump-tables" to work around the switch table issue. |
| 559 | 559 | ||
| 560 | Otherwise, you can use bpf target. | 560 | Otherwise, you can use bpf target. Additionally, you _must_ use bpf target |
| 561 | when: | ||
| 562 | |||
| 563 | - Your program uses data structures with pointer or long / unsigned long | ||
| 564 | types that interface with BPF helpers or context data structures. Access | ||
| 565 | into these structures is verified by the BPF verifier and may result | ||
| 566 | in verification failures if the native architecture is not aligned with | ||
| 567 | the BPF architecture, e.g. 64-bit. An example of this is | ||
| 568 | BPF_PROG_TYPE_SK_MSG, which requires '-target bpf'. | ||
| 561 | 569 | ||
| 562 | Happy BPF hacking! | 570 | Happy BPF hacking! |
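As a rough illustration of the new documentation text above (not part of the patch): a minimal BPF_PROG_TYPE_SK_MSG program touches the pointer-typed fields of struct sk_msg_md, which is why it has to be built with something like "clang -O2 -target bpf -c sk_msg_prog.c -o sk_msg_prog.o" rather than for the host target. The file and function names below are hypothetical.

/* sk_msg_prog.c - hypothetical sketch; the pointer fields of struct
 * sk_msg_md must have the width of the 64-bit BPF machine, which the
 * host toolchain's default target does not guarantee.
 */
#include <linux/bpf.h>

#ifndef SEC
#define SEC(name) __attribute__((section(name), used))
#endif

SEC("sk_msg")
int msg_pass_all(struct sk_msg_md *msg)
{
	void *data = msg->data;
	void *data_end = msg->data_end;

	/* bounds check tracked by the verifier through the pointer fields */
	if (data + 1 > data_end)
		return SK_DROP;

	return SK_PASS;
}

char _license[] SEC("license") = "GPL";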
diff --git a/MAINTAINERS b/MAINTAINERS index eab763f17aab..b1ccabd0dbc3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -9725,6 +9725,7 @@ W: https://fedorahosted.org/dropwatch/ | |||
| 9725 | F: net/core/drop_monitor.c | 9725 | F: net/core/drop_monitor.c |
| 9726 | 9726 | ||
| 9727 | NETWORKING DRIVERS | 9727 | NETWORKING DRIVERS |
| 9728 | M: "David S. Miller" <davem@davemloft.net> | ||
| 9728 | L: netdev@vger.kernel.org | 9729 | L: netdev@vger.kernel.org |
| 9729 | W: http://www.linuxfoundation.org/en/Net | 9730 | W: http://www.linuxfoundation.org/en/Net |
| 9730 | Q: http://patchwork.ozlabs.org/project/netdev/list/ | 9731 | Q: http://patchwork.ozlabs.org/project/netdev/list/ |
| @@ -12498,6 +12499,7 @@ F: drivers/scsi/st_*.h | |||
| 12498 | SCTP PROTOCOL | 12499 | SCTP PROTOCOL |
| 12499 | M: Vlad Yasevich <vyasevich@gmail.com> | 12500 | M: Vlad Yasevich <vyasevich@gmail.com> |
| 12500 | M: Neil Horman <nhorman@tuxdriver.com> | 12501 | M: Neil Horman <nhorman@tuxdriver.com> |
| 12502 | M: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com> | ||
| 12501 | L: linux-sctp@vger.kernel.org | 12503 | L: linux-sctp@vger.kernel.org |
| 12502 | W: http://lksctp.sourceforge.net | 12504 | W: http://lksctp.sourceforge.net |
| 12503 | S: Maintained | 12505 | S: Maintained |
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index b725154182cc..263c8453815e 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c | |||
| @@ -1027,7 +1027,17 @@ emit_cond_jmp: /* convert BPF opcode to x86 */ | |||
| 1027 | break; | 1027 | break; |
| 1028 | 1028 | ||
| 1029 | case BPF_JMP | BPF_JA: | 1029 | case BPF_JMP | BPF_JA: |
| 1030 | jmp_offset = addrs[i + insn->off] - addrs[i]; | 1030 | if (insn->off == -1) |
| 1031 | /* -1 jmp instructions will always jump | ||
| 1032 | * backwards two bytes. Explicitly handling | ||
| 1033 | * this case avoids wasting too many passes | ||
| 1034 | * when there are long sequences of replaced | ||
| 1035 | * dead code. | ||
| 1036 | */ | ||
| 1037 | jmp_offset = -2; | ||
| 1038 | else | ||
| 1039 | jmp_offset = addrs[i + insn->off] - addrs[i]; | ||
| 1040 | |||
| 1031 | if (!jmp_offset) | 1041 | if (!jmp_offset) |
| 1032 | /* optimize out nop jumps */ | 1042 | /* optimize out nop jumps */ |
| 1033 | break; | 1043 | break; |
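For context on the fixed -2 offset above (illustration only, not part of this hunk): later in do_jit() the jump is emitted through the existing short/near forms, so an off == -1 BPF_JA, which targets itself, always assembles to the same two bytes regardless of how neighbouring instruction sizes settle between JIT passes. A simplified sketch of that emit logic:

/* With jmp_offset = -2 the is_imm8() branch is taken and the JIT emits
 * the two-byte short jump 0xEB 0xFE, i.e. a jump back to its own first
 * byte. The instruction is dead code and never executes; the point is
 * that its length is known up front and stays stable across passes.
 */
if (is_imm8(jmp_offset))
	EMIT2(0xEB, jmp_offset);		/* jmp rel8 */
else if (is_simm32(jmp_offset))
	EMIT1_off32(0xE9, jmp_offset);		/* jmp rel32 */
else
	return -EFAULT;				/* offset out of range */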
| @@ -1226,6 +1236,7 @@ skip_init_addrs: | |||
| 1226 | for (pass = 0; pass < 20 || image; pass++) { | 1236 | for (pass = 0; pass < 20 || image; pass++) { |
| 1227 | proglen = do_jit(prog, addrs, image, oldproglen, &ctx); | 1237 | proglen = do_jit(prog, addrs, image, oldproglen, &ctx); |
| 1228 | if (proglen <= 0) { | 1238 | if (proglen <= 0) { |
| 1239 | out_image: | ||
| 1229 | image = NULL; | 1240 | image = NULL; |
| 1230 | if (header) | 1241 | if (header) |
| 1231 | bpf_jit_binary_free(header); | 1242 | bpf_jit_binary_free(header); |
| @@ -1236,8 +1247,7 @@ skip_init_addrs: | |||
| 1236 | if (proglen != oldproglen) { | 1247 | if (proglen != oldproglen) { |
| 1237 | pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", | 1248 | pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", |
| 1238 | proglen, oldproglen); | 1249 | proglen, oldproglen); |
| 1239 | prog = orig_prog; | 1250 | goto out_image; |
| 1240 | goto out_addrs; | ||
| 1241 | } | 1251 | } |
| 1242 | break; | 1252 | break; |
| 1243 | } | 1253 | } |
| @@ -1273,7 +1283,7 @@ skip_init_addrs: | |||
| 1273 | prog = orig_prog; | 1283 | prog = orig_prog; |
| 1274 | } | 1284 | } |
| 1275 | 1285 | ||
| 1276 | if (!prog->is_func || extra_pass) { | 1286 | if (!image || !prog->is_func || extra_pass) { |
| 1277 | out_addrs: | 1287 | out_addrs: |
| 1278 | kfree(addrs); | 1288 | kfree(addrs); |
| 1279 | kfree(jit_data); | 1289 | kfree(jit_data); |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index daa919e5a442..241cf4ff9901 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -4757,7 +4757,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector) | |||
| 4757 | { | 4757 | { |
| 4758 | struct mlx5_ib_dev *dev = to_mdev(ibdev); | 4758 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
| 4759 | 4759 | ||
| 4760 | return mlx5_get_vector_affinity(dev->mdev, comp_vector); | 4760 | return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector); |
| 4761 | } | 4761 | } |
| 4762 | 4762 | ||
| 4763 | /* The mlx5_ib_multiport_mutex should be held when calling this function */ | 4763 | /* The mlx5_ib_multiport_mutex should be held when calling this function */ |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f9a3c1a76d5d..f33b25fbca63 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
| @@ -2144,14 +2144,21 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { | |||
| 2144 | .ndo_select_queue = bcm_sysport_select_queue, | 2144 | .ndo_select_queue = bcm_sysport_select_queue, |
| 2145 | }; | 2145 | }; |
| 2146 | 2146 | ||
| 2147 | static int bcm_sysport_map_queues(struct net_device *dev, | 2147 | static int bcm_sysport_map_queues(struct notifier_block *nb, |
| 2148 | struct dsa_notifier_register_info *info) | 2148 | struct dsa_notifier_register_info *info) |
| 2149 | { | 2149 | { |
| 2150 | struct bcm_sysport_priv *priv = netdev_priv(dev); | ||
| 2151 | struct bcm_sysport_tx_ring *ring; | 2150 | struct bcm_sysport_tx_ring *ring; |
| 2151 | struct bcm_sysport_priv *priv; | ||
| 2152 | struct net_device *slave_dev; | 2152 | struct net_device *slave_dev; |
| 2153 | unsigned int num_tx_queues; | 2153 | unsigned int num_tx_queues; |
| 2154 | unsigned int q, start, port; | 2154 | unsigned int q, start, port; |
| 2155 | struct net_device *dev; | ||
| 2156 | |||
| 2157 | priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); | ||
| 2158 | if (priv->netdev != info->master) | ||
| 2159 | return 0; | ||
| 2160 | |||
| 2161 | dev = info->master; | ||
| 2155 | 2162 | ||
| 2156 | /* We can't be setting up queue inspection for non directly attached | 2163 | /* We can't be setting up queue inspection for non directly attached |
| 2157 | * switches | 2164 | * switches |
| @@ -2174,11 +2181,12 @@ static int bcm_sysport_map_queues(struct net_device *dev, | |||
| 2174 | if (priv->is_lite) | 2181 | if (priv->is_lite) |
| 2175 | netif_set_real_num_tx_queues(slave_dev, | 2182 | netif_set_real_num_tx_queues(slave_dev, |
| 2176 | slave_dev->num_tx_queues / 2); | 2183 | slave_dev->num_tx_queues / 2); |
| 2184 | |||
| 2177 | num_tx_queues = slave_dev->real_num_tx_queues; | 2185 | num_tx_queues = slave_dev->real_num_tx_queues; |
| 2178 | 2186 | ||
| 2179 | if (priv->per_port_num_tx_queues && | 2187 | if (priv->per_port_num_tx_queues && |
| 2180 | priv->per_port_num_tx_queues != num_tx_queues) | 2188 | priv->per_port_num_tx_queues != num_tx_queues) |
| 2181 | netdev_warn(slave_dev, "asymetric number of per-port queues\n"); | 2189 | netdev_warn(slave_dev, "asymmetric number of per-port queues\n"); |
| 2182 | 2190 | ||
| 2183 | priv->per_port_num_tx_queues = num_tx_queues; | 2191 | priv->per_port_num_tx_queues = num_tx_queues; |
| 2184 | 2192 | ||
| @@ -2201,7 +2209,7 @@ static int bcm_sysport_map_queues(struct net_device *dev, | |||
| 2201 | return 0; | 2209 | return 0; |
| 2202 | } | 2210 | } |
| 2203 | 2211 | ||
| 2204 | static int bcm_sysport_dsa_notifier(struct notifier_block *unused, | 2212 | static int bcm_sysport_dsa_notifier(struct notifier_block *nb, |
| 2205 | unsigned long event, void *ptr) | 2213 | unsigned long event, void *ptr) |
| 2206 | { | 2214 | { |
| 2207 | struct dsa_notifier_register_info *info; | 2215 | struct dsa_notifier_register_info *info; |
| @@ -2211,7 +2219,7 @@ static int bcm_sysport_dsa_notifier(struct notifier_block *unused, | |||
| 2211 | 2219 | ||
| 2212 | info = ptr; | 2220 | info = ptr; |
| 2213 | 2221 | ||
| 2214 | return notifier_from_errno(bcm_sysport_map_queues(info->master, info)); | 2222 | return notifier_from_errno(bcm_sysport_map_queues(nb, info)); |
| 2215 | } | 2223 | } |
| 2216 | 2224 | ||
| 2217 | #define REV_FMT "v%2x.%02x" | 2225 | #define REV_FMT "v%2x.%02x" |
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index 4df282ed22c7..0beee2cc2ddd 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c | |||
| @@ -61,7 +61,7 @@ static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = { | |||
| 61 | static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { | 61 | static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { |
| 62 | "tx-single-collision", | 62 | "tx-single-collision", |
| 63 | "tx-multiple-collision", | 63 | "tx-multiple-collision", |
| 64 | "tx-late-collsion", | 64 | "tx-late-collision", |
| 65 | "tx-aborted-frames", | 65 | "tx-aborted-frames", |
| 66 | "tx-lost-frames", | 66 | "tx-lost-frames", |
| 67 | "tx-carrier-sense-errors", | 67 | "tx-carrier-sense-errors", |
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 4202f9b5b966..6f410235987c 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
| @@ -942,6 +942,7 @@ struct mvpp2 { | |||
| 942 | struct clk *pp_clk; | 942 | struct clk *pp_clk; |
| 943 | struct clk *gop_clk; | 943 | struct clk *gop_clk; |
| 944 | struct clk *mg_clk; | 944 | struct clk *mg_clk; |
| 945 | struct clk *mg_core_clk; | ||
| 945 | struct clk *axi_clk; | 946 | struct clk *axi_clk; |
| 946 | 947 | ||
| 947 | /* List of pointers to port structures */ | 948 | /* List of pointers to port structures */ |
| @@ -8768,18 +8769,27 @@ static int mvpp2_probe(struct platform_device *pdev) | |||
| 8768 | err = clk_prepare_enable(priv->mg_clk); | 8769 | err = clk_prepare_enable(priv->mg_clk); |
| 8769 | if (err < 0) | 8770 | if (err < 0) |
| 8770 | goto err_gop_clk; | 8771 | goto err_gop_clk; |
| 8772 | |||
| 8773 | priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk"); | ||
| 8774 | if (IS_ERR(priv->mg_core_clk)) { | ||
| 8775 | priv->mg_core_clk = NULL; | ||
| 8776 | } else { | ||
| 8777 | err = clk_prepare_enable(priv->mg_core_clk); | ||
| 8778 | if (err < 0) | ||
| 8779 | goto err_mg_clk; | ||
| 8780 | } | ||
| 8771 | } | 8781 | } |
| 8772 | 8782 | ||
| 8773 | priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); | 8783 | priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); |
| 8774 | if (IS_ERR(priv->axi_clk)) { | 8784 | if (IS_ERR(priv->axi_clk)) { |
| 8775 | err = PTR_ERR(priv->axi_clk); | 8785 | err = PTR_ERR(priv->axi_clk); |
| 8776 | if (err == -EPROBE_DEFER) | 8786 | if (err == -EPROBE_DEFER) |
| 8777 | goto err_gop_clk; | 8787 | goto err_mg_core_clk; |
| 8778 | priv->axi_clk = NULL; | 8788 | priv->axi_clk = NULL; |
| 8779 | } else { | 8789 | } else { |
| 8780 | err = clk_prepare_enable(priv->axi_clk); | 8790 | err = clk_prepare_enable(priv->axi_clk); |
| 8781 | if (err < 0) | 8791 | if (err < 0) |
| 8782 | goto err_gop_clk; | 8792 | goto err_mg_core_clk; |
| 8783 | } | 8793 | } |
| 8784 | 8794 | ||
| 8785 | /* Get system's tclk rate */ | 8795 | /* Get system's tclk rate */ |
| @@ -8793,7 +8803,7 @@ static int mvpp2_probe(struct platform_device *pdev) | |||
| 8793 | if (priv->hw_version == MVPP22) { | 8803 | if (priv->hw_version == MVPP22) { |
| 8794 | err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); | 8804 | err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); |
| 8795 | if (err) | 8805 | if (err) |
| 8796 | goto err_mg_clk; | 8806 | goto err_axi_clk; |
| 8797 | /* Sadly, the BM pools all share the same register to | 8807 | /* Sadly, the BM pools all share the same register to |
| 8798 | * store the high 32 bits of their address. So they | 8808 | * store the high 32 bits of their address. So they |
| 8799 | * must all have the same high 32 bits, which forces | 8809 | * must all have the same high 32 bits, which forces |
| @@ -8801,14 +8811,14 @@ static int mvpp2_probe(struct platform_device *pdev) | |||
| 8801 | */ | 8811 | */ |
| 8802 | err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); | 8812 | err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
| 8803 | if (err) | 8813 | if (err) |
| 8804 | goto err_mg_clk; | 8814 | goto err_axi_clk; |
| 8805 | } | 8815 | } |
| 8806 | 8816 | ||
| 8807 | /* Initialize network controller */ | 8817 | /* Initialize network controller */ |
| 8808 | err = mvpp2_init(pdev, priv); | 8818 | err = mvpp2_init(pdev, priv); |
| 8809 | if (err < 0) { | 8819 | if (err < 0) { |
| 8810 | dev_err(&pdev->dev, "failed to initialize controller\n"); | 8820 | dev_err(&pdev->dev, "failed to initialize controller\n"); |
| 8811 | goto err_mg_clk; | 8821 | goto err_axi_clk; |
| 8812 | } | 8822 | } |
| 8813 | 8823 | ||
| 8814 | /* Initialize ports */ | 8824 | /* Initialize ports */ |
| @@ -8821,7 +8831,7 @@ static int mvpp2_probe(struct platform_device *pdev) | |||
| 8821 | if (priv->port_count == 0) { | 8831 | if (priv->port_count == 0) { |
| 8822 | dev_err(&pdev->dev, "no ports enabled\n"); | 8832 | dev_err(&pdev->dev, "no ports enabled\n"); |
| 8823 | err = -ENODEV; | 8833 | err = -ENODEV; |
| 8824 | goto err_mg_clk; | 8834 | goto err_axi_clk; |
| 8825 | } | 8835 | } |
| 8826 | 8836 | ||
| 8827 | /* Statistics must be gathered regularly because some of them (like | 8837 | /* Statistics must be gathered regularly because some of them (like |
| @@ -8849,8 +8859,13 @@ err_port_probe: | |||
| 8849 | mvpp2_port_remove(priv->port_list[i]); | 8859 | mvpp2_port_remove(priv->port_list[i]); |
| 8850 | i++; | 8860 | i++; |
| 8851 | } | 8861 | } |
| 8852 | err_mg_clk: | 8862 | err_axi_clk: |
| 8853 | clk_disable_unprepare(priv->axi_clk); | 8863 | clk_disable_unprepare(priv->axi_clk); |
| 8864 | |||
| 8865 | err_mg_core_clk: | ||
| 8866 | if (priv->hw_version == MVPP22) | ||
| 8867 | clk_disable_unprepare(priv->mg_core_clk); | ||
| 8868 | err_mg_clk: | ||
| 8854 | if (priv->hw_version == MVPP22) | 8869 | if (priv->hw_version == MVPP22) |
| 8855 | clk_disable_unprepare(priv->mg_clk); | 8870 | clk_disable_unprepare(priv->mg_clk); |
| 8856 | err_gop_clk: | 8871 | err_gop_clk: |
| @@ -8897,6 +8912,7 @@ static int mvpp2_remove(struct platform_device *pdev) | |||
| 8897 | return 0; | 8912 | return 0; |
| 8898 | 8913 | ||
| 8899 | clk_disable_unprepare(priv->axi_clk); | 8914 | clk_disable_unprepare(priv->axi_clk); |
| 8915 | clk_disable_unprepare(priv->mg_core_clk); | ||
| 8900 | clk_disable_unprepare(priv->mg_clk); | 8916 | clk_disable_unprepare(priv->mg_clk); |
| 8901 | clk_disable_unprepare(priv->pp_clk); | 8917 | clk_disable_unprepare(priv->pp_clk); |
| 8902 | clk_disable_unprepare(priv->gop_clk); | 8918 | clk_disable_unprepare(priv->gop_clk); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index bfef69235d71..211578ffc70d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -1317,7 +1317,7 @@ static int mlx4_mf_unbond(struct mlx4_dev *dev) | |||
| 1317 | 1317 | ||
| 1318 | ret = mlx4_unbond_fs_rules(dev); | 1318 | ret = mlx4_unbond_fs_rules(dev); |
| 1319 | if (ret) | 1319 | if (ret) |
| 1320 | mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret); | 1320 | mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret); |
| 1321 | ret1 = mlx4_unbond_mac_table(dev); | 1321 | ret1 = mlx4_unbond_mac_table(dev); |
| 1322 | if (ret1) { | 1322 | if (ret1) { |
| 1323 | mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1); | 1323 | mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 3d46ef48d5b8..c641d5656b2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | |||
| @@ -1007,12 +1007,14 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv) | |||
| 1007 | 1007 | ||
| 1008 | mutex_lock(&priv->state_lock); | 1008 | mutex_lock(&priv->state_lock); |
| 1009 | 1009 | ||
| 1010 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) | ||
| 1011 | goto out; | ||
| 1012 | |||
| 1013 | new_channels.params = priv->channels.params; | 1010 | new_channels.params = priv->channels.params; |
| 1014 | mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params); | 1011 | mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params); |
| 1015 | 1012 | ||
| 1013 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { | ||
| 1014 | priv->channels.params = new_channels.params; | ||
| 1015 | goto out; | ||
| 1016 | } | ||
| 1017 | |||
| 1016 | /* Skip if tx_min_inline is the same */ | 1018 | /* Skip if tx_min_inline is the same */ |
| 1017 | if (new_channels.params.tx_min_inline_mode == | 1019 | if (new_channels.params.tx_min_inline_mode == |
| 1018 | priv->channels.params.tx_min_inline_mode) | 1020 | priv->channels.params.tx_min_inline_mode) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index d8f68e4d1018..876c3e4c6193 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
| @@ -877,13 +877,14 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { | |||
| 877 | }; | 877 | }; |
| 878 | 878 | ||
| 879 | static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, | 879 | static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, |
| 880 | struct mlx5e_params *params) | 880 | struct mlx5e_params *params, u16 mtu) |
| 881 | { | 881 | { |
| 882 | u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? | 882 | u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? |
| 883 | MLX5_CQ_PERIOD_MODE_START_FROM_CQE : | 883 | MLX5_CQ_PERIOD_MODE_START_FROM_CQE : |
| 884 | MLX5_CQ_PERIOD_MODE_START_FROM_EQE; | 884 | MLX5_CQ_PERIOD_MODE_START_FROM_EQE; |
| 885 | 885 | ||
| 886 | params->hard_mtu = MLX5E_ETH_HARD_MTU; | 886 | params->hard_mtu = MLX5E_ETH_HARD_MTU; |
| 887 | params->sw_mtu = mtu; | ||
| 887 | params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; | 888 | params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; |
| 888 | params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; | 889 | params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; |
| 889 | params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE; | 890 | params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE; |
| @@ -931,7 +932,7 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev, | |||
| 931 | 932 | ||
| 932 | priv->channels.params.num_channels = profile->max_nch(mdev); | 933 | priv->channels.params.num_channels = profile->max_nch(mdev); |
| 933 | 934 | ||
| 934 | mlx5e_build_rep_params(mdev, &priv->channels.params); | 935 | mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu); |
| 935 | mlx5e_build_rep_netdev(netdev); | 936 | mlx5e_build_rep_netdev(netdev); |
| 936 | 937 | ||
| 937 | mlx5e_timestamp_init(priv); | 938 | mlx5e_timestamp_init(priv); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 707976482c09..027f54ac1ca2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | |||
| @@ -290,7 +290,7 @@ static int mlx5e_test_loopback(struct mlx5e_priv *priv) | |||
| 290 | 290 | ||
| 291 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { | 291 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { |
| 292 | netdev_err(priv->netdev, | 292 | netdev_err(priv->netdev, |
| 293 | "\tCan't perform loobpack test while device is down\n"); | 293 | "\tCan't perform loopback test while device is down\n"); |
| 294 | return -ENODEV; | 294 | return -ENODEV; |
| 295 | } | 295 | } |
| 296 | 296 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 4197001f9801..3c534fc43400 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
| @@ -1864,7 +1864,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
| 1864 | } | 1864 | } |
| 1865 | 1865 | ||
| 1866 | ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); | 1866 | ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); |
| 1867 | if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { | 1867 | if (modify_ip_header && ip_proto != IPPROTO_TCP && |
| 1868 | ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { | ||
| 1868 | pr_info("can't offload re-write of ip proto %d\n", ip_proto); | 1869 | pr_info("can't offload re-write of ip proto %d\n", ip_proto); |
| 1869 | return false; | 1870 | return false; |
| 1870 | } | 1871 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 20297108528a..5532aa3675c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
| @@ -255,7 +255,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
| 255 | dma_addr = dma_map_single(sq->pdev, skb_data, headlen, | 255 | dma_addr = dma_map_single(sq->pdev, skb_data, headlen, |
| 256 | DMA_TO_DEVICE); | 256 | DMA_TO_DEVICE); |
| 257 | if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) | 257 | if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) |
| 258 | return -ENOMEM; | 258 | goto dma_unmap_wqe_err; |
| 259 | 259 | ||
| 260 | dseg->addr = cpu_to_be64(dma_addr); | 260 | dseg->addr = cpu_to_be64(dma_addr); |
| 261 | dseg->lkey = sq->mkey_be; | 261 | dseg->lkey = sq->mkey_be; |
| @@ -273,7 +273,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
| 273 | dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, | 273 | dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, |
| 274 | DMA_TO_DEVICE); | 274 | DMA_TO_DEVICE); |
| 275 | if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) | 275 | if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) |
| 276 | return -ENOMEM; | 276 | goto dma_unmap_wqe_err; |
| 277 | 277 | ||
| 278 | dseg->addr = cpu_to_be64(dma_addr); | 278 | dseg->addr = cpu_to_be64(dma_addr); |
| 279 | dseg->lkey = sq->mkey_be; | 279 | dseg->lkey = sq->mkey_be; |
| @@ -285,6 +285,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
| 285 | } | 285 | } |
| 286 | 286 | ||
| 287 | return num_dma; | 287 | return num_dma; |
| 288 | |||
| 289 | dma_unmap_wqe_err: | ||
| 290 | mlx5e_dma_unmap_wqe_err(sq, num_dma); | ||
| 291 | return -ENOMEM; | ||
| 288 | } | 292 | } |
| 289 | 293 | ||
| 290 | static inline void | 294 | static inline void |
| @@ -380,17 +384,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
| 380 | num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, | 384 | num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, |
| 381 | (struct mlx5_wqe_data_seg *)cseg + ds_cnt); | 385 | (struct mlx5_wqe_data_seg *)cseg + ds_cnt); |
| 382 | if (unlikely(num_dma < 0)) | 386 | if (unlikely(num_dma < 0)) |
| 383 | goto dma_unmap_wqe_err; | 387 | goto err_drop; |
| 384 | 388 | ||
| 385 | mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, | 389 | mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, |
| 386 | num_bytes, num_dma, wi, cseg); | 390 | num_bytes, num_dma, wi, cseg); |
| 387 | 391 | ||
| 388 | return NETDEV_TX_OK; | 392 | return NETDEV_TX_OK; |
| 389 | 393 | ||
| 390 | dma_unmap_wqe_err: | 394 | err_drop: |
| 391 | sq->stats.dropped++; | 395 | sq->stats.dropped++; |
| 392 | mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); | ||
| 393 | |||
| 394 | dev_kfree_skb_any(skb); | 396 | dev_kfree_skb_any(skb); |
| 395 | 397 | ||
| 396 | return NETDEV_TX_OK; | 398 | return NETDEV_TX_OK; |
| @@ -645,17 +647,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
| 645 | num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, | 647 | num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, |
| 646 | (struct mlx5_wqe_data_seg *)cseg + ds_cnt); | 648 | (struct mlx5_wqe_data_seg *)cseg + ds_cnt); |
| 647 | if (unlikely(num_dma < 0)) | 649 | if (unlikely(num_dma < 0)) |
| 648 | goto dma_unmap_wqe_err; | 650 | goto err_drop; |
| 649 | 651 | ||
| 650 | mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, | 652 | mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, |
| 651 | num_bytes, num_dma, wi, cseg); | 653 | num_bytes, num_dma, wi, cseg); |
| 652 | 654 | ||
| 653 | return NETDEV_TX_OK; | 655 | return NETDEV_TX_OK; |
| 654 | 656 | ||
| 655 | dma_unmap_wqe_err: | 657 | err_drop: |
| 656 | sq->stats.dropped++; | 658 | sq->stats.dropped++; |
| 657 | mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); | ||
| 658 | |||
| 659 | dev_kfree_skb_any(skb); | 659 | dev_kfree_skb_any(skb); |
| 660 | 660 | ||
| 661 | return NETDEV_TX_OK; | 661 | return NETDEV_TX_OK; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index de51e7c39bc8..c39c1692e674 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
| @@ -187,6 +187,7 @@ static void del_sw_ns(struct fs_node *node); | |||
| 187 | static void del_sw_hw_rule(struct fs_node *node); | 187 | static void del_sw_hw_rule(struct fs_node *node); |
| 188 | static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, | 188 | static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, |
| 189 | struct mlx5_flow_destination *d2); | 189 | struct mlx5_flow_destination *d2); |
| 190 | static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns); | ||
| 190 | static struct mlx5_flow_rule * | 191 | static struct mlx5_flow_rule * |
| 191 | find_flow_rule(struct fs_fte *fte, | 192 | find_flow_rule(struct fs_fte *fte, |
| 192 | struct mlx5_flow_destination *dest); | 193 | struct mlx5_flow_destination *dest); |
| @@ -481,7 +482,8 @@ static void del_sw_hw_rule(struct fs_node *node) | |||
| 481 | 482 | ||
| 482 | if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && | 483 | if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && |
| 483 | --fte->dests_size) { | 484 | --fte->dests_size) { |
| 484 | modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); | 485 | modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | |
| 486 | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); | ||
| 485 | fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; | 487 | fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; |
| 486 | update_fte = true; | 488 | update_fte = true; |
| 487 | goto out; | 489 | goto out; |
| @@ -2351,23 +2353,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering) | |||
| 2351 | 2353 | ||
| 2352 | static int init_root_ns(struct mlx5_flow_steering *steering) | 2354 | static int init_root_ns(struct mlx5_flow_steering *steering) |
| 2353 | { | 2355 | { |
| 2356 | int err; | ||
| 2357 | |||
| 2354 | steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); | 2358 | steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); |
| 2355 | if (!steering->root_ns) | 2359 | if (!steering->root_ns) |
| 2356 | goto cleanup; | 2360 | return -ENOMEM; |
| 2357 | 2361 | ||
| 2358 | if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) | 2362 | err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node); |
| 2359 | goto cleanup; | 2363 | if (err) |
| 2364 | goto out_err; | ||
| 2360 | 2365 | ||
| 2361 | set_prio_attrs(steering->root_ns); | 2366 | set_prio_attrs(steering->root_ns); |
| 2362 | 2367 | err = create_anchor_flow_table(steering); | |
| 2363 | if (create_anchor_flow_table(steering)) | 2368 | if (err) |
| 2364 | goto cleanup; | 2369 | goto out_err; |
| 2365 | 2370 | ||
| 2366 | return 0; | 2371 | return 0; |
| 2367 | 2372 | ||
| 2368 | cleanup: | 2373 | out_err: |
| 2369 | mlx5_cleanup_fs(steering->dev); | 2374 | cleanup_root_ns(steering->root_ns); |
| 2370 | return -ENOMEM; | 2375 | steering->root_ns = NULL; |
| 2376 | return err; | ||
| 2371 | } | 2377 | } |
| 2372 | 2378 | ||
| 2373 | static void clean_tree(struct fs_node *node) | 2379 | static void clean_tree(struct fs_node *node) |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index c11c9a635866..4ed01182a82c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
| @@ -1718,13 +1718,11 @@ __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 1718 | struct net_device *dev = mlxsw_sp_port->dev; | 1718 | struct net_device *dev = mlxsw_sp_port->dev; |
| 1719 | int err; | 1719 | int err; |
| 1720 | 1720 | ||
| 1721 | if (bridge_port->bridge_device->multicast_enabled) { | 1721 | if (bridge_port->bridge_device->multicast_enabled && |
| 1722 | if (bridge_port->bridge_device->multicast_enabled) { | 1722 | !bridge_port->mrouter) { |
| 1723 | err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, | 1723 | err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); |
| 1724 | false); | 1724 | if (err) |
| 1725 | if (err) | 1725 | netdev_err(dev, "Unable to remove port from SMID\n"); |
| 1726 | netdev_err(dev, "Unable to remove port from SMID\n"); | ||
| 1727 | } | ||
| 1728 | } | 1726 | } |
| 1729 | 1727 | ||
| 1730 | err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); | 1728 | err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index b3567a596fc1..80df9a5d4217 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c | |||
| @@ -183,17 +183,21 @@ static int | |||
| 183 | nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, | 183 | nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, |
| 184 | const struct tc_action *action, | 184 | const struct tc_action *action, |
| 185 | struct nfp_fl_pre_tunnel *pre_tun, | 185 | struct nfp_fl_pre_tunnel *pre_tun, |
| 186 | enum nfp_flower_tun_type tun_type) | 186 | enum nfp_flower_tun_type tun_type, |
| 187 | struct net_device *netdev) | ||
| 187 | { | 188 | { |
| 188 | size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); | 189 | size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); |
| 189 | struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); | 190 | struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); |
| 190 | u32 tmp_set_ip_tun_type_index = 0; | 191 | u32 tmp_set_ip_tun_type_index = 0; |
| 191 | /* Currently support one pre-tunnel so index is always 0. */ | 192 | /* Currently support one pre-tunnel so index is always 0. */ |
| 192 | int pretun_idx = 0; | 193 | int pretun_idx = 0; |
| 194 | struct net *net; | ||
| 193 | 195 | ||
| 194 | if (ip_tun->options_len) | 196 | if (ip_tun->options_len) |
| 195 | return -EOPNOTSUPP; | 197 | return -EOPNOTSUPP; |
| 196 | 198 | ||
| 199 | net = dev_net(netdev); | ||
| 200 | |||
| 197 | set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; | 201 | set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; |
| 198 | set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ; | 202 | set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ; |
| 199 | 203 | ||
| @@ -204,6 +208,7 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, | |||
| 204 | 208 | ||
| 205 | set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); | 209 | set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); |
| 206 | set_tun->tun_id = ip_tun->key.tun_id; | 210 | set_tun->tun_id = ip_tun->key.tun_id; |
| 211 | set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; | ||
| 207 | 212 | ||
| 208 | /* Complete pre_tunnel action. */ | 213 | /* Complete pre_tunnel action. */ |
| 209 | pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; | 214 | pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; |
| @@ -511,7 +516,8 @@ nfp_flower_loop_action(const struct tc_action *a, | |||
| 511 | *a_len += sizeof(struct nfp_fl_pre_tunnel); | 516 | *a_len += sizeof(struct nfp_fl_pre_tunnel); |
| 512 | 517 | ||
| 513 | set_tun = (void *)&nfp_fl->action_data[*a_len]; | 518 | set_tun = (void *)&nfp_fl->action_data[*a_len]; |
| 514 | err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type); | 519 | err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type, |
| 520 | netdev); | ||
| 515 | if (err) | 521 | if (err) |
| 516 | return err; | 522 | return err; |
| 517 | *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun); | 523 | *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun); |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index b6c0fd053a50..bee4367a2c38 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h | |||
| @@ -190,7 +190,10 @@ struct nfp_fl_set_ipv4_udp_tun { | |||
| 190 | __be16 reserved; | 190 | __be16 reserved; |
| 191 | __be64 tun_id __packed; | 191 | __be64 tun_id __packed; |
| 192 | __be32 tun_type_index; | 192 | __be32 tun_type_index; |
| 193 | __be32 extra[3]; | 193 | __be16 reserved2; |
| 194 | u8 ttl; | ||
| 195 | u8 reserved3; | ||
| 196 | __be32 extra[2]; | ||
| 194 | }; | 197 | }; |
| 195 | 198 | ||
| 196 | /* Metadata with L2 (1W/4B) | 199 | /* Metadata with L2 (1W/4B) |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index ad02592a82b7..a997e34bcec2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c | |||
| @@ -360,7 +360,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) | |||
| 360 | } | 360 | } |
| 361 | 361 | ||
| 362 | SET_NETDEV_DEV(repr, &priv->nn->pdev->dev); | 362 | SET_NETDEV_DEV(repr, &priv->nn->pdev->dev); |
| 363 | nfp_net_get_mac_addr(app->pf, port); | 363 | nfp_net_get_mac_addr(app->pf, repr, port); |
| 364 | 364 | ||
| 365 | cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port); | 365 | cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port); |
| 366 | err = nfp_repr_init(app, repr, | 366 | err = nfp_repr_init(app, repr, |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c index 2a2f2fbc8850..b9618c37403f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c | |||
| @@ -69,7 +69,7 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, | |||
| 69 | if (err) | 69 | if (err) |
| 70 | return err < 0 ? err : 0; | 70 | return err < 0 ? err : 0; |
| 71 | 71 | ||
| 72 | nfp_net_get_mac_addr(app->pf, nn->port); | 72 | nfp_net_get_mac_addr(app->pf, nn->dp.netdev, nn->port); |
| 73 | 73 | ||
| 74 | return 0; | 74 | return 0; |
| 75 | } | 75 | } |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index add46e28212b..42211083b51f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h | |||
| @@ -171,7 +171,9 @@ void nfp_net_pci_remove(struct nfp_pf *pf); | |||
| 171 | int nfp_hwmon_register(struct nfp_pf *pf); | 171 | int nfp_hwmon_register(struct nfp_pf *pf); |
| 172 | void nfp_hwmon_unregister(struct nfp_pf *pf); | 172 | void nfp_hwmon_unregister(struct nfp_pf *pf); |
| 173 | 173 | ||
| 174 | void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port); | 174 | void |
| 175 | nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev, | ||
| 176 | struct nfp_port *port); | ||
| 175 | 177 | ||
| 176 | bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); | 178 | bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); |
| 177 | 179 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 15fa47f622aa..45cd2092e498 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c | |||
| @@ -67,23 +67,26 @@ | |||
| 67 | /** | 67 | /** |
| 68 | * nfp_net_get_mac_addr() - Get the MAC address. | 68 | * nfp_net_get_mac_addr() - Get the MAC address. |
| 69 | * @pf: NFP PF handle | 69 | * @pf: NFP PF handle |
| 70 | * @netdev: net_device to set MAC address on | ||
| 70 | * @port: NFP port structure | 71 | * @port: NFP port structure |
| 71 | * | 72 | * |
| 72 | * First try to get the MAC address from NSP ETH table. If that | 73 | * First try to get the MAC address from NSP ETH table. If that |
| 73 | * fails generate a random address. | 74 | * fails generate a random address. |
| 74 | */ | 75 | */ |
| 75 | void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port) | 76 | void |
| 77 | nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev, | ||
| 78 | struct nfp_port *port) | ||
| 76 | { | 79 | { |
| 77 | struct nfp_eth_table_port *eth_port; | 80 | struct nfp_eth_table_port *eth_port; |
| 78 | 81 | ||
| 79 | eth_port = __nfp_port_get_eth_port(port); | 82 | eth_port = __nfp_port_get_eth_port(port); |
| 80 | if (!eth_port) { | 83 | if (!eth_port) { |
| 81 | eth_hw_addr_random(port->netdev); | 84 | eth_hw_addr_random(netdev); |
| 82 | return; | 85 | return; |
| 83 | } | 86 | } |
| 84 | 87 | ||
| 85 | ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr); | 88 | ether_addr_copy(netdev->dev_addr, eth_port->mac_addr); |
| 86 | ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr); | 89 | ether_addr_copy(netdev->perm_addr, eth_port->mac_addr); |
| 87 | } | 90 | } |
| 88 | 91 | ||
| 89 | static struct nfp_eth_table_port * | 92 | static struct nfp_eth_table_port * |
| @@ -511,16 +514,18 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) | |||
| 511 | return PTR_ERR(mem); | 514 | return PTR_ERR(mem); |
| 512 | } | 515 | } |
| 513 | 516 | ||
| 514 | min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1); | 517 | if (pf->eth_tbl) { |
| 515 | pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats", | 518 | min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1); |
| 516 | "net.macstats", min_size, | 519 | pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats", |
| 517 | &pf->mac_stats_bar); | 520 | "net.macstats", min_size, |
| 518 | if (IS_ERR(pf->mac_stats_mem)) { | 521 | &pf->mac_stats_bar); |
| 519 | if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) { | 522 | if (IS_ERR(pf->mac_stats_mem)) { |
| 520 | err = PTR_ERR(pf->mac_stats_mem); | 523 | if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) { |
| 521 | goto err_unmap_ctrl; | 524 | err = PTR_ERR(pf->mac_stats_mem); |
| 525 | goto err_unmap_ctrl; | ||
| 526 | } | ||
| 527 | pf->mac_stats_mem = NULL; | ||
| 522 | } | 528 | } |
| 523 | pf->mac_stats_mem = NULL; | ||
| 524 | } | 529 | } |
| 525 | 530 | ||
| 526 | pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg", | 531 | pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg", |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 74fc626b1ec1..38502815d681 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c | |||
| @@ -2370,7 +2370,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) | |||
| 2370 | u8 flags = 0; | 2370 | u8 flags = 0; |
| 2371 | 2371 | ||
| 2372 | if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { | 2372 | if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { |
| 2373 | DP_INFO(cdev, "Cannot transmit a checksumed packet\n"); | 2373 | DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); |
| 2374 | return -EINVAL; | 2374 | return -EINVAL; |
| 2375 | } | 2375 | } |
| 2376 | 2376 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index fb7c2d1562ae..6acfd43c1a4f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c | |||
| @@ -848,7 +848,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn, | |||
| 848 | 848 | ||
| 849 | if (!(qp->resp_offloaded)) { | 849 | if (!(qp->resp_offloaded)) { |
| 850 | DP_NOTICE(p_hwfn, | 850 | DP_NOTICE(p_hwfn, |
| 851 | "The responder's qp should be offloded before requester's\n"); | 851 | "The responder's qp should be offloaded before requester's\n"); |
| 852 | return -EINVAL; | 852 | return -EINVAL; |
| 853 | } | 853 | } |
| 854 | 854 | ||
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index d24b47b8e0b2..d118da5a10a2 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c | |||
| @@ -2224,7 +2224,7 @@ static void rtl8139_poll_controller(struct net_device *dev) | |||
| 2224 | struct rtl8139_private *tp = netdev_priv(dev); | 2224 | struct rtl8139_private *tp = netdev_priv(dev); |
| 2225 | const int irq = tp->pci_dev->irq; | 2225 | const int irq = tp->pci_dev->irq; |
| 2226 | 2226 | ||
| 2227 | disable_irq(irq); | 2227 | disable_irq_nosync(irq); |
| 2228 | rtl8139_interrupt(irq, dev); | 2228 | rtl8139_interrupt(irq, dev); |
| 2229 | enable_irq(irq); | 2229 | enable_irq(irq); |
| 2230 | } | 2230 | } |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 63036d9bf3e6..d90a7b1f4088 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
| @@ -4784,8 +4784,9 @@ expire: | |||
| 4784 | * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that | 4784 | * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that |
| 4785 | * the rule is not removed by efx_rps_hash_del() below. | 4785 | * the rule is not removed by efx_rps_hash_del() below. |
| 4786 | */ | 4786 | */ |
| 4787 | ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, | 4787 | if (ret) |
| 4788 | filter_idx, true) == 0; | 4788 | ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, |
| 4789 | filter_idx, true) == 0; | ||
| 4789 | /* While we can't safely dereference rule (we dropped the lock), we can | 4790 | /* While we can't safely dereference rule (we dropped the lock), we can |
| 4790 | * still test it for NULL. | 4791 | * still test it for NULL. |
| 4791 | */ | 4792 | */ |
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 64a94f242027..d2e254f2f72b 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
| @@ -839,6 +839,8 @@ static void efx_filter_rfs_work(struct work_struct *data) | |||
| 839 | int rc; | 839 | int rc; |
| 840 | 840 | ||
| 841 | rc = efx->type->filter_insert(efx, &req->spec, true); | 841 | rc = efx->type->filter_insert(efx, &req->spec, true); |
| 842 | if (rc >= 0) | ||
| 843 | rc %= efx->type->max_rx_ip_filters; | ||
| 842 | if (efx->rps_hash_table) { | 844 | if (efx->rps_hash_table) { |
| 843 | spin_lock_bh(&efx->rps_hash_lock); | 845 | spin_lock_bh(&efx->rps_hash_lock); |
| 844 | rule = efx_rps_hash_find(efx, &req->spec); | 846 | rule = efx_rps_hash_find(efx, &req->spec); |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 74f828412055..28d893b93d30 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -1340,6 +1340,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries( | |||
| 1340 | cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, | 1340 | cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, |
| 1341 | HOST_PORT_NUM, ALE_VLAN | | 1341 | HOST_PORT_NUM, ALE_VLAN | |
| 1342 | ALE_SECURE, slave->port_vlan); | 1342 | ALE_SECURE, slave->port_vlan); |
| 1343 | cpsw_ale_control_set(cpsw->ale, slave_port, | ||
| 1344 | ALE_PORT_DROP_UNKNOWN_VLAN, 1); | ||
| 1343 | } | 1345 | } |
| 1344 | 1346 | ||
| 1345 | static void soft_reset_slave(struct cpsw_slave *slave) | 1347 | static void soft_reset_slave(struct cpsw_slave *slave) |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ac23322a32e1..9e4ba8e80a18 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
| @@ -535,8 +535,17 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, | |||
| 535 | 535 | ||
| 536 | /* Grab the bits from PHYIR1, and put them in the upper half */ | 536 | /* Grab the bits from PHYIR1, and put them in the upper half */ |
| 537 | phy_reg = mdiobus_read(bus, addr, MII_PHYSID1); | 537 | phy_reg = mdiobus_read(bus, addr, MII_PHYSID1); |
| 538 | if (phy_reg < 0) | 538 | if (phy_reg < 0) { |
| 539 | /* if there is no device, return without an error so scanning | ||
| 540 | * the bus works properly | ||
| 541 | */ | ||
| 542 | if (phy_reg == -EIO || phy_reg == -ENODEV) { | ||
| 543 | *phy_id = 0xffffffff; | ||
| 544 | return 0; | ||
| 545 | } | ||
| 546 | |||
| 539 | return -EIO; | 547 | return -EIO; |
| 548 | } | ||
| 540 | 549 | ||
| 541 | *phy_id = (phy_reg & 0xffff) << 16; | 550 | *phy_id = (phy_reg & 0xffff) << 16; |
| 542 | 551 | ||
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index c853e7410f5a..42565dd33aa6 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -1098,6 +1098,7 @@ static const struct usb_device_id products[] = { | |||
| 1098 | {QMI_FIXED_INTF(0x05c6, 0x9080, 8)}, | 1098 | {QMI_FIXED_INTF(0x05c6, 0x9080, 8)}, |
| 1099 | {QMI_FIXED_INTF(0x05c6, 0x9083, 3)}, | 1099 | {QMI_FIXED_INTF(0x05c6, 0x9083, 3)}, |
| 1100 | {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, | 1100 | {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, |
| 1101 | {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */ | ||
| 1101 | {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, | 1102 | {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, |
| 1102 | {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, | 1103 | {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, |
| 1103 | {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ | 1104 | {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ |
| @@ -1343,6 +1344,18 @@ static int qmi_wwan_probe(struct usb_interface *intf, | |||
| 1343 | id->driver_info = (unsigned long)&qmi_wwan_info; | 1344 | id->driver_info = (unsigned long)&qmi_wwan_info; |
| 1344 | } | 1345 | } |
| 1345 | 1346 | ||
| 1347 | /* There are devices where the same interface number can be | ||
| 1348 | * configured as different functions. We should only bind to | ||
| 1349 | * vendor specific functions when matching on interface number | ||
| 1350 | */ | ||
| 1351 | if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER && | ||
| 1352 | desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) { | ||
| 1353 | dev_dbg(&intf->dev, | ||
| 1354 | "Rejecting interface number match for class %02x\n", | ||
| 1355 | desc->bInterfaceClass); | ||
| 1356 | return -ENODEV; | ||
| 1357 | } | ||
| 1358 | |||
| 1346 | /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */ | 1359 | /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */ |
| 1347 | if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) { | 1360 | if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) { |
| 1348 | dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n"); | 1361 | dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n"); |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 9277f4c2bfeb..94e177d7c9b5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c | |||
| @@ -459,7 +459,7 @@ static void brcmf_fw_free_request(struct brcmf_fw_request *req) | |||
| 459 | kfree(req); | 459 | kfree(req); |
| 460 | } | 460 | } |
| 461 | 461 | ||
| 462 | static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) | 462 | static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) |
| 463 | { | 463 | { |
| 464 | struct brcmf_fw *fwctx = ctx; | 464 | struct brcmf_fw *fwctx = ctx; |
| 465 | struct brcmf_fw_item *cur; | 465 | struct brcmf_fw_item *cur; |
| @@ -498,13 +498,10 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) | |||
| 498 | brcmf_dbg(TRACE, "nvram %p len %d\n", nvram, nvram_length); | 498 | brcmf_dbg(TRACE, "nvram %p len %d\n", nvram, nvram_length); |
| 499 | cur->nv_data.data = nvram; | 499 | cur->nv_data.data = nvram; |
| 500 | cur->nv_data.len = nvram_length; | 500 | cur->nv_data.len = nvram_length; |
| 501 | return; | 501 | return 0; |
| 502 | 502 | ||
| 503 | fail: | 503 | fail: |
| 504 | brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); | 504 | return -ENOENT; |
| 505 | fwctx->done(fwctx->dev, -ENOENT, NULL); | ||
| 506 | brcmf_fw_free_request(fwctx->req); | ||
| 507 | kfree(fwctx); | ||
| 508 | } | 505 | } |
| 509 | 506 | ||
| 510 | static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async) | 507 | static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async) |
| @@ -553,20 +550,27 @@ static void brcmf_fw_request_done(const struct firmware *fw, void *ctx) | |||
| 553 | brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path, | 550 | brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path, |
| 554 | fw ? "" : "not "); | 551 | fw ? "" : "not "); |
| 555 | 552 | ||
| 556 | if (fw) { | 553 | if (!fw) |
| 557 | if (cur->type == BRCMF_FW_TYPE_BINARY) | ||
| 558 | cur->binary = fw; | ||
| 559 | else if (cur->type == BRCMF_FW_TYPE_NVRAM) | ||
| 560 | brcmf_fw_request_nvram_done(fw, fwctx); | ||
| 561 | else | ||
| 562 | release_firmware(fw); | ||
| 563 | } else if (cur->type == BRCMF_FW_TYPE_NVRAM) { | ||
| 564 | brcmf_fw_request_nvram_done(NULL, fwctx); | ||
| 565 | } else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL)) { | ||
| 566 | ret = -ENOENT; | 554 | ret = -ENOENT; |
| 555 | |||
| 556 | switch (cur->type) { | ||
| 557 | case BRCMF_FW_TYPE_NVRAM: | ||
| 558 | ret = brcmf_fw_request_nvram_done(fw, fwctx); | ||
| 559 | break; | ||
| 560 | case BRCMF_FW_TYPE_BINARY: | ||
| 561 | cur->binary = fw; | ||
| 562 | break; | ||
| 563 | default: | ||
| 564 | /* something fishy here so bail out early */ | ||
| 565 | brcmf_err("unknown fw type: %d\n", cur->type); | ||
| 566 | release_firmware(fw); | ||
| 567 | ret = -EINVAL; | ||
| 567 | goto fail; | 568 | goto fail; |
| 568 | } | 569 | } |
| 569 | 570 | ||
| 571 | if (ret < 0 && !(cur->flags & BRCMF_FW_REQF_OPTIONAL)) | ||
| 572 | goto fail; | ||
| 573 | |||
| 570 | do { | 574 | do { |
| 571 | if (++fwctx->curpos == fwctx->req->n_items) { | 575 | if (++fwctx->curpos == fwctx->req->n_items) { |
| 572 | ret = 0; | 576 | ret = 0; |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 7af3a0f51b77..a17c4a79b8d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
| 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
| 11 | * Copyright(c) 2018 Intel Corporation | ||
| 11 | * | 12 | * |
| 12 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
| 13 | * it under the terms of version 2 of the GNU General Public License as | 14 | * it under the terms of version 2 of the GNU General Public License as |
| @@ -30,7 +31,7 @@ | |||
| 30 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 31 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
| 31 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 32 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 32 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 33 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
| 33 | * Copyright(c) 2018 Intel Corporation | 34 | * Copyright(c) 2018 Intel Corporation |
| 34 | * All rights reserved. | 35 | * All rights reserved. |
| 35 | * | 36 | * |
| 36 | * Redistribution and use in source and binary forms, with or without | 37 | * Redistribution and use in source and binary forms, with or without |
| @@ -749,13 +750,9 @@ struct iwl_scan_req_umac { | |||
| 749 | } __packed; | 750 | } __packed; |
| 750 | 751 | ||
| 751 | #define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac) | 752 | #define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac) |
| 752 | #define IWL_SCAN_REQ_UMAC_SIZE_V7 (sizeof(struct iwl_scan_req_umac) - \ | 753 | #define IWL_SCAN_REQ_UMAC_SIZE_V7 48 |
| 753 | 4 * sizeof(u8)) | 754 | #define IWL_SCAN_REQ_UMAC_SIZE_V6 44 |
| 754 | #define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \ | 755 | #define IWL_SCAN_REQ_UMAC_SIZE_V1 36 |
| 755 | 2 * sizeof(u8) - sizeof(__le16)) | ||
| 756 | #define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ | ||
| 757 | 2 * sizeof(__le32) - 2 * sizeof(u8) - \ | ||
| 758 | sizeof(__le16)) | ||
| 759 | 756 | ||
| 760 | /** | 757 | /** |
| 761 | * struct iwl_umac_scan_abort | 758 | * struct iwl_umac_scan_abort |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 8928613e033e..ca0174680af9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | |||
| @@ -76,6 +76,7 @@ | |||
| 76 | #include "iwl-io.h" | 76 | #include "iwl-io.h" |
| 77 | #include "iwl-csr.h" | 77 | #include "iwl-csr.h" |
| 78 | #include "fw/acpi.h" | 78 | #include "fw/acpi.h" |
| 79 | #include "fw/api/nvm-reg.h" | ||
| 79 | 80 | ||
| 80 | /* NVM offsets (in words) definitions */ | 81 | /* NVM offsets (in words) definitions */ |
| 81 | enum nvm_offsets { | 82 | enum nvm_offsets { |
| @@ -146,8 +147,8 @@ static const u8 iwl_ext_nvm_channels[] = { | |||
| 146 | 149, 153, 157, 161, 165, 169, 173, 177, 181 | 147 | 149, 153, 157, 161, 165, 169, 173, 177, 181 |
| 147 | }; | 148 | }; |
| 148 | 149 | ||
| 149 | #define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) | 150 | #define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) |
| 150 | #define IWL_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) | 151 | #define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) |
| 151 | #define NUM_2GHZ_CHANNELS 14 | 152 | #define NUM_2GHZ_CHANNELS 14 |
| 152 | #define NUM_2GHZ_CHANNELS_EXT 14 | 153 | #define NUM_2GHZ_CHANNELS_EXT 14 |
| 153 | #define FIRST_2GHZ_HT_MINUS 5 | 154 | #define FIRST_2GHZ_HT_MINUS 5 |
| @@ -301,11 +302,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, | |||
| 301 | const u8 *nvm_chan; | 302 | const u8 *nvm_chan; |
| 302 | 303 | ||
| 303 | if (cfg->nvm_type != IWL_NVM_EXT) { | 304 | if (cfg->nvm_type != IWL_NVM_EXT) { |
| 304 | num_of_ch = IWL_NUM_CHANNELS; | 305 | num_of_ch = IWL_NVM_NUM_CHANNELS; |
| 305 | nvm_chan = &iwl_nvm_channels[0]; | 306 | nvm_chan = &iwl_nvm_channels[0]; |
| 306 | num_2ghz_channels = NUM_2GHZ_CHANNELS; | 307 | num_2ghz_channels = NUM_2GHZ_CHANNELS; |
| 307 | } else { | 308 | } else { |
| 308 | num_of_ch = IWL_NUM_CHANNELS_EXT; | 309 | num_of_ch = IWL_NVM_NUM_CHANNELS_EXT; |
| 309 | nvm_chan = &iwl_ext_nvm_channels[0]; | 310 | nvm_chan = &iwl_ext_nvm_channels[0]; |
| 310 | num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT; | 311 | num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT; |
| 311 | } | 312 | } |
| @@ -720,12 +721,12 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, | |||
| 720 | if (cfg->nvm_type != IWL_NVM_EXT) | 721 | if (cfg->nvm_type != IWL_NVM_EXT) |
| 721 | data = kzalloc(sizeof(*data) + | 722 | data = kzalloc(sizeof(*data) + |
| 722 | sizeof(struct ieee80211_channel) * | 723 | sizeof(struct ieee80211_channel) * |
| 723 | IWL_NUM_CHANNELS, | 724 | IWL_NVM_NUM_CHANNELS, |
| 724 | GFP_KERNEL); | 725 | GFP_KERNEL); |
| 725 | else | 726 | else |
| 726 | data = kzalloc(sizeof(*data) + | 727 | data = kzalloc(sizeof(*data) + |
| 727 | sizeof(struct ieee80211_channel) * | 728 | sizeof(struct ieee80211_channel) * |
| 728 | IWL_NUM_CHANNELS_EXT, | 729 | IWL_NVM_NUM_CHANNELS_EXT, |
| 729 | GFP_KERNEL); | 730 | GFP_KERNEL); |
| 730 | if (!data) | 731 | if (!data) |
| 731 | return NULL; | 732 | return NULL; |
| @@ -842,24 +843,34 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan, | |||
| 842 | return flags; | 843 | return flags; |
| 843 | } | 844 | } |
| 844 | 845 | ||
| 846 | struct regdb_ptrs { | ||
| 847 | struct ieee80211_wmm_rule *rule; | ||
| 848 | u32 token; | ||
| 849 | }; | ||
| 850 | |||
| 845 | struct ieee80211_regdomain * | 851 | struct ieee80211_regdomain * |
| 846 | iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | 852 | iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, |
| 847 | int num_of_ch, __le32 *channels, u16 fw_mcc) | 853 | int num_of_ch, __le32 *channels, u16 fw_mcc, |
| 854 | u16 geo_info) | ||
| 848 | { | 855 | { |
| 849 | int ch_idx; | 856 | int ch_idx; |
| 850 | u16 ch_flags; | 857 | u16 ch_flags; |
| 851 | u32 reg_rule_flags, prev_reg_rule_flags = 0; | 858 | u32 reg_rule_flags, prev_reg_rule_flags = 0; |
| 852 | const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? | 859 | const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? |
| 853 | iwl_ext_nvm_channels : iwl_nvm_channels; | 860 | iwl_ext_nvm_channels : iwl_nvm_channels; |
| 854 | struct ieee80211_regdomain *regd; | 861 | struct ieee80211_regdomain *regd, *copy_rd; |
| 855 | int size_of_regd; | 862 | int size_of_regd, regd_to_copy, wmms_to_copy; |
| 863 | int size_of_wmms = 0; | ||
| 856 | struct ieee80211_reg_rule *rule; | 864 | struct ieee80211_reg_rule *rule; |
| 865 | struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm; | ||
| 866 | struct regdb_ptrs *regdb_ptrs; | ||
| 857 | enum nl80211_band band; | 867 | enum nl80211_band band; |
| 858 | int center_freq, prev_center_freq = 0; | 868 | int center_freq, prev_center_freq = 0; |
| 859 | int valid_rules = 0; | 869 | int valid_rules = 0, n_wmms = 0; |
| 870 | int i; | ||
| 860 | bool new_rule; | 871 | bool new_rule; |
| 861 | int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? | 872 | int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? |
| 862 | IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS; | 873 | IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; |
| 863 | 874 | ||
| 864 | if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) | 875 | if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) |
| 865 | return ERR_PTR(-EINVAL); | 876 | return ERR_PTR(-EINVAL); |
| @@ -875,10 +886,26 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 875 | sizeof(struct ieee80211_regdomain) + | 886 | sizeof(struct ieee80211_regdomain) + |
| 876 | num_of_ch * sizeof(struct ieee80211_reg_rule); | 887 | num_of_ch * sizeof(struct ieee80211_reg_rule); |
| 877 | 888 | ||
| 878 | regd = kzalloc(size_of_regd, GFP_KERNEL); | 889 | if (geo_info & GEO_WMM_ETSI_5GHZ_INFO) |
| 890 | size_of_wmms = | ||
| 891 | num_of_ch * sizeof(struct ieee80211_wmm_rule); | ||
| 892 | |||
| 893 | regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); | ||
| 879 | if (!regd) | 894 | if (!regd) |
| 880 | return ERR_PTR(-ENOMEM); | 895 | return ERR_PTR(-ENOMEM); |
| 881 | 896 | ||
| 897 | regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL); | ||
| 898 | if (!regdb_ptrs) { | ||
| 899 | copy_rd = ERR_PTR(-ENOMEM); | ||
| 900 | goto out; | ||
| 901 | } | ||
| 902 | |||
| 903 | /* set alpha2 from FW. */ | ||
| 904 | regd->alpha2[0] = fw_mcc >> 8; | ||
| 905 | regd->alpha2[1] = fw_mcc & 0xff; | ||
| 906 | |||
| 907 | wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | ||
| 908 | |||
| 882 | for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { | 909 | for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { |
| 883 | ch_flags = (u16)__le32_to_cpup(channels + ch_idx); | 910 | ch_flags = (u16)__le32_to_cpup(channels + ch_idx); |
| 884 | band = (ch_idx < NUM_2GHZ_CHANNELS) ? | 911 | band = (ch_idx < NUM_2GHZ_CHANNELS) ? |
| @@ -927,14 +954,66 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 927 | 954 | ||
| 928 | iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, | 955 | iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, |
| 929 | nvm_chan[ch_idx], ch_flags); | 956 | nvm_chan[ch_idx], ch_flags); |
| 957 | |||
| 958 | if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) || | ||
| 959 | band == NL80211_BAND_2GHZ) | ||
| 960 | continue; | ||
| 961 | |||
| 962 | if (!reg_query_regdb_wmm(regd->alpha2, center_freq, | ||
| 963 | ®db_ptrs[n_wmms].token, wmm_rule)) { | ||
| 964 | /* Add only new rules */ | ||
| 965 | for (i = 0; i < n_wmms; i++) { | ||
| 966 | if (regdb_ptrs[i].token == | ||
| 967 | regdb_ptrs[n_wmms].token) { | ||
| 968 | rule->wmm_rule = regdb_ptrs[i].rule; | ||
| 969 | break; | ||
| 970 | } | ||
| 971 | } | ||
| 972 | if (i == n_wmms) { | ||
| 973 | rule->wmm_rule = wmm_rule; | ||
| 974 | regdb_ptrs[n_wmms++].rule = wmm_rule; | ||
| 975 | wmm_rule++; | ||
| 976 | } | ||
| 977 | } | ||
| 930 | } | 978 | } |
| 931 | 979 | ||
| 932 | regd->n_reg_rules = valid_rules; | 980 | regd->n_reg_rules = valid_rules; |
| 981 | regd->n_wmm_rules = n_wmms; | ||
| 933 | 982 | ||
| 934 | /* set alpha2 from FW. */ | 983 | /* |
| 935 | regd->alpha2[0] = fw_mcc >> 8; | 984 | * Narrow down regdom for unused regulatory rules to prevent hole |
| 936 | regd->alpha2[1] = fw_mcc & 0xff; | 985 | * between reg rules to wmm rules. |
| 986 | */ | ||
| 987 | regd_to_copy = sizeof(struct ieee80211_regdomain) + | ||
| 988 | valid_rules * sizeof(struct ieee80211_reg_rule); | ||
| 989 | |||
| 990 | wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms; | ||
| 991 | |||
| 992 | copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL); | ||
| 993 | if (!copy_rd) { | ||
| 994 | copy_rd = ERR_PTR(-ENOMEM); | ||
| 995 | goto out; | ||
| 996 | } | ||
| 997 | |||
| 998 | memcpy(copy_rd, regd, regd_to_copy); | ||
| 999 | memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd, | ||
| 1000 | wmms_to_copy); | ||
| 1001 | |||
| 1002 | d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy); | ||
| 1003 | s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | ||
| 1004 | |||
| 1005 | for (i = 0; i < regd->n_reg_rules; i++) { | ||
| 1006 | if (!regd->reg_rules[i].wmm_rule) | ||
| 1007 | continue; | ||
| 1008 | |||
| 1009 | copy_rd->reg_rules[i].wmm_rule = d_wmm + | ||
| 1010 | (regd->reg_rules[i].wmm_rule - s_wmm) / | ||
| 1011 | sizeof(struct ieee80211_wmm_rule); | ||
| 1012 | } | ||
| 937 | 1013 | ||
| 938 | return regd; | 1014 | out: |
| 1015 | kfree(regdb_ptrs); | ||
| 1016 | kfree(regd); | ||
| 1017 | return copy_rd; | ||
| 939 | } | 1018 | } |
| 940 | IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info); | 1019 | IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info); |
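Note: with this change, iwl_parse_nvm_mcc_info() also attaches WMM (EDCA) limits to the regulatory domain when the firmware reports GEO_WMM_ETSI_5GHZ_INFO: each 5 GHz channel queries reg_query_regdb_wmm(), rules that share a regdb token are stored only once, and the finished domain plus its WMM rules are then copied into a buffer sized for the rules actually kept, so there is no hole between the reg-rule array and the WMM rules. The standalone sketch below is not the driver code; it illustrates only the dedup-by-token bookkeeping:

    /* Userspace sketch of the dedup-by-token idea: each channel queries a
     * rule; rules sharing a token are stored once and later channels reuse
     * the earlier copy.
     */
    #include <stdio.h>

    struct wmm_rule { int edca[4]; };
    struct regdb_ptr { const struct wmm_rule *rule; unsigned token; };

    int main(void)
    {
        /* pretend regdb lookups: the token identifies the underlying rule */
        unsigned tokens[] = { 7, 7, 9, 7, 9 };
        static struct wmm_rule storage[5];
        struct regdb_ptr seen[5];
        int n_wmms = 0;

        for (int ch = 0; ch < 5; ch++) {
            unsigned tok = tokens[ch];
            int i;

            for (i = 0; i < n_wmms; i++)
                if (seen[i].token == tok)
                    break;          /* reuse the rule stored earlier */

            if (i == n_wmms) {      /* first time this token is seen */
                seen[n_wmms].token = tok;
                seen[n_wmms].rule = &storage[n_wmms];
                n_wmms++;
            }
            printf("channel %d -> wmm slot %d\n", ch, i);
        }
        printf("distinct wmm rules: %d\n", n_wmms);   /* prints 2 */
        return 0;
    }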
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 306736c7a042..3071a23b7606 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | |||
| @@ -101,12 +101,14 @@ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, | |||
| 101 | * | 101 | * |
| 102 | * This function parses the regulatory channel data received as a | 102 | * This function parses the regulatory channel data received as a |
| 103 | * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain, | 103 | * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain, |
| 104 | * to be fed into the regulatory core. An ERR_PTR is returned on error. | 104 | * to be fed into the regulatory core. In case the geo_info is set handle |
| 105 | * accordingly. An ERR_PTR is returned on error. | ||
| 105 | * If not given to the regulatory core, the user is responsible for freeing | 106 | * If not given to the regulatory core, the user is responsible for freeing |
| 106 | * the regdomain returned here with kfree. | 107 | * the regdomain returned here with kfree. |
| 107 | */ | 108 | */ |
| 108 | struct ieee80211_regdomain * | 109 | struct ieee80211_regdomain * |
| 109 | iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | 110 | iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, |
| 110 | int num_of_ch, __le32 *channels, u16 fw_mcc); | 111 | int num_of_ch, __le32 *channels, u16 fw_mcc, |
| 112 | u16 geo_info); | ||
| 111 | 113 | ||
| 112 | #endif /* __iwl_nvm_parse_h__ */ | 114 | #endif /* __iwl_nvm_parse_h__ */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 51b30424575b..90f8c89ea59c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
| @@ -311,7 +311,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, | |||
| 311 | regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, | 311 | regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, |
| 312 | __le32_to_cpu(resp->n_channels), | 312 | __le32_to_cpu(resp->n_channels), |
| 313 | resp->channels, | 313 | resp->channels, |
| 314 | __le16_to_cpu(resp->mcc)); | 314 | __le16_to_cpu(resp->mcc), |
| 315 | __le16_to_cpu(resp->geo_info)); | ||
| 315 | /* Store the return source id */ | 316 | /* Store the return source id */ |
| 316 | src_id = resp->source_id; | 317 | src_id = resp->source_id; |
| 317 | kfree(resp); | 318 | kfree(resp); |
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 8b6b07a936f5..b026e80940a4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | |||
| @@ -158,16 +158,6 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist) | |||
| 158 | 158 | ||
| 159 | static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) | 159 | static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) |
| 160 | { | 160 | { |
| 161 | struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; | ||
| 162 | |||
| 163 | /* override ant_num / ant_path */ | ||
| 164 | if (mod_params->ant_sel) { | ||
| 165 | rtlpriv->btcoexist.btc_info.ant_num = | ||
| 166 | (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); | ||
| 167 | |||
| 168 | rtlpriv->btcoexist.btc_info.single_ant_path = | ||
| 169 | (mod_params->ant_sel == 1 ? 0 : 1); | ||
| 170 | } | ||
| 171 | return rtlpriv->btcoexist.btc_info.single_ant_path; | 161 | return rtlpriv->btcoexist.btc_info.single_ant_path; |
| 172 | } | 162 | } |
| 173 | 163 | ||
| @@ -178,7 +168,6 @@ static u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv) | |||
| 178 | 168 | ||
| 179 | static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) | 169 | static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) |
| 180 | { | 170 | { |
| 181 | struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; | ||
| 182 | u8 num; | 171 | u8 num; |
| 183 | 172 | ||
| 184 | if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) | 173 | if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) |
| @@ -186,10 +175,6 @@ static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) | |||
| 186 | else | 175 | else |
| 187 | num = 1; | 176 | num = 1; |
| 188 | 177 | ||
| 189 | /* override ant_num / ant_path */ | ||
| 190 | if (mod_params->ant_sel) | ||
| 191 | num = (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1) + 1; | ||
| 192 | |||
| 193 | return num; | 178 | return num; |
| 194 | } | 179 | } |
| 195 | 180 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index e7bbbc95cdb1..b4f3f91b590e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | |||
| @@ -848,6 +848,9 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) | |||
| 848 | return false; | 848 | return false; |
| 849 | } | 849 | } |
| 850 | 850 | ||
| 851 | if (rtlpriv->cfg->ops->get_btc_status()) | ||
| 852 | rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv); | ||
| 853 | |||
| 851 | bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); | 854 | bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); |
| 852 | rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3)); | 855 | rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3)); |
| 853 | 856 | ||
| @@ -2696,21 +2699,21 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, | |||
| 2696 | rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; | 2699 | rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; |
| 2697 | rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); | 2700 | rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); |
| 2698 | rtlpriv->btcoexist.btc_info.single_ant_path = | 2701 | rtlpriv->btcoexist.btc_info.single_ant_path = |
| 2699 | (value & 0x40); /*0xc3[6]*/ | 2702 | (value & 0x40 ? ANT_AUX : ANT_MAIN); /*0xc3[6]*/ |
| 2700 | } else { | 2703 | } else { |
| 2701 | rtlpriv->btcoexist.btc_info.btcoexist = 0; | 2704 | rtlpriv->btcoexist.btc_info.btcoexist = 0; |
| 2702 | rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; | 2705 | rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; |
| 2703 | rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; | 2706 | rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; |
| 2704 | rtlpriv->btcoexist.btc_info.single_ant_path = 0; | 2707 | rtlpriv->btcoexist.btc_info.single_ant_path = ANT_MAIN; |
| 2705 | } | 2708 | } |
| 2706 | 2709 | ||
| 2707 | /* override ant_num / ant_path */ | 2710 | /* override ant_num / ant_path */ |
| 2708 | if (mod_params->ant_sel) { | 2711 | if (mod_params->ant_sel) { |
| 2709 | rtlpriv->btcoexist.btc_info.ant_num = | 2712 | rtlpriv->btcoexist.btc_info.ant_num = |
| 2710 | (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); | 2713 | (mod_params->ant_sel == 1 ? ANT_X1 : ANT_X2); |
| 2711 | 2714 | ||
| 2712 | rtlpriv->btcoexist.btc_info.single_ant_path = | 2715 | rtlpriv->btcoexist.btc_info.single_ant_path = |
| 2713 | (mod_params->ant_sel == 1 ? 0 : 1); | 2716 | (mod_params->ant_sel == 1 ? ANT_AUX : ANT_MAIN); |
| 2714 | } | 2717 | } |
| 2715 | } | 2718 | } |
| 2716 | 2719 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index d27e33960e77..ce1754054a07 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h | |||
| @@ -2823,6 +2823,11 @@ enum bt_ant_num { | |||
| 2823 | ANT_X1 = 1, | 2823 | ANT_X1 = 1, |
| 2824 | }; | 2824 | }; |
| 2825 | 2825 | ||
| 2826 | enum bt_ant_path { | ||
| 2827 | ANT_MAIN = 0, | ||
| 2828 | ANT_AUX = 1, | ||
| 2829 | }; | ||
| 2830 | |||
| 2826 | enum bt_co_type { | 2831 | enum bt_co_type { |
| 2827 | BT_2WIRE = 0, | 2832 | BT_2WIRE = 0, |
| 2828 | BT_ISSC_3WIRE = 1, | 2833 | BT_ISSC_3WIRE = 1, |
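Note: across the three rtlwifi files above, the antenna path is now typed with the new bt_ant_path enum, the EFUSE bit 0xc3[6] maps to ANT_AUX/ANT_MAIN, and the ant_sel module-parameter override is applied once while parsing the HWPG info (with the ant_sel==1 case corrected to mean ANT_X1/ANT_AUX) rather than re-applied inside the btcoexist helpers. A small illustrative mapping that mirrors the enum values but not the driver's surrounding logic:

    /* Minimal sketch of the corrected override; values mirror the enums in
     * wifi.h, the selection helper itself is illustrative.
     */
    #include <stdio.h>

    enum bt_ant_num  { ANT_X2 = 0, ANT_X1 = 1 };
    enum bt_ant_path { ANT_MAIN = 0, ANT_AUX = 1 };

    static void apply_ant_sel(int ant_sel, enum bt_ant_num *num,
                              enum bt_ant_path *path)
    {
        if (!ant_sel)
            return;                       /* keep the values read from EFUSE */
        *num  = (ant_sel == 1) ? ANT_X1 : ANT_X2;
        *path = (ant_sel == 1) ? ANT_AUX : ANT_MAIN;
    }

    int main(void)
    {
        enum bt_ant_num num = ANT_X2;
        enum bt_ant_path path = ANT_MAIN;

        apply_ant_sel(1, &num, &path);
        printf("num=%d path=%d\n", num, path);   /* 1 1 -> ANT_X1 / ANT_AUX */
        return 0;
    }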
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 486e65e3db26..469b20e1dd7e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
| @@ -31,6 +31,7 @@ struct bpf_map_ops { | |||
| 31 | void (*map_release)(struct bpf_map *map, struct file *map_file); | 31 | void (*map_release)(struct bpf_map *map, struct file *map_file); |
| 32 | void (*map_free)(struct bpf_map *map); | 32 | void (*map_free)(struct bpf_map *map); |
| 33 | int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); | 33 | int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); |
| 34 | void (*map_release_uref)(struct bpf_map *map); | ||
| 34 | 35 | ||
| 35 | /* funcs callable from userspace and from eBPF programs */ | 36 | /* funcs callable from userspace and from eBPF programs */ |
| 36 | void *(*map_lookup_elem)(struct bpf_map *map, void *key); | 37 | void *(*map_lookup_elem)(struct bpf_map *map, void *key); |
| @@ -351,6 +352,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, | |||
| 351 | struct bpf_prog **_prog, *__prog; \ | 352 | struct bpf_prog **_prog, *__prog; \ |
| 352 | struct bpf_prog_array *_array; \ | 353 | struct bpf_prog_array *_array; \ |
| 353 | u32 _ret = 1; \ | 354 | u32 _ret = 1; \ |
| 355 | preempt_disable(); \ | ||
| 354 | rcu_read_lock(); \ | 356 | rcu_read_lock(); \ |
| 355 | _array = rcu_dereference(array); \ | 357 | _array = rcu_dereference(array); \ |
| 356 | if (unlikely(check_non_null && !_array))\ | 358 | if (unlikely(check_non_null && !_array))\ |
| @@ -362,6 +364,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, | |||
| 362 | } \ | 364 | } \ |
| 363 | _out: \ | 365 | _out: \ |
| 364 | rcu_read_unlock(); \ | 366 | rcu_read_unlock(); \ |
| 367 | preempt_enable_no_resched(); \ | ||
| 365 | _ret; \ | 368 | _ret; \ |
| 366 | }) | 369 | }) |
| 367 | 370 | ||
| @@ -434,7 +437,6 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); | |||
| 434 | int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, | 437 | int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, |
| 435 | void *key, void *value, u64 map_flags); | 438 | void *key, void *value, u64 map_flags); |
| 436 | int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); | 439 | int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); |
| 437 | void bpf_fd_array_map_clear(struct bpf_map *map); | ||
| 438 | int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, | 440 | int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, |
| 439 | void *key, void *value, u64 map_flags); | 441 | void *key, void *value, u64 map_flags); |
| 440 | int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); | 442 | int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); |
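Note: two things change in bpf.h. Map types gain an optional map_release_uref() callback (used further down so sockmap can share the user-reference teardown path that prog arrays already had), and the BPF_PROG_RUN_ARRAY loop now brackets its RCU section with preempt_disable()/preempt_enable_no_resched(). The kernel macro is a GNU C statement expression; the userspace sketch below borrows that shape with stand-in begin/end hooks purely to show the bracketing and the combined return value, not the real helper:

    /* Sketch of the statement-expression macro pattern (GNU C extension, as
     * the kernel macro itself uses): begin/end hooks wrap every iteration
     * and the expression yields the combined verdict.
     */
    #include <stdio.h>

    static int begin_cnt, end_cnt;
    static void critical_begin(void) { begin_cnt++; }  /* stand-in for preempt_disable */
    static void critical_end(void)   { end_cnt++; }    /* stand-in for preempt_enable_no_resched */

    #define RUN_ALL(progs, n, arg) ({                \
        unsigned int _ret = 1;                       \
        critical_begin();                            \
        for (int _i = 0; _i < (n); _i++)             \
            _ret &= (progs)[_i](arg);                \
        critical_end();                              \
        _ret;                                        \
    })

    static int prog_ok(int x)   { (void)x; return 1; }
    static int prog_drop(int x) { (void)x; return 0; }

    int main(void)
    {
        int (*progs[2])(int) = { prog_ok, prog_drop };

        printf("verdict=%u (begin=%d end=%d)\n",
               RUN_ALL(progs, 2, 5), begin_cnt, end_cnt);
        return 0;
    }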
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 767d193c269a..2a156c5dfadd 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -1284,25 +1284,19 @@ enum { | |||
| 1284 | }; | 1284 | }; |
| 1285 | 1285 | ||
| 1286 | static inline const struct cpumask * | 1286 | static inline const struct cpumask * |
| 1287 | mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) | 1287 | mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector) |
| 1288 | { | 1288 | { |
| 1289 | const struct cpumask *mask; | ||
| 1290 | struct irq_desc *desc; | 1289 | struct irq_desc *desc; |
| 1291 | unsigned int irq; | 1290 | unsigned int irq; |
| 1292 | int eqn; | 1291 | int eqn; |
| 1293 | int err; | 1292 | int err; |
| 1294 | 1293 | ||
| 1295 | err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq); | 1294 | err = mlx5_vector2eqn(dev, vector, &eqn, &irq); |
| 1296 | if (err) | 1295 | if (err) |
| 1297 | return NULL; | 1296 | return NULL; |
| 1298 | 1297 | ||
| 1299 | desc = irq_to_desc(irq); | 1298 | desc = irq_to_desc(irq); |
| 1300 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK | 1299 | return desc->affinity_hint; |
| 1301 | mask = irq_data_get_effective_affinity_mask(&desc->irq_data); | ||
| 1302 | #else | ||
| 1303 | mask = desc->irq_common_data.affinity; | ||
| 1304 | #endif | ||
| 1305 | return mask; | ||
| 1306 | } | 1300 | } |
| 1307 | 1301 | ||
| 1308 | #endif /* MLX5_DRIVER_H */ | 1302 | #endif /* MLX5_DRIVER_H */ |
diff --git a/include/net/tls.h b/include/net/tls.h index 3da8e13a6d96..b400d0bb7448 100644 --- a/include/net/tls.h +++ b/include/net/tls.h | |||
| @@ -148,6 +148,7 @@ struct tls_context { | |||
| 148 | struct scatterlist *partially_sent_record; | 148 | struct scatterlist *partially_sent_record; |
| 149 | u16 partially_sent_offset; | 149 | u16 partially_sent_offset; |
| 150 | unsigned long flags; | 150 | unsigned long flags; |
| 151 | bool in_tcp_sendpages; | ||
| 151 | 152 | ||
| 152 | u16 pending_open_record_frags; | 153 | u16 pending_open_record_frags; |
| 153 | int (*push_pending_record)(struct sock *sk, int flags); | 154 | int (*push_pending_record)(struct sock *sk, int flags); |
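Note: the new in_tcp_sendpages flag reads as a reentrancy guard: while the TLS layer is already inside the TCP sendpages path, a callback taken from underneath it must not recurse back into the TLS push path. A toy userspace analogue of that guard (all names illustrative):

    /* Illustrative reentrancy guard: the flag keeps a lower layer's callback
     * from re-entering the push path it was called from.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool in_sendpages;

    static void push_pending(void);

    static void lower_layer_send(void)
    {
        /* the lower layer may call back into the upper layer... */
        push_pending();
    }

    static void push_pending(void)
    {
        if (in_sendpages)          /* already inside the send path: bail out */
            return;
        in_sendpages = true;
        puts("pushing pending record");
        lower_layer_send();
        in_sendpages = false;
    }

    int main(void)
    {
        push_pending();            /* prints once, no runaway recursion */
        return 0;
    }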
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 14750e7c5ee4..027107f4be53 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c | |||
| @@ -476,7 +476,7 @@ static u32 prog_fd_array_sys_lookup_elem(void *ptr) | |||
| 476 | } | 476 | } |
| 477 | 477 | ||
| 478 | /* decrement refcnt of all bpf_progs that are stored in this map */ | 478 | /* decrement refcnt of all bpf_progs that are stored in this map */ |
| 479 | void bpf_fd_array_map_clear(struct bpf_map *map) | 479 | static void bpf_fd_array_map_clear(struct bpf_map *map) |
| 480 | { | 480 | { |
| 481 | struct bpf_array *array = container_of(map, struct bpf_array, map); | 481 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
| 482 | int i; | 482 | int i; |
| @@ -495,6 +495,7 @@ const struct bpf_map_ops prog_array_map_ops = { | |||
| 495 | .map_fd_get_ptr = prog_fd_array_get_ptr, | 495 | .map_fd_get_ptr = prog_fd_array_get_ptr, |
| 496 | .map_fd_put_ptr = prog_fd_array_put_ptr, | 496 | .map_fd_put_ptr = prog_fd_array_put_ptr, |
| 497 | .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, | 497 | .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, |
| 498 | .map_release_uref = bpf_fd_array_map_clear, | ||
| 498 | }; | 499 | }; |
| 499 | 500 | ||
| 500 | static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, | 501 | static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, |
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index a3b21385e947..098eca568c2b 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c | |||
| @@ -43,6 +43,7 @@ | |||
| 43 | #include <net/tcp.h> | 43 | #include <net/tcp.h> |
| 44 | #include <linux/ptr_ring.h> | 44 | #include <linux/ptr_ring.h> |
| 45 | #include <net/inet_common.h> | 45 | #include <net/inet_common.h> |
| 46 | #include <linux/sched/signal.h> | ||
| 46 | 47 | ||
| 47 | #define SOCK_CREATE_FLAG_MASK \ | 48 | #define SOCK_CREATE_FLAG_MASK \ |
| 48 | (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) | 49 | (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) |
| @@ -325,6 +326,9 @@ retry: | |||
| 325 | if (ret > 0) { | 326 | if (ret > 0) { |
| 326 | if (apply) | 327 | if (apply) |
| 327 | apply_bytes -= ret; | 328 | apply_bytes -= ret; |
| 329 | |||
| 330 | sg->offset += ret; | ||
| 331 | sg->length -= ret; | ||
| 328 | size -= ret; | 332 | size -= ret; |
| 329 | offset += ret; | 333 | offset += ret; |
| 330 | if (uncharge) | 334 | if (uncharge) |
| @@ -332,8 +336,6 @@ retry: | |||
| 332 | goto retry; | 336 | goto retry; |
| 333 | } | 337 | } |
| 334 | 338 | ||
| 335 | sg->length = size; | ||
| 336 | sg->offset = offset; | ||
| 337 | return ret; | 339 | return ret; |
| 338 | } | 340 | } |
| 339 | 341 | ||
| @@ -391,7 +393,8 @@ static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md) | |||
| 391 | } while (i != md->sg_end); | 393 | } while (i != md->sg_end); |
| 392 | } | 394 | } |
| 393 | 395 | ||
| 394 | static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md) | 396 | static void free_bytes_sg(struct sock *sk, int bytes, |
| 397 | struct sk_msg_buff *md, bool charge) | ||
| 395 | { | 398 | { |
| 396 | struct scatterlist *sg = md->sg_data; | 399 | struct scatterlist *sg = md->sg_data; |
| 397 | int i = md->sg_start, free; | 400 | int i = md->sg_start, free; |
| @@ -401,11 +404,13 @@ static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md) | |||
| 401 | if (bytes < free) { | 404 | if (bytes < free) { |
| 402 | sg[i].length -= bytes; | 405 | sg[i].length -= bytes; |
| 403 | sg[i].offset += bytes; | 406 | sg[i].offset += bytes; |
| 404 | sk_mem_uncharge(sk, bytes); | 407 | if (charge) |
| 408 | sk_mem_uncharge(sk, bytes); | ||
| 405 | break; | 409 | break; |
| 406 | } | 410 | } |
| 407 | 411 | ||
| 408 | sk_mem_uncharge(sk, sg[i].length); | 412 | if (charge) |
| 413 | sk_mem_uncharge(sk, sg[i].length); | ||
| 409 | put_page(sg_page(&sg[i])); | 414 | put_page(sg_page(&sg[i])); |
| 410 | bytes -= sg[i].length; | 415 | bytes -= sg[i].length; |
| 411 | sg[i].length = 0; | 416 | sg[i].length = 0; |
| @@ -416,6 +421,7 @@ static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md) | |||
| 416 | if (i == MAX_SKB_FRAGS) | 421 | if (i == MAX_SKB_FRAGS) |
| 417 | i = 0; | 422 | i = 0; |
| 418 | } | 423 | } |
| 424 | md->sg_start = i; | ||
| 419 | } | 425 | } |
| 420 | 426 | ||
| 421 | static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) | 427 | static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) |
| @@ -523,8 +529,6 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes, | |||
| 523 | i = md->sg_start; | 529 | i = md->sg_start; |
| 524 | 530 | ||
| 525 | do { | 531 | do { |
| 526 | r->sg_data[i] = md->sg_data[i]; | ||
| 527 | |||
| 528 | size = (apply && apply_bytes < md->sg_data[i].length) ? | 532 | size = (apply && apply_bytes < md->sg_data[i].length) ? |
| 529 | apply_bytes : md->sg_data[i].length; | 533 | apply_bytes : md->sg_data[i].length; |
| 530 | 534 | ||
| @@ -535,6 +539,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes, | |||
| 535 | } | 539 | } |
| 536 | 540 | ||
| 537 | sk_mem_charge(sk, size); | 541 | sk_mem_charge(sk, size); |
| 542 | r->sg_data[i] = md->sg_data[i]; | ||
| 538 | r->sg_data[i].length = size; | 543 | r->sg_data[i].length = size; |
| 539 | md->sg_data[i].length -= size; | 544 | md->sg_data[i].length -= size; |
| 540 | md->sg_data[i].offset += size; | 545 | md->sg_data[i].offset += size; |
| @@ -575,10 +580,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send, | |||
| 575 | struct sk_msg_buff *md, | 580 | struct sk_msg_buff *md, |
| 576 | int flags) | 581 | int flags) |
| 577 | { | 582 | { |
| 583 | bool ingress = !!(md->flags & BPF_F_INGRESS); | ||
| 578 | struct smap_psock *psock; | 584 | struct smap_psock *psock; |
| 579 | struct scatterlist *sg; | 585 | struct scatterlist *sg; |
| 580 | int i, err, free = 0; | 586 | int err = 0; |
| 581 | bool ingress = !!(md->flags & BPF_F_INGRESS); | ||
| 582 | 587 | ||
| 583 | sg = md->sg_data; | 588 | sg = md->sg_data; |
| 584 | 589 | ||
| @@ -606,16 +611,8 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send, | |||
| 606 | out_rcu: | 611 | out_rcu: |
| 607 | rcu_read_unlock(); | 612 | rcu_read_unlock(); |
| 608 | out: | 613 | out: |
| 609 | i = md->sg_start; | 614 | free_bytes_sg(NULL, send, md, false); |
| 610 | while (sg[i].length) { | 615 | return err; |
| 611 | free += sg[i].length; | ||
| 612 | put_page(sg_page(&sg[i])); | ||
| 613 | sg[i].length = 0; | ||
| 614 | i++; | ||
| 615 | if (i == MAX_SKB_FRAGS) | ||
| 616 | i = 0; | ||
| 617 | } | ||
| 618 | return free; | ||
| 619 | } | 616 | } |
| 620 | 617 | ||
| 621 | static inline void bpf_md_init(struct smap_psock *psock) | 618 | static inline void bpf_md_init(struct smap_psock *psock) |
| @@ -700,19 +697,26 @@ more_data: | |||
| 700 | err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags); | 697 | err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags); |
| 701 | lock_sock(sk); | 698 | lock_sock(sk); |
| 702 | 699 | ||
| 700 | if (unlikely(err < 0)) { | ||
| 701 | free_start_sg(sk, m); | ||
| 702 | psock->sg_size = 0; | ||
| 703 | if (!cork) | ||
| 704 | *copied -= send; | ||
| 705 | } else { | ||
| 706 | psock->sg_size -= send; | ||
| 707 | } | ||
| 708 | |||
| 703 | if (cork) { | 709 | if (cork) { |
| 704 | free_start_sg(sk, m); | 710 | free_start_sg(sk, m); |
| 711 | psock->sg_size = 0; | ||
| 705 | kfree(m); | 712 | kfree(m); |
| 706 | m = NULL; | 713 | m = NULL; |
| 714 | err = 0; | ||
| 707 | } | 715 | } |
| 708 | if (unlikely(err)) | ||
| 709 | *copied -= err; | ||
| 710 | else | ||
| 711 | psock->sg_size -= send; | ||
| 712 | break; | 716 | break; |
| 713 | case __SK_DROP: | 717 | case __SK_DROP: |
| 714 | default: | 718 | default: |
| 715 | free_bytes_sg(sk, send, m); | 719 | free_bytes_sg(sk, send, m, true); |
| 716 | apply_bytes_dec(psock, send); | 720 | apply_bytes_dec(psock, send); |
| 717 | *copied -= send; | 721 | *copied -= send; |
| 718 | psock->sg_size -= send; | 722 | psock->sg_size -= send; |
| @@ -732,6 +736,26 @@ out_err: | |||
| 732 | return err; | 736 | return err; |
| 733 | } | 737 | } |
| 734 | 738 | ||
| 739 | static int bpf_wait_data(struct sock *sk, | ||
| 740 | struct smap_psock *psk, int flags, | ||
| 741 | long timeo, int *err) | ||
| 742 | { | ||
| 743 | int rc; | ||
| 744 | |||
| 745 | DEFINE_WAIT_FUNC(wait, woken_wake_function); | ||
| 746 | |||
| 747 | add_wait_queue(sk_sleep(sk), &wait); | ||
| 748 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); | ||
| 749 | rc = sk_wait_event(sk, &timeo, | ||
| 750 | !list_empty(&psk->ingress) || | ||
| 751 | !skb_queue_empty(&sk->sk_receive_queue), | ||
| 752 | &wait); | ||
| 753 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); | ||
| 754 | remove_wait_queue(sk_sleep(sk), &wait); | ||
| 755 | |||
| 756 | return rc; | ||
| 757 | } | ||
| 758 | |||
| 735 | static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | 759 | static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, |
| 736 | int nonblock, int flags, int *addr_len) | 760 | int nonblock, int flags, int *addr_len) |
| 737 | { | 761 | { |
| @@ -755,6 +779,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | |||
| 755 | return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); | 779 | return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); |
| 756 | 780 | ||
| 757 | lock_sock(sk); | 781 | lock_sock(sk); |
| 782 | bytes_ready: | ||
| 758 | while (copied != len) { | 783 | while (copied != len) { |
| 759 | struct scatterlist *sg; | 784 | struct scatterlist *sg; |
| 760 | struct sk_msg_buff *md; | 785 | struct sk_msg_buff *md; |
| @@ -809,6 +834,28 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | |||
| 809 | } | 834 | } |
| 810 | } | 835 | } |
| 811 | 836 | ||
| 837 | if (!copied) { | ||
| 838 | long timeo; | ||
| 839 | int data; | ||
| 840 | int err = 0; | ||
| 841 | |||
| 842 | timeo = sock_rcvtimeo(sk, nonblock); | ||
| 843 | data = bpf_wait_data(sk, psock, flags, timeo, &err); | ||
| 844 | |||
| 845 | if (data) { | ||
| 846 | if (!skb_queue_empty(&sk->sk_receive_queue)) { | ||
| 847 | release_sock(sk); | ||
| 848 | smap_release_sock(psock, sk); | ||
| 849 | copied = tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); | ||
| 850 | return copied; | ||
| 851 | } | ||
| 852 | goto bytes_ready; | ||
| 853 | } | ||
| 854 | |||
| 855 | if (err) | ||
| 856 | copied = err; | ||
| 857 | } | ||
| 858 | |||
| 812 | release_sock(sk); | 859 | release_sock(sk); |
| 813 | smap_release_sock(psock, sk); | 860 | smap_release_sock(psock, sk); |
| 814 | return copied; | 861 | return copied; |
| @@ -1831,7 +1878,7 @@ static int sock_map_update_elem(struct bpf_map *map, | |||
| 1831 | return err; | 1878 | return err; |
| 1832 | } | 1879 | } |
| 1833 | 1880 | ||
| 1834 | static void sock_map_release(struct bpf_map *map, struct file *map_file) | 1881 | static void sock_map_release(struct bpf_map *map) |
| 1835 | { | 1882 | { |
| 1836 | struct bpf_stab *stab = container_of(map, struct bpf_stab, map); | 1883 | struct bpf_stab *stab = container_of(map, struct bpf_stab, map); |
| 1837 | struct bpf_prog *orig; | 1884 | struct bpf_prog *orig; |
| @@ -1855,7 +1902,7 @@ const struct bpf_map_ops sock_map_ops = { | |||
| 1855 | .map_get_next_key = sock_map_get_next_key, | 1902 | .map_get_next_key = sock_map_get_next_key, |
| 1856 | .map_update_elem = sock_map_update_elem, | 1903 | .map_update_elem = sock_map_update_elem, |
| 1857 | .map_delete_elem = sock_map_delete_elem, | 1904 | .map_delete_elem = sock_map_delete_elem, |
| 1858 | .map_release = sock_map_release, | 1905 | .map_release_uref = sock_map_release, |
| 1859 | }; | 1906 | }; |
| 1860 | 1907 | ||
| 1861 | BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, | 1908 | BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, |
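Note: the sockmap changes above do several related things: a partial do_tcp_sendpages() now advances sg->offset and shrinks sg->length by exactly what was sent before retrying, redirect errors unwind through free_bytes_sg() (with a charge flag so memory accounting is only adjusted for the socket that was actually charged), and bpf_tcp_recvmsg() can wait for ingress data via the new bpf_wait_data() helper instead of returning early. The fragment-consumption bookkeeping, reduced to a userspace sketch with a fake short-write send:

    /* Userspace sketch of consuming a partial send from a buffer descriptor:
     * advance the offset and shrink the length by what was sent, then retry.
     */
    #include <stdio.h>

    struct frag { unsigned offset, length; };

    static unsigned fake_send(unsigned len) { return len > 100 ? 100 : len; } /* short writes */

    int main(void)
    {
        struct frag sg = { .offset = 0, .length = 250 };

        while (sg.length) {
            unsigned sent = fake_send(sg.length);
            sg.offset += sent;      /* what the fixed sockmap path now does */
            sg.length -= sent;
            printf("sent %u, remaining %u at offset %u\n",
                   sent, sg.length, sg.offset);
        }
        return 0;
    }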
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 4ca46df19c9a..ebfe9f29dae8 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
| @@ -257,8 +257,8 @@ static void bpf_map_free_deferred(struct work_struct *work) | |||
| 257 | static void bpf_map_put_uref(struct bpf_map *map) | 257 | static void bpf_map_put_uref(struct bpf_map *map) |
| 258 | { | 258 | { |
| 259 | if (atomic_dec_and_test(&map->usercnt)) { | 259 | if (atomic_dec_and_test(&map->usercnt)) { |
| 260 | if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) | 260 | if (map->ops->map_release_uref) |
| 261 | bpf_fd_array_map_clear(map); | 261 | map->ops->map_release_uref(map); |
| 262 | } | 262 | } |
| 263 | } | 263 | } |
| 264 | 264 | ||
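Note: bpf_map_put_uref() no longer special-cases BPF_MAP_TYPE_PROG_ARRAY; it simply invokes the map's map_release_uref() callback if the map type provides one, which is what lets sock_map_release() hook the same teardown point. A standalone sketch of that optional-callback dispatch (all names illustrative):

    /* Sketch of the ops-table callback pattern: the core calls an optional
     * hook only for map types that implement it.
     */
    #include <stdio.h>

    struct map;
    struct map_ops {
        void (*release_uref)(struct map *map);   /* optional */
    };

    struct map {
        const struct map_ops *ops;
        int refs;
    };

    static void core_put_uref(struct map *m)
    {
        if (--m->refs == 0 && m->ops->release_uref)
            m->ops->release_uref(m);   /* only types that care implement it */
    }

    static void prog_array_release(struct map *m)
    {
        printf("clearing programs held by map %p\n", (void *)m);
    }

    static const struct map_ops prog_array_ops = {
        .release_uref = prog_array_release,
    };

    int main(void)
    {
        struct map m = { .ops = &prog_array_ops, .refs = 1 };

        core_put_uref(&m);
        return 0;
    }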
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 82c1a6f430b3..5bb6681fa91e 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
| @@ -518,8 +518,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev, | |||
| 518 | return -ELOOP; | 518 | return -ELOOP; |
| 519 | } | 519 | } |
| 520 | 520 | ||
| 521 | /* Device is already being bridged */ | 521 | /* Device has master upper dev */ |
| 522 | if (br_port_exists(dev)) | 522 | if (netdev_master_upper_dev_get(dev)) |
| 523 | return -EBUSY; | 523 | return -EBUSY; |
| 524 | 524 | ||
| 525 | /* No bridging devices that dislike that (e.g. wireless) */ | 525 | /* No bridging devices that dislike that (e.g. wireless) */ |
diff --git a/net/compat.c b/net/compat.c index 5ae7437d3853..7242cce5631b 100644 --- a/net/compat.c +++ b/net/compat.c | |||
| @@ -377,7 +377,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname, | |||
| 377 | optname == SO_ATTACH_REUSEPORT_CBPF) | 377 | optname == SO_ATTACH_REUSEPORT_CBPF) |
| 378 | return do_set_attach_filter(sock, level, optname, | 378 | return do_set_attach_filter(sock, level, optname, |
| 379 | optval, optlen); | 379 | optval, optlen); |
| 380 | if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) | 380 | if (!COMPAT_USE_64BIT_TIME && |
| 381 | (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)) | ||
| 381 | return do_set_sock_timeout(sock, level, optname, optval, optlen); | 382 | return do_set_sock_timeout(sock, level, optname, optval, optlen); |
| 382 | 383 | ||
| 383 | return sock_setsockopt(sock, level, optname, optval, optlen); | 384 | return sock_setsockopt(sock, level, optname, optval, optlen); |
| @@ -448,7 +449,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname, | |||
| 448 | static int compat_sock_getsockopt(struct socket *sock, int level, int optname, | 449 | static int compat_sock_getsockopt(struct socket *sock, int level, int optname, |
| 449 | char __user *optval, int __user *optlen) | 450 | char __user *optval, int __user *optlen) |
| 450 | { | 451 | { |
| 451 | if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) | 452 | if (!COMPAT_USE_64BIT_TIME && |
| 453 | (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)) | ||
| 452 | return do_get_sock_timeout(sock, level, optname, optval, optlen); | 454 | return do_get_sock_timeout(sock, level, optname, optval, optlen); |
| 453 | return sock_getsockopt(sock, level, optname, optval, optlen); | 455 | return sock_getsockopt(sock, level, optname, optval, optlen); |
| 454 | } | 456 | } |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 03416e6dd5d7..ba02f0dfe85c 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
| @@ -1032,6 +1032,11 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, | |||
| 1032 | info_size = sizeof(info); | 1032 | info_size = sizeof(info); |
| 1033 | if (copy_from_user(&info, useraddr, info_size)) | 1033 | if (copy_from_user(&info, useraddr, info_size)) |
| 1034 | return -EFAULT; | 1034 | return -EFAULT; |
| 1035 | /* Since malicious users may modify the original data, | ||
| 1036 | * we need to check whether FLOW_RSS is still requested. | ||
| 1037 | */ | ||
| 1038 | if (!(info.flow_type & FLOW_RSS)) | ||
| 1039 | return -EINVAL; | ||
| 1035 | } | 1040 | } |
| 1036 | 1041 | ||
| 1037 | if (info.cmd == ETHTOOL_GRXCLSRLALL) { | 1042 | if (info.cmd == ETHTOOL_GRXCLSRLALL) { |
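Note: this is a classic double-fetch fix: the larger copy_from_user() is justified by the FLOW_RSS flag seen in the first fetch, so the flag has to be re-validated after the second copy because userspace may have rewritten the buffer in between. A userspace analogue of re-checking data after it has been copied out of an untrusted source:

    /* Analogue of re-validating a second copy of untrusted data: the copy
     * must still satisfy the condition that justified the larger read.
     */
    #include <stdio.h>
    #include <string.h>

    #define FLAG_RSS 0x1u

    struct req { unsigned flow_type; unsigned rss_ctx; };

    static int handle(const void *user_buf, size_t len, int expect_rss)
    {
        struct req info;

        if (len < sizeof(info))
            return -1;
        memcpy(&info, user_buf, sizeof(info));      /* second copy of user data */
        if (expect_rss && !(info.flow_type & FLAG_RSS))
            return -1;                              /* user changed it under us */
        return 0;
    }

    int main(void)
    {
        struct req good = { .flow_type = FLAG_RSS, .rss_ctx = 3 };
        struct req bad  = { .flow_type = 0,        .rss_ctx = 3 };

        printf("%d %d\n", handle(&good, sizeof(good), 1),
                          handle(&bad, sizeof(bad), 1));   /* 0 -1 */
        return 0;
    }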
diff --git a/net/core/filter.c b/net/core/filter.c index d31aff93270d..e77c30ca491d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -3240,6 +3240,7 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb, | |||
| 3240 | skb_dst_set(skb, (struct dst_entry *) md); | 3240 | skb_dst_set(skb, (struct dst_entry *) md); |
| 3241 | 3241 | ||
| 3242 | info = &md->u.tun_info; | 3242 | info = &md->u.tun_info; |
| 3243 | memset(info, 0, sizeof(*info)); | ||
| 3243 | info->mode = IP_TUNNEL_INFO_TX; | 3244 | info->mode = IP_TUNNEL_INFO_TX; |
| 3244 | 3245 | ||
| 3245 | info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; | 3246 | info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; |
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index 92d016e87816..385f153fe031 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c | |||
| @@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val) | |||
| 126 | DCCPF_SEQ_WMAX)); | 126 | DCCPF_SEQ_WMAX)); |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | static void dccp_tasklet_schedule(struct sock *sk) | ||
| 130 | { | ||
| 131 | struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet; | ||
| 132 | |||
| 133 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { | ||
| 134 | sock_hold(sk); | ||
| 135 | __tasklet_schedule(t); | ||
| 136 | } | ||
| 137 | } | ||
| 138 | |||
| 129 | static void ccid2_hc_tx_rto_expire(struct timer_list *t) | 139 | static void ccid2_hc_tx_rto_expire(struct timer_list *t) |
| 130 | { | 140 | { |
| 131 | struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer); | 141 | struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer); |
| @@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t) | |||
| 166 | 176 | ||
| 167 | /* if we were blocked before, we may now send cwnd=1 packet */ | 177 | /* if we were blocked before, we may now send cwnd=1 packet */ |
| 168 | if (sender_was_blocked) | 178 | if (sender_was_blocked) |
| 169 | tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); | 179 | dccp_tasklet_schedule(sk); |
| 170 | /* restart backed-off timer */ | 180 | /* restart backed-off timer */ |
| 171 | sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); | 181 | sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); |
| 172 | out: | 182 | out: |
| @@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
| 706 | done: | 716 | done: |
| 707 | /* check if incoming Acks allow pending packets to be sent */ | 717 | /* check if incoming Acks allow pending packets to be sent */ |
| 708 | if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) | 718 | if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) |
| 709 | tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); | 719 | dccp_tasklet_schedule(sk); |
| 710 | dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); | 720 | dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); |
| 711 | } | 721 | } |
| 712 | 722 | ||
diff --git a/net/dccp/timer.c b/net/dccp/timer.c index b50a8732ff43..1501a20a94ca 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c | |||
| @@ -232,6 +232,7 @@ static void dccp_write_xmitlet(unsigned long data) | |||
| 232 | else | 232 | else |
| 233 | dccp_write_xmit(sk); | 233 | dccp_write_xmit(sk); |
| 234 | bh_unlock_sock(sk); | 234 | bh_unlock_sock(sk); |
| 235 | sock_put(sk); | ||
| 235 | } | 236 | } |
| 236 | 237 | ||
| 237 | static void dccp_write_xmit_timer(struct timer_list *t) | 238 | static void dccp_write_xmit_timer(struct timer_list *t) |
| @@ -240,7 +241,6 @@ static void dccp_write_xmit_timer(struct timer_list *t) | |||
| 240 | struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk; | 241 | struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk; |
| 241 | 242 | ||
| 242 | dccp_write_xmitlet((unsigned long)sk); | 243 | dccp_write_xmitlet((unsigned long)sk); |
| 243 | sock_put(sk); | ||
| 244 | } | 244 | } |
| 245 | 245 | ||
| 246 | void dccp_init_xmit_timers(struct sock *sk) | 246 | void dccp_init_xmit_timers(struct sock *sk) |
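Note: the DCCP fix pairs the socket reference with the tasklet's lifetime: dccp_tasklet_schedule() takes sock_hold() only when it actually queues the tasklet (the TASKLET_STATE_SCHED test-and-set), and the matching sock_put() moves into dccp_write_xmitlet() itself, so the reference is dropped by whoever runs the work rather than by a timer that may or may not have queued it. A simplified, single-threaded analogue of take-the-reference-only-if-queued (the kernel version uses an atomic test_and_set_bit):

    /* Analogue of the take-ref-only-if-queued pattern: the reference is
     * grabbed exactly when the work is queued and dropped by the work itself.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static int refs = 1;
    static bool queued;

    static void hold(void) { refs++; }
    static void put(void)  { refs--; }

    static void schedule_work(void)
    {
        if (!queued) {        /* only the first scheduler takes the reference */
            queued = true;
            hold();
        }
    }

    static void run_work(void)
    {
        if (!queued)
            return;
        queued = false;
        puts("work ran");
        put();                /* the work drops the reference it was queued with */
    }

    int main(void)
    {
        schedule_work();
        schedule_work();      /* second call must not take another reference */
        run_work();
        printf("refs=%d\n", refs);   /* back to 1 */
        return 0;
    }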
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ccb25d80f679..1412a7baf0b9 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -709,7 +709,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, | |||
| 709 | fnhe->fnhe_gw = gw; | 709 | fnhe->fnhe_gw = gw; |
| 710 | fnhe->fnhe_pmtu = pmtu; | 710 | fnhe->fnhe_pmtu = pmtu; |
| 711 | fnhe->fnhe_mtu_locked = lock; | 711 | fnhe->fnhe_mtu_locked = lock; |
| 712 | fnhe->fnhe_expires = expires; | 712 | fnhe->fnhe_expires = max(1UL, expires); |
| 713 | 713 | ||
| 714 | /* Exception created; mark the cached routes for the nexthop | 714 | /* Exception created; mark the cached routes for the nexthop |
| 715 | * stale, so anyone caching it rechecks if this exception | 715 | * stale, so anyone caching it rechecks if this exception |
| @@ -1297,6 +1297,36 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) | |||
| 1297 | return mtu - lwtunnel_headroom(dst->lwtstate, mtu); | 1297 | return mtu - lwtunnel_headroom(dst->lwtstate, mtu); |
| 1298 | } | 1298 | } |
| 1299 | 1299 | ||
| 1300 | static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr) | ||
| 1301 | { | ||
| 1302 | struct fnhe_hash_bucket *hash; | ||
| 1303 | struct fib_nh_exception *fnhe, __rcu **fnhe_p; | ||
| 1304 | u32 hval = fnhe_hashfun(daddr); | ||
| 1305 | |||
| 1306 | spin_lock_bh(&fnhe_lock); | ||
| 1307 | |||
| 1308 | hash = rcu_dereference_protected(nh->nh_exceptions, | ||
| 1309 | lockdep_is_held(&fnhe_lock)); | ||
| 1310 | hash += hval; | ||
| 1311 | |||
| 1312 | fnhe_p = &hash->chain; | ||
| 1313 | fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock)); | ||
| 1314 | while (fnhe) { | ||
| 1315 | if (fnhe->fnhe_daddr == daddr) { | ||
| 1316 | rcu_assign_pointer(*fnhe_p, rcu_dereference_protected( | ||
| 1317 | fnhe->fnhe_next, lockdep_is_held(&fnhe_lock))); | ||
| 1318 | fnhe_flush_routes(fnhe); | ||
| 1319 | kfree_rcu(fnhe, rcu); | ||
| 1320 | break; | ||
| 1321 | } | ||
| 1322 | fnhe_p = &fnhe->fnhe_next; | ||
| 1323 | fnhe = rcu_dereference_protected(fnhe->fnhe_next, | ||
| 1324 | lockdep_is_held(&fnhe_lock)); | ||
| 1325 | } | ||
| 1326 | |||
| 1327 | spin_unlock_bh(&fnhe_lock); | ||
| 1328 | } | ||
| 1329 | |||
| 1300 | static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) | 1330 | static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) |
| 1301 | { | 1331 | { |
| 1302 | struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions); | 1332 | struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions); |
| @@ -1310,8 +1340,14 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) | |||
| 1310 | 1340 | ||
| 1311 | for (fnhe = rcu_dereference(hash[hval].chain); fnhe; | 1341 | for (fnhe = rcu_dereference(hash[hval].chain); fnhe; |
| 1312 | fnhe = rcu_dereference(fnhe->fnhe_next)) { | 1342 | fnhe = rcu_dereference(fnhe->fnhe_next)) { |
| 1313 | if (fnhe->fnhe_daddr == daddr) | 1343 | if (fnhe->fnhe_daddr == daddr) { |
| 1344 | if (fnhe->fnhe_expires && | ||
| 1345 | time_after(jiffies, fnhe->fnhe_expires)) { | ||
| 1346 | ip_del_fnhe(nh, daddr); | ||
| 1347 | break; | ||
| 1348 | } | ||
| 1314 | return fnhe; | 1349 | return fnhe; |
| 1350 | } | ||
| 1315 | } | 1351 | } |
| 1316 | return NULL; | 1352 | return NULL; |
| 1317 | } | 1353 | } |
| @@ -1636,36 +1672,6 @@ static void ip_handle_martian_source(struct net_device *dev, | |||
| 1636 | #endif | 1672 | #endif |
| 1637 | } | 1673 | } |
| 1638 | 1674 | ||
| 1639 | static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr) | ||
| 1640 | { | ||
| 1641 | struct fnhe_hash_bucket *hash; | ||
| 1642 | struct fib_nh_exception *fnhe, __rcu **fnhe_p; | ||
| 1643 | u32 hval = fnhe_hashfun(daddr); | ||
| 1644 | |||
| 1645 | spin_lock_bh(&fnhe_lock); | ||
| 1646 | |||
| 1647 | hash = rcu_dereference_protected(nh->nh_exceptions, | ||
| 1648 | lockdep_is_held(&fnhe_lock)); | ||
| 1649 | hash += hval; | ||
| 1650 | |||
| 1651 | fnhe_p = &hash->chain; | ||
| 1652 | fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock)); | ||
| 1653 | while (fnhe) { | ||
| 1654 | if (fnhe->fnhe_daddr == daddr) { | ||
| 1655 | rcu_assign_pointer(*fnhe_p, rcu_dereference_protected( | ||
| 1656 | fnhe->fnhe_next, lockdep_is_held(&fnhe_lock))); | ||
| 1657 | fnhe_flush_routes(fnhe); | ||
| 1658 | kfree_rcu(fnhe, rcu); | ||
| 1659 | break; | ||
| 1660 | } | ||
| 1661 | fnhe_p = &fnhe->fnhe_next; | ||
| 1662 | fnhe = rcu_dereference_protected(fnhe->fnhe_next, | ||
| 1663 | lockdep_is_held(&fnhe_lock)); | ||
| 1664 | } | ||
| 1665 | |||
| 1666 | spin_unlock_bh(&fnhe_lock); | ||
| 1667 | } | ||
| 1668 | |||
| 1669 | /* called in rcu_read_lock() section */ | 1675 | /* called in rcu_read_lock() section */ |
| 1670 | static int __mkroute_input(struct sk_buff *skb, | 1676 | static int __mkroute_input(struct sk_buff *skb, |
| 1671 | const struct fib_result *res, | 1677 | const struct fib_result *res, |
| @@ -1719,20 +1725,10 @@ static int __mkroute_input(struct sk_buff *skb, | |||
| 1719 | 1725 | ||
| 1720 | fnhe = find_exception(&FIB_RES_NH(*res), daddr); | 1726 | fnhe = find_exception(&FIB_RES_NH(*res), daddr); |
| 1721 | if (do_cache) { | 1727 | if (do_cache) { |
| 1722 | if (fnhe) { | 1728 | if (fnhe) |
| 1723 | rth = rcu_dereference(fnhe->fnhe_rth_input); | 1729 | rth = rcu_dereference(fnhe->fnhe_rth_input); |
| 1724 | if (rth && rth->dst.expires && | 1730 | else |
| 1725 | time_after(jiffies, rth->dst.expires)) { | 1731 | rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); |
| 1726 | ip_del_fnhe(&FIB_RES_NH(*res), daddr); | ||
| 1727 | fnhe = NULL; | ||
| 1728 | } else { | ||
| 1729 | goto rt_cache; | ||
| 1730 | } | ||
| 1731 | } | ||
| 1732 | |||
| 1733 | rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); | ||
| 1734 | |||
| 1735 | rt_cache: | ||
| 1736 | if (rt_cache_valid(rth)) { | 1732 | if (rt_cache_valid(rth)) { |
| 1737 | skb_dst_set_noref(skb, &rth->dst); | 1733 | skb_dst_set_noref(skb, &rth->dst); |
| 1738 | goto out; | 1734 | goto out; |
| @@ -2216,39 +2212,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res, | |||
| 2216 | * the loopback interface and the IP_PKTINFO ipi_ifindex will | 2212 | * the loopback interface and the IP_PKTINFO ipi_ifindex will |
| 2217 | * be set to the loopback interface as well. | 2213 | * be set to the loopback interface as well. |
| 2218 | */ | 2214 | */ |
| 2219 | fi = NULL; | 2215 | do_cache = false; |
| 2220 | } | 2216 | } |
| 2221 | 2217 | ||
| 2222 | fnhe = NULL; | 2218 | fnhe = NULL; |
| 2223 | do_cache &= fi != NULL; | 2219 | do_cache &= fi != NULL; |
| 2224 | if (do_cache) { | 2220 | if (fi) { |
| 2225 | struct rtable __rcu **prth; | 2221 | struct rtable __rcu **prth; |
| 2226 | struct fib_nh *nh = &FIB_RES_NH(*res); | 2222 | struct fib_nh *nh = &FIB_RES_NH(*res); |
| 2227 | 2223 | ||
| 2228 | fnhe = find_exception(nh, fl4->daddr); | 2224 | fnhe = find_exception(nh, fl4->daddr); |
| 2225 | if (!do_cache) | ||
| 2226 | goto add; | ||
| 2229 | if (fnhe) { | 2227 | if (fnhe) { |
| 2230 | prth = &fnhe->fnhe_rth_output; | 2228 | prth = &fnhe->fnhe_rth_output; |
| 2231 | rth = rcu_dereference(*prth); | 2229 | } else { |
| 2232 | if (rth && rth->dst.expires && | 2230 | if (unlikely(fl4->flowi4_flags & |
| 2233 | time_after(jiffies, rth->dst.expires)) { | 2231 | FLOWI_FLAG_KNOWN_NH && |
| 2234 | ip_del_fnhe(nh, fl4->daddr); | 2232 | !(nh->nh_gw && |
| 2235 | fnhe = NULL; | 2233 | nh->nh_scope == RT_SCOPE_LINK))) { |
| 2236 | } else { | 2234 | do_cache = false; |
| 2237 | goto rt_cache; | 2235 | goto add; |
| 2238 | } | 2236 | } |
| 2237 | prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); | ||
| 2239 | } | 2238 | } |
| 2240 | |||
| 2241 | if (unlikely(fl4->flowi4_flags & | ||
| 2242 | FLOWI_FLAG_KNOWN_NH && | ||
| 2243 | !(nh->nh_gw && | ||
| 2244 | nh->nh_scope == RT_SCOPE_LINK))) { | ||
| 2245 | do_cache = false; | ||
| 2246 | goto add; | ||
| 2247 | } | ||
| 2248 | prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); | ||
| 2249 | rth = rcu_dereference(*prth); | 2239 | rth = rcu_dereference(*prth); |
| 2250 | |||
| 2251 | rt_cache: | ||
| 2252 | if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst)) | 2240 | if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst)) |
| 2253 | return rth; | 2241 | return rth; |
| 2254 | } | 2242 | } |
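Note: the route.c rework centralises exception expiry: find_exception() now deletes a stale fib_nh_exception itself (via ip_del_fnhe(), moved earlier in the file), so __mkroute_input() and __mkroute_output() no longer repeat the dst.expires check, and update_or_create_fnhe() clamps fnhe_expires to at least 1 so a timeout landing exactly on jiffies value 0 cannot be read back as "never expires". A userspace sketch of a lookup that evicts expired entries as a side effect:

    /* Sketch of the centralised lookup-with-expiry idea: the lookup itself
     * drops an expired entry instead of every caller re-checking.
     */
    #include <stdio.h>
    #include <time.h>

    struct entry { int key; time_t expires; int valid; };

    static struct entry *find(struct entry *tab, int n, int key, time_t now)
    {
        for (int i = 0; i < n; i++) {
            if (!tab[i].valid || tab[i].key != key)
                continue;
            if (tab[i].expires && now >= tab[i].expires) {
                tab[i].valid = 0;            /* delete the stale entry on lookup */
                return NULL;
            }
            return &tab[i];
        }
        return NULL;
    }

    int main(void)
    {
        time_t now = time(NULL);
        struct entry tab[] = {
            { .key = 1, .expires = now - 5,  .valid = 1 },   /* already expired */
            { .key = 2, .expires = now + 60, .valid = 1 },
        };

        printf("key 1: %s\n", find(tab, 2, 1, now) ? "hit" : "expired/miss");
        printf("key 2: %s\n", find(tab, 2, 2, now) ? "hit" : "expired/miss");
        return 0;
    }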
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 9ce1c726185e..c9d00ef54dec 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -697,7 +697,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb, | |||
| 697 | { | 697 | { |
| 698 | return skb->len < size_goal && | 698 | return skb->len < size_goal && |
| 699 | sock_net(sk)->ipv4.sysctl_tcp_autocorking && | 699 | sock_net(sk)->ipv4.sysctl_tcp_autocorking && |
| 700 | skb != tcp_write_queue_head(sk) && | 700 | !tcp_rtx_queue_empty(sk) && |
| 701 | refcount_read(&sk->sk_wmem_alloc) > skb->truesize; | 701 | refcount_read(&sk->sk_wmem_alloc) > skb->truesize; |
| 702 | } | 702 | } |
| 703 | 703 | ||
| @@ -1204,7 +1204,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) | |||
| 1204 | uarg->zerocopy = 0; | 1204 | uarg->zerocopy = 0; |
| 1205 | } | 1205 | } |
| 1206 | 1206 | ||
| 1207 | if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) { | 1207 | if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) && |
| 1208 | !tp->repair) { | ||
| 1208 | err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); | 1209 | err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); |
| 1209 | if (err == -EINPROGRESS && copied_syn > 0) | 1210 | if (err == -EINPROGRESS && copied_syn > 0) |
| 1210 | goto out; | 1211 | goto out; |
| @@ -2673,7 +2674,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
| 2673 | case TCP_REPAIR_QUEUE: | 2674 | case TCP_REPAIR_QUEUE: |
| 2674 | if (!tp->repair) | 2675 | if (!tp->repair) |
| 2675 | err = -EPERM; | 2676 | err = -EPERM; |
| 2676 | else if (val < TCP_QUEUES_NR) | 2677 | else if ((unsigned int)val < TCP_QUEUES_NR) |
| 2677 | tp->repair_queue = val; | 2678 | tp->repair_queue = val; |
| 2678 | else | 2679 | else |
| 2679 | err = -EINVAL; | 2680 | err = -EINVAL; |
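Note: besides skipping the fastopen/defer_connect path while a socket is under repair and switching the autocork test to tcp_rtx_queue_empty(), the TCP_REPAIR_QUEUE bound check now compares the value as unsigned, so a negative value from userspace cannot satisfy "val < TCP_QUEUES_NR" and select a bogus queue. The cast matters because of the usual signed/unsigned comparison rules:

    /* Why the bound check casts to unsigned: a negative value would
     * otherwise pass "val < NR" and index out of range.
     */
    #include <stdio.h>

    #define QUEUES_NR 3

    static int check_signed(int val)   { return val < QUEUES_NR; }
    static int check_unsigned(int val) { return (unsigned int)val < QUEUES_NR; }

    int main(void)
    {
        int val = -1;

        printf("signed:   %s\n", check_signed(val)   ? "accepted (bad)" : "rejected");
        printf("unsigned: %s\n", check_unsigned(val) ? "accepted"       : "rejected (good)");
        return 0;
    }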
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 158d105e76da..58e2f479ffb4 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c | |||
| @@ -806,7 +806,9 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs) | |||
| 806 | } | 806 | } |
| 807 | } | 807 | } |
| 808 | } | 808 | } |
| 809 | bbr->idle_restart = 0; | 809 | /* Restart after idle ends only once we process a new S/ACK for data */ |
| 810 | if (rs->delivered > 0) | ||
| 811 | bbr->idle_restart = 0; | ||
| 810 | } | 812 | } |
| 811 | 813 | ||
| 812 | static void bbr_update_model(struct sock *sk, const struct rate_sample *rs) | 814 | static void bbr_update_model(struct sock *sk, const struct rate_sample *rs) |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index cde7d8251377..f4d61736c41a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -1835,11 +1835,16 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb, | |||
| 1835 | const struct ipv6hdr *inner_iph; | 1835 | const struct ipv6hdr *inner_iph; |
| 1836 | const struct icmp6hdr *icmph; | 1836 | const struct icmp6hdr *icmph; |
| 1837 | struct ipv6hdr _inner_iph; | 1837 | struct ipv6hdr _inner_iph; |
| 1838 | struct icmp6hdr _icmph; | ||
| 1838 | 1839 | ||
| 1839 | if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6)) | 1840 | if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6)) |
| 1840 | goto out; | 1841 | goto out; |
| 1841 | 1842 | ||
| 1842 | icmph = icmp6_hdr(skb); | 1843 | icmph = skb_header_pointer(skb, skb_transport_offset(skb), |
| 1844 | sizeof(_icmph), &_icmph); | ||
| 1845 | if (!icmph) | ||
| 1846 | goto out; | ||
| 1847 | |||
| 1843 | if (icmph->icmp6_type != ICMPV6_DEST_UNREACH && | 1848 | if (icmph->icmp6_type != ICMPV6_DEST_UNREACH && |
| 1844 | icmph->icmp6_type != ICMPV6_PKT_TOOBIG && | 1849 | icmph->icmp6_type != ICMPV6_PKT_TOOBIG && |
| 1845 | icmph->icmp6_type != ICMPV6_TIME_EXCEED && | 1850 | icmph->icmp6_type != ICMPV6_TIME_EXCEED && |
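Note: the multipath hashing fix stops dereferencing icmp6_hdr() directly on what may be an unlinearised or truncated skb: skb_header_pointer() copies the ICMPv6 header into a stack buffer and the function bails out if the header is not fully present. The same copy-then-parse pattern in plain C:

    /* Userspace analogue of skb_header_pointer(): copy the header out of the
     * buffer only if it lies fully inside it, otherwise refuse to parse.
     */
    #include <stdio.h>
    #include <string.h>

    struct icmp_hdr_copy { unsigned char type, code; unsigned short cksum; };

    static const void *header_pointer(const void *buf, size_t buflen,
                                      size_t offset, size_t len, void *copy)
    {
        if (offset + len > buflen)
            return NULL;                    /* header not fully present */
        memcpy(copy, (const char *)buf + offset, len);
        return copy;
    }

    int main(void)
    {
        unsigned char pkt[3] = { 1, 0, 0 };           /* truncated: only 3 bytes */
        struct icmp_hdr_copy hdr;

        if (!header_pointer(pkt, sizeof(pkt), 0, sizeof(hdr), &hdr))
            puts("truncated header, skip hashing on inner addresses");
        return 0;
    }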
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index eea1d8611b20..13b38ad0fa4a 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
| @@ -547,7 +547,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) | |||
| 547 | rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd, | 547 | rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd, |
| 548 | ic->i_send_cq, ic->i_recv_cq); | 548 | ic->i_send_cq, ic->i_recv_cq); |
| 549 | 549 | ||
| 550 | return ret; | 550 | goto out; |
| 551 | 551 | ||
| 552 | sends_out: | 552 | sends_out: |
| 553 | vfree(ic->i_sends); | 553 | vfree(ic->i_sends); |
| @@ -572,6 +572,7 @@ send_cq_out: | |||
| 572 | ic->i_send_cq = NULL; | 572 | ic->i_send_cq = NULL; |
| 573 | rds_ibdev_out: | 573 | rds_ibdev_out: |
| 574 | rds_ib_remove_conn(rds_ibdev, conn); | 574 | rds_ib_remove_conn(rds_ibdev, conn); |
| 575 | out: | ||
| 575 | rds_ib_dev_put(rds_ibdev); | 576 | rds_ib_dev_put(rds_ibdev); |
| 576 | 577 | ||
| 577 | return ret; | 578 | return ret; |
diff --git a/net/rds/recv.c b/net/rds/recv.c index de50e2126e40..dc67458b52f0 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c | |||
| @@ -558,6 +558,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg, | |||
| 558 | struct rds_cmsg_rx_trace t; | 558 | struct rds_cmsg_rx_trace t; |
| 559 | int i, j; | 559 | int i, j; |
| 560 | 560 | ||
| 561 | memset(&t, 0, sizeof(t)); | ||
| 561 | inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock(); | 562 | inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock(); |
| 562 | t.rx_traces = rs->rs_rx_traces; | 563 | t.rx_traces = rs->rs_rx_traces; |
| 563 | for (i = 0; i < rs->rs_rx_traces; i++) { | 564 | for (i = 0; i < rs->rs_rx_traces; i++) { |
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index a366e4c9413a..4808713c73b9 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c | |||
| @@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f) | |||
| 128 | return f->next == &detached; | 128 | return f->next == &detached; |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | static bool fq_flow_is_throttled(const struct fq_flow *f) | ||
| 132 | { | ||
| 133 | return f->next == &throttled; | ||
| 134 | } | ||
| 135 | |||
| 136 | static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) | ||
| 137 | { | ||
| 138 | if (head->first) | ||
| 139 | head->last->next = flow; | ||
| 140 | else | ||
| 141 | head->first = flow; | ||
| 142 | head->last = flow; | ||
| 143 | flow->next = NULL; | ||
| 144 | } | ||
| 145 | |||
| 146 | static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f) | ||
| 147 | { | ||
| 148 | rb_erase(&f->rate_node, &q->delayed); | ||
| 149 | q->throttled_flows--; | ||
| 150 | fq_flow_add_tail(&q->old_flows, f); | ||
| 151 | } | ||
| 152 | |||
| 131 | static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) | 153 | static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) |
| 132 | { | 154 | { |
| 133 | struct rb_node **p = &q->delayed.rb_node, *parent = NULL; | 155 | struct rb_node **p = &q->delayed.rb_node, *parent = NULL; |
| @@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) | |||
| 155 | 177 | ||
| 156 | static struct kmem_cache *fq_flow_cachep __read_mostly; | 178 | static struct kmem_cache *fq_flow_cachep __read_mostly; |
| 157 | 179 | ||
| 158 | static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) | ||
| 159 | { | ||
| 160 | if (head->first) | ||
| 161 | head->last->next = flow; | ||
| 162 | else | ||
| 163 | head->first = flow; | ||
| 164 | head->last = flow; | ||
| 165 | flow->next = NULL; | ||
| 166 | } | ||
| 167 | 180 | ||
| 168 | /* limit number of collected flows per round */ | 181 | /* limit number of collected flows per round */ |
| 169 | #define FQ_GC_MAX 8 | 182 | #define FQ_GC_MAX 8 |
| @@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) | |||
| 267 | f->socket_hash != sk->sk_hash)) { | 280 | f->socket_hash != sk->sk_hash)) { |
| 268 | f->credit = q->initial_quantum; | 281 | f->credit = q->initial_quantum; |
| 269 | f->socket_hash = sk->sk_hash; | 282 | f->socket_hash = sk->sk_hash; |
| 283 | if (fq_flow_is_throttled(f)) | ||
| 284 | fq_flow_unset_throttled(q, f); | ||
| 270 | f->time_next_packet = 0ULL; | 285 | f->time_next_packet = 0ULL; |
| 271 | } | 286 | } |
| 272 | return f; | 287 | return f; |
| @@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now) | |||
| 438 | q->time_next_delayed_flow = f->time_next_packet; | 453 | q->time_next_delayed_flow = f->time_next_packet; |
| 439 | break; | 454 | break; |
| 440 | } | 455 | } |
| 441 | rb_erase(p, &q->delayed); | 456 | fq_flow_unset_throttled(q, f); |
| 442 | q->throttled_flows--; | ||
| 443 | fq_flow_add_tail(&q->old_flows, f); | ||
| 444 | } | 457 | } |
| 445 | } | 458 | } |
| 446 | 459 | ||
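Editor's note: the sch_fq change hinges on ordering. When fq_classify() detects a reused socket and resets the flow's pacing state, a flow that is still keyed into the throttled rb-tree must first be moved back onto the old_flows list; that is why fq_flow_add_tail() moves up and fq_flow_unset_throttled() is introduced. A simplified sketch of the invariant, with the rb-tree replaced by a flag and a counter (names are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct flow {
            struct flow *next;
            unsigned long time_next_packet;
            int throttled;
    };

    struct flow_head {
            struct flow *first;
            struct flow *last;
    };

    static int nthrottled;

    /* same shape as fq_flow_add_tail() in the patch */
    static void flow_add_tail(struct flow_head *head, struct flow *flow)
    {
            if (head->first)
                    head->last->next = flow;
            else
                    head->first = flow;
            head->last = flow;
            flow->next = NULL;
    }

    /* stand-in for fq_flow_unset_throttled(): leave the delayed set first */
    static void flow_unset_throttled(struct flow_head *old_flows, struct flow *f)
    {
            f->throttled = 0;
            nthrottled--;
            flow_add_tail(old_flows, f);
    }

    /* the ordering that matters: unlink before resetting the scheduling key */
    static void reset_reused_flow(struct flow_head *old_flows, struct flow *f)
    {
            if (f->throttled)
                    flow_unset_throttled(old_flows, f);
            f->time_next_packet = 0;
    }

    int main(void)
    {
            struct flow_head old = { NULL, NULL };
            struct flow f = { NULL, 12345, 1 };

            nthrottled = 1;
            reset_reused_flow(&old, &f);
            printf("throttled=%d queued=%d\n", nthrottled, old.first == &f);
            return 0;
    }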
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index 23ebc5318edc..eb93ffe2408b 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c | |||
| @@ -217,7 +217,7 @@ new_skb: | |||
| 217 | skb_pull(chunk->skb, sizeof(*ch)); | 217 | skb_pull(chunk->skb, sizeof(*ch)); |
| 218 | chunk->subh.v = NULL; /* Subheader is no longer valid. */ | 218 | chunk->subh.v = NULL; /* Subheader is no longer valid. */ |
| 219 | 219 | ||
| 220 | if (chunk->chunk_end + sizeof(*ch) < skb_tail_pointer(chunk->skb)) { | 220 | if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) { |
| 221 | /* This is not a singleton */ | 221 | /* This is not a singleton */ |
| 222 | chunk->singleton = 0; | 222 | chunk->singleton = 0; |
| 223 | } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { | 223 | } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { |
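Editor's note: the one-character inqueue.c fix is a boundary condition. A chunk is not a singleton whenever at least one more complete chunk header fits after chunk_end, including the case where that header ends exactly at the tail pointer, hence "<=" rather than "<". A tiny worked example of the comparison (the offsets are invented):

    #include <stdio.h>

    int main(void)
    {
            const unsigned int hdr_len = 4;     /* stand-in for sizeof(struct sctp_chunkhdr) */
            unsigned int chunk_end = 20;        /* offset just past the current chunk */
            unsigned int tail      = 24;        /* offset just past the received data */

            /* a full header fits exactly: 20 + 4 <= 24, so this is not a singleton */
            int more_with_le = chunk_end + hdr_len <= tail;  /* patched check: 1 */
            int more_with_lt = chunk_end + hdr_len <  tail;  /* old check:     0 */

            printf("<= says more chunks: %d, < says more chunks: %d\n",
                   more_with_le, more_with_lt);
            return 0;
    }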
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 2e3f7b75a8ec..42247110d842 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
| @@ -895,6 +895,9 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, | |||
| 895 | if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) | 895 | if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) |
| 896 | return 1; | 896 | return 1; |
| 897 | 897 | ||
| 898 | if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET) | ||
| 899 | return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr; | ||
| 900 | |||
| 898 | return __sctp_v6_cmp_addr(addr1, addr2); | 901 | return __sctp_v6_cmp_addr(addr1, addr2); |
| 899 | } | 902 | } |
| 900 | 903 | ||
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index dd0594a10961..28c070e187c2 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
| @@ -1794,6 +1794,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_a( | |||
| 1794 | GFP_ATOMIC)) | 1794 | GFP_ATOMIC)) |
| 1795 | goto nomem; | 1795 | goto nomem; |
| 1796 | 1796 | ||
| 1797 | if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC)) | ||
| 1798 | goto nomem; | ||
| 1799 | |||
| 1797 | /* Make sure no new addresses are being added during the | 1800 | /* Make sure no new addresses are being added during the |
| 1798 | * restart. Though this is a pretty complicated attack | 1801 | * restart. Though this is a pretty complicated attack |
| 1799 | * since you'd have to get inside the cookie. | 1802 | * since you'd have to get inside the cookie. |
| @@ -1906,6 +1909,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_b( | |||
| 1906 | GFP_ATOMIC)) | 1909 | GFP_ATOMIC)) |
| 1907 | goto nomem; | 1910 | goto nomem; |
| 1908 | 1911 | ||
| 1912 | if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC)) | ||
| 1913 | goto nomem; | ||
| 1914 | |||
| 1909 | /* Update the content of current association. */ | 1915 | /* Update the content of current association. */ |
| 1910 | sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); | 1916 | sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); |
| 1911 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | 1917 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, |
| @@ -2050,7 +2056,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_d( | |||
| 2050 | } | 2056 | } |
| 2051 | } | 2057 | } |
| 2052 | 2058 | ||
| 2053 | repl = sctp_make_cookie_ack(new_asoc, chunk); | 2059 | repl = sctp_make_cookie_ack(asoc, chunk); |
| 2054 | if (!repl) | 2060 | if (!repl) |
| 2055 | goto nomem; | 2061 | goto nomem; |
| 2056 | 2062 | ||
diff --git a/net/sctp/stream.c b/net/sctp/stream.c index f799043abec9..f1f1d1b232ba 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c | |||
| @@ -240,6 +240,8 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new) | |||
| 240 | 240 | ||
| 241 | new->out = NULL; | 241 | new->out = NULL; |
| 242 | new->in = NULL; | 242 | new->in = NULL; |
| 243 | new->outcnt = 0; | ||
| 244 | new->incnt = 0; | ||
| 243 | } | 245 | } |
| 244 | 246 | ||
| 245 | static int sctp_send_reconf(struct sctp_association *asoc, | 247 | static int sctp_send_reconf(struct sctp_association *asoc, |
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index f5d4b69dbabc..544bab42f925 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
| @@ -292,6 +292,17 @@ static void smc_copy_sock_settings_to_smc(struct smc_sock *smc) | |||
| 292 | smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC); | 292 | smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC); |
| 293 | } | 293 | } |
| 294 | 294 | ||
| 295 | /* register a new rmb */ | ||
| 296 | static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc) | ||
| 297 | { | ||
| 298 | /* register memory region for new rmb */ | ||
| 299 | if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) { | ||
| 300 | rmb_desc->regerr = 1; | ||
| 301 | return -EFAULT; | ||
| 302 | } | ||
| 303 | return 0; | ||
| 304 | } | ||
| 305 | |||
| 295 | static int smc_clnt_conf_first_link(struct smc_sock *smc) | 306 | static int smc_clnt_conf_first_link(struct smc_sock *smc) |
| 296 | { | 307 | { |
| 297 | struct smc_link_group *lgr = smc->conn.lgr; | 308 | struct smc_link_group *lgr = smc->conn.lgr; |
| @@ -321,9 +332,7 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc) | |||
| 321 | 332 | ||
| 322 | smc_wr_remember_qp_attr(link); | 333 | smc_wr_remember_qp_attr(link); |
| 323 | 334 | ||
| 324 | rc = smc_wr_reg_send(link, | 335 | if (smc_reg_rmb(link, smc->conn.rmb_desc)) |
| 325 | smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]); | ||
| 326 | if (rc) | ||
| 327 | return SMC_CLC_DECL_INTERR; | 336 | return SMC_CLC_DECL_INTERR; |
| 328 | 337 | ||
| 329 | /* send CONFIRM LINK response over RoCE fabric */ | 338 | /* send CONFIRM LINK response over RoCE fabric */ |
| @@ -473,13 +482,8 @@ static int smc_connect_rdma(struct smc_sock *smc) | |||
| 473 | goto decline_rdma_unlock; | 482 | goto decline_rdma_unlock; |
| 474 | } | 483 | } |
| 475 | } else { | 484 | } else { |
| 476 | struct smc_buf_desc *buf_desc = smc->conn.rmb_desc; | 485 | if (!smc->conn.rmb_desc->reused) { |
| 477 | 486 | if (smc_reg_rmb(link, smc->conn.rmb_desc)) { | |
| 478 | if (!buf_desc->reused) { | ||
| 479 | /* register memory region for new rmb */ | ||
| 480 | rc = smc_wr_reg_send(link, | ||
| 481 | buf_desc->mr_rx[SMC_SINGLE_LINK]); | ||
| 482 | if (rc) { | ||
| 483 | reason_code = SMC_CLC_DECL_INTERR; | 487 | reason_code = SMC_CLC_DECL_INTERR; |
| 484 | goto decline_rdma_unlock; | 488 | goto decline_rdma_unlock; |
| 485 | } | 489 | } |
| @@ -719,9 +723,7 @@ static int smc_serv_conf_first_link(struct smc_sock *smc) | |||
| 719 | 723 | ||
| 720 | link = &lgr->lnk[SMC_SINGLE_LINK]; | 724 | link = &lgr->lnk[SMC_SINGLE_LINK]; |
| 721 | 725 | ||
| 722 | rc = smc_wr_reg_send(link, | 726 | if (smc_reg_rmb(link, smc->conn.rmb_desc)) |
| 723 | smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]); | ||
| 724 | if (rc) | ||
| 725 | return SMC_CLC_DECL_INTERR; | 727 | return SMC_CLC_DECL_INTERR; |
| 726 | 728 | ||
| 727 | /* send CONFIRM LINK request to client over the RoCE fabric */ | 729 | /* send CONFIRM LINK request to client over the RoCE fabric */ |
| @@ -854,13 +856,8 @@ static void smc_listen_work(struct work_struct *work) | |||
| 854 | smc_rx_init(new_smc); | 856 | smc_rx_init(new_smc); |
| 855 | 857 | ||
| 856 | if (local_contact != SMC_FIRST_CONTACT) { | 858 | if (local_contact != SMC_FIRST_CONTACT) { |
| 857 | struct smc_buf_desc *buf_desc = new_smc->conn.rmb_desc; | 859 | if (!new_smc->conn.rmb_desc->reused) { |
| 858 | 860 | if (smc_reg_rmb(link, new_smc->conn.rmb_desc)) { | |
| 859 | if (!buf_desc->reused) { | ||
| 860 | /* register memory region for new rmb */ | ||
| 861 | rc = smc_wr_reg_send(link, | ||
| 862 | buf_desc->mr_rx[SMC_SINGLE_LINK]); | ||
| 863 | if (rc) { | ||
| 864 | reason_code = SMC_CLC_DECL_INTERR; | 861 | reason_code = SMC_CLC_DECL_INTERR; |
| 865 | goto decline_rdma_unlock; | 862 | goto decline_rdma_unlock; |
| 866 | } | 863 | } |
| @@ -978,10 +975,6 @@ static void smc_tcp_listen_work(struct work_struct *work) | |||
| 978 | } | 975 | } |
| 979 | 976 | ||
| 980 | out: | 977 | out: |
| 981 | if (lsmc->clcsock) { | ||
| 982 | sock_release(lsmc->clcsock); | ||
| 983 | lsmc->clcsock = NULL; | ||
| 984 | } | ||
| 985 | release_sock(lsk); | 978 | release_sock(lsk); |
| 986 | sock_put(&lsmc->sk); /* sock_hold in smc_listen */ | 979 | sock_put(&lsmc->sk); /* sock_hold in smc_listen */ |
| 987 | } | 980 | } |
| @@ -1170,13 +1163,15 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, | |||
| 1170 | /* delegate to CLC child sock */ | 1163 | /* delegate to CLC child sock */ |
| 1171 | release_sock(sk); | 1164 | release_sock(sk); |
| 1172 | mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); | 1165 | mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); |
| 1173 | /* if non-blocking connect finished ... */ | ||
| 1174 | lock_sock(sk); | 1166 | lock_sock(sk); |
| 1175 | if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) { | 1167 | sk->sk_err = smc->clcsock->sk->sk_err; |
| 1176 | sk->sk_err = smc->clcsock->sk->sk_err; | 1168 | if (sk->sk_err) { |
| 1177 | if (sk->sk_err) { | 1169 | mask |= EPOLLERR; |
| 1178 | mask |= EPOLLERR; | 1170 | } else { |
| 1179 | } else { | 1171 | /* if non-blocking connect finished ... */ |
| 1172 | if (sk->sk_state == SMC_INIT && | ||
| 1173 | mask & EPOLLOUT && | ||
| 1174 | smc->clcsock->sk->sk_state != TCP_CLOSE) { | ||
| 1180 | rc = smc_connect_rdma(smc); | 1175 | rc = smc_connect_rdma(smc); |
| 1181 | if (rc < 0) | 1176 | if (rc < 0) |
| 1182 | mask |= EPOLLERR; | 1177 | mask |= EPOLLERR; |
| @@ -1320,8 +1315,11 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page, | |||
| 1320 | 1315 | ||
| 1321 | smc = smc_sk(sk); | 1316 | smc = smc_sk(sk); |
| 1322 | lock_sock(sk); | 1317 | lock_sock(sk); |
| 1323 | if (sk->sk_state != SMC_ACTIVE) | 1318 | if (sk->sk_state != SMC_ACTIVE) { |
| 1319 | release_sock(sk); | ||
| 1324 | goto out; | 1320 | goto out; |
| 1321 | } | ||
| 1322 | release_sock(sk); | ||
| 1325 | if (smc->use_fallback) | 1323 | if (smc->use_fallback) |
| 1326 | rc = kernel_sendpage(smc->clcsock, page, offset, | 1324 | rc = kernel_sendpage(smc->clcsock, page, offset, |
| 1327 | size, flags); | 1325 | size, flags); |
| @@ -1329,7 +1327,6 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page, | |||
| 1329 | rc = sock_no_sendpage(sock, page, offset, size, flags); | 1327 | rc = sock_no_sendpage(sock, page, offset, size, flags); |
| 1330 | 1328 | ||
| 1331 | out: | 1329 | out: |
| 1332 | release_sock(sk); | ||
| 1333 | return rc; | 1330 | return rc; |
| 1334 | } | 1331 | } |
| 1335 | 1332 | ||
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index f44f6803f7ff..d4bd01bb44e1 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c | |||
| @@ -32,6 +32,9 @@ | |||
| 32 | 32 | ||
| 33 | static u32 smc_lgr_num; /* unique link group number */ | 33 | static u32 smc_lgr_num; /* unique link group number */ |
| 34 | 34 | ||
| 35 | static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk, | ||
| 36 | bool is_rmb); | ||
| 37 | |||
| 35 | static void smc_lgr_schedule_free_work(struct smc_link_group *lgr) | 38 | static void smc_lgr_schedule_free_work(struct smc_link_group *lgr) |
| 36 | { | 39 | { |
| 37 | /* client link group creation always follows the server link group | 40 | /* client link group creation always follows the server link group |
| @@ -234,9 +237,22 @@ static void smc_buf_unuse(struct smc_connection *conn) | |||
| 234 | conn->sndbuf_size = 0; | 237 | conn->sndbuf_size = 0; |
| 235 | } | 238 | } |
| 236 | if (conn->rmb_desc) { | 239 | if (conn->rmb_desc) { |
| 237 | conn->rmb_desc->reused = true; | 240 | if (!conn->rmb_desc->regerr) { |
| 238 | conn->rmb_desc->used = 0; | 241 | conn->rmb_desc->reused = 1; |
| 239 | conn->rmbe_size = 0; | 242 | conn->rmb_desc->used = 0; |
| 243 | conn->rmbe_size = 0; | ||
| 244 | } else { | ||
| 245 | /* buf registration failed, reuse not possible */ | ||
| 246 | struct smc_link_group *lgr = conn->lgr; | ||
| 247 | struct smc_link *lnk; | ||
| 248 | |||
| 249 | write_lock_bh(&lgr->rmbs_lock); | ||
| 250 | list_del(&conn->rmb_desc->list); | ||
| 251 | write_unlock_bh(&lgr->rmbs_lock); | ||
| 252 | |||
| 253 | lnk = &lgr->lnk[SMC_SINGLE_LINK]; | ||
| 254 | smc_buf_free(conn->rmb_desc, lnk, true); | ||
| 255 | } | ||
| 240 | } | 256 | } |
| 241 | } | 257 | } |
| 242 | 258 | ||
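Editor's note: together with the new smc_reg_rmb() helper in af_smc.c, the smc_core.c hunk makes registration failure sticky: a buffer flagged with regerr is unlinked from the link group and freed instead of being marked reusable, so a half-registered RMB is never recycled for a later connection. A hedged sketch of that "poisoned buffers are not returned to the pool" idea (the pool is a toy free list, not the SMC data structures):

    #include <stdio.h>
    #include <stdlib.h>

    struct buf {
            struct buf *next;
            int reused;
            int regerr;   /* set when registration with the device failed */
    };

    static struct buf *free_list;

    /* mirrors the decision in smc_buf_unuse(): bad buffers are destroyed */
    static void buf_unuse(struct buf *b)
    {
            if (!b->regerr) {
                    b->reused = 1;
                    b->next = free_list;   /* back to the pool for the next conn */
                    free_list = b;
            } else {
                    free(b);               /* never hand this buffer out again */
            }
    }

    int main(void)
    {
            struct buf *good = calloc(1, sizeof(*good));
            struct buf *bad  = calloc(1, sizeof(*bad));

            if (!good || !bad)
                    return 1;
            bad->regerr = 1;
            buf_unuse(good);
            buf_unuse(bad);
            printf("pool has a buf: %d, marked reused: %d\n",
                   free_list != NULL, free_list ? free_list->reused : 0);
            return 0;
    }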
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 07e2a393e6d9..5dfcb15d529f 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h | |||
| @@ -123,7 +123,8 @@ struct smc_buf_desc { | |||
| 123 | */ | 123 | */ |
| 124 | u32 order; /* allocation order */ | 124 | u32 order; /* allocation order */ |
| 125 | u32 used; /* currently used / unused */ | 125 | u32 used; /* currently used / unused */ |
| 126 | bool reused; /* new created / reused */ | 126 | u8 reused : 1; /* new created / reused */ |
| 127 | u8 regerr : 1; /* err during registration */ | ||
| 127 | }; | 128 | }; |
| 128 | 129 | ||
| 129 | struct smc_rtoken { /* address/key of remote RMB */ | 130 | struct smc_rtoken { /* address/key of remote RMB */ |
diff --git a/net/tipc/node.c b/net/tipc/node.c index 6f98b56dd48e..baaf93f12cbd 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
| @@ -2244,7 +2244,7 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2244 | 2244 | ||
| 2245 | rtnl_lock(); | 2245 | rtnl_lock(); |
| 2246 | for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { | 2246 | for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { |
| 2247 | err = __tipc_nl_add_monitor(net, &msg, prev_bearer); | 2247 | err = __tipc_nl_add_monitor(net, &msg, bearer_id); |
| 2248 | if (err) | 2248 | if (err) |
| 2249 | break; | 2249 | break; |
| 2250 | } | 2250 | } |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 0d379970960e..cc03e00785c7 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
| @@ -114,6 +114,7 @@ int tls_push_sg(struct sock *sk, | |||
| 114 | size = sg->length - offset; | 114 | size = sg->length - offset; |
| 115 | offset += sg->offset; | 115 | offset += sg->offset; |
| 116 | 116 | ||
| 117 | ctx->in_tcp_sendpages = true; | ||
| 117 | while (1) { | 118 | while (1) { |
| 118 | if (sg_is_last(sg)) | 119 | if (sg_is_last(sg)) |
| 119 | sendpage_flags = flags; | 120 | sendpage_flags = flags; |
| @@ -148,6 +149,8 @@ retry: | |||
| 148 | } | 149 | } |
| 149 | 150 | ||
| 150 | clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags); | 151 | clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags); |
| 152 | ctx->in_tcp_sendpages = false; | ||
| 153 | ctx->sk_write_space(sk); | ||
| 151 | 154 | ||
| 152 | return 0; | 155 | return 0; |
| 153 | } | 156 | } |
| @@ -217,6 +220,10 @@ static void tls_write_space(struct sock *sk) | |||
| 217 | { | 220 | { |
| 218 | struct tls_context *ctx = tls_get_ctx(sk); | 221 | struct tls_context *ctx = tls_get_ctx(sk); |
| 219 | 222 | ||
| 223 | /* We are already sending pages, ignore notification */ | ||
| 224 | if (ctx->in_tcp_sendpages) | ||
| 225 | return; | ||
| 226 | |||
| 220 | if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { | 227 | if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { |
| 221 | gfp_t sk_allocation = sk->sk_allocation; | 228 | gfp_t sk_allocation = sk->sk_allocation; |
| 222 | int rc; | 229 | int rc; |
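Editor's note: the TLS hunks add a re-entrancy guard. While tls_push_sg() is looping over do_tcp_sendpages(), the transport may fire the write-space callback, which would otherwise re-enter the TLS send path; the in_tcp_sendpages flag makes that notification a no-op, and the saved sk_write_space callback is invoked once after the push completes. A self-contained sketch of the guard-flag pattern (all names are illustrative stand-ins):

    #include <stdio.h>

    struct ctx {
            int in_send;                    /* like ctx->in_tcp_sendpages */
            void (*orig_write_space)(struct ctx *);
    };

    static void orig_write_space(struct ctx *c)
    {
            printf("original write_space runs\n");
    }

    /* the hook installed on the socket; must not re-enter the send path */
    static void tls_like_write_space(struct ctx *c)
    {
            if (c->in_send)                 /* already pushing, ignore it */
                    return;
            printf("would resume a partially sent record here\n");
    }

    static void push_pages(struct ctx *c)
    {
            c->in_send = 1;
            /* ... loop calling the transport, which may fire write_space ... */
            tls_like_write_space(c);        /* re-entrant call is now a no-op */
            c->in_send = 0;
            c->orig_write_space(c);         /* tell the upper layer once done */
    }

    int main(void)
    {
            struct ctx c = { 0, orig_write_space };

            push_pages(&c);
            return 0;
    }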
diff --git a/samples/sockmap/Makefile b/samples/sockmap/Makefile index 9bf2881bd11b..fa53f4d77834 100644 --- a/samples/sockmap/Makefile +++ b/samples/sockmap/Makefile | |||
| @@ -65,11 +65,14 @@ $(src)/*.c: verify_target_bpf | |||
| 65 | # asm/sysreg.h - inline assembly used by it is incompatible with llvm. | 65 | # asm/sysreg.h - inline assembly used by it is incompatible with llvm. |
| 66 | # But, there is no easy way to fix it, so just exclude it since it is | 66 | # But, there is no easy way to fix it, so just exclude it since it is |
| 67 | # useless for BPF samples. | 67 | # useless for BPF samples. |
| 68 | # | ||
| 69 | # -target bpf option required with SK_MSG programs, this is to ensure | ||
| 70 | # reading 'void *' data types for data and data_end are __u64 reads. | ||
| 68 | $(obj)/%.o: $(src)/%.c | 71 | $(obj)/%.o: $(src)/%.c |
| 69 | $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \ | 72 | $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \ |
| 70 | -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ | 73 | -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ |
| 71 | -Wno-compare-distinct-pointer-types \ | 74 | -Wno-compare-distinct-pointer-types \ |
| 72 | -Wno-gnu-variable-sized-type-not-at-end \ | 75 | -Wno-gnu-variable-sized-type-not-at-end \ |
| 73 | -Wno-address-of-packed-member -Wno-tautological-compare \ | 76 | -Wno-address-of-packed-member -Wno-tautological-compare \ |
| 74 | -Wno-unknown-warning-option \ | 77 | -Wno-unknown-warning-option -O2 -target bpf \ |
| 75 | -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ | 78 | -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ |
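Editor's note: the Makefile comment is about pointer width. The SK_MSG context exposes data and data_end as pointer-typed members that the verifier treats as 64-bit loads; compiling for a 32-bit native target would shrink those accesses, so the samples now always build with -target bpf. A small hedged illustration of the size mismatch (the struct below is a stand-in, not the real sk_msg_md UAPI layout):

    #include <stdint.h>
    #include <stdio.h>

    /* shape of the members in question, for illustration only */
    struct msg_md_like {
            void *data;        /* 8 bytes with -target bpf */
            void *data_end;    /* 4 bytes if built for a 32-bit host by mistake */
    };

    int main(void)
    {
            printf("sizeof(void *) = %zu, expected field width = %zu\n",
                   sizeof(void *), sizeof(uint64_t));
            printf("struct size here: %zu (a BPF-target build sees 16)\n",
                   sizeof(struct msg_md_like));
            return 0;
    }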
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile index 1ea545965ee3..53b60ad452f5 100644 --- a/tools/bpf/Makefile +++ b/tools/bpf/Makefile | |||
| @@ -76,6 +76,8 @@ $(OUTPUT)bpf_asm: $(OUTPUT)bpf_asm.o $(OUTPUT)bpf_exp.yacc.o $(OUTPUT)bpf_exp.le | |||
| 76 | $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^ | 76 | $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^ |
| 77 | 77 | ||
| 78 | $(OUTPUT)bpf_exp.lex.c: $(OUTPUT)bpf_exp.yacc.c | 78 | $(OUTPUT)bpf_exp.lex.c: $(OUTPUT)bpf_exp.yacc.c |
| 79 | $(OUTPUT)bpf_exp.yacc.o: $(OUTPUT)bpf_exp.yacc.c | ||
| 80 | $(OUTPUT)bpf_exp.lex.o: $(OUTPUT)bpf_exp.lex.c | ||
| 79 | 81 | ||
| 80 | clean: bpftool_clean | 82 | clean: bpftool_clean |
| 81 | $(call QUIET_CLEAN, bpf-progs) | 83 | $(call QUIET_CLEAN, bpf-progs) |
diff --git a/tools/bpf/bpf_dbg.c b/tools/bpf/bpf_dbg.c index 4f254bcc4423..61b9aa5d6415 100644 --- a/tools/bpf/bpf_dbg.c +++ b/tools/bpf/bpf_dbg.c | |||
| @@ -1063,7 +1063,7 @@ static int cmd_load_pcap(char *file) | |||
| 1063 | 1063 | ||
| 1064 | static int cmd_load(char *arg) | 1064 | static int cmd_load(char *arg) |
| 1065 | { | 1065 | { |
| 1066 | char *subcmd, *cont, *tmp = strdup(arg); | 1066 | char *subcmd, *cont = NULL, *tmp = strdup(arg); |
| 1067 | int ret = CMD_OK; | 1067 | int ret = CMD_OK; |
| 1068 | 1068 | ||
| 1069 | subcmd = strtok_r(tmp, " ", &cont); | 1069 | subcmd = strtok_r(tmp, " ", &cont); |
| @@ -1073,7 +1073,10 @@ static int cmd_load(char *arg) | |||
| 1073 | bpf_reset(); | 1073 | bpf_reset(); |
| 1074 | bpf_reset_breakpoints(); | 1074 | bpf_reset_breakpoints(); |
| 1075 | 1075 | ||
| 1076 | ret = cmd_load_bpf(cont); | 1076 | if (!cont) |
| 1077 | ret = CMD_ERR; | ||
| 1078 | else | ||
| 1079 | ret = cmd_load_bpf(cont); | ||
| 1077 | } else if (matches(subcmd, "pcap") == 0) { | 1080 | } else if (matches(subcmd, "pcap") == 0) { |
| 1078 | ret = cmd_load_pcap(cont); | 1081 | ret = cmd_load_pcap(cont); |
| 1079 | } else { | 1082 | } else { |
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index faadbe233966..4123d0ab90ba 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c | |||
| @@ -1108,7 +1108,7 @@ static void test_stacktrace_build_id(void) | |||
| 1108 | 1108 | ||
| 1109 | assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") | 1109 | assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") |
| 1110 | == 0); | 1110 | == 0); |
| 1111 | assert(system("./urandom_read if=/dev/urandom of=/dev/zero count=4 2> /dev/null") == 0); | 1111 | assert(system("./urandom_read") == 0); |
| 1112 | /* disable stack trace collection */ | 1112 | /* disable stack trace collection */ |
| 1113 | key = 0; | 1113 | key = 0; |
| 1114 | val = 1; | 1114 | val = 1; |
| @@ -1158,7 +1158,7 @@ static void test_stacktrace_build_id(void) | |||
| 1158 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); | 1158 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); |
| 1159 | 1159 | ||
| 1160 | CHECK(build_id_matches < 1, "build id match", | 1160 | CHECK(build_id_matches < 1, "build id match", |
| 1161 | "Didn't find expected build ID from the map"); | 1161 | "Didn't find expected build ID from the map\n"); |
| 1162 | 1162 | ||
| 1163 | disable_pmu: | 1163 | disable_pmu: |
| 1164 | ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); | 1164 | ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); |
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 8f1e13d2e547..daf5effec3f0 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile | |||
| @@ -5,7 +5,8 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g | |||
| 5 | CFLAGS += -I../../../../usr/include/ | 5 | CFLAGS += -I../../../../usr/include/ |
| 6 | 6 | ||
| 7 | TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh | 7 | TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh |
| 8 | TEST_PROGS += fib_tests.sh fib-onlink-tests.sh in_netns.sh pmtu.sh | 8 | TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh |
| 9 | TEST_GEN_PROGS_EXTENDED := in_netns.sh | ||
| 9 | TEST_GEN_FILES = socket | 10 | TEST_GEN_FILES = socket |
| 10 | TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy | 11 | TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy |
| 11 | TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa | 12 | TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa |
