author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-10-12 03:01:59 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-10-12 03:01:59 -0400
commit		90ad18418c2d3db23ee827cdd74fed2ca9b70a18 (patch)
tree		d495ed813ebf8acd4dec5ad1efe50da07268b112
parent		0778a9f2dd924c3af41971ba40eec44793aea531 (diff)
parent		6b9bab550cac108d86c731c207e3e74ea10eb638 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
David writes:
 "Networking

  1) RXRPC receive path fixes from David Howells.

  2) Re-export __skb_recv_udp(), from Jiri Kosina.

  3) Fix refcounting in u32 classifier, from Al Viro.

  4) Userspace netlink ABI fixes from Eugene Syromiatnikov.

  5) Don't double iounmap on rmmod in ena driver, from Arthur
     Kiyanovski.

  6) Fix devlink string attribute handling, we must pull a copy into a
     kernel buffer if the lifetime extends past the netlink request.
     From Moshe Shemesh.

  7) Fix hangs in RDS, from Ka-Cheong Poon.

  8) Fix recursive locking lockdep warnings in tipc, from Ying Xue.

  9) Clear RX irq correctly in socionext, from Ilias Apalodimas.

 10) bcm_sf2 fixes from Florian Fainelli."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (38 commits)
  net: dsa: bcm_sf2: Call setup during switch resume
  net: dsa: bcm_sf2: Fix unbind ordering
  net: phy: sfp: remove sfp_mutex's definition
  r8169: set RX_MULTI_EN bit in RxConfig for 8168F-family chips
  net: socionext: clear rx irq correctly
  net/mlx4_core: Fix warnings during boot on driverinit param set failures
  tipc: eliminate possible recursive locking detected by LOCKDEP
  selftests: udpgso_bench.sh explicitly requires bash
  selftests: rtnetlink.sh explicitly requires bash.
  qmi_wwan: Added support for Gemalto's Cinterion ALASxx WWAN interface
  tipc: queue socket protocol error messages into socket receive buffer
  tipc: set link tolerance correctly in broadcast link
  net: ipv4: don't let PMTU updates increase route MTU
  net: ipv4: update fnhe_pmtu when first hop's MTU changes
  net/ipv6: stop leaking percpu memory in fib6 info
  rds: RDS (tcp) hangs on sendto() to unresponding address
  net: make skb_partial_csum_set() more robust against overflows
  devlink: Add helper function for safely copy string param
  devlink: Fix param cmode driverinit for string type
  devlink: Fix param set handling for string type
  ...
-rw-r--r--  drivers/net/dsa/bcm_sf2.c                       |  14
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c   |   8
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c    |  22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c       |  43
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c            |   4
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c         |   5
-rw-r--r--  drivers/net/phy/sfp.c                           |   2
-rw-r--r--  drivers/net/usb/qmi_wwan.c                      |   1
-rw-r--r--  include/linux/netdevice.h                       |   7
-rw-r--r--  include/net/devlink.h                           |  12
-rw-r--r--  include/net/ip_fib.h                            |   1
-rw-r--r--  include/trace/events/rxrpc.h                    |   1
-rw-r--r--  include/uapi/linux/smc_diag.h                   |  25
-rw-r--r--  include/uapi/linux/udp.h                        |   1
-rw-r--r--  net/core/dev.c                                  |  28
-rw-r--r--  net/core/devlink.c                              |  43
-rw-r--r--  net/core/skbuff.c                               |  12
-rw-r--r--  net/ipv4/fib_frontend.c                         |  12
-rw-r--r--  net/ipv4/fib_semantics.c                        |  50
-rw-r--r--  net/ipv4/route.c                                |   7
-rw-r--r--  net/ipv4/udp.c                                  |   2
-rw-r--r--  net/ipv6/ip6_fib.c                              |   2
-rw-r--r--  net/rds/send.c                                  |  13
-rw-r--r--  net/rxrpc/ar-internal.h                         |  23
-rw-r--r--  net/rxrpc/call_accept.c                         |  27
-rw-r--r--  net/rxrpc/call_object.c                         |   5
-rw-r--r--  net/rxrpc/conn_client.c                         |  10
-rw-r--r--  net/rxrpc/conn_event.c                          |  26
-rw-r--r--  net/rxrpc/input.c                               | 253
-rw-r--r--  net/rxrpc/local_object.c                        |  30
-rw-r--r--  net/rxrpc/peer_event.c                          |   5
-rw-r--r--  net/rxrpc/peer_object.c                         |  29
-rw-r--r--  net/sched/cls_u32.c                             |  10
-rw-r--r--  net/tipc/link.c                                 |  27
-rw-r--r--  net/tipc/socket.c                               |  14
-rwxr-xr-x  tools/testing/selftests/net/rtnetlink.sh        |   2
-rwxr-xr-x  tools/testing/selftests/net/udpgso_bench.sh     |   2
37 files changed, 493 insertions, 285 deletions
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e0066adcd2f3..fc8b48adf38b 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -703,7 +703,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 {
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
-	unsigned int port;
 	int ret;
 
 	ret = bcm_sf2_sw_rst(priv);
@@ -715,14 +714,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 	if (priv->hw_params.num_gphy == 1)
 		bcm_sf2_gphy_enable_set(ds, true);
 
-	for (port = 0; port < DSA_MAX_PORTS; port++) {
-		if (dsa_is_user_port(ds, port))
-			bcm_sf2_port_setup(ds, port, NULL);
-		else if (dsa_is_cpu_port(ds, port))
-			bcm_sf2_imp_setup(ds, port);
-	}
-
-	bcm_sf2_enable_acb(ds);
+	ds->ops->setup(ds);
 
 	return 0;
 }
@@ -1173,10 +1165,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
 {
 	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
 
-	/* Disable all ports and interrupts */
 	priv->wol_ports_mask = 0;
-	bcm_sf2_sw_suspend(priv->dev->ds);
 	dsa_unregister_switch(priv->dev->ds);
+	/* Disable all ports and interrupts */
+	bcm_sf2_sw_suspend(priv->dev->ds);
 	bcm_sf2_mdio_unregister(priv);
 
 	return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 1c682b76190f..2b3ff0c20155 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -245,11 +245,11 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
 		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
 			ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
 	ena_rx_ctx->l3_csum_err =
-		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
-		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
 	ena_rx_ctx->l4_csum_err =
-		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
-		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
 	ena_rx_ctx->hash = cdesc->hash;
 	ena_rx_ctx->frag =
 		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
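The `!!` added above folds the shifted status bits to a strict 0 or 1 before they land in the rx context. A minimal standalone sketch of why that double negation is defensive, using hypothetical mask values rather than the real ena register layout: if the extracted field is ever wider than one bit, a plain assignment into a narrow flag can silently truncate to zero.

#include <stdio.h>

#define ERR_MASK  0x0600u	/* hypothetical two-bit error field */
#define ERR_SHIFT 9

struct rx_ctx {
	unsigned char csum_err : 1;	/* 1-bit flag, used like a bool */
};

int main(void)
{
	unsigned int status = 0x0400;	/* only the high bit of the field set */
	struct rx_ctx ctx;

	ctx.csum_err = (status & ERR_MASK) >> ERR_SHIFT;	/* 2 -> truncated to 0 */
	printf("plain assign: %u\n", ctx.csum_err);

	ctx.csum_err = !!((status & ERR_MASK) >> ERR_SHIFT);	/* any nonzero -> 1 */
	printf("with !!:      %u\n", ctx.csum_err);
	return 0;
}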
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 25621a218f20..d906293ce07d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1575,8 +1575,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
 	if (rc)
 		return rc;
 
-	ena_init_napi(adapter);
-
 	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
 
 	ena_refill_all_rx_bufs(adapter);
@@ -1730,6 +1728,13 @@ static int ena_up(struct ena_adapter *adapter)
 
 	ena_setup_io_intr(adapter);
 
+	/* napi poll functions should be initialized before running
+	 * request_irq(), to handle a rare condition where there is a pending
+	 * interrupt, causing the ISR to fire immediately while the poll
+	 * function wasn't set yet, causing a null dereference
+	 */
+	ena_init_napi(adapter);
+
 	rc = ena_request_io_irq(adapter);
 	if (rc)
 		goto err_req_irq;
@@ -2619,7 +2624,11 @@ err_disable_msix:
 	ena_free_mgmnt_irq(adapter);
 	ena_disable_msix(adapter);
 err_device_destroy:
+	ena_com_abort_admin_commands(ena_dev);
+	ena_com_wait_for_abort_completion(ena_dev);
 	ena_com_admin_destroy(ena_dev);
+	ena_com_mmio_reg_read_request_destroy(ena_dev);
+	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
 err:
 	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
 	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -3099,15 +3108,8 @@ err_rss_init:
 
 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 {
-	int release_bars;
-
-	if (ena_dev->mem_bar)
-		devm_iounmap(&pdev->dev, ena_dev->mem_bar);
-
-	if (ena_dev->reg_bar)
-		devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+	int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
 
-	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
 	pci_release_selected_regions(pdev, release_bars);
 }
 
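The second hunk above moves ena_init_napi() ahead of ena_request_io_irq() for the reason given in the new comment. A minimal sketch of the same ordering rule for a hypothetical driver (the my_* names and my_poll callback are placeholders, not ena code): everything the ISR may touch must be initialized before the interrupt line goes live, because a pending interrupt can fire as soon as request_irq() installs the handler.

struct my_ctx {
	struct net_device *netdev;
	struct napi_struct napi;
};

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_ctx *ctx = data;

	napi_schedule(&ctx->napi);	/* would oops if napi were not set up */
	return IRQ_HANDLED;
}

static int my_up(struct my_ctx *ctx, int irq)
{
	/* 1. Register the poll function first. */
	netif_napi_add(ctx->netdev, &ctx->napi, my_poll, NAPI_POLL_WEIGHT);

	/* 2. Only then expose the handler; a pending IRQ handled right
	 *    here now finds a fully initialized napi context. */
	return request_irq(irq, my_isr, 0, "mydev", ctx);
}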
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d2d59444f562..6a046030e873 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -260,47 +260,34 @@ static const struct devlink_param mlx4_devlink_params[] = {
 			     NULL, NULL, NULL),
 };
 
-static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
-					union devlink_param_value init_val)
-{
-	struct mlx4_priv *priv = devlink_priv(devlink);
-	struct mlx4_dev *dev = &priv->dev;
-	int err;
-
-	err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
-	if (err)
-		mlx4_warn(dev,
-			  "devlink set parameter %u value failed (err = %d)",
-			  param_id, err);
-}
-
 static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
 {
 	union devlink_param_value value;
 
 	value.vbool = !!mlx4_internal_err_reset;
-	mlx4_devlink_set_init_value(devlink,
-				    DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
-				    value);
+	devlink_param_driverinit_value_set(devlink,
+					   DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+					   value);
 
 	value.vu32 = 1UL << log_num_mac;
-	mlx4_devlink_set_init_value(devlink,
-				    DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value);
+	devlink_param_driverinit_value_set(devlink,
+					   DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+					   value);
 
 	value.vbool = enable_64b_cqe_eqe;
-	mlx4_devlink_set_init_value(devlink,
-				    MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
-				    value);
+	devlink_param_driverinit_value_set(devlink,
					   MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+					   value);
 
 	value.vbool = enable_4k_uar;
-	mlx4_devlink_set_init_value(devlink,
-				    MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
-				    value);
+	devlink_param_driverinit_value_set(devlink,
+					   MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+					   value);
 
 	value.vbool = false;
-	mlx4_devlink_set_init_value(devlink,
-				    DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
-				    value);
+	devlink_param_driverinit_value_set(devlink,
+					   DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+					   value);
 }
 
 static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 9a5e2969df61..3a5e6160bf0d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4282,8 +4282,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
 		RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
 		break;
 	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
-	case RTL_GIGA_MAC_VER_34:
-	case RTL_GIGA_MAC_VER_35:
+	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
+	case RTL_GIGA_MAC_VER_38:
 		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
 		break;
 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 7aa5ebb6766c..4289ccb26e4e 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -735,8 +735,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
735 u16 idx = dring->tail; 735 u16 idx = dring->tail;
736 struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); 736 struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
737 737
738 if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) 738 if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
739 /* reading the register clears the irq */
740 netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
739 break; 741 break;
742 }
740 743
741 /* This barrier is needed to keep us from reading 744 /* This barrier is needed to keep us from reading
742 * any other fields out of the netsec_de until we have 745 * any other fields out of the netsec_de until we have
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 6e13b8832bc7..fd8bb998ae52 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -163,8 +163,6 @@ static const enum gpiod_flags gpio_flags[] = {
 /* Give this long for the PHY to reset. */
 #define T_PHY_RESET_MS		50
 
-static DEFINE_MUTEX(sfp_mutex);
-
 struct sff_data {
 	unsigned int gpios;
 	bool (*module_supported)(const struct sfp_eeprom_id *id);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 533b6fb8d923..72a55b6b4211 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1241,6 +1241,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},	/* Olivetti Olicard 500 */
 	{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},	/* Cinterion PLxx */
 	{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},	/* Cinterion PHxx,PXxx */
+	{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)},	/* Cinterion ALASxx (1 RmNet) */
 	{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},	/* Cinterion PHxx,PXxx (2 RmNet) */
 	{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)},	/* Cinterion PHxx,PXxx (2 RmNet) */
 	{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)},	/* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c7861e4b402c..d837dad24b4c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2458,6 +2458,13 @@ struct netdev_notifier_info {
 	struct netlink_ext_ack	*extack;
 };
 
+struct netdev_notifier_info_ext {
+	struct netdev_notifier_info info; /* must be first */
+	union {
+		u32 mtu;
+	} ext;
+};
+
 struct netdev_notifier_change_info {
 	struct netdev_notifier_info info; /* must be first */
 	unsigned int flags_changed;
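Because the embedded struct netdev_notifier_info must stay the first member, a notifier callback can treat the pointer it receives as either type. A minimal sketch of a consumer (hypothetical handler; net/ipv4/fib_frontend.c below does the same thing for real):

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	if (event == NETDEV_CHANGEMTU) {
		/* ptr points at the embedded info, which is also the start
		 * of netdev_notifier_info_ext, so this cast is valid. */
		struct netdev_notifier_info_ext *info_ext = ptr;

		pr_info("previous mtu was %u\n", info_ext->ext.mtu);
	}
	return NOTIFY_DONE;
}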
diff --git a/include/net/devlink.h b/include/net/devlink.h
index b9b89d6604d4..99efc156a309 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -298,7 +298,7 @@ struct devlink_resource {
 
 #define DEVLINK_RESOURCE_ID_PARENT_TOP 0
 
-#define DEVLINK_PARAM_MAX_STRING_VALUE 32
+#define __DEVLINK_PARAM_MAX_STRING_VALUE 32
 enum devlink_param_type {
 	DEVLINK_PARAM_TYPE_U8,
 	DEVLINK_PARAM_TYPE_U16,
@@ -311,7 +311,7 @@ union devlink_param_value {
 	u8 vu8;
 	u16 vu16;
 	u32 vu32;
-	const char *vstr;
+	char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE];
 	bool vbool;
 };
 
@@ -553,6 +553,8 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
 int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
 				       union devlink_param_value init_val);
 void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+void devlink_param_value_str_fill(union devlink_param_value *dst_val,
+				  const char *src);
 struct devlink_region *devlink_region_create(struct devlink *devlink,
 					     const char *region_name,
 					     u32 region_max_snapshots,
@@ -789,6 +791,12 @@ devlink_param_value_changed(struct devlink *devlink, u32 param_id)
 {
 }
 
+static inline void
+devlink_param_value_str_fill(union devlink_param_value *dst_val,
+			     const char *src)
+{
+}
+
 static inline struct devlink_region *
 devlink_region_create(struct devlink *devlink,
 		      const char *region_name,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 69c91d1934c1..c9b7b136939d 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -394,6 +394,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev);
 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
 int fib_sync_down_addr(struct net_device *dev, __be32 local);
 int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 837393fa897b..573d5b901fb1 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -931,6 +931,7 @@ TRACE_EVENT(rxrpc_tx_packet,
 	    TP_fast_assign(
 		    __entry->call = call_id;
 		    memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr));
+		    __entry->where = where;
 		    ),
 
 	    TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index ac9e8c96d9bd..8cb3a6fef553 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -18,14 +18,17 @@ struct smc_diag_req {
  * on the internal clcsock, and more SMC-related socket data
  */
 struct smc_diag_msg {
 	__u8		diag_family;
 	__u8		diag_state;
-	__u8		diag_mode;
-	__u8		diag_shutdown;
+	union {
+		__u8	diag_mode;
+		__u8	diag_fallback; /* the old name of the field */
+	};
+	__u8		diag_shutdown;
 	struct inet_diag_sockid id;
 
 	__u32		diag_uid;
-	__u64		diag_inode;
+	__aligned_u64	diag_inode;
 };
 
 /* Mode of a connection */
@@ -99,11 +102,11 @@ struct smc_diag_fallback {
 };
 
 struct smcd_diag_dmbinfo {		/* SMC-D Socket internals */
 	__u32		linkid;		/* Link identifier */
-	__u64		peer_gid;	/* Peer GID */
-	__u64		my_gid;		/* My GID */
-	__u64		token;		/* Token of DMB */
-	__u64		peer_token;	/* Token of remote DMBE */
+	__aligned_u64	peer_gid;	/* Peer GID */
+	__aligned_u64	my_gid;		/* My GID */
+	__aligned_u64	token;		/* Token of DMB */
+	__aligned_u64	peer_token;	/* Token of remote DMBE */
 };
 
 #endif /* _UAPI_SMC_DIAG_H_ */
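The switch to __aligned_u64 above matters because a plain __u64 is only 4-byte aligned on some 32-bit ABIs (x86-32 among them), so a 64-bit kernel and a 32-bit userspace can disagree about the layout of a UAPI struct. A minimal sketch with a hypothetical struct:

#include <linux/types.h>

struct example_v1 {
	__u32 id;
	__u64 token;		/* offset 4 on x86-32, offset 8 on x86-64 */
};

struct example_v2 {
	__u32 id;
	__aligned_u64 token;	/* offset 8 on every ABI: padding is explicit */
};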
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 09d00f8c442b..09502de447f5 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -40,5 +40,6 @@ struct udphdr {
 #define UDP_ENCAP_L2TPINUDP	3 /* rfc2661 */
 #define UDP_ENCAP_GTP0		4 /* GSM TS 09.60 */
 #define UDP_ENCAP_GTP1U		5 /* 3GPP TS 29.060 */
+#define UDP_ENCAP_RXRPC		6
 
 #endif	/* _UAPI_LINUX_UDP_H */
diff --git a/net/core/dev.c b/net/core/dev.c
index 82114e1111e6..93243479085f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1752,6 +1752,28 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
+/**
+ * call_netdevice_notifiers_mtu - call all network notifier blocks
+ * @val: value passed unmodified to notifier function
+ * @dev: net_device pointer passed unmodified to notifier function
+ * @arg: additional u32 argument passed to the notifier function
+ *
+ * Call all network notifier blocks.  Parameters and return value
+ * are as for raw_notifier_call_chain().
+ */
+static int call_netdevice_notifiers_mtu(unsigned long val,
+					struct net_device *dev, u32 arg)
+{
+	struct netdev_notifier_info_ext info = {
+		.info.dev = dev,
+		.ext.mtu = arg,
+	};
+
+	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
+
+	return call_netdevice_notifiers_info(val, &info.info);
+}
+
 #ifdef CONFIG_NET_INGRESS
 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
 
@@ -7574,14 +7596,16 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
 	err = __dev_set_mtu(dev, new_mtu);
 
 	if (!err) {
-		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+						   orig_mtu);
 		err = notifier_to_errno(err);
 		if (err) {
 			/* setting mtu back and notifying everyone again,
 			 * so that they have a chance to revert changes.
 			 */
 			__dev_set_mtu(dev, orig_mtu);
-			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+						     new_mtu);
 		}
 	}
 	return err;
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 8c0ed225e280..6bc42933be4a 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -2995,6 +2995,8 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
 				  struct genl_info *info,
 				  union devlink_param_value *value)
 {
+	int len;
+
 	if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
 	    !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
 		return -EINVAL;
@@ -3010,10 +3012,13 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
 		value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
 		break;
 	case DEVLINK_PARAM_TYPE_STRING:
-		if (nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) >
-		    DEVLINK_PARAM_MAX_STRING_VALUE)
+		len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]),
+			      nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
+		if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) ||
+		    len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
 			return -EINVAL;
-		value->vstr = nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+		strcpy(value->vstr,
+		       nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
 		break;
 	case DEVLINK_PARAM_TYPE_BOOL:
 		value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ?
@@ -3100,7 +3105,10 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
 		return -EOPNOTSUPP;
 
 	if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
-		param_item->driverinit_value = value;
+		if (param->type == DEVLINK_PARAM_TYPE_STRING)
+			strcpy(param_item->driverinit_value.vstr, value.vstr);
+		else
+			param_item->driverinit_value = value;
 		param_item->driverinit_value_valid = true;
 	} else {
 		if (!param->set)
@@ -4540,7 +4548,10 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
 					      DEVLINK_PARAM_CMODE_DRIVERINIT))
 		return -EOPNOTSUPP;
 
-	*init_val = param_item->driverinit_value;
+	if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+		strcpy(init_val->vstr, param_item->driverinit_value.vstr);
+	else
+		*init_val = param_item->driverinit_value;
 
 	return 0;
 }
@@ -4571,7 +4582,10 @@ int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
 					      DEVLINK_PARAM_CMODE_DRIVERINIT))
 		return -EOPNOTSUPP;
 
-	param_item->driverinit_value = init_val;
+	if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+		strcpy(param_item->driverinit_value.vstr, init_val.vstr);
+	else
+		param_item->driverinit_value = init_val;
 	param_item->driverinit_value_valid = true;
 
 	devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
@@ -4604,6 +4618,23 @@ void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
 EXPORT_SYMBOL_GPL(devlink_param_value_changed);
 
 /**
+ * devlink_param_value_str_fill - Safely fill-up the string preventing
+ *				  from overflow of the preallocated buffer
+ *
+ * @dst_val: destination devlink_param_value
+ * @src: source buffer
+ */
+void devlink_param_value_str_fill(union devlink_param_value *dst_val,
+				  const char *src)
+{
+	size_t len;
+
+	len = strlcpy(dst_val->vstr, src, __DEVLINK_PARAM_MAX_STRING_VALUE);
+	WARN_ON(len >= __DEVLINK_PARAM_MAX_STRING_VALUE);
+}
+EXPORT_SYMBOL_GPL(devlink_param_value_str_fill);
+
+/**
 * devlink_region_create - create a new address region
 *
 * @devlink: devlink
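A minimal sketch of how a driver might seed a driverinit string parameter with the new helper (the parameter id and string here are hypothetical):

static void my_set_param_defaults(struct devlink *devlink)
{
	union devlink_param_value value;

	/* Copies at most __DEVLINK_PARAM_MAX_STRING_VALUE - 1 characters
	 * and always NUL-terminates, so the fixed vstr[] cannot overflow. */
	devlink_param_value_str_fill(&value, "default-profile");
	devlink_param_driverinit_value_set(devlink,
					   MY_DEVLINK_PARAM_ID_PROFILE,
					   value);
}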
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b2c807f67aba..428094b577fc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4452,14 +4452,16 @@ EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
4452 */ 4452 */
4453bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 4453bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4454{ 4454{
4455 if (unlikely(start > skb_headlen(skb)) || 4455 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
4456 unlikely((int)start + off > skb_headlen(skb) - 2)) { 4456 u32 csum_start = skb_headroom(skb) + (u32)start;
4457 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", 4457
4458 start, off, skb_headlen(skb)); 4458 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
4459 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
4460 start, off, skb_headroom(skb), skb_headlen(skb));
4459 return false; 4461 return false;
4460 } 4462 }
4461 skb->ip_summed = CHECKSUM_PARTIAL; 4463 skb->ip_summed = CHECKSUM_PARTIAL;
4462 skb->csum_start = skb_headroom(skb) + start; 4464 skb->csum_start = csum_start;
4463 skb->csum_offset = off; 4465 skb->csum_offset = off;
4464 skb_set_transport_header(skb, start); 4466 skb_set_transport_header(skb, start);
4465 return true; 4467 return true;
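A standalone sketch of the overflow the widened check guards against (hypothetical values): with 16-bit start/off, the sum can wrap and slip past a narrow bounds check even though the real checksum end lies far beyond the header.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t start = 65000, off = 1000;
	uint32_t headlen = 1500;	/* assumed linear header length */

	uint16_t wrapped = start + off;			/* 66000 wraps to 464 */
	uint32_t csum_end = (uint32_t)start + off + 2;	/* 66002, no wrap */

	printf("u16 sum: %u  (< %u, bogus pass)\n", wrapped, headlen);
	printf("u32 sum: %u (> %u, correctly rejected)\n", csum_end, headlen);
	return 0;
}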
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 2998b0e47d4b..0113993e9b2c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1243,7 +1243,8 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-	struct netdev_notifier_changeupper_info *info;
+	struct netdev_notifier_changeupper_info *upper_info = ptr;
+	struct netdev_notifier_info_ext *info_ext = ptr;
 	struct in_device *in_dev;
 	struct net *net = dev_net(dev);
 	unsigned int flags;
@@ -1278,16 +1279,19 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
 			fib_sync_up(dev, RTNH_F_LINKDOWN);
 		else
 			fib_sync_down_dev(dev, event, false);
-		/* fall through */
+		rt_cache_flush(net);
+		break;
 	case NETDEV_CHANGEMTU:
+		fib_sync_mtu(dev, info_ext->ext.mtu);
 		rt_cache_flush(net);
 		break;
 	case NETDEV_CHANGEUPPER:
-		info = ptr;
+		upper_info = ptr;
 		/* flush all routes if dev is linked to or unlinked from
 		 * an L3 master device (e.g., VRF)
 		 */
-		if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+		if (upper_info->upper_dev &&
+		    netif_is_l3_master(upper_info->upper_dev))
 			fib_disable_ip(dev, NETDEV_DOWN, true);
 		break;
 	}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index f3c89ccf14c5..446204ca7406 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1470,6 +1470,56 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
 	return NOTIFY_DONE;
 }
 
+/* Update the PMTU of exceptions when:
+ * - the new MTU of the first hop becomes smaller than the PMTU
+ * - the old MTU was the same as the PMTU, and it limited discovery of
+ *   larger MTUs on the path. With that limit raised, we can now
+ *   discover larger MTUs
+ * A special case is locked exceptions, for which the PMTU is smaller
+ * than the minimal accepted PMTU:
+ * - if the new MTU is greater than the PMTU, don't make any change
+ * - otherwise, unlock and set PMTU
+ */
+static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
+{
+	struct fnhe_hash_bucket *bucket;
+	int i;
+
+	bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
+	if (!bucket)
+		return;
+
+	for (i = 0; i < FNHE_HASH_SIZE; i++) {
+		struct fib_nh_exception *fnhe;
+
+		for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
+		     fnhe;
+		     fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
+			if (fnhe->fnhe_mtu_locked) {
+				if (new <= fnhe->fnhe_pmtu) {
+					fnhe->fnhe_pmtu = new;
+					fnhe->fnhe_mtu_locked = false;
+				}
+			} else if (new < fnhe->fnhe_pmtu ||
+				   orig == fnhe->fnhe_pmtu) {
+				fnhe->fnhe_pmtu = new;
+			}
+		}
+	}
+}
+
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
+{
+	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
+	struct hlist_head *head = &fib_info_devhash[hash];
+	struct fib_nh *nh;
+
+	hlist_for_each_entry(nh, head, nh_hash) {
+		if (nh->nh_dev == dev)
+			nh_update_mtu(nh, dev->mtu, orig_mtu);
+	}
+}
+
 /* Event              force Flags           Description
  * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
  * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b678466da451..8501554e96a4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1001,21 +1001,22 @@ out:	kfree_skb(skb);
 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 {
 	struct dst_entry *dst = &rt->dst;
+	u32 old_mtu = ipv4_mtu(dst);
 	struct fib_result res;
 	bool lock = false;
 
 	if (ip_mtu_locked(dst))
 		return;
 
-	if (ipv4_mtu(dst) < mtu)
+	if (old_mtu < mtu)
 		return;
 
 	if (mtu < ip_rt_min_pmtu) {
 		lock = true;
-		mtu = ip_rt_min_pmtu;
+		mtu = min(old_mtu, ip_rt_min_pmtu);
 	}
 
-	if (rt->rt_pmtu == mtu &&
+	if (rt->rt_pmtu == mtu && !lock &&
 	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
 		return;
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7d69dd6fa7e8..c32a4c16b7ff 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1627,7 +1627,7 @@ busy_check:
1627 *err = error; 1627 *err = error;
1628 return NULL; 1628 return NULL;
1629} 1629}
1630EXPORT_SYMBOL_GPL(__skb_recv_udp); 1630EXPORT_SYMBOL(__skb_recv_udp);
1631 1631
1632/* 1632/*
1633 * This should be easy, if there is something there we 1633 * This should be easy, if there is something there we
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5516f55e214b..cbe46175bb59 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -196,6 +196,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
 			*ppcpu_rt = NULL;
 		}
 	}
+
+	free_percpu(f6i->rt6i_pcpu);
 	}
 
 	lwtstate_put(f6i->fib6_nh.nh_lwtstate);
diff --git a/net/rds/send.c b/net/rds/send.c
index 57b3d5a8b2db..fe785ee819dd 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1007,7 +1007,8 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 	return ret;
 }
 
-static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
+static int rds_send_mprds_hash(struct rds_sock *rs,
+			       struct rds_connection *conn, int nonblock)
 {
 	int hash;
 
@@ -1023,10 +1024,16 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
 	 * used.  But if we are interrupted, we have to use the zero
 	 * c_path in case the connection ends up being non-MP capable.
 	 */
-	if (conn->c_npaths == 0)
+	if (conn->c_npaths == 0) {
+		/* Cannot wait for the connection be made, so just use
+		 * the base c_path.
+		 */
+		if (nonblock)
+			return 0;
 		if (wait_event_interruptible(conn->c_hs_waitq,
 					     conn->c_npaths != 0))
 			hash = 0;
+	}
 	if (conn->c_npaths == 1)
 		hash = 0;
 	}
@@ -1256,7 +1263,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	}
 
 	if (conn->c_trans->t_mp_capable)
-		cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
+		cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
 	else
 		cpath = &conn->c_path[0];
 
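A minimal userspace sketch of the case this fixes ("rds: RDS (tcp) hangs on sendto() to unresponding address"): fd is assumed to be an AF_RDS socket and peer a bound address that never answers.

#include <sys/socket.h>

ssize_t send_probe(int fd, const struct sockaddr *peer, socklen_t len)
{
	const char buf[] = "ping";

	/* With MSG_DONTWAIT set, rds_send_mprds_hash() now falls back to
	 * c_path[0] immediately instead of sleeping until the MPRDS
	 * handshake completes -- which never happens for a dead peer. */
	return sendto(fd, buf, sizeof(buf), MSG_DONTWAIT, peer, len);
}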
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index ef9554131434..a6e6cae82c30 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -302,6 +302,7 @@ struct rxrpc_peer {
 
 	/* calculated RTT cache */
 #define RXRPC_RTT_CACHE_SIZE 32
+	spinlock_t		rtt_input_lock;	/* RTT lock for input routine */
 	ktime_t			rtt_last_req;	/* Time of last RTT request */
 	u64			rtt;		/* Current RTT estimate (in nS) */
 	u64			rtt_sum;	/* Sum of cache contents */
@@ -442,17 +443,17 @@ struct rxrpc_connection {
 	spinlock_t		state_lock;	/* state-change lock */
 	enum rxrpc_conn_cache_state cache_state;
 	enum rxrpc_conn_proto_state state;	/* current state of connection */
-	u32			local_abort;	/* local abort code */
-	u32			remote_abort;	/* remote abort code */
+	u32			abort_code;	/* Abort code of connection abort */
 	int			debug_id;	/* debug ID for printks */
 	atomic_t		serial;		/* packet serial number counter */
 	unsigned int		hi_serial;	/* highest serial number received */
 	u32			security_nonce;	/* response re-use preventer */
-	u16			service_id;	/* Service ID, possibly upgraded */
+	u32			service_id;	/* Service ID, possibly upgraded */
 	u8			size_align;	/* data size alignment (for security) */
 	u8			security_size;	/* security header size */
 	u8			security_ix;	/* security type */
 	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
+	short			error;		/* Local error code */
 };
 
 static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
@@ -635,6 +636,8 @@ struct rxrpc_call {
 	bool			tx_phase;	/* T if transmission phase, F if receive phase */
 	u8			nr_jumbo_bad;	/* Number of jumbo dups/exceeds-windows */
 
+	spinlock_t		input_lock;	/* Lock for packet input to this call */
+
 	/* receive-phase ACK management */
 	u8			ackr_reason;	/* reason to ACK */
 	u16			ackr_skew;	/* skew on packet being ACK'd */
@@ -720,8 +723,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
 void rxrpc_discard_prealloc(struct rxrpc_sock *);
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
 					   struct rxrpc_sock *,
-					   struct rxrpc_peer *,
-					   struct rxrpc_connection *,
 					   struct sk_buff *);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
 struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
@@ -891,8 +892,9 @@ extern unsigned long rxrpc_conn_idle_client_fast_expiry;
 extern struct idr rxrpc_client_conn_ids;
 
 void rxrpc_destroy_client_conn_ids(void);
-int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
-		       struct sockaddr_rxrpc *, gfp_t);
+int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
+		       struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
+		       gfp_t);
 void rxrpc_expose_client_call(struct rxrpc_call *);
 void rxrpc_disconnect_client_call(struct rxrpc_call *);
 void rxrpc_put_client_conn(struct rxrpc_connection *);
@@ -965,7 +967,7 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
 /*
  * input.c
 */
-void rxrpc_data_ready(struct sock *);
+int rxrpc_input_packet(struct sock *, struct sk_buff *);
 
 /*
  * insecure.c
@@ -1045,10 +1047,11 @@ void rxrpc_peer_keepalive_worker(struct work_struct *);
 */
 struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
 					 const struct sockaddr_rxrpc *);
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
 				     struct sockaddr_rxrpc *, gfp_t);
 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
-void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
+			     struct rxrpc_peer *);
 void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 9c7f26d06a52..652e314de38e 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -287,7 +287,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 					  (peer_tail + 1) &
 					  (RXRPC_BACKLOG_MAX - 1));
 
-		rxrpc_new_incoming_peer(local, peer);
+		rxrpc_new_incoming_peer(rx, local, peer);
 	}
 
 	/* Now allocate and set up the connection */
@@ -333,11 +333,11 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 */
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 					   struct rxrpc_sock *rx,
-					   struct rxrpc_peer *peer,
-					   struct rxrpc_connection *conn,
 					   struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	struct rxrpc_connection *conn;
+	struct rxrpc_peer *peer;
 	struct rxrpc_call *call;
 
 	_enter("");
@@ -354,6 +354,13 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 		goto out;
 	}
 
+	/* The peer, connection and call may all have sprung into existence due
+	 * to a duplicate packet being handled on another CPU in parallel, so
+	 * we have to recheck the routing.  However, we're now holding
+	 * rx->incoming_lock, so the values should remain stable.
+	 */
+	conn = rxrpc_find_connection_rcu(local, skb, &peer);
+
 	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
 	if (!call) {
 		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
@@ -396,20 +403,22 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 
 	case RXRPC_CONN_SERVICE:
 		write_lock(&call->state_lock);
-		if (rx->discard_new_call)
-			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-		else
-			call->state = RXRPC_CALL_SERVER_ACCEPTING;
+		if (call->state < RXRPC_CALL_COMPLETE) {
+			if (rx->discard_new_call)
+				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
+			else
+				call->state = RXRPC_CALL_SERVER_ACCEPTING;
+		}
 		write_unlock(&call->state_lock);
 		break;
 
 	case RXRPC_CONN_REMOTELY_ABORTED:
 		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
-					  conn->remote_abort, -ECONNABORTED);
+					  conn->abort_code, conn->error);
 		break;
 	case RXRPC_CONN_LOCALLY_ABORTED:
 		rxrpc_abort_call("CON", call, sp->hdr.seq,
-				 conn->local_abort, -ECONNABORTED);
+				 conn->abort_code, conn->error);
 		break;
 	default:
 		BUG();
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 799f75b6900d..8f1a8f85b1f9 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -138,6 +138,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
 	init_waitqueue_head(&call->waitq);
 	spin_lock_init(&call->lock);
 	spin_lock_init(&call->notify_lock);
+	spin_lock_init(&call->input_lock);
 	rwlock_init(&call->state_lock);
 	atomic_set(&call->usage, 1);
 	call->debug_id = debug_id;
@@ -287,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	/* Set up or get a connection record and set the protocol parameters,
 	 * including channel number and call ID.
 	 */
-	ret = rxrpc_connect_call(call, cp, srx, gfp);
+	ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
 	if (ret < 0)
 		goto error;
 
@@ -339,7 +340,7 @@ int rxrpc_retry_client_call(struct rxrpc_sock *rx,
 	/* Set up or get a connection record and set the protocol parameters,
 	 * including channel number and call ID.
 	 */
-	ret = rxrpc_connect_call(call, cp, srx, gfp);
+	ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
 	if (ret < 0)
 		goto error;
 
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 8acf74fe24c0..521189f4b666 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -276,7 +276,8 @@ dont_reuse:
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */
-static int rxrpc_get_client_conn(struct rxrpc_call *call,
+static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
+				 struct rxrpc_call *call,
 				 struct rxrpc_conn_parameters *cp,
 				 struct sockaddr_rxrpc *srx,
 				 gfp_t gfp)
@@ -289,7 +290,7 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call,
 
 	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
 
-	cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
+	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
 	if (!cp->peer)
 		goto error;
 
@@ -683,7 +684,8 @@ out:
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
-int rxrpc_connect_call(struct rxrpc_call *call,
+int rxrpc_connect_call(struct rxrpc_sock *rx,
+		       struct rxrpc_call *call,
 		       struct rxrpc_conn_parameters *cp,
 		       struct sockaddr_rxrpc *srx,
 		       gfp_t gfp)
@@ -696,7 +698,7 @@ int rxrpc_connect_call(struct rxrpc_call *call,
 	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
 	rxrpc_cull_active_client_conns(rxnet);
 
-	ret = rxrpc_get_client_conn(call, cp, srx, gfp);
+	ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
 	if (ret < 0)
 		goto out;
 
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 6df56ce68861..b6fca8ebb117 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -126,7 +126,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 
 	switch (chan->last_type) {
 	case RXRPC_PACKET_TYPE_ABORT:
-		_proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort);
+		_proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
 		break;
 	case RXRPC_PACKET_TYPE_ACK:
 		trace_rxrpc_tx_ack(chan->call_debug_id, serial,
@@ -153,13 +153,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 * pass a connection-level abort onto all calls on that connection
 */
 static void rxrpc_abort_calls(struct rxrpc_connection *conn,
-			      enum rxrpc_call_completion compl,
-			      u32 abort_code, int error)
+			      enum rxrpc_call_completion compl)
 {
 	struct rxrpc_call *call;
 	int i;
 
-	_enter("{%d},%x", conn->debug_id, abort_code);
+	_enter("{%d},%x", conn->debug_id, conn->abort_code);
 
 	spin_lock(&conn->channel_lock);
 
@@ -172,9 +171,11 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
 			trace_rxrpc_abort(call->debug_id,
 					  "CON", call->cid,
 					  call->call_id, 0,
-					  abort_code, error);
+					  conn->abort_code,
+					  conn->error);
 			if (rxrpc_set_call_completion(call, compl,
-						      abort_code, error))
+						      conn->abort_code,
+						      conn->error))
 				rxrpc_notify_socket(call);
 		}
 	}
@@ -207,10 +208,12 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 		return 0;
 	}
 
+	conn->error = error;
+	conn->abort_code = abort_code;
 	conn->state = RXRPC_CONN_LOCALLY_ABORTED;
 	spin_unlock_bh(&conn->state_lock);
 
-	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error);
+	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
 
 	msg.msg_name	= &conn->params.peer->srx.transport;
 	msg.msg_namelen	= conn->params.peer->srx.transport_len;
@@ -229,7 +232,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 	whdr._rsvd	= 0;
 	whdr.serviceId	= htons(conn->service_id);
 
-	word		= htonl(conn->local_abort);
+	word		= htonl(conn->abort_code);
 
 	iov[0].iov_base	= &whdr;
 	iov[0].iov_len	= sizeof(whdr);
@@ -240,7 +243,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 
 	serial = atomic_inc_return(&conn->serial);
 	whdr.serial = htonl(serial);
-	_proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort);
+	_proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
 
 	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
 	if (ret < 0) {
@@ -315,9 +318,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
 		abort_code = ntohl(wtmp);
 		_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
 
+		conn->error = -ECONNABORTED;
+		conn->abort_code = abort_code;
 		conn->state = RXRPC_CONN_REMOTELY_ABORTED;
-		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
-				  abort_code, -ECONNABORTED);
+		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
 		return -ECONNABORTED;
 
 	case RXRPC_PACKET_TYPE_CHALLENGE:
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 800f5b8a1baa..570b49d2da42 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
 /*
  * Apply a hard ACK by advancing the Tx window.
  */
-static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
+static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
 				   struct rxrpc_ack_summary *summary)
 {
 	struct sk_buff *skb, *list = NULL;
+	bool rot_last = false;
 	int ix;
 	u8 annotation;
 
@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
 		skb->next = list;
 		list = skb;
 
-		if (annotation & RXRPC_TX_ANNO_LAST)
+		if (annotation & RXRPC_TX_ANNO_LAST) {
 			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
+			rot_last = true;
+		}
 		if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
 			summary->nr_rot_new_acks++;
 	}
 
 	spin_unlock(&call->lock);
 
-	trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
+	trace_rxrpc_transmit(call, (rot_last ?
 				    rxrpc_transmit_rotate_last :
 				    rxrpc_transmit_rotate));
 	wake_up(&call->waitq);
@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
 		skb->next = NULL;
 		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
 	}
+
+	return rot_last;
 }
 
 /*
@@ -273,23 +278,26 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
 static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
 			       const char *abort_why)
 {
+	unsigned int state;
 
 	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
 
 	write_lock(&call->state_lock);
 
-	switch (call->state) {
+	state = call->state;
+	switch (state) {
 	case RXRPC_CALL_CLIENT_SEND_REQUEST:
 	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
 		if (reply_begun)
-			call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
+			call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
 		else
-			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+			call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
 		break;
 
 	case RXRPC_CALL_SERVER_AWAIT_ACK:
 		__rxrpc_call_completed(call);
 		rxrpc_notify_socket(call);
+		state = call->state;
 		break;
 
 	default:
@@ -297,11 +305,10 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
 	}
 
 	write_unlock(&call->state_lock);
-	if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
+	if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
 		trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
-	} else {
+	else
 		trace_rxrpc_transmit(call, rxrpc_transmit_end);
-	}
 	_leave(" = ok");
 	return true;
 
@@ -332,11 +339,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
 		trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
 	}
 
-	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
-		rxrpc_rotate_tx_window(call, top, &summary);
 	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
-		rxrpc_proto_abort("TXL", call, top);
-		return false;
+		if (!rxrpc_rotate_tx_window(call, top, &summary)) {
+			rxrpc_proto_abort("TXL", call, top);
+			return false;
+		}
 	}
 	if (!rxrpc_end_tx_phase(call, true, "ETD"))
 		return false;
@@ -452,13 +459,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
 		}
 	}
 
+	spin_lock(&call->input_lock);
+
 	/* Received data implicitly ACKs all of the request packets we sent
 	 * when we're acting as a client.
 	 */
 	if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
 	     state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
 	    !rxrpc_receiving_reply(call))
-		return;
+		goto unlock;
 
 	call->ackr_prev_seq = seq;
 
@@ -488,12 +497,16 @@ next_subpacket:
 
 	if (flags & RXRPC_LAST_PACKET) {
 		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
-		    seq != call->rx_top)
-			return rxrpc_proto_abort("LSN", call, seq);
+		    seq != call->rx_top) {
+			rxrpc_proto_abort("LSN", call, seq);
+			goto unlock;
+		}
 	} else {
 		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
-		    after_eq(seq, call->rx_top))
-			return rxrpc_proto_abort("LSA", call, seq);
+		    after_eq(seq, call->rx_top)) {
+			rxrpc_proto_abort("LSA", call, seq);
+			goto unlock;
+		}
 	}
 
 	trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
@@ -560,8 +573,10 @@ next_subpacket:
 skip:
 	offset += len;
 	if (flags & RXRPC_JUMBO_PACKET) {
-		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
-			return rxrpc_proto_abort("XJF", call, seq);
+		if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
+			rxrpc_proto_abort("XJF", call, seq);
+			goto unlock;
+		}
 		offset += sizeof(struct rxrpc_jumbo_header);
 		seq++;
 		serial++;
@@ -601,6 +616,9 @@ ack:
 		trace_rxrpc_notify_socket(call->debug_id, serial);
 		rxrpc_notify_socket(call);
 	}
+
+unlock:
+	spin_unlock(&call->input_lock);
 	_leave(" [queued]");
 }
 
@@ -687,15 +705,14 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
 
 	ping_time = call->ping_time;
 	smp_rmb();
-	ping_serial = call->ping_serial;
+	ping_serial = READ_ONCE(call->ping_serial);
 
 	if (orig_serial == call->acks_lost_ping)
 		rxrpc_input_check_for_lost_ack(call);
 
-	if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
-	    before(orig_serial, ping_serial))
+	if (before(orig_serial, ping_serial) ||
+	    !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
 		return;
-	clear_bit(RXRPC_CALL_PINGING, &call->flags);
 	if (after(orig_serial, ping_serial))
 		return;
 
@@ -861,15 +878,32 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 				  rxrpc_propose_ack_respond_to_ack);
 	}
 
+	/* Discard any out-of-order or duplicate ACKs. */
+	if (before_eq(sp->hdr.serial, call->acks_latest))
+		return;
+
+	buf.info.rxMTU = 0;
 	ioffset = offset + nr_acks + 3;
-	if (skb->len >= ioffset + sizeof(buf.info)) {
-		if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
-			return rxrpc_proto_abort("XAI", call, 0);
+	if (skb->len >= ioffset + sizeof(buf.info) &&
+	    skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
+		return rxrpc_proto_abort("XAI", call, 0);
+
+	spin_lock(&call->input_lock);
+
+	/* Discard any out-of-order or duplicate ACKs. */
+	if (before_eq(sp->hdr.serial, call->acks_latest))
+		goto out;
+	call->acks_latest_ts = skb->tstamp;
+	call->acks_latest = sp->hdr.serial;
+
+	/* Parse rwind and mtu sizes if provided. */
+	if (buf.info.rxMTU)
 		rxrpc_input_ackinfo(call, skb, &buf.info);
-	}
 
-	if (first_soft_ack == 0)
-		return rxrpc_proto_abort("AK0", call, 0);
+	if (first_soft_ack == 0) {
+		rxrpc_proto_abort("AK0", call, 0);
+		goto out;
+	}
 
 	/* Ignore ACKs unless we are or have just been transmitting. */
 	switch (READ_ONCE(call->state)) {
@@ -879,39 +913,35 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 	case RXRPC_CALL_SERVER_AWAIT_ACK:
 		break;
 	default:
-		return;
-	}
-
-	/* Discard any out-of-order or duplicate ACKs. */
-	if (before_eq(sp->hdr.serial, call->acks_latest)) {
-		_debug("discard ACK %d <= %d",
-		       sp->hdr.serial, call->acks_latest);
-		return;
+		goto out;
 	}
-	call->acks_latest_ts = skb->tstamp;
-	call->acks_latest = sp->hdr.serial;
 
 	if (before(hard_ack, call->tx_hard_ack) ||
-	    after(hard_ack, call->tx_top))
-		return rxrpc_proto_abort("AKW", call, 0);
-	if (nr_acks > call->tx_top - hard_ack)
-		return rxrpc_proto_abort("AKN", call, 0);
+	    after(hard_ack, call->tx_top)) {
+		rxrpc_proto_abort("AKW", call, 0);
+		goto out;
+	}
+	if (nr_acks > call->tx_top - hard_ack) {
+		rxrpc_proto_abort("AKN", call, 0);
+		goto out;
+	}
 
-	if (after(hard_ack, call->tx_hard_ack))
-		rxrpc_rotate_tx_window(call, hard_ack, &summary);
+	if (after(hard_ack, call->tx_hard_ack)) {
+		if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
+			rxrpc_end_tx_phase(call, false, "ETA");
+			goto out;
+		}
+	}
 
 	if (nr_acks > 0) {
-		if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
-			return rxrpc_proto_abort("XSA", call, 0);
+		if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) {
+			rxrpc_proto_abort("XSA", call, 0);
+			goto out;
+		}
 		rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
 				      &summary);
 	}
 
-	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
-		rxrpc_end_tx_phase(call, false, "ETA");
-		return;
-	}
-
 	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
 	    RXRPC_TX_ANNO_LAST &&
 	    summary.nr_acks == call->tx_top - hard_ack &&
@@ -920,7 +950,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 				  false, true,
 				  rxrpc_propose_ack_ping_for_lost_reply);
 
-	return rxrpc_congestion_management(call, skb, &summary, acked_serial);
+	rxrpc_congestion_management(call, skb, &summary, acked_serial);
+out:
+	spin_unlock(&call->input_lock);
 }
 
 /*
@@ -933,9 +965,12 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
 
 	_proto("Rx ACKALL %%%u", sp->hdr.serial);
 
-	rxrpc_rotate_tx_window(call, call->tx_top, &summary);
-	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+	spin_lock(&call->input_lock);
+
+	if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
 		rxrpc_end_tx_phase(call, false, "ETL");
+
+	spin_unlock(&call->input_lock);
 }
 
 /*
@@ -1018,18 +1053,19 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
 }
 
 /*
- * Handle a new call on a channel implicitly completing the preceding call on
- * that channel.
+ * Handle a new service call on a channel implicitly completing the preceding
+ * call on that channel. This does not apply to client conns.
  *
  * TODO: If callNumber > call_id + 1, renegotiate security.
  */
-static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
+static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx,
+					  struct rxrpc_connection *conn,
 					  struct rxrpc_call *call)
 {
 	switch (READ_ONCE(call->state)) {
 	case RXRPC_CALL_SERVER_AWAIT_ACK:
 		rxrpc_call_completed(call);
-		break;
+		/* Fall through */
 	case RXRPC_CALL_COMPLETE:
 		break;
 	default:
@@ -1037,11 +1073,13 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
 		rxrpc_queue_call(call);
 	}
+		trace_rxrpc_improper_term(call);
 		break;
 	}
 
-	trace_rxrpc_improper_term(call);
+	spin_lock(&rx->incoming_lock);
 	__rxrpc_disconnect_call(conn, call);
+	spin_unlock(&rx->incoming_lock);
 	rxrpc_notify_socket(call);
 }
 
@@ -1120,8 +1158,10 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
  * The socket is locked by the caller and this prevents the socket from being
  * shut down and the local endpoint from going away, thus sk_user_data will not
  * be cleared until this function returns.
+ *
+ * Called with the RCU read lock held from the IP layer via UDP.
  */
-void rxrpc_data_ready(struct sock *udp_sk)
+int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 {
 	struct rxrpc_connection *conn;
 	struct rxrpc_channel *chan;
@@ -1130,38 +1170,17 @@ void rxrpc_data_ready(struct sock *udp_sk)
 	struct rxrpc_local *local = udp_sk->sk_user_data;
 	struct rxrpc_peer *peer = NULL;
 	struct rxrpc_sock *rx = NULL;
-	struct sk_buff *skb;
 	unsigned int channel;
-	int ret, skew = 0;
+	int skew = 0;
 
 	_enter("%p", udp_sk);
 
-	ASSERT(!irqs_disabled());
-
-	skb = skb_recv_udp(udp_sk, 0, 1, &ret);
-	if (!skb) {
-		if (ret == -EAGAIN)
-			return;
-		_debug("UDP socket error %d", ret);
-		return;
-	}
-
 	if (skb->tstamp == 0)
 		skb->tstamp = ktime_get_real();
 
 	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
 
-	_net("recv skb %p", skb);
-
-	/* we'll probably need to checksum it (didn't call sock_recvmsg) */
-	if (skb_checksum_complete(skb)) {
-		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
-		__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
-		_leave(" [CSUM failed]");
-		return;
-	}
-
-	__UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);
+	skb_pull(skb, sizeof(struct udphdr));
 
 	/* The UDP protocol already released all skb resources;
 	 * we are free to add our own data there.
@@ -1176,11 +1195,13 @@ void rxrpc_data_ready(struct sock *udp_sk)
 		static int lose;
 		if ((lose++ & 7) == 7) {
 			trace_rxrpc_rx_lose(sp);
-			rxrpc_lose_skb(skb, rxrpc_skb_rx_lost);
-			return;
+			rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
+			return 0;
 		}
 	}
 
+	if (skb->tstamp == 0)
+		skb->tstamp = ktime_get_real();
 	trace_rxrpc_rx_packet(sp);
 
 	switch (sp->hdr.type) {
@@ -1234,8 +1255,6 @@ void rxrpc_data_ready(struct sock *udp_sk)
 	if (sp->hdr.serviceId == 0)
 		goto bad_message;
 
-	rcu_read_lock();
-
 	if (rxrpc_to_server(sp)) {
 		/* Weed out packets to services we're not offering. Packets
 		 * that would begin a call are explicitly rejected and the rest
@@ -1247,7 +1266,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
 			if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
 			    sp->hdr.seq == 1)
 				goto unsupported_service;
-			goto discard_unlock;
+			goto discard;
 		}
 	}
 
@@ -1257,17 +1276,23 @@ void rxrpc_data_ready(struct sock *udp_sk)
 		goto wrong_security;
 
 	if (sp->hdr.serviceId != conn->service_id) {
-		if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) ||
-		    conn->service_id != conn->params.service_id)
+		int old_id;
+
+		if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
+			goto reupgrade;
+		old_id = cmpxchg(&conn->service_id, conn->params.service_id,
+				 sp->hdr.serviceId);
+
+		if (old_id != conn->params.service_id &&
+		    old_id != sp->hdr.serviceId)
 			goto reupgrade;
-		conn->service_id = sp->hdr.serviceId;
 	}
 
 	if (sp->hdr.callNumber == 0) {
 		/* Connection-level packet */
 		_debug("CONN %p {%d}", conn, conn->debug_id);
 		rxrpc_post_packet_to_conn(conn, skb);
-		goto out_unlock;
+		goto out;
 	}
 
 	/* Note the serial number skew here */
@@ -1286,19 +1311,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
 
 	/* Ignore really old calls */
 	if (sp->hdr.callNumber < chan->last_call)
-		goto discard_unlock;
+		goto discard;
 
 	if (sp->hdr.callNumber == chan->last_call) {
 		if (chan->call ||
 		    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
-			goto discard_unlock;
+			goto discard;
 
 		/* For the previous service call, if completed
 		 * successfully, we discard all further packets.
 		 */
 		if (rxrpc_conn_is_service(conn) &&
 		    chan->last_type == RXRPC_PACKET_TYPE_ACK)
-			goto discard_unlock;
+			goto discard;
 
 		/* But otherwise we need to retransmit the final packet
 		 * from data cached in the connection record.
@@ -1309,18 +1334,16 @@ void rxrpc_data_ready(struct sock *udp_sk)
 					      sp->hdr.serial,
 					      sp->hdr.flags, 0);
 			rxrpc_post_packet_to_conn(conn, skb);
-			goto out_unlock;
+			goto out;
 		}
 
 		call = rcu_dereference(chan->call);
 
 		if (sp->hdr.callNumber > chan->call_id) {
-			if (rxrpc_to_client(sp)) {
-				rcu_read_unlock();
+			if (rxrpc_to_client(sp))
 				goto reject_packet;
-			}
 			if (call)
-				rxrpc_input_implicit_end_call(conn, call);
+				rxrpc_input_implicit_end_call(rx, conn, call);
 			call = NULL;
 		}
 
@@ -1337,55 +1360,42 @@ void rxrpc_data_ready(struct sock *udp_sk)
 	if (!call || atomic_read(&call->usage) == 0) {
 		if (rxrpc_to_client(sp) ||
 		    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
-			goto bad_message_unlock;
+			goto bad_message;
 		if (sp->hdr.seq != 1)
-			goto discard_unlock;
-		call = rxrpc_new_incoming_call(local, rx, peer, conn, skb);
-		if (!call) {
-			rcu_read_unlock();
+			goto discard;
+		call = rxrpc_new_incoming_call(local, rx, skb);
+		if (!call)
 			goto reject_packet;
-		}
 		rxrpc_send_ping(call, skb, skew);
 		mutex_unlock(&call->user_mutex);
 	}
 
 	rxrpc_input_call_packet(call, skb, skew);
-	goto discard_unlock;
+	goto discard;
 
-discard_unlock:
-	rcu_read_unlock();
discard:
 	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out:
 	trace_rxrpc_rx_done(0, 0);
-	return;
-
-out_unlock:
-	rcu_read_unlock();
-	goto out;
+	return 0;
 
wrong_security:
-	rcu_read_unlock();
 	trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
 			  RXKADINCONSISTENCY, EBADMSG);
 	skb->priority = RXKADINCONSISTENCY;
 	goto post_abort;
 
unsupported_service:
-	rcu_read_unlock();
 	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
 			  RX_INVALID_OPERATION, EOPNOTSUPP);
 	skb->priority = RX_INVALID_OPERATION;
 	goto post_abort;
 
reupgrade:
-	rcu_read_unlock();
 	trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
 			  RX_PROTOCOL_ERROR, EBADMSG);
 	goto protocol_error;
 
-bad_message_unlock:
-	rcu_read_unlock();
bad_message:
 	trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
 			  RX_PROTOCOL_ERROR, EBADMSG);
@@ -1397,4 +1407,5 @@ reject_packet:
 	trace_rxrpc_rx_done(skb->mark, skb->priority);
 	rxrpc_reject_packet(local, skb);
 	_leave(" [badmsg]");
+	return 0;
 }
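
One detail of the input.c rework worth isolating: rxrpc_input_ping_response() previously used a test_bit() followed by a separate clear_bit(), leaving a window in which two responses could both pass the test before either cleared the flag; the patch collapses this into a single atomic test_and_clear_bit(). A toy userspace sketch of why the combined operation is race-free (GCC atomic builtins standing in for the kernel bitops, which this is not):

/* Only one caller can see the bit set, because fetch-and-clear is a
 * single atomic read-modify-write.
 */
#include <stdio.h>

static unsigned long flags;
#define TOY_PINGING 0x1UL

static int toy_test_and_clear_pinging(void)
{
	/* Atomically fetch the old value and clear the bit; nonzero is
	 * returned only to the one caller that saw the bit set. */
	return __atomic_fetch_and(&flags, ~TOY_PINGING,
				  __ATOMIC_ACQ_REL) & TOY_PINGING;
}

int main(void)
{
	__atomic_fetch_or(&flags, TOY_PINGING, __ATOMIC_RELEASE);

	if (toy_test_and_clear_pinging())
		printf("this caller consumed the ping response\n");
	if (!toy_test_and_clear_pinging())
		printf("a second responder correctly sees nothing\n");
	return 0;
}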
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 94d234e9c685..cad0691c2bb4 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -19,6 +19,7 @@
 #include <linux/ip.h>
 #include <linux/hashtable.h>
 #include <net/sock.h>
+#include <net/udp.h>
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
@@ -108,7 +109,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
  */
 static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
 {
-	struct sock *sock;
+	struct sock *usk;
 	int ret, opt;
 
 	_enter("%p{%d,%d}",
@@ -122,6 +123,28 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
 		return ret;
 	}
 
+	/* set the socket up */
+	usk = local->socket->sk;
+	inet_sk(usk)->mc_loop = 0;
+
+	/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
+	inet_inc_convert_csum(usk);
+
+	rcu_assign_sk_user_data(usk, local);
+
+	udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
+	udp_sk(usk)->encap_rcv = rxrpc_input_packet;
+	udp_sk(usk)->encap_destroy = NULL;
+	udp_sk(usk)->gro_receive = NULL;
+	udp_sk(usk)->gro_complete = NULL;
+
+	udp_encap_enable();
+#if IS_ENABLED(CONFIG_IPV6)
+	if (local->srx.transport.family == AF_INET6)
+		udpv6_encap_enable();
+#endif
+	usk->sk_error_report = rxrpc_error_report;
+
 	/* if a local address was supplied then bind it */
 	if (local->srx.transport_len > sizeof(sa_family_t)) {
 		_debug("bind");
@@ -191,11 +214,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
 		BUG();
 	}
 
-	/* set the socket up */
-	sock = local->socket->sk;
-	sock->sk_user_data = local;
-	sock->sk_data_ready = rxrpc_data_ready;
-	sock->sk_error_report = rxrpc_error_report;
 	_leave(" = 0");
 	return 0;
 
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index f3e6fc670da2..05b51bdbdd41 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -301,6 +301,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 	if (rtt < 0)
 		return;
 
+	spin_lock(&peer->rtt_input_lock);
+
 	/* Replace the oldest datum in the RTT buffer */
 	sum -= peer->rtt_cache[cursor];
 	sum += rtt;
@@ -312,6 +314,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 		peer->rtt_usage = usage;
 	}
 
+	spin_unlock(&peer->rtt_input_lock);
+
 	/* Now recalculate the average */
 	if (usage == RXRPC_RTT_CACHE_SIZE) {
 		avg = sum / RXRPC_RTT_CACHE_SIZE;
@@ -320,6 +324,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 		do_div(avg, usage);
 	}
 
+	/* Don't need to update this under lock */
 	peer->rtt = avg;
 	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
 			   usage, avg);
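
The new rtt_input_lock above serialises the multi-field update of the RTT ring buffer (the cache slots, sum, cursor and usage), while the derived peer->rtt remains a single-word store that readers load locklessly, which is why it can be written after the unlock. A toy userspace version of the same split (hypothetical types; pthreads and C11 atomics standing in for the kernel primitives):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define TOY_CACHE_SIZE 8

struct toy_peer {
	pthread_mutex_t	lock;			/* like rtt_input_lock */
	unsigned long	cache[TOY_CACHE_SIZE];
	unsigned long	sum;
	unsigned int	cursor, usage;
	atomic_ulong	rtt;			/* published without the lock */
};

static void toy_add_rtt(struct toy_peer *peer, unsigned long rtt)
{
	unsigned long avg;

	pthread_mutex_lock(&peer->lock);
	peer->sum -= peer->cache[peer->cursor];	/* replace the oldest datum */
	peer->sum += rtt;
	peer->cache[peer->cursor] = rtt;
	peer->cursor = (peer->cursor + 1) % TOY_CACHE_SIZE;
	if (peer->usage < TOY_CACHE_SIZE)
		peer->usage++;
	avg = peer->sum / peer->usage;
	pthread_mutex_unlock(&peer->lock);

	/* Single-word store; doesn't need to be inside the lock. */
	atomic_store(&peer->rtt, avg);
}

int main(void)
{
	struct toy_peer peer = { .lock = PTHREAD_MUTEX_INITIALIZER };

	toy_add_rtt(&peer, 100);
	toy_add_rtt(&peer, 300);
	printf("avg rtt = %lu\n", atomic_load(&peer.rtt));	/* 200 */
	return 0;
}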
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 01a9febfa367..5691b7d266ca 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -153,8 +153,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
 * assess the MTU size for the network interface through which this peer is
 * reached
 */
-static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
+static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
+				  struct rxrpc_peer *peer)
 {
+	struct net *net = sock_net(&rx->sk);
 	struct dst_entry *dst;
 	struct rtable *rt;
 	struct flowi fl;
@@ -169,7 +171,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
 	switch (peer->srx.transport.family) {
 	case AF_INET:
 		rt = ip_route_output_ports(
-			&init_net, fl4, NULL,
+			net, fl4, NULL,
 			peer->srx.transport.sin.sin_addr.s_addr, 0,
 			htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
 		if (IS_ERR(rt)) {
@@ -188,7 +190,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
 		       sizeof(struct in6_addr));
 		fl6->fl6_dport = htons(7001);
 		fl6->fl6_sport = htons(7000);
-		dst = ip6_route_output(&init_net, NULL, fl6);
+		dst = ip6_route_output(net, NULL, fl6);
 		if (dst->error) {
 			_leave(" [route err %d]", dst->error);
 			return;
@@ -223,6 +225,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
 		peer->service_conns = RB_ROOT;
 		seqlock_init(&peer->service_conn_lock);
 		spin_lock_init(&peer->lock);
+		spin_lock_init(&peer->rtt_input_lock);
 		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
 
 		if (RXRPC_TX_SMSS > 2190)
@@ -240,10 +243,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
 /*
 * Initialise peer record.
 */
-static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
+static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
+			    unsigned long hash_key)
 {
 	peer->hash_key = hash_key;
-	rxrpc_assess_MTU_size(peer);
+	rxrpc_assess_MTU_size(rx, peer);
 	peer->mtu = peer->if_mtu;
 	peer->rtt_last_req = ktime_get_real();
 
@@ -275,7 +279,8 @@ static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
 /*
 * Set up a new peer.
 */
-static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
+static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
+					    struct rxrpc_local *local,
 					    struct sockaddr_rxrpc *srx,
 					    unsigned long hash_key,
 					    gfp_t gfp)
@@ -287,7 +292,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
 	peer = rxrpc_alloc_peer(local, gfp);
 	if (peer) {
 		memcpy(&peer->srx, srx, sizeof(*srx));
-		rxrpc_init_peer(peer, hash_key);
+		rxrpc_init_peer(rx, peer, hash_key);
 	}
 
 	_leave(" = %p", peer);
@@ -299,14 +304,15 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
 * since we've already done a search in the list from the non-reentrant context
 * (the data_ready handler) that is the only place we can add new peers.
 */
-void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
+void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
+			     struct rxrpc_peer *peer)
 {
 	struct rxrpc_net *rxnet = local->rxnet;
 	unsigned long hash_key;
 
 	hash_key = rxrpc_peer_hash_key(local, &peer->srx);
 	peer->local = local;
-	rxrpc_init_peer(peer, hash_key);
+	rxrpc_init_peer(rx, peer, hash_key);
 
 	spin_lock(&rxnet->peer_hash_lock);
 	hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
@@ -317,7 +323,8 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
 /*
 * obtain a remote transport endpoint for the specified address
 */
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
+				     struct rxrpc_local *local,
 				     struct sockaddr_rxrpc *srx, gfp_t gfp)
 {
 	struct rxrpc_peer *peer, *candidate;
@@ -337,7 +344,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
 	/* The peer is not yet present in hash - create a candidate
 	 * for a new record and then redo the search.
 	 */
-	candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
+	candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
 	if (!candidate) {
 		_leave(" = NULL [nomem]");
 		return NULL;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index f218ccf1e2d9..b2c3406a2cf2 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -398,6 +398,7 @@ static int u32_init(struct tcf_proto *tp)
 	rcu_assign_pointer(tp_c->hlist, root_ht);
 	root_ht->tp_c = tp_c;
 
+	root_ht->refcnt++;
 	rcu_assign_pointer(tp->root, root_ht);
 	tp->data = tp_c;
 	return 0;
@@ -610,7 +611,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
 	struct tc_u_hnode __rcu **hn;
 	struct tc_u_hnode *phn;
 
-	WARN_ON(ht->refcnt);
+	WARN_ON(--ht->refcnt);
 
 	u32_clear_hnode(tp, ht, extack);
 
@@ -649,7 +650,7 @@ static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 
 	WARN_ON(root_ht == NULL);
 
-	if (root_ht && --root_ht->refcnt == 0)
+	if (root_ht && --root_ht->refcnt == 1)
 		u32_destroy_hnode(tp, root_ht, extack);
 
 	if (--tp_c->refcnt == 0) {
@@ -698,7 +699,6 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
 	}
 
 	if (ht->refcnt == 1) {
-		ht->refcnt--;
 		u32_destroy_hnode(tp, ht, extack);
 	} else {
 		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
@@ -708,11 +708,11 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
 out:
 	*last = true;
 	if (root_ht) {
-		if (root_ht->refcnt > 1) {
+		if (root_ht->refcnt > 2) {
 			*last = false;
 			goto ret;
 		}
-		if (root_ht->refcnt == 1) {
+		if (root_ht->refcnt == 2) {
 			if (!ht_empty(root_ht)) {
 				*last = false;
 				goto ret;
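
The cls_u32 change above gives the root hnode an extra reference on behalf of tp->root, in addition to the one it already holds for the hnode list, which is why every threshold in the teardown paths shifts by one: destruction fires when the count drops to 1 rather than 0, "still in use" becomes a count above 2, and u32_destroy_hnode() itself now drops the final reference. A toy model of that convention (hypothetical structures, not the actual tc objects):

#include <assert.h>
#include <stdio.h>

struct toy_hnode {
	int refcnt;	/* one ref for the hnode list + one for tp->root */
};

static void toy_init(struct toy_hnode *ht)
{
	ht->refcnt = 1;		/* reference held by the hnode list */
	ht->refcnt++;		/* extra reference held by tp->root */
}

static void toy_destroy_hnode(struct toy_hnode *ht)
{
	/* Drops the list reference; anything left over is a bug. */
	ht->refcnt--;
	assert(ht->refcnt == 0);
	printf("hnode freed\n");
}

static void toy_destroy(struct toy_hnode *root_ht)
{
	/* Drop tp->root's reference; if only the list reference
	 * remains, the root hnode is unused and can be torn down. */
	if (--root_ht->refcnt == 1)
		toy_destroy_hnode(root_ht);
}

int main(void)
{
	struct toy_hnode root;

	toy_init(&root);
	toy_destroy(&root);	/* refcnt 2 -> 1 -> destroy -> 0 */
	return 0;
}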
diff --git a/net/tipc/link.c b/net/tipc/link.c
index fb886b525d95..f6552e4f4b43 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -477,6 +477,8 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
 	l->in_session = false;
 	l->bearer_id = bearer_id;
 	l->tolerance = tolerance;
+	if (bc_rcvlink)
+		bc_rcvlink->tolerance = tolerance;
 	l->net_plane = net_plane;
 	l->advertised_mtu = mtu;
 	l->mtu = mtu;
@@ -843,14 +845,21 @@ static void link_prepare_wakeup(struct tipc_link *l)
 
 void tipc_link_reset(struct tipc_link *l)
 {
+	struct sk_buff_head list;
+
+	__skb_queue_head_init(&list);
+
 	l->in_session = false;
 	l->session++;
 	l->mtu = l->advertised_mtu;
+
 	spin_lock_bh(&l->wakeupq.lock);
+	skb_queue_splice_init(&l->wakeupq, &list);
+	spin_unlock_bh(&l->wakeupq.lock);
+
 	spin_lock_bh(&l->inputq->lock);
-	skb_queue_splice_init(&l->wakeupq, l->inputq);
+	skb_queue_splice_init(&list, l->inputq);
 	spin_unlock_bh(&l->inputq->lock);
-	spin_unlock_bh(&l->wakeupq.lock);
 
 	__skb_queue_purge(&l->transmq);
 	__skb_queue_purge(&l->deferdq);
@@ -1031,7 +1040,7 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
 	/* Detect repeated retransmit failures on same packet */
 	if (r->last_retransm != buf_seqno(skb)) {
 		r->last_retransm = buf_seqno(skb);
-		r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance);
+		r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
 	} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
 		link_retransmit_failure(l, skb);
 		if (link_is_bc_sndlink(l))
@@ -1576,9 +1585,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 		strncpy(if_name, data, TIPC_MAX_IF_NAME);
 
 		/* Update own tolerance if peer indicates a non-zero value */
-		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
 			l->tolerance = peers_tol;
-
+			l->bc_rcvlink->tolerance = peers_tol;
+		}
 		/* Update own priority if peer's priority is higher */
 		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
 			l->priority = peers_prio;
@@ -1604,9 +1614,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 		l->rcv_nxt_state = msg_seqno(hdr) + 1;
 
 		/* Update own tolerance if peer indicates a non-zero value */
-		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
 			l->tolerance = peers_tol;
-
+			l->bc_rcvlink->tolerance = peers_tol;
+		}
 		/* Update own prio if peer indicates a different value */
 		if ((peers_prio != l->priority) &&
 		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
@@ -2223,6 +2234,8 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
 			     struct sk_buff_head *xmitq)
 {
 	l->tolerance = tol;
+	if (l->bc_rcvlink)
+		l->bc_rcvlink->tolerance = tol;
 	if (link_is_up(l))
 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
 }
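
The tipc_link_reset() change above avoids nesting the wakeupq and inputq locks, which lockdep reported as possible recursive locking since both queue locks belong to the same lock class; the wakeup queue is instead drained into a private list under the first lock and appended under the second. A toy userspace sketch of that two-phase splice (hypothetical types; pthread mutexes standing in for the skb queue locks):

#include <pthread.h>
#include <stdio.h>

struct toy_node { struct toy_node *next; };

struct toy_queue {
	pthread_mutex_t lock;
	struct toy_node *head;
};

/* Drain @src into @dst without ever holding both queue locks at once. */
static void toy_splice(struct toy_queue *src, struct toy_queue *dst)
{
	struct toy_node *list, **pp;

	pthread_mutex_lock(&src->lock);
	list = src->head;			/* detach under the src lock */
	src->head = NULL;
	pthread_mutex_unlock(&src->lock);

	pthread_mutex_lock(&dst->lock);
	for (pp = &dst->head; *pp; pp = &(*pp)->next)
		;				/* walk to the tail */
	*pp = list;				/* append under the dst lock */
	pthread_mutex_unlock(&dst->lock);
}

int main(void)
{
	struct toy_node n1 = { NULL }, n2 = { NULL };
	struct toy_queue wakeupq = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = &n1 };
	struct toy_queue inputq  = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = &n2 };
	int count = 0;

	toy_splice(&wakeupq, &inputq);
	for (struct toy_node *n = inputq.head; n; n = n->next)
		count++;
	printf("inputq now holds %d nodes\n", count);	/* prints 2 */
	return 0;
}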
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index b6f99b021d09..49810fdff4c5 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1196,6 +1196,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
 * @skb: pointer to message buffer.
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+				   struct sk_buff_head *inputq,
 				   struct sk_buff_head *xmitq)
 {
 	struct tipc_msg *hdr = buf_msg(skb);
@@ -1213,7 +1214,16 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
 		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
 				      tsk_peer_port(tsk));
 		sk->sk_state_change(sk);
-		goto exit;
+
+		/* State change is ignored if socket already awake,
+		 * - convert msg to abort msg and add to inqueue
+		 */
+		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
+		msg_set_type(hdr, TIPC_CONN_MSG);
+		msg_set_size(hdr, BASIC_H_SIZE);
+		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+		__skb_queue_tail(inputq, skb);
+		return;
 	}
 
 	tsk->probe_unacked = false;
@@ -1936,7 +1946,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
 
 	switch (msg_user(hdr)) {
 	case CONN_MANAGER:
-		tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
+		tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
 		return;
 	case SOCK_WAKEUP:
 		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
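
The socket.c change above covers the case where sk->sk_state_change() alone can be missed because the socket is already awake: rather than dropping the probe, the received buffer is rewritten in place into a connection abort message and queued onto inputq, so the shutdown is observed in-band on the next receive. A toy model of that reuse-the-buffer pattern (hypothetical structures, not the TIPC message format):

#include <stdio.h>

enum toy_type { TOY_PROBE, TOY_ABORT };

struct toy_msg {
	enum toy_type type;
	struct toy_msg *next;
};

static struct toy_msg *inputq;

static void toy_conn_proto_rcv(struct toy_msg *msg, int peer_gone)
{
	if (peer_gone) {
		/* Reuse the received buffer as the in-band EOF marker
		 * instead of relying on an out-of-band wakeup alone. */
		msg->type = TOY_ABORT;
		msg->next = inputq;
		inputq = msg;
		return;
	}
	/* normal probe handling would go here */
}

int main(void)
{
	struct toy_msg probe = { .type = TOY_PROBE };

	toy_conn_proto_rcv(&probe, 1);
	printf("inputq head: %s\n",
	       inputq && inputq->type == TOY_ABORT ? "abort" : "empty");
	return 0;
}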
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 08c341b49760..e101af52d1d6 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 #
 # This test is for checking rtnetlink callpaths, and get as much coverage as possible.
 #
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
index 850767befa47..99e537ab5ad9 100755
--- a/tools/testing/selftests/net/udpgso_bench.sh
+++ b/tools/testing/selftests/net/udpgso_bench.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 #
 # Run a series of udpgso benchmarks