author     Linus Torvalds <torvalds@linux-foundation.org>  2017-03-04 20:31:39 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-03-04 20:31:39 -0500
commit     8d70eeb84ab277377c017af6a21d0a337025dede (patch)
tree       d6e8a80e1d5e953ab37eef0c7468c77e7735779b
parent     2d62e0768d3c28536d4cfe4c40ba1e5e8e442a93 (diff)
parent     f78ef7cd9a0686b979679d0de061c6dbfd8d649e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix double-free in batman-adv, from Sven Eckelmann.
 2) Fix packet stats for fast-RX path, from Johannes Berg.
 3) Netfilter's ip_route_me_harder() doesn't handle request sockets properly, fix from Florian Westphal.
 4) Fix sendmsg deadlock in rxrpc, from David Howells.
 5) Add missing RCU locking to transport hashtable scan, from Xin Long.
 6) Fix potential packet loss in mlxsw driver, from Ido Schimmel.
 7) Fix race in NAPI handling between poll handlers and busy polling, from Eric Dumazet.
 8) TX path in vxlan and geneve need proper RCU locking, from Jakub Kicinski.
 9) SYN processing in DCCP and TCP need to disable BH, from Eric Dumazet.
10) Properly handle net_enable_timestamp() being invoked from IRQ context, also from Eric Dumazet.
11) Fix crash on device-tree systems in xgene driver, from Alban Bedel.
12) Do not call sk_free() on a locked socket, from Arnaldo Carvalho de Melo.
13) Fix use-after-free in netvsc driver, from Dexuan Cui.
14) Fix max MTU setting in bonding driver, from WANG Cong.
15) xen-netback hash table can be allocated from softirq context, so use GFP_ATOMIC. From Anoob Soman.
16) Fix MAC address change bug in bgmac driver, from Hari Vyas.
17) strparser needs to destroy strp_wq on module exit, from WANG Cong.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (69 commits)
  strparser: destroy workqueue on module exit
  sfc: fix IPID endianness in TSOv2
  sfc: avoid max() in array size
  rds: remove unnecessary returned value check
  rxrpc: Fix potential NULL-pointer exception
  nfp: correct DMA direction in XDP DMA sync
  nfp: don't tell FW about the reserved buffer space
  net: ethernet: bgmac: mac address change bug
  net: ethernet: bgmac: init sequence bug
  xen-netback: don't vfree() queues under spinlock
  xen-netback: keep a local pointer for vif in backend_disconnect()
  netfilter: nf_tables: don't call nfnetlink_set_err() if nfnetlink_send() fails
  netfilter: nft_set_rbtree: incorrect assumption on lower interval lookups
  netfilter: nf_conntrack_sip: fix wrong memory initialisation
  can: flexcan: fix typo in comment
  can: usb_8dev: Fix memory leak of priv->cmd_msg_buffer
  can: gs_usb: fix coding style
  can: gs_usb: Don't use stack memory for USB transfers
  ixgbe: Limit use of 2K buffers on architectures with 256B or larger cache lines
  ixgbe: update the rss key on h/w, when ethtool ask for it
  ...
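Item 8 above lands in the geneve.c and vxlan.c hunks further down: the transmit path dereferences an RCU-protected socket pointer, so the lookup and its use must sit inside one RCU read-side critical section, and every early exit must drop the read lock. Below is a minimal sketch of that shape only; the example_* names and the stub helper are hypothetical stand-ins, not the real driver code.

#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Hypothetical types for illustration; the real drivers use their own. */
struct example_sock;
struct example_dev {
        struct net_device          *netdev;
        struct example_sock __rcu  *sock;
};

static int example_build_and_send(struct sk_buff *skb, struct example_sock *sock)
{
        /* Stand-in for the real encapsulation and transmit work; assume it
         * consumes the skb on success and leaves it to the caller on failure.
         */
        return 0;
}

static void example_xmit_one(struct sk_buff *skb, struct example_dev *dev)
{
        struct example_sock *sock;
        int err;

        rcu_read_lock();
        sock = rcu_dereference(dev->sock);      /* valid only under rcu_read_lock() */
        if (!sock) {
                err = -ENOTCONN;
                goto tx_error;
        }

        err = example_build_and_send(skb, sock);
        if (err)
                goto tx_error;

        rcu_read_unlock();                      /* normal completion */
        return;

tx_error:
        rcu_read_unlock();                      /* every early exit unlocks too */
        dev->netdev->stats.tx_errors++;
        kfree_skb(skb);
}

This mirrors the out_unlock/tx_error labels the vxlan hunk introduces, where the previous bare "return" statements leaked the read lock.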
-rw-r--r--  MAINTAINERS | 5
-rw-r--r--  drivers/net/bonding/bond_main.c | 1
-rw-r--r--  drivers/net/can/flexcan.c | 2
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 51
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 9
-rw-r--r--  drivers/net/ethernet/amd/declance.c | 30
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 24
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 27
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 16
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 6
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 4
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 30
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 7
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 12
-rw-r--r--  drivers/net/ethernet/sgi/meth.c | 4
-rw-r--r--  drivers/net/geneve.c | 2
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 15
-rw-r--r--  drivers/net/usb/asix_devices.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/vxlan.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 101
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 19
-rw-r--r--  drivers/net/wireless/ath/ath5k/ath5k.h | 2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 28
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00.h | 2
-rw-r--r--  drivers/net/xen-netback/hash.c | 2
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 31
-rw-r--r--  include/linux/average.h | 61
-rw-r--r--  include/linux/mlx4/driver.h | 2
-rw-r--r--  include/linux/netdevice.h | 29
-rw-r--r--  include/net/netfilter/nf_tables.h | 6
-rw-r--r--  include/net/sock.h | 1
-rw-r--r--  include/trace/events/rxrpc.h | 2
-rw-r--r--  kernel/bpf/verifier.c | 2
-rw-r--r--  net/batman-adv/fragmentation.c | 20
-rw-r--r--  net/batman-adv/types.h | 2
-rw-r--r--  net/bridge/br_forward.c | 3
-rw-r--r--  net/bridge/br_vlan.c | 2
-rw-r--r--  net/core/dev.c | 111
-rw-r--r--  net/core/sock.c | 16
-rw-r--r--  net/dccp/input.c | 10
-rw-r--r--  net/dccp/minisocks.c | 5
-rw-r--r--  net/ipv4/fib_frontend.c | 1
-rw-r--r--  net/ipv4/netfilter.c | 7
-rw-r--r--  net/ipv4/tcp.c | 15
-rw-r--r--  net/ipv4/tcp_input.c | 10
-rw-r--r--  net/ipv6/addrconf.c | 22
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 1
-rw-r--r--  net/ipv6/route.c | 21
-rw-r--r--  net/mac80211/agg-rx.c | 3
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/pm.c | 1
-rw-r--r--  net/mac80211/rx.c | 31
-rw-r--r--  net/mac80211/sta_info.c | 4
-rw-r--r--  net/mac80211/sta_info.h | 8
-rw-r--r--  net/mac80211/status.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 2
-rw-r--r--  net/netfilter/nf_tables_api.c | 133
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 9
-rw-r--r--  net/openvswitch/actions.c | 3
-rw-r--r--  net/openvswitch/conntrack.c | 1
-rw-r--r--  net/packet/af_packet.c | 8
-rw-r--r--  net/rds/ib.c | 10
-rw-r--r--  net/rds/ib_mr.h | 2
-rw-r--r--  net/rds/rds.h | 2
-rw-r--r--  net/rds/tcp.c | 6
-rw-r--r--  net/rds/transport.c | 4
-rw-r--r--  net/rxrpc/af_rxrpc.c | 12
-rw-r--r--  net/rxrpc/ar-internal.h | 1
-rw-r--r--  net/rxrpc/call_accept.c | 48
-rw-r--r--  net/rxrpc/call_object.c | 18
-rw-r--r--  net/rxrpc/input.c | 1
-rw-r--r--  net/rxrpc/recvmsg.c | 39
-rw-r--r--  net/rxrpc/sendmsg.c | 58
-rw-r--r--  net/sctp/input.c | 3
-rw-r--r--  net/strparser/strparser.c | 1
86 files changed, 895 insertions, 368 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index fa0e8da55f18..c265a5fe4848 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6011,9 +6011,8 @@ F: include/linux/hsi/
6011F: include/uapi/linux/hsi/ 6011F: include/uapi/linux/hsi/
6012 6012
6013HSO 3G MODEM DRIVER 6013HSO 3G MODEM DRIVER
6014M: Jan Dumon <j.dumon@option.com> 6014L: linux-usb@vger.kernel.org
6015W: http://www.pharscape.org 6015S: Orphan
6016S: Maintained
6017F: drivers/net/usb/hso.c 6016F: drivers/net/usb/hso.c
6018 6017
6019HSR NETWORK PROTOCOL 6018HSR NETWORK PROTOCOL
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6321f12630c8..8a4ba8b88e52 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4179,6 +4179,7 @@ void bond_setup(struct net_device *bond_dev)
4179 4179
4180 /* Initialize the device entry points */ 4180 /* Initialize the device entry points */
4181 ether_setup(bond_dev); 4181 ether_setup(bond_dev);
4182 bond_dev->max_mtu = ETH_MAX_MTU;
4182 bond_dev->netdev_ops = &bond_netdev_ops; 4183 bond_dev->netdev_ops = &bond_netdev_ops;
4183 bond_dev->ethtool_ops = &bond_ethtool_ops; 4184 bond_dev->ethtool_ops = &bond_ethtool_ops;
4184 4185
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ea57fed375c6..13f0f219d8aa 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -196,7 +196,7 @@
196#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */ 196#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */
197#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ 197#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */
198#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ 198#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
199#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disble Memory error detection */ 199#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
200#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ 200#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
201 201
202/* Structure of the message buffer */ 202/* Structure of the message buffer */
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 77e3cc06a30c..300349fe8dc0 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -258,7 +258,7 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
258 rc = usb_control_msg(interface_to_usbdev(intf), 258 rc = usb_control_msg(interface_to_usbdev(intf),
259 usb_sndctrlpipe(interface_to_usbdev(intf), 0), 259 usb_sndctrlpipe(interface_to_usbdev(intf), 0),
260 GS_USB_BREQ_MODE, 260 GS_USB_BREQ_MODE,
261 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 261 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
262 gsdev->channel, 262 gsdev->channel,
263 0, 263 0,
264 dm, 264 dm,
@@ -432,7 +432,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
432 rc = usb_control_msg(interface_to_usbdev(intf), 432 rc = usb_control_msg(interface_to_usbdev(intf),
433 usb_sndctrlpipe(interface_to_usbdev(intf), 0), 433 usb_sndctrlpipe(interface_to_usbdev(intf), 0),
434 GS_USB_BREQ_BITTIMING, 434 GS_USB_BREQ_BITTIMING,
435 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 435 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
436 dev->channel, 436 dev->channel,
437 0, 437 0,
438 dbt, 438 dbt,
@@ -546,7 +546,6 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
546 hf, 546 hf,
547 urb->transfer_dma); 547 urb->transfer_dma);
548 548
549
550 if (rc == -ENODEV) { 549 if (rc == -ENODEV) {
551 netif_device_detach(netdev); 550 netif_device_detach(netdev);
552 } else { 551 } else {
@@ -804,7 +803,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
804 rc = usb_control_msg(interface_to_usbdev(intf), 803 rc = usb_control_msg(interface_to_usbdev(intf),
805 usb_rcvctrlpipe(interface_to_usbdev(intf), 0), 804 usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
806 GS_USB_BREQ_BT_CONST, 805 GS_USB_BREQ_BT_CONST,
807 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 806 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
808 channel, 807 channel,
809 0, 808 0,
810 bt_const, 809 bt_const,
@@ -908,57 +907,72 @@ static int gs_usb_probe(struct usb_interface *intf,
908 struct gs_usb *dev; 907 struct gs_usb *dev;
909 int rc = -ENOMEM; 908 int rc = -ENOMEM;
910 unsigned int icount, i; 909 unsigned int icount, i;
911 struct gs_host_config hconf = { 910 struct gs_host_config *hconf;
912 .byte_order = 0x0000beef, 911 struct gs_device_config *dconf;
913 }; 912
914 struct gs_device_config dconf; 913 hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
914 if (!hconf)
915 return -ENOMEM;
916
917 hconf->byte_order = 0x0000beef;
915 918
916 /* send host config */ 919 /* send host config */
917 rc = usb_control_msg(interface_to_usbdev(intf), 920 rc = usb_control_msg(interface_to_usbdev(intf),
918 usb_sndctrlpipe(interface_to_usbdev(intf), 0), 921 usb_sndctrlpipe(interface_to_usbdev(intf), 0),
919 GS_USB_BREQ_HOST_FORMAT, 922 GS_USB_BREQ_HOST_FORMAT,
920 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 923 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
921 1, 924 1,
922 intf->altsetting[0].desc.bInterfaceNumber, 925 intf->altsetting[0].desc.bInterfaceNumber,
923 &hconf, 926 hconf,
924 sizeof(hconf), 927 sizeof(*hconf),
925 1000); 928 1000);
926 929
930 kfree(hconf);
931
927 if (rc < 0) { 932 if (rc < 0) {
928 dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", 933 dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
929 rc); 934 rc);
930 return rc; 935 return rc;
931 } 936 }
932 937
938 dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
939 if (!dconf)
940 return -ENOMEM;
941
933 /* read device config */ 942 /* read device config */
934 rc = usb_control_msg(interface_to_usbdev(intf), 943 rc = usb_control_msg(interface_to_usbdev(intf),
935 usb_rcvctrlpipe(interface_to_usbdev(intf), 0), 944 usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
936 GS_USB_BREQ_DEVICE_CONFIG, 945 GS_USB_BREQ_DEVICE_CONFIG,
937 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 946 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
938 1, 947 1,
939 intf->altsetting[0].desc.bInterfaceNumber, 948 intf->altsetting[0].desc.bInterfaceNumber,
940 &dconf, 949 dconf,
941 sizeof(dconf), 950 sizeof(*dconf),
942 1000); 951 1000);
943 if (rc < 0) { 952 if (rc < 0) {
944 dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", 953 dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
945 rc); 954 rc);
955 kfree(dconf);
946 return rc; 956 return rc;
947 } 957 }
948 958
949 icount = dconf.icount + 1; 959 icount = dconf->icount + 1;
950 dev_info(&intf->dev, "Configuring for %d interfaces\n", icount); 960 dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
951 961
952 if (icount > GS_MAX_INTF) { 962 if (icount > GS_MAX_INTF) {
953 dev_err(&intf->dev, 963 dev_err(&intf->dev,
954 "Driver cannot handle more that %d CAN interfaces\n", 964 "Driver cannot handle more that %d CAN interfaces\n",
955 GS_MAX_INTF); 965 GS_MAX_INTF);
966 kfree(dconf);
956 return -EINVAL; 967 return -EINVAL;
957 } 968 }
958 969
959 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 970 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
960 if (!dev) 971 if (!dev) {
972 kfree(dconf);
961 return -ENOMEM; 973 return -ENOMEM;
974 }
975
962 init_usb_anchor(&dev->rx_submitted); 976 init_usb_anchor(&dev->rx_submitted);
963 977
964 atomic_set(&dev->active_channels, 0); 978 atomic_set(&dev->active_channels, 0);
@@ -967,7 +981,7 @@ static int gs_usb_probe(struct usb_interface *intf,
967 dev->udev = interface_to_usbdev(intf); 981 dev->udev = interface_to_usbdev(intf);
968 982
969 for (i = 0; i < icount; i++) { 983 for (i = 0; i < icount; i++) {
970 dev->canch[i] = gs_make_candev(i, intf, &dconf); 984 dev->canch[i] = gs_make_candev(i, intf, dconf);
971 if (IS_ERR_OR_NULL(dev->canch[i])) { 985 if (IS_ERR_OR_NULL(dev->canch[i])) {
972 /* save error code to return later */ 986 /* save error code to return later */
973 rc = PTR_ERR(dev->canch[i]); 987 rc = PTR_ERR(dev->canch[i]);
@@ -978,12 +992,15 @@ static int gs_usb_probe(struct usb_interface *intf,
978 gs_destroy_candev(dev->canch[i]); 992 gs_destroy_candev(dev->canch[i]);
979 993
980 usb_kill_anchored_urbs(&dev->rx_submitted); 994 usb_kill_anchored_urbs(&dev->rx_submitted);
995 kfree(dconf);
981 kfree(dev); 996 kfree(dev);
982 return rc; 997 return rc;
983 } 998 }
984 dev->canch[i]->parent = dev; 999 dev->canch[i]->parent = dev;
985 } 1000 }
986 1001
1002 kfree(dconf);
1003
987 return 0; 1004 return 0;
988} 1005}
989 1006
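The gs_usb hunk above replaces the on-stack hconf and dconf structures with kmalloc()ed ones because buffers handed to usb_control_msg() may be mapped for DMA and therefore must not live on the caller's stack. The following is only a minimal sketch of that pattern under those assumptions; example_send_config() and its request/value constants are placeholders, not the driver's real symbols.

#include <linux/slab.h>
#include <linux/usb.h>

static int example_send_config(struct usb_interface *intf, u8 request)
{
        struct usb_device *udev = interface_to_usbdev(intf);
        u32 *cfg;
        int rc;

        cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);        /* heap, DMA-able */
        if (!cfg)
                return -ENOMEM;

        *cfg = 0x0000beef;                              /* byte-order probe, as in the hunk above */

        rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
                             USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
                             1, intf->altsetting[0].desc.bInterfaceNumber,
                             cfg, sizeof(*cfg), 1000);

        kfree(cfg);                                     /* freed on every path */

        return rc < 0 ? rc : 0;
}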
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 108a30e15097..d000cb62d6ae 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -951,8 +951,8 @@ static int usb_8dev_probe(struct usb_interface *intf,
951 for (i = 0; i < MAX_TX_URBS; i++) 951 for (i = 0; i < MAX_TX_URBS; i++)
952 priv->tx_contexts[i].echo_index = MAX_TX_URBS; 952 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
953 953
954 priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg), 954 priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg),
955 GFP_KERNEL); 955 GFP_KERNEL);
956 if (!priv->cmd_msg_buffer) 956 if (!priv->cmd_msg_buffer)
957 goto cleanup_candev; 957 goto cleanup_candev;
958 958
@@ -966,7 +966,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
966 if (err) { 966 if (err) {
967 netdev_err(netdev, 967 netdev_err(netdev,
968 "couldn't register CAN device: %d\n", err); 968 "couldn't register CAN device: %d\n", err);
969 goto cleanup_cmd_msg_buffer; 969 goto cleanup_candev;
970 } 970 }
971 971
972 err = usb_8dev_cmd_version(priv, &version); 972 err = usb_8dev_cmd_version(priv, &version);
@@ -987,9 +987,6 @@ static int usb_8dev_probe(struct usb_interface *intf,
987cleanup_unregister_candev: 987cleanup_unregister_candev:
988 unregister_netdev(priv->netdev); 988 unregister_netdev(priv->netdev);
989 989
990cleanup_cmd_msg_buffer:
991 kfree(priv->cmd_msg_buffer);
992
993cleanup_candev: 990cleanup_candev:
994 free_candev(netdev); 991 free_candev(netdev);
995 992
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 76e5fc7adff5..6c98901f1b89 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1276,18 +1276,6 @@ err_out:
1276 return ret; 1276 return ret;
1277} 1277}
1278 1278
1279static void __exit dec_lance_remove(struct device *bdev)
1280{
1281 struct net_device *dev = dev_get_drvdata(bdev);
1282 resource_size_t start, len;
1283
1284 unregister_netdev(dev);
1285 start = to_tc_dev(bdev)->resource.start;
1286 len = to_tc_dev(bdev)->resource.end - start + 1;
1287 release_mem_region(start, len);
1288 free_netdev(dev);
1289}
1290
1291/* Find all the lance cards on the system and initialize them */ 1279/* Find all the lance cards on the system and initialize them */
1292static int __init dec_lance_platform_probe(void) 1280static int __init dec_lance_platform_probe(void)
1293{ 1281{
@@ -1320,7 +1308,7 @@ static void __exit dec_lance_platform_remove(void)
1320 1308
1321#ifdef CONFIG_TC 1309#ifdef CONFIG_TC
1322static int dec_lance_tc_probe(struct device *dev); 1310static int dec_lance_tc_probe(struct device *dev);
1323static int __exit dec_lance_tc_remove(struct device *dev); 1311static int dec_lance_tc_remove(struct device *dev);
1324 1312
1325static const struct tc_device_id dec_lance_tc_table[] = { 1313static const struct tc_device_id dec_lance_tc_table[] = {
1326 { "DEC ", "PMAD-AA " }, 1314 { "DEC ", "PMAD-AA " },
@@ -1334,7 +1322,7 @@ static struct tc_driver dec_lance_tc_driver = {
1334 .name = "declance", 1322 .name = "declance",
1335 .bus = &tc_bus_type, 1323 .bus = &tc_bus_type,
1336 .probe = dec_lance_tc_probe, 1324 .probe = dec_lance_tc_probe,
1337 .remove = __exit_p(dec_lance_tc_remove), 1325 .remove = dec_lance_tc_remove,
1338 }, 1326 },
1339}; 1327};
1340 1328
@@ -1346,7 +1334,19 @@ static int dec_lance_tc_probe(struct device *dev)
1346 return status; 1334 return status;
1347} 1335}
1348 1336
1349static int __exit dec_lance_tc_remove(struct device *dev) 1337static void dec_lance_remove(struct device *bdev)
1338{
1339 struct net_device *dev = dev_get_drvdata(bdev);
1340 resource_size_t start, len;
1341
1342 unregister_netdev(dev);
1343 start = to_tc_dev(bdev)->resource.start;
1344 len = to_tc_dev(bdev)->resource.end - start + 1;
1345 release_mem_region(start, len);
1346 free_netdev(dev);
1347}
1348
1349static int dec_lance_tc_remove(struct device *dev)
1350{ 1350{
1351 put_device(dev); 1351 put_device(dev);
1352 dec_lance_remove(dev); 1352 dec_lance_remove(dev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index a7d16db5c4b2..937f37a5dcb2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1323,7 +1323,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1323static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, 1323static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
1324 enum xgbe_mdio_mode mode) 1324 enum xgbe_mdio_mode mode)
1325{ 1325{
1326 unsigned int reg_val = 0; 1326 unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);
1327 1327
1328 switch (mode) { 1328 switch (mode) {
1329 case XGBE_MDIO_MODE_CL22: 1329 case XGBE_MDIO_MODE_CL22:
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 3aa457c8ca21..248f60d171a5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1131,12 +1131,12 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
1131 hw_if->disable_tx(pdata); 1131 hw_if->disable_tx(pdata);
1132 hw_if->disable_rx(pdata); 1132 hw_if->disable_rx(pdata);
1133 1133
1134 phy_if->phy_stop(pdata);
1135
1134 xgbe_free_irqs(pdata); 1136 xgbe_free_irqs(pdata);
1135 1137
1136 xgbe_napi_disable(pdata, 1); 1138 xgbe_napi_disable(pdata, 1);
1137 1139
1138 phy_if->phy_stop(pdata);
1139
1140 hw_if->exit(pdata); 1140 hw_if->exit(pdata);
1141 1141
1142 channel = pdata->channel; 1142 channel = pdata->channel;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 9d8c953083b4..e707c49cc55a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -716,6 +716,8 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
716 pdata->phy.duplex = DUPLEX_UNKNOWN; 716 pdata->phy.duplex = DUPLEX_UNKNOWN;
717 pdata->phy.autoneg = AUTONEG_ENABLE; 717 pdata->phy.autoneg = AUTONEG_ENABLE;
718 pdata->phy.advertising = pdata->phy.supported; 718 pdata->phy.advertising = pdata->phy.supported;
719
720 return;
719 } 721 }
720 722
721 pdata->phy.advertising &= ~ADVERTISED_Autoneg; 723 pdata->phy.advertising &= ~ADVERTISED_Autoneg;
@@ -875,6 +877,16 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
875 !phy_data->sfp_phy_avail) 877 !phy_data->sfp_phy_avail)
876 return 0; 878 return 0;
877 879
880 /* Set the proper MDIO mode for the PHY */
881 ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
882 phy_data->phydev_mode);
883 if (ret) {
884 netdev_err(pdata->netdev,
885 "mdio port/clause not compatible (%u/%u)\n",
886 phy_data->mdio_addr, phy_data->phydev_mode);
887 return ret;
888 }
889
878 /* Create and connect to the PHY device */ 890 /* Create and connect to the PHY device */
879 phydev = get_phy_device(phy_data->mii, phy_data->mdio_addr, 891 phydev = get_phy_device(phy_data->mii, phy_data->mdio_addr,
880 (phy_data->phydev_mode == XGBE_MDIO_MODE_CL45)); 892 (phy_data->phydev_mode == XGBE_MDIO_MODE_CL45));
@@ -2722,6 +2734,18 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
2722 if (ret) 2734 if (ret)
2723 return ret; 2735 return ret;
2724 2736
2737 /* Set the proper MDIO mode for the re-driver */
2738 if (phy_data->redrv && !phy_data->redrv_if) {
2739 ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
2740 XGBE_MDIO_MODE_CL22);
2741 if (ret) {
2742 netdev_err(pdata->netdev,
2743 "redriver mdio port not compatible (%u)\n",
2744 phy_data->redrv_addr);
2745 return ret;
2746 }
2747 }
2748
2725 /* Start in highest supported mode */ 2749 /* Start in highest supported mode */
2726 xgbe_phy_set_mode(pdata, phy_data->start_mode); 2750 xgbe_phy_set_mode(pdata, phy_data->start_mode);
2727 2751
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index e536301acfde..b3568c453b14 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1749,6 +1749,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1749 1749
1750 pdata->clk = devm_clk_get(&pdev->dev, NULL); 1750 pdata->clk = devm_clk_get(&pdev->dev, NULL);
1751 if (IS_ERR(pdata->clk)) { 1751 if (IS_ERR(pdata->clk)) {
1752 /* Abort if the clock is defined but couldn't be retrived.
1753 * Always abort if the clock is missing on DT system as
1754 * the driver can't cope with this case.
1755 */
1756 if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
1757 return PTR_ERR(pdata->clk);
1752 /* Firmware may have set up the clock already. */ 1758 /* Firmware may have set up the clock already. */
1753 dev_info(dev, "clocks have been setup already\n"); 1759 dev_info(dev, "clocks have been setup already\n");
1754 } 1760 }
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 7b1af950f312..da1b8b225eb9 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -51,8 +51,7 @@ static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
51 51
52static bool platform_bgmac_clk_enabled(struct bgmac *bgmac) 52static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
53{ 53{
54 if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & 54 if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN)
55 (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) != BCMA_IOCTL_CLK)
56 return false; 55 return false;
57 if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET) 56 if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
58 return false; 57 return false;
@@ -61,15 +60,25 @@ static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
61 60
62static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags) 61static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
63{ 62{
64 bgmac_idm_write(bgmac, BCMA_IOCTL, 63 u32 val;
65 (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags));
66 bgmac_idm_read(bgmac, BCMA_IOCTL);
67 64
68 bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0); 65 /* The Reset Control register only contains a single bit to show if the
69 bgmac_idm_read(bgmac, BCMA_RESET_CTL); 66 * controller is currently in reset. Do a sanity check here, just in
70 udelay(1); 67 * case the bootloader happened to leave the device in reset.
68 */
69 val = bgmac_idm_read(bgmac, BCMA_RESET_CTL);
70 if (val) {
71 bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0);
72 bgmac_idm_read(bgmac, BCMA_RESET_CTL);
73 udelay(1);
74 }
71 75
72 bgmac_idm_write(bgmac, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags)); 76 val = bgmac_idm_read(bgmac, BCMA_IOCTL);
77 /* Some bits of BCMA_IOCTL set by HW/ATF and should not change */
78 val |= flags & ~(BGMAC_AWCACHE | BGMAC_ARCACHE | BGMAC_AWUSER |
79 BGMAC_ARUSER);
80 val |= BGMAC_CLK_EN;
81 bgmac_idm_write(bgmac, BCMA_IOCTL, val);
73 bgmac_idm_read(bgmac, BCMA_IOCTL); 82 bgmac_idm_read(bgmac, BCMA_IOCTL);
74 udelay(1); 83 udelay(1);
75} 84}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 415046750bb4..fd66fca00e01 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1223,12 +1223,16 @@ static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
1223static int bgmac_set_mac_address(struct net_device *net_dev, void *addr) 1223static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
1224{ 1224{
1225 struct bgmac *bgmac = netdev_priv(net_dev); 1225 struct bgmac *bgmac = netdev_priv(net_dev);
1226 struct sockaddr *sa = addr;
1226 int ret; 1227 int ret;
1227 1228
1228 ret = eth_prepare_mac_addr_change(net_dev, addr); 1229 ret = eth_prepare_mac_addr_change(net_dev, addr);
1229 if (ret < 0) 1230 if (ret < 0)
1230 return ret; 1231 return ret;
1231 bgmac_write_mac_address(bgmac, (u8 *)addr); 1232
1233 ether_addr_copy(net_dev->dev_addr, sa->sa_data);
1234 bgmac_write_mac_address(bgmac, net_dev->dev_addr);
1235
1232 eth_commit_mac_addr_change(net_dev, addr); 1236 eth_commit_mac_addr_change(net_dev, addr);
1233 return 0; 1237 return 0;
1234} 1238}
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 248727dc62f2..6d1c6ff1ed96 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -213,6 +213,22 @@
213/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */ 213/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
214#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */ 214#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
215#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */ 215#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */
216/* The IOCTL values appear to be different in NS, NSP, and NS2, and do not match
217 * the values directly above
218 */
219#define BGMAC_CLK_EN BIT(0)
220#define BGMAC_RESERVED_0 BIT(1)
221#define BGMAC_SOURCE_SYNC_MODE_EN BIT(2)
222#define BGMAC_DEST_SYNC_MODE_EN BIT(3)
223#define BGMAC_TX_CLK_OUT_INVERT_EN BIT(4)
224#define BGMAC_DIRECT_GMII_MODE BIT(5)
225#define BGMAC_CLK_250_SEL BIT(6)
226#define BGMAC_AWCACHE (0xf << 7)
227#define BGMAC_RESERVED_1 (0x1f << 11)
228#define BGMAC_ARCACHE (0xf << 16)
229#define BGMAC_AWUSER (0x3f << 20)
230#define BGMAC_ARUSER (0x3f << 26)
231#define BGMAC_RESERVED BIT(31)
216 232
217/* BCMA GMAC core specific IO status (BCMA_IOST) flags */ 233/* BCMA GMAC core specific IO status (BCMA_IOST) flags */
218#define BGMAC_BCMA_IOST_ATTACHED 0x00000800 234#define BGMAC_BCMA_IOST_ATTACHED 0x00000800
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 89d4feba1a9a..55c8e25b43d9 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2617,7 +2617,7 @@ out_out:
2617 return err; 2617 return err;
2618} 2618}
2619 2619
2620static int __exit sbmac_remove(struct platform_device *pldev) 2620static int sbmac_remove(struct platform_device *pldev)
2621{ 2621{
2622 struct net_device *dev = platform_get_drvdata(pldev); 2622 struct net_device *dev = platform_get_drvdata(pldev);
2623 struct sbmac_softc *sc = netdev_priv(dev); 2623 struct sbmac_softc *sc = netdev_priv(dev);
@@ -2634,7 +2634,7 @@ static int __exit sbmac_remove(struct platform_device *pldev)
2634 2634
2635static struct platform_driver sbmac_driver = { 2635static struct platform_driver sbmac_driver = {
2636 .probe = sbmac_probe, 2636 .probe = sbmac_probe,
2637 .remove = __exit_p(sbmac_remove), 2637 .remove = sbmac_remove,
2638 .driver = { 2638 .driver = {
2639 .name = sbmac_string, 2639 .name = sbmac_string,
2640 }, 2640 },
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 5fdaa16426c5..fa376444e57c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -37,7 +37,7 @@
37 37
38#define T4FW_VERSION_MAJOR 0x01 38#define T4FW_VERSION_MAJOR 0x01
39#define T4FW_VERSION_MINOR 0x10 39#define T4FW_VERSION_MINOR 0x10
40#define T4FW_VERSION_MICRO 0x1A 40#define T4FW_VERSION_MICRO 0x21
41#define T4FW_VERSION_BUILD 0x00 41#define T4FW_VERSION_BUILD 0x00
42 42
43#define T4FW_MIN_VERSION_MAJOR 0x01 43#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
46 46
47#define T5FW_VERSION_MAJOR 0x01 47#define T5FW_VERSION_MAJOR 0x01
48#define T5FW_VERSION_MINOR 0x10 48#define T5FW_VERSION_MINOR 0x10
49#define T5FW_VERSION_MICRO 0x1A 49#define T5FW_VERSION_MICRO 0x21
50#define T5FW_VERSION_BUILD 0x00 50#define T5FW_VERSION_BUILD 0x00
51 51
52#define T5FW_MIN_VERSION_MAJOR 0x00 52#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
55 55
56#define T6FW_VERSION_MAJOR 0x01 56#define T6FW_VERSION_MAJOR 0x01
57#define T6FW_VERSION_MINOR 0x10 57#define T6FW_VERSION_MINOR 0x10
58#define T6FW_VERSION_MICRO 0x1A 58#define T6FW_VERSION_MICRO 0x21
59#define T6FW_VERSION_BUILD 0x00 59#define T6FW_VERSION_BUILD 0x00
60 60
61#define T6FW_MIN_VERSION_MAJOR 0x00 61#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 262587240c86..928b0df2b8e0 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1456,7 +1456,7 @@ err_alloc_etherdev:
1456 return err; 1456 return err;
1457} 1457}
1458 1458
1459static int __exit ftgmac100_remove(struct platform_device *pdev) 1459static int ftgmac100_remove(struct platform_device *pdev)
1460{ 1460{
1461 struct net_device *netdev; 1461 struct net_device *netdev;
1462 struct ftgmac100 *priv; 1462 struct ftgmac100 *priv;
@@ -1483,7 +1483,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
1483 1483
1484static struct platform_driver ftgmac100_driver = { 1484static struct platform_driver ftgmac100_driver = {
1485 .probe = ftgmac100_probe, 1485 .probe = ftgmac100_probe,
1486 .remove = __exit_p(ftgmac100_remove), 1486 .remove = ftgmac100_remove,
1487 .driver = { 1487 .driver = {
1488 .name = DRV_NAME, 1488 .name = DRV_NAME,
1489 .of_match_table = ftgmac100_of_match, 1489 .of_match_table = ftgmac100_of_match,
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index c0ddbbe6c226..6ac336b546e6 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1156,7 +1156,7 @@ err_alloc_etherdev:
1156 return err; 1156 return err;
1157} 1157}
1158 1158
1159static int __exit ftmac100_remove(struct platform_device *pdev) 1159static int ftmac100_remove(struct platform_device *pdev)
1160{ 1160{
1161 struct net_device *netdev; 1161 struct net_device *netdev;
1162 struct ftmac100 *priv; 1162 struct ftmac100 *priv;
@@ -1176,7 +1176,7 @@ static int __exit ftmac100_remove(struct platform_device *pdev)
1176 1176
1177static struct platform_driver ftmac100_driver = { 1177static struct platform_driver ftmac100_driver = {
1178 .probe = ftmac100_probe, 1178 .probe = ftmac100_probe,
1179 .remove = __exit_p(ftmac100_remove), 1179 .remove = ftmac100_remove,
1180 .driver = { 1180 .driver = {
1181 .name = DRV_NAME, 1181 .name = DRV_NAME,
1182 }, 1182 },
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a2cc43d28888..b1ecc2627a5a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -96,7 +96,7 @@
96#define IXGBE_MAX_FRAME_BUILD_SKB \ 96#define IXGBE_MAX_FRAME_BUILD_SKB \
97 (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD) 97 (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD)
98#else 98#else
99#define IGB_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K 99#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K
100#endif 100#endif
101 101
102/* 102/*
@@ -929,6 +929,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
929 struct ixgbe_adapter *adapter, 929 struct ixgbe_adapter *adapter,
930 struct ixgbe_ring *tx_ring); 930 struct ixgbe_ring *tx_ring);
931u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); 931u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
932void ixgbe_store_key(struct ixgbe_adapter *adapter);
932void ixgbe_store_reta(struct ixgbe_adapter *adapter); 933void ixgbe_store_reta(struct ixgbe_adapter *adapter);
933s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 934s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
934 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); 935 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index a7574c7b12af..90fa5bf23d1b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2998,8 +2998,10 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
2998 } 2998 }
2999 2999
3000 /* Fill out the rss hash key */ 3000 /* Fill out the rss hash key */
3001 if (key) 3001 if (key) {
3002 memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); 3002 memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
3003 ixgbe_store_key(adapter);
3004 }
3003 3005
3004 ixgbe_store_reta(adapter); 3006 ixgbe_store_reta(adapter);
3005 3007
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 060cdce8058f..a7a430a7be2c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3474,6 +3474,21 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3474} 3474}
3475 3475
3476/** 3476/**
3477 * ixgbe_store_key - Write the RSS key to HW
3478 * @adapter: device handle
3479 *
3480 * Write the RSS key stored in adapter.rss_key to HW.
3481 */
3482void ixgbe_store_key(struct ixgbe_adapter *adapter)
3483{
3484 struct ixgbe_hw *hw = &adapter->hw;
3485 int i;
3486
3487 for (i = 0; i < 10; i++)
3488 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3489}
3490
3491/**
3477 * ixgbe_store_reta - Write the RETA table to HW 3492 * ixgbe_store_reta - Write the RETA table to HW
3478 * @adapter: device handle 3493 * @adapter: device handle
3479 * 3494 *
@@ -3538,7 +3553,6 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3538 3553
3539static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) 3554static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3540{ 3555{
3541 struct ixgbe_hw *hw = &adapter->hw;
3542 u32 i, j; 3556 u32 i, j;
3543 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); 3557 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3544 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; 3558 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
@@ -3551,8 +3565,7 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3551 rss_i = 4; 3565 rss_i = 4;
3552 3566
3553 /* Fill out hash function seeds */ 3567 /* Fill out hash function seeds */
3554 for (i = 0; i < 10; i++) 3568 ixgbe_store_key(adapter);
3555 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3556 3569
3557 /* Fill out redirection table */ 3570 /* Fill out redirection table */
3558 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); 3571 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
@@ -3959,7 +3972,8 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3959 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 3972 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3960 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); 3973 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
3961 3974
3962 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) 3975 if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3976 (max_frame > IXGBE_MAX_FRAME_BUILD_SKB))
3963 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); 3977 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
3964#endif 3978#endif
3965 } 3979 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index d7ac22d7f940..bd8de6b9be71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -441,30 +441,40 @@ static int
441mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, 441mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
442 struct mlxsw_sp_prefix_usage *req_prefix_usage) 442 struct mlxsw_sp_prefix_usage *req_prefix_usage)
443{ 443{
444 struct mlxsw_sp_lpm_tree *lpm_tree; 444 struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
445 struct mlxsw_sp_lpm_tree *new_tree;
446 int err;
445 447
446 if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, 448 if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
447 &vr->lpm_tree->prefix_usage))
448 return 0; 449 return 0;
449 450
450 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, 451 new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
451 vr->proto, false); 452 vr->proto, false);
452 if (IS_ERR(lpm_tree)) { 453 if (IS_ERR(new_tree)) {
453 /* We failed to get a tree according to the required 454 /* We failed to get a tree according to the required
454 * prefix usage. However, the current tree might be still good 455 * prefix usage. However, the current tree might be still good
455 * for us if our requirement is subset of the prefixes used 456 * for us if our requirement is subset of the prefixes used
456 * in the tree. 457 * in the tree.
457 */ 458 */
458 if (mlxsw_sp_prefix_usage_subset(req_prefix_usage, 459 if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
459 &vr->lpm_tree->prefix_usage)) 460 &lpm_tree->prefix_usage))
460 return 0; 461 return 0;
461 return PTR_ERR(lpm_tree); 462 return PTR_ERR(new_tree);
462 } 463 }
463 464
464 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr); 465 /* Prevent packet loss by overwriting existing binding */
465 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); 466 vr->lpm_tree = new_tree;
467 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
468 if (err)
469 goto err_tree_bind;
470 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
471
472 return 0;
473
474err_tree_bind:
466 vr->lpm_tree = lpm_tree; 475 vr->lpm_tree = lpm_tree;
467 return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); 476 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
477 return err;
468} 478}
469 479
470static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, 480static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 074259cc8e06..9179a99563af 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1498,7 +1498,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
1498 txbuf->real_len = pkt_len; 1498 txbuf->real_len = pkt_len;
1499 1499
1500 dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off, 1500 dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off,
1501 pkt_len, DMA_TO_DEVICE); 1501 pkt_len, DMA_BIDIRECTIONAL);
1502 1502
1503 /* Build TX descriptor */ 1503 /* Build TX descriptor */
1504 txd = &tx_ring->txds[wr_idx]; 1504 txd = &tx_ring->txds[wr_idx];
@@ -1611,7 +1611,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1611 1611
1612 dma_sync_single_for_cpu(&nn->pdev->dev, 1612 dma_sync_single_for_cpu(&nn->pdev->dev,
1613 rxbuf->dma_addr + pkt_off, 1613 rxbuf->dma_addr + pkt_off,
1614 pkt_len, DMA_FROM_DEVICE); 1614 pkt_len, DMA_BIDIRECTIONAL);
1615 act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off, 1615 act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off,
1616 pkt_len); 1616 pkt_len);
1617 switch (act) { 1617 switch (act) {
@@ -2198,7 +2198,8 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
2198 nfp_net_write_mac_addr(nn); 2198 nfp_net_write_mac_addr(nn);
2199 2199
2200 nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); 2200 nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
2201 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); 2201 nn_writel(nn, NFP_NET_CFG_FLBUFSZ,
2202 nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA);
2202 2203
2203 /* Enable device */ 2204 /* Enable device */
2204 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; 2205 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index ed34196028b8..70347720fdf9 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -807,7 +807,7 @@ err_out:
807 return err; 807 return err;
808} 808}
809 809
810static int __exit sgiseeq_remove(struct platform_device *pdev) 810static int sgiseeq_remove(struct platform_device *pdev)
811{ 811{
812 struct net_device *dev = platform_get_drvdata(pdev); 812 struct net_device *dev = platform_get_drvdata(pdev);
813 struct sgiseeq_private *sp = netdev_priv(dev); 813 struct sgiseeq_private *sp = netdev_priv(dev);
@@ -822,7 +822,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
822 822
823static struct platform_driver sgiseeq_driver = { 823static struct platform_driver sgiseeq_driver = {
824 .probe = sgiseeq_probe, 824 .probe = sgiseeq_probe,
825 .remove = __exit_p(sgiseeq_remove), 825 .remove = sgiseeq_remove,
826 .driver = { 826 .driver = {
827 .name = "sgiseeq", 827 .name = "sgiseeq",
828 } 828 }
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 92e1c6d8b293..c60c2d4c646a 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -828,9 +828,7 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
828static int efx_ef10_link_piobufs(struct efx_nic *efx) 828static int efx_ef10_link_piobufs(struct efx_nic *efx)
829{ 829{
830 struct efx_ef10_nic_data *nic_data = efx->nic_data; 830 struct efx_ef10_nic_data *nic_data = efx->nic_data;
831 _MCDI_DECLARE_BUF(inbuf, 831 MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
832 max(MC_CMD_LINK_PIOBUF_IN_LEN,
833 MC_CMD_UNLINK_PIOBUF_IN_LEN));
834 struct efx_channel *channel; 832 struct efx_channel *channel;
835 struct efx_tx_queue *tx_queue; 833 struct efx_tx_queue *tx_queue;
836 unsigned int offset, index; 834 unsigned int offset, index;
@@ -839,8 +837,6 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
839 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); 837 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
840 BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); 838 BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
841 839
842 memset(inbuf, 0, sizeof(inbuf));
843
844 /* Link a buffer to each VI in the write-combining mapping */ 840 /* Link a buffer to each VI in the write-combining mapping */
845 for (index = 0; index < nic_data->n_piobufs; ++index) { 841 for (index = 0; index < nic_data->n_piobufs; ++index) {
846 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, 842 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
@@ -920,6 +916,10 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
920 return 0; 916 return 0;
921 917
922fail: 918fail:
919 /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same
920 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
921 */
922 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
923 while (index--) { 923 while (index--) {
924 MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, 924 MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
925 nic_data->pio_write_vi_base + index); 925 nic_data->pio_write_vi_base + index);
@@ -2183,7 +2183,7 @@ static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
2183 /* Modify IPv4 header if needed. */ 2183 /* Modify IPv4 header if needed. */
2184 ip->tot_len = 0; 2184 ip->tot_len = 0;
2185 ip->check = 0; 2185 ip->check = 0;
2186 ipv4_id = ip->id; 2186 ipv4_id = ntohs(ip->id);
2187 } else { 2187 } else {
2188 /* Modify IPv6 header if needed. */ 2188 /* Modify IPv6 header if needed. */
2189 struct ipv6hdr *ipv6 = ipv6_hdr(skb); 2189 struct ipv6hdr *ipv6 = ipv6_hdr(skb);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 69d2d30e5ef1..ea55abd62ec7 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -854,7 +854,7 @@ static int meth_probe(struct platform_device *pdev)
854 return 0; 854 return 0;
855} 855}
856 856
857static int __exit meth_remove(struct platform_device *pdev) 857static int meth_remove(struct platform_device *pdev)
858{ 858{
859 struct net_device *dev = platform_get_drvdata(pdev); 859 struct net_device *dev = platform_get_drvdata(pdev);
860 860
@@ -866,7 +866,7 @@ static int __exit meth_remove(struct platform_device *pdev)
866 866
867static struct platform_driver meth_driver = { 867static struct platform_driver meth_driver = {
868 .probe = meth_probe, 868 .probe = meth_probe,
869 .remove = __exit_p(meth_remove), 869 .remove = meth_remove,
870 .driver = { 870 .driver = {
871 .name = "meth", 871 .name = "meth",
872 } 872 }
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 45301cb98bc1..7074b40ebd7f 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -881,12 +881,14 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
881 info = &geneve->info; 881 info = &geneve->info;
882 } 882 }
883 883
884 rcu_read_lock();
884#if IS_ENABLED(CONFIG_IPV6) 885#if IS_ENABLED(CONFIG_IPV6)
885 if (info->mode & IP_TUNNEL_INFO_IPV6) 886 if (info->mode & IP_TUNNEL_INFO_IPV6)
886 err = geneve6_xmit_skb(skb, dev, geneve, info); 887 err = geneve6_xmit_skb(skb, dev, geneve, info);
887 else 888 else
888#endif 889#endif
889 err = geneve_xmit_skb(skb, dev, geneve, info); 890 err = geneve_xmit_skb(skb, dev, geneve, info);
891 rcu_read_unlock();
890 892
891 if (likely(!err)) 893 if (likely(!err))
892 return NETDEV_TX_OK; 894 return NETDEV_TX_OK;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 2d3cdb026a99..bc05c895d958 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -859,15 +859,22 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
859 if (ret) 859 if (ret)
860 goto out; 860 goto out;
861 861
862 memset(&device_info, 0, sizeof(device_info));
863 device_info.ring_size = ring_size;
864 device_info.num_chn = nvdev->num_chn;
865 device_info.max_num_vrss_chns = nvdev->num_chn;
866
862 ndevctx->start_remove = true; 867 ndevctx->start_remove = true;
863 rndis_filter_device_remove(hdev, nvdev); 868 rndis_filter_device_remove(hdev, nvdev);
864 869
870 /* 'nvdev' has been freed in rndis_filter_device_remove() ->
871 * netvsc_device_remove () -> free_netvsc_device().
872 * We mustn't access it before it's re-created in
873 * rndis_filter_device_add() -> netvsc_device_add().
874 */
875
865 ndev->mtu = mtu; 876 ndev->mtu = mtu;
866 877
867 memset(&device_info, 0, sizeof(device_info));
868 device_info.ring_size = ring_size;
869 device_info.num_chn = nvdev->num_chn;
870 device_info.max_num_vrss_chns = nvdev->num_chn;
871 rndis_filter_device_add(hdev, &device_info); 878 rndis_filter_device_add(hdev, &device_info);
872 879
873out: 880out:
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 6e98ede997d3..0dd510604118 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -346,7 +346,7 @@ static int ax88772_reset(struct usbnet *dev)
346 if (ret < 0) 346 if (ret < 0)
347 goto out; 347 goto out;
348 348
349 asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, 0); 349 ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, 0);
350 if (ret < 0) 350 if (ret < 0)
351 goto out; 351 goto out;
352 352
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 66fd3139be60..ea9890d61967 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -51,7 +51,7 @@ module_param(gso, bool, 0444);
51 * at once, the weight is chosen so that the EWMA will be insensitive to short- 51 * at once, the weight is chosen so that the EWMA will be insensitive to short-
52 * term, transient changes in packet size. 52 * term, transient changes in packet size.
53 */ 53 */
54DECLARE_EWMA(pkt_len, 1, 64) 54DECLARE_EWMA(pkt_len, 0, 64)
55 55
56/* With mergeable buffers we align buffer address and use the low bits to 56/* With mergeable buffers we align buffer address and use the low bits to
57 * encode its true size. Buffer size is up to 1 page so we need to align to 57 * encode its true size. Buffer size is up to 1 page so we need to align to
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index b7911994112a..e375560cc74e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2105,6 +2105,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2105 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, 2105 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2106 vxlan->cfg.port_max, true); 2106 vxlan->cfg.port_max, true);
2107 2107
2108 rcu_read_lock();
2108 if (dst->sa.sa_family == AF_INET) { 2109 if (dst->sa.sa_family == AF_INET) {
2109 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); 2110 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2110 struct rtable *rt; 2111 struct rtable *rt;
@@ -2127,7 +2128,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2127 dst_port, vni, &rt->dst, 2128 dst_port, vni, &rt->dst,
2128 rt->rt_flags); 2129 rt->rt_flags);
2129 if (err) 2130 if (err)
2130 return; 2131 goto out_unlock;
2131 } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) { 2132 } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
2132 df = htons(IP_DF); 2133 df = htons(IP_DF);
2133 } 2134 }
@@ -2166,7 +2167,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2166 dst_port, vni, ndst, 2167 dst_port, vni, ndst,
2167 rt6i_flags); 2168 rt6i_flags);
2168 if (err) 2169 if (err)
2169 return; 2170 goto out_unlock;
2170 } 2171 }
2171 2172
2172 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2173 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
@@ -2183,6 +2184,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2183 label, src_port, dst_port, !udp_sum); 2184 label, src_port, dst_port, !udp_sum);
2184#endif 2185#endif
2185 } 2186 }
2187out_unlock:
2188 rcu_read_unlock();
2186 return; 2189 return;
2187 2190
2188drop: 2191drop:
@@ -2191,6 +2194,7 @@ drop:
2191 return; 2194 return;
2192 2195
2193tx_error: 2196tx_error:
2197 rcu_read_unlock();
2194 if (err == -ELOOP) 2198 if (err == -ELOOP)
2195 dev->stats.collisions++; 2199 dev->stats.collisions++;
2196 else if (err == -ENETUNREACH) 2200 else if (err == -ENETUNREACH)
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index dd902b43f8f7..0a8e29e9a0eb 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -18,6 +18,8 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/firmware.h> 19#include <linux/firmware.h>
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/dmi.h>
22#include <linux/ctype.h>
21#include <asm/byteorder.h> 23#include <asm/byteorder.h>
22 24
23#include "core.h" 25#include "core.h"
@@ -711,6 +713,72 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
711 return 0; 713 return 0;
712} 714}
713 715
716static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data)
717{
718 struct ath10k *ar = data;
719 const char *bdf_ext;
720 const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC;
721 u8 bdf_enabled;
722 int i;
723
724 if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE)
725 return;
726
727 if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) {
728 ath10k_dbg(ar, ATH10K_DBG_BOOT,
729 "wrong smbios bdf ext type length (%d).\n",
730 hdr->length);
731 return;
732 }
733
734 bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET);
735 if (!bdf_enabled) {
736 ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n");
737 return;
738 }
739
740 /* Only one string exists (per spec) */
741 bdf_ext = (char *)hdr + hdr->length;
742
743 if (memcmp(bdf_ext, magic, strlen(magic)) != 0) {
744 ath10k_dbg(ar, ATH10K_DBG_BOOT,
745 "bdf variant magic does not match.\n");
746 return;
747 }
748
749 for (i = 0; i < strlen(bdf_ext); i++) {
750 if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) {
751 ath10k_dbg(ar, ATH10K_DBG_BOOT,
752 "bdf variant name contains non ascii chars.\n");
753 return;
754 }
755 }
756
757 /* Copy extension name without magic suffix */
758 if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic),
759 sizeof(ar->id.bdf_ext)) < 0) {
760 ath10k_dbg(ar, ATH10K_DBG_BOOT,
761 "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
762 bdf_ext);
763 return;
764 }
765
766 ath10k_dbg(ar, ATH10K_DBG_BOOT,
767 "found and validated bdf variant smbios_type 0x%x bdf %s\n",
768 ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext);
769}
770
771static int ath10k_core_check_smbios(struct ath10k *ar)
772{
773 ar->id.bdf_ext[0] = '\0';
774 dmi_walk(ath10k_core_check_bdfext, ar);
775
776 if (ar->id.bdf_ext[0] == '\0')
777 return -ENODATA;
778
779 return 0;
780}
781
714static int ath10k_download_and_run_otp(struct ath10k *ar) 782static int ath10k_download_and_run_otp(struct ath10k *ar)
715{ 783{
716 u32 result, address = ar->hw_params.patch_load_addr; 784 u32 result, address = ar->hw_params.patch_load_addr;
@@ -1020,6 +1088,23 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
1020 case ATH10K_BD_IE_BOARD: 1088 case ATH10K_BD_IE_BOARD:
1021 ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, 1089 ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
1022 boardname); 1090 boardname);
1091 if (ret == -ENOENT && ar->id.bdf_ext[0] != '\0') {
1092 /* try default bdf if variant was not found */
1093 char *s, *v = ",variant=";
1094 char boardname2[100];
1095
1096 strlcpy(boardname2, boardname,
1097 sizeof(boardname2));
1098
1099 s = strstr(boardname2, v);
1100 if (s)
1101 *s = '\0'; /* strip ",variant=%s" */
1102
1103 ret = ath10k_core_parse_bd_ie_board(ar, data,
1104 ie_len,
1105 boardname2);
1106 }
1107
1023 if (ret == -ENOENT) 1108 if (ret == -ENOENT)
1024 /* no match found, continue */ 1109 /* no match found, continue */
1025 break; 1110 break;
@@ -1057,6 +1142,9 @@ err:
1057static int ath10k_core_create_board_name(struct ath10k *ar, char *name, 1142static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
1058 size_t name_len) 1143 size_t name_len)
1059{ 1144{
1145 /* strlen(',variant=') + strlen(ar->id.bdf_ext) */
1146 char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
1147
1060 if (ar->id.bmi_ids_valid) { 1148 if (ar->id.bmi_ids_valid) {
1061 scnprintf(name, name_len, 1149 scnprintf(name, name_len,
1062 "bus=%s,bmi-chip-id=%d,bmi-board-id=%d", 1150 "bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
@@ -1066,12 +1154,15 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
1066 goto out; 1154 goto out;
1067 } 1155 }
1068 1156
1157 if (ar->id.bdf_ext[0] != '\0')
1158 scnprintf(variant, sizeof(variant), ",variant=%s",
1159 ar->id.bdf_ext);
1160
1069 scnprintf(name, name_len, 1161 scnprintf(name, name_len,
1070 "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x", 1162 "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
1071 ath10k_bus_str(ar->hif.bus), 1163 ath10k_bus_str(ar->hif.bus),
1072 ar->id.vendor, ar->id.device, 1164 ar->id.vendor, ar->id.device,
1073 ar->id.subsystem_vendor, ar->id.subsystem_device); 1165 ar->id.subsystem_vendor, ar->id.subsystem_device, variant);
1074
1075out: 1166out:
1076 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name); 1167 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name);
1077 1168
@@ -2128,6 +2219,10 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
2128 goto err_free_firmware_files; 2219 goto err_free_firmware_files;
2129 } 2220 }
2130 2221
2222 ret = ath10k_core_check_smbios(ar);
2223 if (ret)
2224 ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n");
2225
2131 ret = ath10k_core_fetch_board_file(ar); 2226 ret = ath10k_core_fetch_board_file(ar);
2132 if (ret) { 2227 if (ret) {
2133 ath10k_err(ar, "failed to fetch board file: %d\n", ret); 2228 ath10k_err(ar, "failed to fetch board file: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 757242ef52ac..88d14be7fcce 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -69,6 +69,23 @@
69#define ATH10K_NAPI_BUDGET 64 69#define ATH10K_NAPI_BUDGET 64
70#define ATH10K_NAPI_QUOTA_LIMIT 60 70#define ATH10K_NAPI_QUOTA_LIMIT 60
71 71
72/* SMBIOS type containing Board Data File Name Extension */
73#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
74
75/* SMBIOS type structure length (excluding strings-set) */
76#define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9
77
78/* Offset pointing to Board Data File Name Extension */
79#define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8
80
81/* Board Data File Name Extension string length.
82 * String format: BDF_<Customer ID>_<Extension>\0
83 */
84#define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20
85
86/* The magic used by QCA spec */
87#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_"
88
72struct ath10k; 89struct ath10k;
73 90
74enum ath10k_bus { 91enum ath10k_bus {
@@ -798,6 +815,8 @@ struct ath10k {
798 bool bmi_ids_valid; 815 bool bmi_ids_valid;
799 u8 bmi_board_id; 816 u8 bmi_board_id;
800 u8 bmi_chip_id; 817 u8 bmi_chip_id;
818
819 char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH];
801 } id; 820 } id;
802 821
803 int fw_api; 822 int fw_api;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 67fedb61fcc0..979800c6f57f 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1252,7 +1252,7 @@ struct ath5k_statistics {
1252#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */ 1252#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */
1253#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */ 1253#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */
1254 1254
1255DECLARE_EWMA(beacon_rssi, 1024, 8) 1255DECLARE_EWMA(beacon_rssi, 10, 8)
1256 1256
1257/* Driver state associated with an instance of a device */ 1257/* Driver state associated with an instance of a device */
1258struct ath5k_hw { 1258struct ath5k_hw {
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0889fc81ce9e..50c219fb1a52 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3056,6 +3056,7 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
3056static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) 3056static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
3057{ 3057{
3058 struct hwsim_new_radio_params param = { 0 }; 3058 struct hwsim_new_radio_params param = { 0 };
3059 const char *hwname = NULL;
3059 3060
3060 param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; 3061 param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
3061 param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; 3062 param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
@@ -3069,8 +3070,14 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
3069 if (info->attrs[HWSIM_ATTR_NO_VIF]) 3070 if (info->attrs[HWSIM_ATTR_NO_VIF])
3070 param.no_vif = true; 3071 param.no_vif = true;
3071 3072
3072 if (info->attrs[HWSIM_ATTR_RADIO_NAME]) 3073 if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
3073 param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]); 3074 hwname = kasprintf(GFP_KERNEL, "%.*s",
3075 nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
3076 (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
3077 if (!hwname)
3078 return -ENOMEM;
3079 param.hwname = hwname;
3080 }
3074 3081
3075 if (info->attrs[HWSIM_ATTR_USE_CHANCTX]) 3082 if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
3076 param.use_chanctx = true; 3083 param.use_chanctx = true;
@@ -3098,11 +3105,15 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
3098 s64 idx = -1; 3105 s64 idx = -1;
3099 const char *hwname = NULL; 3106 const char *hwname = NULL;
3100 3107
3101 if (info->attrs[HWSIM_ATTR_RADIO_ID]) 3108 if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
3102 idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); 3109 idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
3103 else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) 3110 } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
3104 hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]); 3111 hwname = kasprintf(GFP_KERNEL, "%.*s",
3105 else 3112 nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
3113 (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
3114 if (!hwname)
3115 return -ENOMEM;
3116 } else
3106 return -EINVAL; 3117 return -EINVAL;
3107 3118
3108 spin_lock_bh(&hwsim_radio_lock); 3119 spin_lock_bh(&hwsim_radio_lock);
@@ -3111,7 +3122,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
3111 if (data->idx != idx) 3122 if (data->idx != idx)
3112 continue; 3123 continue;
3113 } else { 3124 } else {
3114 if (strcmp(hwname, wiphy_name(data->hw->wiphy))) 3125 if (!hwname ||
3126 strcmp(hwname, wiphy_name(data->hw->wiphy)))
3115 continue; 3127 continue;
3116 } 3128 }
3117 3129
@@ -3122,10 +3134,12 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
3122 spin_unlock_bh(&hwsim_radio_lock); 3134 spin_unlock_bh(&hwsim_radio_lock);
3123 mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), 3135 mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
3124 info); 3136 info);
3137 kfree(hwname);
3125 return 0; 3138 return 0;
3126 } 3139 }
3127 spin_unlock_bh(&hwsim_radio_lock); 3140 spin_unlock_bh(&hwsim_radio_lock);
3128 3141
3142 kfree(hwname);
3129 return -ENODEV; 3143 return -ENODEV;
3130} 3144}
3131 3145
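The hwsim change stops using the netlink attribute payload in place and copies it instead; the payload is only nla_len() bytes and is not guaranteed to be NUL-terminated, so the copy is bounded with a "%.*s" format. A small userspace sketch of the same bounded-copy idiom (kasprintf is kernel-only, so snprintf stands in; the payload bytes are made up):

#include <stdio.h>
#include <stdlib.h>

/* Copy a length-delimited, possibly non-NUL-terminated buffer into a
 * freshly allocated C string, the way the patch does with kasprintf("%.*s").
 */
static char *copy_bounded(const char *data, int len)
{
	int n = snprintf(NULL, 0, "%.*s", len, data);	/* measure first */
	char *s = malloc(n + 1);

	if (s)
		snprintf(s, n + 1, "%.*s", len, data);
	return s;
}

int main(void)
{
	/* Deliberately not NUL-terminated within the first 6 bytes. */
	const char payload[] = { 'h', 'w', 's', 'i', 'm', '0', 'X', 'Y' };
	char *name = copy_bounded(payload, 6);

	printf("radio name: %s\n", name ? name : "(alloc failed)");
	free(name);
	return 0;
}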
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 26869b3bef45..340787894c69 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -257,7 +257,7 @@ struct link_qual {
257 int tx_failed; 257 int tx_failed;
258}; 258};
259 259
260DECLARE_EWMA(rssi, 1024, 8) 260DECLARE_EWMA(rssi, 10, 8)
261 261
262/* 262/*
263 * Antenna settings about the currently active link. 263 * Antenna settings about the currently active link.
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index e8c5dddc54ba..3c4c58b9fe76 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -39,7 +39,7 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
39 unsigned long flags; 39 unsigned long flags;
40 bool found; 40 bool found;
41 41
42 new = kmalloc(sizeof(*entry), GFP_KERNEL); 42 new = kmalloc(sizeof(*entry), GFP_ATOMIC);
43 if (!new) 43 if (!new)
44 return; 44 return;
45 45
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index bb854f92f5a5..d2d7cd9145b1 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -492,24 +492,31 @@ static int backend_create_xenvif(struct backend_info *be)
492 492
493static void backend_disconnect(struct backend_info *be) 493static void backend_disconnect(struct backend_info *be)
494{ 494{
495 if (be->vif) { 495 struct xenvif *vif = be->vif;
496
497 if (vif) {
496 unsigned int queue_index; 498 unsigned int queue_index;
499 struct xenvif_queue *queues;
497 500
498 xen_unregister_watchers(be->vif); 501 xen_unregister_watchers(vif);
499#ifdef CONFIG_DEBUG_FS 502#ifdef CONFIG_DEBUG_FS
500 xenvif_debugfs_delif(be->vif); 503 xenvif_debugfs_delif(vif);
501#endif /* CONFIG_DEBUG_FS */ 504#endif /* CONFIG_DEBUG_FS */
502 xenvif_disconnect_data(be->vif); 505 xenvif_disconnect_data(vif);
503 for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) 506 for (queue_index = 0;
504 xenvif_deinit_queue(&be->vif->queues[queue_index]); 507 queue_index < vif->num_queues;
508 ++queue_index)
509 xenvif_deinit_queue(&vif->queues[queue_index]);
510
511 spin_lock(&vif->lock);
512 queues = vif->queues;
513 vif->num_queues = 0;
514 vif->queues = NULL;
515 spin_unlock(&vif->lock);
505 516
506 spin_lock(&be->vif->lock); 517 vfree(queues);
507 vfree(be->vif->queues);
508 be->vif->num_queues = 0;
509 be->vif->queues = NULL;
510 spin_unlock(&be->vif->lock);
511 518
512 xenvif_disconnect_ctrl(be->vif); 519 xenvif_disconnect_ctrl(vif);
513 } 520 }
514} 521}
515 522
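The xenbus.c fix detaches vif->queues while holding vif->lock and only vfree()s the array after the lock is dropped, since freeing should not happen under a spinlock. A rough userspace analogue of that detach-under-the-lock, free-outside-it pattern (a pthread mutex stands in for the spinlock, and the queue array is just an int buffer):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct vif {
	pthread_mutex_t lock;
	unsigned int num_queues;
	int *queues;		/* stand-in for the real queue array */
};

static void disconnect(struct vif *vif)
{
	int *queues;

	/* Detach the array while holding the lock... */
	pthread_mutex_lock(&vif->lock);
	queues = vif->queues;
	vif->num_queues = 0;
	vif->queues = NULL;
	pthread_mutex_unlock(&vif->lock);

	/* ...and release it only after the lock is dropped. */
	free(queues);
}

int main(void)
{
	struct vif vif = { .lock = PTHREAD_MUTEX_INITIALIZER };

	vif.num_queues = 4;
	vif.queues = calloc(vif.num_queues, sizeof(*vif.queues));
	disconnect(&vif);
	printf("queues detached and freed, num_queues=%u\n", vif.num_queues);
	return 0;
}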
diff --git a/include/linux/average.h b/include/linux/average.h
index d04aa58280de..7ddaf340d2ac 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -1,45 +1,66 @@
1#ifndef _LINUX_AVERAGE_H 1#ifndef _LINUX_AVERAGE_H
2#define _LINUX_AVERAGE_H 2#define _LINUX_AVERAGE_H
3 3
4/* Exponentially weighted moving average (EWMA) */ 4/*
5 * Exponentially weighted moving average (EWMA)
6 *
7 * This implements a fixed-precision EWMA algorithm, with both the
8 * precision and fall-off coefficient determined at compile-time
 9 * and built into the generated helper functions.
10 *
11 * The first argument to the macro is the name that will be used
12 * for the struct and helper functions.
13 *
14 * The second argument, the precision, expresses how many bits are
15 * used for the fractional part of the fixed-precision values.
16 *
17 * The third argument, the weight reciprocal, determines how the
18 * new values will be weighed vs. the old state, new values will
19 * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note
20 * that this parameter must be a power of two for efficiency.
21 */
5 22
6#define DECLARE_EWMA(name, _factor, _weight) \ 23#define DECLARE_EWMA(name, _precision, _weight_rcp) \
7 struct ewma_##name { \ 24 struct ewma_##name { \
8 unsigned long internal; \ 25 unsigned long internal; \
9 }; \ 26 }; \
10 static inline void ewma_##name##_init(struct ewma_##name *e) \ 27 static inline void ewma_##name##_init(struct ewma_##name *e) \
11 { \ 28 { \
12 BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ 29 BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
13 BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ 30 BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
14 BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ 31 /* \
15 BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ 32 * Even if you want to feed it just 0/1 you should have \
33 * some bits for the non-fractional part... \
34 */ \
35 BUILD_BUG_ON((_precision) > 30); \
36 BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
16 e->internal = 0; \ 37 e->internal = 0; \
17 } \ 38 } \
18 static inline unsigned long \ 39 static inline unsigned long \
19 ewma_##name##_read(struct ewma_##name *e) \ 40 ewma_##name##_read(struct ewma_##name *e) \
20 { \ 41 { \
21 BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ 42 BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
22 BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ 43 BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
23 BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ 44 BUILD_BUG_ON((_precision) > 30); \
24 BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ 45 BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
25 return e->internal >> ilog2(_factor); \ 46 return e->internal >> (_precision); \
26 } \ 47 } \
27 static inline void ewma_##name##_add(struct ewma_##name *e, \ 48 static inline void ewma_##name##_add(struct ewma_##name *e, \
28 unsigned long val) \ 49 unsigned long val) \
29 { \ 50 { \
30 unsigned long internal = ACCESS_ONCE(e->internal); \ 51 unsigned long internal = ACCESS_ONCE(e->internal); \
31 unsigned long weight = ilog2(_weight); \ 52 unsigned long weight_rcp = ilog2(_weight_rcp); \
32 unsigned long factor = ilog2(_factor); \ 53 unsigned long precision = _precision; \
33 \ 54 \
34 BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ 55 BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
35 BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ 56 BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
36 BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ 57 BUILD_BUG_ON((_precision) > 30); \
37 BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ 58 BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
38 \ 59 \
39 ACCESS_ONCE(e->internal) = internal ? \ 60 ACCESS_ONCE(e->internal) = internal ? \
40 (((internal << weight) - internal) + \ 61 (((internal << weight_rcp) - internal) + \
41 (val << factor)) >> weight : \ 62 (val << precision)) >> weight_rcp : \
42 (val << factor); \ 63 (val << precision); \
43 } 64 }
44 65
45#endif /* _LINUX_AVERAGE_H */ 66#endif /* _LINUX_AVERAGE_H */
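With the rename, callers pass the number of fractional bits directly (the ath5k and rt2x00 hunks above and the batman-adv and mac80211 hunks below change 1024 to 10, i.e. 1 << 10, and 16 to 4) plus the reciprocal of the new-sample weight, which must stay a power of two so the division reduces to a shift. A plain C sketch of the same fixed-point update outside the macro, with precision 10 and weight_rcp 8 chosen only as example values:

#include <stdio.h>

#define EWMA_PRECISION	10	/* fractional bits (example value) */
#define EWMA_WEIGHT_RCP	8	/* new sample weight = 1/8 (example value) */

static unsigned long ewma_internal;

/* Same arithmetic as the DECLARE_EWMA() helpers: keep the average in
 * fixed point with EWMA_PRECISION fractional bits and fold each new
 * sample in with weight 1/EWMA_WEIGHT_RCP.
 */
static void ewma_add(unsigned long val)
{
	unsigned long weight = 3;			/* ilog2(8) */
	unsigned long scaled = val << EWMA_PRECISION;

	ewma_internal = ewma_internal ?
		(((ewma_internal << weight) - ewma_internal) + scaled) >> weight :
		scaled;
}

static unsigned long ewma_read(void)
{
	return ewma_internal >> EWMA_PRECISION;
}

int main(void)
{
	unsigned long samples[] = { 100, 100, 10, 10, 10 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		ewma_add(samples[i]);
		printf("sample %lu -> average %lu\n", samples[i], ewma_read());
	}
	return 0;
}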
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index e965e5090d96..a858bcb6220b 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -109,7 +109,7 @@ static inline void mlx4_u64_to_mac(u8 *addr, u64 mac)
109 int i; 109 int i;
110 110
111 for (i = ETH_ALEN; i > 0; i--) { 111 for (i = ETH_ALEN; i > 0; i--) {
112 addr[i - 1] = mac && 0xFF; 112 addr[i - 1] = mac & 0xFF;
113 mac >>= 8; 113 mac >>= 8;
114 } 114 }
115} 115}
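The mlx4 one-liner is easy to gloss over: '&&' is logical AND, so 'mac && 0xFF' evaluates to 0 or 1, while 'mac & 0xFF' extracts the low byte the loop actually wants to store. A two-line demonstration (the MAC value is an arbitrary example):

#include <stdio.h>

int main(void)
{
	unsigned long long mac = 0x0242ac110002ULL;	/* example MAC packed in a u64 */

	printf("mac && 0xFF = %d (boolean result)\n", mac && 0xFF);
	printf("mac &  0xFF = 0x%02llx (low byte)\n", mac & 0xFF);
	return 0;
}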
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f40f0ab3847a..97456b2539e4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -330,6 +330,7 @@ struct napi_struct {
330 330
331enum { 331enum {
332 NAPI_STATE_SCHED, /* Poll is scheduled */ 332 NAPI_STATE_SCHED, /* Poll is scheduled */
333 NAPI_STATE_MISSED, /* reschedule a napi */
333 NAPI_STATE_DISABLE, /* Disable pending */ 334 NAPI_STATE_DISABLE, /* Disable pending */
334 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ 335 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
335 NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ 336 NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
@@ -338,12 +339,13 @@ enum {
338}; 339};
339 340
340enum { 341enum {
341 NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED), 342 NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
342 NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE), 343 NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
343 NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC), 344 NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
344 NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED), 345 NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
345 NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL), 346 NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
346 NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL), 347 NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
348 NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
347}; 349};
348 350
349enum gro_result { 351enum gro_result {
@@ -414,20 +416,7 @@ static inline bool napi_disable_pending(struct napi_struct *n)
414 return test_bit(NAPI_STATE_DISABLE, &n->state); 416 return test_bit(NAPI_STATE_DISABLE, &n->state);
415} 417}
416 418
417/** 419bool napi_schedule_prep(struct napi_struct *n);
418 * napi_schedule_prep - check if NAPI can be scheduled
419 * @n: NAPI context
420 *
421 * Test if NAPI routine is already running, and if not mark
422 * it as running. This is used as a condition variable to
423 * insure only one NAPI poll instance runs. We also make
424 * sure there is no pending NAPI disable.
425 */
426static inline bool napi_schedule_prep(struct napi_struct *n)
427{
428 return !napi_disable_pending(n) &&
429 !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
430}
431 420
432/** 421/**
433 * napi_schedule - schedule NAPI poll 422 * napi_schedule - schedule NAPI poll
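The flag enum is now written with BIT(); in the kernel BIT(nr) expands to (1UL << (nr)), so NAPIF_STATE_MISSED is simply the bit at position NAPI_STATE_MISSED. A trivial stand-in for readers without the kernel headers:

#include <stdio.h>

#define BIT(nr)	(1UL << (nr))	/* same shape as the kernel's BIT() */

enum { STATE_SCHED, STATE_MISSED, STATE_DISABLE };

int main(void)
{
	printf("SCHED=%#lx MISSED=%#lx DISABLE=%#lx\n",
	       BIT(STATE_SCHED), BIT(STATE_MISSED), BIT(STATE_DISABLE));
	return 0;
}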
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index ac84686aaafb..2aa8a9d80fbe 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -988,9 +988,9 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
988 const struct nlattr *nla, u32 objtype, 988 const struct nlattr *nla, u32 objtype,
989 u8 genmask); 989 u8 genmask);
990 990
991int nft_obj_notify(struct net *net, struct nft_table *table, 991void nft_obj_notify(struct net *net, struct nft_table *table,
992 struct nft_object *obj, u32 portid, u32 seq, 992 struct nft_object *obj, u32 portid, u32 seq,
993 int event, int family, int report, gfp_t gfp); 993 int event, int family, int report, gfp_t gfp);
994 994
995/** 995/**
996 * struct nft_object_type - stateful object type 996 * struct nft_object_type - stateful object type
diff --git a/include/net/sock.h b/include/net/sock.h
index 9ccefa5c5487..5e5997654db6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1526,6 +1526,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1526void sk_free(struct sock *sk); 1526void sk_free(struct sock *sk);
1527void sk_destruct(struct sock *sk); 1527void sk_destruct(struct sock *sk);
1528struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); 1528struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1529void sk_free_unlock_clone(struct sock *sk);
1529 1530
1530struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 1531struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1531 gfp_t priority); 1532 gfp_t priority);
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 593f586545eb..39123c06a566 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -119,6 +119,7 @@ enum rxrpc_recvmsg_trace {
119 rxrpc_recvmsg_full, 119 rxrpc_recvmsg_full,
120 rxrpc_recvmsg_hole, 120 rxrpc_recvmsg_hole,
121 rxrpc_recvmsg_next, 121 rxrpc_recvmsg_next,
122 rxrpc_recvmsg_requeue,
122 rxrpc_recvmsg_return, 123 rxrpc_recvmsg_return,
123 rxrpc_recvmsg_terminal, 124 rxrpc_recvmsg_terminal,
124 rxrpc_recvmsg_to_be_accepted, 125 rxrpc_recvmsg_to_be_accepted,
@@ -277,6 +278,7 @@ enum rxrpc_congest_change {
277 EM(rxrpc_recvmsg_full, "FULL") \ 278 EM(rxrpc_recvmsg_full, "FULL") \
278 EM(rxrpc_recvmsg_hole, "HOLE") \ 279 EM(rxrpc_recvmsg_hole, "HOLE") \
279 EM(rxrpc_recvmsg_next, "NEXT") \ 280 EM(rxrpc_recvmsg_next, "NEXT") \
281 EM(rxrpc_recvmsg_requeue, "REQU") \
280 EM(rxrpc_recvmsg_return, "RETN") \ 282 EM(rxrpc_recvmsg_return, "RETN") \
281 EM(rxrpc_recvmsg_terminal, "TERM") \ 283 EM(rxrpc_recvmsg_terminal, "TERM") \
282 EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \ 284 EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 3fc6e39b223e..796b68d00119 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -33,7 +33,7 @@
33 * - out of bounds or malformed jumps 33 * - out of bounds or malformed jumps
34 * The second pass is all possible path descent from the 1st insn. 34 * The second pass is all possible path descent from the 1st insn.
 35 * Since it's analyzing all paths through the program, the length of the 35 * Since it's analyzing all paths through the program, the length of the
36 * analysis is limited to 32k insn, which may be hit even if total number of 36 * analysis is limited to 64k insn, which may be hit even if total number of
 37 * insn is less than 4K, but there are too many branches that change stack/regs. 37 * insn is less than 4K, but there are too many branches that change stack/regs.
38 * Number of 'branches to be analyzed' is limited to 1k 38 * Number of 'branches to be analyzed' is limited to 1k
39 * 39 *
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index ead18ca836de..11a23fd6e1a0 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -239,8 +239,10 @@ err_unlock:
239 spin_unlock_bh(&chain->lock); 239 spin_unlock_bh(&chain->lock);
240 240
241err: 241err:
242 if (!ret) 242 if (!ret) {
243 kfree(frag_entry_new); 243 kfree(frag_entry_new);
244 kfree_skb(skb);
245 }
244 246
245 return ret; 247 return ret;
246} 248}
@@ -313,7 +315,7 @@ free:
313 * 315 *
314 * There are three possible outcomes: 1) Packet is merged: Return true and 316 * There are three possible outcomes: 1) Packet is merged: Return true and
315 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb 317 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
316 * to NULL; 3) Error: Return false and leave skb as is. 318 * to NULL; 3) Error: Return false and free skb.
317 * 319 *
 318 * Return: true when packet is merged or buffered, false when skb is not not 320 * Return: true when packet is merged or buffered, false when skb is not not
319 * used. 321 * used.
@@ -338,9 +340,9 @@ bool batadv_frag_skb_buffer(struct sk_buff **skb,
338 goto out_err; 340 goto out_err;
339 341
340out: 342out:
341 *skb = skb_out;
342 ret = true; 343 ret = true;
343out_err: 344out_err:
345 *skb = skb_out;
344 return ret; 346 return ret;
345} 347}
346 348
@@ -499,6 +501,12 @@ int batadv_frag_send_packet(struct sk_buff *skb,
499 501
500 /* Eat and send fragments from the tail of skb */ 502 /* Eat and send fragments from the tail of skb */
501 while (skb->len > max_fragment_size) { 503 while (skb->len > max_fragment_size) {
504 /* The initial check in this function should cover this case */
505 if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) {
506 ret = -EINVAL;
507 goto put_primary_if;
508 }
509
502 skb_fragment = batadv_frag_create(skb, &frag_header, mtu); 510 skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
503 if (!skb_fragment) { 511 if (!skb_fragment) {
504 ret = -ENOMEM; 512 ret = -ENOMEM;
@@ -515,12 +523,6 @@ int batadv_frag_send_packet(struct sk_buff *skb,
515 } 523 }
516 524
517 frag_header.no++; 525 frag_header.no++;
518
519 /* The initial check in this function should cover this case */
520 if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
521 ret = -EINVAL;
522 goto put_primary_if;
523 }
524 } 526 }
525 527
526 /* Make room for the fragment header. */ 528 /* Make room for the fragment header. */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 8f64a5c01345..66b25e410a41 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -402,7 +402,7 @@ struct batadv_gw_node {
402 struct rcu_head rcu; 402 struct rcu_head rcu;
403}; 403};
404 404
405DECLARE_EWMA(throughput, 1024, 8) 405DECLARE_EWMA(throughput, 10, 8)
406 406
407/** 407/**
408 * struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor 408 * struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 6bfac29318f2..902af6ba481c 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -186,8 +186,9 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb,
186 /* Do not flood unicast traffic to ports that turn it off */ 186 /* Do not flood unicast traffic to ports that turn it off */
187 if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD)) 187 if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD))
188 continue; 188 continue;
189 /* Do not flood if mc off, except for traffic we originate */
189 if (pkt_type == BR_PKT_MULTICAST && 190 if (pkt_type == BR_PKT_MULTICAST &&
190 !(p->flags & BR_MCAST_FLOOD)) 191 !(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
191 continue; 192 continue;
192 193
193 /* Do not flood to ports that enable proxy ARP */ 194 /* Do not flood to ports that enable proxy ARP */
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 62e68c0dc687..b838213c408e 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -997,10 +997,10 @@ err_vlan_add:
997 RCU_INIT_POINTER(p->vlgrp, NULL); 997 RCU_INIT_POINTER(p->vlgrp, NULL);
998 synchronize_rcu(); 998 synchronize_rcu();
999 vlan_tunnel_deinit(vg); 999 vlan_tunnel_deinit(vg);
1000err_vlan_enabled:
1001err_tunnel_init: 1000err_tunnel_init:
1002 rhashtable_destroy(&vg->vlan_hash); 1001 rhashtable_destroy(&vg->vlan_hash);
1003err_rhtbl: 1002err_rhtbl:
1003err_vlan_enabled:
1004 kfree(vg); 1004 kfree(vg);
1005 1005
1006 goto out; 1006 goto out;
diff --git a/net/core/dev.c b/net/core/dev.c
index 304f2deae5f9..8637b2b71f3d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1698,27 +1698,54 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1698static struct static_key netstamp_needed __read_mostly; 1698static struct static_key netstamp_needed __read_mostly;
1699#ifdef HAVE_JUMP_LABEL 1699#ifdef HAVE_JUMP_LABEL
1700static atomic_t netstamp_needed_deferred; 1700static atomic_t netstamp_needed_deferred;
1701static atomic_t netstamp_wanted;
1701static void netstamp_clear(struct work_struct *work) 1702static void netstamp_clear(struct work_struct *work)
1702{ 1703{
1703 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 1704 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1705 int wanted;
1704 1706
1705 while (deferred--) 1707 wanted = atomic_add_return(deferred, &netstamp_wanted);
1706 static_key_slow_dec(&netstamp_needed); 1708 if (wanted > 0)
1709 static_key_enable(&netstamp_needed);
1710 else
1711 static_key_disable(&netstamp_needed);
1707} 1712}
1708static DECLARE_WORK(netstamp_work, netstamp_clear); 1713static DECLARE_WORK(netstamp_work, netstamp_clear);
1709#endif 1714#endif
1710 1715
1711void net_enable_timestamp(void) 1716void net_enable_timestamp(void)
1712{ 1717{
1718#ifdef HAVE_JUMP_LABEL
1719 int wanted;
1720
1721 while (1) {
1722 wanted = atomic_read(&netstamp_wanted);
1723 if (wanted <= 0)
1724 break;
1725 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
1726 return;
1727 }
1728 atomic_inc(&netstamp_needed_deferred);
1729 schedule_work(&netstamp_work);
1730#else
1713 static_key_slow_inc(&netstamp_needed); 1731 static_key_slow_inc(&netstamp_needed);
1732#endif
1714} 1733}
1715EXPORT_SYMBOL(net_enable_timestamp); 1734EXPORT_SYMBOL(net_enable_timestamp);
1716 1735
1717void net_disable_timestamp(void) 1736void net_disable_timestamp(void)
1718{ 1737{
1719#ifdef HAVE_JUMP_LABEL 1738#ifdef HAVE_JUMP_LABEL
1720 /* net_disable_timestamp() can be called from non process context */ 1739 int wanted;
1721 atomic_inc(&netstamp_needed_deferred); 1740
1741 while (1) {
1742 wanted = atomic_read(&netstamp_wanted);
1743 if (wanted <= 1)
1744 break;
1745 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
1746 return;
1747 }
1748 atomic_dec(&netstamp_needed_deferred);
1722 schedule_work(&netstamp_work); 1749 schedule_work(&netstamp_work);
1723#else 1750#else
1724 static_key_slow_dec(&netstamp_needed); 1751 static_key_slow_dec(&netstamp_needed);
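The timestamp rework keeps a netstamp_wanted count that the fast path only bumps when it is already positive, using a read/compare-and-swap loop; otherwise the heavy static-key toggle is deferred to the workqueue. A C11 sketch of just that counting discipline (the static key and workqueue have no userspace equivalent here, so only the CAS loop is shown):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int wanted;

/* Try to bump the count only if it is already positive; otherwise the
 * caller must fall back to the deferred (work-queue) path.
 */
static int try_inc_if_positive(void)
{
	int cur = atomic_load(&wanted);

	while (cur > 0) {
		if (atomic_compare_exchange_weak(&wanted, &cur, cur + 1))
			return 1;	/* fast path taken */
		/* cur was reloaded by the failed CAS; loop and retry */
	}
	return 0;		/* count was <= 0, defer the heavy work */
}

int main(void)
{
	printf("first enable deferred: %s\n", try_inc_if_positive() ? "no" : "yes");
	atomic_store(&wanted, 1);	/* pretend the deferred work already ran */
	printf("second enable deferred: %s\n", try_inc_if_positive() ? "no" : "yes");
	printf("wanted = %d\n", atomic_load(&wanted));
	return 0;
}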
@@ -4884,6 +4911,39 @@ void __napi_schedule(struct napi_struct *n)
4884EXPORT_SYMBOL(__napi_schedule); 4911EXPORT_SYMBOL(__napi_schedule);
4885 4912
4886/** 4913/**
4914 * napi_schedule_prep - check if napi can be scheduled
4915 * @n: napi context
4916 *
4917 * Test if NAPI routine is already running, and if not mark
4918 * it as running. This is used as a condition variable
4919 * insure only one NAPI poll instance runs. We also make
4920 * sure there is no pending NAPI disable.
4921 */
4922bool napi_schedule_prep(struct napi_struct *n)
4923{
4924 unsigned long val, new;
4925
4926 do {
4927 val = READ_ONCE(n->state);
4928 if (unlikely(val & NAPIF_STATE_DISABLE))
4929 return false;
4930 new = val | NAPIF_STATE_SCHED;
4931
4932 /* Sets STATE_MISSED bit if STATE_SCHED was already set
4933 * This was suggested by Alexander Duyck, as compiler
4934 * emits better code than :
4935 * if (val & NAPIF_STATE_SCHED)
4936 * new |= NAPIF_STATE_MISSED;
4937 */
4938 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
4939 NAPIF_STATE_MISSED;
4940 } while (cmpxchg(&n->state, val, new) != val);
4941
4942 return !(val & NAPIF_STATE_SCHED);
4943}
4944EXPORT_SYMBOL(napi_schedule_prep);
4945
4946/**
4887 * __napi_schedule_irqoff - schedule for receive 4947 * __napi_schedule_irqoff - schedule for receive
4888 * @n: entry to schedule 4948 * @n: entry to schedule
4889 * 4949 *
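The new napi_schedule_prep() sets NAPIF_STATE_MISSED without a branch: because both flags are single bits, '(val & SCHED) / SCHED' is 0 or 1, and multiplying by MISSED turns that into either 0 or the MISSED mask. A standalone check of that identity (flag positions here are arbitrary examples):

#include <assert.h>
#include <stdio.h>

#define STATE_SCHED	(1UL << 0)
#define STATE_MISSED	(1UL << 1)

/* Branch-free: contributes STATE_MISSED exactly when STATE_SCHED is set. */
static unsigned long missed_if_sched(unsigned long val)
{
	return (val & STATE_SCHED) / STATE_SCHED * STATE_MISSED;
}

int main(void)
{
	for (unsigned long val = 0; val < 4; val++) {
		unsigned long branchy = (val & STATE_SCHED) ? STATE_MISSED : 0;

		assert(missed_if_sched(val) == branchy);
		printf("val=%lu -> extra bits %#lx\n", val, missed_if_sched(val));
	}
	return 0;
}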
@@ -4897,7 +4957,7 @@ EXPORT_SYMBOL(__napi_schedule_irqoff);
4897 4957
4898bool napi_complete_done(struct napi_struct *n, int work_done) 4958bool napi_complete_done(struct napi_struct *n, int work_done)
4899{ 4959{
4900 unsigned long flags; 4960 unsigned long flags, val, new;
4901 4961
4902 /* 4962 /*
4903 * 1) Don't let napi dequeue from the cpu poll list 4963 * 1) Don't let napi dequeue from the cpu poll list
@@ -4927,7 +4987,27 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
4927 list_del_init(&n->poll_list); 4987 list_del_init(&n->poll_list);
4928 local_irq_restore(flags); 4988 local_irq_restore(flags);
4929 } 4989 }
4930 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state)); 4990
4991 do {
4992 val = READ_ONCE(n->state);
4993
4994 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
4995
4996 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
4997
4998 /* If STATE_MISSED was set, leave STATE_SCHED set,
4999 * because we will call napi->poll() one more time.
5000 * This C code was suggested by Alexander Duyck to help gcc.
5001 */
5002 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
5003 NAPIF_STATE_SCHED;
5004 } while (cmpxchg(&n->state, val, new) != val);
5005
5006 if (unlikely(val & NAPIF_STATE_MISSED)) {
5007 __napi_schedule(n);
5008 return false;
5009 }
5010
4931 return true; 5011 return true;
4932} 5012}
4933EXPORT_SYMBOL(napi_complete_done); 5013EXPORT_SYMBOL(napi_complete_done);
@@ -4953,6 +5033,16 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
4953{ 5033{
4954 int rc; 5034 int rc;
4955 5035
5036 /* Busy polling means there is a high chance device driver hard irq
5037 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
5038 * set in napi_schedule_prep().
5039 * Since we are about to call napi->poll() once more, we can safely
5040 * clear NAPI_STATE_MISSED.
5041 *
5042 * Note: x86 could use a single "lock and ..." instruction
5043 * to perform these two clear_bit()
5044 */
5045 clear_bit(NAPI_STATE_MISSED, &napi->state);
4956 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 5046 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
4957 5047
4958 local_bh_disable(); 5048 local_bh_disable();
@@ -5088,8 +5178,13 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
5088 struct napi_struct *napi; 5178 struct napi_struct *napi;
5089 5179
5090 napi = container_of(timer, struct napi_struct, timer); 5180 napi = container_of(timer, struct napi_struct, timer);
5091 if (napi->gro_list) 5181
5092 napi_schedule_irqoff(napi); 5182 /* Note : we use a relaxed variant of napi_schedule_prep() not setting
5183 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
5184 */
5185 if (napi->gro_list && !napi_disable_pending(napi) &&
5186 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
5187 __napi_schedule_irqoff(napi);
5093 5188
5094 return HRTIMER_NORESTART; 5189 return HRTIMER_NORESTART;
5095} 5190}
diff --git a/net/core/sock.c b/net/core/sock.c
index e7d74940e863..f6fd79f33097 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1539,11 +1539,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1539 is_charged = sk_filter_charge(newsk, filter); 1539 is_charged = sk_filter_charge(newsk, filter);
1540 1540
1541 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 1541 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1542 /* It is still raw copy of parent, so invalidate 1542 sk_free_unlock_clone(newsk);
1543 * destructor and make plain sk_free() */
1544 newsk->sk_destruct = NULL;
1545 bh_unlock_sock(newsk);
1546 sk_free(newsk);
1547 newsk = NULL; 1543 newsk = NULL;
1548 goto out; 1544 goto out;
1549 } 1545 }
@@ -1592,6 +1588,16 @@ out:
1592} 1588}
1593EXPORT_SYMBOL_GPL(sk_clone_lock); 1589EXPORT_SYMBOL_GPL(sk_clone_lock);
1594 1590
1591void sk_free_unlock_clone(struct sock *sk)
1592{
1593 /* It is still raw copy of parent, so invalidate
1594 * destructor and make plain sk_free() */
1595 sk->sk_destruct = NULL;
1596 bh_unlock_sock(sk);
1597 sk_free(sk);
1598}
1599EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
1600
1595void sk_setup_caps(struct sock *sk, struct dst_entry *dst) 1601void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1596{ 1602{
1597 u32 max_segs = 1; 1603 u32 max_segs = 1;
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 8fedc2d49770..4a05d7876850 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -577,6 +577,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
577 struct dccp_sock *dp = dccp_sk(sk); 577 struct dccp_sock *dp = dccp_sk(sk);
578 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 578 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
579 const int old_state = sk->sk_state; 579 const int old_state = sk->sk_state;
580 bool acceptable;
580 int queued = 0; 581 int queued = 0;
581 582
582 /* 583 /*
@@ -603,8 +604,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
603 */ 604 */
604 if (sk->sk_state == DCCP_LISTEN) { 605 if (sk->sk_state == DCCP_LISTEN) {
605 if (dh->dccph_type == DCCP_PKT_REQUEST) { 606 if (dh->dccph_type == DCCP_PKT_REQUEST) {
606 if (inet_csk(sk)->icsk_af_ops->conn_request(sk, 607 /* It is possible that we process SYN packets from backlog,
607 skb) < 0) 608 * so we need to make sure to disable BH right there.
609 */
610 local_bh_disable();
611 acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
612 local_bh_enable();
613 if (!acceptable)
608 return 1; 614 return 1;
609 consume_skb(skb); 615 consume_skb(skb);
610 return 0; 616 return 0;
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 53eddf99e4f6..e267e6f4c9a5 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -119,10 +119,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
119 * Activate features: initialise CCIDs, sequence windows etc. 119 * Activate features: initialise CCIDs, sequence windows etc.
120 */ 120 */
121 if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) { 121 if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) {
122 /* It is still raw copy of parent, so invalidate 122 sk_free_unlock_clone(newsk);
123 * destructor and make plain sk_free() */
124 newsk->sk_destruct = NULL;
125 sk_free(newsk);
126 return NULL; 123 return NULL;
127 } 124 }
128 dccp_init_xmit_timers(newsk); 125 dccp_init_xmit_timers(newsk);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index b39a791f6756..42bfd08109dd 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -622,6 +622,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
622 [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, 622 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
623 [RTA_ENCAP] = { .type = NLA_NESTED }, 623 [RTA_ENCAP] = { .type = NLA_NESTED },
624 [RTA_UID] = { .type = NLA_U32 }, 624 [RTA_UID] = { .type = NLA_U32 },
625 [RTA_MARK] = { .type = NLA_U32 },
625}; 626};
626 627
627static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, 628static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index b3cc1335adbc..c0cc6aa8cfaa 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -23,7 +23,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
23 struct rtable *rt; 23 struct rtable *rt;
24 struct flowi4 fl4 = {}; 24 struct flowi4 fl4 = {};
25 __be32 saddr = iph->saddr; 25 __be32 saddr = iph->saddr;
26 __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; 26 const struct sock *sk = skb_to_full_sk(skb);
27 __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0;
27 struct net_device *dev = skb_dst(skb)->dev; 28 struct net_device *dev = skb_dst(skb)->dev;
28 unsigned int hh_len; 29 unsigned int hh_len;
29 30
@@ -40,7 +41,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
40 fl4.daddr = iph->daddr; 41 fl4.daddr = iph->daddr;
41 fl4.saddr = saddr; 42 fl4.saddr = saddr;
42 fl4.flowi4_tos = RT_TOS(iph->tos); 43 fl4.flowi4_tos = RT_TOS(iph->tos);
43 fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; 44 fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0;
44 if (!fl4.flowi4_oif) 45 if (!fl4.flowi4_oif)
45 fl4.flowi4_oif = l3mdev_master_ifindex(dev); 46 fl4.flowi4_oif = l3mdev_master_ifindex(dev);
46 fl4.flowi4_mark = skb->mark; 47 fl4.flowi4_mark = skb->mark;
@@ -61,7 +62,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
61 xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { 62 xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) {
62 struct dst_entry *dst = skb_dst(skb); 63 struct dst_entry *dst = skb_dst(skb);
63 skb_dst_set(skb, NULL); 64 skb_dst_set(skb, NULL);
64 dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0); 65 dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0);
65 if (IS_ERR(dst)) 66 if (IS_ERR(dst))
66 return PTR_ERR(dst); 67 return PTR_ERR(dst);
67 skb_dst_set(skb, dst); 68 skb_dst_set(skb, dst);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index da385ae997a3..cf4555581282 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1110,9 +1110,14 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1110 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; 1110 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1111 err = __inet_stream_connect(sk->sk_socket, msg->msg_name, 1111 err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
1112 msg->msg_namelen, flags, 1); 1112 msg->msg_namelen, flags, 1);
1113 inet->defer_connect = 0; 1113 /* fastopen_req could already be freed in __inet_stream_connect
1114 *copied = tp->fastopen_req->copied; 1114 * if the connection times out or gets rst
1115 tcp_free_fastopen_req(tp); 1115 */
1116 if (tp->fastopen_req) {
1117 *copied = tp->fastopen_req->copied;
1118 tcp_free_fastopen_req(tp);
1119 inet->defer_connect = 0;
1120 }
1116 return err; 1121 return err;
1117} 1122}
1118 1123
@@ -2318,6 +2323,10 @@ int tcp_disconnect(struct sock *sk, int flags)
2318 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2323 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2319 __sk_dst_reset(sk); 2324 __sk_dst_reset(sk);
2320 2325
2326 /* Clean up fastopen related fields */
2327 tcp_free_fastopen_req(tp);
2328 inet->defer_connect = 0;
2329
2321 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 2330 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2322 2331
2323 sk->sk_error_report(sk); 2332 sk->sk_error_report(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2c0ff327b6df..39c393cc0fd3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5886,9 +5886,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
5886 if (th->syn) { 5886 if (th->syn) {
5887 if (th->fin) 5887 if (th->fin)
5888 goto discard; 5888 goto discard;
5889 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) 5889 /* It is possible that we process SYN packets from backlog,
5890 return 1; 5890 * so we need to make sure to disable BH right there.
5891 */
5892 local_bh_disable();
5893 acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
5894 local_bh_enable();
5891 5895
5896 if (!acceptable)
5897 return 1;
5892 consume_skb(skb); 5898 consume_skb(skb);
5893 return 0; 5899 return 0;
5894 } 5900 }
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 77362b88a661..363172527e43 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5693,13 +5693,18 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
5693 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1; 5693 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
5694 struct net *net = (struct net *)ctl->extra2; 5694 struct net *net = (struct net *)ctl->extra2;
5695 5695
5696 if (!rtnl_trylock())
5697 return restart_syscall();
5698
5696 ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 5699 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5697 5700
5698 if (write) { 5701 if (write) {
5699 new_val = *((int *)ctl->data); 5702 new_val = *((int *)ctl->data);
5700 5703
5701 if (check_addr_gen_mode(new_val) < 0) 5704 if (check_addr_gen_mode(new_val) < 0) {
5702 return -EINVAL; 5705 ret = -EINVAL;
5706 goto out;
5707 }
5703 5708
5704 /* request for default */ 5709 /* request for default */
5705 if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) { 5710 if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
@@ -5708,20 +5713,23 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
5708 /* request for individual net device */ 5713 /* request for individual net device */
5709 } else { 5714 } else {
5710 if (!idev) 5715 if (!idev)
5711 return ret; 5716 goto out;
5712 5717
5713 if (check_stable_privacy(idev, net, new_val) < 0) 5718 if (check_stable_privacy(idev, net, new_val) < 0) {
5714 return -EINVAL; 5719 ret = -EINVAL;
5720 goto out;
5721 }
5715 5722
5716 if (idev->cnf.addr_gen_mode != new_val) { 5723 if (idev->cnf.addr_gen_mode != new_val) {
5717 idev->cnf.addr_gen_mode = new_val; 5724 idev->cnf.addr_gen_mode = new_val;
5718 rtnl_lock();
5719 addrconf_dev_config(idev->dev); 5725 addrconf_dev_config(idev->dev);
5720 rtnl_unlock();
5721 } 5726 }
5722 } 5727 }
5723 } 5728 }
5724 5729
5730out:
5731 rtnl_unlock();
5732
5725 return ret; 5733 return ret;
5726} 5734}
5727 5735
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 9948b5ce52da..986d4ca38832 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -589,6 +589,7 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
589 hdr = ipv6_hdr(skb); 589 hdr = ipv6_hdr(skb);
590 fhdr = (struct frag_hdr *)skb_transport_header(skb); 590 fhdr = (struct frag_hdr *)skb_transport_header(skb);
591 591
592 skb_orphan(skb);
592 fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, 593 fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
593 skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); 594 skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
594 if (fq == NULL) { 595 if (fq == NULL) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index f54f4265b37f..229bfcc451ef 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2169,10 +2169,13 @@ int ip6_del_rt(struct rt6_info *rt)
2169static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) 2169static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
2170{ 2170{
2171 struct nl_info *info = &cfg->fc_nlinfo; 2171 struct nl_info *info = &cfg->fc_nlinfo;
2172 struct net *net = info->nl_net;
2172 struct sk_buff *skb = NULL; 2173 struct sk_buff *skb = NULL;
2173 struct fib6_table *table; 2174 struct fib6_table *table;
2174 int err; 2175 int err = -ENOENT;
2175 2176
2177 if (rt == net->ipv6.ip6_null_entry)
2178 goto out_put;
2176 table = rt->rt6i_table; 2179 table = rt->rt6i_table;
2177 write_lock_bh(&table->tb6_lock); 2180 write_lock_bh(&table->tb6_lock);
2178 2181
@@ -2184,7 +2187,7 @@ static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
2184 if (skb) { 2187 if (skb) {
2185 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; 2188 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2186 2189
2187 if (rt6_fill_node(info->nl_net, skb, rt, 2190 if (rt6_fill_node(net, skb, rt,
2188 NULL, NULL, 0, RTM_DELROUTE, 2191 NULL, NULL, 0, RTM_DELROUTE,
2189 info->portid, seq, 0) < 0) { 2192 info->portid, seq, 0) < 0) {
2190 kfree_skb(skb); 2193 kfree_skb(skb);
@@ -2198,17 +2201,18 @@ static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
2198 rt6i_siblings) { 2201 rt6i_siblings) {
2199 err = fib6_del(sibling, info); 2202 err = fib6_del(sibling, info);
2200 if (err) 2203 if (err)
2201 goto out; 2204 goto out_unlock;
2202 } 2205 }
2203 } 2206 }
2204 2207
2205 err = fib6_del(rt, info); 2208 err = fib6_del(rt, info);
2206out: 2209out_unlock:
2207 write_unlock_bh(&table->tb6_lock); 2210 write_unlock_bh(&table->tb6_lock);
2211out_put:
2208 ip6_rt_put(rt); 2212 ip6_rt_put(rt);
2209 2213
2210 if (skb) { 2214 if (skb) {
2211 rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV6_ROUTE, 2215 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2212 info->nlh, gfp_any()); 2216 info->nlh, gfp_any());
2213 } 2217 }
2214 return err; 2218 return err;
@@ -2891,6 +2895,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2891 [RTA_ENCAP] = { .type = NLA_NESTED }, 2895 [RTA_ENCAP] = { .type = NLA_NESTED },
2892 [RTA_EXPIRES] = { .type = NLA_U32 }, 2896 [RTA_EXPIRES] = { .type = NLA_U32 },
2893 [RTA_UID] = { .type = NLA_U32 }, 2897 [RTA_UID] = { .type = NLA_U32 },
2898 [RTA_MARK] = { .type = NLA_U32 },
2894}; 2899};
2895 2900
2896static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, 2901static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -3627,6 +3632,12 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
3627 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6); 3632 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
3628 } 3633 }
3629 3634
3635 if (rt == net->ipv6.ip6_null_entry) {
3636 err = rt->dst.error;
3637 ip6_rt_put(rt);
3638 goto errout;
3639 }
3640
3630 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 3641 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3631 if (!skb) { 3642 if (!skb) {
3632 ip6_rt_put(rt); 3643 ip6_rt_put(rt);
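The route.c rework bails out early for the null entry and splits the error path into ordered labels (out_unlock before out_put), so the table lock is only dropped when it was actually taken while the route reference is always put. A hedged userspace sketch of that goto-cleanup idiom (the error codes and the pthread mutex are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int route_refcnt = 1;

/* Ordered cleanup labels, in the style of the reworked __ip6_del_rt_siblings():
 * bail out before taking the lock -> out_put; fail under the lock -> out_unlock.
 */
static int delete_route(int is_null_entry, int del_fails)
{
	int err = -2;			/* stand-in for -ENOENT */

	if (is_null_entry)
		goto out_put;

	pthread_mutex_lock(&table_lock);
	if (del_fails) {
		err = -1;		/* stand-in for a fib6_del() error */
		goto out_unlock;
	}
	err = 0;

out_unlock:
	pthread_mutex_unlock(&table_lock);
out_put:
	route_refcnt--;			/* ip6_rt_put() equivalent */
	return err;
}

int main(void)
{
	printf("null entry:   err=%d refcnt=%d\n", delete_route(1, 0), route_refcnt);
	route_refcnt = 1;
	printf("delete fails: err=%d refcnt=%d\n", delete_route(0, 1), route_refcnt);
	route_refcnt = 1;
	printf("success:      err=%d refcnt=%d\n", delete_route(0, 0), route_refcnt);
	return 0;
}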
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 3b5fd4188f2a..4456559cb056 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -85,7 +85,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
85 ht_dbg(sta->sdata, 85 ht_dbg(sta->sdata,
86 "Rx BA session stop requested for %pM tid %u %s reason: %d\n", 86 "Rx BA session stop requested for %pM tid %u %s reason: %d\n",
87 sta->sta.addr, tid, 87 sta->sta.addr, tid,
88 initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator", 88 initiator == WLAN_BACK_RECIPIENT ? "recipient" : "initiator",
89 (int)reason); 89 (int)reason);
90 90
91 if (drv_ampdu_action(local, sta->sdata, &params)) 91 if (drv_ampdu_action(local, sta->sdata, &params))
@@ -398,6 +398,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
398 tid_agg_rx->timeout = timeout; 398 tid_agg_rx->timeout = timeout;
399 tid_agg_rx->stored_mpdu_num = 0; 399 tid_agg_rx->stored_mpdu_num = 0;
400 tid_agg_rx->auto_seq = auto_seq; 400 tid_agg_rx->auto_seq = auto_seq;
401 tid_agg_rx->started = false;
401 tid_agg_rx->reorder_buf_filtered = 0; 402 tid_agg_rx->reorder_buf_filtered = 0;
402 status = WLAN_STATUS_SUCCESS; 403 status = WLAN_STATUS_SUCCESS;
403 404
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 159a1a733725..0e718437d080 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -428,7 +428,7 @@ struct ieee80211_sta_tx_tspec {
428 bool downgraded; 428 bool downgraded;
429}; 429};
430 430
431DECLARE_EWMA(beacon_signal, 16, 4) 431DECLARE_EWMA(beacon_signal, 4, 4)
432 432
433struct ieee80211_if_managed { 433struct ieee80211_if_managed {
434 struct timer_list timer; 434 struct timer_list timer;
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 28a3a0957c9e..76a8bcd8ef11 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -168,6 +168,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
168 break; 168 break;
169 } 169 }
170 170
171 flush_delayed_work(&sdata->dec_tailroom_needed_wk);
171 drv_remove_interface(local, sdata); 172 drv_remove_interface(local, sdata);
172 } 173 }
173 174
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 50ca3828b124..e48724a6725e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -4,7 +4,7 @@
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 * Copyright 2013-2014 Intel Mobile Communications GmbH 6 * Copyright 2013-2014 Intel Mobile Communications GmbH
7 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 7 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -1034,6 +1034,18 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
1034 buf_size = tid_agg_rx->buf_size; 1034 buf_size = tid_agg_rx->buf_size;
1035 head_seq_num = tid_agg_rx->head_seq_num; 1035 head_seq_num = tid_agg_rx->head_seq_num;
1036 1036
1037 /*
1038 * If the current MPDU's SN is smaller than the SSN, it shouldn't
1039 * be reordered.
1040 */
1041 if (unlikely(!tid_agg_rx->started)) {
1042 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1043 ret = false;
1044 goto out;
1045 }
1046 tid_agg_rx->started = true;
1047 }
1048
1037 /* frame with out of date sequence number */ 1049 /* frame with out of date sequence number */
1038 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { 1050 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1039 dev_kfree_skb(skb); 1051 dev_kfree_skb(skb);
@@ -3880,6 +3892,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
3880 stats->last_rate = sta_stats_encode_rate(status); 3892 stats->last_rate = sta_stats_encode_rate(status);
3881 3893
3882 stats->fragments++; 3894 stats->fragments++;
3895 stats->packets++;
3883 3896
3884 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 3897 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
3885 stats->last_signal = status->signal; 3898 stats->last_signal = status->signal;
@@ -4073,15 +4086,17 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
4073 ieee80211_is_beacon(hdr->frame_control))) 4086 ieee80211_is_beacon(hdr->frame_control)))
4074 ieee80211_scan_rx(local, skb); 4087 ieee80211_scan_rx(local, skb);
4075 4088
4076 if (pubsta) { 4089 if (ieee80211_is_data(fc)) {
4077 rx.sta = container_of(pubsta, struct sta_info, sta);
4078 rx.sdata = rx.sta->sdata;
4079 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4080 return;
4081 goto out;
4082 } else if (ieee80211_is_data(fc)) {
4083 struct sta_info *sta, *prev_sta; 4090 struct sta_info *sta, *prev_sta;
4084 4091
4092 if (pubsta) {
4093 rx.sta = container_of(pubsta, struct sta_info, sta);
4094 rx.sdata = rx.sta->sdata;
4095 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4096 return;
4097 goto out;
4098 }
4099
4085 prev_sta = NULL; 4100 prev_sta = NULL;
4086 4101
4087 for_each_sta_info(local, hdr->addr2, sta, tmp) { 4102 for_each_sta_info(local, hdr->addr2, sta, tmp) {
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 4774e663a411..3323a2fb289b 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
688 } 688 }
689 689
690 /* No need to do anything if the driver does all */ 690 /* No need to do anything if the driver does all */
691 if (ieee80211_hw_check(&local->hw, AP_LINK_PS)) 691 if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
692 return; 692 return;
693 693
694 if (sta->dead) 694 if (sta->dead)
@@ -1264,7 +1264,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
1264 sta_info_recalc_tim(sta); 1264 sta_info_recalc_tim(sta);
1265 1265
1266 ps_dbg(sdata, 1266 ps_dbg(sdata,
1267 "STA %pM aid %d sending %d filtered/%d PS frames since STA not sleeping anymore\n", 1267 "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n",
1268 sta->sta.addr, sta->sta.aid, filtered, buffered); 1268 sta->sta.addr, sta->sta.aid, filtered, buffered);
1269 1269
1270 ieee80211_check_fast_xmit(sta); 1270 ieee80211_check_fast_xmit(sta);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index dd06ef0b8861..e65cda34d2bc 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -189,6 +189,7 @@ struct tid_ampdu_tx {
 189 * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_num 189 * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_num
190 * and ssn. 190 * and ssn.
191 * @removed: this session is removed (but might have been found due to RCU) 191 * @removed: this session is removed (but might have been found due to RCU)
192 * @started: this session has started (head ssn or higher was received)
192 * 193 *
193 * This structure's lifetime is managed by RCU, assignments to 194 * This structure's lifetime is managed by RCU, assignments to
194 * the array holding it must hold the aggregation mutex. 195 * the array holding it must hold the aggregation mutex.
@@ -212,8 +213,9 @@ struct tid_ampdu_rx {
212 u16 ssn; 213 u16 ssn;
213 u16 buf_size; 214 u16 buf_size;
214 u16 timeout; 215 u16 timeout;
215 bool auto_seq; 216 u8 auto_seq:1,
216 bool removed; 217 removed:1,
218 started:1;
217}; 219};
218 220
219/** 221/**
@@ -370,7 +372,7 @@ struct mesh_sta {
370 unsigned int fail_avg; 372 unsigned int fail_avg;
371}; 373};
372 374
373DECLARE_EWMA(signal, 1024, 8) 375DECLARE_EWMA(signal, 10, 8)
374 376
375struct ieee80211_sta_rx_stats { 377struct ieee80211_sta_rx_stats {
376 unsigned long packets; 378 unsigned long packets;
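sta_info.h turns the two existing bools into one-bit fields so that the new 'started' flag fits without growing struct tid_ampdu_rx; all three flags now share a single u8. A quick size comparison (exact sizes are compiler- and padding-dependent, so treat the numbers as illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct flags_as_bools {
	bool auto_seq;
	bool removed;
	bool started;
};

struct flags_as_bitfields {
	uint8_t auto_seq:1,
		removed:1,
		started:1;
};

int main(void)
{
	printf("three bools:     %zu bytes\n", sizeof(struct flags_as_bools));
	printf("three bitfields: %zu bytes\n", sizeof(struct flags_as_bitfields));
	return 0;
}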
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 0dd7c351002d..83b8b11f24ea 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -51,7 +51,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
51 struct ieee80211_hdr *hdr = (void *)skb->data; 51 struct ieee80211_hdr *hdr = (void *)skb->data;
52 int ac; 52 int ac;
53 53
54 if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) { 54 if (info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
55 IEEE80211_TX_CTL_AMPDU)) {
55 ieee80211_free_txskb(&local->hw, skb); 56 ieee80211_free_txskb(&local->hw, skb);
56 return; 57 return;
57 } 58 }
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 24174c520239..0d17894798b5 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1628,8 +1628,6 @@ static int __init nf_conntrack_sip_init(void)
1628 ports[ports_c++] = SIP_PORT; 1628 ports[ports_c++] = SIP_PORT;
1629 1629
1630 for (i = 0; i < ports_c; i++) { 1630 for (i = 0; i < ports_c; i++) {
1631 memset(&sip[i], 0, sizeof(sip[i]));
1632
1633 nf_ct_helper_init(&sip[4 * i], AF_INET, IPPROTO_UDP, "sip", 1631 nf_ct_helper_init(&sip[4 * i], AF_INET, IPPROTO_UDP, "sip",
1634 SIP_PORT, ports[i], i, sip_exp_policy, 1632 SIP_PORT, ports[i], i, sip_exp_policy,
1635 SIP_EXPECT_MAX, 1633 SIP_EXPECT_MAX,
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ff7304ae58ac..5e0ccfd5bb37 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -461,16 +461,15 @@ nla_put_failure:
461 return -1; 461 return -1;
462} 462}
463 463
464static int nf_tables_table_notify(const struct nft_ctx *ctx, int event) 464static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
465{ 465{
466 struct sk_buff *skb; 466 struct sk_buff *skb;
467 int err; 467 int err;
468 468
469 if (!ctx->report && 469 if (!ctx->report &&
470 !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) 470 !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
471 return 0; 471 return;
472 472
473 err = -ENOBUFS;
474 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 473 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
475 if (skb == NULL) 474 if (skb == NULL)
476 goto err; 475 goto err;
@@ -482,14 +481,11 @@ static int nf_tables_table_notify(const struct nft_ctx *ctx, int event)
 		goto err;
 	}
 
-	err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
-			     ctx->report, GFP_KERNEL);
+	nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+		       ctx->report, GFP_KERNEL);
+	return;
 err:
-	if (err < 0) {
-		nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
-				  err);
-	}
-	return err;
+	nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
 }
 
 static int nf_tables_dump_tables(struct sk_buff *skb,
@@ -1050,16 +1046,15 @@ nla_put_failure:
 	return -1;
 }
 
-static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
+static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
 {
 	struct sk_buff *skb;
 	int err;
 
 	if (!ctx->report &&
 	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
-		return 0;
+		return;
 
-	err = -ENOBUFS;
 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (skb == NULL)
 		goto err;
@@ -1072,14 +1067,11 @@ static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
 		goto err;
 	}
 
-	err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
-			     ctx->report, GFP_KERNEL);
+	nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+		       ctx->report, GFP_KERNEL);
+	return;
 err:
-	if (err < 0) {
-		nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
-				  err);
-	}
-	return err;
+	nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
 }
 
 static int nf_tables_dump_chains(struct sk_buff *skb,
@@ -1934,18 +1926,16 @@ nla_put_failure:
 	return -1;
 }
 
-static int nf_tables_rule_notify(const struct nft_ctx *ctx,
-				 const struct nft_rule *rule,
-				 int event)
+static void nf_tables_rule_notify(const struct nft_ctx *ctx,
+				  const struct nft_rule *rule, int event)
 {
 	struct sk_buff *skb;
 	int err;
 
 	if (!ctx->report &&
 	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
-		return 0;
+		return;
 
-	err = -ENOBUFS;
 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (skb == NULL)
 		goto err;
@@ -1958,14 +1948,11 @@ static int nf_tables_rule_notify(const struct nft_ctx *ctx,
 		goto err;
 	}
 
-	err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
-			     ctx->report, GFP_KERNEL);
+	nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+		       ctx->report, GFP_KERNEL);
+	return;
 err:
-	if (err < 0) {
-		nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
-				  err);
-	}
-	return err;
+	nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
 }
 
 struct nft_rule_dump_ctx {
@@ -2696,9 +2683,9 @@ nla_put_failure:
 	return -1;
 }
 
-static int nf_tables_set_notify(const struct nft_ctx *ctx,
-				const struct nft_set *set,
-				int event, gfp_t gfp_flags)
+static void nf_tables_set_notify(const struct nft_ctx *ctx,
+				 const struct nft_set *set, int event,
+				 gfp_t gfp_flags)
 {
 	struct sk_buff *skb;
 	u32 portid = ctx->portid;
@@ -2706,9 +2693,8 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx,
 
 	if (!ctx->report &&
 	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
-		return 0;
+		return;
 
-	err = -ENOBUFS;
 	skb = nlmsg_new(NLMSG_GOODSIZE, gfp_flags);
 	if (skb == NULL)
 		goto err;
@@ -2719,12 +2705,11 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx,
 		goto err;
 	}
 
-	err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES,
-			     ctx->report, gfp_flags);
+	nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, ctx->report,
+		       gfp_flags);
+	return;
 err:
-	if (err < 0)
-		nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err);
-	return err;
+	nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
 }
 
 static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
@@ -3504,10 +3489,10 @@ nla_put_failure:
 	return -1;
 }
 
-static int nf_tables_setelem_notify(const struct nft_ctx *ctx,
-				    const struct nft_set *set,
-				    const struct nft_set_elem *elem,
-				    int event, u16 flags)
+static void nf_tables_setelem_notify(const struct nft_ctx *ctx,
+				     const struct nft_set *set,
+				     const struct nft_set_elem *elem,
+				     int event, u16 flags)
 {
 	struct net *net = ctx->net;
 	u32 portid = ctx->portid;
@@ -3515,9 +3500,8 @@ static int nf_tables_setelem_notify(const struct nft_ctx *ctx,
 	int err;
 
 	if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
-		return 0;
+		return;
 
-	err = -ENOBUFS;
 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (skb == NULL)
 		goto err;
@@ -3529,12 +3513,11 @@ static int nf_tables_setelem_notify(const struct nft_ctx *ctx,
 		goto err;
 	}
 
-	err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report,
-			     GFP_KERNEL);
+	nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report,
+		       GFP_KERNEL);
+	return;
 err:
-	if (err < 0)
-		nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
-	return err;
+	nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
 }
 
 static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
@@ -4476,18 +4459,17 @@ static int nf_tables_delobj(struct net *net, struct sock *nlsk,
 	return nft_delobj(&ctx, obj);
 }
 
-int nft_obj_notify(struct net *net, struct nft_table *table,
-		   struct nft_object *obj, u32 portid, u32 seq, int event,
-		   int family, int report, gfp_t gfp)
+void nft_obj_notify(struct net *net, struct nft_table *table,
+		    struct nft_object *obj, u32 portid, u32 seq, int event,
+		    int family, int report, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	int err;
 
 	if (!report &&
 	    !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
-		return 0;
+		return;
 
-	err = -ENOBUFS;
 	skb = nlmsg_new(NLMSG_GOODSIZE, gfp);
 	if (skb == NULL)
 		goto err;
@@ -4499,21 +4481,18 @@ int nft_obj_notify(struct net *net, struct nft_table *table,
 		goto err;
 	}
 
-	err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, gfp);
+	nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, gfp);
+	return;
 err:
-	if (err < 0) {
-		nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
-	}
-	return err;
+	nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
 }
 EXPORT_SYMBOL_GPL(nft_obj_notify);
 
-static int nf_tables_obj_notify(const struct nft_ctx *ctx,
-				struct nft_object *obj, int event)
+static void nf_tables_obj_notify(const struct nft_ctx *ctx,
+				 struct nft_object *obj, int event)
 {
-	return nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid,
-			      ctx->seq, event, ctx->afi->family, ctx->report,
-			      GFP_KERNEL);
+	nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event,
+		       ctx->afi->family, ctx->report, GFP_KERNEL);
 }
 
 static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
@@ -4543,7 +4522,8 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
-static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event)
+static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb,
+				 int event)
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
 	struct sk_buff *skb2;
@@ -4551,9 +4531,8 @@ static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event)
 
 	if (nlmsg_report(nlh) &&
 	    !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
-		return 0;
+		return;
 
-	err = -ENOBUFS;
 	skb2 = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (skb2 == NULL)
 		goto err;
@@ -4565,14 +4544,12 @@ static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event)
 		goto err;
 	}
 
-	err = nfnetlink_send(skb2, net, NETLINK_CB(skb).portid,
-			     NFNLGRP_NFTABLES, nlmsg_report(nlh), GFP_KERNEL);
+	nfnetlink_send(skb2, net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES,
+		       nlmsg_report(nlh), GFP_KERNEL);
+	return;
 err:
-	if (err < 0) {
-		nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES,
-				  err);
-	}
-	return err;
+	nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES,
+			  -ENOBUFS);
 }
 
 static int nf_tables_getgen(struct net *net, struct sock *nlsk,
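
All of the nf_tables notify helpers above now follow the same shape: they return void, and any failure to build or send the notification is reported to the NFNLGRP_NFTABLES listeners as -ENOBUFS via nfnetlink_set_err() rather than returned to callers that ignored the value. A condensed, non-compilable sketch of that shape (nf_tables_fill_foo_info() is a placeholder for the per-object fill helpers used above):

    static void nf_tables_foo_notify(const struct nft_ctx *ctx, int event)
    {
        struct sk_buff *skb;
        int err;

        if (!ctx->report &&
            !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
            return;                 /* nobody is listening */

        skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (skb == NULL)
            goto err;

        err = nf_tables_fill_foo_info(skb, ctx, event);  /* placeholder */
        if (err < 0) {
            kfree_skb(skb);
            goto err;
        }

        nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
                       ctx->report, GFP_KERNEL);
        return;
    err:
        /* Tell the multicast group it missed an event. */
        nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
    }
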
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 71e8fb886a73..78dfbf9588b3 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -60,11 +60,10 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
 		d = memcmp(this, key, set->klen);
 		if (d < 0) {
 			parent = parent->rb_left;
-			/* In case of adjacent ranges, we always see the high
-			 * part of the range in first place, before the low one.
-			 * So don't update interval if the keys are equal.
-			 */
-			if (interval && nft_rbtree_equal(set, this, interval))
+			if (interval &&
+			    nft_rbtree_equal(set, this, interval) &&
+			    nft_rbtree_interval_end(this) &&
+			    !nft_rbtree_interval_end(interval))
 				continue;
 			interval = rbe;
 		} else if (d > 0)
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index b1beb2b94ec7..c82301ce3fff 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -796,9 +796,8 @@ static void ovs_fragment(struct net *net, struct vport *vport,
 		unsigned long orig_dst;
 		struct rt6_info ovs_rt;
 
-		if (!v6ops) {
+		if (!v6ops)
 			goto err;
-		}
 
 		prepare_frag(vport, skb, orig_network_offset,
 			     ovs_key_mac_proto(key));
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 85cd59526670..e0a87776a010 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -485,7 +485,6 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
 
-		skb_orphan(skb);
 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
 		err = nf_ct_frag6_gather(net, skb, user);
 		if (err) {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2bd0d1949312..a0dbe7ca8f72 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3103,7 +3103,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
 			    int addr_len)
 {
 	struct sock *sk = sock->sk;
-	char name[15];
+	char name[sizeof(uaddr->sa_data) + 1];
 
 	/*
 	 *	Check legality
@@ -3111,7 +3111,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
 
 	if (addr_len != sizeof(struct sockaddr))
 		return -EINVAL;
-	strlcpy(name, uaddr->sa_data, sizeof(name));
+	/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
+	 * zero-terminated.
+	 */
+	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
+	name[sizeof(uaddr->sa_data)] = 0;
 
 	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
 }
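
strlcpy() assumes a NUL-terminated source, but sa_data arriving from userspace is a fixed 14-byte field with no such guarantee, so the fix above copies the whole field and terminates it explicitly. A userspace sketch of the same idiom (illustrative only):

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Copy a possibly unterminated fixed-size field into a C string. */
    static void copy_name(char *name, const struct sockaddr *uaddr)
    {
        /* sizeof(uaddr->sa_data) is 14 on Linux; no NUL is guaranteed. */
        memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
        name[sizeof(uaddr->sa_data)] = '\0';
    }

    int main(void)
    {
        struct sockaddr sa;
        char name[sizeof(sa.sa_data) + 1];

        memset(&sa, 'x', sizeof(sa));   /* worst case: no terminator at all */
        copy_name(name, &sa);
        printf("%s (%zu chars)\n", name, strlen(name));
        return 0;
    }
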
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 91fe46f1e4cc..7a64c8db81ab 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -45,8 +45,8 @@
 #include "ib.h"
 #include "ib_mr.h"
 
-unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
-unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
+static unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
+static unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
 unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
 
 module_param(rds_ib_mr_1m_pool_size, int, 0444);
@@ -438,16 +438,12 @@ int rds_ib_init(void)
 	if (ret)
 		goto out_sysctl;
 
-	ret = rds_trans_register(&rds_ib_transport);
-	if (ret)
-		goto out_recv;
+	rds_trans_register(&rds_ib_transport);
 
 	rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
 
 	goto out;
 
-out_recv:
-	rds_ib_recv_exit();
 out_sysctl:
 	rds_ib_sysctl_exit();
 out_ibreg:
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index 24c086db4511..5d6e98a79a5e 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -107,8 +107,6 @@ struct rds_ib_mr_pool {
 };
 
 extern struct workqueue_struct *rds_ib_mr_wq;
-extern unsigned int rds_ib_mr_1m_pool_size;
-extern unsigned int rds_ib_mr_8k_pool_size;
 extern bool prefer_frmr;
 
 struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev,
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 966d2ee1f107..39518ef7af4d 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -903,7 +903,7 @@ void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
 void rds_connect_complete(struct rds_connection *conn);
 
 /* transport.c */
-int rds_trans_register(struct rds_transport *trans);
+void rds_trans_register(struct rds_transport *trans);
 void rds_trans_unregister(struct rds_transport *trans);
 struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
 void rds_trans_put(struct rds_transport *trans);
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 5438f6725092..a973d3b4dff0 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -652,16 +652,12 @@ static int rds_tcp_init(void)
 	if (ret)
 		goto out_pernet;
 
-	ret = rds_trans_register(&rds_tcp_transport);
-	if (ret)
-		goto out_recv;
+	rds_trans_register(&rds_tcp_transport);
 
 	rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
 
 	goto out;
 
-out_recv:
-	rds_tcp_recv_exit();
 out_pernet:
 	unregister_pernet_subsys(&rds_tcp_net_ops);
 out_notifier:
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 2ffd3e30c643..0b188dd0a344 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -40,7 +40,7 @@
 static struct rds_transport *transports[RDS_TRANS_COUNT];
 static DECLARE_RWSEM(rds_trans_sem);
 
-int rds_trans_register(struct rds_transport *trans)
+void rds_trans_register(struct rds_transport *trans)
 {
 	BUG_ON(strlen(trans->t_name) + 1 > TRANSNAMSIZ);
 
@@ -55,8 +55,6 @@ int rds_trans_register(struct rds_transport *trans)
 	}
 
 	up_write(&rds_trans_sem);
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(rds_trans_register);
 
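
With rds_trans_register() unable to fail, its callers in rds_ib_init() and rds_tcp_init() lose one rung of their unwind ladders (the out_recv: labels above). A generic userspace sketch of the same cleanup pattern, with stub functions standing in for the real setup steps:

    #include <stdio.h>

    static int  setup_a(void)    { return 0; }
    static void teardown_a(void) { }
    static void setup_b(void)    { }   /* previously returned an error code */
    static int  setup_c(void)    { return 0; }

    /* Once setup_b() cannot fail, its unwind label and teardown disappear,
     * just as out_recv:/rds_ib_recv_exit() did in the hunks above.
     */
    static int init(void)
    {
        int ret;

        ret = setup_a();
        if (ret)
            goto out;
        setup_b();
        ret = setup_c();
        if (ret)
            goto out_a;
        return 0;
    out_a:
        teardown_a();
    out:
        return ret;
    }

    int main(void)
    {
        printf("init() = %d\n", init());
        return 0;
    }
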
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 199b46e93e64..7fb59c3f1542 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -290,10 +290,11 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 	cp.exclusive = false;
 	cp.service_id = srx->srx_service;
 	call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, gfp);
+	/* The socket has been unlocked. */
 	if (!IS_ERR(call))
 		call->notify_rx = notify_rx;
 
-	release_sock(&rx->sk);
+	mutex_unlock(&call->user_mutex);
 	_leave(" = %p", call);
 	return call;
 }
@@ -310,7 +311,10 @@ EXPORT_SYMBOL(rxrpc_kernel_begin_call);
 void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
 {
 	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
+
+	mutex_lock(&call->user_mutex);
 	rxrpc_release_call(rxrpc_sk(sock->sk), call);
+	mutex_unlock(&call->user_mutex);
 	rxrpc_put_call(call, rxrpc_call_put_kernel);
 }
 EXPORT_SYMBOL(rxrpc_kernel_end_call);
@@ -450,14 +454,16 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
 	case RXRPC_SERVER_BOUND:
 	case RXRPC_SERVER_LISTENING:
 		ret = rxrpc_do_sendmsg(rx, m, len);
-		break;
+		/* The socket has been unlocked */
+		goto out;
 	default:
 		ret = -EINVAL;
-		break;
+		goto error_unlock;
 	}
 
 error_unlock:
 	release_sock(&rx->sk);
+out:
 	_leave(" = %d", ret);
 	return ret;
 }
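
The rxrpc hunks above and below all implement one convention: each call carries its own user_mutex, and helpers such as rxrpc_new_client_call() and rxrpc_accept_call() return with the socket lock dropped and the call mutex held (the __releases() annotations below document this). The point is that the caller always knows exactly which lock it still owns when the helper returns. A userspace pthread analogue of that hand-off (illustrative only; the kernel uses lock_sock()/release_sock() and struct mutex):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

    struct call {
        pthread_mutex_t user_mutex;
        int id;
    };

    /* Create a call under the "socket" lock, then hand it back with only
     * the call's own mutex held.
     */
    static struct call *begin_call(int id)
    {
        struct call *call;

        pthread_mutex_lock(&sock_lock);
        call = malloc(sizeof(*call));
        if (!call) {
            pthread_mutex_unlock(&sock_lock);
            return NULL;
        }
        pthread_mutex_init(&call->user_mutex, NULL);
        call->id = id;
        pthread_mutex_lock(&call->user_mutex);  /* pin the call first */
        pthread_mutex_unlock(&sock_lock);       /* socket lock no longer needed */
        return call;                            /* caller must unlock user_mutex */
    }

    int main(void)
    {
        struct call *c = begin_call(1);

        if (c) {
            printf("call %d ready\n", c->id);
            pthread_mutex_unlock(&c->user_mutex);
            free(c);
        }
        return 0;
    }
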
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 12be432be9b2..26a7b1db1361 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -467,6 +467,7 @@ struct rxrpc_call {
 	struct rxrpc_connection	*conn;		/* connection carrying call */
 	struct rxrpc_peer	*peer;		/* Peer record for remote address */
 	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
+	struct mutex		user_mutex;	/* User access mutex */
 	ktime_t			ack_at;		/* When deferred ACK needs to happen */
 	ktime_t			resend_at;	/* When next resend needs to happen */
 	ktime_t			ping_at;	/* When next to send a ping */
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 7c4c64ab8da2..0ed181f53f32 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -323,6 +323,8 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
  *
  * If we want to report an error, we mark the skb with the packet type and
  * abort code and return NULL.
+ *
+ * The call is returned with the user access mutex held.
  */
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 					   struct rxrpc_connection *conn,
@@ -371,6 +373,18 @@ found_service:
 	trace_rxrpc_receive(call, rxrpc_receive_incoming,
 			    sp->hdr.serial, sp->hdr.seq);
 
+	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
+	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
+	 * notification is generated.
+	 *
+	 * The BUG should never happen because the kernel should be well
+	 * behaved enough not to access the call before the first notification
+	 * event and userspace is prevented from doing so until the state is
+	 * appropriate.
+	 */
+	if (!mutex_trylock(&call->user_mutex))
+		BUG();
+
 	/* Make the call live. */
 	rxrpc_incoming_call(rx, call, skb);
 	conn = call->conn;
@@ -429,10 +443,12 @@ out:
 /*
  * handle acceptance of a call by userspace
  * - assign the user call ID to the call at the front of the queue
+ * - called with the socket locked.
  */
 struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
 				     unsigned long user_call_ID,
 				     rxrpc_notify_rx_t notify_rx)
+	__releases(&rx->sk.sk_lock.slock)
 {
 	struct rxrpc_call *call;
 	struct rb_node *parent, **pp;
@@ -446,6 +462,7 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
 
 	if (list_empty(&rx->to_be_accepted)) {
 		write_unlock(&rx->call_lock);
+		release_sock(&rx->sk);
 		kleave(" = -ENODATA [empty]");
 		return ERR_PTR(-ENODATA);
 	}
@@ -470,10 +487,39 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
 	 */
 	call = list_entry(rx->to_be_accepted.next,
 			  struct rxrpc_call, accept_link);
+	write_unlock(&rx->call_lock);
+
+	/* We need to gain the mutex from the interrupt handler without
+	 * upsetting lockdep, so we have to release it there and take it here.
+	 * We are, however, still holding the socket lock, so other accepts
+	 * must wait for us and no one can add the user ID behind our backs.
+	 */
+	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
+		release_sock(&rx->sk);
+		kleave(" = -ERESTARTSYS");
+		return ERR_PTR(-ERESTARTSYS);
+	}
+
+	write_lock(&rx->call_lock);
 	list_del_init(&call->accept_link);
 	sk_acceptq_removed(&rx->sk);
 	rxrpc_see_call(call);
 
+	/* Find the user ID insertion point. */
+	pp = &rx->calls.rb_node;
+	parent = NULL;
+	while (*pp) {
+		parent = *pp;
+		call = rb_entry(parent, struct rxrpc_call, sock_node);
+
+		if (user_call_ID < call->user_call_ID)
+			pp = &(*pp)->rb_left;
+		else if (user_call_ID > call->user_call_ID)
+			pp = &(*pp)->rb_right;
+		else
+			BUG();
+	}
+
 	write_lock_bh(&call->state_lock);
 	switch (call->state) {
 	case RXRPC_CALL_SERVER_ACCEPTING:
@@ -499,6 +545,7 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 	rxrpc_notify_socket(call);
 	rxrpc_service_prealloc(rx, GFP_KERNEL);
+	release_sock(&rx->sk);
 	_leave(" = %p{%d}", call, call->debug_id);
 	return call;
 
@@ -515,6 +562,7 @@ id_in_use:
 	write_unlock(&rx->call_lock);
 out:
 	rxrpc_service_prealloc(rx, GFP_KERNEL);
+	release_sock(&rx->sk);
 	_leave(" = %d", ret);
 	return ERR_PTR(ret);
 }
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 8b94db3c9b2e..d79cd36987a9 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -115,6 +115,7 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
 	if (!call->rxtx_annotations)
 		goto nomem_2;
 
+	mutex_init(&call->user_mutex);
 	setup_timer(&call->timer, rxrpc_call_timer_expired,
 		    (unsigned long)call);
 	INIT_WORK(&call->processor, &rxrpc_process_call);
@@ -194,14 +195,16 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call)
 }
 
 /*
- * set up a call for the given data
- * - called in process context with IRQs enabled
+ * Set up a call for the given parameters.
+ * - Called with the socket lock held, which it must release.
+ * - If it returns a call, the call's lock will need releasing by the caller.
  */
 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 					 struct rxrpc_conn_parameters *cp,
 					 struct sockaddr_rxrpc *srx,
 					 unsigned long user_call_ID,
 					 gfp_t gfp)
+	__releases(&rx->sk.sk_lock.slock)
 {
 	struct rxrpc_call *call, *xcall;
 	struct rb_node *parent, **pp;
@@ -212,6 +215,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 
 	call = rxrpc_alloc_client_call(srx, gfp);
 	if (IS_ERR(call)) {
+		release_sock(&rx->sk);
 		_leave(" = %ld", PTR_ERR(call));
 		return call;
 	}
@@ -219,6 +223,11 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
 			 here, (const void *)user_call_ID);
 
+	/* We need to protect a partially set up call against the user as we
+	 * will be acting outside the socket lock.
+	 */
+	mutex_lock(&call->user_mutex);
+
 	/* Publish the call, even though it is incompletely set up as yet */
 	write_lock(&rx->call_lock);
 
@@ -250,6 +259,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	list_add_tail(&call->link, &rxrpc_calls);
 	write_unlock(&rxrpc_call_lock);
 
+	/* From this point on, the call is protected by its own lock. */
+	release_sock(&rx->sk);
+
 	/* Set up or get a connection record and set the protocol parameters,
 	 * including channel number and call ID.
 	 */
@@ -279,6 +291,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	 */
 error_dup_user_ID:
 	write_unlock(&rx->call_lock);
+	release_sock(&rx->sk);
 	ret = -EEXIST;
 
 error:
@@ -287,6 +300,7 @@ error:
 	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
 			 here, ERR_PTR(ret));
 	rxrpc_release_call(rx, call);
+	mutex_unlock(&call->user_mutex);
 	rxrpc_put_call(call, rxrpc_call_put);
 	_leave(" = %d", ret);
 	return ERR_PTR(ret);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 78ec33477adf..9f4cfa25af7c 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1194,6 +1194,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
 			goto reject_packet;
 		}
 		rxrpc_send_ping(call, skb, skew);
+		mutex_unlock(&call->user_mutex);
 	}
 
 	rxrpc_input_call_packet(call, skb, skew);
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 28274a3c9831..6491ca46a03f 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -489,6 +489,20 @@ try_again:
 
 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);
 
+	/* We're going to drop the socket lock, so we need to lock the call
+	 * against interference by sendmsg.
+	 */
+	if (!mutex_trylock(&call->user_mutex)) {
+		ret = -EWOULDBLOCK;
+		if (flags & MSG_DONTWAIT)
+			goto error_requeue_call;
+		ret = -ERESTARTSYS;
+		if (mutex_lock_interruptible(&call->user_mutex) < 0)
+			goto error_requeue_call;
+	}
+
+	release_sock(&rx->sk);
+
 	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
 		BUG();
 
@@ -504,7 +518,7 @@ try_again:
 				       &call->user_call_ID);
 		}
 		if (ret < 0)
-			goto error;
+			goto error_unlock_call;
 	}
 
 	if (msg->msg_name) {
@@ -535,12 +549,12 @@ try_again:
 	}
 
 	if (ret < 0)
-		goto error;
+		goto error_unlock_call;
 
 	if (call->state == RXRPC_CALL_COMPLETE) {
 		ret = rxrpc_recvmsg_term(call, msg);
 		if (ret < 0)
-			goto error;
+			goto error_unlock_call;
 		if (!(flags & MSG_PEEK))
 			rxrpc_release_call(rx, call);
 		msg->msg_flags |= MSG_EOR;
@@ -553,8 +567,21 @@ try_again:
 	msg->msg_flags &= ~MSG_MORE;
 	ret = copied;
 
-error:
+error_unlock_call:
+	mutex_unlock(&call->user_mutex);
 	rxrpc_put_call(call, rxrpc_call_put);
+	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
+	return ret;
+
+error_requeue_call:
+	if (!(flags & MSG_PEEK)) {
+		write_lock_bh(&rx->recvmsg_lock);
+		list_add(&call->recvmsg_link, &rx->recvmsg_q);
+		write_unlock_bh(&rx->recvmsg_lock);
+		trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0);
+	} else {
+		rxrpc_put_call(call, rxrpc_call_put);
+	}
 error_no_call:
 	release_sock(&rx->sk);
 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
@@ -611,7 +638,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
 	iov.iov_len = size - *_offset;
 	iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, size - *_offset);
 
-	lock_sock(sock->sk);
+	mutex_lock(&call->user_mutex);
 
 	switch (call->state) {
 	case RXRPC_CALL_CLIENT_RECV_REPLY:
@@ -650,7 +677,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
 read_phase_complete:
 	ret = 1;
 out:
-	release_sock(sock->sk);
+	mutex_unlock(&call->user_mutex);
 	_leave(" = %d [%zu,%d]", ret, *_offset, *_abort);
 	return ret;
 
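
rxrpc_recvmsg() now takes the call mutex before dropping the socket lock: it first tries the mutex without sleeping, fails fast with -EWOULDBLOCK for MSG_DONTWAIT callers, and otherwise falls back to an interruptible wait, re-queuing the call if that wait is interrupted. A small pthread sketch of just the try-then-wait decision (pthread mutexes are not signal-interruptible, so only the control flow is mirrored; names are illustrative):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t call_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Non-blocking callers fail fast on contention; blocking callers wait. */
    static int lock_call(bool dontwait)
    {
        if (pthread_mutex_trylock(&call_mutex) == 0)
            return 0;                   /* got it without sleeping */
        if (dontwait)
            return -EWOULDBLOCK;        /* MSG_DONTWAIT behaviour */
        return pthread_mutex_lock(&call_mutex) ? -EINTR : 0;
    }

    int main(void)
    {
        int ret = lock_call(false);

        printf("lock_call() = %d\n", ret);
        if (ret == 0)
            pthread_mutex_unlock(&call_mutex);
        return 0;
    }
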
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 19b36c60fb4c..bc2d3dcff9de 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -61,9 +61,12 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
 		}
 
 		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
-		release_sock(&rx->sk);
+		mutex_unlock(&call->user_mutex);
 		*timeo = schedule_timeout(*timeo);
-		lock_sock(&rx->sk);
+		if (mutex_lock_interruptible(&call->user_mutex) < 0) {
+			ret = sock_intr_errno(*timeo);
+			break;
+		}
 	}
 
 	remove_wait_queue(&call->waitq, &myself);
@@ -173,7 +176,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
 /*
  * send data through a socket
  * - must be called in process context
- * - caller holds the socket locked
+ * - The caller holds the call user access mutex, but not the socket lock.
  */
 static int rxrpc_send_data(struct rxrpc_sock *rx,
 			   struct rxrpc_call *call,
@@ -439,10 +442,13 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
 
 /*
  * Create a new client call for sendmsg().
+ * - Called with the socket lock held, which it must release.
+ * - If it returns a call, the call's lock will need releasing by the caller.
  */
 static struct rxrpc_call *
 rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 				  unsigned long user_call_ID, bool exclusive)
+	__releases(&rx->sk.sk_lock.slock)
 {
 	struct rxrpc_conn_parameters cp;
 	struct rxrpc_call *call;
@@ -452,8 +458,10 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 
 	_enter("");
 
-	if (!msg->msg_name)
+	if (!msg->msg_name) {
+		release_sock(&rx->sk);
 		return ERR_PTR(-EDESTADDRREQ);
+	}
 
 	key = rx->key;
 	if (key && !rx->key->payload.data[0])
@@ -466,6 +474,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 	cp.exclusive = rx->exclusive | exclusive;
 	cp.service_id = srx->srx_service;
 	call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL);
+	/* The socket is now unlocked */
 
 	_leave(" = %p\n", call);
 	return call;
@@ -477,6 +486,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
  * - the socket may be either a client socket or a server socket
  */
 int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+	__releases(&rx->sk.sk_lock.slock)
 {
 	enum rxrpc_command cmd;
 	struct rxrpc_call *call;
@@ -490,12 +500,14 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 	ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code,
 				 &exclusive);
 	if (ret < 0)
-		return ret;
+		goto error_release_sock;
 
 	if (cmd == RXRPC_CMD_ACCEPT) {
+		ret = -EINVAL;
 		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
-			return -EINVAL;
+			goto error_release_sock;
 		call = rxrpc_accept_call(rx, user_call_ID, NULL);
+		/* The socket is now unlocked. */
 		if (IS_ERR(call))
 			return PTR_ERR(call);
 		rxrpc_put_call(call, rxrpc_call_put);
@@ -504,12 +516,30 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 
 	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 	if (!call) {
+		ret = -EBADSLT;
 		if (cmd != RXRPC_CMD_SEND_DATA)
-			return -EBADSLT;
+			goto error_release_sock;
 		call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID,
 							 exclusive);
+		/* The socket is now unlocked... */
 		if (IS_ERR(call))
 			return PTR_ERR(call);
+		/* ... and we have the call lock. */
+	} else {
+		ret = -EBUSY;
+		if (call->state == RXRPC_CALL_UNINITIALISED ||
+		    call->state == RXRPC_CALL_CLIENT_AWAIT_CONN ||
+		    call->state == RXRPC_CALL_SERVER_PREALLOC ||
+		    call->state == RXRPC_CALL_SERVER_SECURING ||
+		    call->state == RXRPC_CALL_SERVER_ACCEPTING)
+			goto error_release_sock;
+
+		ret = mutex_lock_interruptible(&call->user_mutex);
+		release_sock(&rx->sk);
+		if (ret < 0) {
+			ret = -ERESTARTSYS;
+			goto error_put;
+		}
 	}
 
 	_debug("CALL %d USR %lx ST %d on CONN %p",
@@ -537,9 +567,15 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 		ret = rxrpc_send_data(rx, call, msg, len);
 	}
 
+	mutex_unlock(&call->user_mutex);
+error_put:
 	rxrpc_put_call(call, rxrpc_call_put);
 	_leave(" = %d", ret);
 	return ret;
+
+error_release_sock:
+	release_sock(&rx->sk);
+	return ret;
 }
 
 /**
@@ -564,7 +600,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
 	ASSERTCMP(msg->msg_name, ==, NULL);
 	ASSERTCMP(msg->msg_control, ==, NULL);
 
-	lock_sock(sock->sk);
+	mutex_lock(&call->user_mutex);
 
 	_debug("CALL %d USR %lx ST %d on CONN %p",
 	       call->debug_id, call->user_call_ID, call->state, call->conn);
@@ -579,7 +615,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
 		ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len);
 	}
 
-	release_sock(sock->sk);
+	mutex_unlock(&call->user_mutex);
 	_leave(" = %d", ret);
 	return ret;
 }
@@ -600,12 +636,12 @@ void rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
 {
 	_enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);
 
-	lock_sock(sock->sk);
+	mutex_lock(&call->user_mutex);
 
 	if (rxrpc_abort_call(why, call, 0, abort_code, error))
 		rxrpc_send_abort_packet(call);
 
-	release_sock(sock->sk);
+	mutex_unlock(&call->user_mutex);
 	_leave("");
 }
 
diff --git a/net/sctp/input.c b/net/sctp/input.c
index fc458968fe4b..2a28ab20487f 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -884,14 +884,17 @@ int sctp_hash_transport(struct sctp_transport *t)
 	arg.paddr = &t->ipaddr;
 	arg.lport = htons(t->asoc->base.bind_addr.port);
 
+	rcu_read_lock();
 	list = rhltable_lookup(&sctp_transport_hashtable, &arg,
 			       sctp_hash_params);
 
 	rhl_for_each_entry_rcu(transport, tmp, list, node)
 		if (transport->asoc->ep == t->asoc->ep) {
+			rcu_read_unlock();
 			err = -EEXIST;
 			goto out;
 		}
+	rcu_read_unlock();
 
 	err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
 				  &t->node, sctp_hash_params);
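
The fix above brackets the rhltable lookup and walk with rcu_read_lock()/rcu_read_unlock(), including the early -EEXIST exit. The same rule applies to any RCU-protected traversal; a generic kernel-style sketch (not compilable on its own; "head", "key" and matches() are placeholders):

    rcu_read_lock();
    obj = rcu_dereference(head);            /* RCU-protected pointer */
    while (obj) {
        if (matches(obj, key)) {            /* placeholder predicate */
            rcu_read_unlock();              /* unlock on every exit path */
            return -EEXIST;
        }
        obj = rcu_dereference(obj->next);
    }
    rcu_read_unlock();
    return 0;
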
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 41adf362936d..b5c279b22680 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -504,6 +504,7 @@ static int __init strp_mod_init(void)
 
 static void __exit strp_mod_exit(void)
 {
+	destroy_workqueue(strp_wq);
 }
 module_init(strp_mod_init);
 module_exit(strp_mod_exit);
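
The strparser fix pairs the workqueue created in module init with destroy_workqueue() in module exit; without it the workqueue would leak across module unload. A minimal module sketch of the pairing (kernel-style, only meaningful when built against a kernel tree; "demo_wq" is illustrative):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;

    static int __init demo_init(void)
    {
        demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND, 0);
        if (!demo_wq)
            return -ENOMEM;
        return 0;
    }

    static void __exit demo_exit(void)
    {
        /* Mirror of the strparser fix: tear down what init created. */
        destroy_workqueue(demo_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");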