author		Linus Torvalds <torvalds@linux-foundation.org>	2018-03-05 14:29:24 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-03-05 14:29:24 -0500
commit		547046141f44dba075207fd343e3e032e129c9ac (patch)
tree		3979961d838def5efa9f3835d19e05d60b3b4d88 /drivers
parent		661e50bc853209e41a5c14a290ca4decc43cbfd1 (diff)
parent		a7f0fb1bfb66ded5d556d6723d691b77a7146b6f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Use an appropriate TSQ pacing shift in mac80211, from Toke
    Høiland-Jørgensen.

 2) Just like ipv4's ip_route_me_harder(), we have to use skb_to_full_sk
    in ip6_route_me_harder, from Eric Dumazet.

 3) Fix several shutdown races and similar other problems in l2tp, from
    James Chapman.

 4) Handle missing XDP flush properly in tuntap, for real this time.
    From Jason Wang.

 5) Out-of-bounds access in powerpc ebpf tailcalls, from Daniel
    Borkmann.

 6) Fix phy_resume() locking, from Andrew Lunn.

 7) IFLA_MTU values are ignored on newlink for some tunnel types, fix
    from Xin Long.

 8) Revert F-RTO middle box workarounds, they only handle one dimension
    of the problem. From Yuchung Cheng.

 9) Fix socket refcounting in RDS, from Ka-Cheong Poon.

10) Don't allow ppp unit registration to an unregistered channel, from
    Guillaume Nault.

11) Various hv_netvsc fixes from Stephen Hemminger.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (98 commits)
  hv_netvsc: propagate rx filters to VF
  hv_netvsc: filter multicast/broadcast
  hv_netvsc: defer queue selection to VF
  hv_netvsc: use napi_schedule_irqoff
  hv_netvsc: fix race in napi poll when rescheduling
  hv_netvsc: cancel subchannel setup before halting device
  hv_netvsc: fix error unwind handling if vmbus_open fails
  hv_netvsc: only wake transmit queue if link is up
  hv_netvsc: avoid retry on send during shutdown
  virtio-net: re enable XDP_REDIRECT for mergeable buffer
  ppp: prevent unregistered channels from connecting to PPP units
  tc-testing: skbmod: fix match value of ethertype
  mlxsw: spectrum_switchdev: Check success of FDB add operation
  net: make skb_gso_*_seglen functions private
  net: xfrm: use skb_gso_validate_network_len() to check gso sizes
  net: sched: tbf: handle GSO_BY_FRAGS case in enqueue
  net: rename skb_gso_validate_mtu -> skb_gso_validate_network_len
  rds: Incorrect reference counting in TCP socket creation
  net: ethtool: don't ignore return from driver get_fecparam method
  vrf: check forwarding on the original netdevice when generating ICMP dest unreachable
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/bluetooth/btusb.c                                  |  25
-rw-r--r--  drivers/bluetooth/hci_bcm.c                                |   7
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c                   |   7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c              |   8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h   |  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c             |  83
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h             |   1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c         |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c   |  29
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c                      |  11
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h                      |  11
-rw-r--r--  drivers/net/hyperv/netvsc.c                                |  33
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c                            |  62
-rw-r--r--  drivers/net/hyperv/rndis_filter.c                          |  23
-rw-r--r--  drivers/net/phy/phy.c                                      |   2
-rw-r--r--  drivers/net/phy/phy_device.c                               |  18
-rw-r--r--  drivers/net/ppp/ppp_generic.c                              |   9
-rw-r--r--  drivers/net/tun.c                                          |  22
-rw-r--r--  drivers/net/usb/cdc_ether.c                                |   6
-rw-r--r--  drivers/net/usb/r8152.c                                    |   2
-rw-r--r--  drivers/net/virtio_net.c                                   |  62
-rw-r--r--  drivers/net/wan/hdlc_ppp.c                                 |   5
-rw-r--r--  drivers/s390/net/qeth_core_main.c                          |  29
-rw-r--r--  drivers/s390/net/qeth_l3.h                                 |  34
-rw-r--r--  drivers/s390/net/qeth_l3_main.c                            | 123
25 files changed, 398 insertions(+), 236 deletions(-)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2a55380ad730..60bf04b8f103 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,6 +21,7 @@
  *
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/usb/quirks.h>
@@ -379,6 +380,21 @@ static const struct usb_device_id blacklist_table[] = {
 	{ }	/* Terminating entry */
 };
 
+/* The Bluetooth USB module build into some devices needs to be reset on resume,
+ * this is a problem with the platform (likely shutting off all power) not with
+ * the module itself. So we use a DMI list to match known broken platforms.
+ */
+static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+	{
+		/* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+		},
+	},
+	{}
+};
+
 #define BTUSB_MAX_ISOC_FRAMES	10
 
 #define BTUSB_INTR_RUNNING	0
@@ -2945,6 +2961,9 @@ static int btusb_probe(struct usb_interface *intf,
 	hdev->send = btusb_send_frame;
 	hdev->notify = btusb_notify;
 
+	if (dmi_check_system(btusb_needs_reset_resume_table))
+		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+
 #ifdef CONFIG_PM
 	err = btusb_config_oob_wake(hdev);
 	if (err)
@@ -3031,12 +3050,6 @@ static int btusb_probe(struct usb_interface *intf,
 	if (id->driver_info & BTUSB_QCA_ROME) {
 		data->setup_on_usb = btusb_setup_qca;
 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
-
-		/* QCA Rome devices lose their updated firmware over suspend,
-		 * but the USB hub doesn't notice any status change.
-		 * explicitly request a device reset on resume.
-		 */
-		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
 	}
 
 #ifdef CONFIG_BT_HCIBTUSB_RTL
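
The change above narrows the reset-resume quirk from "every QCA Rome adapter" to a DMI table of known-broken platforms, the standard data-driven way to key a quirk off system identity. As a rough, self-contained model of that table-matching pattern (plain C with hypothetical names and strings, not the kernel's dmi_check_system()):

#include <stdio.h>
#include <string.h>

/* Userspace model of a DMI-style quirk table: each entry lists the
 * identity strings a platform must match; the first full match wins. */
struct quirk_entry {
	const char *vendor;   /* stands in for DMI_SYS_VENDOR */
	const char *product;  /* stands in for DMI_PRODUCT_VERSION */
};

static const struct quirk_entry needs_reset_resume[] = {
	{ "LENOVO", "Lenovo YOGA 920" },
	{ NULL, NULL }        /* terminator, like the kernel's {} */
};

static int quirk_match(const struct quirk_entry *table,
		       const char *vendor, const char *product)
{
	for (; table->vendor; table++)
		if (!strcmp(table->vendor, vendor) &&
		    !strcmp(table->product, product))
			return 1;
	return 0;
}

int main(void)
{
	/* Pretend these were read from the firmware's SMBIOS data. */
	if (quirk_match(needs_reset_resume, "LENOVO", "Lenovo YOGA 920"))
		printf("apply USB_QUIRK_RESET_RESUME\n");
	return 0;
}
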
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 0438a64b8185..6314dfb02969 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -922,12 +922,13 @@ static int bcm_get_resources(struct bcm_device *dev)
 
 	dev->clk = devm_clk_get(dev->dev, NULL);
 
-	dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
-					    GPIOD_OUT_LOW);
+	dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
+						     GPIOD_OUT_LOW);
 	if (IS_ERR(dev->device_wakeup))
 		return PTR_ERR(dev->device_wakeup);
 
-	dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
+	dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
+						GPIOD_OUT_LOW);
 	if (IS_ERR(dev->shutdown))
 		return PTR_ERR(dev->shutdown);
 
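
Switching to the *_optional getters matters because these GPIOs may simply not be wired up on a given board: the optional variant reports absence as NULL (which later gpiod calls treat as a no-op) and reserves error pointers for genuine failures. A small self-contained model of that NULL-vs-ERR_PTR convention (plain C, hypothetical names, local re-definitions of the kernel macros):

#include <stdio.h>
#include <errno.h>

/* Userspace model of the kernel's ERR_PTR/IS_ERR convention. */
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-4095)

struct gpio_desc { const char *name; };

/* Model of an "optional" getter: NULL means "not present on this
 * board" (fine), ERR_PTR() means a real lookup failure. */
static struct gpio_desc *get_gpio_optional(const char *name, int present,
					   int fail)
{
	static struct gpio_desc d;

	if (fail)
		return ERR_PTR(-ENODEV);
	if (!present)
		return NULL;
	d.name = name;
	return &d;
}

int main(void)
{
	struct gpio_desc *shutdown = get_gpio_optional("shutdown", 0, 0);

	if (IS_ERR(shutdown))
		return (int)-PTR_ERR(shutdown); /* real error: abort probe */
	if (!shutdown)
		printf("no shutdown GPIO on this board, continuing\n");
	return 0;
}
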
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index f5c87bd35fa1..f27f9bae1a4a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3063,9 +3063,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
 	if (ndev->features & NETIF_F_RXCSUM)
 		gfar_rx_checksum(skb, fcb);
 
-	/* Tell the skb what kind of packet this is */
-	skb->protocol = eth_type_trans(skb, ndev);
-
 	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
 	 * Even if vlan rx accel is disabled, on some chips
 	 * RXFCB_VLN is pseudo randomly set.
@@ -3136,13 +3133,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			continue;
 		}
 
+		gfar_process_frame(ndev, skb);
+
 		/* Increment the number of packets */
 		total_pkts++;
 		total_bytes += skb->len;
 
 		skb_record_rx_queue(skb, rx_queue->qindex);
 
-		gfar_process_frame(ndev, skb);
+		skb->protocol = eth_type_trans(skb, ndev);
 
 		/* Send the packet up the stack */
 		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0da5aa2c8aba..9fc063af233c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1888,6 +1888,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 				     ixgbe_rx_pg_size(rx_ring),
 				     DMA_FROM_DEVICE,
 				     IXGBE_RX_DMA_ATTR);
+	} else if (ring_uses_build_skb(rx_ring)) {
+		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      IXGBE_CB(skb)->dma,
+					      offset,
+					      skb_headlen(skb),
+					      DMA_FROM_DEVICE);
 	} else {
 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index f6963b0b4a55..122506daa586 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
 	MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
 	MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
 	MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
-	MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8),
-	MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2),
-	MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6),
-	MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
-	MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
-	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
-	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8),
-	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8),
-	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8),
 	MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
 	MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
+	MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
+	MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
+	MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
+	MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
+	MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
+	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
+	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
+	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
+	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
 };
 
-#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38
+#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
 
 struct mlxsw_afk_element_inst { /* element instance in actual block */
 	const struct mlxsw_afk_element_info *info;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3dcc58d61506..c7e941aecc2a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1459,6 +1459,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 	}
 
 	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
+	mlxsw_sp_port_vlan->ref_count = 1;
 	mlxsw_sp_port_vlan->vid = vid;
 	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
 
@@ -1486,8 +1487,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
-	if (mlxsw_sp_port_vlan)
+	if (mlxsw_sp_port_vlan) {
+		mlxsw_sp_port_vlan->ref_count++;
 		return mlxsw_sp_port_vlan;
+	}
 
 	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
 }
@@ -1496,6 +1499,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
 {
 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
 
+	if (--mlxsw_sp_port_vlan->ref_count != 0)
+		return;
+
 	if (mlxsw_sp_port_vlan->bridge_port)
 		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
 	else if (fid)
@@ -4207,13 +4213,12 @@ static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
 	.size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
 };
 
-static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
-static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
-static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
-static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
-
 static void
-mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
+				      struct devlink_resource_size_params *kvd_size_params,
+				      struct devlink_resource_size_params *linear_size_params,
+				      struct devlink_resource_size_params *hash_double_size_params,
+				      struct devlink_resource_size_params *hash_single_size_params)
 {
 	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
 						 KVD_SINGLE_MIN_SIZE);
@@ -4222,37 +4227,35 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
 	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
 	u32 linear_size_min = 0;
 
-	/* KVD top resource */
-	mlxsw_sp_kvd_size_params.size_min = kvd_size;
-	mlxsw_sp_kvd_size_params.size_max = kvd_size;
-	mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-	mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
-	/* Linear part init */
-	mlxsw_sp_linear_size_params.size_min = linear_size_min;
-	mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
-					       double_size_min;
-	mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-	mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
-	/* Hash double part init */
-	mlxsw_sp_hash_double_size_params.size_min = double_size_min;
-	mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
-						    linear_size_min;
-	mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-	mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
-	/* Hash single part init */
-	mlxsw_sp_hash_single_size_params.size_min = single_size_min;
-	mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
-						    linear_size_min;
-	mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-	mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
+					  MLXSW_SP_KVD_GRANULARITY,
+					  DEVLINK_RESOURCE_UNIT_ENTRY);
+	devlink_resource_size_params_init(linear_size_params, linear_size_min,
+					  kvd_size - single_size_min -
+					  double_size_min,
+					  MLXSW_SP_KVD_GRANULARITY,
+					  DEVLINK_RESOURCE_UNIT_ENTRY);
+	devlink_resource_size_params_init(hash_double_size_params,
+					  double_size_min,
+					  kvd_size - single_size_min -
+					  linear_size_min,
+					  MLXSW_SP_KVD_GRANULARITY,
+					  DEVLINK_RESOURCE_UNIT_ENTRY);
+	devlink_resource_size_params_init(hash_single_size_params,
+					  single_size_min,
+					  kvd_size - double_size_min -
+					  linear_size_min,
+					  MLXSW_SP_KVD_GRANULARITY,
+					  DEVLINK_RESOURCE_UNIT_ENTRY);
 }
 
 static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 {
 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
+	struct devlink_resource_size_params hash_single_size_params;
+	struct devlink_resource_size_params hash_double_size_params;
+	struct devlink_resource_size_params linear_size_params;
+	struct devlink_resource_size_params kvd_size_params;
 	u32 kvd_size, single_size, double_size, linear_size;
 	const struct mlxsw_config_profile *profile;
 	int err;
@@ -4261,13 +4264,17 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
 		return -EIO;
 
-	mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
+					      &linear_size_params,
+					      &hash_double_size_params,
+					      &hash_single_size_params);
+
 	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
 					true, kvd_size,
 					MLXSW_SP_RESOURCE_KVD,
 					DEVLINK_RESOURCE_ID_PARENT_TOP,
-					&mlxsw_sp_kvd_size_params,
+					&kvd_size_params,
 					&mlxsw_sp_resource_kvd_ops);
 	if (err)
 		return err;
@@ -4277,7 +4284,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 					false, linear_size,
 					MLXSW_SP_RESOURCE_KVD_LINEAR,
 					MLXSW_SP_RESOURCE_KVD,
-					&mlxsw_sp_linear_size_params,
+					&linear_size_params,
 					&mlxsw_sp_resource_kvd_linear_ops);
 	if (err)
 		return err;
@@ -4291,7 +4298,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 					false, double_size,
 					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
 					MLXSW_SP_RESOURCE_KVD,
-					&mlxsw_sp_hash_double_size_params,
+					&hash_double_size_params,
 					&mlxsw_sp_resource_kvd_hash_double_ops);
 	if (err)
 		return err;
@@ -4301,7 +4308,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 					false, single_size,
 					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
 					MLXSW_SP_RESOURCE_KVD,
-					&mlxsw_sp_hash_single_size_params,
+					&hash_single_size_params,
 					&mlxsw_sp_resource_kvd_hash_single_ops);
 	if (err)
 		return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index bdd8f94a452c..4ec1ca3c96c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -211,6 +211,7 @@ struct mlxsw_sp_port_vlan {
 	struct list_head list;
 	struct mlxsw_sp_port *mlxsw_sp_port;
 	struct mlxsw_sp_fid *fid;
+	unsigned int ref_count;
 	u16 vid;
 	struct mlxsw_sp_bridge_port *bridge_port;
 	struct list_head bridge_vlan_node;
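
The ref_count wired through spectrum.c and declared above is the classic get/put fix: several callers may "get" the same port-VLAN, creation hands out the first reference, get on an existing object only bumps the count, and destruction happens solely on the last put. A minimal userspace sketch of that idiom (hypothetical names, single-threaded so no locking shown):

#include <stdio.h>
#include <stdlib.h>

struct port_vlan {
	unsigned int ref_count;
	unsigned short vid;
};

/* First user creates the object with one reference held. */
static struct port_vlan *port_vlan_create(unsigned short vid)
{
	struct port_vlan *pv = calloc(1, sizeof(*pv));

	if (!pv)
		return NULL;
	pv->ref_count = 1;
	pv->vid = vid;
	return pv;
}

/* Later users share the same object and just bump the count. */
static struct port_vlan *port_vlan_get(struct port_vlan *existing,
				       unsigned short vid)
{
	if (existing) {
		existing->ref_count++;
		return existing;
	}
	return port_vlan_create(vid);
}

/* Freeing only happens when the last reference is dropped. */
static void port_vlan_put(struct port_vlan *pv)
{
	if (--pv->ref_count != 0)
		return;
	printf("vid %u: last reference dropped, freeing\n", pv->vid);
	free(pv);
}

int main(void)
{
	struct port_vlan *a = port_vlan_get(NULL, 10); /* creates */
	struct port_vlan *b = port_vlan_get(a, 10);    /* shares  */

	port_vlan_put(b);  /* still one user left */
	port_vlan_put(a);  /* frees here          */
	return 0;
}
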
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index bbd238e50f05..54262af4e98f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
 	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
 	[MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
 	[MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
 };
 
 static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
 	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
-	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
 };
 
 static const int *mlxsw_sp_packet_type_sfgc_types[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 593ad31be749..161bcdc012f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 			     bool dynamic)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
 			      mac, fid, action, local_port);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-	kfree(sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
 
+out:
+	kfree(sfd_pl);
 	return err;
 }
 
@@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 				       bool adding, bool dynamic)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
 				  lag_vid, lag_id);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-	kfree(sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
 
+out:
+	kfree(sfd_pl);
 	return err;
 }
 
@@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
 				u16 fid, u16 mid_idx, bool adding)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
+
+out:
 	kfree(sfd_pl);
 	return err;
 }
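
All three helpers now share one write-and-verify idiom: snapshot the number of records packed into the request, issue the register write, and treat a mismatch in the count the device echoes back as -EBUSY instead of silently pretending the FDB add worked. A rough userspace model of the idiom (hypothetical device mock, not the mlxsw register API):

#include <stdio.h>
#include <errno.h>

struct sfd_request {
	unsigned char num_rec;  /* records we ask the device to process */
	/* ... packed records would follow ... */
};

/* Mock register write: the device overwrites num_rec with how many
 * records it actually consumed (a busy device may consume fewer). */
static int reg_write(struct sfd_request *req, unsigned char consumed)
{
	req->num_rec = consumed;
	return 0;               /* the transport itself succeeded */
}

static int fdb_add(unsigned char want, unsigned char device_consumes)
{
	struct sfd_request req = { .num_rec = want };
	unsigned char num_rec = req.num_rec;   /* snapshot before write */
	int err;

	err = reg_write(&req, device_consumes);
	if (err)
		return err;

	/* Transport OK but fewer records processed than requested:
	 * surface it to the caller rather than losing the entry. */
	if (num_rec != req.num_rec)
		return -EBUSY;
	return 0;
}

int main(void)
{
	printf("full add: %d\n", fdb_add(1, 1)); /* 0      */
	printf("busy add: %d\n", fdb_add(1, 0)); /* -EBUSY */
	return 0;
}
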
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 92dcf8717fc6..14c839bb09e7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -439,6 +439,17 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
 			     enum_index);
 }
 
+static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
+			     int enum_index)
+{
+	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
+static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
+{
+	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
 	return mdp->reg_offset == sh_eth_offset_gigabit;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a6753ccba711..e5fe70134690 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -567,15 +567,4 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
 	return mdp->tsu_addr + mdp->reg_offset[enum_index];
 }
 
-static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
-				    int enum_index)
-{
-	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
-}
-
-static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
-{
-	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
-}
-
 #endif	/* #ifndef __SH_ETH_H__ */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 17e529af79dc..0265d703eb03 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -852,13 +852,6 @@ int netvsc_send(struct net_device *ndev,
 	if (unlikely(!net_device || net_device->destroy))
 		return -ENODEV;
 
-	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
-	 * here before the negotiation with the host is finished and
-	 * send_section_map may not be allocated yet.
-	 */
-	if (unlikely(!net_device->send_section_map))
-		return -EAGAIN;
-
 	nvchan = &net_device->chan_table[packet->q_idx];
 	packet->send_buf_index = NETVSC_INVALID_INDEX;
 	packet->cp_partial = false;
@@ -866,10 +859,8 @@ int netvsc_send(struct net_device *ndev,
 	/* Send control message directly without accessing msd (Multi-Send
 	 * Data) field which may be changed during data packet processing.
 	 */
-	if (!skb) {
-		cur_send = packet;
-		goto send_now;
-	}
+	if (!skb)
+		return netvsc_send_pkt(device, packet, net_device, pb, skb);
 
 	/* batch packets in send buffer if possible */
 	msdp = &nvchan->msd;
@@ -953,7 +944,6 @@ int netvsc_send(struct net_device *ndev,
 		}
 	}
 
-send_now:
 	if (cur_send)
 		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
 
@@ -1217,9 +1207,10 @@ int netvsc_poll(struct napi_struct *napi, int budget)
 	if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
 	    work_done < budget &&
 	    napi_complete_done(napi, work_done) &&
-	    hv_end_read(&channel->inbound)) {
+	    hv_end_read(&channel->inbound) &&
+	    napi_schedule_prep(napi)) {
 		hv_begin_read(&channel->inbound);
-		napi_reschedule(napi);
+		__napi_schedule(napi);
 	}
 
 	/* Driver may overshoot since multiple packets per descriptor */
@@ -1242,7 +1233,7 @@ void netvsc_channel_cb(void *context)
 		/* disable interupts from host */
 		hv_begin_read(rbi);
 
-		__napi_schedule(&nvchan->napi);
+		__napi_schedule_irqoff(&nvchan->napi);
 	}
 }
 
@@ -1296,7 +1287,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 			 netvsc_channel_cb, net_device->chan_table);
 
 	if (ret != 0) {
-		netif_napi_del(&net_device->chan_table[0].napi);
 		netdev_err(ndev, "unable to open channel: %d\n", ret);
 		goto cleanup;
 	}
@@ -1306,11 +1296,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 
 	napi_enable(&net_device->chan_table[0].napi);
 
-	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
-	 * populated.
-	 */
-	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
-
 	/* Connect with the NetVsp */
 	ret = netvsc_connect_vsp(device, net_device, device_info);
 	if (ret != 0) {
@@ -1319,6 +1304,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 		goto close;
 	}
 
+	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
+	 * populated.
+	 */
+	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
+
 	return net_device;
 
 close:
@@ -1329,6 +1319,7 @@ close:
 	vmbus_close(device->channel);
 
 cleanup:
+	netif_napi_del(&net_device->chan_table[0].napi);
 	free_netvsc_device(&net_device->rcu);
 
 	return ERR_PTR(ret);
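
Moving the rcu_assign_pointer() after NetVSP negotiation is the init-before-publish rule: the nvdev pointer is how senders find the device, so it must not become visible until setup is complete, which also lets netvsc_send() drop its send_section_map workaround. A toy model with C11 atomics standing in for rcu_assign_pointer/rcu_dereference (hypothetical names, a sketch rather than the driver's code):

#include <stdatomic.h>
#include <stdio.h>

struct netdev_state {
	int negotiated;          /* filled in during setup */
};

/* Readers find the device only through this pointer. */
static _Atomic(struct netdev_state *) published;

static void device_add(struct netdev_state *st)
{
	st->negotiated = 1;      /* finish ALL setup first... */

	/* ...then publish with release semantics, the moral equivalent
	 * of rcu_assign_pointer(): a reader that sees the pointer also
	 * sees the fully initialized object behind it. */
	atomic_store_explicit(&published, st, memory_order_release);
}

static int device_send(void)
{
	struct netdev_state *st =
		atomic_load_explicit(&published, memory_order_acquire);

	if (!st)
		return -1;       /* not published yet; caller must cope */
	return st->negotiated ? 0 : -1;
}

int main(void)
{
	struct netdev_state st = { 0 };

	printf("before publish: %d\n", device_send()); /* -1 */
	device_add(&st);
	printf("after publish:  %d\n", device_send()); /* 0  */
	return 0;
}
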
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c5584c2d440e..cdb78eefab67 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -66,10 +66,36 @@ static int debug = -1;
 module_param(debug, int, S_IRUGO);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
-static void netvsc_set_multicast_list(struct net_device *net)
+static void netvsc_change_rx_flags(struct net_device *net, int change)
 {
-	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+	struct net_device_context *ndev_ctx = netdev_priv(net);
+	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+	int inc;
+
+	if (!vf_netdev)
+		return;
+
+	if (change & IFF_PROMISC) {
+		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
+		dev_set_promiscuity(vf_netdev, inc);
+	}
+
+	if (change & IFF_ALLMULTI) {
+		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
+		dev_set_allmulti(vf_netdev, inc);
+	}
+}
+
+static void netvsc_set_rx_mode(struct net_device *net)
+{
+	struct net_device_context *ndev_ctx = netdev_priv(net);
+	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
+
+	if (vf_netdev) {
+		dev_uc_sync(vf_netdev, net);
+		dev_mc_sync(vf_netdev, net);
+	}
 
 	rndis_filter_update(nvdev);
 }
@@ -91,12 +117,11 @@ static int netvsc_open(struct net_device *net)
 		return ret;
 	}
 
-	netif_tx_wake_all_queues(net);
-
 	rdev = nvdev->extension;
-
-	if (!rdev->link_state)
+	if (!rdev->link_state) {
 		netif_carrier_on(net);
+		netif_tx_wake_all_queues(net);
+	}
 
 	if (vf_netdev) {
 		/* Setting synthetic device up transparently sets
@@ -299,8 +324,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	rcu_read_lock();
 	vf_netdev = rcu_dereference(ndc->vf_netdev);
 	if (vf_netdev) {
-		txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
-		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
+
+		if (vf_ops->ndo_select_queue)
+			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
+						       accel_priv, fallback);
+		else
+			txq = fallback(vf_netdev, skb);
+
+		/* Record the queue selected by VF so that it can be
+		 * used for common case where VF has more queues than
+		 * the synthetic device.
+		 */
+		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
 	} else {
 		txq = netvsc_pick_tx(ndev, skb);
 	}
@@ -1576,7 +1612,8 @@ static const struct net_device_ops device_ops = {
 	.ndo_open = netvsc_open,
 	.ndo_stop = netvsc_close,
 	.ndo_start_xmit = netvsc_start_xmit,
-	.ndo_set_rx_mode = netvsc_set_multicast_list,
+	.ndo_change_rx_flags = netvsc_change_rx_flags,
+	.ndo_set_rx_mode = netvsc_set_rx_mode,
 	.ndo_change_mtu = netvsc_change_mtu,
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_set_mac_address = netvsc_set_mac_addr,
@@ -1807,6 +1844,11 @@ static void __netvsc_vf_setup(struct net_device *ndev,
 		netdev_warn(vf_netdev,
 			    "unable to change mtu to %u\n", ndev->mtu);
 
+	/* set multicast etc flags on VF */
+	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
+	dev_uc_sync(vf_netdev, ndev);
+	dev_mc_sync(vf_netdev, ndev);
+
 	if (netif_running(ndev)) {
 		ret = dev_open(vf_netdev);
 		if (ret)
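
The new ndo_change_rx_flags hook receives only which flags toggled, so the driver mirrors the change to the VF as a +1/-1 delta: dev_set_promiscuity() on the lower device is a counted reference, and deltas let several upper devices' requests nest correctly. A small compilable model of that delta propagation (plain C, hypothetical names and flag values):

#include <stdio.h>

#define IFF_PROMISC  0x1
#define IFF_ALLMULTI 0x2

struct dev { int flags; int promisc_count; };

/* Model of dev_set_promiscuity(): a counted reference, not a bool,
 * so multiple upper devices can request promiscuous mode at once. */
static void set_promiscuity(struct dev *d, int inc)
{
	d->promisc_count += inc;
	if (d->promisc_count > 0)
		d->flags |= IFF_PROMISC;
	else
		d->flags &= ~IFF_PROMISC;
}

/* Model of the ndo_change_rx_flags hook: 'change' holds only the
 * flags that toggled; current state is read from the upper device. */
static void change_rx_flags(struct dev *upper, struct dev *vf, int change)
{
	if (change & IFF_PROMISC)
		set_promiscuity(vf, (upper->flags & IFF_PROMISC) ? 1 : -1);
}

int main(void)
{
	struct dev upper = { 0 }, vf = { 0 };

	upper.flags |= IFF_PROMISC;           /* user enables promisc */
	change_rx_flags(&upper, &vf, IFF_PROMISC);
	printf("vf promisc count: %d\n", vf.promisc_count); /* 1 */

	upper.flags &= ~IFF_PROMISC;          /* user disables it */
	change_rx_flags(&upper, &vf, IFF_PROMISC);
	printf("vf promisc count: %d\n", vf.promisc_count); /* 0 */
	return 0;
}
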
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index c3ca191fea7f..8927c483c217 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -854,15 +854,19 @@ static void rndis_set_multicast(struct work_struct *w)
 {
 	struct rndis_device *rdev
 		= container_of(w, struct rndis_device, mcast_work);
+	u32 filter = NDIS_PACKET_TYPE_DIRECTED;
+	unsigned int flags = rdev->ndev->flags;
 
-	if (rdev->ndev->flags & IFF_PROMISC)
-		rndis_filter_set_packet_filter(rdev,
-					       NDIS_PACKET_TYPE_PROMISCUOUS);
-	else
-		rndis_filter_set_packet_filter(rdev,
-					       NDIS_PACKET_TYPE_BROADCAST |
-					       NDIS_PACKET_TYPE_ALL_MULTICAST |
-					       NDIS_PACKET_TYPE_DIRECTED);
+	if (flags & IFF_PROMISC) {
+		filter = NDIS_PACKET_TYPE_PROMISCUOUS;
+	} else {
+		if (flags & IFF_ALLMULTI)
+			flags |= NDIS_PACKET_TYPE_ALL_MULTICAST;
+		if (flags & IFF_BROADCAST)
+			flags |= NDIS_PACKET_TYPE_BROADCAST;
+	}
+
+	rndis_filter_set_packet_filter(rdev, filter);
 }
 
 void rndis_filter_update(struct netvsc_device *nvdev)
@@ -1340,6 +1344,9 @@ void rndis_filter_device_remove(struct hv_device *dev,
 {
 	struct rndis_device *rndis_dev = net_dev->extension;
 
+	/* Don't try and setup sub channels if about to halt */
+	cancel_work_sync(&net_dev->subchan_work);
+
 	/* Halt and release the rndis device */
 	rndis_filter_halt_device(rndis_dev);
 
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e3e29c2b028b..a6f924fee584 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -819,7 +819,7 @@ void phy_start(struct phy_device *phydev)
 		break;
 	case PHY_HALTED:
 		/* if phy was suspended, bring the physical link up again */
-		phy_resume(phydev);
+		__phy_resume(phydev);
 
 		/* make sure interrupts are re-enabled for the PHY */
 		if (phy_interrupt_is_valid(phydev)) {
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index d39ae77707ef..478405e544cc 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -135,9 +135,7 @@ static int mdio_bus_phy_resume(struct device *dev)
 	if (!mdio_bus_phy_may_suspend(phydev))
 		goto no_resume;
 
-	mutex_lock(&phydev->lock);
 	ret = phy_resume(phydev);
-	mutex_unlock(&phydev->lock);
 	if (ret < 0)
 		return ret;
 
@@ -1041,9 +1039,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 	if (err)
 		goto error;
 
-	mutex_lock(&phydev->lock);
 	phy_resume(phydev);
-	mutex_unlock(&phydev->lock);
 	phy_led_triggers_register(phydev);
 
 	return err;
@@ -1172,7 +1168,7 @@
 }
 EXPORT_SYMBOL(phy_suspend);
 
-int phy_resume(struct phy_device *phydev)
+int __phy_resume(struct phy_device *phydev)
 {
 	struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
 	int ret = 0;
@@ -1189,6 +1185,18 @@ int phy_resume(struct phy_device *phydev)
 
 	return ret;
 }
+EXPORT_SYMBOL(__phy_resume);
+
+int phy_resume(struct phy_device *phydev)
+{
+	int ret;
+
+	mutex_lock(&phydev->lock);
+	ret = __phy_resume(phydev);
+	mutex_unlock(&phydev->lock);
+
+	return ret;
+}
 EXPORT_SYMBOL(phy_resume);
 
 int phy_loopback(struct phy_device *phydev, bool enable)
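
The split into __phy_resume() (caller already holds phydev->lock) and phy_resume() (takes the lock itself) is the kernel's double-underscore locking convention; phy_start(), which runs under the lock, can now use the bare variant without deadlocking, and plain callers no longer need their own lock/unlock pairs. A compilable pthread model of the idiom (hypothetical names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int resumed;

/* __resume(): the lock must already be held by the caller. */
static int __resume(void)
{
	resumed = 1;
	return 0;
}

/* resume(): convenience wrapper that takes the lock itself. */
static int resume(void)
{
	int ret;

	pthread_mutex_lock(&lock);
	ret = __resume();
	pthread_mutex_unlock(&lock);
	return ret;
}

/* A path that already runs with the lock held must call the bare
 * variant; calling resume() here would self-deadlock. */
static void state_machine(void)
{
	pthread_mutex_lock(&lock);
	__resume();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	resume();          /* unlocked context */
	state_machine();   /* locked context   */
	printf("resumed=%d\n", resumed);
	return 0;
}
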
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 255a5def56e9..fa2a9bdd1866 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -3161,6 +3161,15 @@ ppp_connect_channel(struct channel *pch, int unit)
 		goto outl;
 
 	ppp_lock(ppp);
+	spin_lock_bh(&pch->downl);
+	if (!pch->chan) {
+		/* Don't connect unregistered channels */
+		spin_unlock_bh(&pch->downl);
+		ppp_unlock(ppp);
+		ret = -ENOTCONN;
+		goto outl;
+	}
+	spin_unlock_bh(&pch->downl);
 	if (pch->file.hdrlen > ppp->file.hdrlen)
 		ppp->file.hdrlen = pch->file.hdrlen;
 	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b52258c327d2..7433bb2e4451 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -181,7 +181,6 @@ struct tun_file {
 	struct tun_struct *detached;
 	struct ptr_ring tx_ring;
 	struct xdp_rxq_info xdp_rxq;
-	int xdp_pending_pkts;
 };
 
 struct tun_flow_entry {
@@ -1643,6 +1642,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	else
 		*skb_xdp = 0;
 
+	preempt_disable();
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(tun->xdp_prog);
 	if (xdp_prog && !*skb_xdp) {
@@ -1662,11 +1662,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		case XDP_REDIRECT:
 			get_page(alloc_frag->page);
 			alloc_frag->offset += buflen;
-			++tfile->xdp_pending_pkts;
 			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
+			xdp_do_flush_map();
 			if (err)
 				goto err_redirect;
 			rcu_read_unlock();
+			preempt_enable();
 			return NULL;
 		case XDP_TX:
 			xdp_xmit = true;
@@ -1688,6 +1689,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	skb = build_skb(buf, buflen);
 	if (!skb) {
 		rcu_read_unlock();
+		preempt_enable();
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1700,10 +1702,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		skb->dev = tun->dev;
 		generic_xdp_tx(skb, xdp_prog);
 		rcu_read_unlock();
+		preempt_enable();
 		return NULL;
 	}
 
 	rcu_read_unlock();
+	preempt_enable();
 
 	return skb;
 
@@ -1711,6 +1715,7 @@ err_redirect:
 	put_page(alloc_frag->page);
err_xdp:
 	rcu_read_unlock();
+	preempt_enable();
 	this_cpu_inc(tun->pcpu_stats->rx_dropped);
 	return NULL;
 }
@@ -1984,11 +1989,6 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	result = tun_get_user(tun, tfile, NULL, from,
 			      file->f_flags & O_NONBLOCK, false);
 
-	if (tfile->xdp_pending_pkts) {
-		tfile->xdp_pending_pkts = 0;
-		xdp_do_flush_map();
-	}
-
 	tun_put(tun);
 	return result;
 }
@@ -2325,13 +2325,6 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
 			   m->msg_flags & MSG_DONTWAIT,
 			   m->msg_flags & MSG_MORE);
-
-	if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT ||
-	    !(m->msg_flags & MSG_MORE)) {
-		tfile->xdp_pending_pkts = 0;
-		xdp_do_flush_map();
-	}
-
 	tun_put(tun);
 	return ret;
 }
@@ -3163,7 +3156,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
 
 	memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-	tfile->xdp_pending_pkts = 0;
 
 	return 0;
 }
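
The fix drops the cross-call batching counter and instead flushes immediately inside a preempt-disabled region: the redirect bookkeeping lives in per-CPU state, so the queueing and the flush must happen on the same CPU with no migration in between. A thread-local toy model of why the two steps must not be separated across CPUs (plain C, hypothetical names; per-CPU data is approximated with _Thread_local):

#include <stdio.h>

/* Approximate per-CPU state with a thread-local variable: each "CPU"
 * has its own pending-redirect slot, invisible to the others. */
static _Thread_local int pending_redirects;

static void do_redirect(void)
{
	pending_redirects++;   /* queued into THIS CPU's state */
}

static void flush_map(void)
{
	printf("flushing %d pending redirect(s)\n", pending_redirects);
	pending_redirects = 0;
}

static void handle_packet(void)
{
	/* In the kernel this span sits between preempt_disable() and
	 * preempt_enable(): if the task migrated CPUs after queueing
	 * but before flushing, flush_map() would run against the wrong
	 * CPU's (empty) state and the queued packet would be stranded. */
	do_redirect();
	flush_map();
}

int main(void)
{
	handle_packet();
	return 0;
}
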
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 05dca3e5c93d..fff4b13eece2 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -896,6 +896,12 @@ static const struct usb_device_id products[] = {
 					USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&wwan_info,
 }, {
+	/* Cinterion PLS8 modem by GEMALTO */
+	USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0061, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long)&wwan_info,
+}, {
 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 			   USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 958b2e8b90f6..86f7196f9d91 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1794,7 +1794,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 
 		tx_data += len;
 		agg->skb_len += len;
-		agg->skb_num++;
+		agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
 
 		dev_kfree_skb_any(skb);
 
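
The `x ?: y` form is GCC's conditional with the middle operand omitted: it yields x when x is non-zero, otherwise y, evaluating x only once. Here it makes the aggregation counter reflect on-wire packets: a TSO skb counts as its number of segments, and a plain skb (gso_segs == 0) still counts as one. A tiny compilable illustration (hypothetical values; needs GCC/Clang for the extension):

#include <stdio.h>

struct agg_stats { unsigned int skb_num; };

static void account(struct agg_stats *agg, unsigned int gso_segs)
{
	/* GNU C "elvis" operator: gso_segs if non-zero, otherwise 1.
	 * A large TSO skb split into 45 wire segments should count
	 * as 45 packets, not 1. */
	agg->skb_num += gso_segs ?: 1;
}

int main(void)
{
	struct agg_stats agg = { 0 };

	account(&agg, 0);    /* plain skb -> +1  */
	account(&agg, 45);   /* TSO skb   -> +45 */
	printf("skb_num = %u\n", agg.skb_num);  /* 46 */
	return 0;
}
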
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9bb9e562b893..23374603e4d9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -504,6 +504,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
504 page_off += *len; 504 page_off += *len;
505 505
506 while (--*num_buf) { 506 while (--*num_buf) {
507 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
507 unsigned int buflen; 508 unsigned int buflen;
508 void *buf; 509 void *buf;
509 int off; 510 int off;
@@ -518,7 +519,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
518 /* guard against a misconfigured or uncooperative backend that 519 /* guard against a misconfigured or uncooperative backend that
519 * is sending packet larger than the MTU. 520 * is sending packet larger than the MTU.
520 */ 521 */
521 if ((page_off + buflen) > PAGE_SIZE) { 522 if ((page_off + buflen + tailroom) > PAGE_SIZE) {
522 put_page(p); 523 put_page(p);
523 goto err_buf; 524 goto err_buf;
524 } 525 }
@@ -690,6 +691,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
690 unsigned int truesize; 691 unsigned int truesize;
691 unsigned int headroom = mergeable_ctx_to_headroom(ctx); 692 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
692 bool sent; 693 bool sent;
694 int err;
693 695
694 head_skb = NULL; 696 head_skb = NULL;
695 697
@@ -701,7 +703,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
701 void *data; 703 void *data;
702 u32 act; 704 u32 act;
703 705
704 /* This happens when rx buffer size is underestimated */ 706 /* This happens when rx buffer size is underestimated
707 * or headroom is not enough because of the buffer
708 * was refilled before XDP is set. This should only
709 * happen for the first several packets, so we don't
710 * care much about its performance.
711 */
705 if (unlikely(num_buf > 1 || 712 if (unlikely(num_buf > 1 ||
706 headroom < virtnet_get_headroom(vi))) { 713 headroom < virtnet_get_headroom(vi))) {
707 /* linearize data for XDP */ 714 /* linearize data for XDP */
@@ -736,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
736 743
737 act = bpf_prog_run_xdp(xdp_prog, &xdp); 744 act = bpf_prog_run_xdp(xdp_prog, &xdp);
738 745
739 if (act != XDP_PASS)
740 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
741
742 switch (act) { 746 switch (act) {
743 case XDP_PASS: 747 case XDP_PASS:
744 /* recalculate offset to account for any header 748 /* recalculate offset to account for any header
@@ -770,6 +774,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
770 goto err_xdp; 774 goto err_xdp;
771 rcu_read_unlock(); 775 rcu_read_unlock();
772 goto xdp_xmit; 776 goto xdp_xmit;
777 case XDP_REDIRECT:
778 err = xdp_do_redirect(dev, &xdp, xdp_prog);
779 if (err) {
780 if (unlikely(xdp_page != page))
781 put_page(xdp_page);
782 goto err_xdp;
783 }
784 *xdp_xmit = true;
785 if (unlikely(xdp_page != page))
786 goto err_xdp;
787 rcu_read_unlock();
788 goto xdp_xmit;
773 default: 789 default:
774 bpf_warn_invalid_xdp_action(act); 790 bpf_warn_invalid_xdp_action(act);
775 case XDP_ABORTED: 791 case XDP_ABORTED:
@@ -1013,13 +1029,18 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1013} 1029}
1014 1030
1015static unsigned int get_mergeable_buf_len(struct receive_queue *rq, 1031static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
1016 struct ewma_pkt_len *avg_pkt_len) 1032 struct ewma_pkt_len *avg_pkt_len,
1033 unsigned int room)
1017{ 1034{
1018 const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 1035 const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1019 unsigned int len; 1036 unsigned int len;
1020 1037
1021 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), 1038 if (room)
1039 return PAGE_SIZE - room;
1040
1041 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1022 rq->min_buf_len, PAGE_SIZE - hdr_len); 1042 rq->min_buf_len, PAGE_SIZE - hdr_len);
1043
1023 return ALIGN(len, L1_CACHE_BYTES); 1044 return ALIGN(len, L1_CACHE_BYTES);
1024} 1045}
1025 1046
@@ -1028,21 +1049,27 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
1028{ 1049{
1029 struct page_frag *alloc_frag = &rq->alloc_frag; 1050 struct page_frag *alloc_frag = &rq->alloc_frag;
1030 unsigned int headroom = virtnet_get_headroom(vi); 1051 unsigned int headroom = virtnet_get_headroom(vi);
1052 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1053 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1031 char *buf; 1054 char *buf;
1032 void *ctx; 1055 void *ctx;
1033 int err; 1056 int err;
1034 unsigned int len, hole; 1057 unsigned int len, hole;
1035 1058
1036 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); 1059 /* Extra tailroom is needed to satisfy XDP's assumption. This
1037 if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) 1060 * means rx frags coalescing won't work, but consider we've
1061 * disabled GSO for XDP, it won't be a big issue.
1062 */
1063 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1064 if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
1038 return -ENOMEM; 1065 return -ENOMEM;
1039 1066
1040 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 1067 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1041 buf += headroom; /* advance address leaving hole at front of pkt */ 1068 buf += headroom; /* advance address leaving hole at front of pkt */
1042 get_page(alloc_frag->page); 1069 get_page(alloc_frag->page);
1043 alloc_frag->offset += len + headroom; 1070 alloc_frag->offset += len + room;
1044 hole = alloc_frag->size - alloc_frag->offset; 1071 hole = alloc_frag->size - alloc_frag->offset;
1045 if (hole < len + headroom) { 1072 if (hole < len + room) {
1046 /* To avoid internal fragmentation, if there is very likely not 1073 /* To avoid internal fragmentation, if there is very likely not
1047 * enough space for another buffer, add the remaining space to 1074 * enough space for another buffer, add the remaining space to
1048 * the current buffer. 1075 * the current buffer.
@@ -2185,8 +2212,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2185 } 2212 }
2186 2213
2187 /* Make sure NAPI is not using any XDP TX queues for RX. */ 2214 /* Make sure NAPI is not using any XDP TX queues for RX. */
2188 for (i = 0; i < vi->max_queue_pairs; i++) 2215 if (netif_running(dev))
2189 napi_disable(&vi->rq[i].napi); 2216 for (i = 0; i < vi->max_queue_pairs; i++)
2217 napi_disable(&vi->rq[i].napi);
2190 2218
2191 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 2219 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2192 err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 2220 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
@@ -2205,7 +2233,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2205 } 2233 }
2206 if (old_prog) 2234 if (old_prog)
2207 bpf_prog_put(old_prog); 2235 bpf_prog_put(old_prog);
2208 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2236 if (netif_running(dev))
2237 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2209 } 2238 }
2210 2239
2211 return 0; 2240 return 0;
@@ -2576,12 +2605,15 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
 {
 	struct virtnet_info *vi = netdev_priv(queue->dev);
 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
+	unsigned int headroom = virtnet_get_headroom(vi);
+	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
 	struct ewma_pkt_len *avg;
 
 	BUG_ON(queue_index >= vi->max_queue_pairs);
 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
 	return sprintf(buf, "%u\n",
-		       get_mergeable_buf_len(&vi->rq[queue_index], avg));
+		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
+				       SKB_DATA_ALIGN(headroom + tailroom)));
 }
 
 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
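The three virtio_net hunks above belong together: whenever an XDP program can be attached, each mergeable receive buffer must reserve tailroom for a struct skb_shared_info in addition to the usual headroom, and the sysfs hunk feeds the same reservation into get_mergeable_buf_len(). A minimal sketch of the per-buffer accounting, using only names visible in the hunks (that virtnet_get_headroom() returns 0 when no XDP program is attached is an assumption from context):

	/* sketch: per-buffer reservation for XDP-capable mergeable buffers */
	unsigned int headroom = virtnet_get_headroom(vi);	/* 0 without XDP (assumed) */
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);

	/* each buffer then consumes len + room bytes of the page frag:
	 *   [ headroom | packet data (len) | tailroom ]
	 * rx frag coalescing is lost, which is acceptable because GSO is
	 * disabled while XDP is attached (per the comment in the hunk).
	 */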
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index afeca6bcdade..ab8b3cbbb205 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -574,7 +574,10 @@ static void ppp_timer(struct timer_list *t)
 			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
 				     0, NULL);
 			proto->restart_counter--;
-		} else
+		} else if (netif_carrier_ok(proto->dev))
+			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+				     0, NULL);
+		else
 			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
 				     0, NULL);
 		break;
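The hdlc_ppp change softens the timeout path: once the restart counter is exhausted, the FSM previously got an unconditional TO_BAD event; now it keeps receiving TO_GOOD as long as the carrier is up, and only fails when the link is really down. A condensed sketch of the resulting branch (the restart-counter condition on the first arm is inferred from the `proto->restart_counter--` context line, not shown in full in this hunk):

	if (proto->restart_counter > 0) {		/* normal retransmit path */
		ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, 0, NULL);
		proto->restart_counter--;
	} else if (netif_carrier_ok(proto->dev)) {	/* carrier up: keep trying */
		ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, 0, NULL);
	} else {					/* carrier down: give up */
		ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, 0, NULL);
	}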
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index ca72f3311004..c8b308cfabf1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2134,24 +2134,25 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 	}
 	reply->callback = reply_cb;
 	reply->param = reply_param;
-	if (card->state == CARD_STATE_DOWN)
-		reply->seqno = QETH_IDX_COMMAND_SEQNO;
-	else
-		reply->seqno = card->seqno.ipa++;
+
 	init_waitqueue_head(&reply->wait_q);
-	spin_lock_irqsave(&card->lock, flags);
-	list_add_tail(&reply->list, &card->cmd_waiter_list);
-	spin_unlock_irqrestore(&card->lock, flags);
 
 	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
-	qeth_prepare_control_data(card, len, iob);
 
 	if (IS_IPA(iob->data)) {
 		cmd = __ipa_cmd(iob);
+		cmd->hdr.seqno = card->seqno.ipa++;
+		reply->seqno = cmd->hdr.seqno;
 		event_timeout = QETH_IPA_TIMEOUT;
 	} else {
+		reply->seqno = QETH_IDX_COMMAND_SEQNO;
 		event_timeout = QETH_TIMEOUT;
 	}
+	qeth_prepare_control_data(card, len, iob);
+
+	spin_lock_irqsave(&card->lock, flags);
+	list_add_tail(&reply->list, &card->cmd_waiter_list);
+	spin_unlock_irqrestore(&card->lock, flags);
 
 	timeout = jiffies + event_timeout;
 
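The reordering in this hunk closes a submission race: the IPA sequence number is now drawn only after the caller has won card->write.irq_pending, i.e. under the same serialization that orders the actual writes, and the reply is published on cmd_waiter_list only once its seqno is final. Previously two concurrent callers could draw seqnos in one order and transmit in the other, letting a response match the wrong waiter. Condensed from the hunk above (error handling omitted):

	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1))
		;					/* serialize senders */

	if (IS_IPA(iob->data)) {
		cmd = __ipa_cmd(iob);
		cmd->hdr.seqno = card->seqno.ipa++;	/* seqno in send order */
		reply->seqno = cmd->hdr.seqno;
	} else {
		reply->seqno = QETH_IDX_COMMAND_SEQNO;
	}
	qeth_prepare_control_data(card, len, iob);

	spin_lock_irqsave(&card->lock, flags);		/* publish waiter last */
	list_add_tail(&reply->list, &card->cmd_waiter_list);
	spin_unlock_irqrestore(&card->lock, flags);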
@@ -2933,7 +2934,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
 	memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
 	cmd->hdr.command = command;
 	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
-	cmd->hdr.seqno = card->seqno.ipa;
+	/* cmd->hdr.seqno is set by qeth_send_control_data() */
 	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
 	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
 	if (card->options.layer2)
@@ -3898,10 +3899,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
 int qeth_get_elements_no(struct qeth_card *card,
 		     struct sk_buff *skb, int extra_elems, int data_offset)
 {
-	int elements = qeth_get_elements_for_range(
-				(addr_t)skb->data + data_offset,
-				(addr_t)skb->data + skb_headlen(skb)) +
-			qeth_get_elements_for_frags(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	int elements = qeth_get_elements_for_frags(skb);
+	addr_t start = (addr_t)skb->data + data_offset;
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);
 
 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
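The new `start != end` guard matters because an empty linear range is not automatically zero buffer elements under page-granular counting. Assuming qeth_get_elements_for_range() counts spanned pages along the lines of PFN_UP(end) - PFN_DOWN(start) (an assumption from its name and usage; its body is not part of this diff), an empty but unaligned range would still be billed one element:

	/* hypothetical illustration of the off-by-one the guard avoids,
	 * assuming a PFN_UP(end) - PFN_DOWN(start) style implementation
	 */
	addr_t start = 0x1064;		/* data_offset == skb_headlen(skb) */
	addr_t end   = start;		/* empty linear part */

	/* PFN_DOWN(0x1064) == 1, PFN_UP(0x1064) == 2  ->  1 element
	 * charged for a range containing no data; hence the skip.
	 */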
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index bdd45f4dcace..498fe9af2cdb 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -40,8 +40,40 @@ struct qeth_ipaddr {
 			unsigned int pfxlen;
 		} a6;
 	} u;
-
 };
+
+static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
+					 struct qeth_ipaddr *a2)
+{
+	if (a1->proto != a2->proto)
+		return false;
+	if (a1->proto == QETH_PROT_IPV6)
+		return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
+	return a1->u.a4.addr == a2->u.a4.addr;
+}
+
+static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
+					  struct qeth_ipaddr *a2)
+{
+	/* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
+	 * so 'proto' and 'addr' match for sure.
+	 *
+	 * For ucast:
+	 * - 'mac' is always 0.
+	 * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
+	 *   values are required to avoid mixups in takeover eligibility.
+	 *
+	 * For mcast,
+	 * - 'mac' is mapped from the IP, and thus always matches.
+	 * - 'mask'/'pfxlen' is always 0.
+	 */
+	if (a1->type != a2->type)
+		return false;
+	if (a1->proto == QETH_PROT_IPV6)
+		return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
+	return a1->u.a4.mask == a2->u.a4.mask;
+}
+
 static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
 {
 	u64 ret = 0;
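The single qeth_l3_ipaddrs_is_equal() (removed from qeth_l3_main.c below) is split into two levels: qeth_l3_addr_match_ip() is the cheap proto-plus-address comparison used for hash lookups, while qeth_l3_addr_match_all() additionally compares type and mask/pfxlen to decide whether an existing entry may be reused. A sketch of the intended two-step pattern, condensed from the qeth_l3_add_ip() hunk further down:

	/* step 1: find any entry with the same proto + IP */
	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
	if (addr) {
		/* step 2: full match decides reuse vs. conflict */
		if (qeth_l3_addr_match_all(addr, tmp_addr))
			addr->ref_counter++;	/* identical entry: share it */
		else
			rc = -EADDRINUSE;	/* same IP, different type/mask */
	}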
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index b0c888e86cd4..962a04b68dd2 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -67,6 +67,24 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
 		qeth_l3_ipaddr6_to_string(addr, buf);
 }
 
+static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
+						   struct qeth_ipaddr *query)
+{
+	u64 key = qeth_l3_ipaddr_hash(query);
+	struct qeth_ipaddr *addr;
+
+	if (query->is_multicast) {
+		hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
+			if (qeth_l3_addr_match_ip(addr, query))
+				return addr;
+	} else {
+		hash_for_each_possible(card->ip_htable, addr, hnode, key)
+			if (qeth_l3_addr_match_ip(addr, query))
+				return addr;
+	}
+	return NULL;
+}
+
 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
 {
 	int i, j;
@@ -120,34 +138,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
 	return rc;
 }
 
-inline int
-qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
-{
-	return addr1->proto == addr2->proto &&
-	       !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
-	       ether_addr_equal_64bits(addr1->mac, addr2->mac);
-}
-
-static struct qeth_ipaddr *
-qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
-{
-	struct qeth_ipaddr *addr;
-
-	if (tmp_addr->is_multicast) {
-		hash_for_each_possible(card->ip_mc_htable, addr,
-				       hnode, qeth_l3_ipaddr_hash(tmp_addr))
-			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
-				return addr;
-	} else {
-		hash_for_each_possible(card->ip_htable, addr,
-				       hnode, qeth_l3_ipaddr_hash(tmp_addr))
-			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
-				return addr;
-	}
-
-	return NULL;
-}
-
 int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
 	int rc = 0;
@@ -162,23 +152,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
 	}
 
-	addr = qeth_l3_ip_from_hash(card, tmp_addr);
-	if (!addr)
+	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+	if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
 		return -ENOENT;
 
 	addr->ref_counter--;
-	if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL ||
-				      addr->type == QETH_IP_TYPE_RXIP))
+	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
 		return rc;
 	if (addr->in_progress)
 		return -EINPROGRESS;
 
-	if (!qeth_card_hw_is_reachable(card)) {
-		addr->disp_flag = QETH_DISP_ADDR_DELETE;
-		return 0;
-	}
-
-	rc = qeth_l3_deregister_addr_entry(card, addr);
+	if (qeth_card_hw_is_reachable(card))
+		rc = qeth_l3_deregister_addr_entry(card, addr);
 
 	hash_del(&addr->hnode);
 	kfree(addr);
@@ -190,6 +175,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
 	int rc = 0;
 	struct qeth_ipaddr *addr;
+	char buf[40];
 
 	QETH_CARD_TEXT(card, 4, "addip");
 
@@ -200,8 +186,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
 	}
 
-	addr = qeth_l3_ip_from_hash(card, tmp_addr);
-	if (!addr) {
+	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+	if (addr) {
+		if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
+			return -EADDRINUSE;
+		if (qeth_l3_addr_match_all(addr, tmp_addr)) {
+			addr->ref_counter++;
+			return 0;
+		}
+		qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
+					 buf);
+		dev_warn(&card->gdev->dev,
+			 "Registering IP address %s failed\n", buf);
+		return -EADDRINUSE;
+	} else {
 		addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
 		if (!addr)
 			return -ENOMEM;
@@ -241,19 +239,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		    (rc == IPA_RC_LAN_OFFLINE)) {
 			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			if (addr->ref_counter < 1) {
-				qeth_l3_delete_ip(card, addr);
+				qeth_l3_deregister_addr_entry(card, addr);
+				hash_del(&addr->hnode);
 				kfree(addr);
 			}
 		} else {
 			hash_del(&addr->hnode);
 			kfree(addr);
 		}
-	} else {
-		if (addr->type == QETH_IP_TYPE_NORMAL ||
-		    addr->type == QETH_IP_TYPE_RXIP)
-			addr->ref_counter++;
 	}
-
 	return rc;
 }
 
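Taken together, the add/delete rework changes the lifecycle rules: only QETH_IP_TYPE_NORMAL addresses are reference-counted at all, registering the same IP again with a conflicting type or mask now fails with -EADDRINUSE instead of silently coexisting, and deletion no longer defers via QETH_DISP_ADDR_DELETE when the card is unreachable, which is why that branch disappears from qeth_l3_recover_ip() in the next hunk. Condensed decision flow of qeth_l3_add_ip() after this change:

	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
	if (addr) {
		if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
			return -EADDRINUSE;	/* VIPA/RXIP: no duplicates */
		if (qeth_l3_addr_match_all(addr, tmp_addr)) {
			addr->ref_counter++;	/* identical entry: share it */
			return 0;
		}
		return -EADDRINUSE;		/* same IP, conflicting entry */
	}
	/* otherwise: allocate, hash and register a fresh entry */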
@@ -321,11 +315,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
 	spin_lock_bh(&card->ip_lock);
 
 	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
-		if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
-			qeth_l3_deregister_addr_entry(card, addr);
-			hash_del(&addr->hnode);
-			kfree(addr);
-		} else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
 			if (addr->proto == QETH_PROT_IPV4) {
 				addr->in_progress = 1;
 				spin_unlock_bh(&card->ip_lock);
@@ -643,12 +633,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-
-	if (qeth_l3_ip_from_hash(card, ipaddr))
-		rc = -EEXIST;
-	else
-		rc = qeth_l3_add_ip(card, ipaddr);
-
+	rc = qeth_l3_add_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(ipaddr);
@@ -713,12 +698,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-
-	if (qeth_l3_ip_from_hash(card, ipaddr))
-		rc = -EEXIST;
-	else
-		rc = qeth_l3_add_ip(card, ipaddr);
-
+	rc = qeth_l3_add_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(ipaddr);
@@ -1239,8 +1219,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 		tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
 		tmp->is_multicast = 1;
 
-		ipm = qeth_l3_ip_from_hash(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
 		if (ipm) {
+			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 		} else {
 			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1319,8 +1300,9 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
 		       sizeof(struct in6_addr));
 		tmp->is_multicast = 1;
 
-		ipm = qeth_l3_ip_from_hash(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
 		if (ipm) {
+			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			continue;
 		}
@@ -2450,11 +2432,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
 			struct sk_buff *skb, int extra_elems)
 {
-	addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
-	int elements = qeth_get_elements_for_range(
-				tcpdptr,
-				(addr_t)skb->data + skb_headlen(skb)) +
-			qeth_get_elements_for_frags(skb);
+	addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	int elements = qeth_get_elements_for_frags(skb);
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);
 
 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2,