-rw-r--r--  drivers/net/can/usb/Kconfig | 2
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 17
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 2
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.h | 2
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 72
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 37
-rw-r--r--  drivers/net/dsa/b53/b53_regs.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 10
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 5
-rw-r--r--  drivers/net/ethernet/sfc/workarounds.h | 1
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 9
-rw-r--r--  drivers/net/macsec.c | 27
-rw-r--r--  drivers/net/macvlan.c | 11
-rw-r--r--  drivers/net/phy/micrel.c | 11
-rw-r--r--  drivers/net/phy/phy.c | 40
-rw-r--r--  drivers/net/team/team.c | 8
-rw-r--r--  drivers/net/usb/Kconfig | 2
-rw-r--r--  drivers/net/usb/hso.c | 2
-rw-r--r--  drivers/net/usb/plusb.c | 15
-rw-r--r--  include/linux/phy.h | 1
-rw-r--r--  net/bridge/br_device.c | 1
-rw-r--r--  net/bridge/br_if.c | 1
-rw-r--r--  net/core/dev.c | 3
-rw-r--r--  net/ipv4/route.c | 3
-rw-r--r--  net/ipv4/tcp_cong.c | 11
-rw-r--r--  net/ipv4/udp_offload.c | 3
-rw-r--r--  net/ipv6/addrconf.c | 14
-rw-r--r--  net/ipv6/af_inet6.c | 6
-rw-r--r--  net/ipv6/exthdrs.c | 4
-rw-r--r--  net/ipv6/ip6_tunnel.c | 34
-rw-r--r--  net/ipv6/ndisc.c | 3
-rw-r--r--  net/ipv6/raw.c | 3
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/tipc/socket.c | 4
44 files changed, 373 insertions, 141 deletions
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 8483a40e7e9e..5f9e0e6301d0 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -72,6 +72,8 @@ config CAN_PEAK_USB
 	  PCAN-USB Pro dual CAN 2.0b channels USB adapter
 	  PCAN-USB FD single CAN-FD channel USB adapter
 	  PCAN-USB Pro FD dual CAN-FD channels USB adapter
+	  PCAN-Chip USB CAN-FD to USB stamp module
+	  PCAN-USB X6 6 CAN-FD channels USB adapter
 
 	  (see also http://www.peak-system.com).
 
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 300349fe8dc0..eecee7f8dfb7 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -739,13 +739,18 @@ static const struct net_device_ops gs_usb_netdev_ops = {
 static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
 {
 	struct gs_can *dev = netdev_priv(netdev);
-	struct gs_identify_mode imode;
+	struct gs_identify_mode *imode;
 	int rc;
 
+	imode = kmalloc(sizeof(*imode), GFP_KERNEL);
+
+	if (!imode)
+		return -ENOMEM;
+
 	if (do_identify)
-		imode.mode = GS_CAN_IDENTIFY_ON;
+		imode->mode = GS_CAN_IDENTIFY_ON;
 	else
-		imode.mode = GS_CAN_IDENTIFY_OFF;
+		imode->mode = GS_CAN_IDENTIFY_OFF;
 
 	rc = usb_control_msg(interface_to_usbdev(dev->iface),
 			     usb_sndctrlpipe(interface_to_usbdev(dev->iface),
@@ -755,10 +760,12 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
 			     USB_RECIP_INTERFACE,
 			     dev->channel,
 			     0,
-			     &imode,
-			     sizeof(imode),
+			     imode,
+			     sizeof(*imode),
 			     100);
 
+	kfree(imode);
+
 	return (rc > 0) ? 0 : rc;
 }
 
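
Note on the gs_usb change above: it replaces an on-stack struct gs_identify_mode with a kmalloc()'d copy, because buffers handed to usb_control_msg() may be DMA-mapped and stack memory is not DMA-safe. Below is a minimal, hedged sketch of that pattern only; it is not part of the patch, and my_set_mode(), struct my_mode and the request constants are made-up placeholders.

#include <linux/slab.h>
#include <linux/usb.h>

struct my_mode {
	__le32 mode;
};

/* Send a small vendor request using a heap buffer; stack memory is not DMA-safe. */
static int my_set_mode(struct usb_device *udev, u8 request, u16 channel, u32 mode)
{
	struct my_mode *buf;
	int rc;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);	/* DMA-able allocation */
	if (!buf)
		return -ENOMEM;

	buf->mode = cpu_to_le32(mode);
	rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			     USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			     channel, 0, buf, sizeof(*buf), 100);
	kfree(buf);

	return rc < 0 ? rc : 0;
}
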
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 0b0302af3bd2..57913dbbae0a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -39,6 +39,7 @@ static struct usb_device_id peak_usb_table[] = {
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
+	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID)},
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)},
 	{} /* Terminating entry */
 };
@@ -51,6 +52,7 @@ static const struct peak_usb_adapter *const peak_usb_adapters_list[] = {
 	&pcan_usb_pro,
 	&pcan_usb_fd,
 	&pcan_usb_pro_fd,
+	&pcan_usb_chip,
 	&pcan_usb_x6,
 };
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 3cbfb069893d..c01316cac354 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -27,6 +27,7 @@
 #define PCAN_USBPRO_PRODUCT_ID		0x000d
 #define PCAN_USBPROFD_PRODUCT_ID	0x0011
 #define PCAN_USBFD_PRODUCT_ID		0x0012
+#define PCAN_USBCHIP_PRODUCT_ID		0x0013
 #define PCAN_USBX6_PRODUCT_ID		0x0014
 
 #define PCAN_USB_DRIVER_NAME		"peak_usb"
@@ -90,6 +91,7 @@ struct peak_usb_adapter {
 extern const struct peak_usb_adapter pcan_usb;
 extern const struct peak_usb_adapter pcan_usb_pro;
 extern const struct peak_usb_adapter pcan_usb_fd;
+extern const struct peak_usb_adapter pcan_usb_chip;
 extern const struct peak_usb_adapter pcan_usb_pro_fd;
 extern const struct peak_usb_adapter pcan_usb_x6;
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 304732550f0a..528d3bb4917f 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -1061,6 +1061,78 @@ const struct peak_usb_adapter pcan_usb_fd = {
 	.do_get_berr_counter = pcan_usb_fd_get_berr_counter,
 };
 
+/* describes the PCAN-CHIP USB */
+static const struct can_bittiming_const pcan_usb_chip_const = {
+	.name = "pcan_chip_usb",
+	.tseg1_min = 1,
+	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
+	.tseg2_min = 1,
+	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
+	.brp_min = 1,
+	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
+	.brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_chip_data_const = {
+	.name = "pcan_chip_usb",
+	.tseg1_min = 1,
+	.tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
+	.tseg2_min = 1,
+	.tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
+	.brp_min = 1,
+	.brp_max = (1 << PUCAN_TFAST_BRP_BITS),
+	.brp_inc = 1,
+};
+
+const struct peak_usb_adapter pcan_usb_chip = {
+	.name = "PCAN-Chip USB",
+	.device_id = PCAN_USBCHIP_PRODUCT_ID,
+	.ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
+	.ctrlmode_supported = CAN_CTRLMODE_FD |
+		CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+	.clock = {
+		.freq = PCAN_UFD_CRYSTAL_HZ,
+	},
+	.bittiming_const = &pcan_usb_chip_const,
+	.data_bittiming_const = &pcan_usb_chip_data_const,
+
+	/* size of device private data */
+	.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+
+	/* timestamps usage */
+	.ts_used_bits = 32,
+	.ts_period = 1000000, /* calibration period in ts. */
+	.us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+	.us_per_ts_shift = 0,
+
+	/* give here messages in/out endpoints */
+	.ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+	.ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0},
+
+	/* size of rx/tx usb buffers */
+	.rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
+	.tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
+
+	/* device callbacks */
+	.intf_probe = pcan_usb_pro_probe,	/* same as PCAN-USB Pro */
+	.dev_init = pcan_usb_fd_init,
+
+	.dev_exit = pcan_usb_fd_exit,
+	.dev_free = pcan_usb_fd_free,
+	.dev_set_bus = pcan_usb_fd_set_bus,
+	.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
+	.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+	.dev_decode_buf = pcan_usb_fd_decode_buf,
+	.dev_start = pcan_usb_fd_start,
+	.dev_stop = pcan_usb_fd_stop,
+	.dev_restart_async = pcan_usb_fd_restart_async,
+	.dev_encode_msg = pcan_usb_fd_encode_msg,
+
+	.do_get_berr_counter = pcan_usb_fd_get_berr_counter,
+};
+
 /* describes the PCAN-USB Pro FD adapter */
 static const struct can_bittiming_const pcan_usb_pro_fd_const = {
 	.name = "pcan_usb_pro_fd",
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 8cf4801994e8..fa0eece21eef 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -326,6 +326,7 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
 
 static void b53_set_forwarding(struct b53_device *dev, int enable)
 {
+	struct dsa_switch *ds = dev->ds;
 	u8 mgmt;
 
 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
@@ -336,6 +337,15 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
 		mgmt &= ~SM_SW_FWD_EN;
 
 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+	/* Include IMP port in dumb forwarding mode when no tagging protocol is
+	 * set
+	 */
+	if (ds->ops->get_tag_protocol(ds) == DSA_TAG_PROTO_NONE) {
+		b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+		mgmt |= B53_MII_DUMB_FWDG_EN;
+		b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+	}
 }
 
 static void b53_enable_vlan(struct b53_device *dev, bool enable)
@@ -598,7 +608,8 @@ static void b53_switch_reset_gpio(struct b53_device *dev)
 
 static int b53_switch_reset(struct b53_device *dev)
 {
-	u8 mgmt;
+	unsigned int timeout = 1000;
+	u8 mgmt, reg;
 
 	b53_switch_reset_gpio(dev);
 
@@ -607,6 +618,28 @@ static int b53_switch_reset(struct b53_device *dev)
 		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
 	}
 
+	/* This is specific to 58xx devices here, do not use is58xx() which
+	 * covers the larger Starfigther 2 family, including 7445/7278 which
+	 * still use this driver as a library and need to perform the reset
+	 * earlier.
+	 */
+	if (dev->chip_id == BCM58XX_DEVICE_ID) {
+		b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+		reg |= SW_RST | EN_SW_RST | EN_CH_RST;
+		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
+
+		do {
+			b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+			if (!(reg & SW_RST))
+				break;
+
+			usleep_range(1000, 2000);
+		} while (timeout-- > 0);
+
+		if (timeout == 0)
+			return -ETIMEDOUT;
+	}
+
 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
 
 	if (!(mgmt & SM_SW_FWD_EN)) {
@@ -1731,7 +1764,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
 		.vlans = 4096,
 		.enabled_ports = 0x1ff,
 		.arl_entries = 4,
-		.cpu_port = B53_CPU_PORT_25,
+		.cpu_port = B53_CPU_PORT,
 		.vta_regs = B53_VTA_REGS,
 		.duplex_reg = B53_DUPLEX_STAT_GE,
 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
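
Note on the BCM58xx branch added to b53_switch_reset() above: it is a plain poll-with-timeout loop, setting the reset bits and then re-reading B53_SOFTRESET until SW_RST clears, sleeping between reads and giving up after a bounded number of attempts. The following is a hedged sketch of that idiom only, not the patch itself; it reuses the driver's b53_read8() helper, and wait_for_reset_clear() is a hypothetical name.

static int wait_for_reset_clear(struct b53_device *dev)
{
	unsigned int timeout = 1000;
	u8 reg;

	do {
		b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
		if (!(reg & SW_RST))
			return 0;		/* reset has completed */
		usleep_range(1000, 2000);	/* sleep instead of busy-waiting */
	} while (--timeout);

	return -ETIMEDOUT;			/* SW_RST never cleared */
}
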
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index 9fd24c418fa4..e5c86d44667a 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -104,6 +104,10 @@
 #define  B53_UC_FWD_EN			BIT(6)
 #define  B53_MC_FWD_EN			BIT(7)
 
+/* Switch control (8 bit) */
+#define B53_SWITCH_CTRL			0x22
+#define  B53_MII_DUMB_FWDG_EN		BIT(6)
+
 /* (16 bit) */
 #define B53_UC_FLOOD_MASK		0x32
 #define B53_MC_FLOOD_MASK		0x34
@@ -139,6 +143,7 @@
 /* Software reset register (8 bit) */
 #define B53_SOFTRESET			0x79
 #define  SW_RST				BIT(7)
+#define  EN_CH_RST			BIT(6)
 #define  EN_SW_RST			BIT(4)
 
 /* Fast Aging Control register (8 bit) */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index dc52053128bc..3d9490cd2db1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -90,7 +90,7 @@
 #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
 
 #define MLX5_UMR_ALIGN				(2048)
-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(128)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(256)
 
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ		(64 * 1024)
 #define MLX5E_DEFAULT_LRO_TIMEOUT		32
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index d55fff0ba388..26fc77e80f7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -564,6 +564,7 @@ int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *i
 	int idx = 0;
 	int err = 0;
 
+	info->data = MAX_NUM_OF_ETHTOOL_RULES;
 	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
 		err = mlx5e_ethtool_get_flow(priv, info, location);
 		if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 66c133757a5e..15cc7b469d2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -174,7 +174,7 @@ unlock:
 
 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 {
-	struct mlx5e_sw_stats *s = &priv->stats.sw;
+	struct mlx5e_sw_stats temp, *s = &temp;
 	struct mlx5e_rq_stats *rq_stats;
 	struct mlx5e_sq_stats *sq_stats;
 	u64 tx_offload_none = 0;
@@ -229,6 +229,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
 				priv->stats.pport.phy_counters,
 				counter_set.phys_layer_cntrs.link_down_events);
+	memcpy(&priv->stats.sw, s, sizeof(*s));
 }
 
 static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
@@ -243,7 +244,6 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
 	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
 	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
 
-	memset(out, 0, outlen);
 	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fade7233dac5..5436866798f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -639,7 +639,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 
 	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
 	    rep->vport != FDB_UPLINK_VPORT) {
-		if (min_inline > esw->offloads.inline_mode) {
+		if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
+		    esw->offloads.inline_mode < min_inline) {
 			netdev_warn(priv->netdev,
 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
 				    min_inline, esw->offloads.inline_mode);
@@ -785,16 +786,15 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	return 0;
 }
 
-static int gen_vxlan_header_ipv4(struct net_device *out_dev,
-				 char buf[],
+static void gen_vxlan_header_ipv4(struct net_device *out_dev,
+				  char buf[], int encap_size,
 				  unsigned char h_dest[ETH_ALEN],
 				  int ttl,
 				  __be32 daddr,
 				  __be32 saddr,
 				  __be16 udp_dst_port,
 				  __be32 vx_vni)
 {
-	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
 	struct ethhdr *eth = (struct ethhdr *)buf;
 	struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
 	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
@@ -817,20 +817,17 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev,
 	udp->dest = udp_dst_port;
 	vxh->vx_flags = VXLAN_HF_VNI;
 	vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-	return encap_size;
 }
 
-static int gen_vxlan_header_ipv6(struct net_device *out_dev,
-				 char buf[],
+static void gen_vxlan_header_ipv6(struct net_device *out_dev,
+				  char buf[], int encap_size,
 				  unsigned char h_dest[ETH_ALEN],
 				  int ttl,
 				  struct in6_addr *daddr,
 				  struct in6_addr *saddr,
 				  __be16 udp_dst_port,
 				  __be32 vx_vni)
 {
-	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
 	struct ethhdr *eth = (struct ethhdr *)buf;
 	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
 	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
@@ -852,8 +849,6 @@ static int gen_vxlan_header_ipv6(struct net_device *out_dev,
 	udp->dest = udp_dst_port;
 	vxh->vx_flags = VXLAN_HF_VNI;
 	vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-	return encap_size;
 }
 
 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
@@ -862,13 +857,20 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 					  struct net_device **out_dev)
 {
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
-	int encap_size, ttl, err;
 	struct neighbour *n = NULL;
 	struct flowi4 fl4 = {};
 	char *encap_header;
+	int ttl, err;
+
+	if (max_encap_size < ipv4_encap_size) {
+		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+			       ipv4_encap_size, max_encap_size);
+		return -EOPNOTSUPP;
+	}
 
-	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
 	if (!encap_header)
 		return -ENOMEM;
 
@@ -903,11 +905,11 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 
 	switch (e->tunnel_type) {
 	case MLX5_HEADER_TYPE_VXLAN:
-		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
-						   e->h_dest, ttl,
+		gen_vxlan_header_ipv4(*out_dev, encap_header,
+				      ipv4_encap_size, e->h_dest, ttl,
 				      fl4.daddr,
 				      fl4.saddr, tun_key->tp_dst,
 				      tunnel_id_to_key32(tun_key->tun_id));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -915,7 +917,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	}
 
 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-			       encap_size, encap_header, &e->encap_id);
+			       ipv4_encap_size, encap_header, &e->encap_id);
 out:
 	if (err && n)
 		neigh_release(n);
@@ -930,13 +932,20 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 
 {
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
-	int encap_size, err, ttl = 0;
 	struct neighbour *n = NULL;
 	struct flowi6 fl6 = {};
 	char *encap_header;
+	int err, ttl = 0;
+
+	if (max_encap_size < ipv6_encap_size) {
+		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+			       ipv6_encap_size, max_encap_size);
+		return -EOPNOTSUPP;
+	}
 
-	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
 	if (!encap_header)
 		return -ENOMEM;
 
@@ -972,11 +981,11 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 
 	switch (e->tunnel_type) {
 	case MLX5_HEADER_TYPE_VXLAN:
-		encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
-						   e->h_dest, ttl,
+		gen_vxlan_header_ipv6(*out_dev, encap_header,
+				      ipv6_encap_size, e->h_dest, ttl,
 				      &fl6.daddr,
 				      &fl6.saddr, tun_key->tp_dst,
 				      tunnel_id_to_key32(tun_key->tun_id));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -984,7 +993,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 	}
 
 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-			       encap_size, encap_header, &e->encap_id);
+			       ipv6_encap_size, encap_header, &e->encap_id);
 out:
 	if (err && n)
 		neigh_release(n);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 307ec6c5fd3b..d111cebca9f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -911,8 +911,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	int num_vports = esw->enabled_vports;
-	int err;
-	int vport;
+	int err, vport;
 	u8 mlx5_mode;
 
 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
@@ -921,9 +920,17 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
+			return 0;
+		/* fall through */
+	case MLX5_CAP_INLINE_MODE_L2:
+		esw_warn(dev, "Inline mode can't be set\n");
 		return -EOPNOTSUPP;
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		break;
+	}
 
 	if (esw->offloads.num_flows > 0) {
 		esw_warn(dev, "Can't set inline mode when flows are configured\n");
@@ -966,18 +973,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
-
 	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
 
 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 {
+	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
 	struct mlx5_core_dev *dev = esw->dev;
 	int vport;
-	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
 
 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
 		return -EOPNOTSUPP;
@@ -985,10 +988,18 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
+	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+		mlx5_mode = MLX5_INLINE_MODE_NONE;
+		goto out;
+	case MLX5_CAP_INLINE_MODE_L2:
+		mlx5_mode = MLX5_INLINE_MODE_L2;
+		goto out;
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		goto query_vports;
+	}
 
+query_vports:
 	for (vport = 1; vport <= nvfs; vport++) {
 		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
 		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
@@ -996,6 +1007,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 		prev_mlx5_mode = mlx5_mode;
 	}
 
+out:
 	*mode = mlx5_mode;
 	return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 60154a175bd3..0ad66324247f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1029,7 +1029,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	if (err) {
 		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
 			FW_INIT_TIMEOUT_MILI);
-		goto out_err;
+		goto err_cmd_cleanup;
 	}
 
 	err = mlx5_core_enable_hca(dev, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 2e6b0f290ddc..222b25908d01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -87,6 +87,7 @@ static void up_rel_func(struct kref *kref)
 	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);
 
 	list_del(&up->list);
+	iounmap(up->map);
 	if (mlx5_cmd_free_uar(up->mdev, up->index))
 		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
 	kfree(up->reg_bitmap);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index a6e2bbe629bd..cfdadb658ade 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -64,11 +64,11 @@
 	((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
 
 static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
-	{DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
-	{DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
-	{DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
-	{DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
-	{DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
+	{DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_ISCSI},
+	{DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_FCOE},
+	{DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_ETH_ROCE},
+	{DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_ETH_ROCE},
+	{DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH},
 };
 
 static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 8cfc4a54f2dc..3cd7989c007d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1516,11 +1516,12 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
-	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
-	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
 	if (skb_put_padto(skb, ETH_ZLEN))
-		goto drop;
+		goto exit;
+
+	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
 	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
 		 entry / NUM_TX_DESC * DPTR_ALIGN;
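
Note on the ravb change above: it is an ordering fix. skb_put_padto() frees the skb when it fails, so the skb must not be recorded in the TX ring (or referenced in any other way) until padding has succeeded. A short, hedged sketch of the safe ordering follows; struct my_tx_ring and queue_small_frame() are invented placeholders, not ravb code.

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_tx_ring {
	struct sk_buff **skb;
	unsigned int tail;
};

static netdev_tx_t queue_small_frame(struct my_tx_ring *ring, struct sk_buff *skb)
{
	/* On failure the skb is already freed; touching it afterwards is a bug. */
	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	ring->skb[ring->tail] = skb;	/* safe: skb is still valid here */
	/* ... map the buffer and post the descriptor ... */
	return NETDEV_TX_OK;
}
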
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index ee14662415c5..a0c52e328102 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -74,7 +74,10 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_RXQ_MIN_ENT		128U
 #define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))
 
-#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_35388(efx) ? \
+/* All EF10 architecture NICs steal one bit of the DMAQ size for various
+ * other purposes when counting TxQ entries, so we halve the queue size.
+ */
+#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_EF10(efx) ? \
 				 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
 
 static inline bool efx_rss_enabled(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index 103f827a1623..c67fa18b8121 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -16,6 +16,7 @@
  */
 
 #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
+#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
 #define EFX_WORKAROUND_10G(efx) 1
 
 /* Bit-bashed I2C reads cause performance drop */
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index a45f98fa4aa7..3dadee1080b9 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1017,8 +1017,8 @@ tc35815_free_queues(struct net_device *dev)
 			BUG_ON(lp->tx_skbs[i].skb != skb);
 #endif
 			if (skb) {
-				dev_kfree_skb(skb);
 				pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+				dev_kfree_skb(skb);
 				lp->tx_skbs[i].skb = NULL;
 				lp->tx_skbs[i].skb_dma = 0;
 			}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index f9f3dba7a588..db23cb36ae5c 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -751,7 +751,6 @@ struct netvsc_device {
 	u32 send_section_cnt;
 	u32 send_section_size;
 	unsigned long *send_section_map;
-	int map_words;
 
 	/* Used for NetVSP initialization protocol */
 	struct completion channel_init_wait;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 8dd0b8770328..15ef713d96c0 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -236,6 +236,7 @@ static int netvsc_init_buf(struct hv_device *device)
 	struct netvsc_device *net_device;
 	struct nvsp_message *init_packet;
 	struct net_device *ndev;
+	size_t map_words;
 	int node;
 
 	net_device = get_outbound_net_device(device);
@@ -401,11 +402,9 @@ static int netvsc_init_buf(struct hv_device *device)
 		 net_device->send_section_size, net_device->send_section_cnt);
 
 	/* Setup state for managing the send buffer. */
-	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
-					     BITS_PER_LONG);
+	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);
 
-	net_device->send_section_map = kcalloc(net_device->map_words,
-					       sizeof(ulong), GFP_KERNEL);
+	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
 	if (net_device->send_section_map == NULL) {
 		ret = -ENOMEM;
 		goto cleanup;
@@ -683,7 +682,7 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 	unsigned long *map_addr = net_device->send_section_map;
 	unsigned int i;
 
-	for_each_clear_bit(i, map_addr, net_device->map_words) {
+	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
 		if (sync_test_and_set_bit(i, map_addr) == 0)
 			return i;
 	}
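
Note on the netvsc change above: the cached map_words field goes away, the bitmap is allocated from the section count, and for_each_clear_bit() is bounded by the number of bits (sections), not by the number of longs backing the map. A small, hedged sketch of that pattern follows; alloc_section_map() and claim_free_section() are illustrative names, not netvsc functions.

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static unsigned long *alloc_section_map(unsigned int nbits)
{
	size_t map_words = DIV_ROUND_UP(nbits, BITS_PER_LONG);

	return kcalloc(map_words, sizeof(unsigned long), GFP_KERNEL);
}

static int claim_free_section(unsigned long *map, unsigned int nbits)
{
	unsigned int i;

	/* The size argument is in bits; passing the word count would
	 * silently shrink the search range.
	 */
	for_each_clear_bit(i, map, nbits) {
		if (!test_and_set_bit(i, map))
			return i;
	}

	return -ENOSPC;
}
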
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index ff0a5ed3ca80..49ce4e9f4a0f 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -617,7 +617,8 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
 
 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 					     unsigned char **iv,
-					     struct scatterlist **sg)
+					     struct scatterlist **sg,
+					     int num_frags)
 {
 	size_t size, iv_offset, sg_offset;
 	struct aead_request *req;
@@ -629,7 +630,7 @@ static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 
 	size = ALIGN(size, __alignof__(struct scatterlist));
 	sg_offset = size;
-	size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+	size += sizeof(struct scatterlist) * num_frags;
 
 	tmp = kmalloc(size, GFP_ATOMIC);
 	if (!tmp)
@@ -649,6 +650,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 {
 	int ret;
 	struct scatterlist *sg;
+	struct sk_buff *trailer;
 	unsigned char *iv;
 	struct ethhdr *eth;
 	struct macsec_eth_header *hh;
@@ -723,7 +725,14 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 		return ERR_PTR(-EINVAL);
 	}
 
-	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
+	ret = skb_cow_data(skb, 0, &trailer);
+	if (unlikely(ret < 0)) {
+		macsec_txsa_put(tx_sa);
+		kfree_skb(skb);
+		return ERR_PTR(ret);
+	}
+
+	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
 	if (!req) {
 		macsec_txsa_put(tx_sa);
 		kfree_skb(skb);
@@ -732,7 +741,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 
 	macsec_fill_iv(iv, secy->sci, pn);
 
-	sg_init_table(sg, MAX_SKB_FRAGS + 1);
+	sg_init_table(sg, ret);
 	skb_to_sgvec(skb, sg, 0, skb->len);
 
 	if (tx_sc->encrypt) {
@@ -917,6 +926,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 {
 	int ret;
 	struct scatterlist *sg;
+	struct sk_buff *trailer;
 	unsigned char *iv;
 	struct aead_request *req;
 	struct macsec_eth_header *hdr;
@@ -927,7 +937,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
+	ret = skb_cow_data(skb, 0, &trailer);
+	if (unlikely(ret < 0)) {
+		kfree_skb(skb);
+		return ERR_PTR(ret);
+	}
+	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
 	if (!req) {
 		kfree_skb(skb);
 		return ERR_PTR(-ENOMEM);
@@ -936,7 +951,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 	hdr = (struct macsec_eth_header *)skb->data;
 	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
 
-	sg_init_table(sg, MAX_SKB_FRAGS + 1);
+	sg_init_table(sg, ret);
 	skb_to_sgvec(skb, sg, 0, skb->len);
 
 	if (hdr->tci_an & MACSEC_TCI_E) {
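
Note on the macsec changes above: the scatterlist is now sized from the return value of skb_cow_data(), which on success both makes the skb data writable and returns the number of fragments the skb will scatter into, which is exactly how many scatterlist entries skb_to_sgvec() needs. A rough, hedged sketch of that pattern follows; map_skb_to_sg() is an invented helper, not macsec code.

#include <linux/scatterlist.h>
#include <linux/skbuff.h>

static int map_skb_to_sg(struct sk_buff *skb, struct scatterlist *sg, int max_ents)
{
	struct sk_buff *trailer;
	int nfrags;

	nfrags = skb_cow_data(skb, 0, &trailer);	/* also un-shares the data */
	if (nfrags < 0)
		return nfrags;
	if (nfrags > max_ents)
		return -EMSGSIZE;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	return nfrags;
}
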
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9261722960a7..b34eaaae03fd 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1139,6 +1139,7 @@ static int macvlan_port_create(struct net_device *dev)
 static void macvlan_port_destroy(struct net_device *dev)
 {
 	struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+	struct sk_buff *skb;
 
 	dev->priv_flags &= ~IFF_MACVLAN_PORT;
 	netdev_rx_handler_unregister(dev);
@@ -1147,7 +1148,15 @@ static void macvlan_port_destroy(struct net_device *dev)
 	 * but we need to cancel it and purge left skbs if any.
 	 */
 	cancel_work_sync(&port->bc_work);
-	__skb_queue_purge(&port->bc_queue);
+
+	while ((skb = __skb_dequeue(&port->bc_queue))) {
+		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+		if (src)
+			dev_put(src->dev);
+
+		kfree_skb(skb);
+	}
 
 	kfree(port);
 }
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 1326d99771c1..da5b39268370 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -297,17 +297,6 @@ static int kszphy_config_init(struct phy_device *phydev)
 	if (priv->led_mode >= 0)
 		kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
 
-	if (phy_interrupt_is_valid(phydev)) {
-		int ctl = phy_read(phydev, MII_BMCR);
-
-		if (ctl < 0)
-			return ctl;
-
-		ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE);
-		if (ret < 0)
-			return ret;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a2bfc82e95d7..97ff1278167b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -591,16 +591,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 EXPORT_SYMBOL(phy_mii_ioctl);
 
 /**
- * phy_start_aneg - start auto-negotiation for this PHY device
+ * phy_start_aneg_priv - start auto-negotiation for this PHY device
  * @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
  *
  * Description: Sanitizes the settings (if we're not autonegotiating
  * them), and then calls the driver's config_aneg function.
  * If the PHYCONTROL Layer is operating, we change the state to
  * reflect the beginning of Auto-negotiation or forcing.
  */
-int phy_start_aneg(struct phy_device *phydev)
+static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
 {
+	bool trigger = 0;
 	int err;
 
 	if (!phydev->drv)
@@ -628,10 +630,40 @@ int phy_start_aneg(struct phy_device *phydev)
 		}
 	}
 
+	/* Re-schedule a PHY state machine to check PHY status because
+	 * negotiation may already be done and aneg interrupt may not be
+	 * generated.
+	 */
+	if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+		err = phy_aneg_done(phydev);
+		if (err > 0) {
+			trigger = true;
+			err = 0;
+		}
+	}
+
 out_unlock:
 	mutex_unlock(&phydev->lock);
+
+	if (trigger)
+		phy_trigger_machine(phydev, sync);
+
 	return err;
 }
+
+/**
+ * phy_start_aneg - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+ * them), and then calls the driver's config_aneg function.
+ * If the PHYCONTROL Layer is operating, we change the state to
+ * reflect the beginning of Auto-negotiation or forcing.
+ */
+int phy_start_aneg(struct phy_device *phydev)
+{
+	return phy_start_aneg_priv(phydev, true);
+}
 EXPORT_SYMBOL(phy_start_aneg);
 
 /**
@@ -659,7 +691,7 @@ void phy_start_machine(struct phy_device *phydev)
  * state machine runs.
  */
 
-static void phy_trigger_machine(struct phy_device *phydev, bool sync)
+void phy_trigger_machine(struct phy_device *phydev, bool sync)
 {
 	if (sync)
 		cancel_delayed_work_sync(&phydev->state_queue);
@@ -1154,7 +1186,7 @@ void phy_state_machine(struct work_struct *work)
 	mutex_unlock(&phydev->lock);
 
 	if (needs_aneg)
-		err = phy_start_aneg(phydev);
+		err = phy_start_aneg_priv(phydev, false);
 	else if (do_suspend)
 		phy_suspend(phydev);
 
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f8c81f12d988..85c01247f2e3 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2361,8 +2361,10 @@ start_again:
 
 	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
 			  TEAM_CMD_OPTIONS_GET);
-	if (!hdr)
+	if (!hdr) {
+		nlmsg_free(skb);
 		return -EMSGSIZE;
+	}
 
 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
 		goto nla_put_failure;
@@ -2634,8 +2636,10 @@ start_again:
 
 	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
 			  TEAM_CMD_PORT_LIST_GET);
-	if (!hdr)
+	if (!hdr) {
+		nlmsg_free(skb);
 		return -EMSGSIZE;
+	}
 
 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
 		goto nla_put_failure;
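
Note on the team changes above: both dump paths now free the already-allocated netlink message when genlmsg_put() cannot start the header; returning early without nlmsg_free() would leak the skb. A compact, hedged sketch of the pattern follows; start_dump_msg() is a made-up wrapper, not code from team.c.

#include <net/genetlink.h>

static int start_dump_msg(struct sk_buff *skb, u32 portid, u32 seq,
			  const struct genl_family *family, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, family, NLM_F_MULTI, cmd);
	if (!hdr) {
		nlmsg_free(skb);	/* caller-allocated message must not leak */
		return -EMSGSIZE;
	}

	/* ... add attributes, then genlmsg_end(skb, hdr) ... */
	return 0;
}
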
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3dd490f53e48..f28bd74ac275 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -369,7 +369,7 @@ config USB_NET_NET1080
 	  optionally with LEDs that indicate traffic
 
 config USB_NET_PLUSB
-	tristate "Prolific PL-2301/2302/25A1 based cables"
+	tristate "Prolific PL-2301/2302/25A1/27A1 based cables"
 	# if the handshake/init/reset problems, from original 'plusb',
 	# are ever resolved ... then remove "experimental"
 	depends on USB_USBNET
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 4f2e8141dbe2..93411a348f12 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -3279,9 +3279,9 @@ static void __exit hso_exit(void)
 	pr_info("unloaded\n");
 
 	tty_unregister_driver(tty_drv);
-	put_tty_driver(tty_drv);
 	/* deregister the usb driver */
 	usb_deregister(&hso_driver);
+	put_tty_driver(tty_drv);
 }
 
 /* Module definitions */
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 22e1a9a99a7d..6fe59373cba9 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -102,7 +102,7 @@ static int pl_reset(struct usbnet *dev)
 }
 
 static const struct driver_info prolific_info = {
-	.description = "Prolific PL-2301/PL-2302/PL-25A1",
+	.description = "Prolific PL-2301/PL-2302/PL-25A1/PL-27A1",
 	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT,
 	/* some PL-2302 versions seem to fail usb_set_interface() */
 	.reset = pl_reset,
@@ -139,6 +139,17 @@ static const struct usb_device_id products [] = {
 	 * Host-to-Host Cable
 	 */
 	.driver_info = (unsigned long) &prolific_info,
+
+},
+
+/* super speed cables */
+{
+	USB_DEVICE(0x067b, 0x27a1),	/* PL-27A1, no eeprom
+					 * also: goobay Active USB 3.0
+					 * Data Link,
+					 * Unitek Y-3501
+					 */
+	.driver_info = (unsigned long) &prolific_info,
 },
 
 { },		// END
@@ -158,5 +169,5 @@ static struct usb_driver plusb_driver = {
 module_usb_driver(plusb_driver);
 
 MODULE_AUTHOR("David Brownell");
-MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
+MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1/27A1 USB Host to Host Link Driver");
 MODULE_LICENSE("GPL");
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 43a774873aa9..fb3857337151 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -852,6 +852,7 @@ void phy_change_work(struct work_struct *work);
 void phy_mac_interrupt(struct phy_device *phydev, int new_link);
 void phy_start_machine(struct phy_device *phydev);
 void phy_stop_machine(struct phy_device *phydev);
+void phy_trigger_machine(struct phy_device *phydev, bool sync);
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_ksettings_get(struct phy_device *phydev,
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 90f49a194249..430b53e7d941 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -123,6 +123,7 @@ static void br_dev_uninit(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
 
+	br_multicast_dev_del(br);
 	br_multicast_uninit_stats(br);
 	br_vlan_flush(br);
 	free_percpu(br->stats);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 56a2a72e7738..a8d0ed282a10 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -311,7 +311,6 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
 
 	br_fdb_delete_by_port(br, NULL, 0, 1);
 
-	br_multicast_dev_del(br);
 	cancel_delayed_work_sync(&br->gc_work);
 
 	br_sysfs_delbr(br->dev);
diff --git a/net/core/dev.c b/net/core/dev.c
index 533a6d6f6092..9b5875388c23 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2450,6 +2450,9 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 {
 	unsigned long flags;
 
+	if (unlikely(!skb))
+		return;
+
 	if (likely(atomic_read(&skb->users) == 1)) {
 		smp_rmb();
 		atomic_set(&skb->users, 0);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index acd69cfe2951..d9724889ff09 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2359,7 +2359,8 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
 	}
 
 	/* L3 master device is the loopback for that domain */
-	dev_out = l3mdev_master_dev_rcu(dev_out) ? : net->loopback_dev;
+	dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(res)) ? :
+			net->loopback_dev;
 	fl4->flowi4_oif = dev_out->ifindex;
 	flags |= RTCF_LOCAL;
 	goto make_route;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 79c4817abc94..6e3c512054a6 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -168,12 +168,8 @@ void tcp_assign_congestion_control(struct sock *sk)
 	}
 out:
 	rcu_read_unlock();
+	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-	/* Clear out private data before diag gets it and
-	 * the ca has not been initialized.
-	 */
-	if (ca->get_info)
-		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 	if (ca->flags & TCP_CONG_NEEDS_ECN)
 		INET_ECN_xmit(sk);
 	else
@@ -200,11 +196,10 @@ static void tcp_reinit_congestion_control(struct sock *sk,
 	tcp_cleanup_congestion_control(sk);
 	icsk->icsk_ca_ops = ca;
 	icsk->icsk_ca_setsockopt = 1;
+	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-	if (sk->sk_state != TCP_CLOSE) {
-		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+	if (sk->sk_state != TCP_CLOSE)
 		tcp_init_congestion_control(sk);
-	}
 }
 
 /* Manage refcounts on socket close. */
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index b2be1d9757ef..781250151d40 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -29,6 +29,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
29 u16 mac_len = skb->mac_len; 29 u16 mac_len = skb->mac_len;
30 int udp_offset, outer_hlen; 30 int udp_offset, outer_hlen;
31 __wsum partial; 31 __wsum partial;
32 bool need_ipsec;
32 33
33 if (unlikely(!pskb_may_pull(skb, tnl_hlen))) 34 if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
34 goto out; 35 goto out;
@@ -62,8 +63,10 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
62 63
63 ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); 64 ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
64 65
66 need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
65 /* Try to offload checksum if possible */ 67 /* Try to offload checksum if possible */
66 offload_csum = !!(need_csum && 68 offload_csum = !!(need_csum &&
69 !need_ipsec &&
67 (skb->dev->features & 70 (skb->dev->features &
68 (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) : 71 (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
69 (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM)))); 72 (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 80ce478c4851..0ea96c4d334d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3271,14 +3271,24 @@ static void addrconf_gre_config(struct net_device *dev)
3271static int fixup_permanent_addr(struct inet6_dev *idev, 3271static int fixup_permanent_addr(struct inet6_dev *idev,
3272 struct inet6_ifaddr *ifp) 3272 struct inet6_ifaddr *ifp)
3273{ 3273{
3274 if (!ifp->rt) { 3274 /* rt6i_ref == 0 means the host route was removed from the
3275 struct rt6_info *rt; 3275 * FIB, for example, if 'lo' device is taken down. In that
3276 * case regenerate the host route.
3277 */
3278 if (!ifp->rt || !atomic_read(&ifp->rt->rt6i_ref)) {
3279 struct rt6_info *rt, *prev;
3276 3280
3277 rt = addrconf_dst_alloc(idev, &ifp->addr, false); 3281 rt = addrconf_dst_alloc(idev, &ifp->addr, false);
3278 if (unlikely(IS_ERR(rt))) 3282 if (unlikely(IS_ERR(rt)))
3279 return PTR_ERR(rt); 3283 return PTR_ERR(rt);
3280 3284
3285 /* ifp->rt can be accessed outside of rtnl */
3286 spin_lock(&ifp->lock);
3287 prev = ifp->rt;
3281 ifp->rt = rt; 3288 ifp->rt = rt;
3289 spin_unlock(&ifp->lock);
3290
3291 ip6_rt_put(prev);
3282 } 3292 }
3283 3293
3284 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) { 3294 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index a9a9553ee63d..e82e59f22dfc 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -933,8 +933,6 @@ static int __init inet6_init(void)
933 if (err) 933 if (err)
934 goto igmp_fail; 934 goto igmp_fail;
935 935
936 ipv6_stub = &ipv6_stub_impl;
937
938 err = ipv6_netfilter_init(); 936 err = ipv6_netfilter_init();
939 if (err) 937 if (err)
940 goto netfilter_fail; 938 goto netfilter_fail;
@@ -1010,6 +1008,10 @@ static int __init inet6_init(void)
1010 if (err) 1008 if (err)
1011 goto sysctl_fail; 1009 goto sysctl_fail;
1012#endif 1010#endif
1011
1012 /* ensure that ipv6 stubs are visible only after ipv6 is ready */
1013 wmb();
1014 ipv6_stub = &ipv6_stub_impl;
1013out: 1015out:
1014 return err; 1016 return err;
1015 1017
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 25192a3b0cd7..d32e2110aff2 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -909,6 +909,8 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
909{ 909{
910 switch (opt->type) { 910 switch (opt->type) {
911 case IPV6_SRCRT_TYPE_0: 911 case IPV6_SRCRT_TYPE_0:
912 case IPV6_SRCRT_STRICT:
913 case IPV6_SRCRT_TYPE_2:
912 ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr); 914 ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
913 break; 915 break;
914 case IPV6_SRCRT_TYPE_4: 916 case IPV6_SRCRT_TYPE_4:
@@ -1163,6 +1165,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
1163 1165
1164 switch (opt->srcrt->type) { 1166 switch (opt->srcrt->type) {
1165 case IPV6_SRCRT_TYPE_0: 1167 case IPV6_SRCRT_TYPE_0:
1168 case IPV6_SRCRT_STRICT:
1169 case IPV6_SRCRT_TYPE_2:
1166 fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; 1170 fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
1167 break; 1171 break;
1168 case IPV6_SRCRT_TYPE_4: 1172 case IPV6_SRCRT_TYPE_4:
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 75fac933c209..a9692ec0cd6d 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1037,7 +1037,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1037 struct ip6_tnl *t = netdev_priv(dev); 1037 struct ip6_tnl *t = netdev_priv(dev);
1038 struct net *net = t->net; 1038 struct net *net = t->net;
1039 struct net_device_stats *stats = &t->dev->stats; 1039 struct net_device_stats *stats = &t->dev->stats;
1040 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 1040 struct ipv6hdr *ipv6h;
1041 struct ipv6_tel_txoption opt; 1041 struct ipv6_tel_txoption opt;
1042 struct dst_entry *dst = NULL, *ndst = NULL; 1042 struct dst_entry *dst = NULL, *ndst = NULL;
1043 struct net_device *tdev; 1043 struct net_device *tdev;
@@ -1057,26 +1057,28 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1057 1057
1058 /* NBMA tunnel */ 1058 /* NBMA tunnel */
1059 if (ipv6_addr_any(&t->parms.raddr)) { 1059 if (ipv6_addr_any(&t->parms.raddr)) {
1060 struct in6_addr *addr6; 1060 if (skb->protocol == htons(ETH_P_IPV6)) {
1061 struct neighbour *neigh; 1061 struct in6_addr *addr6;
1062 int addr_type; 1062 struct neighbour *neigh;
1063 int addr_type;
1063 1064
1064 if (!skb_dst(skb)) 1065 if (!skb_dst(skb))
1065 goto tx_err_link_failure; 1066 goto tx_err_link_failure;
1066 1067
1067 neigh = dst_neigh_lookup(skb_dst(skb), 1068 neigh = dst_neigh_lookup(skb_dst(skb),
1068 &ipv6_hdr(skb)->daddr); 1069 &ipv6_hdr(skb)->daddr);
1069 if (!neigh) 1070 if (!neigh)
1070 goto tx_err_link_failure; 1071 goto tx_err_link_failure;
1071 1072
1072 addr6 = (struct in6_addr *)&neigh->primary_key; 1073 addr6 = (struct in6_addr *)&neigh->primary_key;
1073 addr_type = ipv6_addr_type(addr6); 1074 addr_type = ipv6_addr_type(addr6);
1074 1075
1075 if (addr_type == IPV6_ADDR_ANY) 1076 if (addr_type == IPV6_ADDR_ANY)
1076 addr6 = &ipv6_hdr(skb)->daddr; 1077 addr6 = &ipv6_hdr(skb)->daddr;
1077 1078
1078 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1079 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1079 neigh_release(neigh); 1080 neigh_release(neigh);
1081 }
1080 } else if (!(t->parms.flags & 1082 } else if (!(t->parms.flags &
1081 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { 1083 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
1082 /* enable the cache only if the routing decision does 1084 /* enable the cache only if the routing decision does
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 7ebac630d3c6..cb1766724a4c 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1749,7 +1749,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1749 idev = in6_dev_get(dev); 1749 idev = in6_dev_get(dev);
1750 if (!idev) 1750 if (!idev)
1751 break; 1751 break;
1752 if (idev->cnf.ndisc_notify) 1752 if (idev->cnf.ndisc_notify ||
1753 net->ipv6.devconf_all->ndisc_notify)
1753 ndisc_send_unsol_na(dev); 1754 ndisc_send_unsol_na(dev);
1754 in6_dev_put(idev); 1755 in6_dev_put(idev);
1755 break; 1756 break;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index f174e76e6505..0da6a12b5472 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1178,8 +1178,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
1178 spin_lock_bh(&sk->sk_receive_queue.lock); 1178 spin_lock_bh(&sk->sk_receive_queue.lock);
1179 skb = skb_peek(&sk->sk_receive_queue); 1179 skb = skb_peek(&sk->sk_receive_queue);
1180 if (skb) 1180 if (skb)
1181 amount = skb_tail_pointer(skb) - 1181 amount = skb->len;
1182 skb_transport_header(skb);
1183 spin_unlock_bh(&sk->sk_receive_queue.lock); 1182 spin_unlock_bh(&sk->sk_receive_queue.lock);
1184 return put_user(amount, (int __user *)arg); 1183 return put_user(amount, (int __user *)arg);
1185 } 1184 }
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8489beff5c25..ea81ccf3c7d6 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3836,6 +3836,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3836 case PACKET_HDRLEN: 3836 case PACKET_HDRLEN:
3837 if (len > sizeof(int)) 3837 if (len > sizeof(int))
3838 len = sizeof(int); 3838 len = sizeof(int);
3839 if (len < sizeof(int))
3840 return -EINVAL;
3839 if (copy_from_user(&val, optval, len)) 3841 if (copy_from_user(&val, optval, len))
3840 return -EFAULT; 3842 return -EFAULT;
3841 switch (val) { 3843 switch (val) {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 7130e73bd42c..566906795c8c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1083,7 +1083,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1083 } 1083 }
1084 } while (sent < dlen && !rc); 1084 } while (sent < dlen && !rc);
1085 1085
1086 return rc ? rc : sent; 1086 return sent ? sent : rc;
1087} 1087}
1088 1088
1089/** 1089/**
@@ -1484,7 +1484,7 @@ restart:
1484 if (unlikely(flags & MSG_PEEK)) 1484 if (unlikely(flags & MSG_PEEK))
1485 goto exit; 1485 goto exit;
1486 1486
1487 tsk->rcv_unacked += tsk_inc(tsk, hlen + sz); 1487 tsk->rcv_unacked += tsk_inc(tsk, hlen + msg_data_sz(msg));
1488 if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4))) 1488 if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
1489 tipc_sk_send_ack(tsk); 1489 tipc_sk_send_ack(tsk);
1490 tsk_advance_rx_queue(sk); 1490 tsk_advance_rx_queue(sk);