summaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-12-17 23:17:04 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-12-17 23:17:04 -0500
commit52f40e9d657cc126b766304a5dd58ad73b02ff46 (patch)
tree7cbc22b9a89fe41103e6a61a52b7f9c71ef5ba43 /drivers/net
parent231753ef780012eb6f3922c3dfc0a7186baa33c2 (diff)
parent3e3397e7b11ce1b9526975ddfbe8dd569fc1f316 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes and cleanups from David Miller: 1) Revert bogus nla_ok() change, from Alexey Dobriyan. 2) Various bpf validator fixes from Daniel Borkmann. 3) Add some necessary SET_NETDEV_DEV() calls to hsis_femac and hip04 drivers, from Dongpo Li. 4) Several ethtool ksettings conversions from Philippe Reynes. 5) Fix bugs in inet port management wrt. soreuseport, from Tom Herbert. 6) XDP support for virtio_net, from John Fastabend. 7) Fix NAT handling within a vrf, from David Ahern. 8) Endianness fixes in dpaa_eth driver, from Claudiu Manoil * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (63 commits) net: mv643xx_eth: fix build failure isdn: Constify some function parameters mlxsw: spectrum: Mark split ports as such cgroup: Fix CGROUP_BPF config qed: fix old-style function definition net: ipv6: check route protocol when deleting routes r6040: move spinlock in r6040_close as SOFTIRQ-unsafe lock order detected irda: w83977af_ir: cleanup an indent issue net: sfc: use new api ethtool_{get|set}_link_ksettings net: davicom: dm9000: use new api ethtool_{get|set}_link_ksettings net: cirrus: ep93xx: use new api ethtool_{get|set}_link_ksettings net: chelsio: cxgb3: use new api ethtool_{get|set}_link_ksettings net: chelsio: cxgb2: use new api ethtool_{get|set}_link_ksettings bpf: fix mark_reg_unknown_value for spilled regs on map value marking bpf: fix overflow in prog accounting bpf: dynamically allocate digest scratch buffer gtp: Fix initialization of Flags octet in GTPv1 header gtp: gtp_check_src_ms_ipv4() always return success net/x25: use designated initializers isdn: use designated initializers ...
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c6
-rw-r--r--drivers/net/ethernet/3com/3c515.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c8
-rw-r--r--drivers/net/ethernet/cadence/Kconfig9
-rw-r--r--drivers/net/ethernet/cadence/Makefile1
-rw-r--r--drivers/net/ethernet/cadence/macb.c31
-rw-r--r--drivers/net/ethernet/cadence/macb_pci.c153
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c64
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c65
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c14
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c14
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c71
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c12
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c17
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c9
-rw-r--r--drivers/net/ethernet/rdc/r6040.c10
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c35
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c60
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h12
-rw-r--r--drivers/net/gtp.c8
-rw-r--r--drivers/net/irda/w83977af_ir.c6
-rw-r--r--drivers/net/virtio_net.c369
-rw-r--r--drivers/net/vrf.c6
-rw-r--r--drivers/net/wan/lmc/lmc_media.c97
31 files changed, 877 insertions, 246 deletions
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 4da379f28d5d..f7222dc6581d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1775,6 +1775,9 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
1775 if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i)) 1775 if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
1776 continue; 1776 continue;
1777 1777
1778 if (!ds->ports[port].netdev)
1779 continue;
1780
1778 if (vlan.data[i] == 1781 if (vlan.data[i] ==
1779 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) 1782 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1780 continue; 1783 continue;
@@ -1783,6 +1786,9 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
1783 chip->ports[port].bridge_dev) 1786 chip->ports[port].bridge_dev)
1784 break; /* same bridge, check next VLAN */ 1787 break; /* same bridge, check next VLAN */
1785 1788
1789 if (!chip->ports[i].bridge_dev)
1790 continue;
1791
1786 netdev_warn(ds->ports[port].netdev, 1792 netdev_warn(ds->ports[port].netdev,
1787 "hardware VLAN %d already used by %s\n", 1793 "hardware VLAN %d already used by %s\n",
1788 vlan.vid, 1794 vlan.vid,
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index b9f4c463e516..be5b80103bec 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -627,6 +627,8 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
627 627
628 spin_lock_init(&vp->lock); 628 spin_lock_init(&vp->lock);
629 629
630 setup_timer(&vp->timer, corkscrew_timer, (unsigned long) dev);
631
630 /* Read the station address from the EEPROM. */ 632 /* Read the station address from the EEPROM. */
631 EL3WINDOW(0); 633 EL3WINDOW(0);
632 for (i = 0; i < 0x18; i++) { 634 for (i = 0; i < 0x18; i++) {
@@ -707,6 +709,7 @@ static int corkscrew_open(struct net_device *dev)
707{ 709{
708 int ioaddr = dev->base_addr; 710 int ioaddr = dev->base_addr;
709 struct corkscrew_private *vp = netdev_priv(dev); 711 struct corkscrew_private *vp = netdev_priv(dev);
712 bool armtimer = false;
710 __u32 config; 713 __u32 config;
711 int i; 714 int i;
712 715
@@ -731,12 +734,7 @@ static int corkscrew_open(struct net_device *dev)
731 if (corkscrew_debug > 1) 734 if (corkscrew_debug > 1)
732 pr_debug("%s: Initial media type %s.\n", 735 pr_debug("%s: Initial media type %s.\n",
733 dev->name, media_tbl[dev->if_port].name); 736 dev->name, media_tbl[dev->if_port].name);
734 737 armtimer = true;
735 init_timer(&vp->timer);
736 vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
737 vp->timer.data = (unsigned long) dev;
738 vp->timer.function = corkscrew_timer; /* timer handler */
739 add_timer(&vp->timer);
740 } else 738 } else
741 dev->if_port = vp->default_media; 739 dev->if_port = vp->default_media;
742 740
@@ -776,6 +774,9 @@ static int corkscrew_open(struct net_device *dev)
776 return -EAGAIN; 774 return -EAGAIN;
777 } 775 }
778 776
777 if (armtimer)
778 mod_timer(&vp->timer, jiffies + media_tbl[dev->if_port].wait);
779
779 if (corkscrew_debug > 1) { 780 if (corkscrew_debug > 1) {
780 EL3WINDOW(4); 781 EL3WINDOW(4);
781 pr_debug("%s: corkscrew_open() irq %d media status %4.4x.\n", 782 pr_debug("%s: corkscrew_open() irq %d media status %4.4x.\n",
@@ -1426,7 +1427,7 @@ static int corkscrew_close(struct net_device *dev)
1426 dev->name, rx_nocopy, rx_copy, queued_packet); 1427 dev->name, rx_nocopy, rx_copy, queued_packet);
1427 } 1428 }
1428 1429
1429 del_timer(&vp->timer); 1430 del_timer_sync(&vp->timer);
1430 1431
1431 /* Turn off statistics ASAP. We update lp->stats below. */ 1432 /* Turn off statistics ASAP. We update lp->stats below. */
1432 outw(StatsDisable, ioaddr + EL3_CMD); 1433 outw(StatsDisable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 4e5c3874a50f..bba81735ce87 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1676,10 +1676,10 @@ bna_cb_ioceth_reset(void *arg)
1676} 1676}
1677 1677
1678static struct bfa_ioc_cbfn bna_ioceth_cbfn = { 1678static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
1679 bna_cb_ioceth_enable, 1679 .enable_cbfn = bna_cb_ioceth_enable,
1680 bna_cb_ioceth_disable, 1680 .disable_cbfn = bna_cb_ioceth_disable,
1681 bna_cb_ioceth_hbfail, 1681 .hbfail_cbfn = bna_cb_ioceth_hbfail,
1682 bna_cb_ioceth_reset 1682 .reset_cbfn = bna_cb_ioceth_reset
1683}; 1683};
1684 1684
1685static void bna_attr_init(struct bna_ioceth *ioceth) 1685static void bna_attr_init(struct bna_ioceth *ioceth)
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index f0bcb15d3fec..608bea171956 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -31,4 +31,13 @@ config MACB
31 To compile this driver as a module, choose M here: the module 31 To compile this driver as a module, choose M here: the module
32 will be called macb. 32 will be called macb.
33 33
34config MACB_PCI
35 tristate "Cadence PCI MACB/GEM support"
36 depends on MACB && PCI && COMMON_CLK
37 ---help---
38 This is PCI wrapper for MACB driver.
39
40 To compile this driver as a module, choose M here: the module
41 will be called macb_pci.
42
34endif # NET_CADENCE 43endif # NET_CADENCE
diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile
index 91f79b1f0505..4ba75594d5c5 100644
--- a/drivers/net/ethernet/cadence/Makefile
+++ b/drivers/net/ethernet/cadence/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_MACB) += macb.o 5obj-$(CONFIG_MACB) += macb.o
6obj-$(CONFIG_MACB_PCI) += macb_pci.o
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 538544a7c642..c0fb80acc2da 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -404,6 +404,8 @@ static int macb_mii_probe(struct net_device *dev)
404 phy_irq = gpio_to_irq(pdata->phy_irq_pin); 404 phy_irq = gpio_to_irq(pdata->phy_irq_pin);
405 phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; 405 phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
406 } 406 }
407 } else {
408 phydev->irq = PHY_POLL;
407 } 409 }
408 410
409 /* attach the mac to the phy */ 411 /* attach the mac to the phy */
@@ -482,6 +484,9 @@ static int macb_mii_init(struct macb *bp)
482 goto err_out_unregister_bus; 484 goto err_out_unregister_bus;
483 } 485 }
484 } else { 486 } else {
487 for (i = 0; i < PHY_MAX_ADDR; i++)
488 bp->mii_bus->irq[i] = PHY_POLL;
489
485 if (pdata) 490 if (pdata)
486 bp->mii_bus->phy_mask = pdata->phy_mask; 491 bp->mii_bus->phy_mask = pdata->phy_mask;
487 492
@@ -2523,16 +2528,24 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
2523 struct clk **hclk, struct clk **tx_clk, 2528 struct clk **hclk, struct clk **tx_clk,
2524 struct clk **rx_clk) 2529 struct clk **rx_clk)
2525{ 2530{
2531 struct macb_platform_data *pdata;
2526 int err; 2532 int err;
2527 2533
2528 *pclk = devm_clk_get(&pdev->dev, "pclk"); 2534 pdata = dev_get_platdata(&pdev->dev);
2535 if (pdata) {
2536 *pclk = pdata->pclk;
2537 *hclk = pdata->hclk;
2538 } else {
2539 *pclk = devm_clk_get(&pdev->dev, "pclk");
2540 *hclk = devm_clk_get(&pdev->dev, "hclk");
2541 }
2542
2529 if (IS_ERR(*pclk)) { 2543 if (IS_ERR(*pclk)) {
2530 err = PTR_ERR(*pclk); 2544 err = PTR_ERR(*pclk);
2531 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); 2545 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
2532 return err; 2546 return err;
2533 } 2547 }
2534 2548
2535 *hclk = devm_clk_get(&pdev->dev, "hclk");
2536 if (IS_ERR(*hclk)) { 2549 if (IS_ERR(*hclk)) {
2537 err = PTR_ERR(*hclk); 2550 err = PTR_ERR(*hclk);
2538 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); 2551 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
@@ -3107,15 +3120,23 @@ static const struct of_device_id macb_dt_ids[] = {
3107MODULE_DEVICE_TABLE(of, macb_dt_ids); 3120MODULE_DEVICE_TABLE(of, macb_dt_ids);
3108#endif /* CONFIG_OF */ 3121#endif /* CONFIG_OF */
3109 3122
3123static const struct macb_config default_gem_config = {
3124 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
3125 .dma_burst_length = 16,
3126 .clk_init = macb_clk_init,
3127 .init = macb_init,
3128 .jumbo_max_len = 10240,
3129};
3130
3110static int macb_probe(struct platform_device *pdev) 3131static int macb_probe(struct platform_device *pdev)
3111{ 3132{
3133 const struct macb_config *macb_config = &default_gem_config;
3112 int (*clk_init)(struct platform_device *, struct clk **, 3134 int (*clk_init)(struct platform_device *, struct clk **,
3113 struct clk **, struct clk **, struct clk **) 3135 struct clk **, struct clk **, struct clk **)
3114 = macb_clk_init; 3136 = macb_config->clk_init;
3115 int (*init)(struct platform_device *) = macb_init; 3137 int (*init)(struct platform_device *) = macb_config->init;
3116 struct device_node *np = pdev->dev.of_node; 3138 struct device_node *np = pdev->dev.of_node;
3117 struct device_node *phy_node; 3139 struct device_node *phy_node;
3118 const struct macb_config *macb_config = NULL;
3119 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; 3140 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
3120 unsigned int queue_mask, num_queues; 3141 unsigned int queue_mask, num_queues;
3121 struct macb_platform_data *pdata; 3142 struct macb_platform_data *pdata;
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
new file mode 100644
index 000000000000..92be2cd8f817
--- /dev/null
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -0,0 +1,153 @@
1/**
2 * macb_pci.c - Cadence GEM PCI wrapper.
3 *
4 * Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com
5 *
6 * Authors: Rafal Ozieblo <rafalo@cadence.com>
7 * Bartosz Folta <bfolta@cadence.com>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include <linux/clk.h>
23#include <linux/clk-provider.h>
24#include <linux/etherdevice.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27#include <linux/platform_data/macb.h>
28#include <linux/platform_device.h>
29#include "macb.h"
30
31#define PCI_DRIVER_NAME "macb_pci"
32#define PLAT_DRIVER_NAME "macb"
33
34#define CDNS_VENDOR_ID 0x17cd
35#define CDNS_DEVICE_ID 0xe007
36
37#define GEM_PCLK_RATE 50000000
38#define GEM_HCLK_RATE 50000000
39
40static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
41{
42 int err;
43 struct platform_device *plat_dev;
44 struct platform_device_info plat_info;
45 struct macb_platform_data plat_data;
46 struct resource res[2];
47
48 /* sanity check */
49 if (!id)
50 return -EINVAL;
51
52 /* enable pci device */
53 err = pci_enable_device(pdev);
54 if (err < 0) {
55 dev_err(&pdev->dev, "Enabling PCI device has failed: 0x%04X",
56 err);
57 return -EACCES;
58 }
59
60 pci_set_master(pdev);
61
62 /* set up resources */
63 memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
64 res[0].start = pdev->resource[0].start;
65 res[0].end = pdev->resource[0].end;
66 res[0].name = PCI_DRIVER_NAME;
67 res[0].flags = IORESOURCE_MEM;
68 res[1].start = pdev->irq;
69 res[1].name = PCI_DRIVER_NAME;
70 res[1].flags = IORESOURCE_IRQ;
71
72 dev_info(&pdev->dev, "EMAC physical base addr = 0x%p\n",
73 (void *)(uintptr_t)pci_resource_start(pdev, 0));
74
75 /* set up macb platform data */
76 memset(&plat_data, 0, sizeof(plat_data));
77
78 /* initialize clocks */
79 plat_data.pclk = clk_register_fixed_rate(&pdev->dev, "pclk", NULL, 0,
80 GEM_PCLK_RATE);
81 if (IS_ERR(plat_data.pclk)) {
82 err = PTR_ERR(plat_data.pclk);
83 goto err_pclk_register;
84 }
85
86 plat_data.hclk = clk_register_fixed_rate(&pdev->dev, "hclk", NULL, 0,
87 GEM_HCLK_RATE);
88 if (IS_ERR(plat_data.hclk)) {
89 err = PTR_ERR(plat_data.hclk);
90 goto err_hclk_register;
91 }
92
93 /* set up platform device info */
94 memset(&plat_info, 0, sizeof(plat_info));
95 plat_info.parent = &pdev->dev;
96 plat_info.fwnode = pdev->dev.fwnode;
97 plat_info.name = PLAT_DRIVER_NAME;
98 plat_info.id = pdev->devfn;
99 plat_info.res = res;
100 plat_info.num_res = ARRAY_SIZE(res);
101 plat_info.data = &plat_data;
102 plat_info.size_data = sizeof(plat_data);
103 plat_info.dma_mask = DMA_BIT_MASK(32);
104
105 /* register platform device */
106 plat_dev = platform_device_register_full(&plat_info);
107 if (IS_ERR(plat_dev)) {
108 err = PTR_ERR(plat_dev);
109 goto err_plat_dev_register;
110 }
111
112 pci_set_drvdata(pdev, plat_dev);
113
114 return 0;
115
116err_plat_dev_register:
117 clk_unregister(plat_data.hclk);
118
119err_hclk_register:
120 clk_unregister(plat_data.pclk);
121
122err_pclk_register:
123 pci_disable_device(pdev);
124 return err;
125}
126
127static void macb_remove(struct pci_dev *pdev)
128{
129 struct platform_device *plat_dev = pci_get_drvdata(pdev);
130 struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
131
132 platform_device_unregister(plat_dev);
133 pci_disable_device(pdev);
134 clk_unregister(plat_data->pclk);
135 clk_unregister(plat_data->hclk);
136}
137
138static struct pci_device_id dev_id_table[] = {
139 { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), },
140 { 0, }
141};
142
143static struct pci_driver macb_pci_driver = {
144 .name = PCI_DRIVER_NAME,
145 .id_table = dev_id_table,
146 .probe = macb_probe,
147 .remove = macb_remove,
148};
149
150module_pci_driver(macb_pci_driver);
151MODULE_DEVICE_TABLE(pci, dev_id_table);
152MODULE_LICENSE("GPL");
153MODULE_DESCRIPTION("Cadence NIC PCI wrapper");
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 81d1d0bc7553..3a05f9098e75 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -568,28 +568,33 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
568 reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD); 568 reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
569} 569}
570 570
571static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 571static int get_link_ksettings(struct net_device *dev,
572 struct ethtool_link_ksettings *cmd)
572{ 573{
573 struct adapter *adapter = dev->ml_priv; 574 struct adapter *adapter = dev->ml_priv;
574 struct port_info *p = &adapter->port[dev->if_port]; 575 struct port_info *p = &adapter->port[dev->if_port];
576 u32 supported, advertising;
575 577
576 cmd->supported = p->link_config.supported; 578 supported = p->link_config.supported;
577 cmd->advertising = p->link_config.advertising; 579 advertising = p->link_config.advertising;
578 580
579 if (netif_carrier_ok(dev)) { 581 if (netif_carrier_ok(dev)) {
580 ethtool_cmd_speed_set(cmd, p->link_config.speed); 582 cmd->base.speed = p->link_config.speed;
581 cmd->duplex = p->link_config.duplex; 583 cmd->base.duplex = p->link_config.duplex;
582 } else { 584 } else {
583 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); 585 cmd->base.speed = SPEED_UNKNOWN;
584 cmd->duplex = DUPLEX_UNKNOWN; 586 cmd->base.duplex = DUPLEX_UNKNOWN;
585 } 587 }
586 588
587 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; 589 cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
588 cmd->phy_address = p->phy->mdio.prtad; 590 cmd->base.phy_address = p->phy->mdio.prtad;
589 cmd->transceiver = XCVR_EXTERNAL; 591 cmd->base.autoneg = p->link_config.autoneg;
590 cmd->autoneg = p->link_config.autoneg; 592
591 cmd->maxtxpkt = 0; 593 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
592 cmd->maxrxpkt = 0; 594 supported);
595 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
596 advertising);
597
593 return 0; 598 return 0;
594} 599}
595 600
@@ -628,36 +633,41 @@ static int speed_duplex_to_caps(int speed, int duplex)
628 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ 633 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
629 ADVERTISED_10000baseT_Full) 634 ADVERTISED_10000baseT_Full)
630 635
631static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 636static int set_link_ksettings(struct net_device *dev,
637 const struct ethtool_link_ksettings *cmd)
632{ 638{
633 struct adapter *adapter = dev->ml_priv; 639 struct adapter *adapter = dev->ml_priv;
634 struct port_info *p = &adapter->port[dev->if_port]; 640 struct port_info *p = &adapter->port[dev->if_port];
635 struct link_config *lc = &p->link_config; 641 struct link_config *lc = &p->link_config;
642 u32 advertising;
643
644 ethtool_convert_link_mode_to_legacy_u32(&advertising,
645 cmd->link_modes.advertising);
636 646
637 if (!(lc->supported & SUPPORTED_Autoneg)) 647 if (!(lc->supported & SUPPORTED_Autoneg))
638 return -EOPNOTSUPP; /* can't change speed/duplex */ 648 return -EOPNOTSUPP; /* can't change speed/duplex */
639 649
640 if (cmd->autoneg == AUTONEG_DISABLE) { 650 if (cmd->base.autoneg == AUTONEG_DISABLE) {
641 u32 speed = ethtool_cmd_speed(cmd); 651 u32 speed = cmd->base.speed;
642 int cap = speed_duplex_to_caps(speed, cmd->duplex); 652 int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
643 653
644 if (!(lc->supported & cap) || (speed == SPEED_1000)) 654 if (!(lc->supported & cap) || (speed == SPEED_1000))
645 return -EINVAL; 655 return -EINVAL;
646 lc->requested_speed = speed; 656 lc->requested_speed = speed;
647 lc->requested_duplex = cmd->duplex; 657 lc->requested_duplex = cmd->base.duplex;
648 lc->advertising = 0; 658 lc->advertising = 0;
649 } else { 659 } else {
650 cmd->advertising &= ADVERTISED_MASK; 660 advertising &= ADVERTISED_MASK;
651 if (cmd->advertising & (cmd->advertising - 1)) 661 if (advertising & (advertising - 1))
652 cmd->advertising = lc->supported; 662 advertising = lc->supported;
653 cmd->advertising &= lc->supported; 663 advertising &= lc->supported;
654 if (!cmd->advertising) 664 if (!advertising)
655 return -EINVAL; 665 return -EINVAL;
656 lc->requested_speed = SPEED_INVALID; 666 lc->requested_speed = SPEED_INVALID;
657 lc->requested_duplex = DUPLEX_INVALID; 667 lc->requested_duplex = DUPLEX_INVALID;
658 lc->advertising = cmd->advertising | ADVERTISED_Autoneg; 668 lc->advertising = advertising | ADVERTISED_Autoneg;
659 } 669 }
660 lc->autoneg = cmd->autoneg; 670 lc->autoneg = cmd->base.autoneg;
661 if (netif_running(dev)) 671 if (netif_running(dev))
662 t1_link_start(p->phy, p->mac, lc); 672 t1_link_start(p->phy, p->mac, lc);
663 return 0; 673 return 0;
@@ -788,8 +798,6 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
788} 798}
789 799
790static const struct ethtool_ops t1_ethtool_ops = { 800static const struct ethtool_ops t1_ethtool_ops = {
791 .get_settings = get_settings,
792 .set_settings = set_settings,
793 .get_drvinfo = get_drvinfo, 801 .get_drvinfo = get_drvinfo,
794 .get_msglevel = get_msglevel, 802 .get_msglevel = get_msglevel,
795 .set_msglevel = set_msglevel, 803 .set_msglevel = set_msglevel,
@@ -807,6 +815,8 @@ static const struct ethtool_ops t1_ethtool_ops = {
807 .get_ethtool_stats = get_stats, 815 .get_ethtool_stats = get_stats,
808 .get_regs_len = get_regs_len, 816 .get_regs_len = get_regs_len,
809 .get_regs = get_regs, 817 .get_regs = get_regs,
818 .get_link_ksettings = get_link_ksettings,
819 .set_link_ksettings = set_link_ksettings,
810}; 820};
811 821
812static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 822static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 092b3c16440b..7b2224ae72f2 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1801,27 +1801,31 @@ static int set_phys_id(struct net_device *dev,
1801 return 0; 1801 return 0;
1802} 1802}
1803 1803
1804static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1804static int get_link_ksettings(struct net_device *dev,
1805 struct ethtool_link_ksettings *cmd)
1805{ 1806{
1806 struct port_info *p = netdev_priv(dev); 1807 struct port_info *p = netdev_priv(dev);
1808 u32 supported;
1807 1809
1808 cmd->supported = p->link_config.supported; 1810 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1809 cmd->advertising = p->link_config.advertising; 1811 p->link_config.supported);
1812 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1813 p->link_config.advertising);
1810 1814
1811 if (netif_carrier_ok(dev)) { 1815 if (netif_carrier_ok(dev)) {
1812 ethtool_cmd_speed_set(cmd, p->link_config.speed); 1816 cmd->base.speed = p->link_config.speed;
1813 cmd->duplex = p->link_config.duplex; 1817 cmd->base.duplex = p->link_config.duplex;
1814 } else { 1818 } else {
1815 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); 1819 cmd->base.speed = SPEED_UNKNOWN;
1816 cmd->duplex = DUPLEX_UNKNOWN; 1820 cmd->base.duplex = DUPLEX_UNKNOWN;
1817 } 1821 }
1818 1822
1819 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; 1823 ethtool_convert_link_mode_to_legacy_u32(&supported,
1820 cmd->phy_address = p->phy.mdio.prtad; 1824 cmd->link_modes.supported);
1821 cmd->transceiver = XCVR_EXTERNAL; 1825
1822 cmd->autoneg = p->link_config.autoneg; 1826 cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1823 cmd->maxtxpkt = 0; 1827 cmd->base.phy_address = p->phy.mdio.prtad;
1824 cmd->maxrxpkt = 0; 1828 cmd->base.autoneg = p->link_config.autoneg;
1825 return 0; 1829 return 0;
1826} 1830}
1827 1831
@@ -1860,44 +1864,49 @@ static int speed_duplex_to_caps(int speed, int duplex)
1860 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ 1864 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1861 ADVERTISED_10000baseT_Full) 1865 ADVERTISED_10000baseT_Full)
1862 1866
1863static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1867static int set_link_ksettings(struct net_device *dev,
1868 const struct ethtool_link_ksettings *cmd)
1864{ 1869{
1865 struct port_info *p = netdev_priv(dev); 1870 struct port_info *p = netdev_priv(dev);
1866 struct link_config *lc = &p->link_config; 1871 struct link_config *lc = &p->link_config;
1872 u32 advertising;
1873
1874 ethtool_convert_link_mode_to_legacy_u32(&advertising,
1875 cmd->link_modes.advertising);
1867 1876
1868 if (!(lc->supported & SUPPORTED_Autoneg)) { 1877 if (!(lc->supported & SUPPORTED_Autoneg)) {
1869 /* 1878 /*
1870 * PHY offers a single speed/duplex. See if that's what's 1879 * PHY offers a single speed/duplex. See if that's what's
1871 * being requested. 1880 * being requested.
1872 */ 1881 */
1873 if (cmd->autoneg == AUTONEG_DISABLE) { 1882 if (cmd->base.autoneg == AUTONEG_DISABLE) {
1874 u32 speed = ethtool_cmd_speed(cmd); 1883 u32 speed = cmd->base.speed;
1875 int cap = speed_duplex_to_caps(speed, cmd->duplex); 1884 int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1876 if (lc->supported & cap) 1885 if (lc->supported & cap)
1877 return 0; 1886 return 0;
1878 } 1887 }
1879 return -EINVAL; 1888 return -EINVAL;
1880 } 1889 }
1881 1890
1882 if (cmd->autoneg == AUTONEG_DISABLE) { 1891 if (cmd->base.autoneg == AUTONEG_DISABLE) {
1883 u32 speed = ethtool_cmd_speed(cmd); 1892 u32 speed = cmd->base.speed;
1884 int cap = speed_duplex_to_caps(speed, cmd->duplex); 1893 int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1885 1894
1886 if (!(lc->supported & cap) || (speed == SPEED_1000)) 1895 if (!(lc->supported & cap) || (speed == SPEED_1000))
1887 return -EINVAL; 1896 return -EINVAL;
1888 lc->requested_speed = speed; 1897 lc->requested_speed = speed;
1889 lc->requested_duplex = cmd->duplex; 1898 lc->requested_duplex = cmd->base.duplex;
1890 lc->advertising = 0; 1899 lc->advertising = 0;
1891 } else { 1900 } else {
1892 cmd->advertising &= ADVERTISED_MASK; 1901 advertising &= ADVERTISED_MASK;
1893 cmd->advertising &= lc->supported; 1902 advertising &= lc->supported;
1894 if (!cmd->advertising) 1903 if (!advertising)
1895 return -EINVAL; 1904 return -EINVAL;
1896 lc->requested_speed = SPEED_INVALID; 1905 lc->requested_speed = SPEED_INVALID;
1897 lc->requested_duplex = DUPLEX_INVALID; 1906 lc->requested_duplex = DUPLEX_INVALID;
1898 lc->advertising = cmd->advertising | ADVERTISED_Autoneg; 1907 lc->advertising = advertising | ADVERTISED_Autoneg;
1899 } 1908 }
1900 lc->autoneg = cmd->autoneg; 1909 lc->autoneg = cmd->base.autoneg;
1901 if (netif_running(dev)) 1910 if (netif_running(dev))
1902 t3_link_start(&p->phy, &p->mac, lc); 1911 t3_link_start(&p->phy, &p->mac, lc);
1903 return 0; 1912 return 0;
@@ -2097,8 +2106,6 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2097} 2106}
2098 2107
2099static const struct ethtool_ops cxgb_ethtool_ops = { 2108static const struct ethtool_ops cxgb_ethtool_ops = {
2100 .get_settings = get_settings,
2101 .set_settings = set_settings,
2102 .get_drvinfo = get_drvinfo, 2109 .get_drvinfo = get_drvinfo,
2103 .get_msglevel = get_msglevel, 2110 .get_msglevel = get_msglevel,
2104 .set_msglevel = set_msglevel, 2111 .set_msglevel = set_msglevel,
@@ -2120,6 +2127,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
2120 .get_regs_len = get_regs_len, 2127 .get_regs_len = get_regs_len,
2121 .get_regs = get_regs, 2128 .get_regs = get_regs,
2122 .get_wol = get_wol, 2129 .get_wol = get_wol,
2130 .get_link_ksettings = get_link_ksettings,
2131 .set_link_ksettings = set_link_ksettings,
2123}; 2132};
2124 2133
2125static int in_range(int val, int lo, int hi) 2134static int in_range(int val, int lo, int hi)
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index a1de0d12927d..396c88678eab 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -715,16 +715,18 @@ static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
715 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); 715 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
716} 716}
717 717
718static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 718static int ep93xx_get_link_ksettings(struct net_device *dev,
719 struct ethtool_link_ksettings *cmd)
719{ 720{
720 struct ep93xx_priv *ep = netdev_priv(dev); 721 struct ep93xx_priv *ep = netdev_priv(dev);
721 return mii_ethtool_gset(&ep->mii, cmd); 722 return mii_ethtool_get_link_ksettings(&ep->mii, cmd);
722} 723}
723 724
724static int ep93xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 725static int ep93xx_set_link_ksettings(struct net_device *dev,
726 const struct ethtool_link_ksettings *cmd)
725{ 727{
726 struct ep93xx_priv *ep = netdev_priv(dev); 728 struct ep93xx_priv *ep = netdev_priv(dev);
727 return mii_ethtool_sset(&ep->mii, cmd); 729 return mii_ethtool_set_link_ksettings(&ep->mii, cmd);
728} 730}
729 731
730static int ep93xx_nway_reset(struct net_device *dev) 732static int ep93xx_nway_reset(struct net_device *dev)
@@ -741,10 +743,10 @@ static u32 ep93xx_get_link(struct net_device *dev)
741 743
742static const struct ethtool_ops ep93xx_ethtool_ops = { 744static const struct ethtool_ops ep93xx_ethtool_ops = {
743 .get_drvinfo = ep93xx_get_drvinfo, 745 .get_drvinfo = ep93xx_get_drvinfo,
744 .get_settings = ep93xx_get_settings,
745 .set_settings = ep93xx_set_settings,
746 .nway_reset = ep93xx_nway_reset, 746 .nway_reset = ep93xx_nway_reset,
747 .get_link = ep93xx_get_link, 747 .get_link = ep93xx_get_link,
748 .get_link_ksettings = ep93xx_get_link_ksettings,
749 .set_link_ksettings = ep93xx_set_link_ksettings,
748}; 750};
749 751
750static const struct net_device_ops ep93xx_netdev_ops = { 752static const struct net_device_ops ep93xx_netdev_ops = {
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index f1a81c52afe3..008dc8161775 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -570,19 +570,21 @@ static void dm9000_set_msglevel(struct net_device *dev, u32 value)
570 dm->msg_enable = value; 570 dm->msg_enable = value;
571} 571}
572 572
573static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 573static int dm9000_get_link_ksettings(struct net_device *dev,
574 struct ethtool_link_ksettings *cmd)
574{ 575{
575 struct board_info *dm = to_dm9000_board(dev); 576 struct board_info *dm = to_dm9000_board(dev);
576 577
577 mii_ethtool_gset(&dm->mii, cmd); 578 mii_ethtool_get_link_ksettings(&dm->mii, cmd);
578 return 0; 579 return 0;
579} 580}
580 581
581static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 582static int dm9000_set_link_ksettings(struct net_device *dev,
583 const struct ethtool_link_ksettings *cmd)
582{ 584{
583 struct board_info *dm = to_dm9000_board(dev); 585 struct board_info *dm = to_dm9000_board(dev);
584 586
585 return mii_ethtool_sset(&dm->mii, cmd); 587 return mii_ethtool_set_link_ksettings(&dm->mii, cmd);
586} 588}
587 589
588static int dm9000_nway_reset(struct net_device *dev) 590static int dm9000_nway_reset(struct net_device *dev)
@@ -741,8 +743,6 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
741 743
742static const struct ethtool_ops dm9000_ethtool_ops = { 744static const struct ethtool_ops dm9000_ethtool_ops = {
743 .get_drvinfo = dm9000_get_drvinfo, 745 .get_drvinfo = dm9000_get_drvinfo,
744 .get_settings = dm9000_get_settings,
745 .set_settings = dm9000_set_settings,
746 .get_msglevel = dm9000_get_msglevel, 746 .get_msglevel = dm9000_get_msglevel,
747 .set_msglevel = dm9000_set_msglevel, 747 .set_msglevel = dm9000_set_msglevel,
748 .nway_reset = dm9000_nway_reset, 748 .nway_reset = dm9000_nway_reset,
@@ -752,6 +752,8 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
752 .get_eeprom_len = dm9000_get_eeprom_len, 752 .get_eeprom_len = dm9000_get_eeprom_len,
753 .get_eeprom = dm9000_get_eeprom, 753 .get_eeprom = dm9000_get_eeprom,
754 .set_eeprom = dm9000_set_eeprom, 754 .set_eeprom = dm9000_set_eeprom,
755 .get_link_ksettings = dm9000_get_link_ksettings,
756 .set_link_ksettings = dm9000_set_link_ksettings,
755}; 757};
756 758
757static void dm9000_show_carrier(struct board_info *db, 759static void dm9000_show_carrier(struct board_info *db,
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
index f3a3454805f9..a654736237a9 100644
--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -1,6 +1,6 @@
1menuconfig FSL_DPAA_ETH 1menuconfig FSL_DPAA_ETH
2 tristate "DPAA Ethernet" 2 tristate "DPAA Ethernet"
3 depends on FSL_SOC && FSL_DPAA && FSL_FMAN 3 depends on FSL_DPAA && FSL_FMAN
4 select PHYLIB 4 select PHYLIB
5 select FSL_FMAN_MAC 5 select FSL_FMAN_MAC
6 ---help--- 6 ---help---
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 3c48a84dec86..624ba9058dc4 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -733,7 +733,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
733 priv->cgr_data.cgr.cb = dpaa_eth_cgscn; 733 priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
734 734
735 /* Enable Congestion State Change Notifications and CS taildrop */ 735 /* Enable Congestion State Change Notifications and CS taildrop */
736 initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES; 736 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
737 initcgr.cgr.cscn_en = QM_CGR_EN; 737 initcgr.cgr.cscn_en = QM_CGR_EN;
738 738
739 /* Set different thresholds based on the MAC speed. 739 /* Set different thresholds based on the MAC speed.
@@ -747,7 +747,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
747 cs_th = DPAA_CS_THRESHOLD_1G; 747 cs_th = DPAA_CS_THRESHOLD_1G;
748 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); 748 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
749 749
750 initcgr.we_mask |= QM_CGR_WE_CSTD_EN; 750 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
751 initcgr.cgr.cstd_en = QM_CGR_EN; 751 initcgr.cgr.cstd_en = QM_CGR_EN;
752 752
753 err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT, 753 err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
@@ -896,18 +896,18 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
896 if (dpaa_fq->init) { 896 if (dpaa_fq->init) {
897 memset(&initfq, 0, sizeof(initfq)); 897 memset(&initfq, 0, sizeof(initfq));
898 898
899 initfq.we_mask = QM_INITFQ_WE_FQCTRL; 899 initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
900 /* Note: we may get to keep an empty FQ in cache */ 900 /* Note: we may get to keep an empty FQ in cache */
901 initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; 901 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
902 902
903 /* Try to reduce the number of portal interrupts for 903 /* Try to reduce the number of portal interrupts for
904 * Tx Confirmation FQs. 904 * Tx Confirmation FQs.
905 */ 905 */
906 if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM) 906 if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
907 initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; 907 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
908 908
909 /* FQ placement */ 909 /* FQ placement */
910 initfq.we_mask |= QM_INITFQ_WE_DESTWQ; 910 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
911 911
912 qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq); 912 qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
913 913
@@ -920,8 +920,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
920 if (dpaa_fq->fq_type == FQ_TYPE_TX || 920 if (dpaa_fq->fq_type == FQ_TYPE_TX ||
921 dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM || 921 dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
922 dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) { 922 dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
923 initfq.we_mask |= QM_INITFQ_WE_CGID; 923 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
924 initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE; 924 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
925 initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid; 925 initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
926 /* Set a fixed overhead accounting, in an attempt to 926 /* Set a fixed overhead accounting, in an attempt to
927 * reduce the impact of fixed-size skb shells and the 927 * reduce the impact of fixed-size skb shells and the
@@ -932,7 +932,7 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
932 * insufficient value, but even that is better than 932 * insufficient value, but even that is better than
933 * no overhead accounting at all. 933 * no overhead accounting at all.
934 */ 934 */
935 initfq.we_mask |= QM_INITFQ_WE_OAC; 935 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
936 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); 936 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
937 qm_fqd_set_oal(&initfq.fqd, 937 qm_fqd_set_oal(&initfq.fqd,
938 min(sizeof(struct sk_buff) + 938 min(sizeof(struct sk_buff) +
@@ -941,9 +941,9 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
941 } 941 }
942 942
943 if (td_enable) { 943 if (td_enable) {
944 initfq.we_mask |= QM_INITFQ_WE_TDTHRESH; 944 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
945 qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1); 945 qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
946 initfq.fqd.fq_ctrl = QM_FQCTRL_TDE; 946 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
947 } 947 }
948 948
949 if (dpaa_fq->fq_type == FQ_TYPE_TX) { 949 if (dpaa_fq->fq_type == FQ_TYPE_TX) {
@@ -951,7 +951,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
951 if (queue_id >= 0) 951 if (queue_id >= 0)
952 confq = priv->conf_fqs[queue_id]; 952 confq = priv->conf_fqs[queue_id];
953 if (confq) { 953 if (confq) {
954 initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; 954 initfq.we_mask |=
955 cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
955 /* ContextA: OVOM=1(use contextA2 bits instead of ICAD) 956 /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
956 * A2V=1 (contextA A2 field is valid) 957 * A2V=1 (contextA A2 field is valid)
957 * A0V=1 (contextA A0 field is valid) 958 * A0V=1 (contextA A0 field is valid)
@@ -959,8 +960,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
959 * ContextA A2: EBD=1 (deallocate buffers inside FMan) 960 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
960 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID) 961 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
961 */ 962 */
962 initfq.fqd.context_a.hi = 0x1e000000; 963 qm_fqd_context_a_set64(&initfq.fqd,
963 initfq.fqd.context_a.lo = 0x80000000; 964 0x1e00000080000000ULL);
964 } 965 }
965 } 966 }
966 967
@@ -968,13 +969,13 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
968 if (priv->use_ingress_cgr && 969 if (priv->use_ingress_cgr &&
969 (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || 970 (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
970 dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) { 971 dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
971 initfq.we_mask |= QM_INITFQ_WE_CGID; 972 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
972 initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE; 973 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
973 initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid; 974 initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
974 /* Set a fixed overhead accounting, just like for the 975 /* Set a fixed overhead accounting, just like for the
975 * egress CGR. 976 * egress CGR.
976 */ 977 */
977 initfq.we_mask |= QM_INITFQ_WE_OAC; 978 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
978 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); 979 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
979 qm_fqd_set_oal(&initfq.fqd, 980 qm_fqd_set_oal(&initfq.fqd,
980 min(sizeof(struct sk_buff) + 981 min(sizeof(struct sk_buff) +
@@ -984,9 +985,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
984 985
985 /* Initialization common to all ingress queues */ 986 /* Initialization common to all ingress queues */
986 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { 987 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
987 initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; 988 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
988 initfq.fqd.fq_ctrl |= 989 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
989 QM_FQCTRL_HOLDACTIVE;
990 initfq.fqd.context_a.stashing.exclusive = 990 initfq.fqd.context_a.stashing.exclusive =
991 QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX | 991 QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
992 QM_STASHING_EXCL_ANNOTATION; 992 QM_STASHING_EXCL_ANNOTATION;
@@ -1350,7 +1350,7 @@ static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
1350 parse_result->l4_off = (u8)skb_transport_offset(skb); 1350 parse_result->l4_off = (u8)skb_transport_offset(skb);
1351 1351
1352 /* Enable L3 (and L4, if TCP or UDP) HW checksum. */ 1352 /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
1353 fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC; 1353 fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
1354 1354
1355 /* On P1023 and similar platforms fd->cmd interpretation could 1355 /* On P1023 and similar platforms fd->cmd interpretation could
1356 * be disabled by setting CONTEXT_A bit ICMD; currently this bit 1356 * be disabled by setting CONTEXT_A bit ICMD; currently this bit
@@ -1732,7 +1732,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
1732 1732
1733 /* Fill in the rest of the FD fields */ 1733 /* Fill in the rest of the FD fields */
1734 qm_fd_set_contig(fd, priv->tx_headroom, skb->len); 1734 qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
1735 fd->cmd |= FM_FD_CMD_FCO; 1735 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1736 1736
1737 /* Map the entire buffer size that may be seen by FMan, but no more */ 1737 /* Map the entire buffer size that may be seen by FMan, but no more */
1738 addr = dma_map_single(dev, skbh, 1738 addr = dma_map_single(dev, skbh,
@@ -1840,7 +1840,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
1840 } 1840 }
1841 1841
1842 fd->bpid = FSL_DPAA_BPID_INV; 1842 fd->bpid = FSL_DPAA_BPID_INV;
1843 fd->cmd |= FM_FD_CMD_FCO; 1843 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1844 qm_fd_addr_set64(fd, addr); 1844 qm_fd_addr_set64(fd, addr);
1845 1845
1846 return 0; 1846 return 0;
@@ -1867,7 +1867,7 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
1867 1867
1868 egress_fq = priv->egress_fqs[queue]; 1868 egress_fq = priv->egress_fqs[queue];
1869 if (fd->bpid == FSL_DPAA_BPID_INV) 1869 if (fd->bpid == FSL_DPAA_BPID_INV)
1870 fd->cmd |= qman_fq_fqid(priv->conf_fqs[queue]); 1870 fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
1871 1871
1872 /* Trace this Tx fd */ 1872 /* Trace this Tx fd */
1873 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd); 1873 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
@@ -1960,17 +1960,17 @@ static void dpaa_rx_error(struct net_device *net_dev,
1960{ 1960{
1961 if (net_ratelimit()) 1961 if (net_ratelimit())
1962 netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n", 1962 netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
1963 fd->status & FM_FD_STAT_RX_ERRORS); 1963 be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
1964 1964
1965 percpu_priv->stats.rx_errors++; 1965 percpu_priv->stats.rx_errors++;
1966 1966
1967 if (fd->status & FM_FD_ERR_DMA) 1967 if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
1968 percpu_priv->rx_errors.dme++; 1968 percpu_priv->rx_errors.dme++;
1969 if (fd->status & FM_FD_ERR_PHYSICAL) 1969 if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
1970 percpu_priv->rx_errors.fpe++; 1970 percpu_priv->rx_errors.fpe++;
1971 if (fd->status & FM_FD_ERR_SIZE) 1971 if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
1972 percpu_priv->rx_errors.fse++; 1972 percpu_priv->rx_errors.fse++;
1973 if (fd->status & FM_FD_ERR_PRS_HDR_ERR) 1973 if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
1974 percpu_priv->rx_errors.phe++; 1974 percpu_priv->rx_errors.phe++;
1975 1975
1976 dpaa_fd_release(net_dev, fd); 1976 dpaa_fd_release(net_dev, fd);
@@ -1986,7 +1986,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
1986 1986
1987 if (net_ratelimit()) 1987 if (net_ratelimit())
1988 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", 1988 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
1989 fd->status & FM_FD_STAT_TX_ERRORS); 1989 be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
1990 1990
1991 percpu_priv->stats.tx_errors++; 1991 percpu_priv->stats.tx_errors++;
1992 1992
@@ -2020,10 +2020,11 @@ static void dpaa_tx_conf(struct net_device *net_dev,
2020{ 2020{
2021 struct sk_buff *skb; 2021 struct sk_buff *skb;
2022 2022
2023 if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) { 2023 if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
2024 if (net_ratelimit()) 2024 if (net_ratelimit())
2025 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", 2025 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2026 fd->status & FM_FD_STAT_TX_ERRORS); 2026 be32_to_cpu(fd->status) &
2027 FM_FD_STAT_TX_ERRORS);
2027 2028
2028 percpu_priv->stats.tx_errors++; 2029 percpu_priv->stats.tx_errors++;
2029 } 2030 }
@@ -2100,6 +2101,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2100 struct sk_buff *skb; 2101 struct sk_buff *skb;
2101 int *count_ptr; 2102 int *count_ptr;
2102 2103
2104 fd_status = be32_to_cpu(fd->status);
2105 fd_format = qm_fd_get_format(fd);
2103 net_dev = ((struct dpaa_fq *)fq)->net_dev; 2106 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2104 priv = netdev_priv(net_dev); 2107 priv = netdev_priv(net_dev);
2105 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); 2108 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
@@ -2417,12 +2420,12 @@ static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
2417 } 2420 }
2418 2421
2419 /* Enable CS TD, but disable Congestion State Change Notifications. */ 2422 /* Enable CS TD, but disable Congestion State Change Notifications. */
2420 initcgr.we_mask = QM_CGR_WE_CS_THRES; 2423 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
2421 initcgr.cgr.cscn_en = QM_CGR_EN; 2424 initcgr.cgr.cscn_en = QM_CGR_EN;
2422 cs_th = DPAA_INGRESS_CS_THRESHOLD; 2425 cs_th = DPAA_INGRESS_CS_THRESHOLD;
2423 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); 2426 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
2424 2427
2425 initcgr.we_mask |= QM_CGR_WE_CSTD_EN; 2428 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
2426 initcgr.cgr.cstd_en = QM_CGR_EN; 2429 initcgr.cgr.cstd_en = QM_CGR_EN;
2427 2430
2428 /* This CGR will be associated with the SWP affined to the current CPU. 2431 /* This CGR will be associated with the SWP affined to the current CPU.
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 854befde0a08..97b184774784 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -828,6 +828,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
828 priv = netdev_priv(ndev); 828 priv = netdev_priv(ndev);
829 priv->ndev = ndev; 829 priv->ndev = ndev;
830 platform_set_drvdata(pdev, ndev); 830 platform_set_drvdata(pdev, ndev);
831 SET_NETDEV_DEV(ndev, &pdev->dev);
831 832
832 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 833 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
833 priv->base = devm_ioremap_resource(d, res); 834 priv->base = devm_ioremap_resource(d, res);
@@ -903,7 +904,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
903 ndev->priv_flags |= IFF_UNICAST_FLT; 904 ndev->priv_flags |= IFF_UNICAST_FLT;
904 ndev->irq = irq; 905 ndev->irq = irq;
905 netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT); 906 netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
906 SET_NETDEV_DEV(ndev, &pdev->dev);
907 907
908 hip04_reset_ppe(priv); 908 hip04_reset_ppe(priv);
909 if (priv->phy_mode == PHY_INTERFACE_MODE_MII) 909 if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 49863068c59e..979852d56f31 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -805,6 +805,7 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
805 return -ENOMEM; 805 return -ENOMEM;
806 806
807 platform_set_drvdata(pdev, ndev); 807 platform_set_drvdata(pdev, ndev);
808 SET_NETDEV_DEV(ndev, &pdev->dev);
808 809
809 priv = netdev_priv(ndev); 810 priv = netdev_priv(ndev);
810 priv->dev = dev; 811 priv->dev = dev;
@@ -882,7 +883,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
882 ndev->netdev_ops = &hisi_femac_netdev_ops; 883 ndev->netdev_ops = &hisi_femac_netdev_ops;
883 ndev->ethtool_ops = &hisi_femac_ethtools_ops; 884 ndev->ethtool_ops = &hisi_femac_ethtools_ops;
884 netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT); 885 netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);
885 SET_NETDEV_DEV(ndev, &pdev->dev);
886 886
887 hisi_femac_port_init(priv); 887 hisi_femac_port_init(priv);
888 888
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index fbece63395a8..a831f947ca8c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1181,7 +1181,9 @@ map_failed:
1181 1181
1182static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt) 1182static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
1183{ 1183{
1184 struct tcphdr *tcph;
1184 int offset = 0; 1185 int offset = 0;
1186 int hdr_len;
1185 1187
1186 /* only TCP packets will be aggregated */ 1188 /* only TCP packets will be aggregated */
1187 if (skb->protocol == htons(ETH_P_IP)) { 1189 if (skb->protocol == htons(ETH_P_IP)) {
@@ -1208,14 +1210,20 @@ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
1208 /* if mss is not set through Large Packet bit/mss in rx buffer, 1210 /* if mss is not set through Large Packet bit/mss in rx buffer,
1209 * expect that the mss will be written to the tcp header checksum. 1211 * expect that the mss will be written to the tcp header checksum.
1210 */ 1212 */
1213 tcph = (struct tcphdr *)(skb->data + offset);
1211 if (lrg_pkt) { 1214 if (lrg_pkt) {
1212 skb_shinfo(skb)->gso_size = mss; 1215 skb_shinfo(skb)->gso_size = mss;
1213 } else if (offset) { 1216 } else if (offset) {
1214 struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
1215
1216 skb_shinfo(skb)->gso_size = ntohs(tcph->check); 1217 skb_shinfo(skb)->gso_size = ntohs(tcph->check);
1217 tcph->check = 0; 1218 tcph->check = 0;
1218 } 1219 }
1220
1221 if (skb_shinfo(skb)->gso_size) {
1222 hdr_len = offset + tcph->doff * 4;
1223 skb_shinfo(skb)->gso_segs =
1224 DIV_ROUND_UP(skb->len - hdr_len,
1225 skb_shinfo(skb)->gso_size);
1226 }
1219} 1227}
1220 1228
1221static int ibmveth_poll(struct napi_struct *napi, int budget) 1229static int ibmveth_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5f62c3d70df9..1fa7c03edec2 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2713,7 +2713,7 @@ static const struct of_device_id mv643xx_eth_shared_ids[] = {
2713MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); 2713MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
2714#endif 2714#endif
2715 2715
2716#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60) 2716#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60)
2717#define mv643xx_eth_property(_np, _name, _v) \ 2717#define mv643xx_eth_property(_np, _name, _v) \
2718 do { \ 2718 do { \
2719 u32 tmp; \ 2719 u32 tmp; \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index fece974b4edd..d768c7b6c6d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2404,7 +2404,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2404 local_port); 2404 local_port);
2405 return err; 2405 return err;
2406 } 2406 }
2407 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, false, 2407 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
2408 module, width, lane); 2408 module, width, lane);
2409 if (err) 2409 if (err)
2410 goto err_port_create; 2410 goto err_port_create;
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index f3bb9055a292..44bb04d4d21b 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -26,11 +26,11 @@ static inline bool is_bits_set(int value, int mask)
26} 26}
27 27
28static int encx24j600_switch_bank(struct encx24j600_context *ctx, 28static int encx24j600_switch_bank(struct encx24j600_context *ctx,
29 int bank) 29 int bank)
30{ 30{
31 int ret = 0; 31 int ret = 0;
32
33 int bank_opcode = BANK_SELECT(bank); 32 int bank_opcode = BANK_SELECT(bank);
33
34 ret = spi_write(ctx->spi, &bank_opcode, 1); 34 ret = spi_write(ctx->spi, &bank_opcode, 1);
35 if (ret == 0) 35 if (ret == 0)
36 ctx->bank = bank; 36 ctx->bank = bank;
@@ -39,7 +39,7 @@ static int encx24j600_switch_bank(struct encx24j600_context *ctx,
39} 39}
40 40
41static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode, 41static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
42 const void *buf, size_t len) 42 const void *buf, size_t len)
43{ 43{
44 struct spi_message m; 44 struct spi_message m;
45 struct spi_transfer t[2] = { { .tx_buf = &opcode, .len = 1, }, 45 struct spi_transfer t[2] = { { .tx_buf = &opcode, .len = 1, },
@@ -54,12 +54,14 @@ static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
54static void regmap_lock_mutex(void *context) 54static void regmap_lock_mutex(void *context)
55{ 55{
56 struct encx24j600_context *ctx = context; 56 struct encx24j600_context *ctx = context;
57
57 mutex_lock(&ctx->mutex); 58 mutex_lock(&ctx->mutex);
58} 59}
59 60
60static void regmap_unlock_mutex(void *context) 61static void regmap_unlock_mutex(void *context)
61{ 62{
62 struct encx24j600_context *ctx = context; 63 struct encx24j600_context *ctx = context;
64
63 mutex_unlock(&ctx->mutex); 65 mutex_unlock(&ctx->mutex);
64} 66}
65 67
@@ -128,6 +130,7 @@ static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx,
128 130
129 if (reg < 0x80) { 131 if (reg < 0x80) {
130 int ret = 0; 132 int ret = 0;
133
131 cmd = banked_code | banked_reg; 134 cmd = banked_code | banked_reg;
132 if ((banked_reg < 0x16) && (ctx->bank != bank)) 135 if ((banked_reg < 0x16) && (ctx->bank != bank))
133 ret = encx24j600_switch_bank(ctx, bank); 136 ret = encx24j600_switch_bank(ctx, bank);
@@ -174,6 +177,7 @@ static int regmap_encx24j600_sfr_write(void *context, u8 reg, u8 *val,
174 size_t len) 177 size_t len)
175{ 178{
176 struct encx24j600_context *ctx = context; 179 struct encx24j600_context *ctx = context;
180
177 return regmap_encx24j600_sfr_update(ctx, reg, val, len, WCRU, WCRCODE); 181 return regmap_encx24j600_sfr_update(ctx, reg, val, len, WCRU, WCRCODE);
178} 182}
179 183
@@ -228,9 +232,9 @@ int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
228 232
229 if (reg < 0xc0) 233 if (reg < 0xc0)
230 return encx24j600_cmdn(ctx, reg, data, count); 234 return encx24j600_cmdn(ctx, reg, data, count);
231 else 235
232 /* SPI 1-byte command. Ignore data */ 236 /* SPI 1-byte command. Ignore data */
233 return spi_write(ctx->spi, &reg, 1); 237 return spi_write(ctx->spi, &reg, 1);
234} 238}
235EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_write); 239EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_write);
236 240
@@ -495,6 +499,7 @@ static struct regmap_config phycfg = {
495 .writeable_reg = encx24j600_phymap_writeable, 499 .writeable_reg = encx24j600_phymap_writeable,
496 .volatile_reg = encx24j600_phymap_volatile, 500 .volatile_reg = encx24j600_phymap_volatile,
497}; 501};
502
498static struct regmap_bus phymap_encx24j600 = { 503static struct regmap_bus phymap_encx24j600 = {
499 .reg_write = regmap_encx24j600_phy_reg_write, 504 .reg_write = regmap_encx24j600_phy_reg_write,
500 .reg_read = regmap_encx24j600_phy_reg_read, 505 .reg_read = regmap_encx24j600_phy_reg_read,
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index b14f0305aa31..fbce6166504e 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -30,7 +30,7 @@
30 30
31#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 31#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
32static int debug = -1; 32static int debug = -1;
33module_param(debug, int, 0); 33module_param(debug, int, 0000);
34MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 34MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
35 35
36/* SRAM memory layout: 36/* SRAM memory layout:
@@ -105,6 +105,7 @@ static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg)
105 struct net_device *dev = priv->ndev; 105 struct net_device *dev = priv->ndev;
106 unsigned int val = 0; 106 unsigned int val = 0;
107 int ret = regmap_read(priv->ctx.regmap, reg, &val); 107 int ret = regmap_read(priv->ctx.regmap, reg, &val);
108
108 if (unlikely(ret)) 109 if (unlikely(ret))
109 netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n", 110 netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n",
110 __func__, ret, reg); 111 __func__, ret, reg);
@@ -115,6 +116,7 @@ static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val)
115{ 116{
116 struct net_device *dev = priv->ndev; 117 struct net_device *dev = priv->ndev;
117 int ret = regmap_write(priv->ctx.regmap, reg, val); 118 int ret = regmap_write(priv->ctx.regmap, reg, val);
119
118 if (unlikely(ret)) 120 if (unlikely(ret))
119 netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n", 121 netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
120 __func__, ret, reg, val); 122 __func__, ret, reg, val);
@@ -125,6 +127,7 @@ static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg,
125{ 127{
126 struct net_device *dev = priv->ndev; 128 struct net_device *dev = priv->ndev;
127 int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val); 129 int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val);
130
128 if (unlikely(ret)) 131 if (unlikely(ret))
129 netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n", 132 netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n",
130 __func__, ret, reg, val, mask); 133 __func__, ret, reg, val, mask);
@@ -135,6 +138,7 @@ static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg)
135 struct net_device *dev = priv->ndev; 138 struct net_device *dev = priv->ndev;
136 unsigned int val = 0; 139 unsigned int val = 0;
137 int ret = regmap_read(priv->ctx.phymap, reg, &val); 140 int ret = regmap_read(priv->ctx.phymap, reg, &val);
141
138 if (unlikely(ret)) 142 if (unlikely(ret))
139 netif_err(priv, drv, dev, "%s: error %d reading %02x\n", 143 netif_err(priv, drv, dev, "%s: error %d reading %02x\n",
140 __func__, ret, reg); 144 __func__, ret, reg);
@@ -145,6 +149,7 @@ static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val)
145{ 149{
146 struct net_device *dev = priv->ndev; 150 struct net_device *dev = priv->ndev;
147 int ret = regmap_write(priv->ctx.phymap, reg, val); 151 int ret = regmap_write(priv->ctx.phymap, reg, val);
152
148 if (unlikely(ret)) 153 if (unlikely(ret))
149 netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n", 154 netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
150 __func__, ret, reg, val); 155 __func__, ret, reg, val);
@@ -164,6 +169,7 @@ static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd)
164{ 169{
165 struct net_device *dev = priv->ndev; 170 struct net_device *dev = priv->ndev;
166 int ret = regmap_write(priv->ctx.regmap, cmd, 0); 171 int ret = regmap_write(priv->ctx.regmap, cmd, 0);
172
167 if (unlikely(ret)) 173 if (unlikely(ret))
168 netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n", 174 netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n",
169 __func__, ret, cmd); 175 __func__, ret, cmd);
@@ -173,6 +179,7 @@ static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data,
173 size_t count) 179 size_t count)
174{ 180{
175 int ret; 181 int ret;
182
176 mutex_lock(&priv->ctx.mutex); 183 mutex_lock(&priv->ctx.mutex);
177 ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count); 184 ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count);
178 mutex_unlock(&priv->ctx.mutex); 185 mutex_unlock(&priv->ctx.mutex);
@@ -184,6 +191,7 @@ static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
184 const u8 *data, size_t count) 191 const u8 *data, size_t count)
185{ 192{
186 int ret; 193 int ret;
194
187 mutex_lock(&priv->ctx.mutex); 195 mutex_lock(&priv->ctx.mutex);
188 ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count); 196 ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count);
189 mutex_unlock(&priv->ctx.mutex); 197 mutex_unlock(&priv->ctx.mutex);
@@ -194,6 +202,7 @@ static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
194static void encx24j600_update_phcon1(struct encx24j600_priv *priv) 202static void encx24j600_update_phcon1(struct encx24j600_priv *priv)
195{ 203{
196 u16 phcon1 = encx24j600_read_phy(priv, PHCON1); 204 u16 phcon1 = encx24j600_read_phy(priv, PHCON1);
205
197 if (priv->autoneg == AUTONEG_ENABLE) { 206 if (priv->autoneg == AUTONEG_ENABLE) {
198 phcon1 |= ANEN | RENEG; 207 phcon1 |= ANEN | RENEG;
199 } else { 208 } else {
@@ -328,6 +337,7 @@ static int encx24j600_receive_packet(struct encx24j600_priv *priv,
328{ 337{
329 struct net_device *dev = priv->ndev; 338 struct net_device *dev = priv->ndev;
330 struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN); 339 struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);
340
331 if (!skb) { 341 if (!skb) {
332 pr_err_ratelimited("RX: OOM: packet dropped\n"); 342 pr_err_ratelimited("RX: OOM: packet dropped\n");
333 dev->stats.rx_dropped++; 343 dev->stats.rx_dropped++;
@@ -346,7 +356,6 @@ static int encx24j600_receive_packet(struct encx24j600_priv *priv,
346 /* Maintain stats */ 356 /* Maintain stats */
347 dev->stats.rx_packets++; 357 dev->stats.rx_packets++;
348 dev->stats.rx_bytes += rsv->len; 358 dev->stats.rx_bytes += rsv->len;
349 priv->next_packet = rsv->next_packet;
350 359
351 netif_rx(skb); 360 netif_rx(skb);
352 361
@@ -383,6 +392,8 @@ static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count)
383 encx24j600_receive_packet(priv, &rsv); 392 encx24j600_receive_packet(priv, &rsv);
384 } 393 }
385 394
395 priv->next_packet = rsv.next_packet;
396
386 newrxtail = priv->next_packet - 2; 397 newrxtail = priv->next_packet - 2;
387 if (newrxtail == ENC_RX_BUF_START) 398 if (newrxtail == ENC_RX_BUF_START)
388 newrxtail = SRAM_SIZE - 2; 399 newrxtail = SRAM_SIZE - 2;
@@ -827,6 +838,7 @@ static void encx24j600_set_multicast_list(struct net_device *dev)
827static void encx24j600_hw_tx(struct encx24j600_priv *priv) 838static void encx24j600_hw_tx(struct encx24j600_priv *priv)
828{ 839{
829 struct net_device *dev = priv->ndev; 840 struct net_device *dev = priv->ndev;
841
830 netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n", 842 netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n",
831 priv->tx_skb->len); 843 priv->tx_skb->len);
832 844
@@ -894,7 +906,6 @@ static void encx24j600_tx_timeout(struct net_device *dev)
894 906
895 dev->stats.tx_errors++; 907 dev->stats.tx_errors++;
896 netif_wake_queue(dev); 908 netif_wake_queue(dev);
897 return;
898} 909}
899 910
900static int encx24j600_get_regs_len(struct net_device *dev) 911static int encx24j600_get_regs_len(struct net_device *dev)
@@ -957,12 +968,14 @@ static int encx24j600_set_settings(struct net_device *dev,
957static u32 encx24j600_get_msglevel(struct net_device *dev) 968static u32 encx24j600_get_msglevel(struct net_device *dev)
958{ 969{
959 struct encx24j600_priv *priv = netdev_priv(dev); 970 struct encx24j600_priv *priv = netdev_priv(dev);
971
960 return priv->msg_enable; 972 return priv->msg_enable;
961} 973}
962 974
963static void encx24j600_set_msglevel(struct net_device *dev, u32 val) 975static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
964{ 976{
965 struct encx24j600_priv *priv = netdev_priv(dev); 977 struct encx24j600_priv *priv = netdev_priv(dev);
978
966 priv->msg_enable = val; 979 priv->msg_enable = val;
967} 980}
968 981
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 00efb1c4c57e..17a70122df05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -1265,7 +1265,7 @@ static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
1265 .get_stats = &qed_iscsi_stats, 1265 .get_stats = &qed_iscsi_stats,
1266}; 1266};
1267 1267
1268const struct qed_iscsi_ops *qed_get_iscsi_ops() 1268const struct qed_iscsi_ops *qed_get_iscsi_ops(void)
1269{ 1269{
1270 return &qed_iscsi_ops_pass; 1270 return &qed_iscsi_ops_pass;
1271} 1271}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index ae32f855e31b..422289c232bc 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -460,6 +460,12 @@ static int emac_clks_phase1_init(struct platform_device *pdev,
460{ 460{
461 int ret; 461 int ret;
462 462
463 /* On ACPI platforms, clocks are controlled by firmware and/or
464 * ACPI, not by drivers.
465 */
466 if (has_acpi_companion(&pdev->dev))
467 return 0;
468
463 ret = emac_clks_get(pdev, adpt); 469 ret = emac_clks_get(pdev, adpt);
464 if (ret) 470 if (ret)
465 return ret; 471 return ret;
@@ -485,6 +491,9 @@ static int emac_clks_phase2_init(struct platform_device *pdev,
485{ 491{
486 int ret; 492 int ret;
487 493
494 if (has_acpi_companion(&pdev->dev))
495 return 0;
496
488 ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000); 497 ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
489 if (ret) 498 if (ret)
490 return ret; 499 return ret;
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4ff4e0491406..aa11b70b9ca4 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -472,8 +472,6 @@ static void r6040_down(struct net_device *dev)
472 iowrite16(adrp[0], ioaddr + MID_0L); 472 iowrite16(adrp[0], ioaddr + MID_0L);
473 iowrite16(adrp[1], ioaddr + MID_0M); 473 iowrite16(adrp[1], ioaddr + MID_0M);
474 iowrite16(adrp[2], ioaddr + MID_0H); 474 iowrite16(adrp[2], ioaddr + MID_0H);
475
476 phy_stop(dev->phydev);
477} 475}
478 476
479static int r6040_close(struct net_device *dev) 477static int r6040_close(struct net_device *dev)
@@ -481,12 +479,12 @@ static int r6040_close(struct net_device *dev)
481 struct r6040_private *lp = netdev_priv(dev); 479 struct r6040_private *lp = netdev_priv(dev);
482 struct pci_dev *pdev = lp->pdev; 480 struct pci_dev *pdev = lp->pdev;
483 481
484 spin_lock_irq(&lp->lock); 482 phy_stop(dev->phydev);
485 napi_disable(&lp->napi); 483 napi_disable(&lp->napi);
486 netif_stop_queue(dev); 484 netif_stop_queue(dev);
487 r6040_down(dev);
488 485
489 free_irq(dev->irq, dev); 486 spin_lock_irq(&lp->lock);
487 r6040_down(dev);
490 488
491 /* Free RX buffer */ 489 /* Free RX buffer */
492 r6040_free_rxbufs(dev); 490 r6040_free_rxbufs(dev);
@@ -496,6 +494,8 @@ static int r6040_close(struct net_device *dev)
496 494
497 spin_unlock_irq(&lp->lock); 495 spin_unlock_irq(&lp->lock);
498 496
497 free_irq(dev->irq, dev);
498
499 /* Free Descriptor memory */ 499 /* Free Descriptor memory */
500 if (lp->rx_ring) { 500 if (lp->rx_ring) {
501 pci_free_consistent(pdev, 501 pci_free_consistent(pdev,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index f644216eda1b..87bdc56b4e3a 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -120,44 +120,53 @@ static int efx_ethtool_phys_id(struct net_device *net_dev,
120} 120}
121 121
122/* This must be called with rtnl_lock held. */ 122/* This must be called with rtnl_lock held. */
123static int efx_ethtool_get_settings(struct net_device *net_dev, 123static int
124 struct ethtool_cmd *ecmd) 124efx_ethtool_get_link_ksettings(struct net_device *net_dev,
125 struct ethtool_link_ksettings *cmd)
125{ 126{
126 struct efx_nic *efx = netdev_priv(net_dev); 127 struct efx_nic *efx = netdev_priv(net_dev);
127 struct efx_link_state *link_state = &efx->link_state; 128 struct efx_link_state *link_state = &efx->link_state;
129 u32 supported;
128 130
129 mutex_lock(&efx->mac_lock); 131 mutex_lock(&efx->mac_lock);
130 efx->phy_op->get_settings(efx, ecmd); 132 efx->phy_op->get_link_ksettings(efx, cmd);
131 mutex_unlock(&efx->mac_lock); 133 mutex_unlock(&efx->mac_lock);
132 134
133 /* Both MACs support pause frames (bidirectional and respond-only) */ 135 /* Both MACs support pause frames (bidirectional and respond-only) */
134 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 136 ethtool_convert_link_mode_to_legacy_u32(&supported,
137 cmd->link_modes.supported);
138
139 supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
140
141 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
142 supported);
135 143
136 if (LOOPBACK_INTERNAL(efx)) { 144 if (LOOPBACK_INTERNAL(efx)) {
137 ethtool_cmd_speed_set(ecmd, link_state->speed); 145 cmd->base.speed = link_state->speed;
138 ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; 146 cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
139 } 147 }
140 148
141 return 0; 149 return 0;
142} 150}
143 151
144/* This must be called with rtnl_lock held. */ 152/* This must be called with rtnl_lock held. */
145static int efx_ethtool_set_settings(struct net_device *net_dev, 153static int
146 struct ethtool_cmd *ecmd) 154efx_ethtool_set_link_ksettings(struct net_device *net_dev,
155 const struct ethtool_link_ksettings *cmd)
147{ 156{
148 struct efx_nic *efx = netdev_priv(net_dev); 157 struct efx_nic *efx = netdev_priv(net_dev);
149 int rc; 158 int rc;
150 159
151 /* GMAC does not support 1000Mbps HD */ 160 /* GMAC does not support 1000Mbps HD */
152 if ((ethtool_cmd_speed(ecmd) == SPEED_1000) && 161 if ((cmd->base.speed == SPEED_1000) &&
153 (ecmd->duplex != DUPLEX_FULL)) { 162 (cmd->base.duplex != DUPLEX_FULL)) {
154 netif_dbg(efx, drv, efx->net_dev, 163 netif_dbg(efx, drv, efx->net_dev,
155 "rejecting unsupported 1000Mbps HD setting\n"); 164 "rejecting unsupported 1000Mbps HD setting\n");
156 return -EINVAL; 165 return -EINVAL;
157 } 166 }
158 167
159 mutex_lock(&efx->mac_lock); 168 mutex_lock(&efx->mac_lock);
160 rc = efx->phy_op->set_settings(efx, ecmd); 169 rc = efx->phy_op->set_link_ksettings(efx, cmd);
161 mutex_unlock(&efx->mac_lock); 170 mutex_unlock(&efx->mac_lock);
162 return rc; 171 return rc;
163} 172}
@@ -1342,8 +1351,6 @@ static int efx_ethtool_get_module_info(struct net_device *net_dev,
1342} 1351}
1343 1352
1344const struct ethtool_ops efx_ethtool_ops = { 1353const struct ethtool_ops efx_ethtool_ops = {
1345 .get_settings = efx_ethtool_get_settings,
1346 .set_settings = efx_ethtool_set_settings,
1347 .get_drvinfo = efx_ethtool_get_drvinfo, 1354 .get_drvinfo = efx_ethtool_get_drvinfo,
1348 .get_regs_len = efx_ethtool_get_regs_len, 1355 .get_regs_len = efx_ethtool_get_regs_len,
1349 .get_regs = efx_ethtool_get_regs, 1356 .get_regs = efx_ethtool_get_regs,
@@ -1373,4 +1380,6 @@ const struct ethtool_ops efx_ethtool_ops = {
1373 .get_ts_info = efx_ethtool_get_ts_info, 1380 .get_ts_info = efx_ethtool_get_ts_info,
1374 .get_module_info = efx_ethtool_get_module_info, 1381 .get_module_info = efx_ethtool_get_module_info,
1375 .get_module_eeprom = efx_ethtool_get_module_eeprom, 1382 .get_module_eeprom = efx_ethtool_get_module_eeprom,
1383 .get_link_ksettings = efx_ethtool_get_link_ksettings,
1384 .set_link_ksettings = efx_ethtool_set_link_ksettings,
1376}; 1385};
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 9dcd396784ae..c905971c5f3a 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -503,45 +503,59 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
503 kfree(phy_data); 503 kfree(phy_data);
504} 504}
505 505
506static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 506static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
507 struct ethtool_link_ksettings *cmd)
507{ 508{
508 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; 509 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
509 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); 510 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
510 int rc; 511 int rc;
511 512 u32 supported, advertising, lp_advertising;
512 ecmd->supported = 513
513 mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap); 514 supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
514 ecmd->advertising = efx->link_advertising; 515 advertising = efx->link_advertising;
515 ethtool_cmd_speed_set(ecmd, efx->link_state.speed); 516 cmd->base.speed = efx->link_state.speed;
516 ecmd->duplex = efx->link_state.fd; 517 cmd->base.duplex = efx->link_state.fd;
517 ecmd->port = mcdi_to_ethtool_media(phy_cfg->media); 518 cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
518 ecmd->phy_address = phy_cfg->port; 519 cmd->base.phy_address = phy_cfg->port;
519 ecmd->transceiver = XCVR_INTERNAL; 520 cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
520 ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg); 521 cmd->base.mdio_support = (efx->mdio.mode_support &
521 ecmd->mdio_support = (efx->mdio.mode_support &
522 (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22)); 522 (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
523 523
524 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
525 supported);
526 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
527 advertising);
528
524 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); 529 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
525 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, 530 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
526 outbuf, sizeof(outbuf), NULL); 531 outbuf, sizeof(outbuf), NULL);
527 if (rc) 532 if (rc)
528 return; 533 return;
529 ecmd->lp_advertising = 534 lp_advertising =
530 mcdi_to_ethtool_cap(phy_cfg->media, 535 mcdi_to_ethtool_cap(phy_cfg->media,
531 MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP)); 536 MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
537
538 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
539 lp_advertising);
532} 540}
533 541
534static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 542static int
543efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
544 const struct ethtool_link_ksettings *cmd)
535{ 545{
536 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; 546 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
537 u32 caps; 547 u32 caps;
538 int rc; 548 int rc;
549 u32 advertising;
550
551 ethtool_convert_link_mode_to_legacy_u32(&advertising,
552 cmd->link_modes.advertising);
539 553
540 if (ecmd->autoneg) { 554 if (cmd->base.autoneg) {
541 caps = (ethtool_to_mcdi_cap(ecmd->advertising) | 555 caps = (ethtool_to_mcdi_cap(advertising) |
542 1 << MC_CMD_PHY_CAP_AN_LBN); 556 1 << MC_CMD_PHY_CAP_AN_LBN);
543 } else if (ecmd->duplex) { 557 } else if (cmd->base.duplex) {
544 switch (ethtool_cmd_speed(ecmd)) { 558 switch (cmd->base.speed) {
545 case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; 559 case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
546 case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; 560 case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
547 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; 561 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
@@ -550,7 +564,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
550 default: return -EINVAL; 564 default: return -EINVAL;
551 } 565 }
552 } else { 566 } else {
553 switch (ethtool_cmd_speed(ecmd)) { 567 switch (cmd->base.speed) {
554 case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; 568 case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
555 case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; 569 case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
556 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; 570 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
@@ -563,9 +577,9 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
563 if (rc) 577 if (rc)
564 return rc; 578 return rc;
565 579
566 if (ecmd->autoneg) { 580 if (cmd->base.autoneg) {
567 efx_link_set_advertising( 581 efx_link_set_advertising(
568 efx, ecmd->advertising | ADVERTISED_Autoneg); 582 efx, advertising | ADVERTISED_Autoneg);
569 phy_cfg->forced_cap = 0; 583 phy_cfg->forced_cap = 0;
570 } else { 584 } else {
571 efx_link_set_advertising(efx, 0); 585 efx_link_set_advertising(efx, 0);
@@ -812,8 +826,8 @@ static const struct efx_phy_operations efx_mcdi_phy_ops = {
812 .poll = efx_mcdi_phy_poll, 826 .poll = efx_mcdi_phy_poll,
813 .fini = efx_port_dummy_op_void, 827 .fini = efx_port_dummy_op_void,
814 .remove = efx_mcdi_phy_remove, 828 .remove = efx_mcdi_phy_remove,
815 .get_settings = efx_mcdi_phy_get_settings, 829 .get_link_ksettings = efx_mcdi_phy_get_link_ksettings,
816 .set_settings = efx_mcdi_phy_set_settings, 830 .set_link_ksettings = efx_mcdi_phy_set_link_ksettings,
817 .test_alive = efx_mcdi_phy_test_alive, 831 .test_alive = efx_mcdi_phy_test_alive,
818 .run_tests = efx_mcdi_phy_run_tests, 832 .run_tests = efx_mcdi_phy_run_tests,
819 .test_name = efx_mcdi_phy_test_name, 833 .test_name = efx_mcdi_phy_test_name,
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 8692e829b40f..1a635ced62d0 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -720,8 +720,8 @@ static inline bool efx_link_state_equal(const struct efx_link_state *left,
720 * @reconfigure: Reconfigure PHY (e.g. for new link parameters) 720 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
721 * @poll: Update @link_state and report whether it changed. 721 * @poll: Update @link_state and report whether it changed.
722 * Serialised by the mac_lock. 722 * Serialised by the mac_lock.
723 * @get_settings: Get ethtool settings. Serialised by the mac_lock. 723 * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock.
724 * @set_settings: Set ethtool settings. Serialised by the mac_lock. 724 * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock.
725 * @set_npage_adv: Set abilities advertised in (Extended) Next Page 725 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
726 * (only needed where AN bit is set in mmds) 726 * (only needed where AN bit is set in mmds)
727 * @test_alive: Test that PHY is 'alive' (online) 727 * @test_alive: Test that PHY is 'alive' (online)
@@ -736,10 +736,10 @@ struct efx_phy_operations {
736 void (*remove) (struct efx_nic *efx); 736 void (*remove) (struct efx_nic *efx);
737 int (*reconfigure) (struct efx_nic *efx); 737 int (*reconfigure) (struct efx_nic *efx);
738 bool (*poll) (struct efx_nic *efx); 738 bool (*poll) (struct efx_nic *efx);
739 void (*get_settings) (struct efx_nic *efx, 739 void (*get_link_ksettings)(struct efx_nic *efx,
740 struct ethtool_cmd *ecmd); 740 struct ethtool_link_ksettings *cmd);
741 int (*set_settings) (struct efx_nic *efx, 741 int (*set_link_ksettings)(struct efx_nic *efx,
742 struct ethtool_cmd *ecmd); 742 const struct ethtool_link_ksettings *cmd);
743 void (*set_npage_adv) (struct efx_nic *efx, u32); 743 void (*set_npage_adv) (struct efx_nic *efx, u32);
744 int (*test_alive) (struct efx_nic *efx); 744 int (*test_alive) (struct efx_nic *efx);
745 const char *(*test_name) (struct efx_nic *efx, unsigned int index); 745 const char *(*test_name) (struct efx_nic *efx, unsigned int index);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 98f10c216521..8b6810bad54b 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -158,9 +158,9 @@ static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
158 if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr))) 158 if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
159 return false; 159 return false;
160 160
161 iph = (struct iphdr *)(skb->data + hdrlen + sizeof(struct iphdr)); 161 iph = (struct iphdr *)(skb->data + hdrlen);
162 162
163 return iph->saddr != pctx->ms_addr_ip4.s_addr; 163 return iph->saddr == pctx->ms_addr_ip4.s_addr;
164} 164}
165 165
166/* Check if the inner IP source address in this packet is assigned to any 166/* Check if the inner IP source address in this packet is assigned to any
@@ -423,11 +423,11 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
423 423
424 /* Bits 8 7 6 5 4 3 2 1 424 /* Bits 8 7 6 5 4 3 2 1
425 * +--+--+--+--+--+--+--+--+ 425 * +--+--+--+--+--+--+--+--+
426 * |version |PT| 1| E| S|PN| 426 * |version |PT| 0| E| S|PN|
427 * +--+--+--+--+--+--+--+--+ 427 * +--+--+--+--+--+--+--+--+
428 * 0 0 1 1 1 0 0 0 428 * 0 0 1 1 1 0 0 0
429 */ 429 */
430 gtp1->flags = 0x38; /* v1, GTP-non-prime. */ 430 gtp1->flags = 0x30; /* v1, GTP-non-prime. */
431 gtp1->type = GTP_TPDU; 431 gtp1->type = GTP_TPDU;
432 gtp1->length = htons(payload_len); 432 gtp1->length = htons(payload_len);
433 gtp1->tid = htonl(pctx->u.v1.o_tei); 433 gtp1->tid = htonl(pctx->u.v1.o_tei);
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index f293d33fb28f..8d5b903d1d9d 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -517,9 +517,9 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
517 517
518 mtt = irda_get_mtt(skb); 518 mtt = irda_get_mtt(skb);
519 pr_debug("%s: %ld, mtt=%d\n", __func__, jiffies, mtt); 519 pr_debug("%s: %ld, mtt=%d\n", __func__, jiffies, mtt);
520 if (mtt > 1000) 520 if (mtt > 1000)
521 mdelay(mtt / 1000); 521 mdelay(mtt / 1000);
522 else if (mtt) 522 else if (mtt)
523 udelay(mtt); 523 udelay(mtt);
524 524
525 /* Enable DMA interrupt */ 525 /* Enable DMA interrupt */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b425fa1013af..08327e005ccc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/virtio.h> 23#include <linux/virtio.h>
24#include <linux/virtio_net.h> 24#include <linux/virtio_net.h>
25#include <linux/bpf.h>
25#include <linux/scatterlist.h> 26#include <linux/scatterlist.h>
26#include <linux/if_vlan.h> 27#include <linux/if_vlan.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
@@ -81,6 +82,8 @@ struct receive_queue {
81 82
82 struct napi_struct napi; 83 struct napi_struct napi;
83 84
85 struct bpf_prog __rcu *xdp_prog;
86
84 /* Chain pages by the private ptr. */ 87 /* Chain pages by the private ptr. */
85 struct page *pages; 88 struct page *pages;
86 89
@@ -111,6 +114,9 @@ struct virtnet_info {
111 /* # of queue pairs currently used by the driver */ 114 /* # of queue pairs currently used by the driver */
112 u16 curr_queue_pairs; 115 u16 curr_queue_pairs;
113 116
117 /* # of XDP queue pairs currently used by the driver */
118 u16 xdp_queue_pairs;
119
114 /* I like... big packets and I cannot lie! */ 120 /* I like... big packets and I cannot lie! */
115 bool big_packets; 121 bool big_packets;
116 122
@@ -324,6 +330,90 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
324 return skb; 330 return skb;
325} 331}
326 332
333static void virtnet_xdp_xmit(struct virtnet_info *vi,
334 struct receive_queue *rq,
335 struct send_queue *sq,
336 struct xdp_buff *xdp)
337{
338 struct page *page = virt_to_head_page(xdp->data);
339 struct virtio_net_hdr_mrg_rxbuf *hdr;
340 unsigned int num_sg, len;
341 void *xdp_sent;
342 int err;
343
344 /* Free up any pending old buffers before queueing new ones. */
345 while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
346 struct page *sent_page = virt_to_head_page(xdp_sent);
347
348 if (vi->mergeable_rx_bufs)
349 put_page(sent_page);
350 else
351 give_pages(rq, sent_page);
352 }
353
354 /* Zero header and leave csum up to XDP layers */
355 hdr = xdp->data;
356 memset(hdr, 0, vi->hdr_len);
357
358 num_sg = 1;
359 sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
360 err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
361 xdp->data, GFP_ATOMIC);
362 if (unlikely(err)) {
363 if (vi->mergeable_rx_bufs)
364 put_page(page);
365 else
366 give_pages(rq, page);
367 return; // On error abort to avoid unnecessary kick
368 } else if (!vi->mergeable_rx_bufs) {
369 /* If not mergeable bufs must be big packets so cleanup pages */
370 give_pages(rq, (struct page *)page->private);
371 page->private = 0;
372 }
373
374 virtqueue_kick(sq->vq);
375}
376
377static u32 do_xdp_prog(struct virtnet_info *vi,
378 struct receive_queue *rq,
379 struct bpf_prog *xdp_prog,
380 struct page *page, int offset, int len)
381{
382 int hdr_padded_len;
383 struct xdp_buff xdp;
384 unsigned int qp;
385 u32 act;
386 u8 *buf;
387
388 buf = page_address(page) + offset;
389
390 if (vi->mergeable_rx_bufs)
391 hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
392 else
393 hdr_padded_len = sizeof(struct padded_vnet_hdr);
394
395 xdp.data = buf + hdr_padded_len;
396 xdp.data_end = xdp.data + (len - vi->hdr_len);
397
398 act = bpf_prog_run_xdp(xdp_prog, &xdp);
399 switch (act) {
400 case XDP_PASS:
401 return XDP_PASS;
402 case XDP_TX:
403 qp = vi->curr_queue_pairs -
404 vi->xdp_queue_pairs +
405 smp_processor_id();
406 xdp.data = buf + (vi->mergeable_rx_bufs ? 0 : 4);
407 virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp);
408 return XDP_TX;
409 default:
410 bpf_warn_invalid_xdp_action(act);
411 case XDP_ABORTED:
412 case XDP_DROP:
413 return XDP_DROP;
414 }
415}
416
327static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len) 417static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len)
328{ 418{
329 struct sk_buff * skb = buf; 419 struct sk_buff * skb = buf;
@@ -340,17 +430,102 @@ static struct sk_buff *receive_big(struct net_device *dev,
340 void *buf, 430 void *buf,
341 unsigned int len) 431 unsigned int len)
342{ 432{
433 struct bpf_prog *xdp_prog;
343 struct page *page = buf; 434 struct page *page = buf;
344 struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); 435 struct sk_buff *skb;
345 436
437 rcu_read_lock();
438 xdp_prog = rcu_dereference(rq->xdp_prog);
439 if (xdp_prog) {
440 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
441 u32 act;
442
443 if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
444 goto err_xdp;
445 act = do_xdp_prog(vi, rq, xdp_prog, page, 0, len);
446 switch (act) {
447 case XDP_PASS:
448 break;
449 case XDP_TX:
450 rcu_read_unlock();
451 goto xdp_xmit;
452 case XDP_DROP:
453 default:
454 goto err_xdp;
455 }
456 }
457 rcu_read_unlock();
458
459 skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
346 if (unlikely(!skb)) 460 if (unlikely(!skb))
347 goto err; 461 goto err;
348 462
349 return skb; 463 return skb;
350 464
465err_xdp:
466 rcu_read_unlock();
351err: 467err:
352 dev->stats.rx_dropped++; 468 dev->stats.rx_dropped++;
353 give_pages(rq, page); 469 give_pages(rq, page);
470xdp_xmit:
471 return NULL;
472}
473
474/* The conditions to enable XDP should preclude the underlying device from
475 * sending packets across multiple buffers (num_buf > 1). However per spec
476 * it does not appear to be illegal to do so but rather just against convention.
477 * So in order to avoid making a system unresponsive the packets are pushed
478 * into a page and the XDP program is run. This will be extremely slow and we
479 * push a warning to the user to fix this as soon as possible. Fixing this may
480 * require resolving the underlying hardware to determine why multiple buffers
481 * are being received or simply loading the XDP program in the ingress stack
482 * after the skb is built because there is no advantage to running it here
483 * anymore.
484 */
485static struct page *xdp_linearize_page(struct receive_queue *rq,
486 u16 num_buf,
487 struct page *p,
488 int offset,
489 unsigned int *len)
490{
491 struct page *page = alloc_page(GFP_ATOMIC);
492 unsigned int page_off = 0;
493
494 if (!page)
495 return NULL;
496
497 memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
498 page_off += *len;
499
500 while (--num_buf) {
501 unsigned int buflen;
502 unsigned long ctx;
503 void *buf;
504 int off;
505
506 ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
507 if (unlikely(!ctx))
508 goto err_buf;
509
510 /* guard against a misconfigured or uncooperative backend that
511 * is sending packet larger than the MTU.
512 */
513 if ((page_off + buflen) > PAGE_SIZE)
514 goto err_buf;
515
516 buf = mergeable_ctx_to_buf_address(ctx);
517 p = virt_to_head_page(buf);
518 off = buf - page_address(p);
519
520 memcpy(page_address(page) + page_off,
521 page_address(p) + off, buflen);
522 page_off += buflen;
523 }
524
525 *len = page_off;
526 return page;
527err_buf:
528 __free_pages(page, 0);
354 return NULL; 529 return NULL;
355} 530}
356 531
@@ -365,11 +540,67 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
365 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 540 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
366 struct page *page = virt_to_head_page(buf); 541 struct page *page = virt_to_head_page(buf);
367 int offset = buf - page_address(page); 542 int offset = buf - page_address(page);
368 unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx)); 543 struct sk_buff *head_skb, *curr_skb;
544 struct bpf_prog *xdp_prog;
545 unsigned int truesize;
546
547 head_skb = NULL;
548
549 rcu_read_lock();
550 xdp_prog = rcu_dereference(rq->xdp_prog);
551 if (xdp_prog) {
552 struct page *xdp_page;
553 u32 act;
554
555 /* No known backend devices should send packets with
556 * more than a single buffer when XDP conditions are
557 * met. However it is not strictly illegal so the case
558 * is handled as an exception and a warning is thrown.
559 */
560 if (unlikely(num_buf > 1)) {
561 bpf_warn_invalid_xdp_buffer();
562
563 /* linearize data for XDP */
564 xdp_page = xdp_linearize_page(rq, num_buf,
565 page, offset, &len);
566 if (!xdp_page)
567 goto err_xdp;
568 offset = 0;
569 } else {
570 xdp_page = page;
571 }
572
573 /* Transient failure which in theory could occur if
574 * in-flight packets from before XDP was enabled reach
575 * the receive path after XDP is loaded. In practice I
576 * was not able to create this condition.
577 */
578 if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
579 goto err_xdp;
580
581 act = do_xdp_prog(vi, rq, xdp_prog, page, offset, len);
582 switch (act) {
583 case XDP_PASS:
584 if (unlikely(xdp_page != page))
585 __free_pages(xdp_page, 0);
586 break;
587 case XDP_TX:
588 if (unlikely(xdp_page != page))
589 goto err_xdp;
590 rcu_read_unlock();
591 goto xdp_xmit;
592 case XDP_DROP:
593 default:
594 if (unlikely(xdp_page != page))
595 __free_pages(xdp_page, 0);
596 goto err_xdp;
597 }
598 }
599 rcu_read_unlock();
369 600
370 struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len, 601 truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
371 truesize); 602 head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
372 struct sk_buff *curr_skb = head_skb; 603 curr_skb = head_skb;
373 604
374 if (unlikely(!curr_skb)) 605 if (unlikely(!curr_skb))
375 goto err_skb; 606 goto err_skb;
@@ -423,6 +654,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
423 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); 654 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
424 return head_skb; 655 return head_skb;
425 656
657err_xdp:
658 rcu_read_unlock();
426err_skb: 659err_skb:
427 put_page(page); 660 put_page(page);
428 while (--num_buf) { 661 while (--num_buf) {
@@ -439,6 +672,7 @@ err_skb:
439err_buf: 672err_buf:
440 dev->stats.rx_dropped++; 673 dev->stats.rx_dropped++;
441 dev_kfree_skb(head_skb); 674 dev_kfree_skb(head_skb);
675xdp_xmit:
442 return NULL; 676 return NULL;
443} 677}
444 678
@@ -1337,6 +1571,13 @@ static int virtnet_set_channels(struct net_device *dev,
1337 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) 1571 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
1338 return -EINVAL; 1572 return -EINVAL;
1339 1573
1574 /* For now we don't support modifying channels while XDP is loaded
1575 * also when XDP is loaded all RX queues have XDP programs so we only
1576 * need to check a single RX queue.
1577 */
1578 if (vi->rq[0].xdp_prog)
1579 return -EINVAL;
1580
1340 get_online_cpus(); 1581 get_online_cpus();
1341 err = virtnet_set_queues(vi, queue_pairs); 1582 err = virtnet_set_queues(vi, queue_pairs);
1342 if (!err) { 1583 if (!err) {
@@ -1428,6 +1669,93 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
1428 .set_settings = virtnet_set_settings, 1669 .set_settings = virtnet_set_settings,
1429}; 1670};
1430 1671
/* Attach or detach an XDP program on every receive queue.
 *
 * Invoked under the rtnl lock from the XDP_SETUP_PROG path of ndo_xdp.
 * @prog is the new program, or NULL to detach.  Returns 0 on success or
 * a negative errno, in which case the device is left as it was.
 */
static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
	/* An XDP frame must fit in a single page together with the padded
	 * vnet header, so cap the MTU accordingly. */
	unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
	struct virtnet_info *vi = netdev_priv(dev);
	struct bpf_prog *old_prog;
	u16 xdp_qp = 0, curr_qp;
	int i, err;

	/* Host-side LRO/TSO can deliver merged super-frames that an XDP
	 * program cannot treat as individual packets. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6)) {
		netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n");
		return -EOPNOTSUPP;
	}

	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
		netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n");
		return -EINVAL;
	}

	if (dev->mtu > max_sz) {
		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
		return -EINVAL;
	}

	/* Queue pairs currently used for normal traffic (excluding any
	 * pairs already reserved for XDP_TX). */
	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
	if (prog)
		xdp_qp = nr_cpu_ids;

	/* XDP requires extra queues for XDP_TX */
	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
		netdev_warn(dev, "request %i queues but max is %i\n",
			    curr_qp + xdp_qp, vi->max_queue_pairs);
		return -ENOMEM;
	}

	err = virtnet_set_queues(vi, curr_qp + xdp_qp);
	if (err) {
		dev_warn(&dev->dev, "XDP Device queue allocation failure.\n");
		return err;
	}

	if (prog) {
		/* One reference per additional queue that will hold the
		 * program pointer; roll back the queue change on failure. */
		prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
		if (IS_ERR(prog)) {
			virtnet_set_queues(vi, curr_qp);
			return PTR_ERR(prog);
		}
	}

	vi->xdp_queue_pairs = xdp_qp;
	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);

	/* Publish the new program on each rx queue and release the old
	 * one; readers use rcu_read_lock in the receive path. */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
		rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
		if (old_prog)
			bpf_prog_put(old_prog);
	}

	return 0;
}
1733
1734static bool virtnet_xdp_query(struct net_device *dev)
1735{
1736 struct virtnet_info *vi = netdev_priv(dev);
1737 int i;
1738
1739 for (i = 0; i < vi->max_queue_pairs; i++) {
1740 if (vi->rq[i].xdp_prog)
1741 return true;
1742 }
1743 return false;
1744}
1745
1746static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
1747{
1748 switch (xdp->command) {
1749 case XDP_SETUP_PROG:
1750 return virtnet_xdp_set(dev, xdp->prog);
1751 case XDP_QUERY_PROG:
1752 xdp->prog_attached = virtnet_xdp_query(dev);
1753 return 0;
1754 default:
1755 return -EINVAL;
1756 }
1757}
1758
1431static const struct net_device_ops virtnet_netdev = { 1759static const struct net_device_ops virtnet_netdev = {
1432 .ndo_open = virtnet_open, 1760 .ndo_open = virtnet_open,
1433 .ndo_stop = virtnet_close, 1761 .ndo_stop = virtnet_close,
@@ -1444,6 +1772,7 @@ static const struct net_device_ops virtnet_netdev = {
1444#ifdef CONFIG_NET_RX_BUSY_POLL 1772#ifdef CONFIG_NET_RX_BUSY_POLL
1445 .ndo_busy_poll = virtnet_busy_poll, 1773 .ndo_busy_poll = virtnet_busy_poll,
1446#endif 1774#endif
1775 .ndo_xdp = virtnet_xdp,
1447}; 1776};
1448 1777
1449static void virtnet_config_changed_work(struct work_struct *work) 1778static void virtnet_config_changed_work(struct work_struct *work)
@@ -1505,12 +1834,20 @@ static void virtnet_free_queues(struct virtnet_info *vi)
1505 1834
/* Release per-receive-queue resources: cached pages and XDP programs.
 *
 * The rtnl lock is taken so the xdp_prog pointers can be dereferenced
 * and cleared safely with respect to concurrent ndo_xdp updates.
 */
static void free_receive_bufs(struct virtnet_info *vi)
{
	struct bpf_prog *old_prog;
	int i;

	rtnl_lock();
	for (i = 0; i < vi->max_queue_pairs; i++) {
		/* Drain this queue's cached page list. */
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);

		/* Detach and drop the queue's XDP program, if any. */
		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
		if (old_prog)
			bpf_prog_put(old_prog);
	}
	rtnl_unlock();
}
1515 1852
1516static void free_receive_page_frags(struct virtnet_info *vi) 1853static void free_receive_page_frags(struct virtnet_info *vi)
@@ -1521,6 +1858,16 @@ static void free_receive_page_frags(struct virtnet_info *vi)
1521 put_page(vi->rq[i].alloc_frag.page); 1858 put_page(vi->rq[i].alloc_frag.page);
1522} 1859}
1523 1860
1861static bool is_xdp_queue(struct virtnet_info *vi, int q)
1862{
1863 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1864 return false;
1865 else if (q < vi->curr_queue_pairs)
1866 return true;
1867 else
1868 return false;
1869}
1870
1524static void free_unused_bufs(struct virtnet_info *vi) 1871static void free_unused_bufs(struct virtnet_info *vi)
1525{ 1872{
1526 void *buf; 1873 void *buf;
@@ -1528,8 +1875,12 @@ static void free_unused_bufs(struct virtnet_info *vi)
1528 1875
1529 for (i = 0; i < vi->max_queue_pairs; i++) { 1876 for (i = 0; i < vi->max_queue_pairs; i++) {
1530 struct virtqueue *vq = vi->sq[i].vq; 1877 struct virtqueue *vq = vi->sq[i].vq;
1531 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) 1878 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1532 dev_kfree_skb(buf); 1879 if (!is_xdp_queue(vi, i))
1880 dev_kfree_skb(buf);
1881 else
1882 put_page(virt_to_head_page(buf));
1883 }
1533 } 1884 }
1534 1885
1535 for (i = 0; i < vi->max_queue_pairs; i++) { 1886 for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1930,7 +2281,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1930 goto free_unregister_netdev; 2281 goto free_unregister_netdev;
1931 } 2282 }
1932 2283
1933 virtnet_set_affinity(vi); 2284 rtnl_lock();
2285 virtnet_set_queues(vi, vi->curr_queue_pairs);
2286 rtnl_unlock();
1934 2287
1935 /* Assume link up if device can't report link status, 2288 /* Assume link up if device can't report link status,
1936 otherwise get link status from config. */ 2289 otherwise get link status from config. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 3bca24651dc0..7532646c3b7b 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -366,6 +366,8 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
366 struct in6_addr *nexthop; 366 struct in6_addr *nexthop;
367 int ret; 367 int ret;
368 368
369 nf_reset(skb);
370
369 skb->protocol = htons(ETH_P_IPV6); 371 skb->protocol = htons(ETH_P_IPV6);
370 skb->dev = dev; 372 skb->dev = dev;
371 373
@@ -547,6 +549,8 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
547 u32 nexthop; 549 u32 nexthop;
548 int ret = -EINVAL; 550 int ret = -EINVAL;
549 551
552 nf_reset(skb);
553
550 /* Be paranoid, rather than too clever. */ 554 /* Be paranoid, rather than too clever. */
551 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 555 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
552 struct sk_buff *skb2; 556 struct sk_buff *skb2;
@@ -849,8 +853,6 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
849{ 853{
850 struct net *net = dev_net(dev); 854 struct net *net = dev_net(dev);
851 855
852 nf_reset(skb);
853
854 if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0) 856 if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
855 skb = NULL; /* kfree_skb(skb) handled by nf code */ 857 skb = NULL; /* kfree_skb(skb) handled by nf code */
856 858
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index 5920c996fcdf..ff2e4a5654c7 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
95static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32); 95static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
96 96
/* Media operation table for DS3 links. */
lmc_media_t lmc_ds3_media = {
	.init = lmc_ds3_init,				/* special media init stuff */
	.defaults = lmc_ds3_default,			/* reset to default state */
	.set_status = lmc_ds3_set_status,		/* reset status to state provided */
	.set_clock_source = lmc_dummy_set_1,		/* set clock source */
	.set_speed = lmc_dummy_set2_1,			/* set line speed */
	.set_cable_length = lmc_ds3_set_100ft,		/* set cable length */
	.set_scrambler = lmc_ds3_set_scram,		/* set scrambler */
	.get_link_status = lmc_ds3_get_link_status,	/* get link status */
	.set_link_status = lmc_dummy_set_1,		/* set link status */
	.set_crc_length = lmc_ds3_set_crc_length,	/* set CRC length */
	.set_circuit_type = lmc_dummy_set_1,		/* set T1 or E1 circuit type */
	.watchdog = lmc_ds3_watchdog
};
111 111
/* Media operation table for HSSI links. */
lmc_media_t lmc_hssi_media = {
	.init = lmc_hssi_init,				/* special media init stuff */
	.defaults = lmc_hssi_default,			/* reset to default state */
	.set_status = lmc_hssi_set_status,		/* reset status to state provided */
	.set_clock_source = lmc_hssi_set_clock,		/* set clock source */
	.set_speed = lmc_dummy_set2_1,			/* set line speed */
	.set_cable_length = lmc_dummy_set_1,		/* set cable length */
	.set_scrambler = lmc_dummy_set_1,		/* set scrambler */
	.get_link_status = lmc_hssi_get_link_status,	/* get link status */
	.set_link_status = lmc_hssi_set_link_status,	/* set link status */
	.set_crc_length = lmc_hssi_set_crc_length,	/* set CRC length */
	.set_circuit_type = lmc_dummy_set_1,		/* set T1 or E1 circuit type */
	.watchdog = lmc_hssi_watchdog
};
126 126
/* Media operation table for SSI links. */
lmc_media_t lmc_ssi_media = {
	.init = lmc_ssi_init,				/* special media init stuff */
	.defaults = lmc_ssi_default,			/* reset to default state */
	.set_status = lmc_ssi_set_status,		/* reset status to state provided */
	.set_clock_source = lmc_ssi_set_clock,		/* set clock source */
	.set_speed = lmc_ssi_set_speed,			/* set line speed */
	.set_cable_length = lmc_dummy_set_1,		/* set cable length */
	.set_scrambler = lmc_dummy_set_1,		/* set scrambler */
	.get_link_status = lmc_ssi_get_link_status,	/* get link status */
	.set_link_status = lmc_ssi_set_link_status,	/* set link status */
	.set_crc_length = lmc_ssi_set_crc_length,	/* set CRC length */
	.set_circuit_type = lmc_dummy_set_1,		/* set T1 or E1 circuit type */
	.watchdog = lmc_ssi_watchdog
};
140 141
/* Media operation table for T1/E1 links. */
lmc_media_t lmc_t1_media = {
	.init = lmc_t1_init,				/* special media init stuff */
	.defaults = lmc_t1_default,			/* reset to default state */
	.set_status = lmc_t1_set_status,		/* reset status to state provided */
	.set_clock_source = lmc_t1_set_clock,		/* set clock source */
	.set_speed = lmc_dummy_set2_1,			/* set line speed */
	.set_cable_length = lmc_dummy_set_1,		/* set cable length */
	.set_scrambler = lmc_dummy_set_1,		/* set scrambler */
	.get_link_status = lmc_t1_get_link_status,	/* get link status */
	.set_link_status = lmc_dummy_set_1,		/* set link status */
	.set_crc_length = lmc_t1_set_crc_length,	/* set CRC length */
	.set_circuit_type = lmc_t1_set_circuit_type,	/* set T1 or E1 circuit type */
	.watchdog = lmc_t1_watchdog
};
155 156
156static void 157static void