author	David S. Miller <davem@davemloft.net>	2009-10-13 15:55:20 -0400
committer	David S. Miller <davem@davemloft.net>	2009-10-13 15:55:20 -0400
commit	421355de876b9f3fcc7e4cb6026e416fb12a5068 (patch)
tree	fb814456af86c75e1eb9e330994dda461fc5b4f7
parent	aace495933a981274b6491d71b915165a61defdc (diff)
parent	0fe7463a35aadfaf22d1ca58325ab3851b8d757c (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
-rw-r--r--	MAINTAINERS	8
-rw-r--r--	drivers/net/Kconfig	8
-rw-r--r--	drivers/net/Makefile	1
-rw-r--r--	drivers/net/acenic.c	3
-rw-r--r--	drivers/net/davinci_emac.c	36
-rw-r--r--	drivers/net/ethoc.c	3
-rw-r--r--	drivers/net/irda/sa1100_ir.c	7
-rw-r--r--	drivers/net/ixp2000/enp2611.c	18
-rw-r--r--	drivers/net/ixp2000/ixpdev.c	11
-rw-r--r--	drivers/net/netxen/netxen_nic_main.c	3
-rw-r--r--	drivers/net/pcmcia/3c574_cs.c	13
-rw-r--r--	drivers/net/r8169.c	987
-rw-r--r--	drivers/net/usb/pegasus.c	13
-rw-r--r--	drivers/net/usb/pegasus.h	6
-rw-r--r--	drivers/net/vmxnet3/Makefile	35
-rw-r--r--	drivers/net/vmxnet3/upt1_defs.h	96
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_defs.h	535
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_drv.c	2556
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_ethtool.c	566
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_int.h	389
-rw-r--r--	drivers/net/wan/hdlc_cisco.c	18
-rw-r--r--	drivers/net/wireless/b43/xmit.c	3
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-agn.c	2
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl3945-base.c	2
-rw-r--r--	drivers/net/wireless/libertas/cmdresp.c	1
-rw-r--r--	include/net/mac80211.h	2
-rw-r--r--	include/net/sock.h	10
-rw-r--r--	net/ipv4/tcp_minisocks.c	1
-rw-r--r--	net/ipv4/udp.c	73
-rw-r--r--	net/mac80211/ibss.c	4
-rw-r--r--	net/mac80211/rx.c	2
-rw-r--r--	net/sched/act_pedit.c	2
32 files changed, 5273 insertions, 141 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index c57c2e6c8d27..ca4131ea5f7b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3654,6 +3654,7 @@ NETWORKING [GENERAL]
3654M: "David S. Miller" <davem@davemloft.net> 3654M: "David S. Miller" <davem@davemloft.net>
3655L: netdev@vger.kernel.org 3655L: netdev@vger.kernel.org
3656W: http://www.linuxfoundation.org/en/Net 3656W: http://www.linuxfoundation.org/en/Net
3657W: http://patchwork.ozlabs.org/project/netdev/list/
3657T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git 3658T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
3658S: Maintained 3659S: Maintained
3659F: net/ 3660F: net/
@@ -5635,6 +5636,13 @@ S: Maintained
 F:	drivers/vlynq/vlynq.c
 F:	include/linux/vlynq.h
 
+VMWARE VMXNET3 ETHERNET DRIVER
+M:	Shreyas Bhatewara <sbhatewara@vmware.com>
+M:	VMware, Inc. <pv-drivers@vmware.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/vmxnet3/
+
 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
 M:	Liam Girdwood <lrg@slimlogic.co.uk>
 M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d9772af812c4..04fb8b0ca3e6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3230,4 +3230,12 @@ config VIRTIO_NET
 	  This is the virtual network driver for virtio.  It can be used with
 	  lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
 
+config VMXNET3
+	tristate "VMware VMXNET3 ethernet driver"
+	depends on PCI && X86
+	help
+	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
+	  To compile this driver as a module, choose M here: the
+	  module will be called vmxnet3.
+
 endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 48d82e901aad..fc6c8bb92c50 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_TEHUTI) += tehuti.o
 obj-$(CONFIG_ENIC) += enic/
 obj-$(CONFIG_JME) += jme.o
 obj-$(CONFIG_BE2NET) += benet/
+obj-$(CONFIG_VMXNET3) += vmxnet3/
 
 gianfar_driver-objs := gianfar.o \
 	gianfar_ethtool.o \
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 5f0b05c2d71f..d82a9a994753 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -1209,7 +1209,8 @@ static int __devinit ace_init(struct net_device *dev)
 	memset(ap->info, 0, sizeof(struct ace_info));
 	memset(ap->skb, 0, sizeof(struct ace_skb));
 
-	if (ace_load_firmware(dev))
+	ecode = ace_load_firmware(dev);
+	if (ecode)
 		goto init_error;
 
 	ap->fw_running = 0;
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index a421ec046b3c..a876dce13b9e 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -330,6 +330,9 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 #define EMAC_DM646X_MAC_EOI_C0_RXEN	(0x01)
 #define EMAC_DM646X_MAC_EOI_C0_TXEN	(0x02)
 
+/* EMAC Stats Clear Mask */
+#define EMAC_STATS_CLR_MASK	(0xFFFFFFFF)
+
 /** net_buf_obj: EMAC network bufferdata structure
  *
  * EMAC network buffer data structure
@@ -2544,40 +2547,49 @@ static int emac_dev_stop(struct net_device *ndev)
 static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
 {
 	struct emac_priv *priv = netdev_priv(ndev);
+	u32 mac_control;
+	u32 stats_clear_mask;
 
 	/* update emac hardware stats and reset the registers*/
 
+	mac_control = emac_read(EMAC_MACCONTROL);
+
+	if (mac_control & EMAC_MACCONTROL_GMIIEN)
+		stats_clear_mask = EMAC_STATS_CLR_MASK;
+	else
+		stats_clear_mask = 0;
+
 	priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
-	emac_write(EMAC_RXMCASTFRAMES, EMAC_ALL_MULTI_REG_VALUE);
+	emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);
 
 	priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) +
 					   emac_read(EMAC_TXSINGLECOLL) +
 					   emac_read(EMAC_TXMULTICOLL));
-	emac_write(EMAC_TXCOLLISION, EMAC_ALL_MULTI_REG_VALUE);
-	emac_write(EMAC_TXSINGLECOLL, EMAC_ALL_MULTI_REG_VALUE);
-	emac_write(EMAC_TXMULTICOLL, EMAC_ALL_MULTI_REG_VALUE);
+	emac_write(EMAC_TXCOLLISION, stats_clear_mask);
+	emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
+	emac_write(EMAC_TXMULTICOLL, stats_clear_mask);
 
 	priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
 						emac_read(EMAC_RXJABBER) +
 						emac_read(EMAC_RXUNDERSIZED));
-	emac_write(EMAC_RXOVERSIZED, EMAC_ALL_MULTI_REG_VALUE);
-	emac_write(EMAC_RXJABBER, EMAC_ALL_MULTI_REG_VALUE);
-	emac_write(EMAC_RXUNDERSIZED, EMAC_ALL_MULTI_REG_VALUE);
+	emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
+	emac_write(EMAC_RXJABBER, stats_clear_mask);
+	emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);
 
 	priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
 					       emac_read(EMAC_RXMOFOVERRUNS));
-	emac_write(EMAC_RXSOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE);
-	emac_write(EMAC_RXMOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE);
+	emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
+	emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);
 
 	priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
-	emac_write(EMAC_RXDMAOVERRUNS, EMAC_ALL_MULTI_REG_VALUE);
+	emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);
 
 	priv->net_dev_stats.tx_carrier_errors +=
 		emac_read(EMAC_TXCARRIERSENSE);
-	emac_write(EMAC_TXCARRIERSENSE, EMAC_ALL_MULTI_REG_VALUE);
+	emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
 
 	priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
-	emac_write(EMAC_TXUNDERRUN, EMAC_ALL_MULTI_REG_VALUE);
+	emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
 
 	return &priv->net_dev_stats;
 }
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 0c229a5fa82a..a25f8ed8109d 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -663,7 +663,8 @@ static int ethoc_open(struct net_device *dev)
 		return ret;
 
 	/* calculate the number of TX/RX buffers, maximum 128 supported */
-	num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
+	num_bd = min_t(unsigned int,
+		128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
 	priv->num_tx = max(min_tx, num_bd / 4);
 	priv->num_rx = num_bd - priv->num_tx;
 	ethoc_write(priv, TX_BD_NUM, priv->num_tx);
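The one-line ethoc fix above exists because dev->mem_end and dev->mem_start are unsigned long while 128 is a plain int, and the kernel's min() macro rejects operands of different types at compile time; min_t() casts both sides to the named type first. A minimal userspace sketch of the same pattern (the min_t() here is a simplified stand-in for the definition in <linux/kernel.h>, and the memory range is made up):

	#include <stdio.h>

	/* Simplified stand-in for the kernel's min_t(): force both operands
	 * to the named type before comparing, which is exactly how the
	 * mixed int/unsigned-long comparison above is made well defined. */
	#define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

	int main(void)
	{
		unsigned long mem_start = 0x1000, mem_end = 0x9fff; /* hypothetical */
		unsigned long bufsiz = 1536;	/* stand-in for ETHOC_BUFSIZ */
		unsigned int num_bd;

		num_bd = min_t(unsigned int, 128,
			       (mem_end - mem_start + 1) / bufsiz);
		printf("num_bd = %u\n", num_bd);	/* 24 for this range */
		return 0;
	}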
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 38bf7cf2256d..c412e8026173 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -232,8 +232,11 @@ static int sa1100_irda_startup(struct sa1100_irda *si)
 	/*
 	 * Ensure that the ports for this device are setup correctly.
 	 */
-	if (si->pdata->startup)
-		si->pdata->startup(si->dev);
+	if (si->pdata->startup) {
+		ret = si->pdata->startup(si->dev);
+		if (ret)
+			return ret;
+	}
 
 	/*
 	 * Configure PPC for IRDA - we want to drive TXD2 low.
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index b02a981c87a8..34a6cfd17930 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ixp2000/enp2611.c
@@ -119,24 +119,9 @@ static struct ixp2400_msf_parameters enp2611_msf_parameters =
 	}
 };
 
-struct enp2611_ixpdev_priv
-{
-	struct ixpdev_priv		ixpdev_priv;
-	struct net_device_stats		stats;
-};
-
 static struct net_device *nds[3];
 static struct timer_list link_check_timer;
 
-static struct net_device_stats *enp2611_get_stats(struct net_device *dev)
-{
-	struct enp2611_ixpdev_priv *ip = netdev_priv(dev);
-
-	pm3386_get_stats(ip->ixpdev_priv.channel, &(ip->stats));
-
-	return &(ip->stats);
-}
-
 /* @@@ Poll the SFP moddef0 line too. */
 /* @@@ Try to use the pm3386 DOOL interrupt as well. */
 static void enp2611_check_link_status(unsigned long __dummy)
@@ -203,14 +188,13 @@ static int __init enp2611_init_module(void)
 
 	ports = pm3386_port_count();
 	for (i = 0; i < ports; i++) {
-		nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
+		nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
 		if (nds[i] == NULL) {
 			while (--i >= 0)
 				free_netdev(nds[i]);
 			return -ENOMEM;
 		}
 
-		nds[i]->get_stats = enp2611_get_stats;
 		pm3386_init_port(i);
 		pm3386_get_mac(i, nds[i]->dev_addr);
 	}
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 6baf3c94b3e8..e9d9d595e1b7 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -21,6 +21,7 @@
21#include "ixp2400_tx.ucode" 21#include "ixp2400_tx.ucode"
22#include "ixpdev_priv.h" 22#include "ixpdev_priv.h"
23#include "ixpdev.h" 23#include "ixpdev.h"
24#include "pm3386.h"
24 25
25#define DRV_MODULE_VERSION "0.2" 26#define DRV_MODULE_VERSION "0.2"
26 27
@@ -270,6 +271,15 @@ static int ixpdev_close(struct net_device *dev)
 	return 0;
 }
 
+static struct net_device_stats *ixpdev_get_stats(struct net_device *dev)
+{
+	struct ixpdev_priv *ip = netdev_priv(dev);
+
+	pm3386_get_stats(ip->channel, &(dev->stats));
+
+	return &(dev->stats);
+}
+
 static const struct net_device_ops ixpdev_netdev_ops = {
 	.ndo_open		= ixpdev_open,
 	.ndo_stop		= ixpdev_close,
@@ -277,6 +287,7 @@ static const struct net_device_ops ixpdev_netdev_ops = {
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_get_stats		= ixpdev_get_stats,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ixpdev_poll_controller,
 #endif
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index ecbadc5e2899..5bc8520e3e15 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -597,7 +597,8 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
 	void __iomem *mem_ptr2 = NULL;
 	void __iomem *db_ptr = NULL;
 
-	unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0;
+	resource_size_t mem_base, db_base;
+	unsigned long mem_len, db_len = 0, pci_len0 = 0;
 
 	struct pci_dev *pdev = adapter->pdev;
 	int pci_func = adapter->ahw.pci_func;
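The netxen type change is about 64-bit BARs on kernels where unsigned long is 32 bits: pci_resource_start() returns resource_size_t, which widens to 64 bits with 64-bit physical addressing, so storing the base in unsigned long can silently truncate it. A hedged sketch of how the corrected types line up with the PCI core API (the BAR number and function name are illustrative, not necessarily what netxen uses):

	static int example_map_bar(struct pci_dev *pdev)
	{
		/* resource_size_t keeps the full physical address */
		resource_size_t mem_base = pci_resource_start(pdev, 0);
		unsigned long mem_len = pci_resource_len(pdev, 0);
		void __iomem *mem_ptr;

		mem_ptr = ioremap(mem_base, mem_len);	/* takes resource_size_t */
		if (!mem_ptr)
			return -EIO;
		iounmap(mem_ptr);
		return 0;
	}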
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index ee8ad3e180dd..b58965a2b3ae 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -251,6 +251,7 @@ static void el3_tx_timeout(struct net_device *dev);
 static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static const struct ethtool_ops netdev_ethtool_ops;
 static void set_rx_mode(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
 
 static void tc574_detach(struct pcmcia_device *p_dev);
 
@@ -266,7 +267,7 @@ static const struct net_device_ops el3_netdev_ops = {
 	.ndo_tx_timeout		= el3_tx_timeout,
 	.ndo_get_stats		= el3_get_stats,
 	.ndo_do_ioctl		= el3_ioctl,
-	.ndo_set_multicast_list = set_rx_mode,
+	.ndo_set_multicast_list = set_multicast_list,
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -1161,6 +1162,16 @@ static void set_rx_mode(struct net_device *dev)
 		outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
 }
 
+static void set_multicast_list(struct net_device *dev)
+{
+	struct el3_private *lp = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->window_lock, flags);
+	set_rx_mode(dev);
+	spin_unlock_irqrestore(&lp->window_lock, flags);
+}
+
 static int el3_close(struct net_device *dev)
 {
 	unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 97b170448ce6..7d9fc06ceb98 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -115,7 +115,9 @@ enum mac_version {
 	RTL_GIGA_MAC_VER_22 = 0x16, // 8168C
 	RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP
 	RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP
-	RTL_GIGA_MAC_VER_25 = 0x19  // 8168D
+	RTL_GIGA_MAC_VER_25 = 0x19, // 8168D
+	RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
+	RTL_GIGA_MAC_VER_27 = 0x1b  // 8168DP
 };
 
 #define _R(NAME,MAC,MASK) \
@@ -150,7 +152,9 @@ static const struct {
150 _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E 152 _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E
151 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E 153 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E
152 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E 154 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E
153 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880) // PCI-E 155 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
156 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
157 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880) // PCI-E
154}; 158};
155#undef _R 159#undef _R
156 160
@@ -253,6 +257,13 @@ enum rtl8168_8101_registers {
 	DBG_REG = 0xd1,
 #define	FIX_NAK_1			(1 << 4)
 #define	FIX_NAK_2			(1 << 3)
+	EFUSEAR = 0xdc,
+#define	EFUSEAR_FLAG			0x80000000
+#define	EFUSEAR_WRITE_CMD		0x80000000
+#define	EFUSEAR_READ_CMD		0x00000000
+#define	EFUSEAR_REG_MASK		0x03ff
+#define	EFUSEAR_REG_SHIFT		8
+#define	EFUSEAR_DATA_MASK		0xff
 };
 
 enum rtl_register_content {
@@ -568,6 +579,14 @@ static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value)
 	mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value);
 }
 
+static void mdio_plus_minus(void __iomem *ioaddr, int reg_addr, int p, int m)
+{
+	int val;
+
+	val = mdio_read(ioaddr, reg_addr);
+	mdio_write(ioaddr, reg_addr, (val | p) & ~m);
+}
+
 static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
 			   int val)
 {
@@ -651,6 +670,24 @@ static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
 	return value;
 }
 
+static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
+{
+	u8 value = 0xff;
+	unsigned int i;
+
+	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+
+	for (i = 0; i < 300; i++) {
+		if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
+			value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
+			break;
+		}
+		udelay(100);
+	}
+
+	return value;
+}
+
 static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
 {
 	RTL_W16(IntrMask, 0x0000);
@@ -1243,7 +1280,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
 		int mac_version;
 	} mac_info[] = {
 		/* 8168D family. */
-		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_25 },
+		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
+		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
+		{ 0x7c800000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
+		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },
 
 		/* 8168C family. */
 		{ 0x7cf00000, 0x3ca00000,	RTL_GIGA_MAC_VER_24 },
@@ -1648,74 +1688,903 @@ static void rtl8168c_4_hw_phy_config(void __iomem *ioaddr)
 	rtl8168c_3_hw_phy_config(ioaddr);
 }
 
-static void rtl8168d_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
 {
-	struct phy_reg phy_reg_init_0[] = {
+	static struct phy_reg phy_reg_init_0[] = {
 		{ 0x1f, 0x0001 },
-		{ 0x09, 0x2770 },
-		{ 0x08, 0x04d0 },
-		{ 0x0b, 0xad15 },
-		{ 0x0c, 0x5bf0 },
-		{ 0x1c, 0xf101 },
+		{ 0x06, 0x4064 },
+		{ 0x07, 0x2863 },
+		{ 0x08, 0x059c },
+		{ 0x09, 0x26b4 },
+		{ 0x0a, 0x6a19 },
+		{ 0x0b, 0xdcc8 },
+		{ 0x10, 0xf06d },
+		{ 0x14, 0x7f68 },
+		{ 0x18, 0x7fd9 },
+		{ 0x1c, 0xf0ff },
+		{ 0x1d, 0x3d9c },
 		{ 0x1f, 0x0003 },
-		{ 0x14, 0x94d7 },
-		{ 0x12, 0xf4d6 },
-		{ 0x09, 0xca0f },
-		{ 0x1f, 0x0002 },
-		{ 0x0b, 0x0b10 },
-		{ 0x0c, 0xd1f7 },
-		{ 0x1f, 0x0002 },
-		{ 0x06, 0x5461 },
+		{ 0x12, 0xf49f },
+		{ 0x13, 0x070b },
+		{ 0x1a, 0x05ad },
+		{ 0x14, 0x94c0 }
+	};
+	static struct phy_reg phy_reg_init_1[] = {
 		{ 0x1f, 0x0002 },
-		{ 0x05, 0x6662 },
+		{ 0x06, 0x5561 },
+		{ 0x1f, 0x0005 },
+		{ 0x05, 0x8332 },
+		{ 0x06, 0x5561 }
+	};
+	static struct phy_reg phy_reg_init_2[] = {
+		{ 0x1f, 0x0005 },
+		{ 0x05, 0xffc2 },
+		{ 0x1f, 0x0005 },
+		{ 0x05, 0x8000 },
+		{ 0x06, 0xf8f9 },
+		{ 0x06, 0xfaef },
+		{ 0x06, 0x59ee },
+		{ 0x06, 0xf8ea },
+		{ 0x06, 0x00ee },
+		{ 0x06, 0xf8eb },
+		{ 0x06, 0x00e0 },
+		{ 0x06, 0xf87c },
+		{ 0x06, 0xe1f8 },
+		{ 0x06, 0x7d59 },
+		{ 0x06, 0x0fef },
+		{ 0x06, 0x0139 },
+		{ 0x06, 0x029e },
+		{ 0x06, 0x06ef },
+		{ 0x06, 0x1039 },
+		{ 0x06, 0x089f },
+		{ 0x06, 0x2aee },
+		{ 0x06, 0xf8ea },
+		{ 0x06, 0x00ee },
+		{ 0x06, 0xf8eb },
+		{ 0x06, 0x01e0 },
+		{ 0x06, 0xf87c },
+		{ 0x06, 0xe1f8 },
+		{ 0x06, 0x7d58 },
+		{ 0x06, 0x409e },
+		{ 0x06, 0x0f39 },
+		{ 0x06, 0x46aa },
+		{ 0x06, 0x0bbf },
+		{ 0x06, 0x8290 },
+		{ 0x06, 0xd682 },
+		{ 0x06, 0x9802 },
+		{ 0x06, 0x014f },
+		{ 0x06, 0xae09 },
+		{ 0x06, 0xbf82 },
+		{ 0x06, 0x98d6 },
+		{ 0x06, 0x82a0 },
+		{ 0x06, 0x0201 },
+		{ 0x06, 0x4fef },
+		{ 0x06, 0x95fe },
+		{ 0x06, 0xfdfc },
+		{ 0x06, 0x05f8 },
+		{ 0x06, 0xf9fa },
+		{ 0x06, 0xeef8 },
+		{ 0x06, 0xea00 },
+		{ 0x06, 0xeef8 },
+		{ 0x06, 0xeb00 },
+		{ 0x06, 0xe2f8 },
+		{ 0x06, 0x7ce3 },
+		{ 0x06, 0xf87d },
+		{ 0x06, 0xa511 },
+		{ 0x06, 0x1112 },
+		{ 0x06, 0xd240 },
+		{ 0x06, 0xd644 },
+		{ 0x06, 0x4402 },
+		{ 0x06, 0x8217 },
+		{ 0x06, 0xd2a0 },
+		{ 0x06, 0xd6aa },
+		{ 0x06, 0xaa02 },
+		{ 0x06, 0x8217 },
+		{ 0x06, 0xae0f },
+		{ 0x06, 0xa544 },
+		{ 0x06, 0x4402 },
+		{ 0x06, 0xae4d },
+		{ 0x06, 0xa5aa },
+		{ 0x06, 0xaa02 },
+		{ 0x06, 0xae47 },
+		{ 0x06, 0xaf82 },
+		{ 0x06, 0x13ee },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x00ee },
+		{ 0x06, 0x834d },
+		{ 0x06, 0x0fee },
+		{ 0x06, 0x834c },
+		{ 0x06, 0x0fee },
+		{ 0x06, 0x834f },
+		{ 0x06, 0x00ee },
+		{ 0x06, 0x8351 },
+		{ 0x06, 0x00ee },
+		{ 0x06, 0x834a },
+		{ 0x06, 0xffee },
+		{ 0x06, 0x834b },
+		{ 0x06, 0xffe0 },
+		{ 0x06, 0x8330 },
+		{ 0x06, 0xe183 },
+		{ 0x06, 0x3158 },
+		{ 0x06, 0xfee4 },
+		{ 0x06, 0xf88a },
+		{ 0x06, 0xe5f8 },
+		{ 0x06, 0x8be0 },
+		{ 0x06, 0x8332 },
+		{ 0x06, 0xe183 },
+		{ 0x06, 0x3359 },
+		{ 0x06, 0x0fe2 },
+		{ 0x06, 0x834d },
+		{ 0x06, 0x0c24 },
+		{ 0x06, 0x5af0 },
+		{ 0x06, 0x1e12 },
+		{ 0x06, 0xe4f8 },
+		{ 0x06, 0x8ce5 },
+		{ 0x06, 0xf88d },
+		{ 0x06, 0xaf82 },
+		{ 0x06, 0x13e0 },
+		{ 0x06, 0x834f },
+		{ 0x06, 0x10e4 },
+		{ 0x06, 0x834f },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4e78 },
+		{ 0x06, 0x009f },
+		{ 0x06, 0x0ae0 },
+		{ 0x06, 0x834f },
+		{ 0x06, 0xa010 },
+		{ 0x06, 0xa5ee },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x01e0 },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x7805 },
+		{ 0x06, 0x9e9a },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4e78 },
+		{ 0x06, 0x049e },
+		{ 0x06, 0x10e0 },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x7803 },
+		{ 0x06, 0x9e0f },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4e78 },
+		{ 0x06, 0x019e },
+		{ 0x06, 0x05ae },
+		{ 0x06, 0x0caf },
+		{ 0x06, 0x81f8 },
+		{ 0x06, 0xaf81 },
+		{ 0x06, 0xa3af },
+		{ 0x06, 0x81dc },
+		{ 0x06, 0xaf82 },
+		{ 0x06, 0x13ee },
+		{ 0x06, 0x8348 },
+		{ 0x06, 0x00ee },
+		{ 0x06, 0x8349 },
+		{ 0x06, 0x00e0 },
+		{ 0x06, 0x8351 },
+		{ 0x06, 0x10e4 },
+		{ 0x06, 0x8351 },
+		{ 0x06, 0x5801 },
+		{ 0x06, 0x9fea },
+		{ 0x06, 0xd000 },
+		{ 0x06, 0xd180 },
+		{ 0x06, 0x1f66 },
+		{ 0x06, 0xe2f8 },
+		{ 0x06, 0xeae3 },
+		{ 0x06, 0xf8eb },
+		{ 0x06, 0x5af8 },
+		{ 0x06, 0x1e20 },
+		{ 0x06, 0xe6f8 },
+		{ 0x06, 0xeae5 },
+		{ 0x06, 0xf8eb },
+		{ 0x06, 0xd302 },
+		{ 0x06, 0xb3fe },
+		{ 0x06, 0xe2f8 },
+		{ 0x06, 0x7cef },
+		{ 0x06, 0x325b },
+		{ 0x06, 0x80e3 },
+		{ 0x06, 0xf87d },
+		{ 0x06, 0x9e03 },
+		{ 0x06, 0x7dff },
+		{ 0x06, 0xff0d },
+		{ 0x06, 0x581c },
+		{ 0x06, 0x551a },
+		{ 0x06, 0x6511 },
+		{ 0x06, 0xa190 },
+		{ 0x06, 0xd3e2 },
+		{ 0x06, 0x8348 },
+		{ 0x06, 0xe383 },
+		{ 0x06, 0x491b },
+		{ 0x06, 0x56ab },
+		{ 0x06, 0x08ef },
+		{ 0x06, 0x56e6 },
+		{ 0x06, 0x8348 },
+		{ 0x06, 0xe783 },
+		{ 0x06, 0x4910 },
+		{ 0x06, 0xd180 },
+		{ 0x06, 0x1f66 },
+		{ 0x06, 0xa004 },
+		{ 0x06, 0xb9e2 },
+		{ 0x06, 0x8348 },
+		{ 0x06, 0xe383 },
+		{ 0x06, 0x49ef },
+		{ 0x06, 0x65e2 },
+		{ 0x06, 0x834a },
+		{ 0x06, 0xe383 },
+		{ 0x06, 0x4b1b },
+		{ 0x06, 0x56aa },
+		{ 0x06, 0x0eef },
+		{ 0x06, 0x56e6 },
+		{ 0x06, 0x834a },
+		{ 0x06, 0xe783 },
+		{ 0x06, 0x4be2 },
+		{ 0x06, 0x834d },
+		{ 0x06, 0xe683 },
+		{ 0x06, 0x4ce0 },
+		{ 0x06, 0x834d },
+		{ 0x06, 0xa000 },
+		{ 0x06, 0x0caf },
+		{ 0x06, 0x81dc },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4d10 },
+		{ 0x06, 0xe483 },
+		{ 0x06, 0x4dae },
+		{ 0x06, 0x0480 },
+		{ 0x06, 0xe483 },
+		{ 0x06, 0x4de0 },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x7803 },
+		{ 0x06, 0x9e0b },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4e78 },
+		{ 0x06, 0x049e },
+		{ 0x06, 0x04ee },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x02e0 },
+		{ 0x06, 0x8332 },
+		{ 0x06, 0xe183 },
+		{ 0x06, 0x3359 },
+		{ 0x06, 0x0fe2 },
+		{ 0x06, 0x834d },
+		{ 0x06, 0x0c24 },
+		{ 0x06, 0x5af0 },
+		{ 0x06, 0x1e12 },
+		{ 0x06, 0xe4f8 },
+		{ 0x06, 0x8ce5 },
+		{ 0x06, 0xf88d },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x30e1 },
+		{ 0x06, 0x8331 },
+		{ 0x06, 0x6801 },
+		{ 0x06, 0xe4f8 },
+		{ 0x06, 0x8ae5 },
+		{ 0x06, 0xf88b },
+		{ 0x06, 0xae37 },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4e03 },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4ce1 },
+		{ 0x06, 0x834d },
+		{ 0x06, 0x1b01 },
+		{ 0x06, 0x9e04 },
+		{ 0x06, 0xaaa1 },
+		{ 0x06, 0xaea8 },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4e04 },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4f00 },
+		{ 0x06, 0xaeab },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4f78 },
+		{ 0x06, 0x039f },
+		{ 0x06, 0x14ee },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x05d2 },
+		{ 0x06, 0x40d6 },
+		{ 0x06, 0x5554 },
+		{ 0x06, 0x0282 },
+		{ 0x06, 0x17d2 },
+		{ 0x06, 0xa0d6 },
+		{ 0x06, 0xba00 },
+		{ 0x06, 0x0282 },
+		{ 0x06, 0x17fe },
+		{ 0x06, 0xfdfc },
+		{ 0x06, 0x05f8 },
+		{ 0x06, 0xe0f8 },
+		{ 0x06, 0x60e1 },
+		{ 0x06, 0xf861 },
+		{ 0x06, 0x6802 },
+		{ 0x06, 0xe4f8 },
+		{ 0x06, 0x60e5 },
+		{ 0x06, 0xf861 },
+		{ 0x06, 0xe0f8 },
+		{ 0x06, 0x48e1 },
+		{ 0x06, 0xf849 },
+		{ 0x06, 0x580f },
+		{ 0x06, 0x1e02 },
+		{ 0x06, 0xe4f8 },
+		{ 0x06, 0x48e5 },
+		{ 0x06, 0xf849 },
+		{ 0x06, 0xd000 },
+		{ 0x06, 0x0282 },
+		{ 0x06, 0x5bbf },
+		{ 0x06, 0x8350 },
+		{ 0x06, 0xef46 },
+		{ 0x06, 0xdc19 },
+		{ 0x06, 0xddd0 },
+		{ 0x06, 0x0102 },
+		{ 0x06, 0x825b },
+		{ 0x06, 0x0282 },
+		{ 0x06, 0x77e0 },
+		{ 0x06, 0xf860 },
+		{ 0x06, 0xe1f8 },
+		{ 0x06, 0x6158 },
+		{ 0x06, 0xfde4 },
+		{ 0x06, 0xf860 },
+		{ 0x06, 0xe5f8 },
+		{ 0x06, 0x61fc },
+		{ 0x06, 0x04f9 },
+		{ 0x06, 0xfafb },
+		{ 0x06, 0xc6bf },
+		{ 0x06, 0xf840 },
+		{ 0x06, 0xbe83 },
+		{ 0x06, 0x50a0 },
+		{ 0x06, 0x0101 },
+		{ 0x06, 0x071b },
+		{ 0x06, 0x89cf },
+		{ 0x06, 0xd208 },
+		{ 0x06, 0xebdb },
+		{ 0x06, 0x19b2 },
+		{ 0x06, 0xfbff },
+		{ 0x06, 0xfefd },
+		{ 0x06, 0x04f8 },
+		{ 0x06, 0xe0f8 },
+		{ 0x06, 0x48e1 },
+		{ 0x06, 0xf849 },
+		{ 0x06, 0x6808 },
+		{ 0x06, 0xe4f8 },
+		{ 0x06, 0x48e5 },
+		{ 0x06, 0xf849 },
+		{ 0x06, 0x58f7 },
+		{ 0x06, 0xe4f8 },
+		{ 0x06, 0x48e5 },
+		{ 0x06, 0xf849 },
+		{ 0x06, 0xfc04 },
+		{ 0x06, 0x4d20 },
+		{ 0x06, 0x0002 },
+		{ 0x06, 0x4e22 },
+		{ 0x06, 0x0002 },
+		{ 0x06, 0x4ddf },
+		{ 0x06, 0xff01 },
+		{ 0x06, 0x4edd },
+		{ 0x06, 0xff01 },
+		{ 0x05, 0x83d4 },
+		{ 0x06, 0x8000 },
+		{ 0x05, 0x83d8 },
+		{ 0x06, 0x8051 },
+		{ 0x02, 0x6010 },
+		{ 0x03, 0xdc00 },
+		{ 0x05, 0xfff6 },
+		{ 0x06, 0x00fc },
 		{ 0x1f, 0x0000 },
-		{ 0x14, 0x0060 },
+
 		{ 0x1f, 0x0000 },
-		{ 0x0d, 0xf8a0 },
+		{ 0x0d, 0xf880 },
+		{ 0x1f, 0x0000 }
+	};
+
+	rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
+
+	mdio_write(ioaddr, 0x1f, 0x0002);
+	mdio_plus_minus(ioaddr, 0x0b, 0x0010, 0x00ef);
+	mdio_plus_minus(ioaddr, 0x0c, 0xa200, 0x5d00);
+
+	rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
+
+	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+		struct phy_reg phy_reg_init[] = {
+			{ 0x1f, 0x0002 },
+			{ 0x05, 0x669a },
+			{ 0x1f, 0x0005 },
+			{ 0x05, 0x8330 },
+			{ 0x06, 0x669a },
+			{ 0x1f, 0x0002 }
+		};
+		int val;
+
+		rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+		val = mdio_read(ioaddr, 0x0d);
+
+		if ((val & 0x00ff) != 0x006c) {
+			u32 set[] = {
+				0x0065, 0x0066, 0x0067, 0x0068,
+				0x0069, 0x006a, 0x006b, 0x006c
+			};
+			int i;
+
+			mdio_write(ioaddr, 0x1f, 0x0002);
+
+			val &= 0xff00;
+			for (i = 0; i < ARRAY_SIZE(set); i++)
+				mdio_write(ioaddr, 0x0d, val | set[i]);
+		}
+	} else {
+		struct phy_reg phy_reg_init[] = {
+			{ 0x1f, 0x0002 },
+			{ 0x05, 0x6662 },
+			{ 0x1f, 0x0005 },
+			{ 0x05, 0x8330 },
+			{ 0x06, 0x6662 }
+		};
+
+		rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+	}
+
+	mdio_write(ioaddr, 0x1f, 0x0002);
+	mdio_patch(ioaddr, 0x0d, 0x0300);
+	mdio_patch(ioaddr, 0x0f, 0x0010);
+
+	mdio_write(ioaddr, 0x1f, 0x0002);
+	mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600);
+	mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000);
+
+	rtl_phy_write(ioaddr, phy_reg_init_2, ARRAY_SIZE(phy_reg_init_2));
+}
+
+static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
+{
+	static struct phy_reg phy_reg_init_0[] = {
+		{ 0x1f, 0x0001 },
+		{ 0x06, 0x4064 },
+		{ 0x07, 0x2863 },
+		{ 0x08, 0x059c },
+		{ 0x09, 0x26b4 },
+		{ 0x0a, 0x6a19 },
+		{ 0x0b, 0xdcc8 },
+		{ 0x10, 0xf06d },
+		{ 0x14, 0x7f68 },
+		{ 0x18, 0x7fd9 },
+		{ 0x1c, 0xf0ff },
+		{ 0x1d, 0x3d9c },
+		{ 0x1f, 0x0003 },
+		{ 0x12, 0xf49f },
+		{ 0x13, 0x070b },
+		{ 0x1a, 0x05ad },
+		{ 0x14, 0x94c0 },
+
+		{ 0x1f, 0x0002 },
+		{ 0x06, 0x5561 },
+		{ 0x1f, 0x0005 },
+		{ 0x05, 0x8332 },
+		{ 0x06, 0x5561 }
+	};
+	static struct phy_reg phy_reg_init_1[] = {
+		{ 0x1f, 0x0005 },
+		{ 0x05, 0xffc2 },
 		{ 0x1f, 0x0005 },
-		{ 0x05, 0xffc2 }
+		{ 0x05, 0x8000 },
+		{ 0x06, 0xf8f9 },
+		{ 0x06, 0xfaee },
+		{ 0x06, 0xf8ea },
+		{ 0x06, 0x00ee },
+		{ 0x06, 0xf8eb },
+		{ 0x06, 0x00e2 },
+		{ 0x06, 0xf87c },
+		{ 0x06, 0xe3f8 },
+		{ 0x06, 0x7da5 },
+		{ 0x06, 0x1111 },
+		{ 0x06, 0x12d2 },
+		{ 0x06, 0x40d6 },
+		{ 0x06, 0x4444 },
+		{ 0x06, 0x0281 },
+		{ 0x06, 0xc6d2 },
+		{ 0x06, 0xa0d6 },
+		{ 0x06, 0xaaaa },
+		{ 0x06, 0x0281 },
+		{ 0x06, 0xc6ae },
+		{ 0x06, 0x0fa5 },
+		{ 0x06, 0x4444 },
+		{ 0x06, 0x02ae },
+		{ 0x06, 0x4da5 },
+		{ 0x06, 0xaaaa },
+		{ 0x06, 0x02ae },
+		{ 0x06, 0x47af },
+		{ 0x06, 0x81c2 },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4e00 },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4d0f },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4c0f },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4f00 },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x5100 },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4aff },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4bff },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x30e1 },
+		{ 0x06, 0x8331 },
+		{ 0x06, 0x58fe },
+		{ 0x06, 0xe4f8 },
+		{ 0x06, 0x8ae5 },
+		{ 0x06, 0xf88b },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x32e1 },
+		{ 0x06, 0x8333 },
+		{ 0x06, 0x590f },
+		{ 0x06, 0xe283 },
+		{ 0x06, 0x4d0c },
+		{ 0x06, 0x245a },
+		{ 0x06, 0xf01e },
+		{ 0x06, 0x12e4 },
+		{ 0x06, 0xf88c },
+		{ 0x06, 0xe5f8 },
+		{ 0x06, 0x8daf },
+		{ 0x06, 0x81c2 },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4f10 },
+		{ 0x06, 0xe483 },
+		{ 0x06, 0x4fe0 },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x7800 },
+		{ 0x06, 0x9f0a },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4fa0 },
+		{ 0x06, 0x10a5 },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4e01 },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4e78 },
+		{ 0x06, 0x059e },
+		{ 0x06, 0x9ae0 },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x7804 },
+		{ 0x06, 0x9e10 },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4e78 },
+		{ 0x06, 0x039e },
+		{ 0x06, 0x0fe0 },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x7801 },
+		{ 0x06, 0x9e05 },
+		{ 0x06, 0xae0c },
+		{ 0x06, 0xaf81 },
+		{ 0x06, 0xa7af },
+		{ 0x06, 0x8152 },
+		{ 0x06, 0xaf81 },
+		{ 0x06, 0x8baf },
+		{ 0x06, 0x81c2 },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4800 },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4900 },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x5110 },
+		{ 0x06, 0xe483 },
+		{ 0x06, 0x5158 },
+		{ 0x06, 0x019f },
+		{ 0x06, 0xead0 },
+		{ 0x06, 0x00d1 },
+		{ 0x06, 0x801f },
+		{ 0x06, 0x66e2 },
+		{ 0x06, 0xf8ea },
+		{ 0x06, 0xe3f8 },
+		{ 0x06, 0xeb5a },
+		{ 0x06, 0xf81e },
+		{ 0x06, 0x20e6 },
+		{ 0x06, 0xf8ea },
+		{ 0x06, 0xe5f8 },
+		{ 0x06, 0xebd3 },
+		{ 0x06, 0x02b3 },
+		{ 0x06, 0xfee2 },
+		{ 0x06, 0xf87c },
+		{ 0x06, 0xef32 },
+		{ 0x06, 0x5b80 },
+		{ 0x06, 0xe3f8 },
+		{ 0x06, 0x7d9e },
+		{ 0x06, 0x037d },
+		{ 0x06, 0xffff },
+		{ 0x06, 0x0d58 },
+		{ 0x06, 0x1c55 },
+		{ 0x06, 0x1a65 },
+		{ 0x06, 0x11a1 },
+		{ 0x06, 0x90d3 },
+		{ 0x06, 0xe283 },
+		{ 0x06, 0x48e3 },
+		{ 0x06, 0x8349 },
+		{ 0x06, 0x1b56 },
+		{ 0x06, 0xab08 },
+		{ 0x06, 0xef56 },
+		{ 0x06, 0xe683 },
+		{ 0x06, 0x48e7 },
+		{ 0x06, 0x8349 },
+		{ 0x06, 0x10d1 },
+		{ 0x06, 0x801f },
+		{ 0x06, 0x66a0 },
+		{ 0x06, 0x04b9 },
+		{ 0x06, 0xe283 },
+		{ 0x06, 0x48e3 },
+		{ 0x06, 0x8349 },
+		{ 0x06, 0xef65 },
+		{ 0x06, 0xe283 },
+		{ 0x06, 0x4ae3 },
+		{ 0x06, 0x834b },
+		{ 0x06, 0x1b56 },
+		{ 0x06, 0xaa0e },
+		{ 0x06, 0xef56 },
+		{ 0x06, 0xe683 },
+		{ 0x06, 0x4ae7 },
+		{ 0x06, 0x834b },
+		{ 0x06, 0xe283 },
+		{ 0x06, 0x4de6 },
+		{ 0x06, 0x834c },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4da0 },
+		{ 0x06, 0x000c },
+		{ 0x06, 0xaf81 },
+		{ 0x06, 0x8be0 },
+		{ 0x06, 0x834d },
+		{ 0x06, 0x10e4 },
+		{ 0x06, 0x834d },
+		{ 0x06, 0xae04 },
+		{ 0x06, 0x80e4 },
+		{ 0x06, 0x834d },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x4e78 },
+		{ 0x06, 0x039e },
+		{ 0x06, 0x0be0 },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x7804 },
+		{ 0x06, 0x9e04 },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4e02 },
+		{ 0x06, 0xe083 },
+		{ 0x06, 0x32e1 },
+		{ 0x06, 0x8333 },
+		{ 0x06, 0x590f },
+		{ 0x06, 0xe283 },
+		{ 0x06, 0x4d0c },
+		{ 0x06, 0x245a },
+		{ 0x06, 0xf01e },
+		{ 0x06, 0x12e4 },
+		{ 0x06, 0xf88c },
+		{ 0x06, 0xe5f8 },
+		{ 0x06, 0x8de0 },
+		{ 0x06, 0x8330 },
+		{ 0x06, 0xe183 },
+		{ 0x06, 0x3168 },
+		{ 0x06, 0x01e4 },
+		{ 0x06, 0xf88a },
+		{ 0x06, 0xe5f8 },
+		{ 0x06, 0x8bae },
+		{ 0x06, 0x37ee },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x03e0 },
+		{ 0x06, 0x834c },
+		{ 0x06, 0xe183 },
+		{ 0x06, 0x4d1b },
+		{ 0x06, 0x019e },
+		{ 0x06, 0x04aa },
+		{ 0x06, 0xa1ae },
+		{ 0x06, 0xa8ee },
+		{ 0x06, 0x834e },
+		{ 0x06, 0x04ee },
+		{ 0x06, 0x834f },
+		{ 0x06, 0x00ae },
+		{ 0x06, 0xabe0 },
+		{ 0x06, 0x834f },
+		{ 0x06, 0x7803 },
+		{ 0x06, 0x9f14 },
+		{ 0x06, 0xee83 },
+		{ 0x06, 0x4e05 },
+		{ 0x06, 0xd240 },
+		{ 0x06, 0xd655 },
+		{ 0x06, 0x5402 },
+		{ 0x06, 0x81c6 },
+		{ 0x06, 0xd2a0 },
+		{ 0x06, 0xd6ba },
+		{ 0x06, 0x0002 },
+		{ 0x06, 0x81c6 },
+		{ 0x06, 0xfefd },
+		{ 0x06, 0xfc05 },
+		{ 0x06, 0xf8e0 },
+		{ 0x06, 0xf860 },
+		{ 0x06, 0xe1f8 },
+		{ 0x06, 0x6168 },
+		{ 0x06, 0x02e4 },
+		{ 0x06, 0xf860 },
+		{ 0x06, 0xe5f8 },
+		{ 0x06, 0x61e0 },
+		{ 0x06, 0xf848 },
+		{ 0x06, 0xe1f8 },
+		{ 0x06, 0x4958 },
+		{ 0x06, 0x0f1e },
+		{ 0x06, 0x02e4 },
+		{ 0x06, 0xf848 },
+		{ 0x06, 0xe5f8 },
+		{ 0x06, 0x49d0 },
+		{ 0x06, 0x0002 },
+		{ 0x06, 0x820a },
+		{ 0x06, 0xbf83 },
+		{ 0x06, 0x50ef },
+		{ 0x06, 0x46dc },
+		{ 0x06, 0x19dd },
+		{ 0x06, 0xd001 },
+		{ 0x06, 0x0282 },
+		{ 0x06, 0x0a02 },
+		{ 0x06, 0x8226 },
+		{ 0x06, 0xe0f8 },
+		{ 0x06, 0x60e1 },
+		{ 0x06, 0xf861 },
+		{ 0x06, 0x58fd },
+		{ 0x06, 0xe4f8 },
+		{ 0x06, 0x60e5 },
+		{ 0x06, 0xf861 },
+		{ 0x06, 0xfc04 },
+		{ 0x06, 0xf9fa },
+		{ 0x06, 0xfbc6 },
+		{ 0x06, 0xbff8 },
+		{ 0x06, 0x40be },
+		{ 0x06, 0x8350 },
+		{ 0x06, 0xa001 },
+		{ 0x06, 0x0107 },
+		{ 0x06, 0x1b89 },
+		{ 0x06, 0xcfd2 },
+		{ 0x06, 0x08eb },
+		{ 0x06, 0xdb19 },
+		{ 0x06, 0xb2fb },
+		{ 0x06, 0xfffe },
+		{ 0x06, 0xfd04 },
+		{ 0x06, 0xf8e0 },
+		{ 0x06, 0xf848 },
+		{ 0x06, 0xe1f8 },
+		{ 0x06, 0x4968 },
+		{ 0x06, 0x08e4 },
+		{ 0x06, 0xf848 },
+		{ 0x06, 0xe5f8 },
+		{ 0x06, 0x4958 },
+		{ 0x06, 0xf7e4 },
+		{ 0x06, 0xf848 },
+		{ 0x06, 0xe5f8 },
+		{ 0x06, 0x49fc },
+		{ 0x06, 0x044d },
+		{ 0x06, 0x2000 },
+		{ 0x06, 0x024e },
+		{ 0x06, 0x2200 },
+		{ 0x06, 0x024d },
+		{ 0x06, 0xdfff },
+		{ 0x06, 0x014e },
+		{ 0x06, 0xddff },
+		{ 0x06, 0x0100 },
+		{ 0x05, 0x83d8 },
+		{ 0x06, 0x8000 },
+		{ 0x03, 0xdc00 },
+		{ 0x05, 0xfff6 },
+		{ 0x06, 0x00fc },
+		{ 0x1f, 0x0000 },
+
+		{ 0x1f, 0x0000 },
+		{ 0x0d, 0xf880 },
+		{ 0x1f, 0x0000 }
 	};
 
 	rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
 
-	if (mdio_read(ioaddr, 0x06) == 0xc400) {
-		struct phy_reg phy_reg_init_1[] = {
+	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+		struct phy_reg phy_reg_init[] = {
+			{ 0x1f, 0x0002 },
+			{ 0x05, 0x669a },
 			{ 0x1f, 0x0005 },
-			{ 0x01, 0x0300 },
-			{ 0x1f, 0x0000 },
-			{ 0x11, 0x401c },
-			{ 0x16, 0x4100 },
+			{ 0x05, 0x8330 },
+			{ 0x06, 0x669a },
+
+			{ 0x1f, 0x0002 }
+		};
+		int val;
+
+		rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+		val = mdio_read(ioaddr, 0x0d);
+		if ((val & 0x00ff) != 0x006c) {
+			u32 set[] = {
+				0x0065, 0x0066, 0x0067, 0x0068,
+				0x0069, 0x006a, 0x006b, 0x006c
+			};
+			int i;
+
+			mdio_write(ioaddr, 0x1f, 0x0002);
+
+			val &= 0xff00;
+			for (i = 0; i < ARRAY_SIZE(set); i++)
+				mdio_write(ioaddr, 0x0d, val | set[i]);
+		}
+	} else {
+		struct phy_reg phy_reg_init[] = {
+			{ 0x1f, 0x0002 },
+			{ 0x05, 0x2642 },
 			{ 0x1f, 0x0005 },
-			{ 0x07, 0x0010 },
-			{ 0x05, 0x83dc },
-			{ 0x06, 0x087d },
-			{ 0x05, 0x8300 },
-			{ 0x06, 0x0101 },
-			{ 0x06, 0x05f8 },
-			{ 0x06, 0xf9fa },
-			{ 0x06, 0xfbef },
-			{ 0x06, 0x79e2 },
-			{ 0x06, 0x835f },
-			{ 0x06, 0xe0f8 },
-			{ 0x06, 0x9ae1 },
-			{ 0x06, 0xf89b },
-			{ 0x06, 0xef31 },
-			{ 0x06, 0x3b65 },
-			{ 0x06, 0xaa07 },
-			{ 0x06, 0x81e4 },
-			{ 0x06, 0xf89a },
-			{ 0x06, 0xe5f8 },
-			{ 0x06, 0x9baf },
-			{ 0x06, 0x06ae },
-			{ 0x05, 0x83dc },
-			{ 0x06, 0x8300 },
+			{ 0x05, 0x8330 },
+			{ 0x06, 0x2642 }
 		};
 
-		rtl_phy_write(ioaddr, phy_reg_init_1,
-			      ARRAY_SIZE(phy_reg_init_1));
+		rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 	}
 
-	mdio_write(ioaddr, 0x1f, 0x0000);
+	mdio_write(ioaddr, 0x1f, 0x0002);
+	mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600);
+	mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000);
+
+	mdio_write(ioaddr, 0x1f, 0x0001);
+	mdio_write(ioaddr, 0x17, 0x0cc0);
+
+	mdio_write(ioaddr, 0x1f, 0x0002);
+	mdio_patch(ioaddr, 0x0f, 0x0017);
+
+	rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
+}
+
+static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr)
+{
+	struct phy_reg phy_reg_init[] = {
+		{ 0x1f, 0x0002 },
+		{ 0x10, 0x0008 },
+		{ 0x0d, 0x006c },
+
+		{ 0x1f, 0x0000 },
+		{ 0x0d, 0xf880 },
+
+		{ 0x1f, 0x0001 },
+		{ 0x17, 0x0cc0 },
+
+		{ 0x1f, 0x0001 },
+		{ 0x0b, 0xa4d8 },
+		{ 0x09, 0x281c },
+		{ 0x07, 0x2883 },
+		{ 0x0a, 0x6b35 },
+		{ 0x1d, 0x3da4 },
+		{ 0x1c, 0xeffd },
+		{ 0x14, 0x7f52 },
+		{ 0x18, 0x7fc6 },
+		{ 0x08, 0x0601 },
+		{ 0x06, 0x4063 },
+		{ 0x10, 0xf074 },
+		{ 0x1f, 0x0003 },
+		{ 0x13, 0x0789 },
+		{ 0x12, 0xf4bd },
+		{ 0x1a, 0x04fd },
+		{ 0x14, 0x84b0 },
+		{ 0x1f, 0x0000 },
+		{ 0x00, 0x9200 },
+
+		{ 0x1f, 0x0005 },
+		{ 0x01, 0x0340 },
+		{ 0x1f, 0x0001 },
+		{ 0x04, 0x4000 },
+		{ 0x03, 0x1d21 },
+		{ 0x02, 0x0c32 },
+		{ 0x01, 0x0200 },
+		{ 0x00, 0x5554 },
+		{ 0x04, 0x4800 },
+		{ 0x04, 0x4000 },
+		{ 0x04, 0xf000 },
+		{ 0x03, 0xdf01 },
+		{ 0x02, 0xdf20 },
+		{ 0x01, 0x101a },
+		{ 0x00, 0xa0ff },
+		{ 0x04, 0xf800 },
+		{ 0x04, 0xf000 },
+		{ 0x1f, 0x0000 },
+
+		{ 0x1f, 0x0007 },
+		{ 0x1e, 0x0023 },
+		{ 0x16, 0x0000 },
+		{ 0x1f, 0x0000 }
+	};
+
+	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 }
 
 static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
@@ -1792,7 +2661,13 @@ static void rtl_hw_phy_config(struct net_device *dev)
 		rtl8168cp_2_hw_phy_config(ioaddr);
 		break;
 	case RTL_GIGA_MAC_VER_25:
-		rtl8168d_hw_phy_config(ioaddr);
+		rtl8168d_1_hw_phy_config(ioaddr);
+		break;
+	case RTL_GIGA_MAC_VER_26:
+		rtl8168d_2_hw_phy_config(ioaddr);
+		break;
+	case RTL_GIGA_MAC_VER_27:
+		rtl8168d_3_hw_phy_config(ioaddr);
 		break;
 
 	default:
@@ -2863,6 +3738,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
 		break;
 
 	case RTL_GIGA_MAC_VER_25:
+	case RTL_GIGA_MAC_VER_26:
+	case RTL_GIGA_MAC_VER_27:
 		rtl_hw_start_8168d(ioaddr, pdev);
 		break;
 
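For context on the mac_info[] ordering earlier in this diff: rtl8169_get_mac_version() matches the chip's TxConfig register against each (mask, value) pair in table order and stops at the first hit, which is why the more specific 0x7cf00000 masks for the new 8168D entries are listed before the looser 0x7c800000 fallbacks. A simplified sketch of that first-match lookup (structure and names are illustrative, not the driver's exact code):

	struct mac_match {
		u32 mask;
		u32 val;
		int ver;
	};

	static int lookup_mac_version(const struct mac_match *tbl, int n,
				      u32 txconfig)
	{
		int i;

		/* first entry whose masked TxConfig value matches wins */
		for (i = 0; i < n; i++)
			if ((txconfig & tbl[i].mask) == tbl[i].val)
				return tbl[i].ver;
		return -1;	/* unknown chip */
	}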
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6fdaba8674b9..ed4a508ef262 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -62,8 +62,11 @@ static char *devid=NULL;
 static struct usb_eth_dev usb_dev_id[] = {
 #define	PEGASUS_DEV(pn, vid, pid, flags)	\
 	{.name = pn, .vendor = vid, .device = pid, .private = flags},
+#define	PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \
+	PEGASUS_DEV(pn, vid, pid, flags)
 #include "pegasus.h"
 #undef	PEGASUS_DEV
+#undef	PEGASUS_DEV_CLASS
 	{NULL, 0, 0, 0},
 	{NULL, 0, 0, 0}
 };
@@ -71,8 +74,18 @@ static struct usb_eth_dev usb_dev_id[] = {
 static struct usb_device_id pegasus_ids[] = {
 #define	PEGASUS_DEV(pn, vid, pid, flags) \
 	{.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid},
+/*
+ * The Belkin F8T012xx1 bluetooth adaptor has the same vendor and product
+ * IDs as the Belkin F5D5050, so we need to teach the pegasus driver to
+ * ignore adaptors belonging to the "Wireless" class 0xE0. For this one
+ * case anyway, seeing as the pegasus is for "Wired" adaptors.
+ */
+#define	PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \
+	{.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_CLASS), \
+	.idVendor = vid, .idProduct = pid, .bDeviceClass = dclass},
 #include "pegasus.h"
 #undef	PEGASUS_DEV
+#undef	PEGASUS_DEV_CLASS
 	{},
 	{}
 };
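Expanded by hand, the new PEGASUS_DEV_CLASS entry for the Belkin adaptor becomes an ordinary usb_device_id that also insists on bDeviceClass, so the USB core will not bind this driver to the class-0xE0 (Wireless Controller) bluetooth sibling. Roughly (assuming VENDOR_BELKIN is 0x050d, per pegasus.h):

	static const struct usb_device_id belkin_wired_only = {
		.match_flags	= USB_DEVICE_ID_MATCH_DEVICE |
				  USB_DEVICE_ID_MATCH_DEV_CLASS,
		.idVendor	= 0x050d,	/* VENDOR_BELKIN */
		.idProduct	= 0x0121,
		.bDeviceClass	= 0x00,		/* class per interface, not 0xE0 */
	};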
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index f968c834ff63..5d02f0200737 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -202,7 +202,11 @@ PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701,
 		DEFAULT_GPIO_RESET | PEGASUS_II )
 PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100,
 		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121,
+/*
+ * Distinguish between this Belkin adaptor and the Belkin bluetooth adaptors
+ * with the same product IDs by checking the device class too.
+ */
+PEGASUS_DEV_CLASS( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00,
 		DEFAULT_GPIO_RESET | PEGASUS_II )
 PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986,
 		DEFAULT_GPIO_RESET )
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
new file mode 100644
index 000000000000..880f5098eac9
--- /dev/null
+++ b/drivers/net/vmxnet3/Makefile
@@ -0,0 +1,35 @@
+################################################################################
+#
+# Linux driver for VMware's vmxnet3 ethernet NIC.
+#
+# Copyright (C) 2007-2009, VMware, Inc. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; version 2 of the License and no later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+# NON INFRINGEMENT. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+#
+#
+################################################################################
+
+#
+# Makefile for the VMware vmxnet3 ethernet NIC driver
+#
+
+obj-$(CONFIG_VMXNET3) += vmxnet3.o
+
+vmxnet3-objs := vmxnet3_drv.o vmxnet3_ethtool.o
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
new file mode 100644
index 000000000000..37108fb226d3
--- /dev/null
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -0,0 +1,96 @@
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ *
+ */
+
+#ifndef _UPT1_DEFS_H
+#define _UPT1_DEFS_H
+
+struct UPT1_TxStats {
+	u64			TSOPktsTxOK;  /* TSO pkts post-segmentation */
+	u64			TSOBytesTxOK;
+	u64			ucastPktsTxOK;
+	u64			ucastBytesTxOK;
+	u64			mcastPktsTxOK;
+	u64			mcastBytesTxOK;
+	u64			bcastPktsTxOK;
+	u64			bcastBytesTxOK;
+	u64			pktsTxError;
+	u64			pktsTxDiscard;
+};
+
+struct UPT1_RxStats {
+	u64			LROPktsRxOK;	/* LRO pkts */
+	u64			LROBytesRxOK;	/* bytes from LRO pkts */
+	/* the following counters are for pkts from the wire, i.e., pre-LRO */
+	u64			ucastPktsRxOK;
+	u64			ucastBytesRxOK;
+	u64			mcastPktsRxOK;
+	u64			mcastBytesRxOK;
+	u64			bcastPktsRxOK;
+	u64			bcastBytesRxOK;
+	u64			pktsRxOutOfBuf;
+	u64			pktsRxError;
+};
+
+/* interrupt moderation level */
+enum {
+	UPT1_IML_NONE		= 0, /* no interrupt moderation */
+	UPT1_IML_HIGHEST	= 7, /* least intr generated */
+	UPT1_IML_ADAPTIVE	= 8, /* adpative intr moderation */
+};
+/* values for UPT1_RSSConf.hashFunc */
+enum {
+	UPT1_RSS_HASH_TYPE_NONE		= 0x0,
+	UPT1_RSS_HASH_TYPE_IPV4		= 0x01,
+	UPT1_RSS_HASH_TYPE_TCP_IPV4	= 0x02,
+	UPT1_RSS_HASH_TYPE_IPV6		= 0x04,
+	UPT1_RSS_HASH_TYPE_TCP_IPV6	= 0x08,
+};
+
+enum {
+	UPT1_RSS_HASH_FUNC_NONE		= 0x0,
+	UPT1_RSS_HASH_FUNC_TOEPLITZ	= 0x01,
+};
+
+#define UPT1_RSS_MAX_KEY_SIZE		40
+#define UPT1_RSS_MAX_IND_TABLE_SIZE	128
+
+struct UPT1_RSSConf {
+	u16			hashType;
+	u16			hashFunc;
+	u16			hashKeySize;
+	u16			indTableSize;
+	u8			hashKey[UPT1_RSS_MAX_KEY_SIZE];
+	u8			indTable[UPT1_RSS_MAX_IND_TABLE_SIZE];
+};
+
+/* features */
+enum {
+	UPT1_F_RXCSUM		= 0x0001,   /* rx csum verification */
+	UPT1_F_RSS		= 0x0002,
+	UPT1_F_RXVLAN		= 0x0004,   /* VLAN tag stripping */
+	UPT1_F_LRO		= 0x0008,
+};
+#endif
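The RSS hash-type values in this header are single bits, so a UPT1_RSSConf enables several at once by OR-ing them together. A hypothetical initialization built only from the definitions above (the field choices are illustrative; a real driver would also fill hashKey with key material and indTable with a queue-spreading pattern):

	struct UPT1_RSSConf conf = {
		.hashType	= UPT1_RSS_HASH_TYPE_TCP_IPV4 |
				  UPT1_RSS_HASH_TYPE_IPV4,
		.hashFunc	= UPT1_RSS_HASH_FUNC_TOEPLITZ,
		.hashKeySize	= UPT1_RSS_MAX_KEY_SIZE,
		.indTableSize	= UPT1_RSS_MAX_IND_TABLE_SIZE,
	};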
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
new file mode 100644
index 000000000000..dc8ee4438a4f
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -0,0 +1,535 @@
1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
24 *
25 */
26
27#ifndef _VMXNET3_DEFS_H_
28#define _VMXNET3_DEFS_H_
29
30#include "upt1_defs.h"
31
32/* all registers are 32 bit wide */
33/* BAR 1 */
34enum {
35 VMXNET3_REG_VRRS = 0x0, /* Vmxnet3 Revision Report Selection */
36 VMXNET3_REG_UVRS = 0x8, /* UPT Version Report Selection */
37 VMXNET3_REG_DSAL = 0x10, /* Driver Shared Address Low */
38 VMXNET3_REG_DSAH = 0x18, /* Driver Shared Address High */
39 VMXNET3_REG_CMD = 0x20, /* Command */
40 VMXNET3_REG_MACL = 0x28, /* MAC Address Low */
41 VMXNET3_REG_MACH = 0x30, /* MAC Address High */
42 VMXNET3_REG_ICR = 0x38, /* Interrupt Cause Register */
43 VMXNET3_REG_ECR = 0x40 /* Event Cause Register */
44};
45
46/* BAR 0 */
47enum {
48 VMXNET3_REG_IMR = 0x0, /* Interrupt Mask Register */
49 VMXNET3_REG_TXPROD = 0x600, /* Tx Producer Index */
50 VMXNET3_REG_RXPROD = 0x800, /* Rx Producer Index for ring 1 */
51 VMXNET3_REG_RXPROD2 = 0xA00 /* Rx Producer Index for ring 2 */
52};
53
54#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
55#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
56
57#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */
58#define VMXNET3_REG_ALIGN_MASK 0x7
59
60/* I/O Mapped access to registers */
61#define VMXNET3_IO_TYPE_PT 0
62#define VMXNET3_IO_TYPE_VD 1
63#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF))
64#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
65#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
66
67enum {
68 VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
69 VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET,
70 VMXNET3_CMD_QUIESCE_DEV,
71 VMXNET3_CMD_RESET_DEV,
72 VMXNET3_CMD_UPDATE_RX_MODE,
73 VMXNET3_CMD_UPDATE_MAC_FILTERS,
74 VMXNET3_CMD_UPDATE_VLAN_FILTERS,
75 VMXNET3_CMD_UPDATE_RSSIDT,
76 VMXNET3_CMD_UPDATE_IML,
77 VMXNET3_CMD_UPDATE_PMCFG,
78 VMXNET3_CMD_UPDATE_FEATURE,
79 VMXNET3_CMD_LOAD_PLUGIN,
80
81 VMXNET3_CMD_FIRST_GET = 0xF00D0000,
82 VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
83 VMXNET3_CMD_GET_STATS,
84 VMXNET3_CMD_GET_LINK,
85 VMXNET3_CMD_GET_PERM_MAC_LO,
86 VMXNET3_CMD_GET_PERM_MAC_HI,
87 VMXNET3_CMD_GET_DID_LO,
88 VMXNET3_CMD_GET_DID_HI,
89 VMXNET3_CMD_GET_DEV_EXTRA_INFO,
90 VMXNET3_CMD_GET_CONF_INTR
91};
92
93struct Vmxnet3_TxDesc {
94 u64 addr;
95
96 u32 len:14;
97 u32 gen:1; /* generation bit */
98 u32 rsvd:1;
99 u32 dtype:1; /* descriptor type */
100 u32 ext1:1;
101 u32 msscof:14; /* MSS, checksum offset, flags */
102
103 u32 hlen:10; /* header len */
104 u32 om:2; /* offload mode */
105 u32 eop:1; /* End Of Packet */
106 u32 cq:1; /* completion request */
107 u32 ext2:1;
108 u32 ti:1; /* VLAN Tag Insertion */
109 u32 tci:16; /* Tag to Insert */
110};
111
112/* TxDesc.OM values */
113#define VMXNET3_OM_NONE 0
114#define VMXNET3_OM_CSUM 2
115#define VMXNET3_OM_TSO 3
116
117/* fields in TxDesc we access w/o using bit fields */
118#define VMXNET3_TXD_EOP_SHIFT 12
119#define VMXNET3_TXD_CQ_SHIFT 13
120#define VMXNET3_TXD_GEN_SHIFT 14
121
122#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT)
123#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
124#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT)
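/*
 * Note (follows from the bitfield layout of struct Vmxnet3_TxDesc above,
 * assuming little-endian bitfield ordering): in the third u32, hlen(10)
 * plus om(2) place eop at bit 12 and cq at bit 13; in the second u32,
 * len(14) places gen at bit 14. The driver relies on this when it
 * writes the dwords directly, e.g.:
 *
 *	gdesc->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;
 *	gdesc->dword[2] ^= VMXNET3_TXD_GEN;
 */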
125
126#define VMXNET3_HDR_COPY_SIZE 128
127
128
129struct Vmxnet3_TxDataDesc {
130 u8 data[VMXNET3_HDR_COPY_SIZE];
131};
132
133
134struct Vmxnet3_TxCompDesc {
135 u32 txdIdx:12; /* Index of the EOP TxDesc */
136 u32 ext1:20;
137
138 u32 ext2;
139 u32 ext3;
140
141 u32 rsvd:24;
142 u32 type:7; /* completion type */
143 u32 gen:1; /* generation bit */
144};
145
146
147struct Vmxnet3_RxDesc {
148 u64 addr;
149
150 u32 len:14;
151 u32 btype:1; /* Buffer Type */
152 u32 dtype:1; /* Descriptor type */
153 u32 rsvd:15;
154 u32 gen:1; /* Generation bit */
155
156 u32 ext1;
157};
158
159/* values of RXD.BTYPE */
160#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */
161#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */
162
163/* fields in RxDesc we access w/o using bit fields */
164#define VMXNET3_RXD_BTYPE_SHIFT 14
165#define VMXNET3_RXD_GEN_SHIFT 31
166
167
168struct Vmxnet3_RxCompDesc {
169 u32 rxdIdx:12; /* Index of the RxDesc */
170 u32 ext1:2;
171 u32 eop:1; /* End of Packet */
172 u32 sop:1; /* Start of Packet */
173 u32 rqID:10; /* rx queue/ring ID */
174 u32 rssType:4; /* RSS hash type used */
175 u32 cnc:1; /* Checksum Not Calculated */
176 u32 ext2:1;
177
178 u32 rssHash; /* RSS hash value */
179
180 u32 len:14; /* data length */
181 u32 err:1; /* Error */
182 u32 ts:1; /* Tag is stripped */
183 u32 tci:16; /* Tag stripped */
184
185 u32 csum:16;
186 u32 tuc:1; /* TCP/UDP Checksum Correct */
187 u32 udp:1; /* UDP packet */
188 u32 tcp:1; /* TCP packet */
189 u32 ipc:1; /* IP Checksum Correct */
190 u32 v6:1; /* IPv6 */
191 u32 v4:1; /* IPv4 */
192 u32 frg:1; /* IP Fragment */
193 u32 fcs:1; /* Frame CRC correct */
194 u32 type:7; /* completion type */
195 u32 gen:1; /* generation bit */
196};
197
198/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
199#define VMXNET3_RCD_TUC_SHIFT 16
200#define VMXNET3_RCD_IPC_SHIFT 19
201
202/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */
203#define VMXNET3_RCD_TYPE_SHIFT 56
204#define VMXNET3_RCD_GEN_SHIFT 63
205
206/* csum OK for TCP/UDP pkts over IP */
207#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \
208 1 << VMXNET3_RCD_IPC_SHIFT)
209
210/* value of RxCompDesc.rssType */
211enum {
212 VMXNET3_RCD_RSS_TYPE_NONE = 0,
213 VMXNET3_RCD_RSS_TYPE_IPV4 = 1,
214 VMXNET3_RCD_RSS_TYPE_TCPIPV4 = 2,
215 VMXNET3_RCD_RSS_TYPE_IPV6 = 3,
216 VMXNET3_RCD_RSS_TYPE_TCPIPV6 = 4,
217};
218
219
220/* a union for accessing all cmd/completion descriptors */
221union Vmxnet3_GenericDesc {
222 u64 qword[2];
223 u32 dword[4];
224 u16 word[8];
225 struct Vmxnet3_TxDesc txd;
226 struct Vmxnet3_RxDesc rxd;
227 struct Vmxnet3_TxCompDesc tcd;
228 struct Vmxnet3_RxCompDesc rcd;
229};
230
231#define VMXNET3_INIT_GEN 1
232
233/* Max size of a single tx buffer */
234#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14)
235
236/* # of tx desc needed for a tx buffer size */
237#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / \
238 VMXNET3_MAX_TX_BUF_SIZE)
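/*
 * For illustration: VMXNET3_TXD_NEEDED() is a ceiling division by the
 * 16KB max buffer size, so a 40000-byte linear buffer needs
 * (40000 + 16383) / 16384 = 3 tx descriptors, while any buffer of up
 * to 16384 bytes needs exactly 1.
 */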
239
240/* max # of tx descs for a non-tso pkt */
241#define VMXNET3_MAX_TXD_PER_PKT 16
242
243/* Max size of a single rx buffer */
244#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1)
245/* Minimum size of a type 0 buffer */
246#define VMXNET3_MIN_T0_BUF_SIZE 128
247#define VMXNET3_MAX_CSUM_OFFSET 1024
248
249/* Ring base address alignment */
250#define VMXNET3_RING_BA_ALIGN 512
251#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1)
252
253/* Ring size must be a multiple of 32 */
254#define VMXNET3_RING_SIZE_ALIGN 32
255#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
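/*
 * For illustration: a requested ring size would typically be rounded
 * up to the required multiple of 32 using the mask above, so a request
 * for 100 descriptors becomes 128:
 *
 *	size = (size + VMXNET3_RING_SIZE_MASK) & ~VMXNET3_RING_SIZE_MASK;
 */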
256
257/* Max ring size */
258#define VMXNET3_TX_RING_MAX_SIZE 4096
259#define VMXNET3_TC_RING_MAX_SIZE 4096
260#define VMXNET3_RX_RING_MAX_SIZE 4096
261#define VMXNET3_RC_RING_MAX_SIZE 8192
262
263/* a list of reasons for queue stop */
264
265enum {
266 VMXNET3_ERR_NOEOP = 0x80000000, /* cannot find the EOP desc of a pkt */
267 VMXNET3_ERR_TXD_REUSE = 0x80000001, /* reuse TxDesc before tx completion */
268 VMXNET3_ERR_BIG_PKT = 0x80000002, /* too many TxDesc for a pkt */
269 VMXNET3_ERR_DESC_NOT_SPT = 0x80000003, /* descriptor type not supported */
270 VMXNET3_ERR_SMALL_BUF = 0x80000004, /* type 0 buffer too small */
271 VMXNET3_ERR_STRESS = 0x80000005, /* stress option firing in vmkernel */
272 VMXNET3_ERR_SWITCH = 0x80000006, /* mode switch failure */
273 VMXNET3_ERR_TXD_INVALID = 0x80000007, /* invalid TxDesc */
274};
275
276/* completion descriptor types */
277#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */
278#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */
279
280enum {
281 VMXNET3_GOS_BITS_UNK = 0, /* unknown */
282 VMXNET3_GOS_BITS_32 = 1,
283 VMXNET3_GOS_BITS_64 = 2,
284};
285
286#define VMXNET3_GOS_TYPE_LINUX 1
287
288
289struct Vmxnet3_GOSInfo {
290 u32 gosBits:2; /* 32-bit or 64-bit? */
291 u32 gosType:4; /* which guest */
292 u32 gosVer:16; /* gos version */
293 u32 gosMisc:10; /* other info about gos */
294};
295
296
297struct Vmxnet3_DriverInfo {
298 u32 version;
299 struct Vmxnet3_GOSInfo gos;
300 u32 vmxnet3RevSpt;
301 u32 uptVerSpt;
302};
303
304
305#define VMXNET3_REV1_MAGIC 0xbabefee1
306
307/*
 308 * QueueDescPA must be 128-byte aligned. It points to an array of
 309 * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc.
 310 * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc is specified by
311 * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively.
312 */
313#define VMXNET3_QUEUE_DESC_ALIGN 128
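/*
 * For illustration: with one tx and one rx queue, as this driver
 * configures, the table is simply one Vmxnet3_TxQueueDesc followed by
 * one Vmxnet3_RxQueueDesc, which is exactly how
 * vmxnet3_setup_driver_shared() sizes it:
 *
 *	queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) +
 *		       sizeof(struct Vmxnet3_RxQueueDesc);
 */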
314
315
316struct Vmxnet3_MiscConf {
317 struct Vmxnet3_DriverInfo driverInfo;
318 u64 uptFeatures;
319 u64 ddPA; /* driver data PA */
320 u64 queueDescPA; /* queue descriptor table PA */
321 u32 ddLen; /* driver data len */
322 u32 queueDescLen; /* queue desc. table len in bytes */
323 u32 mtu;
324 u16 maxNumRxSG;
325 u8 numTxQueues;
326 u8 numRxQueues;
327 u32 reserved[4];
328};
329
330
331struct Vmxnet3_TxQueueConf {
332 u64 txRingBasePA;
333 u64 dataRingBasePA;
334 u64 compRingBasePA;
335 u64 ddPA; /* driver data */
336 u64 reserved;
337 u32 txRingSize; /* # of tx desc */
338 u32 dataRingSize; /* # of data desc */
339 u32 compRingSize; /* # of comp desc */
340 u32 ddLen; /* size of driver data */
341 u8 intrIdx;
342 u8 _pad[7];
343};
344
345
346struct Vmxnet3_RxQueueConf {
347 u64 rxRingBasePA[2];
348 u64 compRingBasePA;
349 u64 ddPA; /* driver data */
350 u64 reserved;
351 u32 rxRingSize[2]; /* # of rx desc */
352 u32 compRingSize; /* # of rx comp desc */
353 u32 ddLen; /* size of driver data */
354 u8 intrIdx;
355 u8 _pad[7];
356};
357
358
359enum vmxnet3_intr_mask_mode {
360 VMXNET3_IMM_AUTO = 0,
361 VMXNET3_IMM_ACTIVE = 1,
362 VMXNET3_IMM_LAZY = 2
363};
364
365enum vmxnet3_intr_type {
366 VMXNET3_IT_AUTO = 0,
367 VMXNET3_IT_INTX = 1,
368 VMXNET3_IT_MSI = 2,
369 VMXNET3_IT_MSIX = 3
370};
371
372#define VMXNET3_MAX_TX_QUEUES 8
373#define VMXNET3_MAX_RX_QUEUES 16
 374/* additional 1 vector for events */
375#define VMXNET3_MAX_INTRS 25
376
377
378struct Vmxnet3_IntrConf {
379 bool autoMask;
380 u8 numIntrs; /* # of interrupts */
381 u8 eventIntrIdx;
382 u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for
383 * each intr */
384 u32 reserved[3];
385};
386
 387/* one bit per VLAN ID; the table size is in units of u32 (4096 bits = 128 words) */
388#define VMXNET3_VFT_SIZE (4096 / (sizeof(u32) * 8))
389
390
391struct Vmxnet3_QueueStatus {
392 bool stopped;
393 u8 _pad[3];
394 u32 error;
395};
396
397
398struct Vmxnet3_TxQueueCtrl {
399 u32 txNumDeferred;
400 u32 txThreshold;
401 u64 reserved;
402};
403
404
405struct Vmxnet3_RxQueueCtrl {
406 bool updateRxProd;
407 u8 _pad[7];
408 u64 reserved;
409};
410
411enum {
412 VMXNET3_RXM_UCAST = 0x01, /* unicast only */
413 VMXNET3_RXM_MCAST = 0x02, /* multicast passing the filters */
414 VMXNET3_RXM_BCAST = 0x04, /* broadcast only */
415 VMXNET3_RXM_ALL_MULTI = 0x08, /* all multicast */
416 VMXNET3_RXM_PROMISC = 0x10 /* promiscuous */
417};
418
419struct Vmxnet3_RxFilterConf {
420 u32 rxMode; /* VMXNET3_RXM_xxx */
421 u16 mfTableLen; /* size of the multicast filter table */
422 u16 _pad1;
423 u64 mfTablePA; /* PA of the multicast filters table */
424 u32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
425};
426
427
428#define VMXNET3_PM_MAX_FILTERS 6
429#define VMXNET3_PM_MAX_PATTERN_SIZE 128
430#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
431
432#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
433#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching
434 * filters */
435
436
437struct Vmxnet3_PM_PktFilter {
438 u8 maskSize;
439 u8 patternSize;
440 u8 mask[VMXNET3_PM_MAX_MASK_SIZE];
441 u8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE];
442 u8 pad[6];
443};
444
445
446struct Vmxnet3_PMConf {
447 u16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */
448 u8 numFilters;
449 u8 pad[5];
450 struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
451};
452
453
454struct Vmxnet3_VariableLenConfDesc {
455 u32 confVer;
456 u32 confLen;
457 u64 confPA;
458};
459
460
461struct Vmxnet3_TxQueueDesc {
462 struct Vmxnet3_TxQueueCtrl ctrl;
463 struct Vmxnet3_TxQueueConf conf;
464
465 /* Driver read after a GET command */
466 struct Vmxnet3_QueueStatus status;
467 struct UPT1_TxStats stats;
468 u8 _pad[88]; /* 128 aligned */
469};
470
471
472struct Vmxnet3_RxQueueDesc {
473 struct Vmxnet3_RxQueueCtrl ctrl;
474 struct Vmxnet3_RxQueueConf conf;
 475 /* Driver read after a GET command */
476 struct Vmxnet3_QueueStatus status;
477 struct UPT1_RxStats stats;
478 u8 __pad[88]; /* 128 aligned */
479};
480
481
482struct Vmxnet3_DSDevRead {
483 /* read-only region for device, read by dev in response to a SET cmd */
484 struct Vmxnet3_MiscConf misc;
485 struct Vmxnet3_IntrConf intrConf;
486 struct Vmxnet3_RxFilterConf rxFilterConf;
487 struct Vmxnet3_VariableLenConfDesc rssConfDesc;
488 struct Vmxnet3_VariableLenConfDesc pmConfDesc;
489 struct Vmxnet3_VariableLenConfDesc pluginConfDesc;
490};
491
492/* All structures in DriverShared are padded to multiples of 8 bytes */
493struct Vmxnet3_DriverShared {
494 u32 magic;
495 /* make devRead start at 64bit boundaries */
496 u32 pad;
497 struct Vmxnet3_DSDevRead devRead;
498 u32 ecr;
499 u32 reserved[5];
500};
501
502
503#define VMXNET3_ECR_RQERR (1 << 0)
504#define VMXNET3_ECR_TQERR (1 << 1)
505#define VMXNET3_ECR_LINK (1 << 2)
506#define VMXNET3_ECR_DIC (1 << 3)
507#define VMXNET3_ECR_DEBUG (1 << 4)
508
509/* flip the gen bit of a ring */
510#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1)
511
512/* only use this if moving the idx won't affect the gen bit */
513#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \
514 do {\
515 (idx)++;\
516 if (unlikely((idx) == (ring_size))) {\
517 (idx) = 0;\
518 } \
519 } while (0)
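/*
 * By contrast, a full ring advance must also flip the generation bit
 * on wrap. A minimal sketch of what helpers such as
 * vmxnet3_cmd_ring_adv_next2fill() (defined in vmxnet3_int.h, not
 * shown in this hunk) have to do:
 *
 *	ring->next2fill++;
 *	if (ring->next2fill == ring->size) {
 *		ring->next2fill = 0;
 *		VMXNET3_FLIP_RING_GEN(ring->gen);
 *	}
 */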
520
521#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \
522 (vfTable[vid >> 5] |= (1 << (vid & 31)))
523#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \
524 (vfTable[vid >> 5] &= ~(1 << (vid & 31)))
525
526#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \
527 ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
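/*
 * For illustration: the table is indexed by vid >> 5 with bit mask
 * 1 << (vid & 31), so VLAN ID 100 lands on bit 4 of vfTable[3]:
 *
 *	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 100);
 *	BUG_ON(!VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, 100));
 */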
528
529#define VMXNET3_MAX_MTU 9000
530#define VMXNET3_MIN_MTU 60
531
532#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */
533#define VMXNET3_LINK_DOWN 0
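/*
 * For illustration: VMXNET3_CMD_GET_LINK returns this same encoding,
 * with the speed in Mbps in the high 16 bits and the link-up flag in
 * bit 0, which vmxnet3_check_link() decodes as:
 *
 *	adapter->link_speed = ret >> 16;
 *	if (ret & 1)
 *		netif_carrier_on(adapter->netdev);
 */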
534
535#endif /* _VMXNET3_DEFS_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
new file mode 100644
index 000000000000..44fb0c5a2800
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -0,0 +1,2556 @@
1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
24 *
25 */
26
27#include "vmxnet3_int.h"
28
29char vmxnet3_driver_name[] = "vmxnet3";
30#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
31
32
33/*
34 * PCI Device ID Table
35 * Last entry must be all 0s
36 */
37static const struct pci_device_id vmxnet3_pciid_table[] = {
38 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
39 {0}
40};
41
42MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
43
44static atomic_t devices_found;
45
46
47/*
48 * Enable/Disable the given intr
49 */
50static void
51vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
52{
53 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
54}
55
56
57static void
58vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
59{
60 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
61}
62
63
64/*
65 * Enable/Disable all intrs used by the device
66 */
67static void
68vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
69{
70 int i;
71
72 for (i = 0; i < adapter->intr.num_intrs; i++)
73 vmxnet3_enable_intr(adapter, i);
74}
75
76
77static void
78vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
79{
80 int i;
81
82 for (i = 0; i < adapter->intr.num_intrs; i++)
83 vmxnet3_disable_intr(adapter, i);
84}
85
86
87static void
88vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
89{
90 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
91}
92
93
94static bool
95vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
96{
97 return netif_queue_stopped(adapter->netdev);
98}
99
100
101static void
102vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
103{
104 tq->stopped = false;
105 netif_start_queue(adapter->netdev);
106}
107
108
109static void
110vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
111{
112 tq->stopped = false;
113 netif_wake_queue(adapter->netdev);
114}
115
116
117static void
118vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
119{
120 tq->stopped = true;
121 tq->num_stop++;
122 netif_stop_queue(adapter->netdev);
123}
124
125
126/*
127 * Check the link state. This may start or stop the tx queue.
128 */
129static void
130vmxnet3_check_link(struct vmxnet3_adapter *adapter)
131{
132 u32 ret;
133
134 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
135 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
136 adapter->link_speed = ret >> 16;
137 if (ret & 1) { /* Link is up. */
138 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
139 adapter->netdev->name, adapter->link_speed);
140 if (!netif_carrier_ok(adapter->netdev))
141 netif_carrier_on(adapter->netdev);
142
143 vmxnet3_tq_start(&adapter->tx_queue, adapter);
144 } else {
145 printk(KERN_INFO "%s: NIC Link is Down\n",
146 adapter->netdev->name);
147 if (netif_carrier_ok(adapter->netdev))
148 netif_carrier_off(adapter->netdev);
149
150 vmxnet3_tq_stop(&adapter->tx_queue, adapter);
151 }
152}
153
154
155static void
156vmxnet3_process_events(struct vmxnet3_adapter *adapter)
157{
158 u32 events = adapter->shared->ecr;
159 if (!events)
160 return;
161
162 vmxnet3_ack_events(adapter, events);
163
164 /* Check if link state has changed */
165 if (events & VMXNET3_ECR_LINK)
166 vmxnet3_check_link(adapter);
167
168 /* Check if there is an error on xmit/recv queues */
169 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
170 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
171 VMXNET3_CMD_GET_QUEUE_STATUS);
172
173 if (adapter->tqd_start->status.stopped) {
174 printk(KERN_ERR "%s: tq error 0x%x\n",
175 adapter->netdev->name,
176 adapter->tqd_start->status.error);
177 }
178 if (adapter->rqd_start->status.stopped) {
179 printk(KERN_ERR "%s: rq error 0x%x\n",
180 adapter->netdev->name,
181 adapter->rqd_start->status.error);
182 }
183
184 schedule_work(&adapter->work);
185 }
186}
187
188
189static void
190vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
191 struct pci_dev *pdev)
192{
193 if (tbi->map_type == VMXNET3_MAP_SINGLE)
194 pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
195 PCI_DMA_TODEVICE);
196 else if (tbi->map_type == VMXNET3_MAP_PAGE)
197 pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
198 PCI_DMA_TODEVICE);
199 else
200 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
201
202 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
203}
204
205
206static int
207vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
208 struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
209{
210 struct sk_buff *skb;
211 int entries = 0;
212
213 /* no out of order completion */
214 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
215 BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1);
216
217 skb = tq->buf_info[eop_idx].skb;
218 BUG_ON(skb == NULL);
219 tq->buf_info[eop_idx].skb = NULL;
220
221 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
222
223 while (tq->tx_ring.next2comp != eop_idx) {
224 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
225 pdev);
226
 227 /* update next2comp w/o holding tx_lock. Since we only ever
 228 * mark more tx ring entries available, never fewer, the
 229 * worst case is that the tx routine sees a stale count and
 230 * incorrectly re-queues a pkt for lack of tx ring entries.
 231 */
232 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
233 entries++;
234 }
235
236 dev_kfree_skb_any(skb);
237 return entries;
238}
239
240
241static int
242vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
243 struct vmxnet3_adapter *adapter)
244{
245 int completed = 0;
246 union Vmxnet3_GenericDesc *gdesc;
247
248 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
249 while (gdesc->tcd.gen == tq->comp_ring.gen) {
250 completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq,
251 adapter->pdev, adapter);
252
253 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
254 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
255 }
256
257 if (completed) {
258 spin_lock(&tq->tx_lock);
259 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
260 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
261 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
262 netif_carrier_ok(adapter->netdev))) {
263 vmxnet3_tq_wake(tq, adapter);
264 }
265 spin_unlock(&tq->tx_lock);
266 }
267 return completed;
268}
269
270
271static void
272vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
273 struct vmxnet3_adapter *adapter)
274{
275 int i;
276
277 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
278 struct vmxnet3_tx_buf_info *tbi;
279 union Vmxnet3_GenericDesc *gdesc;
280
281 tbi = tq->buf_info + tq->tx_ring.next2comp;
282 gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;
283
284 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
285 if (tbi->skb) {
286 dev_kfree_skb_any(tbi->skb);
287 tbi->skb = NULL;
288 }
289 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
290 }
291
292 /* sanity check, verify all buffers are indeed unmapped and freed */
293 for (i = 0; i < tq->tx_ring.size; i++) {
294 BUG_ON(tq->buf_info[i].skb != NULL ||
295 tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
296 }
297
298 tq->tx_ring.gen = VMXNET3_INIT_GEN;
299 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
300
301 tq->comp_ring.gen = VMXNET3_INIT_GEN;
302 tq->comp_ring.next2proc = 0;
303}
304
305
306void
307vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
308 struct vmxnet3_adapter *adapter)
309{
310 if (tq->tx_ring.base) {
311 pci_free_consistent(adapter->pdev, tq->tx_ring.size *
312 sizeof(struct Vmxnet3_TxDesc),
313 tq->tx_ring.base, tq->tx_ring.basePA);
314 tq->tx_ring.base = NULL;
315 }
316 if (tq->data_ring.base) {
317 pci_free_consistent(adapter->pdev, tq->data_ring.size *
318 sizeof(struct Vmxnet3_TxDataDesc),
319 tq->data_ring.base, tq->data_ring.basePA);
320 tq->data_ring.base = NULL;
321 }
322 if (tq->comp_ring.base) {
323 pci_free_consistent(adapter->pdev, tq->comp_ring.size *
324 sizeof(struct Vmxnet3_TxCompDesc),
325 tq->comp_ring.base, tq->comp_ring.basePA);
326 tq->comp_ring.base = NULL;
327 }
328 kfree(tq->buf_info);
329 tq->buf_info = NULL;
330}
331
332
333static void
334vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
335 struct vmxnet3_adapter *adapter)
336{
337 int i;
338
339 /* reset the tx ring contents to 0 and reset the tx ring states */
340 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
341 sizeof(struct Vmxnet3_TxDesc));
342 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
343 tq->tx_ring.gen = VMXNET3_INIT_GEN;
344
345 memset(tq->data_ring.base, 0, tq->data_ring.size *
346 sizeof(struct Vmxnet3_TxDataDesc));
347
348 /* reset the tx comp ring contents to 0 and reset comp ring states */
349 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
350 sizeof(struct Vmxnet3_TxCompDesc));
351 tq->comp_ring.next2proc = 0;
352 tq->comp_ring.gen = VMXNET3_INIT_GEN;
353
354 /* reset the bookkeeping data */
355 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
356 for (i = 0; i < tq->tx_ring.size; i++)
357 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
358
359 /* stats are not reset */
360}
361
362
363static int
364vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
365 struct vmxnet3_adapter *adapter)
366{
367 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
368 tq->comp_ring.base || tq->buf_info);
369
370 tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
371 * sizeof(struct Vmxnet3_TxDesc),
372 &tq->tx_ring.basePA);
373 if (!tq->tx_ring.base) {
374 printk(KERN_ERR "%s: failed to allocate tx ring\n",
375 adapter->netdev->name);
376 goto err;
377 }
378
379 tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
380 tq->data_ring.size *
381 sizeof(struct Vmxnet3_TxDataDesc),
382 &tq->data_ring.basePA);
383 if (!tq->data_ring.base) {
384 printk(KERN_ERR "%s: failed to allocate data ring\n",
385 adapter->netdev->name);
386 goto err;
387 }
388
389 tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
390 tq->comp_ring.size *
391 sizeof(struct Vmxnet3_TxCompDesc),
392 &tq->comp_ring.basePA);
393 if (!tq->comp_ring.base) {
394 printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
395 adapter->netdev->name);
396 goto err;
397 }
398
399 tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
400 GFP_KERNEL);
401 if (!tq->buf_info) {
402 printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
403 adapter->netdev->name);
404 goto err;
405 }
406
407 return 0;
408
409err:
410 vmxnet3_tq_destroy(tq, adapter);
411 return -ENOMEM;
412}
413
414
415/*
416 * starting from ring->next2fill, allocate rx buffers for the given ring
 417 * of the rx queue and update the rx desc. Stop after @num_to_alloc buffers
418 * are allocated or allocation fails
419 */
420
421static int
422vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
423 int num_to_alloc, struct vmxnet3_adapter *adapter)
424{
425 int num_allocated = 0;
426 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
427 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
428 u32 val;
429
430 while (num_allocated < num_to_alloc) {
431 struct vmxnet3_rx_buf_info *rbi;
432 union Vmxnet3_GenericDesc *gd;
433
434 rbi = rbi_base + ring->next2fill;
435 gd = ring->base + ring->next2fill;
436
437 if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
438 if (rbi->skb == NULL) {
439 rbi->skb = dev_alloc_skb(rbi->len +
440 NET_IP_ALIGN);
441 if (unlikely(rbi->skb == NULL)) {
442 rq->stats.rx_buf_alloc_failure++;
443 break;
444 }
445 rbi->skb->dev = adapter->netdev;
446
447 skb_reserve(rbi->skb, NET_IP_ALIGN);
448 rbi->dma_addr = pci_map_single(adapter->pdev,
449 rbi->skb->data, rbi->len,
450 PCI_DMA_FROMDEVICE);
451 } else {
452 /* rx buffer skipped by the device */
453 }
454 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
455 } else {
456 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
457 rbi->len != PAGE_SIZE);
458
459 if (rbi->page == NULL) {
460 rbi->page = alloc_page(GFP_ATOMIC);
461 if (unlikely(rbi->page == NULL)) {
462 rq->stats.rx_buf_alloc_failure++;
463 break;
464 }
465 rbi->dma_addr = pci_map_page(adapter->pdev,
466 rbi->page, 0, PAGE_SIZE,
467 PCI_DMA_FROMDEVICE);
468 } else {
469 /* rx buffers skipped by the device */
470 }
471 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
472 }
473
474 BUG_ON(rbi->dma_addr == 0);
475 gd->rxd.addr = rbi->dma_addr;
476 gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val |
477 rbi->len;
478
479 num_allocated++;
480 vmxnet3_cmd_ring_adv_next2fill(ring);
481 }
482 rq->uncommitted[ring_idx] += num_allocated;
483
484 dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
485 "%u, uncommited %u\n", num_allocated, ring->next2fill,
486 ring->next2comp, rq->uncommitted[ring_idx]);
487
488 /* so that the device can distinguish a full ring and an empty ring */
489 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
490
491 return num_allocated;
492}
493
494
495static void
496vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
497 struct vmxnet3_rx_buf_info *rbi)
498{
499 struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
500 skb_shinfo(skb)->nr_frags;
501
502 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
503
504 frag->page = rbi->page;
505 frag->page_offset = 0;
506 frag->size = rcd->len;
507 skb->data_len += frag->size;
508 skb_shinfo(skb)->nr_frags++;
509}
510
511
512static void
513vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
514 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
515 struct vmxnet3_adapter *adapter)
516{
517 u32 dw2, len;
518 unsigned long buf_offset;
519 int i;
520 union Vmxnet3_GenericDesc *gdesc;
521 struct vmxnet3_tx_buf_info *tbi = NULL;
522
523 BUG_ON(ctx->copy_size > skb_headlen(skb));
524
525 /* use the previous gen bit for the SOP desc */
526 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
527
528 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
529 gdesc = ctx->sop_txd; /* both loops below can be skipped */
530
531 /* no need to map the buffer if headers are copied */
532 if (ctx->copy_size) {
533 ctx->sop_txd->txd.addr = tq->data_ring.basePA +
534 tq->tx_ring.next2fill *
535 sizeof(struct Vmxnet3_TxDataDesc);
536 ctx->sop_txd->dword[2] = dw2 | ctx->copy_size;
537 ctx->sop_txd->dword[3] = 0;
538
539 tbi = tq->buf_info + tq->tx_ring.next2fill;
540 tbi->map_type = VMXNET3_MAP_NONE;
541
542 dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
543 tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
544 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
545 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
546
547 /* use the right gen for non-SOP desc */
548 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
549 }
550
551 /* linear part can use multiple tx desc if it's big */
552 len = skb_headlen(skb) - ctx->copy_size;
553 buf_offset = ctx->copy_size;
554 while (len) {
555 u32 buf_size;
556
557 buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ?
558 VMXNET3_MAX_TX_BUF_SIZE : len;
559
560 tbi = tq->buf_info + tq->tx_ring.next2fill;
561 tbi->map_type = VMXNET3_MAP_SINGLE;
562 tbi->dma_addr = pci_map_single(adapter->pdev,
563 skb->data + buf_offset, buf_size,
564 PCI_DMA_TODEVICE);
565
 566 tbi->len = buf_size; /* this automatically converts 2^14 to 0 */
567
568 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
569 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
570
571 gdesc->txd.addr = tbi->dma_addr;
572 gdesc->dword[2] = dw2 | buf_size;
573 gdesc->dword[3] = 0;
574
575 dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
576 tq->tx_ring.next2fill, gdesc->txd.addr,
577 gdesc->dword[2], gdesc->dword[3]);
578 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
579 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
580
581 len -= buf_size;
582 buf_offset += buf_size;
583 }
584
585 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
586 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
587
588 tbi = tq->buf_info + tq->tx_ring.next2fill;
589 tbi->map_type = VMXNET3_MAP_PAGE;
590 tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
591 frag->page_offset, frag->size,
592 PCI_DMA_TODEVICE);
593
594 tbi->len = frag->size;
595
596 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
597 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
598
599 gdesc->txd.addr = tbi->dma_addr;
600 gdesc->dword[2] = dw2 | frag->size;
601 gdesc->dword[3] = 0;
602
603 dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n",
604 tq->tx_ring.next2fill, gdesc->txd.addr,
605 gdesc->dword[2], gdesc->dword[3]);
606 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
607 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
608 }
609
610 ctx->eop_txd = gdesc;
611
612 /* set the last buf_info for the pkt */
613 tbi->skb = skb;
614 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
615}
616
617
618/*
619 * parse and copy relevant protocol headers:
620 * For a tso pkt, relevant headers are L2/3/4 including options
621 * For a pkt requesting csum offloading, they are L2/3 and may include L4
622 * if it's a TCP/UDP pkt
623 *
624 * Returns:
625 * -1: error happens during parsing
626 * 0: protocol headers parsed, but too big to be copied
627 * 1: protocol headers parsed and copied
628 *
629 * Other effects:
630 * 1. related *ctx fields are updated.
631 * 2. ctx->copy_size is # of bytes copied
632 * 3. the portion copied is guaranteed to be in the linear part
633 *
634 */
635static int
636vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
637 struct vmxnet3_tx_ctx *ctx,
638 struct vmxnet3_adapter *adapter)
639{
640 struct Vmxnet3_TxDataDesc *tdd;
641
642 if (ctx->mss) {
643 ctx->eth_ip_hdr_size = skb_transport_offset(skb);
644 ctx->l4_hdr_size = ((struct tcphdr *)
645 skb_transport_header(skb))->doff * 4;
646 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
647 } else {
648 unsigned int pull_size;
649
650 if (skb->ip_summed == CHECKSUM_PARTIAL) {
651 ctx->eth_ip_hdr_size = skb_transport_offset(skb);
652
653 if (ctx->ipv4) {
654 struct iphdr *iph = (struct iphdr *)
655 skb_network_header(skb);
656 if (iph->protocol == IPPROTO_TCP) {
657 pull_size = ctx->eth_ip_hdr_size +
658 sizeof(struct tcphdr);
659
660 if (unlikely(!pskb_may_pull(skb,
661 pull_size))) {
662 goto err;
663 }
664 ctx->l4_hdr_size = ((struct tcphdr *)
665 skb_transport_header(skb))->doff * 4;
666 } else if (iph->protocol == IPPROTO_UDP) {
667 ctx->l4_hdr_size =
668 sizeof(struct udphdr);
669 } else {
670 ctx->l4_hdr_size = 0;
671 }
672 } else {
673 /* for simplicity, don't copy L4 headers */
674 ctx->l4_hdr_size = 0;
675 }
676 ctx->copy_size = ctx->eth_ip_hdr_size +
677 ctx->l4_hdr_size;
678 } else {
679 ctx->eth_ip_hdr_size = 0;
680 ctx->l4_hdr_size = 0;
681 /* copy as much as allowed */
 682 ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
 683 skb_headlen(skb));
684 }
685
686 /* make sure headers are accessible directly */
687 if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
688 goto err;
689 }
690
691 if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
692 tq->stats.oversized_hdr++;
693 ctx->copy_size = 0;
694 return 0;
695 }
696
697 tdd = tq->data_ring.base + tq->tx_ring.next2fill;
698
699 memcpy(tdd->data, skb->data, ctx->copy_size);
700 dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
701 ctx->copy_size, tq->tx_ring.next2fill);
702 return 1;
703
704err:
705 return -1;
706}
707
708
709static void
710vmxnet3_prepare_tso(struct sk_buff *skb,
711 struct vmxnet3_tx_ctx *ctx)
712{
713 struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
714 if (ctx->ipv4) {
715 struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
716 iph->check = 0;
717 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
718 IPPROTO_TCP, 0);
719 } else {
720 struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
721 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
722 IPPROTO_TCP, 0);
723 }
724}
725
726
727/*
728 * Transmits a pkt thru a given tq
729 * Returns:
730 * NETDEV_TX_OK: descriptors are setup successfully
 731 * NETDEV_TX_OK: an error occurred, the pkt is dropped
732 * NETDEV_TX_BUSY: tx ring is full, queue is stopped
733 *
734 * Side-effects:
735 * 1. tx ring may be changed
736 * 2. tq stats may be updated accordingly
737 * 3. shared->txNumDeferred may be updated
738 */
739
740static int
741vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
742 struct vmxnet3_adapter *adapter, struct net_device *netdev)
743{
744 int ret;
745 u32 count;
746 unsigned long flags;
747 struct vmxnet3_tx_ctx ctx;
748 union Vmxnet3_GenericDesc *gdesc;
749
750 /* conservatively estimate # of descriptors to use */
751 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
752 skb_shinfo(skb)->nr_frags + 1;
753
 754 ctx.ipv4 = (skb->protocol == __constant_htons(ETH_P_IP));
755
756 ctx.mss = skb_shinfo(skb)->gso_size;
757 if (ctx.mss) {
758 if (skb_header_cloned(skb)) {
759 if (unlikely(pskb_expand_head(skb, 0, 0,
760 GFP_ATOMIC) != 0)) {
761 tq->stats.drop_tso++;
762 goto drop_pkt;
763 }
764 tq->stats.copy_skb_header++;
765 }
766 vmxnet3_prepare_tso(skb, &ctx);
767 } else {
768 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
769
770 /* non-tso pkts must not use more than
771 * VMXNET3_MAX_TXD_PER_PKT entries
772 */
773 if (skb_linearize(skb) != 0) {
774 tq->stats.drop_too_many_frags++;
775 goto drop_pkt;
776 }
777 tq->stats.linearized++;
778
779 /* recalculate the # of descriptors to use */
780 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
781 }
782 }
783
784 ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
785 if (ret >= 0) {
786 BUG_ON(ret <= 0 && ctx.copy_size != 0);
787 /* hdrs parsed, check against other limits */
788 if (ctx.mss) {
789 if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
790 VMXNET3_MAX_TX_BUF_SIZE)) {
791 goto hdr_too_big;
792 }
793 } else {
794 if (skb->ip_summed == CHECKSUM_PARTIAL) {
795 if (unlikely(ctx.eth_ip_hdr_size +
796 skb->csum_offset >
797 VMXNET3_MAX_CSUM_OFFSET)) {
798 goto hdr_too_big;
799 }
800 }
801 }
802 } else {
803 tq->stats.drop_hdr_inspect_err++;
804 goto drop_pkt;
805 }
806
807 spin_lock_irqsave(&tq->tx_lock, flags);
808
809 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
810 tq->stats.tx_ring_full++;
811 dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u"
812 " next2fill %u\n", adapter->netdev->name,
813 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
814
815 vmxnet3_tq_stop(tq, adapter);
816 spin_unlock_irqrestore(&tq->tx_lock, flags);
817 return NETDEV_TX_BUSY;
818 }
819
820 /* fill tx descs related to addr & len */
821 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
822
823 /* setup the EOP desc */
824 ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;
825
826 /* setup the SOP desc */
827 gdesc = ctx.sop_txd;
828 if (ctx.mss) {
829 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
830 gdesc->txd.om = VMXNET3_OM_TSO;
831 gdesc->txd.msscof = ctx.mss;
832 tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen +
833 ctx.mss - 1) / ctx.mss;
834 } else {
835 if (skb->ip_summed == CHECKSUM_PARTIAL) {
836 gdesc->txd.hlen = ctx.eth_ip_hdr_size;
837 gdesc->txd.om = VMXNET3_OM_CSUM;
838 gdesc->txd.msscof = ctx.eth_ip_hdr_size +
839 skb->csum_offset;
840 } else {
841 gdesc->txd.om = 0;
842 gdesc->txd.msscof = 0;
843 }
844 tq->shared->txNumDeferred++;
845 }
846
847 if (vlan_tx_tag_present(skb)) {
848 gdesc->txd.ti = 1;
849 gdesc->txd.tci = vlan_tx_tag_get(skb);
850 }
851
852 wmb();
853
 854 /* finally, flip the GEN bit of the SOP desc */
855 gdesc->dword[2] ^= VMXNET3_TXD_GEN;
856 dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
857 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
858 tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
859 gdesc->dword[3]);
860
861 spin_unlock_irqrestore(&tq->tx_lock, flags);
862
863 if (tq->shared->txNumDeferred >= tq->shared->txThreshold) {
864 tq->shared->txNumDeferred = 0;
865 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
866 tq->tx_ring.next2fill);
867 }
868 netdev->trans_start = jiffies;
869
870 return NETDEV_TX_OK;
871
872hdr_too_big:
873 tq->stats.drop_oversized_hdr++;
874drop_pkt:
875 tq->stats.drop_total++;
876 dev_kfree_skb(skb);
877 return NETDEV_TX_OK;
878}
879
880
881static netdev_tx_t
882vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
883{
884 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
885 struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
886
887 return vmxnet3_tq_xmit(skb, tq, adapter, netdev);
888}
889
890
891static void
892vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
893 struct sk_buff *skb,
894 union Vmxnet3_GenericDesc *gdesc)
895{
896 if (!gdesc->rcd.cnc && adapter->rxcsum) {
897 /* typical case: TCP/UDP over IP and both csums are correct */
898 if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) ==
899 VMXNET3_RCD_CSUM_OK) {
900 skb->ip_summed = CHECKSUM_UNNECESSARY;
901 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
902 BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
903 BUG_ON(gdesc->rcd.frg);
904 } else {
905 if (gdesc->rcd.csum) {
906 skb->csum = htons(gdesc->rcd.csum);
907 skb->ip_summed = CHECKSUM_PARTIAL;
908 } else {
909 skb->ip_summed = CHECKSUM_NONE;
910 }
911 }
912 } else {
913 skb->ip_summed = CHECKSUM_NONE;
914 }
915}
916
917
918static void
919vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
920 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
921{
922 rq->stats.drop_err++;
923 if (!rcd->fcs)
924 rq->stats.drop_fcs++;
925
926 rq->stats.drop_total++;
927
928 /*
929 * We do not unmap and chain the rx buffer to the skb.
930 * We basically pretend this buffer is not used and will be recycled
931 * by vmxnet3_rq_alloc_rx_buf()
932 */
933
934 /*
935 * ctx->skb may be NULL if this is the first and the only one
936 * desc for the pkt
937 */
938 if (ctx->skb)
939 dev_kfree_skb_irq(ctx->skb);
940
941 ctx->skb = NULL;
942}
943
944
945static int
946vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
947 struct vmxnet3_adapter *adapter, int quota)
948{
949 static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
950 u32 num_rxd = 0;
951 struct Vmxnet3_RxCompDesc *rcd;
952 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
953
954 rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
955 while (rcd->gen == rq->comp_ring.gen) {
956 struct vmxnet3_rx_buf_info *rbi;
957 struct sk_buff *skb;
958 int num_to_alloc;
959 struct Vmxnet3_RxDesc *rxd;
960 u32 idx, ring_idx;
961
962 if (num_rxd >= quota) {
963 /* we may stop even before we see the EOP desc of
964 * the current pkt
965 */
966 break;
967 }
968 num_rxd++;
969
970 idx = rcd->rxdIdx;
971 ring_idx = rcd->rqID == rq->qid ? 0 : 1;
972
973 rxd = &rq->rx_ring[ring_idx].base[idx].rxd;
974 rbi = rq->buf_info[ring_idx] + idx;
975
976 BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len);
977
978 if (unlikely(rcd->eop && rcd->err)) {
979 vmxnet3_rx_error(rq, rcd, ctx, adapter);
980 goto rcd_done;
981 }
982
983 if (rcd->sop) { /* first buf of the pkt */
984 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
985 rcd->rqID != rq->qid);
986
987 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
988 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
989
990 if (unlikely(rcd->len == 0)) {
991 /* Pretend the rx buffer is skipped. */
992 BUG_ON(!(rcd->sop && rcd->eop));
993 dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n",
994 ring_idx, idx);
995 goto rcd_done;
996 }
997
998 ctx->skb = rbi->skb;
999 rbi->skb = NULL;
1000
1001 pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
1002 PCI_DMA_FROMDEVICE);
1003
1004 skb_put(ctx->skb, rcd->len);
1005 } else {
1006 BUG_ON(ctx->skb == NULL);
 1007 /* a non-SOP buffer must be type 1 in most cases */
1008 if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
1009 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1010
1011 if (rcd->len) {
1012 pci_unmap_page(adapter->pdev,
1013 rbi->dma_addr, rbi->len,
1014 PCI_DMA_FROMDEVICE);
1015
1016 vmxnet3_append_frag(ctx->skb, rcd, rbi);
1017 rbi->page = NULL;
1018 }
1019 } else {
1020 /*
1021 * The only time a non-SOP buffer is type 0 is
1022 * when it's EOP and error flag is raised, which
1023 * has already been handled.
1024 */
1025 BUG_ON(true);
1026 }
1027 }
1028
1029 skb = ctx->skb;
1030 if (rcd->eop) {
1031 skb->len += skb->data_len;
1032 skb->truesize += skb->data_len;
1033
1034 vmxnet3_rx_csum(adapter, skb,
1035 (union Vmxnet3_GenericDesc *)rcd);
1036 skb->protocol = eth_type_trans(skb, adapter->netdev);
1037
1038 if (unlikely(adapter->vlan_grp && rcd->ts)) {
1039 vlan_hwaccel_receive_skb(skb,
1040 adapter->vlan_grp, rcd->tci);
1041 } else {
1042 netif_receive_skb(skb);
1043 }
1044
1045 adapter->netdev->last_rx = jiffies;
1046 ctx->skb = NULL;
1047 }
1048
1049rcd_done:
1050 /* device may skip some rx descs */
1051 rq->rx_ring[ring_idx].next2comp = idx;
1052 VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
1053 rq->rx_ring[ring_idx].size);
1054
1055 /* refill rx buffers frequently to avoid starving the h/w */
1056 num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
1057 ring_idx);
1058 if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
1059 ring_idx, adapter))) {
1060 vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
1061 adapter);
1062
1063 /* if needed, update the register */
1064 if (unlikely(rq->shared->updateRxProd)) {
1065 VMXNET3_WRITE_BAR0_REG(adapter,
1066 rxprod_reg[ring_idx] + rq->qid * 8,
1067 rq->rx_ring[ring_idx].next2fill);
1068 rq->uncommitted[ring_idx] = 0;
1069 }
1070 }
1071
1072 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1073 rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
1074 }
1075
1076 return num_rxd;
1077}
1078
1079
1080static void
1081vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1082 struct vmxnet3_adapter *adapter)
1083{
1084 u32 i, ring_idx;
1085 struct Vmxnet3_RxDesc *rxd;
1086
1087 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1088 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1089 rxd = &rq->rx_ring[ring_idx].base[i].rxd;
1090
1091 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1092 rq->buf_info[ring_idx][i].skb) {
1093 pci_unmap_single(adapter->pdev, rxd->addr,
1094 rxd->len, PCI_DMA_FROMDEVICE);
1095 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1096 rq->buf_info[ring_idx][i].skb = NULL;
1097 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1098 rq->buf_info[ring_idx][i].page) {
1099 pci_unmap_page(adapter->pdev, rxd->addr,
1100 rxd->len, PCI_DMA_FROMDEVICE);
1101 put_page(rq->buf_info[ring_idx][i].page);
1102 rq->buf_info[ring_idx][i].page = NULL;
1103 }
1104 }
1105
1106 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1107 rq->rx_ring[ring_idx].next2fill =
1108 rq->rx_ring[ring_idx].next2comp = 0;
1109 rq->uncommitted[ring_idx] = 0;
1110 }
1111
1112 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1113 rq->comp_ring.next2proc = 0;
1114}
1115
1116
1117void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1118 struct vmxnet3_adapter *adapter)
1119{
1120 int i;
1121 int j;
1122
1123 /* all rx buffers must have already been freed */
1124 for (i = 0; i < 2; i++) {
1125 if (rq->buf_info[i]) {
1126 for (j = 0; j < rq->rx_ring[i].size; j++)
1127 BUG_ON(rq->buf_info[i][j].page != NULL);
1128 }
1129 }
1130
1131
1132 kfree(rq->buf_info[0]);
1133
1134 for (i = 0; i < 2; i++) {
1135 if (rq->rx_ring[i].base) {
1136 pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
1137 * sizeof(struct Vmxnet3_RxDesc),
1138 rq->rx_ring[i].base,
1139 rq->rx_ring[i].basePA);
1140 rq->rx_ring[i].base = NULL;
1141 }
1142 rq->buf_info[i] = NULL;
1143 }
1144
1145 if (rq->comp_ring.base) {
1146 pci_free_consistent(adapter->pdev, rq->comp_ring.size *
1147 sizeof(struct Vmxnet3_RxCompDesc),
1148 rq->comp_ring.base, rq->comp_ring.basePA);
1149 rq->comp_ring.base = NULL;
1150 }
1151}
1152
1153
1154static int
1155vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1156 struct vmxnet3_adapter *adapter)
1157{
1158 int i;
1159
1160 /* initialize buf_info */
1161 for (i = 0; i < rq->rx_ring[0].size; i++) {
1162
1163 /* 1st buf for a pkt is skbuff */
1164 if (i % adapter->rx_buf_per_pkt == 0) {
1165 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1166 rq->buf_info[0][i].len = adapter->skb_buf_size;
 1167 } else { /* subsequent bufs for a pkt are frags */
1168 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1169 rq->buf_info[0][i].len = PAGE_SIZE;
1170 }
1171 }
1172 for (i = 0; i < rq->rx_ring[1].size; i++) {
1173 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1174 rq->buf_info[1][i].len = PAGE_SIZE;
1175 }
1176
1177 /* reset internal state and allocate buffers for both rings */
1178 for (i = 0; i < 2; i++) {
1179 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1180 rq->uncommitted[i] = 0;
1181
1182 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1183 sizeof(struct Vmxnet3_RxDesc));
1184 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1185 }
1186 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1187 adapter) == 0) {
 1188 /* need at least 1 rx buffer for the 1st ring */
1189 return -ENOMEM;
1190 }
1191 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1192
1193 /* reset the comp ring */
1194 rq->comp_ring.next2proc = 0;
1195 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1196 sizeof(struct Vmxnet3_RxCompDesc));
1197 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1198
1199 /* reset rxctx */
1200 rq->rx_ctx.skb = NULL;
1201
1202 /* stats are not reset */
1203 return 0;
1204}
1205
1206
1207static int
1208vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1209{
1210 int i;
1211 size_t sz;
1212 struct vmxnet3_rx_buf_info *bi;
1213
1214 for (i = 0; i < 2; i++) {
1215
1216 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1217 rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
1218 &rq->rx_ring[i].basePA);
1219 if (!rq->rx_ring[i].base) {
1220 printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
1221 adapter->netdev->name, i);
1222 goto err;
1223 }
1224 }
1225
1226 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1227 rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
1228 &rq->comp_ring.basePA);
1229 if (!rq->comp_ring.base) {
1230 printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
1231 adapter->netdev->name);
1232 goto err;
1233 }
1234
1235 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1236 rq->rx_ring[1].size);
1237 bi = kmalloc(sz, GFP_KERNEL);
1238 if (!bi) {
1239 printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
1240 adapter->netdev->name);
1241 goto err;
1242 }
1243 memset(bi, 0, sz);
1244 rq->buf_info[0] = bi;
1245 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1246
1247 return 0;
1248
1249err:
1250 vmxnet3_rq_destroy(rq, adapter);
1251 return -ENOMEM;
1252}
1253
1254
1255static int
1256vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1257{
1258 if (unlikely(adapter->shared->ecr))
1259 vmxnet3_process_events(adapter);
1260
1261 vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
1262 return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
1263}
1264
1265
1266static int
1267vmxnet3_poll(struct napi_struct *napi, int budget)
1268{
1269 struct vmxnet3_adapter *adapter = container_of(napi,
1270 struct vmxnet3_adapter, napi);
1271 int rxd_done;
1272
1273 rxd_done = vmxnet3_do_poll(adapter, budget);
1274
1275 if (rxd_done < budget) {
1276 napi_complete(napi);
1277 vmxnet3_enable_intr(adapter, 0);
1278 }
1279 return rxd_done;
1280}
1281
1282
1283/* Interrupt handler for vmxnet3 */
1284static irqreturn_t
1285vmxnet3_intr(int irq, void *dev_id)
1286{
1287 struct net_device *dev = dev_id;
1288 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1289
1290 if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
1291 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1292 if (unlikely(icr == 0))
1293 /* not ours */
1294 return IRQ_NONE;
1295 }
1296
1297
1298 /* disable intr if needed */
1299 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1300 vmxnet3_disable_intr(adapter, 0);
1301
1302 napi_schedule(&adapter->napi);
1303
1304 return IRQ_HANDLED;
1305}
1306
1307#ifdef CONFIG_NET_POLL_CONTROLLER
1308
1309
1310/* netpoll callback. */
1311static void
1312vmxnet3_netpoll(struct net_device *netdev)
1313{
1314 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1315 int irq;
1316
1317 if (adapter->intr.type == VMXNET3_IT_MSIX)
1318 irq = adapter->intr.msix_entries[0].vector;
1319 else
1320 irq = adapter->pdev->irq;
1321
1322 disable_irq(irq);
1323 vmxnet3_intr(irq, netdev);
1324 enable_irq(irq);
1325}
1326#endif
1327
1328static int
1329vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1330{
1331 int err;
1332
1333 if (adapter->intr.type == VMXNET3_IT_MSIX) {
1334 /* we only use 1 MSI-X vector */
1335 err = request_irq(adapter->intr.msix_entries[0].vector,
1336 vmxnet3_intr, 0, adapter->netdev->name,
1337 adapter->netdev);
1338 } else if (adapter->intr.type == VMXNET3_IT_MSI) {
1339 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1340 adapter->netdev->name, adapter->netdev);
1341 } else {
1342 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1343 IRQF_SHARED, adapter->netdev->name,
1344 adapter->netdev);
1345 }
1346
1347 if (err)
1348 printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
1349 ":%d\n", adapter->netdev->name, adapter->intr.type, err);
1350
1351
1352 if (!err) {
1353 int i;
1354 /* init our intr settings */
1355 for (i = 0; i < adapter->intr.num_intrs; i++)
1356 adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
1357
1358 /* next setup intr index for all intr sources */
1359 adapter->tx_queue.comp_ring.intr_idx = 0;
1360 adapter->rx_queue.comp_ring.intr_idx = 0;
1361 adapter->intr.event_intr_idx = 0;
1362
1363 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
1364 "allocated\n", adapter->netdev->name, adapter->intr.type,
1365 adapter->intr.mask_mode, adapter->intr.num_intrs);
1366 }
1367
1368 return err;
1369}
1370
1371
1372static void
1373vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1374{
1375 BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
1376 adapter->intr.num_intrs <= 0);
1377
1378 switch (adapter->intr.type) {
1379 case VMXNET3_IT_MSIX:
1380 {
1381 int i;
1382
1383 for (i = 0; i < adapter->intr.num_intrs; i++)
1384 free_irq(adapter->intr.msix_entries[i].vector,
1385 adapter->netdev);
1386 break;
1387 }
1388 case VMXNET3_IT_MSI:
1389 free_irq(adapter->pdev->irq, adapter->netdev);
1390 break;
1391 case VMXNET3_IT_INTX:
1392 free_irq(adapter->pdev->irq, adapter->netdev);
1393 break;
1394 default:
1395 BUG_ON(true);
1396 }
1397}
1398
1399
1400static void
1401vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1402{
1403 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1404 struct Vmxnet3_DriverShared *shared = adapter->shared;
1405 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1406
1407 if (grp) {
1408 /* add vlan rx stripping. */
1409 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
1410 int i;
1411 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1412 adapter->vlan_grp = grp;
1413
1414 /* update FEATURES to device */
1415 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
1416 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1417 VMXNET3_CMD_UPDATE_FEATURE);
1418 /*
1419 * Clear entire vfTable; then enable untagged pkts.
1420 * Note: setting one entry in vfTable to non-zero turns
1421 * on VLAN rx filtering.
1422 */
1423 for (i = 0; i < VMXNET3_VFT_SIZE; i++)
1424 vfTable[i] = 0;
1425
1426 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1427 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1428 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1429 } else {
1430 printk(KERN_ERR "%s: vlan_rx_register when device has "
1431 "no NETIF_F_HW_VLAN_RX\n", netdev->name);
1432 }
1433 } else {
1434 /* remove vlan rx stripping. */
1435 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1436 adapter->vlan_grp = NULL;
1437
1438 if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
1439 int i;
1440
1441 for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
1442 /* clear entire vfTable; this also disables
1443 * VLAN rx filtering
1444 */
1445 vfTable[i] = 0;
1446 }
1447 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1448 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1449
1450 /* update FEATURES to device */
1451 devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
1452 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1453 VMXNET3_CMD_UPDATE_FEATURE);
1454 }
1455 }
1456}
1457
1458
1459static void
1460vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1461{
1462 if (adapter->vlan_grp) {
1463 u16 vid;
1464 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1465 bool activeVlan = false;
1466
1467 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1468 if (vlan_group_get_device(adapter->vlan_grp, vid)) {
1469 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1470 activeVlan = true;
1471 }
1472 }
1473 if (activeVlan) {
1474 /* continue to allow untagged pkts */
1475 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1476 }
1477 }
1478}
1479
1480
1481static void
1482vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1483{
1484 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1485 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1486
1487 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1488 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1489 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1490}
1491
1492
1493static void
1494vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1495{
1496 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1497 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1498
1499 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1500 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1501 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1502}
1503
1504
1505static u8 *
1506vmxnet3_copy_mc(struct net_device *netdev)
1507{
1508 u8 *buf = NULL;
1509 u32 sz = netdev->mc_count * ETH_ALEN;
1510
1511 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
1512 if (sz <= 0xffff) {
1513 /* We may be called with BH disabled */
1514 buf = kmalloc(sz, GFP_ATOMIC);
1515 if (buf) {
1516 int i;
1517 struct dev_mc_list *mc = netdev->mc_list;
1518
1519 for (i = 0; i < netdev->mc_count; i++) {
1520 BUG_ON(!mc);
1521 memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
1522 ETH_ALEN);
1523 mc = mc->next;
1524 }
1525 }
1526 }
1527 return buf;
1528}
1529
1530
1531static void
1532vmxnet3_set_mc(struct net_device *netdev)
1533{
1534 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1535 struct Vmxnet3_RxFilterConf *rxConf =
1536 &adapter->shared->devRead.rxFilterConf;
1537 u8 *new_table = NULL;
1538 u32 new_mode = VMXNET3_RXM_UCAST;
1539
1540 if (netdev->flags & IFF_PROMISC)
1541 new_mode |= VMXNET3_RXM_PROMISC;
1542
1543 if (netdev->flags & IFF_BROADCAST)
1544 new_mode |= VMXNET3_RXM_BCAST;
1545
1546 if (netdev->flags & IFF_ALLMULTI)
1547 new_mode |= VMXNET3_RXM_ALL_MULTI;
1548 else
1549 if (netdev->mc_count > 0) {
1550 new_table = vmxnet3_copy_mc(netdev);
1551 if (new_table) {
1552 new_mode |= VMXNET3_RXM_MCAST;
1553 rxConf->mfTableLen = netdev->mc_count *
1554 ETH_ALEN;
1555 rxConf->mfTablePA = virt_to_phys(new_table);
1556 } else {
1557 printk(KERN_INFO "%s: failed to copy mcast list"
1558 ", setting ALL_MULTI\n", netdev->name);
1559 new_mode |= VMXNET3_RXM_ALL_MULTI;
1560 }
1561 }
1562
1563
1564 if (!(new_mode & VMXNET3_RXM_MCAST)) {
1565 rxConf->mfTableLen = 0;
1566 rxConf->mfTablePA = 0;
1567 }
1568
1569 if (new_mode != rxConf->rxMode) {
1570 rxConf->rxMode = new_mode;
1571 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1572 VMXNET3_CMD_UPDATE_RX_MODE);
1573 }
1574
1575 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1576 VMXNET3_CMD_UPDATE_MAC_FILTERS);
1577
1578 kfree(new_table);
1579}
1580
1581
1582/*
1583 * Set up driver_shared based on settings in adapter.
1584 */
1585
1586static void
1587vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1588{
1589 struct Vmxnet3_DriverShared *shared = adapter->shared;
1590 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1591 struct Vmxnet3_TxQueueConf *tqc;
1592 struct Vmxnet3_RxQueueConf *rqc;
1593 int i;
1594
1595 memset(shared, 0, sizeof(*shared));
1596
1597 /* driver settings */
1598 shared->magic = VMXNET3_REV1_MAGIC;
1599 devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
1600 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
1601 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
1602 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
1603 devRead->misc.driverInfo.vmxnet3RevSpt = 1;
1604 devRead->misc.driverInfo.uptVerSpt = 1;
1605
1606 devRead->misc.ddPA = virt_to_phys(adapter);
1607 devRead->misc.ddLen = sizeof(struct vmxnet3_adapter);
1608
1609 /* set up feature flags */
1610 if (adapter->rxcsum)
1611 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
1612
1613 if (adapter->lro) {
1614 devRead->misc.uptFeatures |= UPT1_F_LRO;
1615 devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS;
1616 }
1617 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
1618 && adapter->vlan_grp) {
1619 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
1620 }
1621
1622 devRead->misc.mtu = adapter->netdev->mtu;
1623 devRead->misc.queueDescPA = adapter->queue_desc_pa;
1624 devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) +
1625 sizeof(struct Vmxnet3_RxQueueDesc);
1626
1627 /* tx queue settings */
1628 BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
1629
1630 devRead->misc.numTxQueues = 1;
1631 tqc = &adapter->tqd_start->conf;
1632 tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA;
1633 tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA;
1634 tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA;
1635 tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info);
1636 tqc->txRingSize = adapter->tx_queue.tx_ring.size;
1637 tqc->dataRingSize = adapter->tx_queue.data_ring.size;
1638 tqc->compRingSize = adapter->tx_queue.comp_ring.size;
1639 tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) *
1640 tqc->txRingSize;
1641 tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;
1642
1643 /* rx queue settings */
1644 devRead->misc.numRxQueues = 1;
1645 rqc = &adapter->rqd_start->conf;
1646 rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA;
1647 rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA;
1648 rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA;
1649 rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info);
1650 rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size;
1651 rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size;
1652 rqc->compRingSize = adapter->rx_queue.comp_ring.size;
1653 rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) *
1654 (rqc->rxRingSize[0] + rqc->rxRingSize[1]);
1655 rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;
1656
1657 /* intr settings */
1658 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
1659 VMXNET3_IMM_AUTO;
1660 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
1661 for (i = 0; i < adapter->intr.num_intrs; i++)
1662 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
1663
1664 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
1665
1666 /* rx filter settings */
1667 devRead->rxFilterConf.rxMode = 0;
1668 vmxnet3_restore_vlan(adapter);
1669 /* the rest are already zeroed */
1670}
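
The gosBits field above is derived purely from the guest's pointer width, evaluated at compile time. A standalone illustration of the sizeof(void *) trick:

    #include <stdio.h>

    int main(void)
    {
            int gos_bits = sizeof(void *) == 4 ? 32 : 64;

            printf("guest OS bitness: %d\n", gos_bits);
            return 0;
    }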
1671
1672
1673int
1674vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1675{
1676 int err;
1677 u32 ret;
1678
1679 dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
1680 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
1681 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
1682 adapter->rx_queue.rx_ring[0].size,
1683 adapter->rx_queue.rx_ring[1].size);
1684
1685 vmxnet3_tq_init(&adapter->tx_queue, adapter);
1686 err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
1687 if (err) {
1688 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
1689 adapter->netdev->name, err);
1690 goto rq_err;
1691 }
1692
1693 err = vmxnet3_request_irqs(adapter);
1694 if (err) {
1695 printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
1696 adapter->netdev->name, err);
1697 goto irq_err;
1698 }
1699
1700 vmxnet3_setup_driver_shared(adapter);
1701
1702 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
1703 VMXNET3_GET_ADDR_LO(adapter->shared_pa));
1704 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
1705 VMXNET3_GET_ADDR_HI(adapter->shared_pa));
1706
1707 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1708 VMXNET3_CMD_ACTIVATE_DEV);
1709 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
1710
1711 if (ret != 0) {
1712 printk(KERN_ERR "Failed to activate dev %s: error %u\n",
1713 adapter->netdev->name, ret);
1714 err = -EINVAL;
1715 goto activate_err;
1716 }
1717 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
1718 adapter->rx_queue.rx_ring[0].next2fill);
1719 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
1720 adapter->rx_queue.rx_ring[1].next2fill);
1721
1722	/* Apply the rx filter settings last. */
1723 vmxnet3_set_mc(adapter->netdev);
1724
1725 /*
1726 * Check link state when first activating device. It will start the
1727 * tx queue if the link is up.
1728 */
1729 vmxnet3_check_link(adapter);
1730
1731 napi_enable(&adapter->napi);
1732 vmxnet3_enable_all_intrs(adapter);
1733 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
1734 return 0;
1735
1736activate_err:
1737 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
1738 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
1739 vmxnet3_free_irqs(adapter);
1740irq_err:
1741rq_err:
1742 /* free up buffers we allocated */
1743 vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
1744 return err;
1745}
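
The activation itself is a short BAR1 handshake: publish the shared area's physical address as two 32-bit halves, issue ACTIVATE_DEV, and read the command register back, where zero means success. A condensed sketch of just that sequence, reusing the register and address macros defined in vmxnet3_int.h later in this patch (activate_sketch itself is illustrative, not part of the driver):

    static int activate_sketch(struct vmxnet3_adapter *adapter)
    {
            VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
                                   VMXNET3_GET_ADDR_LO(adapter->shared_pa));
            VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
                                   VMXNET3_GET_ADDR_HI(adapter->shared_pa));
            VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                   VMXNET3_CMD_ACTIVATE_DEV);
            /* the device writes 0 into REG_CMD on success */
            return VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD) ? -EINVAL : 0;
    }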
1746
1747
1748void
1749vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
1750{
1751 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
1752}
1753
1754
1755int
1756vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
1757{
1758 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
1759 return 0;
1760
1761
1762 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1763 VMXNET3_CMD_QUIESCE_DEV);
1764 vmxnet3_disable_all_intrs(adapter);
1765
1766 napi_disable(&adapter->napi);
1767 netif_tx_disable(adapter->netdev);
1768 adapter->link_speed = 0;
1769 netif_carrier_off(adapter->netdev);
1770
1771 vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
1772 vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
1773 vmxnet3_free_irqs(adapter);
1774 return 0;
1775}
1776
1777
1778static void
1779vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
1780{
1781 u32 tmp;
1782
1783 tmp = *(u32 *)mac;
1784 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
1785
1786 tmp = (mac[5] << 8) | mac[4];
1787 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
1788}
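
The MAC address is programmed as two 32-bit registers: bytes 0-3 go to MACL and bytes 4-5 to the low half of MACH. The driver's *(u32 *)mac load depends on guest byte order matching what the device expects; the userspace sketch below reproduces the same byte layout with memcpy:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            uint8_t mac[6] = { 0x00, 0x0c, 0x29, 0xaa, 0xbb, 0xcc };
            uint32_t macl, mach;

            memcpy(&macl, mac, 4);              /* bytes 0..3 -> MACL */
            mach = (mac[5] << 8) | mac[4];      /* bytes 4..5 -> MACH */
            printf("MACL=0x%08x MACH=0x%08x\n", (unsigned)macl, (unsigned)mach);
            return 0;
    }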
1789
1790
1791static int
1792vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
1793{
1794 struct sockaddr *addr = p;
1795 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1796
1797 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1798 vmxnet3_write_mac_addr(adapter, addr->sa_data);
1799
1800 return 0;
1801}
1802
1803
1804/* ==================== initialization and cleanup routines ============ */
1805
1806static int
1807vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
1808{
1809 int err;
1810 unsigned long mmio_start, mmio_len;
1811 struct pci_dev *pdev = adapter->pdev;
1812
1813 err = pci_enable_device(pdev);
1814 if (err) {
1815 printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
1816 pci_name(pdev), err);
1817 return err;
1818 }
1819
1820 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
1821 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
1822 printk(KERN_ERR "pci_set_consistent_dma_mask failed "
1823 "for adapter %s\n", pci_name(pdev));
1824 err = -EIO;
1825 goto err_set_mask;
1826 }
1827 *dma64 = true;
1828 } else {
1829 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
1830 printk(KERN_ERR "pci_set_dma_mask failed for adapter "
1831 "%s\n", pci_name(pdev));
1832 err = -EIO;
1833 goto err_set_mask;
1834 }
1835 *dma64 = false;
1836 }
1837
1838 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
1839 vmxnet3_driver_name);
1840 if (err) {
1841 printk(KERN_ERR "Failed to request region for adapter %s: "
1842 "error %d\n", pci_name(pdev), err);
1843 goto err_set_mask;
1844 }
1845
1846 pci_set_master(pdev);
1847
1848 mmio_start = pci_resource_start(pdev, 0);
1849 mmio_len = pci_resource_len(pdev, 0);
1850 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
1851 if (!adapter->hw_addr0) {
1852 printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
1853 pci_name(pdev));
1854 err = -EIO;
1855 goto err_ioremap;
1856 }
1857
1858 mmio_start = pci_resource_start(pdev, 1);
1859 mmio_len = pci_resource_len(pdev, 1);
1860 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
1861 if (!adapter->hw_addr1) {
1862 printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
1863 pci_name(pdev));
1864 err = -EIO;
1865 goto err_bar1;
1866 }
1867 return 0;
1868
1869err_bar1:
1870 iounmap(adapter->hw_addr0);
1871err_ioremap:
1872 pci_release_selected_regions(pdev, (1 << 2) - 1);
1873err_set_mask:
1874 pci_disable_device(pdev);
1875 return err;
1876}
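
The DMA setup above tries a 64-bit mask for both streaming and coherent mappings, falling back to 32-bit. A tidier standalone variant of the same pattern; note that unlike the code above, this sketch also retries with 32-bit when only the 64-bit coherent mask fails:

    static int set_dma_masks_sketch(struct pci_dev *pdev, bool *dma64)
    {
            if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
                pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
                    *dma64 = true;
                    return 0;
            }
            if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
                    *dma64 = false;
                    return 0;
            }
            return -EIO;                    /* no usable DMA mask */
    }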
1877
1878
1879static void
1880vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
1881{
1882 BUG_ON(!adapter->pdev);
1883
1884 iounmap(adapter->hw_addr0);
1885 iounmap(adapter->hw_addr1);
1886 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
1887 pci_disable_device(adapter->pdev);
1888}
1889
1890
1891static void
1892vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
1893{
1894 size_t sz;
1895
1896 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
1897 VMXNET3_MAX_ETH_HDR_SIZE) {
1898 adapter->skb_buf_size = adapter->netdev->mtu +
1899 VMXNET3_MAX_ETH_HDR_SIZE;
1900 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
1901 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
1902
1903 adapter->rx_buf_per_pkt = 1;
1904 } else {
1905 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
1906 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
1907 VMXNET3_MAX_ETH_HDR_SIZE;
1908 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
1909 }
1910
1911 /*
1912 * for simplicity, force the ring0 size to be a multiple of
1913 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
1914 */
1915 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
1916 adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
1917 sz - 1) / sz * sz;
1918 adapter->rx_queue.rx_ring[0].size = min_t(u32,
1919 adapter->rx_queue.rx_ring[0].size,
1920 VMXNET3_RX_RING_MAX_SIZE / sz * sz);
1921}
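
A worked example of the ring0 rounding above, assuming VMXNET3_RING_SIZE_ALIGN is 32 as defined in vmxnet3_defs.h (not part of this hunk) and rx_buf_per_pkt is 3 for a jumbo MTU, so sz = 96 and a requested ring size of 256 rounds up to 288:

    #include <stdio.h>

    int main(void)
    {
            unsigned int sz = 3 * 32;       /* rx_buf_per_pkt * ALIGN */
            unsigned int size = 256;        /* requested ring0 size */

            size = (size + sz - 1) / sz * sz;
            printf("ring0 size: %u\n", size);   /* prints 288 */
            return 0;
    }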
1922
1923
1924int
1925vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
1926 u32 rx_ring_size, u32 rx_ring2_size)
1927{
1928 int err;
1929
1930 adapter->tx_queue.tx_ring.size = tx_ring_size;
1931 adapter->tx_queue.data_ring.size = tx_ring_size;
1932 adapter->tx_queue.comp_ring.size = tx_ring_size;
1933 adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
1934 adapter->tx_queue.stopped = true;
1935 err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
1936 if (err)
1937 return err;
1938
1939 adapter->rx_queue.rx_ring[0].size = rx_ring_size;
1940 adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
1941 vmxnet3_adjust_rx_ring_size(adapter);
1942 adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size +
1943 adapter->rx_queue.rx_ring[1].size;
1944 adapter->rx_queue.qid = 0;
1945 adapter->rx_queue.qid2 = 1;
1946 adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
1947 err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
1948 if (err)
1949 vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
1950
1951 return err;
1952}
1953
1954static int
1955vmxnet3_open(struct net_device *netdev)
1956{
1957 struct vmxnet3_adapter *adapter;
1958 int err;
1959
1960 adapter = netdev_priv(netdev);
1961
1962 spin_lock_init(&adapter->tx_queue.tx_lock);
1963
1964 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
1965 VMXNET3_DEF_RX_RING_SIZE,
1966 VMXNET3_DEF_RX_RING_SIZE);
1967 if (err)
1968 goto queue_err;
1969
1970 err = vmxnet3_activate_dev(adapter);
1971 if (err)
1972 goto activate_err;
1973
1974 return 0;
1975
1976activate_err:
1977 vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
1978 vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
1979queue_err:
1980 return err;
1981}
1982
1983
1984static int
1985vmxnet3_close(struct net_device *netdev)
1986{
1987 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1988
1989 /*
1990 * Reset_work may be in the middle of resetting the device, wait for its
1991 * completion.
1992 */
1993 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
1994 msleep(1);
1995
1996 vmxnet3_quiesce_dev(adapter);
1997
1998 vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
1999 vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
2000
2001 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2002
2003
2004 return 0;
2005}
2006
2007
2008void
2009vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2010{
2011 /*
2012 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2013 * vmxnet3_close() will deadlock.
2014 */
2015 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2016
2017 /* we need to enable NAPI, otherwise dev_close will deadlock */
2018 napi_enable(&adapter->napi);
2019 dev_close(adapter->netdev);
2020}
2021
2022
2023static int
2024vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2025{
2026 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2027 int err = 0;
2028
2029 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2030 return -EINVAL;
2031
2032 if (new_mtu > 1500 && !adapter->jumbo_frame)
2033 return -EINVAL;
2034
2035 netdev->mtu = new_mtu;
2036
2037 /*
2038 * Reset_work may be in the middle of resetting the device, wait for its
2039 * completion.
2040 */
2041 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2042 msleep(1);
2043
2044 if (netif_running(netdev)) {
2045 vmxnet3_quiesce_dev(adapter);
2046 vmxnet3_reset_dev(adapter);
2047
2048 /* we need to re-create the rx queue based on the new mtu */
2049 vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
2050 vmxnet3_adjust_rx_ring_size(adapter);
2051 adapter->rx_queue.comp_ring.size =
2052 adapter->rx_queue.rx_ring[0].size +
2053 adapter->rx_queue.rx_ring[1].size;
2054 err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
2055 if (err) {
2056 printk(KERN_ERR "%s: failed to re-create rx queue,"
2057 " error %d. Closing it.\n", netdev->name, err);
2058 goto out;
2059 }
2060
2061 err = vmxnet3_activate_dev(adapter);
2062 if (err) {
2063 printk(KERN_ERR "%s: failed to re-activate, error %d. "
2064 "Closing it\n", netdev->name, err);
2065 goto out;
2066 }
2067 }
2068
2069out:
2070 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2071 if (err)
2072 vmxnet3_force_close(adapter);
2073
2074 return err;
2075}
2076
2077
2078static void
2079vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2080{
2081 struct net_device *netdev = adapter->netdev;
2082
2083 netdev->features = NETIF_F_SG |
2084 NETIF_F_HW_CSUM |
2085 NETIF_F_HW_VLAN_TX |
2086 NETIF_F_HW_VLAN_RX |
2087 NETIF_F_HW_VLAN_FILTER |
2088 NETIF_F_TSO |
2089 NETIF_F_TSO6 |
2090 NETIF_F_LRO;
2091
2092 printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");
2093
2094 adapter->rxcsum = true;
2095 adapter->jumbo_frame = true;
2096 adapter->lro = true;
2097
2098 if (dma64) {
2099 netdev->features |= NETIF_F_HIGHDMA;
2100 printk(" highDMA");
2101 }
2102
2103 netdev->vlan_features = netdev->features;
2104 printk("\n");
2105}
2106
2107
2108static void
2109vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2110{
2111 u32 tmp;
2112
2113 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
2114 *(u32 *)mac = tmp;
2115
2116 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
2117 mac[4] = tmp & 0xff;
2118 mac[5] = (tmp >> 8) & 0xff;
2119}
2120
2121
2122static void
2123vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2124{
2125 u32 cfg;
2126
2127 /* intr settings */
2128 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2129 VMXNET3_CMD_GET_CONF_INTR);
2130 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2131 adapter->intr.type = cfg & 0x3;
2132 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2133
2134 if (adapter->intr.type == VMXNET3_IT_AUTO) {
2135 int err;
2136
2137 adapter->intr.msix_entries[0].entry = 0;
2138 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2139 VMXNET3_LINUX_MAX_MSIX_VECT);
2140 if (!err) {
2141 adapter->intr.num_intrs = 1;
2142 adapter->intr.type = VMXNET3_IT_MSIX;
2143 return;
2144 }
2145
2146 err = pci_enable_msi(adapter->pdev);
2147 if (!err) {
2148 adapter->intr.num_intrs = 1;
2149 adapter->intr.type = VMXNET3_IT_MSI;
2150 return;
2151 }
2152 }
2153
2154 adapter->intr.type = VMXNET3_IT_INTX;
2155
2156 /* INT-X related setting */
2157 adapter->intr.num_intrs = 1;
2158}
2159
2160
2161static void
2162vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
2163{
2164 if (adapter->intr.type == VMXNET3_IT_MSIX)
2165 pci_disable_msix(adapter->pdev);
2166 else if (adapter->intr.type == VMXNET3_IT_MSI)
2167 pci_disable_msi(adapter->pdev);
2168 else
2169 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
2170}
2171
2172
2173static void
2174vmxnet3_tx_timeout(struct net_device *netdev)
2175{
2176 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2177 adapter->tx_timeout_count++;
2178
2179 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
2180 schedule_work(&adapter->work);
2181}
2182
2183
2184static void
2185vmxnet3_reset_work(struct work_struct *data)
2186{
2187 struct vmxnet3_adapter *adapter;
2188
2189 adapter = container_of(data, struct vmxnet3_adapter, work);
2190
2191 /* if another thread is resetting the device, no need to proceed */
2192 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2193 return;
2194
2195 /* if the device is closed, we must leave it alone */
2196 if (netif_running(adapter->netdev)) {
2197 printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
2198 vmxnet3_quiesce_dev(adapter);
2199 vmxnet3_reset_dev(adapter);
2200 vmxnet3_activate_dev(adapter);
2201 } else {
2202 printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
2203 }
2204
2205 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2206}
2207
2208
2209static int __devinit
2210vmxnet3_probe_device(struct pci_dev *pdev,
2211 const struct pci_device_id *id)
2212{
2213 static const struct net_device_ops vmxnet3_netdev_ops = {
2214 .ndo_open = vmxnet3_open,
2215 .ndo_stop = vmxnet3_close,
2216 .ndo_start_xmit = vmxnet3_xmit_frame,
2217 .ndo_set_mac_address = vmxnet3_set_mac_addr,
2218 .ndo_change_mtu = vmxnet3_change_mtu,
2219 .ndo_get_stats = vmxnet3_get_stats,
2220 .ndo_tx_timeout = vmxnet3_tx_timeout,
2221 .ndo_set_multicast_list = vmxnet3_set_mc,
2222 .ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
2223 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
2224 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
2225#ifdef CONFIG_NET_POLL_CONTROLLER
2226 .ndo_poll_controller = vmxnet3_netpoll,
2227#endif
2228 };
2229 int err;
2230	bool dma64 = false; /* initialized only to quiet a gcc warning */
2231 u32 ver;
2232 struct net_device *netdev;
2233 struct vmxnet3_adapter *adapter;
2234 u8 mac[ETH_ALEN];
2235
2236 netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
2237 if (!netdev) {
2238 printk(KERN_ERR "Failed to alloc ethernet device for adapter "
2239 "%s\n", pci_name(pdev));
2240 return -ENOMEM;
2241 }
2242
2243 pci_set_drvdata(pdev, netdev);
2244 adapter = netdev_priv(netdev);
2245 adapter->netdev = netdev;
2246 adapter->pdev = pdev;
2247
2248 adapter->shared = pci_alloc_consistent(adapter->pdev,
2249 sizeof(struct Vmxnet3_DriverShared),
2250 &adapter->shared_pa);
2251 if (!adapter->shared) {
2252 printk(KERN_ERR "Failed to allocate memory for %s\n",
2253 pci_name(pdev));
2254 err = -ENOMEM;
2255 goto err_alloc_shared;
2256 }
2257
2258 adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
2259 sizeof(struct Vmxnet3_TxQueueDesc) +
2260 sizeof(struct Vmxnet3_RxQueueDesc),
2261 &adapter->queue_desc_pa);
2262
2263 if (!adapter->tqd_start) {
2264 printk(KERN_ERR "Failed to allocate memory for %s\n",
2265 pci_name(pdev));
2266 err = -ENOMEM;
2267 goto err_alloc_queue_desc;
2268 }
2269 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
2270 + 1);
2271
2272 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2273 if (adapter->pm_conf == NULL) {
2274 printk(KERN_ERR "Failed to allocate memory for %s\n",
2275 pci_name(pdev));
2276 err = -ENOMEM;
2277 goto err_alloc_pm;
2278 }
2279
2280 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
2281 if (err < 0)
2282 goto err_alloc_pci;
2283
2284 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
2285 if (ver & 1) {
2286 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
2287 } else {
2288 printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
2289 " %s\n", ver, pci_name(pdev));
2290 err = -EBUSY;
2291 goto err_ver;
2292 }
2293
2294 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
2295 if (ver & 1) {
2296 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
2297 } else {
2298 printk(KERN_ERR "Incompatible upt version (0x%x) for "
2299 "adapter %s\n", ver, pci_name(pdev));
2300 err = -EBUSY;
2301 goto err_ver;
2302 }
2303
2304 vmxnet3_declare_features(adapter, dma64);
2305
2306 adapter->dev_number = atomic_read(&devices_found);
2307 vmxnet3_alloc_intr_resources(adapter);
2308
2309 vmxnet3_read_mac_addr(adapter, mac);
2310 memcpy(netdev->dev_addr, mac, netdev->addr_len);
2311
2312 netdev->netdev_ops = &vmxnet3_netdev_ops;
2313 netdev->watchdog_timeo = 5 * HZ;
2314 vmxnet3_set_ethtool_ops(netdev);
2315
2316 INIT_WORK(&adapter->work, vmxnet3_reset_work);
2317
2318 netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
2319 SET_NETDEV_DEV(netdev, &pdev->dev);
2320 err = register_netdev(netdev);
2321
2322 if (err) {
2323 printk(KERN_ERR "Failed to register adapter %s\n",
2324 pci_name(pdev));
2325 goto err_register;
2326 }
2327
2328 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2329 atomic_inc(&devices_found);
2330 return 0;
2331
2332err_register:
2333 vmxnet3_free_intr_resources(adapter);
2334err_ver:
2335 vmxnet3_free_pci_resources(adapter);
2336err_alloc_pci:
2337 kfree(adapter->pm_conf);
2338err_alloc_pm:
2339 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
2340 sizeof(struct Vmxnet3_RxQueueDesc),
2341 adapter->tqd_start, adapter->queue_desc_pa);
2342err_alloc_queue_desc:
2343 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2344 adapter->shared, adapter->shared_pa);
2345err_alloc_shared:
2346 pci_set_drvdata(pdev, NULL);
2347 free_netdev(netdev);
2348 return err;
2349}
2350
2351
2352static void __devexit
2353vmxnet3_remove_device(struct pci_dev *pdev)
2354{
2355 struct net_device *netdev = pci_get_drvdata(pdev);
2356 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2357
2358 flush_scheduled_work();
2359
2360 unregister_netdev(netdev);
2361
2362 vmxnet3_free_intr_resources(adapter);
2363 vmxnet3_free_pci_resources(adapter);
2364 kfree(adapter->pm_conf);
2365 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
2366 sizeof(struct Vmxnet3_RxQueueDesc),
2367 adapter->tqd_start, adapter->queue_desc_pa);
2368 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2369 adapter->shared, adapter->shared_pa);
2370 free_netdev(netdev);
2371}
2372
2373
2374#ifdef CONFIG_PM
2375
2376static int
2377vmxnet3_suspend(struct device *device)
2378{
2379 struct pci_dev *pdev = to_pci_dev(device);
2380 struct net_device *netdev = pci_get_drvdata(pdev);
2381 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2382 struct Vmxnet3_PMConf *pmConf;
2383 struct ethhdr *ehdr;
2384 struct arphdr *ahdr;
2385 u8 *arpreq;
2386 struct in_device *in_dev;
2387 struct in_ifaddr *ifa;
2388 int i = 0;
2389
2390 if (!netif_running(netdev))
2391 return 0;
2392
2393 vmxnet3_disable_all_intrs(adapter);
2394 vmxnet3_free_irqs(adapter);
2395 vmxnet3_free_intr_resources(adapter);
2396
2397 netif_device_detach(netdev);
2398 netif_stop_queue(netdev);
2399
2400 /* Create wake-up filters. */
2401 pmConf = adapter->pm_conf;
2402 memset(pmConf, 0, sizeof(*pmConf));
2403
2404 if (adapter->wol & WAKE_UCAST) {
2405 pmConf->filters[i].patternSize = ETH_ALEN;
2406 pmConf->filters[i].maskSize = 1;
2407 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
2408 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
2409
2410 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
2411 i++;
2412 }
2413
2414 if (adapter->wol & WAKE_ARP) {
2415 in_dev = in_dev_get(netdev);
2416 if (!in_dev)
2417 goto skip_arp;
2418
2419 ifa = (struct in_ifaddr *)in_dev->ifa_list;
2420		if (!ifa) {
2421			/* drop the in_dev reference before bailing out */
			in_dev_put(in_dev);
			goto skip_arp;
		}
2422
2423 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
2424 sizeof(struct arphdr) + /* ARP header */
2425 2 * ETH_ALEN + /* 2 Ethernet addresses*/
2426 2 * sizeof(u32); /*2 IPv4 addresses */
2427 pmConf->filters[i].maskSize =
2428 (pmConf->filters[i].patternSize - 1) / 8 + 1;
2429
2430 /* ETH_P_ARP in Ethernet header. */
2431 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
2432 ehdr->h_proto = htons(ETH_P_ARP);
2433
2434 /* ARPOP_REQUEST in ARP header. */
2435 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
2436 ahdr->ar_op = htons(ARPOP_REQUEST);
2437 arpreq = (u8 *)(ahdr + 1);
2438
2439		/* The unicast IPv4 address in the 'tip' field. */
2440 arpreq += 2 * ETH_ALEN + sizeof(u32);
2441 *(u32 *)arpreq = ifa->ifa_address;
2442
2443 /* The mask for the relevant bits. */
2444 pmConf->filters[i].mask[0] = 0x00;
2445 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
2446 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
2447 pmConf->filters[i].mask[3] = 0x00;
2448 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
2449 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
2450 in_dev_put(in_dev);
2451
2452 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
2453 i++;
2454 }
2455
2456skip_arp:
2457 if (adapter->wol & WAKE_MAGIC)
2458 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
2459
2460 pmConf->numFilters = i;
2461
2462 adapter->shared->devRead.pmConfDesc.confVer = 1;
2463 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
2464 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
2465
2466 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2467 VMXNET3_CMD_UPDATE_PMCFG);
2468
2469 pci_save_state(pdev);
2470 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
2471 adapter->wol);
2472 pci_disable_device(pdev);
2473 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
2474
2475 return 0;
2476}
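
The wake-up filter mask holds one bit per pattern byte, hence the (patternSize - 1) / 8 + 1 sizing. For the ARP filter built above, the pattern is 14 bytes of Ethernet header, 8 of ARP header (sizeof(struct arphdr)), 12 for the two MACs and 8 for the two IPv4 addresses, 42 bytes in all, giving a 6-byte mask:

    #include <stdio.h>

    int main(void)
    {
            unsigned int pattern = 14 + 8 + 2 * 6 + 2 * 4;  /* 42 bytes */
            unsigned int mask = (pattern - 1) / 8 + 1;      /* 6 bytes */

            printf("pattern=%u mask=%u\n", pattern, mask);
            return 0;
    }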
2477
2478
2479static int
2480vmxnet3_resume(struct device *device)
2481{
2482 int err;
2483 struct pci_dev *pdev = to_pci_dev(device);
2484 struct net_device *netdev = pci_get_drvdata(pdev);
2485 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2486 struct Vmxnet3_PMConf *pmConf;
2487
2488 if (!netif_running(netdev))
2489 return 0;
2490
2491 /* Destroy wake-up filters. */
2492 pmConf = adapter->pm_conf;
2493 memset(pmConf, 0, sizeof(*pmConf));
2494
2495 adapter->shared->devRead.pmConfDesc.confVer = 1;
2496 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
2497 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
2498
2499 netif_device_attach(netdev);
2500 pci_set_power_state(pdev, PCI_D0);
2501 pci_restore_state(pdev);
2502 err = pci_enable_device_mem(pdev);
2503 if (err != 0)
2504 return err;
2505
2506 pci_enable_wake(pdev, PCI_D0, 0);
2507
2508 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2509 VMXNET3_CMD_UPDATE_PMCFG);
2510 vmxnet3_alloc_intr_resources(adapter);
2511 vmxnet3_request_irqs(adapter);
2512 vmxnet3_enable_all_intrs(adapter);
2513
2514 return 0;
2515}
2516
2517static struct dev_pm_ops vmxnet3_pm_ops = {
2518 .suspend = vmxnet3_suspend,
2519 .resume = vmxnet3_resume,
2520};
2521#endif
2522
2523static struct pci_driver vmxnet3_driver = {
2524 .name = vmxnet3_driver_name,
2525 .id_table = vmxnet3_pciid_table,
2526 .probe = vmxnet3_probe_device,
2527 .remove = __devexit_p(vmxnet3_remove_device),
2528#ifdef CONFIG_PM
2529 .driver.pm = &vmxnet3_pm_ops,
2530#endif
2531};
2532
2533
2534static int __init
2535vmxnet3_init_module(void)
2536{
2537 printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
2538 VMXNET3_DRIVER_VERSION_REPORT);
2539 return pci_register_driver(&vmxnet3_driver);
2540}
2541
2542module_init(vmxnet3_init_module);
2543
2544
2545static void
2546vmxnet3_exit_module(void)
2547{
2548 pci_unregister_driver(&vmxnet3_driver);
2549}
2550
2551module_exit(vmxnet3_exit_module);
2552
2553MODULE_AUTHOR("VMware, Inc.");
2554MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
2555MODULE_LICENSE("GPL v2");
2556MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
new file mode 100644
index 000000000000..c2c15e4cafc7
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -0,0 +1,566 @@
1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
24 *
25 */
26
27
28#include "vmxnet3_int.h"
29
30struct vmxnet3_stat_desc {
31 char desc[ETH_GSTRING_LEN];
32 int offset;
33};
34
35
36static u32
37vmxnet3_get_rx_csum(struct net_device *netdev)
38{
39 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
40 return adapter->rxcsum;
41}
42
43
44static int
45vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
46{
47 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
48
49 if (adapter->rxcsum != val) {
50 adapter->rxcsum = val;
51 if (netif_running(netdev)) {
52 if (val)
53 adapter->shared->devRead.misc.uptFeatures |=
54 UPT1_F_RXCSUM;
55 else
56 adapter->shared->devRead.misc.uptFeatures &=
57 ~UPT1_F_RXCSUM;
58
59 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
60 VMXNET3_CMD_UPDATE_FEATURE);
61 }
62 }
63 return 0;
64}
65
66
67/* per tq stats maintained by the device */
68static const struct vmxnet3_stat_desc
69vmxnet3_tq_dev_stats[] = {
70 /* description, offset */
71 { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
72 { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
73 { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
74 { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
75 { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
76 { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
77 { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
78 { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
79 { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
80 { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
81};
82
83/* per tq stats maintained by the driver */
84static const struct vmxnet3_stat_desc
85vmxnet3_tq_driver_stats[] = {
86 /* description, offset */
87 {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
88 drop_total) },
89 { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
90 drop_too_many_frags) },
91 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
92 drop_oversized_hdr) },
93 { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
94 drop_hdr_inspect_err) },
95 { " tso", offsetof(struct vmxnet3_tq_driver_stats,
96 drop_tso) },
97 { "ring full", offsetof(struct vmxnet3_tq_driver_stats,
98 tx_ring_full) },
99 { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
100 linearized) },
101 { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
102 copy_skb_header) },
103 { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
104 oversized_hdr) },
105};
106
107/* per rq stats maintained by the device */
108static const struct vmxnet3_stat_desc
109vmxnet3_rq_dev_stats[] = {
110 { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
111	{ "LRO bytes rx",  offsetof(struct UPT1_RxStats, LROBytesRxOK) },
112 { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
113 { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
114 { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
115 { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
116 { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
117 { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
118 { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
119 { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
120};
121
122/* per rq stats maintained by the driver */
123static const struct vmxnet3_stat_desc
124vmxnet3_rq_driver_stats[] = {
125 /* description, offset */
126 { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
127 drop_total) },
128 { " err", offsetof(struct vmxnet3_rq_driver_stats,
129 drop_err) },
130 { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
131 drop_fcs) },
132 { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
133 rx_buf_alloc_failure) },
134};
135
136/* global stats maintained by the driver */
137static const struct vmxnet3_stat_desc
138vmxnet3_global_stats[] = {
139 /* description, offset */
140 { "tx timeout count", offsetof(struct vmxnet3_adapter,
141 tx_timeout_count) }
142};
143
144
145struct net_device_stats *
146vmxnet3_get_stats(struct net_device *netdev)
147{
148 struct vmxnet3_adapter *adapter;
149 struct vmxnet3_tq_driver_stats *drvTxStats;
150 struct vmxnet3_rq_driver_stats *drvRxStats;
151 struct UPT1_TxStats *devTxStats;
152 struct UPT1_RxStats *devRxStats;
153 struct net_device_stats *net_stats = &netdev->stats;
154
155 adapter = netdev_priv(netdev);
156
157 /* Collect the dev stats into the shared area */
158 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
159
160 /* Assuming that we have a single queue device */
161 devTxStats = &adapter->tqd_start->stats;
162 devRxStats = &adapter->rqd_start->stats;
163
164 /* Get access to the driver stats per queue */
165 drvTxStats = &adapter->tx_queue.stats;
166 drvRxStats = &adapter->rx_queue.stats;
167
168 memset(net_stats, 0, sizeof(*net_stats));
169
170 net_stats->rx_packets = devRxStats->ucastPktsRxOK +
171 devRxStats->mcastPktsRxOK +
172 devRxStats->bcastPktsRxOK;
173
174 net_stats->tx_packets = devTxStats->ucastPktsTxOK +
175 devTxStats->mcastPktsTxOK +
176 devTxStats->bcastPktsTxOK;
177
178 net_stats->rx_bytes = devRxStats->ucastBytesRxOK +
179 devRxStats->mcastBytesRxOK +
180 devRxStats->bcastBytesRxOK;
181
182 net_stats->tx_bytes = devTxStats->ucastBytesTxOK +
183 devTxStats->mcastBytesTxOK +
184 devTxStats->bcastBytesTxOK;
185
186 net_stats->rx_errors = devRxStats->pktsRxError;
187 net_stats->tx_errors = devTxStats->pktsTxError;
188 net_stats->rx_dropped = drvRxStats->drop_total;
189 net_stats->tx_dropped = drvTxStats->drop_total;
190 net_stats->multicast = devRxStats->mcastPktsRxOK;
191
192 return net_stats;
193}
194
195static int
196vmxnet3_get_sset_count(struct net_device *netdev, int sset)
197{
198 switch (sset) {
199 case ETH_SS_STATS:
200 return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
201 ARRAY_SIZE(vmxnet3_tq_driver_stats) +
202 ARRAY_SIZE(vmxnet3_rq_dev_stats) +
203 ARRAY_SIZE(vmxnet3_rq_driver_stats) +
204 ARRAY_SIZE(vmxnet3_global_stats);
205 default:
206 return -EOPNOTSUPP;
207 }
208}
209
210
211static int
212vmxnet3_get_regs_len(struct net_device *netdev)
213{
214 return 20 * sizeof(u32);
215}
216
217
218static void
219vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
220{
221 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
222
223 strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
224 drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
225
226 strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
227 sizeof(drvinfo->version));
228	drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
229
230 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
231 drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
232
233 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
234 ETHTOOL_BUSINFO_LEN);
235 drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS);
236 drvinfo->testinfo_len = 0;
237 drvinfo->eedump_len = 0;
238 drvinfo->regdump_len = vmxnet3_get_regs_len(netdev);
239}
240
241
242static void
243vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
244{
245 if (stringset == ETH_SS_STATS) {
246 int i;
247
248 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
249 memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
250 ETH_GSTRING_LEN);
251 buf += ETH_GSTRING_LEN;
252 }
253 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
254 memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
255 ETH_GSTRING_LEN);
256 buf += ETH_GSTRING_LEN;
257 }
258 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
259 memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
260 ETH_GSTRING_LEN);
261 buf += ETH_GSTRING_LEN;
262 }
263 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
264 memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
265 ETH_GSTRING_LEN);
266 buf += ETH_GSTRING_LEN;
267 }
268 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
269 memcpy(buf, vmxnet3_global_stats[i].desc,
270 ETH_GSTRING_LEN);
271 buf += ETH_GSTRING_LEN;
272 }
273 }
274}
275
276static u32
277vmxnet3_get_flags(struct net_device *netdev) {
278 return netdev->features;
279}
280
281static int
282vmxnet3_set_flags(struct net_device *netdev, u32 data) {
283 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
284 u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
285 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
286
287 if (lro_requested ^ lro_present) {
288		/* toggle the LRO feature */
289		netdev->features ^= NETIF_F_LRO;
290
291		/* update hardware LRO capability accordingly */
292		if (lro_requested)
293			adapter->shared->devRead.misc.uptFeatures |= UPT1_F_LRO;
294 else
295 adapter->shared->devRead.misc.uptFeatures &=
296 ~UPT1_F_LRO;
297 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
298 VMXNET3_CMD_UPDATE_FEATURE);
299 }
300 return 0;
301}
302
303static void
304vmxnet3_get_ethtool_stats(struct net_device *netdev,
305 struct ethtool_stats *stats, u64 *buf)
306{
307 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
308 u8 *base;
309 int i;
310
311 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
312
313 /* this does assume each counter is 64-bit wide */
314
315 base = (u8 *)&adapter->tqd_start->stats;
316 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
317 *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
318
319 base = (u8 *)&adapter->tx_queue.stats;
320 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
321 *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
322
323 base = (u8 *)&adapter->rqd_start->stats;
324 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
325 *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
326
327 base = (u8 *)&adapter->rx_queue.stats;
328 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
329 *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
330
331 base = (u8 *)adapter;
332 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
333 *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
334}
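
The offsetof()-driven tables above let a single loop walk any stats structure without naming its fields, assuming every counter is a u64. A minimal standalone version of the same pattern:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct stats { uint64_t rx_ok; uint64_t tx_ok; };

    static const struct { const char *name; size_t off; } tab[] = {
            { "rx ok", offsetof(struct stats, rx_ok) },
            { "tx ok", offsetof(struct stats, tx_ok) },
    };

    int main(void)
    {
            struct stats s = { 10, 20 };
            const uint8_t *base = (const uint8_t *)&s;
            size_t i;

            for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
                    printf("%s = %llu\n", tab[i].name,
                           (unsigned long long)*(const uint64_t *)(base + tab[i].off));
            return 0;
    }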
335
336
337static void
338vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
339{
340 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
341 u32 *buf = p;
342
343 memset(p, 0, vmxnet3_get_regs_len(netdev));
344
345 regs->version = 1;
346
347 /* Update vmxnet3_get_regs_len if we want to dump more registers */
348
349	/* pad each ring's register group to a multiple of 16 bytes */
350 buf[0] = adapter->tx_queue.tx_ring.next2fill;
351 buf[1] = adapter->tx_queue.tx_ring.next2comp;
352 buf[2] = adapter->tx_queue.tx_ring.gen;
353 buf[3] = 0;
354
355 buf[4] = adapter->tx_queue.comp_ring.next2proc;
356 buf[5] = adapter->tx_queue.comp_ring.gen;
357 buf[6] = adapter->tx_queue.stopped;
358 buf[7] = 0;
359
360 buf[8] = adapter->rx_queue.rx_ring[0].next2fill;
361 buf[9] = adapter->rx_queue.rx_ring[0].next2comp;
362 buf[10] = adapter->rx_queue.rx_ring[0].gen;
363 buf[11] = 0;
364
365 buf[12] = adapter->rx_queue.rx_ring[1].next2fill;
366 buf[13] = adapter->rx_queue.rx_ring[1].next2comp;
367 buf[14] = adapter->rx_queue.rx_ring[1].gen;
368 buf[15] = 0;
369
370 buf[16] = adapter->rx_queue.comp_ring.next2proc;
371 buf[17] = adapter->rx_queue.comp_ring.gen;
372 buf[18] = 0;
373 buf[19] = 0;
374}
375
376
377static void
378vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
379{
380 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
381
382 wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
383 wol->wolopts = adapter->wol;
384}
385
386
387static int
388vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
389{
390 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
391
392 if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
393 WAKE_MAGICSECURE)) {
394 return -EOPNOTSUPP;
395 }
396
397 adapter->wol = wol->wolopts;
398
399 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
400
401 return 0;
402}
403
404
405static int
406vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
407{
408 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
409
410 ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
411 SUPPORTED_TP;
412 ecmd->advertising = ADVERTISED_TP;
413 ecmd->port = PORT_TP;
414 ecmd->transceiver = XCVR_INTERNAL;
415
416 if (adapter->link_speed) {
417 ecmd->speed = adapter->link_speed;
418 ecmd->duplex = DUPLEX_FULL;
419 } else {
420 ecmd->speed = -1;
421 ecmd->duplex = -1;
422 }
423 return 0;
424}
425
426
427static void
428vmxnet3_get_ringparam(struct net_device *netdev,
429 struct ethtool_ringparam *param)
430{
431 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
432
433 param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
434 param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
435 param->rx_mini_max_pending = 0;
436 param->rx_jumbo_max_pending = 0;
437
438 param->rx_pending = adapter->rx_queue.rx_ring[0].size;
439 param->tx_pending = adapter->tx_queue.tx_ring.size;
440 param->rx_mini_pending = 0;
441 param->rx_jumbo_pending = 0;
442}
443
444
445static int
446vmxnet3_set_ringparam(struct net_device *netdev,
447 struct ethtool_ringparam *param)
448{
449 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
450 u32 new_tx_ring_size, new_rx_ring_size;
451 u32 sz;
452 int err = 0;
453
454 if (param->tx_pending == 0 || param->tx_pending >
455 VMXNET3_TX_RING_MAX_SIZE)
456 return -EINVAL;
457
458 if (param->rx_pending == 0 || param->rx_pending >
459 VMXNET3_RX_RING_MAX_SIZE)
460 return -EINVAL;
461
462
463 /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
464 new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
465 ~VMXNET3_RING_SIZE_MASK;
466 new_tx_ring_size = min_t(u32, new_tx_ring_size,
467 VMXNET3_TX_RING_MAX_SIZE);
468 if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
469 VMXNET3_RING_SIZE_ALIGN) != 0)
470 return -EINVAL;
471
472 /* ring0 has to be a multiple of
473 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
474 */
475 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
476 new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
477 new_rx_ring_size = min_t(u32, new_rx_ring_size,
478 VMXNET3_RX_RING_MAX_SIZE / sz * sz);
479 if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
480 sz) != 0)
481 return -EINVAL;
482
483 if (new_tx_ring_size == adapter->tx_queue.tx_ring.size &&
484 new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) {
485 return 0;
486 }
487
488 /*
489 * Reset_work may be in the middle of resetting the device, wait for its
490 * completion.
491 */
492 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
493 msleep(1);
494
495 if (netif_running(netdev)) {
496 vmxnet3_quiesce_dev(adapter);
497 vmxnet3_reset_dev(adapter);
498
499 /* recreate the rx queue and the tx queue based on the
500 * new sizes */
501 vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
502 vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
503
504 err = vmxnet3_create_queues(adapter, new_tx_ring_size,
505 new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
506 if (err) {
507			/* failed, most likely because of OOM; retry
508			 * with the default sizes */
509			printk(KERN_ERR "%s: failed to apply new sizes, "
510			       "trying the default ones\n", netdev->name);
511 err = vmxnet3_create_queues(adapter,
512 VMXNET3_DEF_TX_RING_SIZE,
513 VMXNET3_DEF_RX_RING_SIZE,
514 VMXNET3_DEF_RX_RING_SIZE);
515 if (err) {
516 printk(KERN_ERR "%s: failed to create queues "
517 "with default sizes. Closing it\n",
518 netdev->name);
519 goto out;
520 }
521 }
522
523 err = vmxnet3_activate_dev(adapter);
524 if (err)
525 printk(KERN_ERR "%s: failed to re-activate, error %d."
526 " Closing it\n", netdev->name, err);
527 }
528
529out:
530 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
531 if (err)
532 vmxnet3_force_close(adapter);
533
534 return err;
535}
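
The mask-based round-up used for the tx ring size above, assuming VMXNET3_RING_SIZE_MASK is VMXNET3_RING_SIZE_ALIGN - 1 = 31 (per vmxnet3_defs.h, not shown in this hunk): a request of 100 becomes 128:

    #include <stdio.h>

    int main(void)
    {
            unsigned int mask = 31;         /* VMXNET3_RING_SIZE_MASK */
            unsigned int req = 100;
            unsigned int size = (req + mask) & ~mask;

            printf("%u\n", size);           /* prints 128 */
            return 0;
    }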
536
537
538static struct ethtool_ops vmxnet3_ethtool_ops = {
539 .get_settings = vmxnet3_get_settings,
540 .get_drvinfo = vmxnet3_get_drvinfo,
541 .get_regs_len = vmxnet3_get_regs_len,
542 .get_regs = vmxnet3_get_regs,
543 .get_wol = vmxnet3_get_wol,
544 .set_wol = vmxnet3_set_wol,
545 .get_link = ethtool_op_get_link,
546 .get_rx_csum = vmxnet3_get_rx_csum,
547 .set_rx_csum = vmxnet3_set_rx_csum,
548 .get_tx_csum = ethtool_op_get_tx_csum,
549 .set_tx_csum = ethtool_op_set_tx_hw_csum,
550 .get_sg = ethtool_op_get_sg,
551 .set_sg = ethtool_op_set_sg,
552 .get_tso = ethtool_op_get_tso,
553 .set_tso = ethtool_op_set_tso,
554 .get_strings = vmxnet3_get_strings,
555 .get_flags = vmxnet3_get_flags,
556 .set_flags = vmxnet3_set_flags,
557 .get_sset_count = vmxnet3_get_sset_count,
558 .get_ethtool_stats = vmxnet3_get_ethtool_stats,
559 .get_ringparam = vmxnet3_get_ringparam,
560 .set_ringparam = vmxnet3_set_ringparam,
561};
562
563void vmxnet3_set_ethtool_ops(struct net_device *netdev)
564{
565 SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops);
566}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
new file mode 100644
index 000000000000..6bb91576e999
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -0,0 +1,389 @@
1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
24 *
25 */
26
27#ifndef _VMXNET3_INT_H
28#define _VMXNET3_INT_H
29
30#include <linux/types.h>
31#include <linux/ethtool.h>
32#include <linux/delay.h>
33#include <linux/netdevice.h>
34#include <linux/pci.h>
35#include <linux/ethtool.h>
36#include <linux/compiler.h>
37#include <linux/module.h>
38#include <linux/moduleparam.h>
39#include <linux/slab.h>
40#include <linux/spinlock.h>
41#include <linux/ioport.h>
42#include <linux/highmem.h>
43#include <linux/init.h>
44#include <linux/timer.h>
45#include <linux/skbuff.h>
46#include <linux/interrupt.h>
47#include <linux/workqueue.h>
48#include <linux/uaccess.h>
49#include <asm/dma.h>
50#include <asm/page.h>
51
52#include <linux/tcp.h>
53#include <linux/udp.h>
54#include <linux/ip.h>
55#include <linux/ipv6.h>
56#include <linux/in.h>
57#include <linux/etherdevice.h>
58#include <asm/checksum.h>
59#include <linux/if_vlan.h>
60#include <linux/if_arp.h>
61#include <linux/inetdevice.h>
62#include <net/dst.h>
63
64#include "vmxnet3_defs.h"
65
66#ifdef DEBUG
67# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
68#else
69# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
70#endif
71
72
73/*
74 * Version numbers
75 */
76#define VMXNET3_DRIVER_VERSION_STRING "1.0.5.0-k"
77
78/* a 32-bit int; each byte encodes a version number in VMXNET3_DRIVER_VERSION */
79#define VMXNET3_DRIVER_VERSION_NUM 0x01000500
80
81
82/*
83 * Capabilities
84 */
85
86enum {
87 VMNET_CAP_SG = 0x0001, /* Can do scatter-gather transmits. */
88 VMNET_CAP_IP4_CSUM = 0x0002, /* Can checksum only TCP/UDP over
89 * IPv4 */
90 VMNET_CAP_HW_CSUM = 0x0004, /* Can checksum all packets. */
91 VMNET_CAP_HIGH_DMA = 0x0008, /* Can DMA to high memory. */
92 VMNET_CAP_TOE = 0x0010, /* Supports TCP/IP offload. */
93 VMNET_CAP_TSO = 0x0020, /* Supports TCP Segmentation
94 * offload */
95 VMNET_CAP_SW_TSO = 0x0040, /* Supports SW TCP Segmentation */
96 VMNET_CAP_VMXNET_APROM = 0x0080, /* Vmxnet APROM support */
97 VMNET_CAP_HW_TX_VLAN = 0x0100, /* Can we do VLAN tagging in HW */
98 VMNET_CAP_HW_RX_VLAN = 0x0200, /* Can we do VLAN untagging in HW */
99 VMNET_CAP_SW_VLAN = 0x0400, /* VLAN tagging/untagging in SW */
100 VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */
101 VMNET_CAP_ENABLE_INT_INLINE = 0x1000, /* Enable Interrupt Inline */
102 VMNET_CAP_ENABLE_HEADER_COPY = 0x2000, /* copy header for vmkernel */
103 VMNET_CAP_TX_CHAIN = 0x4000, /* Guest can use multiple tx entries
104 * for a pkt */
105 VMNET_CAP_RX_CHAIN = 0x8000, /* pkt can span multiple rx entries */
106 VMNET_CAP_LPD = 0x10000, /* large pkt delivery */
107 VMNET_CAP_BPF = 0x20000, /* BPF Support in VMXNET Virtual HW*/
108	VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather transmits can */
109					   /* span multiple pages. */
110 VMNET_CAP_IP6_CSUM = 0x80000, /* Can do IPv6 csum offload. */
111 VMNET_CAP_TSO6 = 0x100000, /* TSO seg. offload for IPv6 pkts. */
112 VMNET_CAP_TSO256k = 0x200000, /* Can do TSO seg offload for */
113 /* pkts up to 256kB. */
114 VMNET_CAP_UPT = 0x400000 /* Support UPT */
115};
116
117/*
118 * PCI vendor and device IDs.
119 */
120#define PCI_VENDOR_ID_VMWARE 0x15AD
121#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0
122#define MAX_ETHERNET_CARDS 10
123#define MAX_PCI_PASSTHRU_DEVICE 6
124
125struct vmxnet3_cmd_ring {
126 union Vmxnet3_GenericDesc *base;
127 u32 size;
128 u32 next2fill;
129 u32 next2comp;
130 u8 gen;
131 dma_addr_t basePA;
132};
133
134static inline void
135vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
136{
137 ring->next2fill++;
138 if (unlikely(ring->next2fill == ring->size)) {
139 ring->next2fill = 0;
140 VMXNET3_FLIP_RING_GEN(ring->gen);
141 }
142}
143
144static inline void
145vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
146{
147 VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
148}
149
150static inline int
151vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
152{
153 return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
154 ring->next2comp - ring->next2fill - 1;
155}
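
A quick check of the free-descriptor arithmetic above: one slot is always left unused so a full ring can be told apart from an empty one. With size 8, next2fill 5 and next2comp 2, four descriptors remain:

    #include <stdio.h>

    static int desc_avail(unsigned int size, unsigned int fill, unsigned int comp)
    {
            return (comp > fill ? 0 : size) + comp - fill - 1;
    }

    int main(void)
    {
            printf("%d\n", desc_avail(8, 5, 2));    /* prints 4 */
            return 0;
    }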
156
157struct vmxnet3_comp_ring {
158 union Vmxnet3_GenericDesc *base;
159 u32 size;
160 u32 next2proc;
161 u8 gen;
162 u8 intr_idx;
163 dma_addr_t basePA;
164};
165
166static inline void
167vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
168{
169 ring->next2proc++;
170 if (unlikely(ring->next2proc == ring->size)) {
171 ring->next2proc = 0;
172 VMXNET3_FLIP_RING_GEN(ring->gen);
173 }
174}
175
176struct vmxnet3_tx_data_ring {
177 struct Vmxnet3_TxDataDesc *base;
178 u32 size;
179 dma_addr_t basePA;
180};
181
182enum vmxnet3_buf_map_type {
183 VMXNET3_MAP_INVALID = 0,
184 VMXNET3_MAP_NONE,
185 VMXNET3_MAP_SINGLE,
186 VMXNET3_MAP_PAGE,
187};
188
189struct vmxnet3_tx_buf_info {
190 u32 map_type;
191 u16 len;
192 u16 sop_idx;
193 dma_addr_t dma_addr;
194 struct sk_buff *skb;
195};
196
197struct vmxnet3_tq_driver_stats {
198 u64 drop_total; /* # of pkts dropped by the driver, the
199 * counters below track droppings due to
200 * different reasons
201 */
202 u64 drop_too_many_frags;
203 u64 drop_oversized_hdr;
204 u64 drop_hdr_inspect_err;
205 u64 drop_tso;
206
207 u64 tx_ring_full;
208 u64 linearized; /* # of pkts linearized */
209 u64 copy_skb_header; /* # of times we have to copy skb header */
210 u64 oversized_hdr;
211};
212
213struct vmxnet3_tx_ctx {
214 bool ipv4;
215 u16 mss;
216 u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum
217 * offloading
218 */
219 u32 l4_hdr_size; /* only valid if mss != 0 */
220 u32 copy_size; /* # of bytes copied into the data ring */
221 union Vmxnet3_GenericDesc *sop_txd;
222 union Vmxnet3_GenericDesc *eop_txd;
223};
224
225struct vmxnet3_tx_queue {
226 spinlock_t tx_lock;
227 struct vmxnet3_cmd_ring tx_ring;
228 struct vmxnet3_tx_buf_info *buf_info;
229 struct vmxnet3_tx_data_ring data_ring;
230 struct vmxnet3_comp_ring comp_ring;
231 struct Vmxnet3_TxQueueCtrl *shared;
232 struct vmxnet3_tq_driver_stats stats;
233 bool stopped;
234 int num_stop; /* # of times the queue is
235 * stopped */
236} __attribute__((__aligned__(SMP_CACHE_BYTES)));
237
238enum vmxnet3_rx_buf_type {
239 VMXNET3_RX_BUF_NONE = 0,
240 VMXNET3_RX_BUF_SKB = 1,
241 VMXNET3_RX_BUF_PAGE = 2
242};
243
244struct vmxnet3_rx_buf_info {
245 enum vmxnet3_rx_buf_type buf_type;
246 u16 len;
247 union {
248 struct sk_buff *skb;
249 struct page *page;
250 };
251 dma_addr_t dma_addr;
252};
253
254struct vmxnet3_rx_ctx {
255 struct sk_buff *skb;
256 u32 sop_idx;
257};
258
259struct vmxnet3_rq_driver_stats {
260 u64 drop_total;
261 u64 drop_err;
262 u64 drop_fcs;
263 u64 rx_buf_alloc_failure;
264};
265
266struct vmxnet3_rx_queue {
267 struct vmxnet3_cmd_ring rx_ring[2];
268 struct vmxnet3_comp_ring comp_ring;
269 struct vmxnet3_rx_ctx rx_ctx;
270 u32 qid; /* rqID in RCD for buffer from 1st ring */
271 u32 qid2; /* rqID in RCD for buffer from 2nd ring */
272 u32 uncommitted[2]; /* # of buffers allocated since last RXPROD
273 * update */
274 struct vmxnet3_rx_buf_info *buf_info[2];
275 struct Vmxnet3_RxQueueCtrl *shared;
276 struct vmxnet3_rq_driver_stats stats;
277} __attribute__((__aligned__(SMP_CACHE_BYTES)));
278
279#define VMXNET3_LINUX_MAX_MSIX_VECT 1
280
281struct vmxnet3_intr {
282 enum vmxnet3_intr_mask_mode mask_mode;
283 enum vmxnet3_intr_type type; /* MSI-X, MSI, or INTx? */
284 u8 num_intrs; /* # of intr vectors */
285 u8 event_intr_idx; /* idx of the intr vector for event */
286 u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
287#ifdef CONFIG_PCI_MSI
288 struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
289#endif
290};
291
292#define VMXNET3_STATE_BIT_RESETTING 0
293#define VMXNET3_STATE_BIT_QUIESCED 1
294struct vmxnet3_adapter {
295 struct vmxnet3_tx_queue tx_queue;
296 struct vmxnet3_rx_queue rx_queue;
297 struct napi_struct napi;
298 struct vlan_group *vlan_grp;
299
300 struct vmxnet3_intr intr;
301
302 struct Vmxnet3_DriverShared *shared;
303 struct Vmxnet3_PMConf *pm_conf;
304 struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */
305 struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */
306 struct net_device *netdev;
307 struct pci_dev *pdev;
308
309 u8 *hw_addr0; /* for BAR 0 */
310 u8 *hw_addr1; /* for BAR 1 */
311
312 /* feature control */
313 bool rxcsum;
314 bool lro;
315 bool jumbo_frame;
316
317 /* rx buffer related */
318 unsigned skb_buf_size;
319 int rx_buf_per_pkt; /* only apply to the 1st ring */
320 dma_addr_t shared_pa;
321 dma_addr_t queue_desc_pa;
322
323 /* Wake-on-LAN */
324 u32 wol;
325
326 /* Link speed */
327 u32 link_speed; /* in mbps */
328
329 u64 tx_timeout_count;
330 struct work_struct work;
331
332 unsigned long state; /* VMXNET3_STATE_BIT_xxx */
333
334 int dev_number;
335};
336
337#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
338 writel((val), (adapter)->hw_addr0 + (reg))
339#define VMXNET3_READ_BAR0_REG(adapter, reg) \
340 readl((adapter)->hw_addr0 + (reg))
341
342#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \
343 writel((val), (adapter)->hw_addr1 + (reg))
344#define VMXNET3_READ_BAR1_REG(adapter, reg) \
345 readl((adapter)->hw_addr1 + (reg))
346
347#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5)
348#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
349 ((rq)->rx_ring[ring_idx].size >> 3)
350
351#define VMXNET3_GET_ADDR_LO(dma) ((u32)(dma))
352#define VMXNET3_GET_ADDR_HI(dma) ((u32)(((u64)(dma)) >> 32))
353
354/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
355#define VMXNET3_DEF_TX_RING_SIZE 512
356#define VMXNET3_DEF_RX_RING_SIZE 256
357
358#define VMXNET3_MAX_ETH_HDR_SIZE 22
359#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
360
361int
362vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
363
364int
365vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);
366
367void
368vmxnet3_force_close(struct vmxnet3_adapter *adapter);
369
370void
371vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
372
373void
374vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
375 struct vmxnet3_adapter *adapter);
376
377void
378vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
379 struct vmxnet3_adapter *adapter);
380
381int
382vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
383 u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
384
385extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
386extern struct net_device_stats *vmxnet3_get_stats(struct net_device *netdev);
387
388extern char vmxnet3_driver_name[];
389#endif
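
The VMXNET3_GET_ADDR_LO/HI macros above split a 64-bit dma_addr_t into the two 32-bit halves the driver hands to the device. A minimal standalone sketch of that round trip, using plain stdint types in place of dma_addr_t (macro names restated here, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define GET_ADDR_LO(dma) ((uint32_t)(dma))
#define GET_ADDR_HI(dma) ((uint32_t)(((uint64_t)(dma)) >> 32))

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ULL;	/* example bus address */
	uint32_t lo = GET_ADDR_LO(dma);
	uint32_t hi = GET_ADDR_HI(dma);

	/* Recombining the halves must reproduce the original address. */
	uint64_t back = ((uint64_t)hi << 32) | lo;
	printf("lo=0x%08x hi=0x%08x roundtrip=%s\n",
	       (unsigned)lo, (unsigned)hi, back == dma ? "ok" : "BROKEN");
	return 0;
}
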
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index cf5fd17ad707..f1bff98acd1f 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -58,8 +58,7 @@ struct cisco_state {
58 spinlock_t lock; 58 spinlock_t lock;
59 unsigned long last_poll; 59 unsigned long last_poll;
60 int up; 60 int up;
61 int request_sent; 61 u32 txseq; /* TX sequence number, 0 = none */
62 u32 txseq; /* TX sequence number */
63 u32 rxseq; /* RX sequence number */ 62 u32 rxseq; /* RX sequence number */
64}; 63};
65 64
@@ -163,6 +162,7 @@ static int cisco_rx(struct sk_buff *skb)
163 struct cisco_packet *cisco_data; 162 struct cisco_packet *cisco_data;
164 struct in_device *in_dev; 163 struct in_device *in_dev;
165 __be32 addr, mask; 164 __be32 addr, mask;
165 u32 ack;
166 166
167 if (skb->len < sizeof(struct hdlc_header)) 167 if (skb->len < sizeof(struct hdlc_header))
168 goto rx_error; 168 goto rx_error;
@@ -223,8 +223,10 @@ static int cisco_rx(struct sk_buff *skb)
223 case CISCO_KEEPALIVE_REQ: 223 case CISCO_KEEPALIVE_REQ:
224 spin_lock(&st->lock); 224 spin_lock(&st->lock);
225 st->rxseq = ntohl(cisco_data->par1); 225 st->rxseq = ntohl(cisco_data->par1);
226 if (st->request_sent && 226 ack = ntohl(cisco_data->par2);
227 ntohl(cisco_data->par2) == st->txseq) { 227 if (ack && (ack == st->txseq ||
228 /* our current REQ may be in transit */
229 ack == st->txseq - 1)) {
228 st->last_poll = jiffies; 230 st->last_poll = jiffies;
229 if (!st->up) { 231 if (!st->up) {
230 u32 sec, min, hrs, days; 232 u32 sec, min, hrs, days;
@@ -275,7 +277,6 @@ static void cisco_timer(unsigned long arg)
275 277
276 cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), 278 cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
277 htonl(st->rxseq)); 279 htonl(st->rxseq));
278 st->request_sent = 1;
279 spin_unlock(&st->lock); 280 spin_unlock(&st->lock);
280 281
281 st->timer.expires = jiffies + st->settings.interval * HZ; 282 st->timer.expires = jiffies + st->settings.interval * HZ;
@@ -293,9 +294,7 @@ static void cisco_start(struct net_device *dev)
293 unsigned long flags; 294 unsigned long flags;
294 295
295 spin_lock_irqsave(&st->lock, flags); 296 spin_lock_irqsave(&st->lock, flags);
296 st->up = 0; 297 st->up = st->txseq = st->rxseq = 0;
297 st->request_sent = 0;
298 st->txseq = st->rxseq = 0;
299 spin_unlock_irqrestore(&st->lock, flags); 298 spin_unlock_irqrestore(&st->lock, flags);
300 299
301 init_timer(&st->timer); 300 init_timer(&st->timer);
@@ -317,8 +316,7 @@ static void cisco_stop(struct net_device *dev)
317 316
318 spin_lock_irqsave(&st->lock, flags); 317 spin_lock_irqsave(&st->lock, flags);
319 netif_dormant_on(dev); 318 netif_dormant_on(dev);
320 st->up = 0; 319 st->up = st->txseq = 0;
321 st->request_sent = 0;
322 spin_unlock_irqrestore(&st->lock, flags); 320 spin_unlock_irqrestore(&st->lock, flags);
323} 321}
324 322
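
The hdlc_cisco change above drops the request_sent flag and tests the sequence numbers directly: a keepalive reply counts as current if it acks our latest REQ or the one just before it (our newest REQ may still be in transit), with ack == 0 meaning the peer has seen no REQ at all. A user-space sketch of that acceptance test (function name hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool keepalive_ack_ok(uint32_t ack, uint32_t txseq)
{
	return ack && (ack == txseq || ack == txseq - 1);
}

int main(void)
{
	printf("%d %d %d %d\n",
	       keepalive_ack_ok(0, 5),	/* 0: peer saw nothing      */
	       keepalive_ack_ok(5, 5),	/* 1: acks latest REQ       */
	       keepalive_ack_ok(4, 5),	/* 1: REQ 5 still in flight */
	       keepalive_ack_ok(3, 5));	/* 0: link is stale         */
	return 0;
}
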
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 892573b27d50..f4e9695ec186 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -690,7 +690,10 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
690 } 690 }
691 691
692 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 692 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
693
694 local_bh_disable();
693 ieee80211_rx(dev->wl->hw, skb); 695 ieee80211_rx(dev->wl->hw, skb);
696 local_bh_enable();
694 697
695#if B43_DEBUG 698#if B43_DEBUG
696 dev->rx_count++; 699 dev->rx_count++;
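
The local_bh_disable()/local_bh_enable() bracket added here reflects the new ieee80211_rx() contract documented further down in mac80211.h and enforced by the WARN_ON_ONCE added in net/mac80211/rx.c: the call must run with softirqs disabled. A sketch of the driver-side pattern (kernel context assumed; drv_deliver_frame is a hypothetical helper, not a b43 function):

#include <linux/bottom_half.h>
#include <net/mac80211.h>

static void drv_deliver_frame(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	local_bh_disable();
	ieee80211_rx(hw, skb);	/* mac80211 WARNs if softirq_count() == 0 */
	local_bh_enable();
}
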
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 046b571fd9ce..8d7bc38fe005 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3156,8 +3156,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3156 out_pci_disable_device: 3156 out_pci_disable_device:
3157 pci_disable_device(pdev); 3157 pci_disable_device(pdev);
3158 out_ieee80211_free_hw: 3158 out_ieee80211_free_hw:
3159 ieee80211_free_hw(priv->hw);
3160 iwl_free_traffic_mem(priv); 3159 iwl_free_traffic_mem(priv);
3160 ieee80211_free_hw(priv->hw);
3161 out: 3161 out:
3162 return err; 3162 return err;
3163} 3163}
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index ecbe036ecb63..c347d6631d85 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -4099,8 +4099,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4099 pci_set_drvdata(pdev, NULL); 4099 pci_set_drvdata(pdev, NULL);
4100 pci_disable_device(pdev); 4100 pci_disable_device(pdev);
4101 out_ieee80211_free_hw: 4101 out_ieee80211_free_hw:
4102 ieee80211_free_hw(priv->hw);
4103 iwl_free_traffic_mem(priv); 4102 iwl_free_traffic_mem(priv);
4103 ieee80211_free_hw(priv->hw);
4104 out: 4104 out:
4105 return err; 4105 return err;
4106} 4106}
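
Both the iwlagn and iwl3945 probe error paths above get the same two-line swap: priv is carved out of the ieee80211_hw allocation, so ieee80211_free_hw() releases priv's own storage and must run after any cleanup that still dereferences priv. A hedged sketch of the pattern with stand-in driver types (drv_priv and drv_free_traffic_mem are hypothetical):

#include <linux/slab.h>
#include <net/mac80211.h>

struct drv_priv { void *traffic_log; };	/* stand-in for iwl_priv */

static void drv_free_traffic_mem(struct drv_priv *priv)
{
	kfree(priv->traffic_log);	/* stand-in for iwl_free_traffic_mem() */
}

static int probe_error_path(const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw = ieee80211_alloc_hw(sizeof(struct drv_priv), ops);
	struct drv_priv *priv;

	if (!hw)
		return -ENOMEM;
	priv = hw->priv;		/* priv storage lives inside hw */

	/* ... later probe steps fail ... */

	drv_free_traffic_mem(priv);	/* dereferences priv: must run first */
	ieee80211_free_hw(hw);		/* frees priv's storage: must run last */
	return -EIO;
}
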
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 47d2b1909d69..9ee8bd11bda9 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -3,6 +3,7 @@
3 * responses as well as events generated by firmware. 3 * responses as well as events generated by firmware.
4 */ 4 */
5#include <linux/delay.h> 5#include <linux/delay.h>
6#include <linux/sched.h>
6#include <linux/if_arp.h> 7#include <linux/if_arp.h>
7#include <linux/netdevice.h> 8#include <linux/netdevice.h>
8#include <asm/unaligned.h> 9#include <asm/unaligned.h>
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 466859b285e1..c75b960c8ac8 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1669,6 +1669,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw);
1669 * to this function and ieee80211_rx_irqsafe() may not be mixed for a 1669 * to this function and ieee80211_rx_irqsafe() may not be mixed for a
1670 * single hardware. 1670 * single hardware.
1671 * 1671 *
1672 * Note that right now, this function must be called with softirqs disabled.
1673 *
1672 * @hw: the hardware this frame came in on 1674 * @hw: the hardware this frame came in on
1673 * @skb: the buffer to receive, owned by mac80211 after this call 1675 * @skb: the buffer to receive, owned by mac80211 after this call
1674 */ 1676 */
diff --git a/include/net/sock.h b/include/net/sock.h
index 10669b01eeab..1364428f53f2 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -226,12 +226,12 @@ struct sock {
226#define sk_prot __sk_common.skc_prot 226#define sk_prot __sk_common.skc_prot
227#define sk_net __sk_common.skc_net 227#define sk_net __sk_common.skc_net
228 kmemcheck_bitfield_begin(flags); 228 kmemcheck_bitfield_begin(flags);
229 unsigned char sk_shutdown : 2, 229 unsigned int sk_shutdown : 2,
230 sk_no_check : 2, 230 sk_no_check : 2,
231 sk_userlocks : 4; 231 sk_userlocks : 4,
232 sk_protocol : 8,
233 sk_type : 16;
232 kmemcheck_bitfield_end(flags); 234 kmemcheck_bitfield_end(flags);
233 unsigned char sk_protocol;
234 unsigned short sk_type;
235 int sk_rcvbuf; 235 int sk_rcvbuf;
236 socket_lock_t sk_lock; 236 socket_lock_t sk_lock;
237 /* 237 /*
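
The sock.h hunk folds sk_protocol and sk_type into the existing bitfield, so all five fields share one unsigned int inside the kmemcheck_bitfield_begin/end markers. A user-space sketch (field names shortened, not the kernel struct) showing that the :8 and :16 members keep the full ranges of the unsigned char and unsigned short they replace while packing into a single 32-bit word:

#include <stdio.h>

struct flags_word {
	unsigned int shutdown:2, no_check:2, userlocks:4,
		     protocol:8, type:16;	/* 2+2+4+8+16 = 32 bits */
};

int main(void)
{
	struct flags_word f = { .protocol = 255, .type = 65535 };
	printf("sizeof=%zu protocol=%u type=%u\n",
	       sizeof(f), (unsigned)f.protocol, (unsigned)f.type);
	return 0;
}
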
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 624c3c9b3c2b..e320afea07fc 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -644,6 +644,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
644 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */ 644 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
645 if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && 645 if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
646 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { 646 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
647 inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
647 inet_rsk(req)->acked = 1; 648 inet_rsk(req)->acked = 1;
648 return NULL; 649 return NULL;
649 } 650 }
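
The added decrement turns rskq_defer_accept into a countdown: each bare ACK dropped consumes one unit, and once the counter reaches zero the drop condition no longer matches, so TCP_DEFER_ACCEPT can no longer hold a connection in deferral forever. A toy model of that countdown (a simplification, not the TCP state machine):

#include <stdio.h>

int main(void)
{
	int defer_accept = 3;	/* deferral budget granted via setsockopt */

	while (defer_accept > 0) {
		defer_accept--;	/* mirrors rskq_defer_accept-- in the hunk */
		printf("bare ACK dropped, %d left\n", defer_accept);
	}
	printf("budget exhausted: the next bare ACK is processed normally\n");
	return 0;
}
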
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 71e5353b30c8..ee61b3fc4cae 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -845,6 +845,42 @@ out:
845 return ret; 845 return ret;
846} 846}
847 847
848
849/**
850 * first_packet_length - return length of first packet in receive queue
851 * @sk: socket
852 *
853 * Drops all bad checksum frames, until a valid one is found.
854 * Returns the length of found skb, or 0 if none is found.
855 */
856static unsigned int first_packet_length(struct sock *sk)
857{
858 struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
859 struct sk_buff *skb;
860 unsigned int res;
861
862 __skb_queue_head_init(&list_kill);
863
864 spin_lock_bh(&rcvq->lock);
865 while ((skb = skb_peek(rcvq)) != NULL &&
866 udp_lib_checksum_complete(skb)) {
867 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
868 IS_UDPLITE(sk));
869 __skb_unlink(skb, rcvq);
870 __skb_queue_tail(&list_kill, skb);
871 }
872 res = skb ? skb->len : 0;
873 spin_unlock_bh(&rcvq->lock);
874
875 if (!skb_queue_empty(&list_kill)) {
876 lock_sock(sk);
877 __skb_queue_purge(&list_kill);
878 sk_mem_reclaim_partial(sk);
879 release_sock(sk);
880 }
881 return res;
882}
883
848/* 884/*
849 * IOCTL requests applicable to the UDP protocol 885 * IOCTL requests applicable to the UDP protocol
850 */ 886 */
@@ -861,21 +897,16 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
861 897
862 case SIOCINQ: 898 case SIOCINQ:
863 { 899 {
864 struct sk_buff *skb; 900 unsigned int amount = first_packet_length(sk);
865 unsigned long amount;
866 901
867 amount = 0; 902 if (amount)
868 spin_lock_bh(&sk->sk_receive_queue.lock);
869 skb = skb_peek(&sk->sk_receive_queue);
870 if (skb != NULL) {
871 /* 903 /*
872 * We will only return the amount 904 * We will only return the amount
873 * of this packet since that is all 905 * of this packet since that is all
874 * that will be read. 906 * that will be read.
875 */ 907 */
876 amount = skb->len - sizeof(struct udphdr); 908 amount -= sizeof(struct udphdr);
877 } 909
878 spin_unlock_bh(&sk->sk_receive_queue.lock);
879 return put_user(amount, (int __user *)arg); 910 return put_user(amount, (int __user *)arg);
880 } 911 }
881 912
@@ -1544,29 +1575,11 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
1544{ 1575{
1545 unsigned int mask = datagram_poll(file, sock, wait); 1576 unsigned int mask = datagram_poll(file, sock, wait);
1546 struct sock *sk = sock->sk; 1577 struct sock *sk = sock->sk;
1547 int is_lite = IS_UDPLITE(sk);
1548 1578
1549 /* Check for false positives due to checksum errors */ 1579 /* Check for false positives due to checksum errors */
1550 if ((mask & POLLRDNORM) && 1580 if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
1551 !(file->f_flags & O_NONBLOCK) && 1581 !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
1552 !(sk->sk_shutdown & RCV_SHUTDOWN)) { 1582 mask &= ~(POLLIN | POLLRDNORM);
1553 struct sk_buff_head *rcvq = &sk->sk_receive_queue;
1554 struct sk_buff *skb;
1555
1556 spin_lock_bh(&rcvq->lock);
1557 while ((skb = skb_peek(rcvq)) != NULL &&
1558 udp_lib_checksum_complete(skb)) {
1559 UDP_INC_STATS_BH(sock_net(sk),
1560 UDP_MIB_INERRORS, is_lite);
1561 __skb_unlink(skb, rcvq);
1562 kfree_skb(skb);
1563 }
1564 spin_unlock_bh(&rcvq->lock);
1565
1566 /* nothing to see, move along */
1567 if (skb == NULL)
1568 mask &= ~(POLLIN | POLLRDNORM);
1569 }
1570 1583
1571 return mask; 1584 return mask;
1572 1585
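
Both SIOCINQ and udp_poll() now funnel through first_packet_length(), so the ioctl reports the payload of the first queued datagram only (UDP header already subtracted) and poll() stops signalling readability when the queue holds nothing but bad-checksum frames. A user-space check of the SIOCINQ semantics via a loopback self-send (SIOCINQ is FIONREAD for UDP; error handling elided):

#include <stdio.h>
#include <poll.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sockios.h>	/* SIOCINQ */

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in a = { .sin_family = AF_INET,
				 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	socklen_t alen = sizeof(a);
	struct pollfd p = { .fd = fd, .events = POLLIN };
	int queued = 0;

	bind(fd, (struct sockaddr *)&a, sizeof(a));
	getsockname(fd, (struct sockaddr *)&a, &alen);	/* learn chosen port */
	sendto(fd, "hello", 5, 0, (struct sockaddr *)&a, sizeof(a));
	sendto(fd, "a-much-longer-datagram", 22, 0,
	       (struct sockaddr *)&a, sizeof(a));
	poll(&p, 1, 1000);	/* wait for loopback delivery */
	ioctl(fd, SIOCINQ, &queued);
	printf("SIOCINQ=%d (first datagram's payload only)\n", queued);
	close(fd);
	return 0;
}
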
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 920ec8792f4b..6eaf69823439 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -544,7 +544,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
544 "%pM\n", bss->cbss.bssid, ifibss->bssid); 544 "%pM\n", bss->cbss.bssid, ifibss->bssid);
545#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 545#endif /* CONFIG_MAC80211_IBSS_DEBUG */
546 546
547 if (bss && memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) { 547 if (bss && !memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) {
548 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM" 548 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
549 " based on configured SSID\n", 549 " based on configured SSID\n",
550 sdata->dev->name, bss->cbss.bssid); 550 sdata->dev->name, bss->cbss.bssid);
@@ -829,7 +829,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
829 if (!sdata->u.ibss.ssid_len) 829 if (!sdata->u.ibss.ssid_len)
830 continue; 830 continue;
831 sdata->u.ibss.last_scan_completed = jiffies; 831 sdata->u.ibss.last_scan_completed = jiffies;
832 ieee80211_sta_find_ibss(sdata); 832 mod_timer(&sdata->u.ibss.timer, 0);
833 } 833 }
834 mutex_unlock(&local->iflist_mtx); 834 mutex_unlock(&local->iflist_mtx);
835} 835}
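
The second ibss.c hunk stops calling ieee80211_sta_find_ibss() synchronously inside the loop that holds local->iflist_mtx; instead it arms the per-interface timer with an already-expired deadline, deferring the IBSS search to timer context. A sketch of that "kick the timer now" idiom (helper name hypothetical):

#include <linux/jiffies.h>
#include <linux/timer.h>

static inline void kick_timer_now(struct timer_list *t)
{
	/* An expiry time at or before "now" makes the timer fire on the
	 * next tick; the hunk above passes literal 0 to the same effect. */
	mod_timer(t, jiffies);
}
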
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 865fbc09be1a..7170bf4565a8 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2453,6 +2453,8 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2453 struct ieee80211_supported_band *sband; 2453 struct ieee80211_supported_band *sband;
2454 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2454 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2455 2455
2456 WARN_ON_ONCE(softirq_count() == 0);
2457
2456 if (WARN_ON(status->band < 0 || 2458 if (WARN_ON(status->band < 0 ||
2457 status->band >= IEEE80211_NUM_BANDS)) 2459 status->band >= IEEE80211_NUM_BANDS))
2458 goto drop; 2460 goto drop;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 96c0ed115e2a..6b0359a500e6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -34,7 +34,7 @@ static struct tcf_hashinfo pedit_hash_info = {
34}; 34};
35 35
36static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = { 36static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
37 [TCA_PEDIT_PARMS] = { .len = sizeof(struct tcf_pedit) }, 37 [TCA_PEDIT_PARMS] = { .len = sizeof(struct tc_pedit) },
38}; 38};
39 39
40static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est, 40static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
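
The one-word act_pedit fix matters because the .len field of an nla_policy entry is a minimum payload length: TCA_PEDIT_PARMS carries the userspace-visible struct tc_pedit, and validating it against the larger, kernel-private struct tcf_pedit (which embeds extra action state) rejected well-formed requests. A hedged restatement of the check the policy entry drives (helper is illustrative, not the netlink core):

#include <net/netlink.h>
#include <linux/tc_act/tc_pedit.h>

static int check_pedit_parms(const struct nlattr *nla)
{
	if (nla_len(nla) < (int)sizeof(struct tc_pedit))
		return -EINVAL;	/* with sizeof(struct tcf_pedit) here,
				 * valid userspace requests were bounced */
	return 0;
}
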