Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig            21
-rw-r--r--  drivers/net/Makefile            1
-rw-r--r--  drivers/net/igbvf/Makefile     38
-rw-r--r--  drivers/net/igbvf/defines.h   125
-rw-r--r--  drivers/net/igbvf/ethtool.c   540
-rw-r--r--  drivers/net/igbvf/igbvf.h     335
-rw-r--r--  drivers/net/igbvf/mbx.c       350
-rw-r--r--  drivers/net/igbvf/mbx.h        75
-rw-r--r--  drivers/net/igbvf/netdev.c   2919
-rw-r--r--  drivers/net/igbvf/regs.h      108
-rw-r--r--  drivers/net/igbvf/vf.c        398
-rw-r--r--  drivers/net/igbvf/vf.h        265
12 files changed, 5175 insertions, 0 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 78585fb3bdb5..9e921544ba20 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2058,6 +2058,27 @@ config IGB_DCA
2058	  driver.  DCA is a method for warming the CPU cache before data
2059	  is used, with the intent of lessening the impact of cache misses.
2060
2061config IGBVF
2062 tristate "Intel(R) 82576 Virtual Function Ethernet support"
2063 depends on PCI
2064 ---help---
2065 This driver supports Intel(R) 82576 virtual functions. For more
2066 information on how to identify your adapter, go to the Adapter &
2067 Driver ID Guide at:
2068
2069 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
2070
2071 For general information and support, go to the Intel support
2072 website at:
2073
2074 <http://support.intel.com>
2075
2076 More specific information on configuring the driver is in
2077 <file:Documentation/networking/e1000.txt>.
2078
2079 To compile this driver as a module, choose M here. The module
2080 will be called igbvf.
2081
2082source "drivers/net/ixp2000/Kconfig"
2083
2084config MYRI_SBUS
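
Usage note (outside the patch): the PCI virtual functions this driver binds
to are created by the 82576 physical-function driver, so a typical bring-up
is "modprobe igb max_vfs=2" on the host followed by "modprobe igbvf" once
the new PCI functions appear, assuming igb exposes the max_vfs module
parameter as it did when this patch was merged.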
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index edc9a0d6171d..1fc4602a6ff2 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/
6obj-$(CONFIG_E1000E) += e1000e/
7obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
8obj-$(CONFIG_IGB) += igb/
9obj-$(CONFIG_IGBVF) += igbvf/
10obj-$(CONFIG_IXGBE) += ixgbe/
11obj-$(CONFIG_IXGB) += ixgb/
12obj-$(CONFIG_IP1000) += ipg.o
diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile
new file mode 100644
index 000000000000..c2f150d8f2d9
--- /dev/null
+++ b/drivers/net/igbvf/Makefile
@@ -0,0 +1,38 @@
1################################################################################
2#
3# Intel(R) 82576 Virtual Function Linux driver
4# Copyright(c) 2009 Intel Corporation.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License,
8# version 2, as published by the Free Software Foundation.
9#
10# This program is distributed in the hope it will be useful, but WITHOUT
11# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details.
14#
15# You should have received a copy of the GNU General Public License along with
16# this program; if not, write to the Free Software Foundation, Inc.,
17# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25#
26################################################################################
27
28#
29# Makefile for the Intel(R) 82576 VF ethernet driver
30#
31
32obj-$(CONFIG_IGBVF) += igbvf.o
33
34igbvf-objs := vf.o \
35 mbx.o \
36 ethtool.o \
37 netdev.o
38
diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h
new file mode 100644
index 000000000000..88a47537518a
--- /dev/null
+++ b/drivers/net/igbvf/defines.h
@@ -0,0 +1,125 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_DEFINES_H_
29#define _E1000_DEFINES_H_
30
31/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
32#define REQ_TX_DESCRIPTOR_MULTIPLE 8
33#define REQ_RX_DESCRIPTOR_MULTIPLE 8
34
35/* IVAR valid bit */
36#define E1000_IVAR_VALID 0x80
37
38/* Receive Descriptor bit definitions */
39#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
40#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
41#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
42#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
43#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
44#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
45#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
46#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
47#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
48
49#define E1000_RXDEXT_STATERR_CE 0x01000000
50#define E1000_RXDEXT_STATERR_SE 0x02000000
51#define E1000_RXDEXT_STATERR_SEQ 0x04000000
52#define E1000_RXDEXT_STATERR_CXE 0x10000000
53#define E1000_RXDEXT_STATERR_TCPE 0x20000000
54#define E1000_RXDEXT_STATERR_IPE 0x40000000
55#define E1000_RXDEXT_STATERR_RXE 0x80000000
56
57
58/* Same mask, but for extended and packet split descriptors */
59#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
60 E1000_RXDEXT_STATERR_CE | \
61 E1000_RXDEXT_STATERR_SE | \
62 E1000_RXDEXT_STATERR_SEQ | \
63 E1000_RXDEXT_STATERR_CXE | \
64 E1000_RXDEXT_STATERR_RXE)
65
66/* Device Control */
67#define E1000_CTRL_RST 0x04000000 /* Global reset */
68
69/* Device Status */
70#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
71#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
72#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
73#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
74#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
75#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
76
77#define SPEED_10 10
78#define SPEED_100 100
79#define SPEED_1000 1000
80#define HALF_DUPLEX 1
81#define FULL_DUPLEX 2
82
83/* Transmit Descriptor bit definitions */
84#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
85#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
86#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
87#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
88
89#define MAX_JUMBO_FRAME_SIZE 0x3F00
90
91/* 802.1q VLAN Packet Size */
92#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
93
94/* Error Codes */
95#define E1000_SUCCESS 0
96#define E1000_ERR_CONFIG 3
97#define E1000_ERR_MAC_INIT 5
98#define E1000_ERR_MBX 15
99
100#ifndef ETH_ADDR_LEN
101#define ETH_ADDR_LEN 6
102#endif
103
104/* SRRCTL bit definitions */
105#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
106#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
107#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
108#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
109#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
110#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
111#define E1000_SRRCTL_DROP_EN 0x80000000
112
113#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
114#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
115
116/* Additional Descriptor Control definitions */
117#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
118#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
119
120/* Direct Cache Access (DCA) definitions */
121#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
122
123#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
124
125#endif /* _E1000_DEFINES_H_ */
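
Usage sketch (not part of the patch): the receive status and error bits
above are consumed by the Rx cleanup path in netdev.c; condensed into a
hypothetical helper, the test reads roughly as follows.

	static bool igbvf_rx_desc_usable(u32 staterr)
	{
		/* staterr is the already le32_to_cpu'd status_error word */
		if (!(staterr & E1000_RXD_STAT_DD))	/* hardware not done yet */
			return false;
		/* CE/SE/SEQ/CXE/RXE all mean the frame is damaged */
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)
			return false;
		return true;
	}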
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
new file mode 100644
index 000000000000..1dcaa6905312
--- /dev/null
+++ b/drivers/net/igbvf/ethtool.c
@@ -0,0 +1,540 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* ethtool support for igbvf */
29
30#include <linux/netdevice.h>
31#include <linux/ethtool.h>
32#include <linux/pci.h>
33#include <linux/vmalloc.h>
34#include <linux/delay.h>
35
36#include "igbvf.h"
37#include <linux/if_vlan.h>
38
39
40struct igbvf_stats {
41 char stat_string[ETH_GSTRING_LEN];
42 int sizeof_stat;
43 int stat_offset;
44 int base_stat_offset;
45};
46
47#define IGBVF_STAT(current, base) \
48 sizeof(((struct igbvf_adapter *)0)->current), \
49 offsetof(struct igbvf_adapter, current), \
50 offsetof(struct igbvf_adapter, base)
51
52static const struct igbvf_stats igbvf_gstrings_stats[] = {
53 { "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) },
54 { "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) },
55 { "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) },
56 { "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) },
57 { "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) },
58 { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
59 { "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
60 { "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
61 { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
62 { "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
63 { "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
64 { "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) },
65 { "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) },
66};
67
68#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats)
69
70static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
71 "Link test (on/offline)"
72};
73
74#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
75
76static int igbvf_get_settings(struct net_device *netdev,
77 struct ethtool_cmd *ecmd)
78{
79 struct igbvf_adapter *adapter = netdev_priv(netdev);
80 struct e1000_hw *hw = &adapter->hw;
81 u32 status;
82
83 ecmd->supported = SUPPORTED_1000baseT_Full;
84
85 ecmd->advertising = ADVERTISED_1000baseT_Full;
86
87 ecmd->port = -1;
88 ecmd->transceiver = XCVR_DUMMY1;
89
90 status = er32(STATUS);
91 if (status & E1000_STATUS_LU) {
92 if (status & E1000_STATUS_SPEED_1000)
93 ecmd->speed = 1000;
94 else if (status & E1000_STATUS_SPEED_100)
95 ecmd->speed = 100;
96 else
97 ecmd->speed = 10;
98
99 if (status & E1000_STATUS_FD)
100 ecmd->duplex = DUPLEX_FULL;
101 else
102 ecmd->duplex = DUPLEX_HALF;
103 } else {
104 ecmd->speed = -1;
105 ecmd->duplex = -1;
106 }
107
108 ecmd->autoneg = AUTONEG_DISABLE;
109
110 return 0;
111}
112
113static u32 igbvf_get_link(struct net_device *netdev)
114{
115 return netif_carrier_ok(netdev);
116}
117
118static int igbvf_set_settings(struct net_device *netdev,
119 struct ethtool_cmd *ecmd)
120{
121 return -EOPNOTSUPP;
122}
123
124static void igbvf_get_pauseparam(struct net_device *netdev,
125 struct ethtool_pauseparam *pause)
126{
127 return;
128}
129
130static int igbvf_set_pauseparam(struct net_device *netdev,
131 struct ethtool_pauseparam *pause)
132{
133 return -EOPNOTSUPP;
134}
135
136static u32 igbvf_get_tx_csum(struct net_device *netdev)
137{
138 return ((netdev->features & NETIF_F_IP_CSUM) != 0);
139}
140
141static int igbvf_set_tx_csum(struct net_device *netdev, u32 data)
142{
143 if (data)
144 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
145 else
146 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
147 return 0;
148}
149
150static int igbvf_set_tso(struct net_device *netdev, u32 data)
151{
152 struct igbvf_adapter *adapter = netdev_priv(netdev);
153 int i;
154 struct net_device *v_netdev;
155
156 if (data) {
157 netdev->features |= NETIF_F_TSO;
158 netdev->features |= NETIF_F_TSO6;
159 } else {
160 netdev->features &= ~NETIF_F_TSO;
161 netdev->features &= ~NETIF_F_TSO6;
162 /* disable TSO on all VLANs if they're present */
163 if (!adapter->vlgrp)
164 goto tso_out;
165 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
166 v_netdev = vlan_group_get_device(adapter->vlgrp, i);
167 if (!v_netdev)
168 continue;
169
170 v_netdev->features &= ~NETIF_F_TSO;
171 v_netdev->features &= ~NETIF_F_TSO6;
172 vlan_group_set_device(adapter->vlgrp, i, v_netdev);
173 }
174 }
175
176tso_out:
177 dev_info(&adapter->pdev->dev, "TSO is %s\n",
178 data ? "Enabled" : "Disabled");
179 adapter->flags |= FLAG_TSO_FORCE;
180 return 0;
181}
182
183static u32 igbvf_get_msglevel(struct net_device *netdev)
184{
185 struct igbvf_adapter *adapter = netdev_priv(netdev);
186 return adapter->msg_enable;
187}
188
189static void igbvf_set_msglevel(struct net_device *netdev, u32 data)
190{
191 struct igbvf_adapter *adapter = netdev_priv(netdev);
192 adapter->msg_enable = data;
193}
194
195static int igbvf_get_regs_len(struct net_device *netdev)
196{
197#define IGBVF_REGS_LEN 8
198 return IGBVF_REGS_LEN * sizeof(u32);
199}
200
201static void igbvf_get_regs(struct net_device *netdev,
202 struct ethtool_regs *regs, void *p)
203{
204 struct igbvf_adapter *adapter = netdev_priv(netdev);
205 struct e1000_hw *hw = &adapter->hw;
206 u32 *regs_buff = p;
207 u8 revision_id;
208
209 memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
210
211 pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);
212
213 regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
214
215 regs_buff[0] = er32(CTRL);
216 regs_buff[1] = er32(STATUS);
217
218 regs_buff[2] = er32(RDLEN(0));
219 regs_buff[3] = er32(RDH(0));
220 regs_buff[4] = er32(RDT(0));
221
222 regs_buff[5] = er32(TDLEN(0));
223 regs_buff[6] = er32(TDH(0));
224 regs_buff[7] = er32(TDT(0));
225}
226
227static int igbvf_get_eeprom_len(struct net_device *netdev)
228{
229 return 0;
230}
231
232static int igbvf_get_eeprom(struct net_device *netdev,
233 struct ethtool_eeprom *eeprom, u8 *bytes)
234{
235 return -EOPNOTSUPP;
236}
237
238static int igbvf_set_eeprom(struct net_device *netdev,
239 struct ethtool_eeprom *eeprom, u8 *bytes)
240{
241 return -EOPNOTSUPP;
242}
243
244static void igbvf_get_drvinfo(struct net_device *netdev,
245 struct ethtool_drvinfo *drvinfo)
246{
247 struct igbvf_adapter *adapter = netdev_priv(netdev);
248 char firmware_version[32] = "N/A";
249
250 strncpy(drvinfo->driver, igbvf_driver_name, 32);
251 strncpy(drvinfo->version, igbvf_driver_version, 32);
252 strncpy(drvinfo->fw_version, firmware_version, 32);
253 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
254 drvinfo->regdump_len = igbvf_get_regs_len(netdev);
255 drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
256}
257
258static void igbvf_get_ringparam(struct net_device *netdev,
259 struct ethtool_ringparam *ring)
260{
261 struct igbvf_adapter *adapter = netdev_priv(netdev);
262 struct igbvf_ring *tx_ring = adapter->tx_ring;
263 struct igbvf_ring *rx_ring = adapter->rx_ring;
264
265 ring->rx_max_pending = IGBVF_MAX_RXD;
266 ring->tx_max_pending = IGBVF_MAX_TXD;
267 ring->rx_mini_max_pending = 0;
268 ring->rx_jumbo_max_pending = 0;
269 ring->rx_pending = rx_ring->count;
270 ring->tx_pending = tx_ring->count;
271 ring->rx_mini_pending = 0;
272 ring->rx_jumbo_pending = 0;
273}
274
275static int igbvf_set_ringparam(struct net_device *netdev,
276 struct ethtool_ringparam *ring)
277{
278 struct igbvf_adapter *adapter = netdev_priv(netdev);
279 struct igbvf_ring *temp_ring;
280 int err;
281 u32 new_rx_count, new_tx_count;
282
283 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
284 return -EINVAL;
285
286 new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD);
287 new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD);
288 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
289
290 new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD);
291 new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD);
292 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
293
294 if ((new_tx_count == adapter->tx_ring->count) &&
295 (new_rx_count == adapter->rx_ring->count)) {
296 /* nothing to do */
297 return 0;
298 }
299
300 temp_ring = vmalloc(sizeof(struct igbvf_ring));
301 if (!temp_ring)
302 return -ENOMEM;
303
304 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
305 msleep(1);
306
307 if (netif_running(adapter->netdev))
308 igbvf_down(adapter);
309
310 /*
311 * We can't just free everything and then setup again,
312 * because the ISRs in MSI-X mode get passed pointers
313 * to the tx and rx ring structs.
314 */
315 if (new_tx_count != adapter->tx_ring->count) {
316 memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));
317
318 temp_ring->count = new_tx_count;
319 err = igbvf_setup_tx_resources(adapter, temp_ring);
320 if (err)
321 goto err_setup;
322
323 igbvf_free_tx_resources(adapter->tx_ring);
324
325 memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring));
326 }
327
328 if (new_rx_count != adapter->rx_ring->count) {
329 memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring));
330
331 temp_ring->count = new_rx_count;
332 err = igbvf_setup_rx_resources(adapter, temp_ring);
333 if (err)
334 goto err_setup;
335
336 igbvf_free_rx_resources(adapter->rx_ring);
337
338		memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring));
339 }
340
341 err = 0;
342err_setup:
343 if (netif_running(adapter->netdev))
344 igbvf_up(adapter);
345
346 clear_bit(__IGBVF_RESETTING, &adapter->state);
347 vfree(temp_ring);
348 return err;
349}
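
Usage note (outside the patch): the clamping above means a request such as
"ethtool -G eth0 rx 500" is first bounded to [IGBVF_MIN_RXD, IGBVF_MAX_RXD]
= [80, 4096] and then rounded up to the next multiple of
REQ_RX_DESCRIPTOR_MULTIPLE, so the ring is actually resized to 504
descriptors.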
350
351static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
352{
353 struct e1000_hw *hw = &adapter->hw;
354 *data = 0;
355
356 hw->mac.ops.check_for_link(hw);
357
358 if (!(er32(STATUS) & E1000_STATUS_LU))
359 *data = 1;
360
361 return *data;
362}
363
364static int igbvf_get_self_test_count(struct net_device *netdev)
365{
366 return IGBVF_TEST_LEN;
367}
368
369static int igbvf_get_stats_count(struct net_device *netdev)
370{
371 return IGBVF_GLOBAL_STATS_LEN;
372}
373
374static void igbvf_diag_test(struct net_device *netdev,
375 struct ethtool_test *eth_test, u64 *data)
376{
377 struct igbvf_adapter *adapter = netdev_priv(netdev);
378
379 set_bit(__IGBVF_TESTING, &adapter->state);
380
381 /*
382 * Link test performed before hardware reset so autoneg doesn't
383 * interfere with test result
384 */
385 if (igbvf_link_test(adapter, &data[0]))
386 eth_test->flags |= ETH_TEST_FL_FAILED;
387
388 clear_bit(__IGBVF_TESTING, &adapter->state);
389 msleep_interruptible(4 * 1000);
390}
391
392static void igbvf_get_wol(struct net_device *netdev,
393 struct ethtool_wolinfo *wol)
394{
395 wol->supported = 0;
396 wol->wolopts = 0;
397
398 return;
399}
400
401static int igbvf_set_wol(struct net_device *netdev,
402 struct ethtool_wolinfo *wol)
403{
404 return -EOPNOTSUPP;
405}
406
407static int igbvf_phys_id(struct net_device *netdev, u32 data)
408{
409 return 0;
410}
411
412static int igbvf_get_coalesce(struct net_device *netdev,
413 struct ethtool_coalesce *ec)
414{
415 struct igbvf_adapter *adapter = netdev_priv(netdev);
416
417 if (adapter->itr_setting <= 3)
418 ec->rx_coalesce_usecs = adapter->itr_setting;
419 else
420 ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
421
422 return 0;
423}
424
425static int igbvf_set_coalesce(struct net_device *netdev,
426 struct ethtool_coalesce *ec)
427{
428 struct igbvf_adapter *adapter = netdev_priv(netdev);
429 struct e1000_hw *hw = &adapter->hw;
430
431 if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
432 ((ec->rx_coalesce_usecs > 3) &&
433 (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
434 (ec->rx_coalesce_usecs == 2))
435 return -EINVAL;
436
437 /* convert to rate of irq's per second */
438 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
439 adapter->itr = IGBVF_START_ITR;
440 adapter->itr_setting = ec->rx_coalesce_usecs;
441 } else {
442 adapter->itr = ec->rx_coalesce_usecs << 2;
443 adapter->itr_setting = adapter->itr;
444 }
445
446 writel(adapter->itr,
447 hw->hw_addr + adapter->rx_ring[0].itr_register);
448
449 return 0;
450}
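
Worked example (outside the patch): "ethtool -C eth0 rx-usecs 162" takes the
else branch and stores adapter->itr = 162 << 2 = 648, which matches
IGBVF_START_ITR's "~6000 ints/sec" comment (1,000,000 / 162 is roughly 6170
interrupts per second).  The reserved low values 1 and 3 instead select the
driver's predefined throttling setting via IGBVF_START_ITR, and 2 is
rejected with -EINVAL.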
451
452static int igbvf_nway_reset(struct net_device *netdev)
453{
454 struct igbvf_adapter *adapter = netdev_priv(netdev);
455 if (netif_running(netdev))
456 igbvf_reinit_locked(adapter);
457 return 0;
458}
459
460
461static void igbvf_get_ethtool_stats(struct net_device *netdev,
462 struct ethtool_stats *stats,
463 u64 *data)
464{
465 struct igbvf_adapter *adapter = netdev_priv(netdev);
466 int i;
467
468 igbvf_update_stats(adapter);
469 for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
470 char *p = (char *)adapter +
471 igbvf_gstrings_stats[i].stat_offset;
472 char *b = (char *)adapter +
473 igbvf_gstrings_stats[i].base_stat_offset;
474 data[i] = ((igbvf_gstrings_stats[i].sizeof_stat ==
475 sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
476 (*(u32 *)p - *(u32 *)b));
477 }
478
479}
480
481static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
482 u8 *data)
483{
484 u8 *p = data;
485 int i;
486
487 switch (stringset) {
488 case ETH_SS_TEST:
489 memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test));
490 break;
491 case ETH_SS_STATS:
492 for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
493 memcpy(p, igbvf_gstrings_stats[i].stat_string,
494 ETH_GSTRING_LEN);
495 p += ETH_GSTRING_LEN;
496 }
497 break;
498 }
499}
500
501static const struct ethtool_ops igbvf_ethtool_ops = {
502 .get_settings = igbvf_get_settings,
503 .set_settings = igbvf_set_settings,
504 .get_drvinfo = igbvf_get_drvinfo,
505 .get_regs_len = igbvf_get_regs_len,
506 .get_regs = igbvf_get_regs,
507 .get_wol = igbvf_get_wol,
508 .set_wol = igbvf_set_wol,
509 .get_msglevel = igbvf_get_msglevel,
510 .set_msglevel = igbvf_set_msglevel,
511 .nway_reset = igbvf_nway_reset,
512 .get_link = igbvf_get_link,
513 .get_eeprom_len = igbvf_get_eeprom_len,
514 .get_eeprom = igbvf_get_eeprom,
515 .set_eeprom = igbvf_set_eeprom,
516 .get_ringparam = igbvf_get_ringparam,
517 .set_ringparam = igbvf_set_ringparam,
518 .get_pauseparam = igbvf_get_pauseparam,
519 .set_pauseparam = igbvf_set_pauseparam,
520 .get_tx_csum = igbvf_get_tx_csum,
521 .set_tx_csum = igbvf_set_tx_csum,
522 .get_sg = ethtool_op_get_sg,
523 .set_sg = ethtool_op_set_sg,
524 .get_tso = ethtool_op_get_tso,
525 .set_tso = igbvf_set_tso,
526 .self_test = igbvf_diag_test,
527 .get_strings = igbvf_get_strings,
528 .phys_id = igbvf_phys_id,
529 .get_ethtool_stats = igbvf_get_ethtool_stats,
530 .self_test_count = igbvf_get_self_test_count,
531 .get_stats_count = igbvf_get_stats_count,
532 .get_coalesce = igbvf_get_coalesce,
533 .set_coalesce = igbvf_set_coalesce,
534};
535
536void igbvf_set_ethtool_ops(struct net_device *netdev)
537{
538 /* have to "undeclare" const on this struct to remove warnings */
539 SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops);
540}
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
new file mode 100644
index 000000000000..936ed2a9435f
--- /dev/null
+++ b/drivers/net/igbvf/igbvf.h
@@ -0,0 +1,335 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* Linux PRO/1000 Ethernet Driver main header file */
29
30#ifndef _IGBVF_H_
31#define _IGBVF_H_
32
33#include <linux/types.h>
34#include <linux/timer.h>
35#include <linux/io.h>
36#include <linux/netdevice.h>
37
38
39#include "vf.h"
40
41/* Forward declarations */
42struct igbvf_info;
43struct igbvf_adapter;
44
45/* Interrupt defines */
46#define IGBVF_START_ITR 648 /* ~6000 ints/sec */
47
48/* Interrupt modes, as used by the IntMode parameter */
49#define IGBVF_INT_MODE_LEGACY 0
50#define IGBVF_INT_MODE_MSI 1
51#define IGBVF_INT_MODE_MSIX 2
52
53/* Tx/Rx descriptor defines */
54#define IGBVF_DEFAULT_TXD 256
55#define IGBVF_MAX_TXD 4096
56#define IGBVF_MIN_TXD 80
57
58#define IGBVF_DEFAULT_RXD 256
59#define IGBVF_MAX_RXD 4096
60#define IGBVF_MIN_RXD 80
61
62#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
63#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
64
65/* RX descriptor control thresholds.
66 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
67 * descriptors available in its onboard memory.
68 * Setting this to 0 disables RX descriptor prefetch.
69 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
70 * available in host memory.
71 * If PTHRESH is 0, this should also be 0.
72 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
73 * descriptors until either it has this many to write back, or the
74 * ITR timer expires.
75 */
76#define IGBVF_RX_PTHRESH 16
77#define IGBVF_RX_HTHRESH 8
78#define IGBVF_RX_WTHRESH 1
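
For reference, a hedged sketch (not from the patch) of how netdev.c later
packs these thresholds into the per-queue RXDCTL register; the field
positions (PTHRESH bits 4:0, HTHRESH bits 12:8, WTHRESH bits 20:16) are an
assumption taken from the 82576 RXDCTL layout used by the igb PF driver:

	u32 rxdctl = er32(RXDCTL(0));

	rxdctl |= IGBVF_RX_PTHRESH |
	          (IGBVF_RX_HTHRESH << 8) |
	          (IGBVF_RX_WTHRESH << 16) |
	          E1000_RXDCTL_QUEUE_ENABLE;	/* from defines.h */
	ew32(RXDCTL(0), rxdctl);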
79
80/* this is the size past which hardware will drop packets when setting LPE=0 */
81#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
82
83#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */
84
85/* How many Tx Descriptors do we need to call netif_wake_queue ? */
86#define IGBVF_TX_QUEUE_WAKE 32
87/* How many Rx Buffers do we bundle into one write to the hardware ? */
88#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
89
90#define AUTO_ALL_MODES 0
91#define IGBVF_EEPROM_APME 0x0400
92
93#define IGBVF_MNG_VLAN_NONE (-1)
94
95/* Number of packet split data buffers (not including the header buffer) */
96#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
97
98enum igbvf_boards {
99 board_vf,
100};
101
102struct igbvf_queue_stats {
103 u64 packets;
104 u64 bytes;
105};
106
107/*
108 * wrappers around a pointer to a socket buffer,
109 * so a DMA handle can be stored along with the buffer
110 */
111struct igbvf_buffer {
112 dma_addr_t dma;
113 struct sk_buff *skb;
114 union {
115 /* Tx */
116 struct {
117 unsigned long time_stamp;
118 u16 length;
119 u16 next_to_watch;
120 };
121 /* Rx */
122 struct {
123 struct page *page;
124 u64 page_dma;
125 unsigned int page_offset;
126 };
127 };
128 struct page *page;
129};
130
131union igbvf_desc {
132 union e1000_adv_rx_desc rx_desc;
133 union e1000_adv_tx_desc tx_desc;
134 struct e1000_adv_tx_context_desc tx_context_desc;
135};
136
137struct igbvf_ring {
138 struct igbvf_adapter *adapter; /* backlink */
139 union igbvf_desc *desc; /* pointer to ring memory */
140 dma_addr_t dma; /* phys address of ring */
141 unsigned int size; /* length of ring in bytes */
142 unsigned int count; /* number of desc. in ring */
143
144 u16 next_to_use;
145 u16 next_to_clean;
146
147 u16 head;
148 u16 tail;
149
150 /* array of buffer information structs */
151 struct igbvf_buffer *buffer_info;
152 struct napi_struct napi;
153
154 char name[IFNAMSIZ + 5];
155 u32 eims_value;
156 u32 itr_val;
157 u16 itr_register;
158 int set_itr;
159
160 struct sk_buff *rx_skb_top;
161
162 struct igbvf_queue_stats stats;
163};
164
165/* board specific private data structure */
166struct igbvf_adapter {
167 struct timer_list watchdog_timer;
168 struct timer_list blink_timer;
169
170 struct work_struct reset_task;
171 struct work_struct watchdog_task;
172
173 const struct igbvf_info *ei;
174
175 struct vlan_group *vlgrp;
176 u32 bd_number;
177 u32 rx_buffer_len;
178 u32 polling_interval;
179 u16 mng_vlan_id;
180 u16 link_speed;
181 u16 link_duplex;
182
183 spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
184
185 /* track device up/down/testing state */
186 unsigned long state;
187
188 /* Interrupt Throttle Rate */
189 u32 itr;
190 u32 itr_setting;
191 u16 tx_itr;
192 u16 rx_itr;
193
194 /*
195 * Tx
196 */
197 struct igbvf_ring *tx_ring /* One per active queue */
198 ____cacheline_aligned_in_smp;
199
200 unsigned long tx_queue_len;
201 unsigned int restart_queue;
202 u32 txd_cmd;
203
204 bool detect_tx_hung;
205 u8 tx_timeout_factor;
206
207 u32 tx_int_delay;
208 u32 tx_abs_int_delay;
209
210 unsigned int total_tx_bytes;
211 unsigned int total_tx_packets;
212 unsigned int total_rx_bytes;
213 unsigned int total_rx_packets;
214
215 /* Tx stats */
216 u32 tx_timeout_count;
217 u32 tx_fifo_head;
218 u32 tx_head_addr;
219 u32 tx_fifo_size;
220 u32 tx_dma_failed;
221
222 /*
223 * Rx
224 */
225 struct igbvf_ring *rx_ring;
226
227 u32 rx_int_delay;
228 u32 rx_abs_int_delay;
229
230 /* Rx stats */
231 u64 hw_csum_err;
232 u64 hw_csum_good;
233 u64 rx_hdr_split;
234 u32 alloc_rx_buff_failed;
235 u32 rx_dma_failed;
236
237 unsigned int rx_ps_hdr_size;
238 u32 max_frame_size;
239 u32 min_frame_size;
240
241 /* OS defined structs */
242 struct net_device *netdev;
243 struct pci_dev *pdev;
244 struct net_device_stats net_stats;
245 spinlock_t stats_lock; /* prevent concurrent stats updates */
246
247 /* structs defined in e1000_hw.h */
248 struct e1000_hw hw;
249
250 /* The VF counters don't clear on read so we have to get a base
251 * count on driver start up and always subtract that base on
252	 * the first update, thus the flag.
253 */
254 struct e1000_vf_stats stats;
255 u64 zero_base;
256
257 struct igbvf_ring test_tx_ring;
258 struct igbvf_ring test_rx_ring;
259 u32 test_icr;
260
261 u32 msg_enable;
262 struct msix_entry *msix_entries;
263 int int_mode;
264 u32 eims_enable_mask;
265 u32 eims_other;
266 u32 int_counter0;
267 u32 int_counter1;
268
269 u32 eeprom_wol;
270 u32 wol;
271 u32 pba;
272
273 bool fc_autoneg;
274
275 unsigned long led_status;
276
277 unsigned int flags;
278};
279
280struct igbvf_info {
281 enum e1000_mac_type mac;
282 unsigned int flags;
283 u32 pba;
284 void (*init_ops)(struct e1000_hw *);
285 s32 (*get_variants)(struct igbvf_adapter *);
286};
287
288/* hardware capability, feature, and workaround flags */
289#define FLAG_HAS_HW_VLAN_FILTER (1 << 0)
290#define FLAG_HAS_JUMBO_FRAMES (1 << 1)
291#define FLAG_MSI_ENABLED (1 << 2)
292#define FLAG_RX_CSUM_ENABLED (1 << 3)
293#define FLAG_TSO_FORCE (1 << 4)
294
295#define IGBVF_RX_DESC_ADV(R, i) \
296 (&((((R).desc))[i].rx_desc))
297#define IGBVF_TX_DESC_ADV(R, i) \
298 (&((((R).desc))[i].tx_desc))
299#define IGBVF_TX_CTXTDESC_ADV(R, i) \
300 (&((((R).desc))[i].tx_context_desc))
301
302enum igbvf_state_t {
303 __IGBVF_TESTING,
304 __IGBVF_RESETTING,
305 __IGBVF_DOWN
306};
307
308enum latency_range {
309 lowest_latency = 0,
310 low_latency = 1,
311 bulk_latency = 2,
312 latency_invalid = 255
313};
314
315extern char igbvf_driver_name[];
316extern const char igbvf_driver_version[];
317
318extern void igbvf_check_options(struct igbvf_adapter *);
319extern void igbvf_set_ethtool_ops(struct net_device *);
320
321extern int igbvf_up(struct igbvf_adapter *);
322extern void igbvf_down(struct igbvf_adapter *);
323extern void igbvf_reinit_locked(struct igbvf_adapter *);
324extern void igbvf_reset(struct igbvf_adapter *);
325extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
326extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
327extern void igbvf_free_rx_resources(struct igbvf_ring *);
328extern void igbvf_free_tx_resources(struct igbvf_ring *);
329extern void igbvf_update_stats(struct igbvf_adapter *);
330extern void igbvf_set_interrupt_capability(struct igbvf_adapter *);
331extern void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
332
333extern unsigned int copybreak;
334
335#endif /* _IGBVF_H_ */
diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c
new file mode 100644
index 000000000000..819a8ec901dc
--- /dev/null
+++ b/drivers/net/igbvf/mbx.c
@@ -0,0 +1,350 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "mbx.h"
29
30/**
31 * e1000_poll_for_msg - Wait for message notification
32 * @hw: pointer to the HW structure
33 *
34 * returns SUCCESS if it successfully received a message notification
35 **/
36static s32 e1000_poll_for_msg(struct e1000_hw *hw)
37{
38 struct e1000_mbx_info *mbx = &hw->mbx;
39 int countdown = mbx->timeout;
40
41 if (!mbx->ops.check_for_msg)
42 goto out;
43
44 while (countdown && mbx->ops.check_for_msg(hw)) {
45 countdown--;
46 udelay(mbx->usec_delay);
47 }
48
49 /* if we failed, all future posted messages fail until reset */
50 if (!countdown)
51 mbx->timeout = 0;
52out:
53 return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
54}
55
56/**
57 * e1000_poll_for_ack - Wait for message acknowledgement
58 * @hw: pointer to the HW structure
59 *
60 * returns SUCCESS if it successfully received a message acknowledgement
61 **/
62static s32 e1000_poll_for_ack(struct e1000_hw *hw)
63{
64 struct e1000_mbx_info *mbx = &hw->mbx;
65 int countdown = mbx->timeout;
66
67 if (!mbx->ops.check_for_ack)
68 goto out;
69
70 while (countdown && mbx->ops.check_for_ack(hw)) {
71 countdown--;
72 udelay(mbx->usec_delay);
73 }
74
75 /* if we failed, all future posted messages fail until reset */
76 if (!countdown)
77 mbx->timeout = 0;
78out:
79 return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
80}
81
82/**
83 * e1000_read_posted_mbx - Wait for message notification and receive message
84 * @hw: pointer to the HW structure
85 * @msg: The message buffer
86 * @size: Length of buffer
87 *
88 * returns SUCCESS if it successfully received a message notification and
89 * copied it into the receive buffer.
90 **/
91static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size)
92{
93 struct e1000_mbx_info *mbx = &hw->mbx;
94 s32 ret_val = -E1000_ERR_MBX;
95
96 if (!mbx->ops.read)
97 goto out;
98
99 ret_val = e1000_poll_for_msg(hw);
100
101	/* if we got a message, read it; otherwise we timed out */
102 if (!ret_val)
103 ret_val = mbx->ops.read(hw, msg, size);
104out:
105 return ret_val;
106}
107
108/**
109 * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
110 * @hw: pointer to the HW structure
111 * @msg: The message buffer
112 * @size: Length of buffer
113 *
114 * returns SUCCESS if it successfully copied message into the buffer and
115 * received an ack to that message within delay * timeout period
116 **/
117static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size)
118{
119 struct e1000_mbx_info *mbx = &hw->mbx;
120 s32 ret_val = -E1000_ERR_MBX;
121
122 /* exit if we either can't write or there isn't a defined timeout */
123 if (!mbx->ops.write || !mbx->timeout)
124 goto out;
125
126	/* send msg */
127 ret_val = mbx->ops.write(hw, msg, size);
128
129 /* if msg sent wait until we receive an ack */
130 if (!ret_val)
131 ret_val = e1000_poll_for_ack(hw);
132out:
133 return ret_val;
134}
135
136/**
137 * e1000_read_v2p_mailbox - read v2p mailbox
138 * @hw: pointer to the HW structure
139 *
140 * This function is used to read the v2p mailbox without losing the read to
141 * clear status bits.
142 **/
143static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw)
144{
145 u32 v2p_mailbox = er32(V2PMAILBOX(0));
146
147 v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox;
148 hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS;
149
150 return v2p_mailbox;
151}
152
153/**
154 * e1000_check_for_bit_vf - Determine if a status bit was set
155 * @hw: pointer to the HW structure
156 * @mask: bitmask for bits to be tested and cleared
157 *
158 * This function is used to check for the read to clear bits within
159 * the V2P mailbox.
160 **/
161static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask)
162{
163 u32 v2p_mailbox = e1000_read_v2p_mailbox(hw);
164 s32 ret_val = -E1000_ERR_MBX;
165
166 if (v2p_mailbox & mask)
167 ret_val = E1000_SUCCESS;
168
169 hw->dev_spec.vf.v2p_mailbox &= ~mask;
170
171 return ret_val;
172}
173
174/**
175 * e1000_check_for_msg_vf - checks to see if the PF has sent mail
176 * @hw: pointer to the HW structure
177 *
178 * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
179 **/
180static s32 e1000_check_for_msg_vf(struct e1000_hw *hw)
181{
182 s32 ret_val = -E1000_ERR_MBX;
183
184 if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) {
185 ret_val = E1000_SUCCESS;
186 hw->mbx.stats.reqs++;
187 }
188
189 return ret_val;
190}
191
192/**
193 * e1000_check_for_ack_vf - checks to see if the PF has ACK'd
194 * @hw: pointer to the HW structure
195 *
196 * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
197 **/
198static s32 e1000_check_for_ack_vf(struct e1000_hw *hw)
199{
200 s32 ret_val = -E1000_ERR_MBX;
201
202 if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) {
203 ret_val = E1000_SUCCESS;
204 hw->mbx.stats.acks++;
205 }
206
207 return ret_val;
208}
209
210/**
211 * e1000_check_for_rst_vf - checks to see if the PF has reset
212 * @hw: pointer to the HW structure
213 *
214 * returns true if the PF has set the reset done bit or else false
215 **/
216static s32 e1000_check_for_rst_vf(struct e1000_hw *hw)
217{
218 s32 ret_val = -E1000_ERR_MBX;
219
220 if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
221 E1000_V2PMAILBOX_RSTI))) {
222 ret_val = E1000_SUCCESS;
223 hw->mbx.stats.rsts++;
224 }
225
226 return ret_val;
227}
228
229/**
230 * e1000_obtain_mbx_lock_vf - obtain mailbox lock
231 * @hw: pointer to the HW structure
232 *
233 * return SUCCESS if we obtained the mailbox lock
234 **/
235static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
236{
237 s32 ret_val = -E1000_ERR_MBX;
238
239 /* Take ownership of the buffer */
240 ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
241
242 /* reserve mailbox for vf use */
243 if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
244 ret_val = E1000_SUCCESS;
245
246 return ret_val;
247}
248
249/**
250 * e1000_write_mbx_vf - Write a message to the mailbox
251 * @hw: pointer to the HW structure
252 * @msg: The message buffer
253 * @size: Length of buffer
254 *
255 * returns SUCCESS if it successfully copied message into the buffer
256 **/
257static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
258{
259 s32 err;
260 u16 i;
261
262 /* lock the mailbox to prevent pf/vf race condition */
263 err = e1000_obtain_mbx_lock_vf(hw);
264 if (err)
265 goto out_no_write;
266
267 /* flush any ack or msg as we are going to overwrite mailbox */
268 e1000_check_for_ack_vf(hw);
269 e1000_check_for_msg_vf(hw);
270
271 /* copy the caller specified message to the mailbox memory buffer */
272 for (i = 0; i < size; i++)
273 array_ew32(VMBMEM(0), i, msg[i]);
274
275 /* update stats */
276 hw->mbx.stats.msgs_tx++;
277
278 /* Drop VFU and interrupt the PF to tell it a message has been sent */
279 ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ);
280
281out_no_write:
282 return err;
283}
284
285/**
286 * e1000_read_mbx_vf - Reads a message from the inbox intended for vf
287 * @hw: pointer to the HW structure
288 * @msg: The message buffer
289 * @size: Length of buffer
290 *
291 * returns SUCCESS if it successfully read message from buffer
292 **/
293static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
294{
295 s32 err;
296 u16 i;
297
298 /* lock the mailbox to prevent pf/vf race condition */
299 err = e1000_obtain_mbx_lock_vf(hw);
300 if (err)
301 goto out_no_read;
302
303 /* copy the message from the mailbox memory buffer */
304 for (i = 0; i < size; i++)
305 msg[i] = array_er32(VMBMEM(0), i);
306
307 /* Acknowledge receipt and release mailbox, then we're done */
308 ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK);
309
310 /* update stats */
311 hw->mbx.stats.msgs_rx++;
312
313out_no_read:
314 return err;
315}
316
317/**
318 * e1000_init_mbx_params_vf - set initial values for vf mailbox
319 * @hw: pointer to the HW structure
320 *
321 * Initializes the hw->mbx struct to correct values for vf mailbox
322 */
323s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
324{
325 struct e1000_mbx_info *mbx = &hw->mbx;
326
327	/* start mailbox as timed out and let the reset_hw call set the timeout
328	 * value to begin communications */
329 mbx->timeout = 0;
330 mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
331
332 mbx->size = E1000_VFMAILBOX_SIZE;
333
334 mbx->ops.read = e1000_read_mbx_vf;
335 mbx->ops.write = e1000_write_mbx_vf;
336 mbx->ops.read_posted = e1000_read_posted_mbx;
337 mbx->ops.write_posted = e1000_write_posted_mbx;
338 mbx->ops.check_for_msg = e1000_check_for_msg_vf;
339 mbx->ops.check_for_ack = e1000_check_for_ack_vf;
340 mbx->ops.check_for_rst = e1000_check_for_rst_vf;
341
342 mbx->stats.msgs_tx = 0;
343 mbx->stats.msgs_rx = 0;
344 mbx->stats.reqs = 0;
345 mbx->stats.acks = 0;
346 mbx->stats.rsts = 0;
347
348 return E1000_SUCCESS;
349}
350
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h
new file mode 100644
index 000000000000..4938609dbfb5
--- /dev/null
+++ b/drivers/net/igbvf/mbx.h
@@ -0,0 +1,75 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_MBX_H_
29#define _E1000_MBX_H_
30
31#include "vf.h"
32
33#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
34#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */
35#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
36#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
37#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
38#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
39#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */
40#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
41#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
42
43#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
44
45/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
46 * PF. The reverse is true if it is E1000_PF_*.
47 * Message ACK's are the value or'd with 0xF0000000
48 */
49#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
50 * this are the ACK */
51#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
52 * this are the NACK */
53#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
54 clear to send requests */
55
56/* We have a total wait time of 1s for vf mailbox posted messages */
57#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */
58#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */
59
60#define E1000_VT_MSGINFO_SHIFT 16
61/* bits 23:16 are used for extra info for certain messages */
62#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
63
64#define E1000_VF_RESET 0x01 /* VF requests reset */
65#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
66#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
67#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
68#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
69
70#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
71
72void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
73s32 e1000_init_mbx_params_vf(struct e1000_hw *);
74
75#endif /* _E1000_MBX_H_ */
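
To make the framing concrete: the posted helpers in mbx.c retry up to
E1000_VF_MBX_INIT_TIMEOUT times with an E1000_VF_MBX_INIT_DELAY pause
between attempts, i.e. 2000 x 500 usec = the 1 s budget noted above.  A
hedged sketch (hypothetical helper, not part of the patch) of how a VF-side
caller frames a request from the opcodes above and decodes the PF's echo:

	static s32 igbvf_sketch_set_lpe(struct e1000_hw *hw, u32 max_frame_size)
	{
		u32 msg[2];
		s32 err;

		msg[0] = E1000_VF_SET_LPE;	/* opcode in the low bits */
		msg[1] = max_frame_size;	/* payload word; layout assumed */

		err = hw->mbx.ops.write_posted(hw, msg, 2);
		if (!err)
			err = hw->mbx.ops.read_posted(hw, msg, 1);

		/* the PF echoes the opcode or'd with the ACK or NACK type bit */
		if (!err && msg[0] == (E1000_VF_SET_LPE | E1000_VT_MSGTYPE_NACK))
			err = -E1000_ERR_MBX;

		return err;
	}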
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
new file mode 100644
index 000000000000..c5648420dedf
--- /dev/null
+++ b/drivers/net/igbvf/netdev.c
@@ -0,0 +1,2919 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/module.h>
29#include <linux/types.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/vmalloc.h>
33#include <linux/pagemap.h>
34#include <linux/delay.h>
35#include <linux/netdevice.h>
36#include <linux/tcp.h>
37#include <linux/ipv6.h>
38#include <net/checksum.h>
39#include <net/ip6_checksum.h>
40#include <linux/mii.h>
41#include <linux/ethtool.h>
42#include <linux/if_vlan.h>
43#include <linux/pm_qos_params.h>
44
45#include "igbvf.h"
46
47#define DRV_VERSION "1.0.0-k0"
48char igbvf_driver_name[] = "igbvf";
49const char igbvf_driver_version[] = DRV_VERSION;
50static const char igbvf_driver_string[] =
51 "Intel(R) Virtual Function Network Driver";
52static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
53
54static int igbvf_poll(struct napi_struct *napi, int budget);
55
56static struct igbvf_info igbvf_vf_info = {
57 .mac = e1000_vfadapt,
58 .flags = FLAG_HAS_JUMBO_FRAMES
59 | FLAG_RX_CSUM_ENABLED,
60 .pba = 10,
61 .init_ops = e1000_init_function_pointers_vf,
62};
63
64static const struct igbvf_info *igbvf_info_tbl[] = {
65 [board_vf] = &igbvf_vf_info,
66};
67
68/**
69 * igbvf_desc_unused - calculate if we have unused descriptors
70 **/
71static int igbvf_desc_unused(struct igbvf_ring *ring)
72{
73 if (ring->next_to_clean > ring->next_to_use)
74 return ring->next_to_clean - ring->next_to_use - 1;
75
76 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
77}
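
Worked example (outside the patch): with count = 256, next_to_clean = 10 and
next_to_use = 250, the wrap-around branch yields 256 + 10 - 250 - 1 = 15
unused descriptors; the trailing -1 keeps next_to_use from ever catching
next_to_clean, which would make a full ring indistinguishable from an empty
one.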
78
79/**
80 * igbvf_receive_skb - helper function to handle Rx indications
81 * @adapter: board private structure
82 * @status: descriptor status field as written by hardware
83 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
84 * @skb: pointer to sk_buff to be indicated to stack
85 **/
86static void igbvf_receive_skb(struct igbvf_adapter *adapter,
87 struct net_device *netdev,
88 struct sk_buff *skb,
89 u32 status, u16 vlan)
90{
91 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
92 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
93 le16_to_cpu(vlan) &
94 E1000_RXD_SPC_VLAN_MASK);
95 else
96 netif_receive_skb(skb);
97
98 netdev->last_rx = jiffies;
99}
100
101static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
102 u32 status_err, struct sk_buff *skb)
103{
104 skb->ip_summed = CHECKSUM_NONE;
105
106 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
107 if ((status_err & E1000_RXD_STAT_IXSM))
108 return;
109 /* TCP/UDP checksum error bit is set */
110 if (status_err &
111 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
112 /* let the stack verify checksum errors */
113 adapter->hw_csum_err++;
114 return;
115 }
116 /* It must be a TCP or UDP packet with a valid checksum */
117 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
118 skb->ip_summed = CHECKSUM_UNNECESSARY;
119
120 adapter->hw_csum_good++;
121}
122
123/**
124 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
125 * @rx_ring: address of ring structure to repopulate
126 * @cleaned_count: number of buffers to repopulate
127 **/
128static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
129 int cleaned_count)
130{
131 struct igbvf_adapter *adapter = rx_ring->adapter;
132 struct net_device *netdev = adapter->netdev;
133 struct pci_dev *pdev = adapter->pdev;
134 union e1000_adv_rx_desc *rx_desc;
135 struct igbvf_buffer *buffer_info;
136 struct sk_buff *skb;
137 unsigned int i;
138 int bufsz;
139
140 i = rx_ring->next_to_use;
141 buffer_info = &rx_ring->buffer_info[i];
142
143 if (adapter->rx_ps_hdr_size)
144 bufsz = adapter->rx_ps_hdr_size;
145 else
146 bufsz = adapter->rx_buffer_len;
147 bufsz += NET_IP_ALIGN;
148
149 while (cleaned_count--) {
150 rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
151
152 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
153 if (!buffer_info->page) {
154 buffer_info->page = alloc_page(GFP_ATOMIC);
155 if (!buffer_info->page) {
156 adapter->alloc_rx_buff_failed++;
157 goto no_buffers;
158 }
159 buffer_info->page_offset = 0;
160 } else {
161 buffer_info->page_offset ^= PAGE_SIZE / 2;
162 }
163 buffer_info->page_dma =
164 pci_map_page(pdev, buffer_info->page,
165 buffer_info->page_offset,
166 PAGE_SIZE / 2,
167 PCI_DMA_FROMDEVICE);
168 }
169
170 if (!buffer_info->skb) {
171 skb = netdev_alloc_skb(netdev, bufsz);
172 if (!skb) {
173 adapter->alloc_rx_buff_failed++;
174 goto no_buffers;
175 }
176
177 /* Make buffer alignment 2 beyond a 16 byte boundary
178 * this will result in a 16 byte aligned IP header after
179 * the 14 byte MAC header is removed
180 */
181 skb_reserve(skb, NET_IP_ALIGN);
182
183 buffer_info->skb = skb;
184 buffer_info->dma = pci_map_single(pdev, skb->data,
185 bufsz,
186 PCI_DMA_FROMDEVICE);
187 }
188 /* Refresh the desc even if buffer_addrs didn't change because
189 * each write-back erases this info. */
190 if (adapter->rx_ps_hdr_size) {
191 rx_desc->read.pkt_addr =
192 cpu_to_le64(buffer_info->page_dma);
193 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
194 } else {
195 rx_desc->read.pkt_addr =
196 cpu_to_le64(buffer_info->dma);
197 rx_desc->read.hdr_addr = 0;
198 }
199
200 i++;
201 if (i == rx_ring->count)
202 i = 0;
203 buffer_info = &rx_ring->buffer_info[i];
204 }
205
206no_buffers:
207 if (rx_ring->next_to_use != i) {
208 rx_ring->next_to_use = i;
209 if (i == 0)
210 i = (rx_ring->count - 1);
211 else
212 i--;
213
214 /* Force memory writes to complete before letting h/w
215 * know there are new descriptors to fetch. (Only
216 * applicable for weak-ordered memory model archs,
217 * such as IA-64). */
218 wmb();
219 writel(i, adapter->hw.hw_addr + rx_ring->tail);
220 }
221}
222
223/**
224 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
225 * @adapter: board private structure
226 *
227 * the return value indicates whether actual cleaning was done, there
228 * is no guarantee that everything was cleaned
229 **/
230static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
231 int *work_done, int work_to_do)
232{
233 struct igbvf_ring *rx_ring = adapter->rx_ring;
234 struct net_device *netdev = adapter->netdev;
235 struct pci_dev *pdev = adapter->pdev;
236 union e1000_adv_rx_desc *rx_desc, *next_rxd;
237 struct igbvf_buffer *buffer_info, *next_buffer;
238 struct sk_buff *skb;
239 bool cleaned = false;
240 int cleaned_count = 0;
241 unsigned int total_bytes = 0, total_packets = 0;
242 unsigned int i;
243 u32 length, hlen, staterr;
244
245 i = rx_ring->next_to_clean;
246 rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
247 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
248
249 while (staterr & E1000_RXD_STAT_DD) {
250 if (*work_done >= work_to_do)
251 break;
252 (*work_done)++;
253
254 buffer_info = &rx_ring->buffer_info[i];
255
256 /* HW will not DMA in data larger than the given buffer, even
257 * if it parses the (NFS, of course) header to be larger. In
258 * that case, it fills the header buffer and spills the rest
259 * into the page.
260 */
261 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
262 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
263 if (hlen > adapter->rx_ps_hdr_size)
264 hlen = adapter->rx_ps_hdr_size;
265
266 length = le16_to_cpu(rx_desc->wb.upper.length);
267 cleaned = true;
268 cleaned_count++;
269
270 skb = buffer_info->skb;
271 prefetch(skb->data - NET_IP_ALIGN);
272 buffer_info->skb = NULL;
273 if (!adapter->rx_ps_hdr_size) {
274 pci_unmap_single(pdev, buffer_info->dma,
275 adapter->rx_buffer_len,
276 PCI_DMA_FROMDEVICE);
277 buffer_info->dma = 0;
278 skb_put(skb, length);
279 goto send_up;
280 }
281
282 if (!skb_shinfo(skb)->nr_frags) {
283 pci_unmap_single(pdev, buffer_info->dma,
284 adapter->rx_ps_hdr_size + NET_IP_ALIGN,
285 PCI_DMA_FROMDEVICE);
286 skb_put(skb, hlen);
287 }
288
289 if (length) {
290 pci_unmap_page(pdev, buffer_info->page_dma,
291 PAGE_SIZE / 2,
292 PCI_DMA_FROMDEVICE);
293 buffer_info->page_dma = 0;
294
295 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
296 buffer_info->page,
297 buffer_info->page_offset,
298 length);
299
300 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
301 (page_count(buffer_info->page) != 1))
302 buffer_info->page = NULL;
303 else
304 get_page(buffer_info->page);
305
306 skb->len += length;
307 skb->data_len += length;
308 skb->truesize += length;
309 }
310send_up:
311 i++;
312 if (i == rx_ring->count)
313 i = 0;
314 next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
315 prefetch(next_rxd);
316 next_buffer = &rx_ring->buffer_info[i];
317
318 if (!(staterr & E1000_RXD_STAT_EOP)) {
319 buffer_info->skb = next_buffer->skb;
320 buffer_info->dma = next_buffer->dma;
321 next_buffer->skb = skb;
322 next_buffer->dma = 0;
323 goto next_desc;
324 }
325
326 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
327 dev_kfree_skb_irq(skb);
328 goto next_desc;
329 }
330
331 total_bytes += skb->len;
332 total_packets++;
333
334 igbvf_rx_checksum_adv(adapter, staterr, skb);
335
336 skb->protocol = eth_type_trans(skb, netdev);
337
338 igbvf_receive_skb(adapter, netdev, skb, staterr,
339 rx_desc->wb.upper.vlan);
340
341 netdev->last_rx = jiffies;
342
343next_desc:
344 rx_desc->wb.upper.status_error = 0;
345
346 /* return some buffers to hardware, one at a time is too slow */
347 if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
348 igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
349 cleaned_count = 0;
350 }
351
352 /* use prefetched values */
353 rx_desc = next_rxd;
354 buffer_info = next_buffer;
355
356 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
357 }
358
359 rx_ring->next_to_clean = i;
360 cleaned_count = igbvf_desc_unused(rx_ring);
361
362 if (cleaned_count)
363 igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
364
365 adapter->total_rx_packets += total_packets;
366 adapter->total_rx_bytes += total_bytes;
367 adapter->net_stats.rx_bytes += total_bytes;
368 adapter->net_stats.rx_packets += total_packets;
369 return cleaned;
370}
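
/*
 * Illustrative sketch (hypothetical, not part of the driver): the
 * completion test that drives the loop above. A descriptor may be
 * cleaned once hardware has written back its Descriptor Done (DD) bit.
 */
static inline bool igbvf_example_rx_desc_done(union e1000_adv_rx_desc *desc)
{
	u32 staterr = le32_to_cpu(desc->wb.upper.status_error);

	return (staterr & E1000_RXD_STAT_DD) != 0;
}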
371
372static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
373 struct igbvf_buffer *buffer_info)
374{
375 buffer_info->dma = 0;
376 if (buffer_info->skb) {
377 skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
378 DMA_TO_DEVICE);
379 dev_kfree_skb_any(buffer_info->skb);
380 buffer_info->skb = NULL;
381 }
382 buffer_info->time_stamp = 0;
383}
384
385static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
386{
387 struct igbvf_ring *tx_ring = adapter->tx_ring;
388 unsigned int i = tx_ring->next_to_clean;
389 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
390 union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
391
392 /* detected Tx unit hang */
393 dev_err(&adapter->pdev->dev,
394 "Detected Tx Unit Hang:\n"
395 " TDH <%x>\n"
396 " TDT <%x>\n"
397 " next_to_use <%x>\n"
398 " next_to_clean <%x>\n"
399 "buffer_info[next_to_clean]:\n"
400 " time_stamp <%lx>\n"
401 " next_to_watch <%x>\n"
402 " jiffies <%lx>\n"
403 " next_to_watch.status <%x>\n",
404 readl(adapter->hw.hw_addr + tx_ring->head),
405 readl(adapter->hw.hw_addr + tx_ring->tail),
406 tx_ring->next_to_use,
407 tx_ring->next_to_clean,
408 tx_ring->buffer_info[eop].time_stamp,
409 eop,
410 jiffies,
411 eop_desc->wb.status);
412}
413
414/**
415 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
416 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring to allocate
417 *
418 * Return 0 on success, negative on failure
419 **/
420int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
421 struct igbvf_ring *tx_ring)
422{
423 struct pci_dev *pdev = adapter->pdev;
424 int size;
425
426 size = sizeof(struct igbvf_buffer) * tx_ring->count;
427 tx_ring->buffer_info = vmalloc(size);
428 if (!tx_ring->buffer_info)
429 goto err;
430 memset(tx_ring->buffer_info, 0, size);
431
432 /* round up to nearest 4K */
433 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
434 tx_ring->size = ALIGN(tx_ring->size, 4096);
435
436 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
437 &tx_ring->dma);
438
439 if (!tx_ring->desc)
440 goto err;
441
442 tx_ring->adapter = adapter;
443 tx_ring->next_to_use = 0;
444 tx_ring->next_to_clean = 0;
445
446 return 0;
447err:
448 vfree(tx_ring->buffer_info);
449 dev_err(&adapter->pdev->dev,
450 "Unable to allocate memory for the transmit descriptor ring\n");
451 return -ENOMEM;
452}
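
/*
 * Illustrative sketch (hypothetical helper): the ring sizing used
 * above. Advanced descriptors are 16 bytes, so e.g. a 256-entry ring
 * is exactly 4096 bytes and the ALIGN() is a no-op; other counts round
 * up to the next 4K page.
 */
static inline unsigned long igbvf_example_ring_bytes(unsigned int count)
{
	return ALIGN(count * sizeof(union e1000_adv_tx_desc), 4096);
}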
453
454/**
455 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
456 * @adapter: board private structure
 * @rx_ring: Rx descriptor ring to allocate
457 *
458 * Returns 0 on success, negative on failure
459 **/
460int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
461 struct igbvf_ring *rx_ring)
462{
463 struct pci_dev *pdev = adapter->pdev;
464 int size, desc_len;
465
466 size = sizeof(struct igbvf_buffer) * rx_ring->count;
467 rx_ring->buffer_info = vmalloc(size);
468 if (!rx_ring->buffer_info)
469 goto err;
470 memset(rx_ring->buffer_info, 0, size);
471
472 desc_len = sizeof(union e1000_adv_rx_desc);
473
474 /* Round up to nearest 4K */
475 rx_ring->size = rx_ring->count * desc_len;
476 rx_ring->size = ALIGN(rx_ring->size, 4096);
477
478 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
479 &rx_ring->dma);
480
481 if (!rx_ring->desc)
482 goto err;
483
484 rx_ring->next_to_clean = 0;
485 rx_ring->next_to_use = 0;
486
487 rx_ring->adapter = adapter;
488
489 return 0;
490
491err:
492 vfree(rx_ring->buffer_info);
493 rx_ring->buffer_info = NULL;
494 dev_err(&adapter->pdev->dev,
495 "Unable to allocate memory for the receive descriptor ring\n");
496 return -ENOMEM;
497}
498
499/**
500 * igbvf_clean_tx_ring - Free Tx Buffers
501 * @tx_ring: ring to be cleaned
502 **/
503static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
504{
505 struct igbvf_adapter *adapter = tx_ring->adapter;
506 struct igbvf_buffer *buffer_info;
507 unsigned long size;
508 unsigned int i;
509
510 if (!tx_ring->buffer_info)
511 return;
512
513 /* Free all the Tx ring sk_buffs */
514 for (i = 0; i < tx_ring->count; i++) {
515 buffer_info = &tx_ring->buffer_info[i];
516 igbvf_put_txbuf(adapter, buffer_info);
517 }
518
519 size = sizeof(struct igbvf_buffer) * tx_ring->count;
520 memset(tx_ring->buffer_info, 0, size);
521
522 /* Zero out the descriptor ring */
523 memset(tx_ring->desc, 0, tx_ring->size);
524
525 tx_ring->next_to_use = 0;
526 tx_ring->next_to_clean = 0;
527
528 writel(0, adapter->hw.hw_addr + tx_ring->head);
529 writel(0, adapter->hw.hw_addr + tx_ring->tail);
530}
531
532/**
533 * igbvf_free_tx_resources - Free Tx Resources per Queue
534 * @tx_ring: ring to free resources from
535 *
536 * Free all transmit software resources
537 **/
538void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
539{
540 struct pci_dev *pdev = tx_ring->adapter->pdev;
541
542 igbvf_clean_tx_ring(tx_ring);
543
544 vfree(tx_ring->buffer_info);
545 tx_ring->buffer_info = NULL;
546
547 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
548
549 tx_ring->desc = NULL;
550}
551
552/**
553 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
554 * @rx_ring: ring structure to free buffers from
555 **/
556static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
557{
558 struct igbvf_adapter *adapter = rx_ring->adapter;
559 struct igbvf_buffer *buffer_info;
560 struct pci_dev *pdev = adapter->pdev;
561 unsigned long size;
562 unsigned int i;
563
564 if (!rx_ring->buffer_info)
565 return;
566
567 /* Free all the Rx ring sk_buffs */
568 for (i = 0; i < rx_ring->count; i++) {
569 buffer_info = &rx_ring->buffer_info[i];
570 if (buffer_info->dma) {
571			if (adapter->rx_ps_hdr_size) {
572 pci_unmap_single(pdev, buffer_info->dma,
573 adapter->rx_ps_hdr_size,
574 PCI_DMA_FROMDEVICE);
575 } else {
576 pci_unmap_single(pdev, buffer_info->dma,
577 adapter->rx_buffer_len,
578 PCI_DMA_FROMDEVICE);
579 }
580 buffer_info->dma = 0;
581 }
582
583 if (buffer_info->skb) {
584 dev_kfree_skb(buffer_info->skb);
585 buffer_info->skb = NULL;
586 }
587
588 if (buffer_info->page) {
589 if (buffer_info->page_dma)
590 pci_unmap_page(pdev, buffer_info->page_dma,
591 PAGE_SIZE / 2,
592 PCI_DMA_FROMDEVICE);
593 put_page(buffer_info->page);
594 buffer_info->page = NULL;
595 buffer_info->page_dma = 0;
596 buffer_info->page_offset = 0;
597 }
598 }
599
600 size = sizeof(struct igbvf_buffer) * rx_ring->count;
601 memset(rx_ring->buffer_info, 0, size);
602
603 /* Zero out the descriptor ring */
604 memset(rx_ring->desc, 0, rx_ring->size);
605
606 rx_ring->next_to_clean = 0;
607 rx_ring->next_to_use = 0;
608
609 writel(0, adapter->hw.hw_addr + rx_ring->head);
610 writel(0, adapter->hw.hw_addr + rx_ring->tail);
611}
612
613/**
614 * igbvf_free_rx_resources - Free Rx Resources
615 * @rx_ring: ring to clean the resources from
616 *
617 * Free all receive software resources
618 **/
620void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
621{
622 struct pci_dev *pdev = rx_ring->adapter->pdev;
623
624 igbvf_clean_rx_ring(rx_ring);
625
626 vfree(rx_ring->buffer_info);
627 rx_ring->buffer_info = NULL;
628
629 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
630 rx_ring->dma);
631 rx_ring->desc = NULL;
632}
633
634/**
635 * igbvf_update_itr - update the dynamic ITR value based on statistics
636 * @adapter: pointer to adapter
637 * @itr_setting: current adapter->itr
638 * @packets: the number of packets during this measurement interval
639 * @bytes: the number of bytes during this measurement interval
640 *
641 * Stores a new ITR value based on packets and byte
642 * counts during the last interrupt. The advantage of per interrupt
643 * computation is faster updates and more accurate ITR for the current
644 * traffic pattern. Constants in this function were computed
645 * based on theoretical maximum wire speed and thresholds were set based
646 * on testing data as well as attempting to minimize response time
647 * while increasing bulk throughput. This functionality is controlled
648 * by the InterruptThrottleRate module parameter.
649 **/
650static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
651 u16 itr_setting, int packets,
652 int bytes)
653{
654 unsigned int retval = itr_setting;
655
656 if (packets == 0)
657 goto update_itr_done;
658
659 switch (itr_setting) {
660 case lowest_latency:
661 /* handle TSO and jumbo frames */
662 if (bytes/packets > 8000)
663 retval = bulk_latency;
664 else if ((packets < 5) && (bytes > 512))
665 retval = low_latency;
666 break;
667 case low_latency: /* 50 usec aka 20000 ints/s */
668 if (bytes > 10000) {
669 /* this if handles the TSO accounting */
670 if (bytes/packets > 8000)
671 retval = bulk_latency;
672 else if ((packets < 10) || ((bytes/packets) > 1200))
673 retval = bulk_latency;
674 else if ((packets > 35))
675 retval = lowest_latency;
676 } else if (bytes/packets > 2000) {
677 retval = bulk_latency;
678 } else if (packets <= 2 && bytes < 512) {
679 retval = lowest_latency;
680 }
681 break;
682 case bulk_latency: /* 250 usec aka 4000 ints/s */
683 if (bytes > 25000) {
684 if (packets > 35)
685 retval = low_latency;
686 } else if (bytes < 6000) {
687 retval = low_latency;
688 }
689 break;
690 }
691
692update_itr_done:
693 return retval;
694}
695
696static void igbvf_set_itr(struct igbvf_adapter *adapter)
697{
698 struct e1000_hw *hw = &adapter->hw;
699 u16 current_itr;
700 u32 new_itr = adapter->itr;
701
702 adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr,
703 adapter->total_tx_packets,
704 adapter->total_tx_bytes);
705 /* conservative mode (itr 3) eliminates the lowest_latency setting */
706 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
707 adapter->tx_itr = low_latency;
708
709 adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr,
710 adapter->total_rx_packets,
711 adapter->total_rx_bytes);
712 /* conservative mode (itr 3) eliminates the lowest_latency setting */
713 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
714 adapter->rx_itr = low_latency;
715
716 current_itr = max(adapter->rx_itr, adapter->tx_itr);
717
718 switch (current_itr) {
719 /* counts and packets in update_itr are dependent on these numbers */
720 case lowest_latency:
721 new_itr = 70000;
722 break;
723 case low_latency:
724 new_itr = 20000; /* aka hwitr = ~200 */
725 break;
726 case bulk_latency:
727 new_itr = 4000;
728 break;
729 default:
730 break;
731 }
732
733 if (new_itr != adapter->itr) {
734 /*
735 * this attempts to bias the interrupt rate towards Bulk
736 * by adding intermediate steps when interrupt rate is
737 * increasing
738 */
739 new_itr = new_itr > adapter->itr ?
740 min(adapter->itr + (new_itr >> 2), new_itr) :
741 new_itr;
742 adapter->itr = new_itr;
743 adapter->rx_ring->itr_val = 1952;
744
745 if (adapter->msix_entries)
746 adapter->rx_ring->set_itr = 1;
747 else
748 ew32(ITR, 1952);
749 }
750}
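
/*
 * Illustrative sketch (hypothetical helper): the damping step applied
 * in igbvf_set_itr() above when the target rate rises. Stepping from
 * 4000 to 20000 ints/s gives min(4000 + 5000, 20000) = 9000, biasing
 * the rate toward bulk while it climbs.
 */
static inline u32 igbvf_example_itr_step(u32 cur, u32 target)
{
	/* move at most a quarter of the target toward a higher rate */
	return (target > cur) ? min(cur + (target >> 2), target) : target;
}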
751
752/**
753 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
754 * @tx_ring: Tx ring to reclaim descriptors from
755 *
 * Returns true if the ring is completely cleaned
756 **/
757static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
758{
759 struct igbvf_adapter *adapter = tx_ring->adapter;
760 struct e1000_hw *hw = &adapter->hw;
761 struct net_device *netdev = adapter->netdev;
762 struct igbvf_buffer *buffer_info;
763 struct sk_buff *skb;
764 union e1000_adv_tx_desc *tx_desc, *eop_desc;
765 unsigned int total_bytes = 0, total_packets = 0;
766 unsigned int i, eop, count = 0;
767 bool cleaned = false;
768
769 i = tx_ring->next_to_clean;
770 eop = tx_ring->buffer_info[i].next_to_watch;
771 eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
772
773 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
774 (count < tx_ring->count)) {
775 for (cleaned = false; !cleaned; count++) {
776 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
777 buffer_info = &tx_ring->buffer_info[i];
778 cleaned = (i == eop);
779 skb = buffer_info->skb;
780
781 if (skb) {
782 unsigned int segs, bytecount;
783
784 /* gso_segs is currently only valid for tcp */
785 segs = skb_shinfo(skb)->gso_segs ?: 1;
786 /* multiply data chunks by size of headers */
787 bytecount = ((segs - 1) * skb_headlen(skb)) +
788 skb->len;
789 total_packets += segs;
790 total_bytes += bytecount;
791 }
792
793 igbvf_put_txbuf(adapter, buffer_info);
794 tx_desc->wb.status = 0;
795
796 i++;
797 if (i == tx_ring->count)
798 i = 0;
799 }
800 eop = tx_ring->buffer_info[i].next_to_watch;
801 eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
802 }
803
804 tx_ring->next_to_clean = i;
805
806 if (unlikely(count &&
807 netif_carrier_ok(netdev) &&
808 igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
809 /* Make sure that anybody stopping the queue after this
810 * sees the new next_to_clean.
811 */
812 smp_mb();
813 if (netif_queue_stopped(netdev) &&
814 !(test_bit(__IGBVF_DOWN, &adapter->state))) {
815 netif_wake_queue(netdev);
816 ++adapter->restart_queue;
817 }
818 }
819
820 if (adapter->detect_tx_hung) {
821		/* Detect a transmit hang in hardware; this serializes the
822		 * check with the clearing of time_stamp and movement of i */
823 adapter->detect_tx_hung = false;
824 if (tx_ring->buffer_info[i].time_stamp &&
825 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
826 (adapter->tx_timeout_factor * HZ))
827 && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
828
829 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
830 /* detected Tx unit hang */
831 igbvf_print_tx_hang(adapter);
832
833 netif_stop_queue(netdev);
834 }
835 }
836 adapter->net_stats.tx_bytes += total_bytes;
837 adapter->net_stats.tx_packets += total_packets;
838 return (count < tx_ring->count);
839}
840
841static irqreturn_t igbvf_msix_other(int irq, void *data)
842{
843 struct net_device *netdev = data;
844 struct igbvf_adapter *adapter = netdev_priv(netdev);
845 struct e1000_hw *hw = &adapter->hw;
846
847 adapter->int_counter1++;
848
849 netif_carrier_off(netdev);
850 hw->mac.get_link_status = 1;
851 if (!test_bit(__IGBVF_DOWN, &adapter->state))
852 mod_timer(&adapter->watchdog_timer, jiffies + 1);
853
854 ew32(EIMS, adapter->eims_other);
855
856 return IRQ_HANDLED;
857}
858
859static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
860{
861 struct net_device *netdev = data;
862 struct igbvf_adapter *adapter = netdev_priv(netdev);
863 struct e1000_hw *hw = &adapter->hw;
864 struct igbvf_ring *tx_ring = adapter->tx_ring;
865
867 adapter->total_tx_bytes = 0;
868 adapter->total_tx_packets = 0;
869
870	/* auto mask will automatically re-enable the interrupt when we write
871 * EICS */
872 if (!igbvf_clean_tx_irq(tx_ring))
873 /* Ring was not completely cleaned, so fire another interrupt */
874 ew32(EICS, tx_ring->eims_value);
875 else
876 ew32(EIMS, tx_ring->eims_value);
877
878 return IRQ_HANDLED;
879}
880
881static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
882{
883 struct net_device *netdev = data;
884 struct igbvf_adapter *adapter = netdev_priv(netdev);
885
886 adapter->int_counter0++;
887
888 /* Write the ITR value calculated at the end of the
889 * previous interrupt.
890 */
891 if (adapter->rx_ring->set_itr) {
892 writel(adapter->rx_ring->itr_val,
893 adapter->hw.hw_addr + adapter->rx_ring->itr_register);
894 adapter->rx_ring->set_itr = 0;
895 }
896
897 if (napi_schedule_prep(&adapter->rx_ring->napi)) {
898 adapter->total_rx_bytes = 0;
899 adapter->total_rx_packets = 0;
900 __napi_schedule(&adapter->rx_ring->napi);
901 }
902
903 return IRQ_HANDLED;
904}
905
906#define IGBVF_NO_QUEUE -1
907
908static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
909 int tx_queue, int msix_vector)
910{
911 struct e1000_hw *hw = &adapter->hw;
912 u32 ivar, index;
913
914 /* 82576 uses a table-based method for assigning vectors.
915 Each queue has a single entry in the table to which we write
916 a vector number along with a "valid" bit. Sadly, the layout
917 of the table is somewhat counterintuitive. */
918 if (rx_queue > IGBVF_NO_QUEUE) {
919 index = (rx_queue >> 1);
920 ivar = array_er32(IVAR0, index);
921 if (rx_queue & 0x1) {
922 /* vector goes into third byte of register */
923 ivar = ivar & 0xFF00FFFF;
924 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
925 } else {
926 /* vector goes into low byte of register */
927 ivar = ivar & 0xFFFFFF00;
928 ivar |= msix_vector | E1000_IVAR_VALID;
929 }
930 adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
931 array_ew32(IVAR0, index, ivar);
932 }
933 if (tx_queue > IGBVF_NO_QUEUE) {
934 index = (tx_queue >> 1);
935 ivar = array_er32(IVAR0, index);
936 if (tx_queue & 0x1) {
937 /* vector goes into high byte of register */
938 ivar = ivar & 0x00FFFFFF;
939 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
940 } else {
941 /* vector goes into second byte of register */
942 ivar = ivar & 0xFFFF00FF;
943 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
944 }
945 adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
946 array_ew32(IVAR0, index, ivar);
947 }
948}
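
/*
 * Illustrative sketch (hypothetical, not part of the driver): the IVAR
 * packing performed above. Each 32-bit IVAR register holds four 8-bit
 * entries; Rx queues land in bytes 0 and 2, Tx queues in bytes 1 and 3.
 */
static inline u32 igbvf_example_ivar_set(u32 ivar, int byte, int msix_vector)
{
	ivar &= ~(0xFFU << (byte * 8));		/* clear the entry */
	return ivar | ((msix_vector | E1000_IVAR_VALID) << (byte * 8));
}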
949
950/**
951 * igbvf_configure_msix - Configure MSI-X hardware
952 * @adapter: board private structure
 *
953 * igbvf_configure_msix sets up the hardware to properly
954 * generate MSI-X interrupts.
955 **/
956static void igbvf_configure_msix(struct igbvf_adapter *adapter)
957{
958 u32 tmp;
959 struct e1000_hw *hw = &adapter->hw;
960 struct igbvf_ring *tx_ring = adapter->tx_ring;
961 struct igbvf_ring *rx_ring = adapter->rx_ring;
962 int vector = 0;
963
964 adapter->eims_enable_mask = 0;
965
966 igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
967 adapter->eims_enable_mask |= tx_ring->eims_value;
968 if (tx_ring->itr_val)
969 writel(tx_ring->itr_val,
970 hw->hw_addr + tx_ring->itr_register);
971 else
972 writel(1952, hw->hw_addr + tx_ring->itr_register);
973
974 igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
975 adapter->eims_enable_mask |= rx_ring->eims_value;
976 if (rx_ring->itr_val)
977 writel(rx_ring->itr_val,
978 hw->hw_addr + rx_ring->itr_register);
979 else
980 writel(1952, hw->hw_addr + rx_ring->itr_register);
981
982 /* set vector for other causes, i.e. link changes */
983
984 tmp = (vector++ | E1000_IVAR_VALID);
985
986 ew32(IVAR_MISC, tmp);
987
988 adapter->eims_enable_mask = (1 << (vector)) - 1;
989 adapter->eims_other = 1 << (vector - 1);
990 e1e_flush();
991}
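
/*
 * Illustrative sketch (hypothetical helper): the mask arithmetic above.
 * With three vectors (0 = Tx, 1 = Rx, 2 = other), vector ends at 3, so
 * eims_enable_mask = (1 << 3) - 1 = 0x7 and eims_other = 1 << 2.
 */
static inline u32 igbvf_example_eims_mask(int nvectors)
{
	return (1U << nvectors) - 1;	/* one enable bit per vector */
}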
992
993void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
994{
995 if (adapter->msix_entries) {
996 pci_disable_msix(adapter->pdev);
997 kfree(adapter->msix_entries);
998 adapter->msix_entries = NULL;
999 }
1000}
1001
1002/**
1003 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
1004 * @adapter: board private structure
 *
1005 * Attempt to configure interrupts using the best available
1006 * capabilities of the hardware and kernel.
1007 **/
1008void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
1009{
1010 int err = -ENOMEM;
1011 int i;
1012
1013	/* we allocate 3 vectors, 1 for Tx, 1 for Rx, 1 for PF messages */
1014 adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
1015 GFP_KERNEL);
1016 if (adapter->msix_entries) {
1017 for (i = 0; i < 3; i++)
1018 adapter->msix_entries[i].entry = i;
1019
1020 err = pci_enable_msix(adapter->pdev,
1021 adapter->msix_entries, 3);
1022 }
1023
1024 if (err) {
1025 /* MSI-X failed */
1026 dev_err(&adapter->pdev->dev,
1027 "Failed to initialize MSI-X interrupts.\n");
1028 igbvf_reset_interrupt_capability(adapter);
1029 }
1030}
1031
1032/**
1033 * igbvf_request_msix - Initialize MSI-X interrupts
1034 * @adapter: board private structure
 *
1035 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
1036 * kernel.
1037 **/
1038static int igbvf_request_msix(struct igbvf_adapter *adapter)
1039{
1040 struct net_device *netdev = adapter->netdev;
1041 int err = 0, vector = 0;
1042
1043 if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
1044 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
1045 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
1046 } else {
1047 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1048 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1049 }
1050
1051 err = request_irq(adapter->msix_entries[vector].vector,
1052 &igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
1053 netdev);
1054 if (err)
1055 goto out;
1056
1057 adapter->tx_ring->itr_register = E1000_EITR(vector);
1058 adapter->tx_ring->itr_val = 1952;
1059 vector++;
1060
1061 err = request_irq(adapter->msix_entries[vector].vector,
1062 &igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
1063 netdev);
1064 if (err)
1065 goto out;
1066
1067 adapter->rx_ring->itr_register = E1000_EITR(vector);
1068 adapter->rx_ring->itr_val = 1952;
1069 vector++;
1070
1071 err = request_irq(adapter->msix_entries[vector].vector,
1072 &igbvf_msix_other, 0, netdev->name, netdev);
1073 if (err)
1074 goto out;
1075
1076 igbvf_configure_msix(adapter);
1077 return 0;
1078out:
1079 return err;
1080}
1081
1082/**
1083 * igbvf_alloc_queues - Allocate memory for all rings
1084 * @adapter: board private structure to initialize
1085 **/
1086static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
1087{
1088 struct net_device *netdev = adapter->netdev;
1089
1090 adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1091 if (!adapter->tx_ring)
1092 return -ENOMEM;
1093
1094 adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1095 if (!adapter->rx_ring) {
1096 kfree(adapter->tx_ring);
1097 return -ENOMEM;
1098 }
1099
1100 netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
1101
1102 return 0;
1103}
1104
1105/**
1106 * igbvf_request_irq - initialize interrupts
1107 * @adapter: board private structure
 *
1108 * Attempts to configure interrupts using the best available
1109 * capabilities of the hardware and kernel.
1110 **/
1111static int igbvf_request_irq(struct igbvf_adapter *adapter)
1112{
1113 int err = -1;
1114
1115 /* igbvf supports msi-x only */
1116 if (adapter->msix_entries)
1117 err = igbvf_request_msix(adapter);
1118
1119 if (!err)
1120 return err;
1121
1122 dev_err(&adapter->pdev->dev,
1123 "Unable to allocate interrupt, Error: %d\n", err);
1124
1125 return err;
1126}
1127
1128static void igbvf_free_irq(struct igbvf_adapter *adapter)
1129{
1130 struct net_device *netdev = adapter->netdev;
1131 int vector;
1132
1133 if (adapter->msix_entries) {
1134 for (vector = 0; vector < 3; vector++)
1135 free_irq(adapter->msix_entries[vector].vector, netdev);
1136 }
1137}
1138
1139/**
1140 * igbvf_irq_disable - Mask off interrupt generation on the NIC
1141 * @adapter: board private structure
 **/
1142static void igbvf_irq_disable(struct igbvf_adapter *adapter)
1143{
1144 struct e1000_hw *hw = &adapter->hw;
1145
1146 ew32(EIMC, ~0);
1147
1148 if (adapter->msix_entries)
1149 ew32(EIAC, 0);
1150}
1151
1152/**
1153 * igbvf_irq_enable - Enable default interrupt generation settings
1154 * @adapter: board private structure
 **/
1155static void igbvf_irq_enable(struct igbvf_adapter *adapter)
1156{
1157 struct e1000_hw *hw = &adapter->hw;
1158
1159 ew32(EIAC, adapter->eims_enable_mask);
1160 ew32(EIAM, adapter->eims_enable_mask);
1161 ew32(EIMS, adapter->eims_enable_mask);
1162}
1163
1164/**
1165 * igbvf_poll - NAPI Rx polling callback
1166 * @napi: struct associated with this polling callback
1167 * @budget: number of packets the driver may process this poll
1168 **/
1169static int igbvf_poll(struct napi_struct *napi, int budget)
1170{
1171 struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
1172 struct igbvf_adapter *adapter = rx_ring->adapter;
1173 struct e1000_hw *hw = &adapter->hw;
1174 int work_done = 0;
1175
1176 igbvf_clean_rx_irq(adapter, &work_done, budget);
1177
1178 /* If not enough Rx work done, exit the polling mode */
1179 if (work_done < budget) {
1180 napi_complete(napi);
1181
1182 if (adapter->itr_setting & 3)
1183 igbvf_set_itr(adapter);
1184
1185 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1186 ew32(EIMS, adapter->rx_ring->eims_value);
1187 }
1188
1189 return work_done;
1190}
1191
1192/**
1193 * igbvf_set_rlpml - set receive large packet maximum length
1194 * @adapter: board private structure
1195 *
1196 * Configure the maximum size of packets that will be received
1197 */
1198static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
1199{
1200 int max_frame_size = adapter->max_frame_size;
1201 struct e1000_hw *hw = &adapter->hw;
1202
1203 if (adapter->vlgrp)
1204 max_frame_size += VLAN_TAG_SIZE;
1205
1206 e1000_rlpml_set_vf(hw, max_frame_size);
1207}
1208
1209static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1210{
1211 struct igbvf_adapter *adapter = netdev_priv(netdev);
1212 struct e1000_hw *hw = &adapter->hw;
1213
1214 if (hw->mac.ops.set_vfta(hw, vid, true))
1215 dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
1216}
1217
1218static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1219{
1220 struct igbvf_adapter *adapter = netdev_priv(netdev);
1221 struct e1000_hw *hw = &adapter->hw;
1222
1223 igbvf_irq_disable(adapter);
1224 vlan_group_set_device(adapter->vlgrp, vid, NULL);
1225
1226 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1227 igbvf_irq_enable(adapter);
1228
1229 if (hw->mac.ops.set_vfta(hw, vid, false))
1230 dev_err(&adapter->pdev->dev,
1231 "Failed to remove vlan id %d\n", vid);
1232}
1233
1234static void igbvf_vlan_rx_register(struct net_device *netdev,
1235 struct vlan_group *grp)
1236{
1237 struct igbvf_adapter *adapter = netdev_priv(netdev);
1238
1239 adapter->vlgrp = grp;
1240}
1241
1242static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
1243{
1244 u16 vid;
1245
1246 if (!adapter->vlgrp)
1247 return;
1248
1249 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1250 if (!vlan_group_get_device(adapter->vlgrp, vid))
1251 continue;
1252 igbvf_vlan_rx_add_vid(adapter->netdev, vid);
1253 }
1254
1255 igbvf_set_rlpml(adapter);
1256}
1257
1258/**
1259 * igbvf_configure_tx - Configure Transmit Unit after Reset
1260 * @adapter: board private structure
1261 *
1262 * Configure the Tx unit of the MAC after a reset.
1263 **/
1264static void igbvf_configure_tx(struct igbvf_adapter *adapter)
1265{
1266 struct e1000_hw *hw = &adapter->hw;
1267 struct igbvf_ring *tx_ring = adapter->tx_ring;
1268 u64 tdba;
1269 u32 txdctl, dca_txctrl;
1270
1271 /* disable transmits */
1272 txdctl = er32(TXDCTL(0));
1273 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1274 msleep(10);
1275
1276 /* Setup the HW Tx Head and Tail descriptor pointers */
1277 ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
1278 tdba = tx_ring->dma;
1279 ew32(TDBAL(0), (tdba & DMA_32BIT_MASK));
1280 ew32(TDBAH(0), (tdba >> 32));
1281 ew32(TDH(0), 0);
1282 ew32(TDT(0), 0);
1283 tx_ring->head = E1000_TDH(0);
1284 tx_ring->tail = E1000_TDT(0);
1285
1286 /* Turn off Relaxed Ordering on head write-backs. The writebacks
1287 * MUST be delivered in order or it will completely screw up
1288	 * our bookkeeping.
1289 */
1290 dca_txctrl = er32(DCA_TXCTRL(0));
1291 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1292 ew32(DCA_TXCTRL(0), dca_txctrl);
1293
1294 /* enable transmits */
1295 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1296 ew32(TXDCTL(0), txdctl);
1297
1298 /* Setup Transmit Descriptor Settings for eop descriptor */
1299 adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
1300
1301 /* enable Report Status bit */
1302 adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
1303
1304 adapter->tx_queue_len = adapter->netdev->tx_queue_len;
1305}
1306
1307/**
1308 * igbvf_setup_srrctl - configure the receive control registers
1309 * @adapter: Board private structure
1310 **/
1311static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
1312{
1313 struct e1000_hw *hw = &adapter->hw;
1314 u32 srrctl = 0;
1315
1316 srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
1317 E1000_SRRCTL_BSIZEHDR_MASK |
1318 E1000_SRRCTL_BSIZEPKT_MASK);
1319
1320	/* Enable queue drop to avoid head-of-line blocking */
1321 srrctl |= E1000_SRRCTL_DROP_EN;
1322
1323 /* Setup buffer sizes */
1324 srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
1325 E1000_SRRCTL_BSIZEPKT_SHIFT;
1326
1327 if (adapter->rx_buffer_len < 2048) {
1328 adapter->rx_ps_hdr_size = 0;
1329 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1330 } else {
1331 adapter->rx_ps_hdr_size = 128;
1332 srrctl |= adapter->rx_ps_hdr_size <<
1333 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1334 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1335 }
1336
1337 ew32(SRRCTL(0), srrctl);
1338}
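
/*
 * Illustrative sketch (hypothetical helper): the BSIZEPKT encoding used
 * above. SRRCTL expresses the packet buffer size in 1 KB units, so a
 * 1522-byte buffer rounds up to 2048 bytes and encodes as 2.
 */
static inline u32 igbvf_example_bsizepkt(unsigned int buffer_len)
{
	return ALIGN(buffer_len, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
}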
1339
1340/**
1341 * igbvf_configure_rx - Configure Receive Unit after Reset
1342 * @adapter: board private structure
1343 *
1344 * Configure the Rx unit of the MAC after a reset.
1345 **/
1346static void igbvf_configure_rx(struct igbvf_adapter *adapter)
1347{
1348 struct e1000_hw *hw = &adapter->hw;
1349 struct igbvf_ring *rx_ring = adapter->rx_ring;
1350 u64 rdba;
1351 u32 rdlen, rxdctl;
1352
1353 /* disable receives */
1354 rxdctl = er32(RXDCTL(0));
1355 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1356 msleep(10);
1357
1358 rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
1359
1360 /*
1361 * Setup the HW Rx Head and Tail Descriptor Pointers and
1362 * the Base and Length of the Rx Descriptor Ring
1363 */
1364 rdba = rx_ring->dma;
1365 ew32(RDBAL(0), (rdba & DMA_32BIT_MASK));
1366 ew32(RDBAH(0), (rdba >> 32));
1367 ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
1368 rx_ring->head = E1000_RDH(0);
1369 rx_ring->tail = E1000_RDT(0);
1370 ew32(RDH(0), 0);
1371 ew32(RDT(0), 0);
1372
1373 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1374 rxdctl &= 0xFFF00000;
1375 rxdctl |= IGBVF_RX_PTHRESH;
1376 rxdctl |= IGBVF_RX_HTHRESH << 8;
1377 rxdctl |= IGBVF_RX_WTHRESH << 16;
1378
1379 igbvf_set_rlpml(adapter);
1380
1381 /* enable receives */
1382 ew32(RXDCTL(0), rxdctl);
1383}
1384
1385/**
1386 * igbvf_set_multi - Multicast and Promiscuous mode set
1387 * @netdev: network interface device structure
1388 *
1389 * The set_multi entry point is called whenever the multicast address
1390 * list or the network interface flags are updated. This routine is
1391 * responsible for configuring the hardware for proper multicast,
1392 * promiscuous mode, and all-multi behavior.
1393 **/
1394static void igbvf_set_multi(struct net_device *netdev)
1395{
1396 struct igbvf_adapter *adapter = netdev_priv(netdev);
1397 struct e1000_hw *hw = &adapter->hw;
1398 struct dev_mc_list *mc_ptr;
1399 u8 *mta_list = NULL;
1400 int i;
1401
1402 if (netdev->mc_count) {
1403		mta_list = kmalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
1404 if (!mta_list) {
1405 dev_err(&adapter->pdev->dev,
1406 "failed to allocate multicast filter list\n");
1407 return;
1408 }
1409 }
1410
1411 /* prepare a packed array of only addresses. */
1412 mc_ptr = netdev->mc_list;
1413
1414 for (i = 0; i < netdev->mc_count; i++) {
1415 if (!mc_ptr)
1416 break;
1417 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
1418 ETH_ALEN);
1419 mc_ptr = mc_ptr->next;
1420 }
1421
1422 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
1423 kfree(mta_list);
1424}
1425
1426/**
1427 * igbvf_configure - configure the hardware for Rx and Tx
1428 * @adapter: private board structure
1429 **/
1430static void igbvf_configure(struct igbvf_adapter *adapter)
1431{
1432 igbvf_set_multi(adapter->netdev);
1433
1434 igbvf_restore_vlan(adapter);
1435
1436 igbvf_configure_tx(adapter);
1437 igbvf_setup_srrctl(adapter);
1438 igbvf_configure_rx(adapter);
1439 igbvf_alloc_rx_buffers(adapter->rx_ring,
1440 igbvf_desc_unused(adapter->rx_ring));
1441}
1442
1443/* igbvf_reset - bring the hardware into a known good state
1444 *
1445 * This function boots the hardware and enables some settings that
1446 * require a configuration cycle of the hardware - those cannot be
1447 * set/changed during runtime. After reset the device needs to be
1448 * properly configured for Rx, Tx etc.
1449 */
1450void igbvf_reset(struct igbvf_adapter *adapter)
1451{
1452 struct e1000_mac_info *mac = &adapter->hw.mac;
1453 struct net_device *netdev = adapter->netdev;
1454 struct e1000_hw *hw = &adapter->hw;
1455
1456 /* Allow time for pending master requests to run */
1457 if (mac->ops.reset_hw(hw))
1458 dev_err(&adapter->pdev->dev, "PF still resetting\n");
1459
1460 mac->ops.init_hw(hw);
1461
1462 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1463 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1464 netdev->addr_len);
1465 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1466 netdev->addr_len);
1467 }
1468}
1469
1470int igbvf_up(struct igbvf_adapter *adapter)
1471{
1472 struct e1000_hw *hw = &adapter->hw;
1473
1474 /* hardware has been reset, we need to reload some things */
1475 igbvf_configure(adapter);
1476
1477 clear_bit(__IGBVF_DOWN, &adapter->state);
1478
1479 napi_enable(&adapter->rx_ring->napi);
1480 if (adapter->msix_entries)
1481 igbvf_configure_msix(adapter);
1482
1483 /* Clear any pending interrupts. */
1484 er32(EICR);
1485 igbvf_irq_enable(adapter);
1486
1487 /* start the watchdog */
1488 hw->mac.get_link_status = 1;
1489 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1490
1492 return 0;
1493}
1494
1495void igbvf_down(struct igbvf_adapter *adapter)
1496{
1497 struct net_device *netdev = adapter->netdev;
1498 struct e1000_hw *hw = &adapter->hw;
1499 u32 rxdctl, txdctl;
1500
1501 /*
1502 * signal that we're down so the interrupt handler does not
1503 * reschedule our watchdog timer
1504 */
1505 set_bit(__IGBVF_DOWN, &adapter->state);
1506
1507 /* disable receives in the hardware */
1508 rxdctl = er32(RXDCTL(0));
1509 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1510
1511 netif_stop_queue(netdev);
1512
1513 /* disable transmits in the hardware */
1514 txdctl = er32(TXDCTL(0));
1515 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1516
1517 /* flush both disables and wait for them to finish */
1518 e1e_flush();
1519 msleep(10);
1520
1521 napi_disable(&adapter->rx_ring->napi);
1522
1523 igbvf_irq_disable(adapter);
1524
1525 del_timer_sync(&adapter->watchdog_timer);
1526
1527 netdev->tx_queue_len = adapter->tx_queue_len;
1528 netif_carrier_off(netdev);
1529
1530	/* record the stats before reset */
1531 igbvf_update_stats(adapter);
1532
1533 adapter->link_speed = 0;
1534 adapter->link_duplex = 0;
1535
1536 igbvf_reset(adapter);
1537 igbvf_clean_tx_ring(adapter->tx_ring);
1538 igbvf_clean_rx_ring(adapter->rx_ring);
1539}
1540
1541void igbvf_reinit_locked(struct igbvf_adapter *adapter)
1542{
1543 might_sleep();
1544 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
1545 msleep(1);
1546 igbvf_down(adapter);
1547 igbvf_up(adapter);
1548 clear_bit(__IGBVF_RESETTING, &adapter->state);
1549}
1550
1551/**
1552 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
1553 * @adapter: board private structure to initialize
1554 *
1555 * igbvf_sw_init initializes the Adapter private data structure.
1556 * Fields are initialized based on PCI device information and
1557 * OS network device settings (MTU size).
1558 **/
1559static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
1560{
1561 struct net_device *netdev = adapter->netdev;
1562 s32 rc;
1563
1564 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
1565 adapter->rx_ps_hdr_size = 0;
1566 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1567 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1568
1569 adapter->tx_int_delay = 8;
1570 adapter->tx_abs_int_delay = 32;
1571 adapter->rx_int_delay = 0;
1572 adapter->rx_abs_int_delay = 8;
1573 adapter->itr_setting = 3;
1574 adapter->itr = 20000;
1575
1576 /* Set various function pointers */
1577 adapter->ei->init_ops(&adapter->hw);
1578
1579 rc = adapter->hw.mac.ops.init_params(&adapter->hw);
1580 if (rc)
1581 return rc;
1582
1583 rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
1584 if (rc)
1585 return rc;
1586
1587 igbvf_set_interrupt_capability(adapter);
1588
1589 if (igbvf_alloc_queues(adapter))
1590 return -ENOMEM;
1591
1592 spin_lock_init(&adapter->tx_queue_lock);
1593
1594 /* Explicitly disable IRQ since the NIC can be in any state. */
1595 igbvf_irq_disable(adapter);
1596
1597 spin_lock_init(&adapter->stats_lock);
1598
1599 set_bit(__IGBVF_DOWN, &adapter->state);
1600 return 0;
1601}
1602
1603static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
1604{
1605 struct e1000_hw *hw = &adapter->hw;
1606
1607 adapter->stats.last_gprc = er32(VFGPRC);
1608 adapter->stats.last_gorc = er32(VFGORC);
1609 adapter->stats.last_gptc = er32(VFGPTC);
1610 adapter->stats.last_gotc = er32(VFGOTC);
1611 adapter->stats.last_mprc = er32(VFMPRC);
1612 adapter->stats.last_gotlbc = er32(VFGOTLBC);
1613 adapter->stats.last_gptlbc = er32(VFGPTLBC);
1614 adapter->stats.last_gorlbc = er32(VFGORLBC);
1615 adapter->stats.last_gprlbc = er32(VFGPRLBC);
1616
1617 adapter->stats.base_gprc = er32(VFGPRC);
1618 adapter->stats.base_gorc = er32(VFGORC);
1619 adapter->stats.base_gptc = er32(VFGPTC);
1620 adapter->stats.base_gotc = er32(VFGOTC);
1621 adapter->stats.base_mprc = er32(VFMPRC);
1622 adapter->stats.base_gotlbc = er32(VFGOTLBC);
1623 adapter->stats.base_gptlbc = er32(VFGPTLBC);
1624 adapter->stats.base_gorlbc = er32(VFGORLBC);
1625 adapter->stats.base_gprlbc = er32(VFGPRLBC);
1626}
1627
1628/**
1629 * igbvf_open - Called when a network interface is made active
1630 * @netdev: network interface device structure
1631 *
1632 * Returns 0 on success, negative value on failure
1633 *
1634 * The open entry point is called when a network interface is made
1635 * active by the system (IFF_UP). At this point all resources needed
1636 * for transmit and receive operations are allocated, the interrupt
1637 * handler is registered with the OS, the watchdog timer is started,
1638 * and the stack is notified that the interface is ready.
1639 **/
1640static int igbvf_open(struct net_device *netdev)
1641{
1642 struct igbvf_adapter *adapter = netdev_priv(netdev);
1643 struct e1000_hw *hw = &adapter->hw;
1644 int err;
1645
1646 /* disallow open during test */
1647 if (test_bit(__IGBVF_TESTING, &adapter->state))
1648 return -EBUSY;
1649
1650 /* allocate transmit descriptors */
1651 err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
1652 if (err)
1653 goto err_setup_tx;
1654
1655 /* allocate receive descriptors */
1656 err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
1657 if (err)
1658 goto err_setup_rx;
1659
1660 /*
1661 * before we allocate an interrupt, we must be ready to handle it.
1662 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1663	 * as soon as we call request_irq, so we have to set up our
1664 * clean_rx handler before we do so.
1665 */
1666 igbvf_configure(adapter);
1667
1668 err = igbvf_request_irq(adapter);
1669 if (err)
1670 goto err_req_irq;
1671
1672 /* From here on the code is the same as igbvf_up() */
1673 clear_bit(__IGBVF_DOWN, &adapter->state);
1674
1675 napi_enable(&adapter->rx_ring->napi);
1676
1677 /* clear any pending interrupts */
1678 er32(EICR);
1679
1680 igbvf_irq_enable(adapter);
1681
1682 /* start the watchdog */
1683 hw->mac.get_link_status = 1;
1684 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1685
1686 return 0;
1687
1688err_req_irq:
1689 igbvf_free_rx_resources(adapter->rx_ring);
1690err_setup_rx:
1691 igbvf_free_tx_resources(adapter->tx_ring);
1692err_setup_tx:
1693 igbvf_reset(adapter);
1694
1695 return err;
1696}
1697
1698/**
1699 * igbvf_close - Disables a network interface
1700 * @netdev: network interface device structure
1701 *
1702 * Returns 0, this is not allowed to fail
1703 *
1704 * The close entry point is called when an interface is de-activated
1705 * by the OS. The hardware is still under the drivers control, but
1706 * needs to be disabled. A global MAC reset is issued to stop the
1707 * hardware, and all transmit and receive resources are freed.
1708 **/
1709static int igbvf_close(struct net_device *netdev)
1710{
1711 struct igbvf_adapter *adapter = netdev_priv(netdev);
1712
1713 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
1714 igbvf_down(adapter);
1715
1716 igbvf_free_irq(adapter);
1717
1718 igbvf_free_tx_resources(adapter->tx_ring);
1719 igbvf_free_rx_resources(adapter->rx_ring);
1720
1721 return 0;
1722}

1723/**
1724 * igbvf_set_mac - Change the Ethernet Address of the NIC
1725 * @netdev: network interface device structure
1726 * @p: pointer to an address structure
1727 *
1728 * Returns 0 on success, negative on failure
1729 **/
1730static int igbvf_set_mac(struct net_device *netdev, void *p)
1731{
1732 struct igbvf_adapter *adapter = netdev_priv(netdev);
1733 struct e1000_hw *hw = &adapter->hw;
1734 struct sockaddr *addr = p;
1735
1736 if (!is_valid_ether_addr(addr->sa_data))
1737 return -EADDRNOTAVAIL;
1738
1739 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
1740
1741 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
1742
1743 if (memcmp(addr->sa_data, hw->mac.addr, 6))
1744 return -EADDRNOTAVAIL;
1745
1746 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1747
1748 return 0;
1749}
1750
1751#define UPDATE_VF_COUNTER(reg, name) \
1752 { \
1753 u32 current_counter = er32(reg); \
1754 if (current_counter < adapter->stats.last_##name) \
1755 adapter->stats.name += 0x100000000LL; \
1756 adapter->stats.last_##name = current_counter; \
1757 adapter->stats.name &= 0xFFFFFFFF00000000LL; \
1758 adapter->stats.name |= current_counter; \
1759 }
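
/*
 * Illustrative sketch (hypothetical, not part of the driver): what the
 * macro above does for one counter. The VF exposes only 32-bit
 * registers, so a wrap shows up as the value going backwards, and the
 * running 64-bit total is extended by 2^32.
 */
static inline void igbvf_example_extend(u64 *total, u32 *last, u32 now)
{
	if (now < *last)			/* 32-bit register wrapped */
		*total += 0x100000000LL;
	*last = now;
	*total = (*total & 0xFFFFFFFF00000000LL) | now;
}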
1760
1761/**
1762 * igbvf_update_stats - Update the board statistics counters
1763 * @adapter: board private structure
1764**/
1765void igbvf_update_stats(struct igbvf_adapter *adapter)
1766{
1767 struct e1000_hw *hw = &adapter->hw;
1768 struct pci_dev *pdev = adapter->pdev;
1769
1770 /*
1771 * Prevent stats update while adapter is being reset, link is down
1772 * or if the pci connection is down.
1773 */
1774 if (adapter->link_speed == 0)
1775 return;
1776
1777 if (test_bit(__IGBVF_RESETTING, &adapter->state))
1778 return;
1779
1780 if (pci_channel_offline(pdev))
1781 return;
1782
1783 UPDATE_VF_COUNTER(VFGPRC, gprc);
1784 UPDATE_VF_COUNTER(VFGORC, gorc);
1785 UPDATE_VF_COUNTER(VFGPTC, gptc);
1786 UPDATE_VF_COUNTER(VFGOTC, gotc);
1787 UPDATE_VF_COUNTER(VFMPRC, mprc);
1788 UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
1789 UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
1790 UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
1791 UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
1792
1793 /* Fill out the OS statistics structure */
1794 adapter->net_stats.multicast = adapter->stats.mprc;
1795}
1796
1797static void igbvf_print_link_info(struct igbvf_adapter *adapter)
1798{
1799 dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
1800 adapter->link_speed,
1801 ((adapter->link_duplex == FULL_DUPLEX) ?
1802 "Full Duplex" : "Half Duplex"));
1803}
1804
1805static bool igbvf_has_link(struct igbvf_adapter *adapter)
1806{
1807 struct e1000_hw *hw = &adapter->hw;
1808 s32 ret_val = E1000_SUCCESS;
1809 bool link_active;
1810
1811 ret_val = hw->mac.ops.check_for_link(hw);
1812 link_active = !hw->mac.get_link_status;
1813
1814 /* if check for link returns error we will need to reset */
1815 if (ret_val)
1816 schedule_work(&adapter->reset_task);
1817
1818 return link_active;
1819}
1820
1821/**
1822 * igbvf_watchdog - Timer Call-back
1823 * @data: pointer to adapter cast into an unsigned long
1824 **/
1825static void igbvf_watchdog(unsigned long data)
1826{
1827 struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
1828
1829 /* Do the rest outside of interrupt context */
1830 schedule_work(&adapter->watchdog_task);
1831}
1832
1833static void igbvf_watchdog_task(struct work_struct *work)
1834{
1835 struct igbvf_adapter *adapter = container_of(work,
1836 struct igbvf_adapter,
1837 watchdog_task);
1838 struct net_device *netdev = adapter->netdev;
1839 struct e1000_mac_info *mac = &adapter->hw.mac;
1840 struct igbvf_ring *tx_ring = adapter->tx_ring;
1841 struct e1000_hw *hw = &adapter->hw;
1842 u32 link;
1843 int tx_pending = 0;
1844
1845 link = igbvf_has_link(adapter);
1846
1847 if (link) {
1848 if (!netif_carrier_ok(netdev)) {
1849			bool txb2b = true;
1850
1851 mac->ops.get_link_up_info(&adapter->hw,
1852 &adapter->link_speed,
1853 &adapter->link_duplex);
1854 igbvf_print_link_info(adapter);
1855
1856 /*
1857 * tweak tx_queue_len according to speed/duplex
1858 * and adjust the timeout factor
1859 */
1860 netdev->tx_queue_len = adapter->tx_queue_len;
1861 adapter->tx_timeout_factor = 1;
1862 switch (adapter->link_speed) {
1863 case SPEED_10:
1864 txb2b = 0;
1865 netdev->tx_queue_len = 10;
1866 adapter->tx_timeout_factor = 16;
1867 break;
1868 case SPEED_100:
1869 txb2b = 0;
1870 netdev->tx_queue_len = 100;
1871 /* maybe add some timeout factor ? */
1872 break;
1873 }
1874
1875 netif_carrier_on(netdev);
1876 netif_wake_queue(netdev);
1877 }
1878 } else {
1879 if (netif_carrier_ok(netdev)) {
1880 adapter->link_speed = 0;
1881 adapter->link_duplex = 0;
1882 dev_info(&adapter->pdev->dev, "Link is Down\n");
1883 netif_carrier_off(netdev);
1884 netif_stop_queue(netdev);
1885 }
1886 }
1887
1888 if (netif_carrier_ok(netdev)) {
1889 igbvf_update_stats(adapter);
1890 } else {
1891 tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
1892 tx_ring->count);
1893 if (tx_pending) {
1894 /*
1895 * We've lost link, so the controller stops DMA,
1896 * but we've got queued Tx work that's never going
1897 * to get done, so reset controller to flush Tx.
1898 * (Do the reset outside of interrupt context).
1899 */
1900 adapter->tx_timeout_count++;
1901 schedule_work(&adapter->reset_task);
1902 }
1903 }
1904
1905 /* Cause software interrupt to ensure Rx ring is cleaned */
1906 ew32(EICS, adapter->rx_ring->eims_value);
1907
1908 /* Force detection of hung controller every watchdog period */
1909 adapter->detect_tx_hung = 1;
1910
1911 /* Reset the timer */
1912 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1913 mod_timer(&adapter->watchdog_timer,
1914 round_jiffies(jiffies + (2 * HZ)));
1915}
1916
1917#define IGBVF_TX_FLAGS_CSUM 0x00000001
1918#define IGBVF_TX_FLAGS_VLAN 0x00000002
1919#define IGBVF_TX_FLAGS_TSO 0x00000004
1920#define IGBVF_TX_FLAGS_IPV4 0x00000008
1921#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
1922#define IGBVF_TX_FLAGS_VLAN_SHIFT 16
1923
1924static int igbvf_tso(struct igbvf_adapter *adapter,
1925 struct igbvf_ring *tx_ring,
1926 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
1927{
1928 struct e1000_adv_tx_context_desc *context_desc;
1929 unsigned int i;
1930 int err;
1931 struct igbvf_buffer *buffer_info;
1932 u32 info = 0, tu_cmd = 0;
1933 u32 mss_l4len_idx, l4len;
1934 *hdr_len = 0;
1935
1936 if (skb_header_cloned(skb)) {
1937 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1938 if (err) {
1939 dev_err(&adapter->pdev->dev,
1940 "igbvf_tso returning an error\n");
1941 return err;
1942 }
1943 }
1944
1945 l4len = tcp_hdrlen(skb);
1946 *hdr_len += l4len;
1947
1948 if (skb->protocol == htons(ETH_P_IP)) {
1949 struct iphdr *iph = ip_hdr(skb);
1950 iph->tot_len = 0;
1951 iph->check = 0;
1952 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1953 iph->daddr, 0,
1954 IPPROTO_TCP,
1955 0);
1956 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
1957 ipv6_hdr(skb)->payload_len = 0;
1958 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1959 &ipv6_hdr(skb)->daddr,
1960 0, IPPROTO_TCP, 0);
1961 }
1962
1963 i = tx_ring->next_to_use;
1964
1965 buffer_info = &tx_ring->buffer_info[i];
1966 context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
1967 /* VLAN MACLEN IPLEN */
1968 if (tx_flags & IGBVF_TX_FLAGS_VLAN)
1969 info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
1970 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
1971 *hdr_len += skb_network_offset(skb);
1972 info |= (skb_transport_header(skb) - skb_network_header(skb));
1973 *hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
1974 context_desc->vlan_macip_lens = cpu_to_le32(info);
1975
1976 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1977 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
1978
1979 if (skb->protocol == htons(ETH_P_IP))
1980 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
1981 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1982
1983 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
1984
1985 /* MSS L4LEN IDX */
1986 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
1987 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
1988
1989 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
1990 context_desc->seqnum_seed = 0;
1991
1992 buffer_info->time_stamp = jiffies;
1993 buffer_info->next_to_watch = i;
1994 buffer_info->dma = 0;
1995 i++;
1996 if (i == tx_ring->count)
1997 i = 0;
1998
1999 tx_ring->next_to_use = i;
2000
2001 return true;
2002}
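
/*
 * Illustrative sketch (hypothetical wrapper): the pseudo-header seed
 * computed in igbvf_tso() above. The length argument is 0 because the
 * hardware adds each segment's length while it replicates the headers.
 */
static inline __sum16 igbvf_example_tso_seed(const struct iphdr *iph)
{
	return ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0);
}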
2003
2004static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
2005 struct igbvf_ring *tx_ring,
2006 struct sk_buff *skb, u32 tx_flags)
2007{
2008 struct e1000_adv_tx_context_desc *context_desc;
2009 unsigned int i;
2010 struct igbvf_buffer *buffer_info;
2011 u32 info = 0, tu_cmd = 0;
2012
2013 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
2014 (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
2015 i = tx_ring->next_to_use;
2016 buffer_info = &tx_ring->buffer_info[i];
2017 context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
2018
2019 if (tx_flags & IGBVF_TX_FLAGS_VLAN)
2020 info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
2021
2022 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2023 if (skb->ip_summed == CHECKSUM_PARTIAL)
2024 info |= (skb_transport_header(skb) -
2025 skb_network_header(skb));
2026
2028 context_desc->vlan_macip_lens = cpu_to_le32(info);
2029
2030 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2031
2032 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2033 switch (skb->protocol) {
2034 case __constant_htons(ETH_P_IP):
2035 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2036 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2037 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2038 break;
2039 case __constant_htons(ETH_P_IPV6):
2040 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2041 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2042 break;
2043 default:
2044 break;
2045 }
2046 }
2047
2048 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2049 context_desc->seqnum_seed = 0;
2050 context_desc->mss_l4len_idx = 0;
2051
2052 buffer_info->time_stamp = jiffies;
2053 buffer_info->next_to_watch = i;
2054 buffer_info->dma = 0;
2055 i++;
2056 if (i == tx_ring->count)
2057 i = 0;
2058 tx_ring->next_to_use = i;
2059
2060 return true;
2061 }
2062
2063 return false;
2064}
2065
2066static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
2067{
2068 struct igbvf_adapter *adapter = netdev_priv(netdev);
2069
2070	/* if there are enough descriptors then we don't need to worry */
2071 if (igbvf_desc_unused(adapter->tx_ring) >= size)
2072 return 0;
2073
2074 netif_stop_queue(netdev);
2075
2076 smp_mb();
2077
2078 /* We need to check again just in case room has been made available */
2079 if (igbvf_desc_unused(adapter->tx_ring) < size)
2080 return -EBUSY;
2081
2082 netif_wake_queue(netdev);
2083
2084 ++adapter->restart_queue;
2085 return 0;
2086}
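
/*
 * Illustrative sketch (hypothetical): how the transmit path reserves a
 * worst-case number of descriptors up front, as the xmit routine below
 * does -- one per fragment plus context, data and gap descriptors.
 */
static inline int igbvf_example_reserve(struct net_device *netdev)
{
	return igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
}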
2087
2088#define IGBVF_MAX_TXD_PWR 16
2089#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
2090
2091static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2092 struct igbvf_ring *tx_ring,
2093 struct sk_buff *skb,
2094 unsigned int first)
2095{
2096 struct igbvf_buffer *buffer_info;
2097 unsigned int len = skb_headlen(skb);
2098 unsigned int count = 0, i;
2099 unsigned int f;
2100 dma_addr_t *map;
2101
2102 i = tx_ring->next_to_use;
2103
2104 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
2105 dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
2106 return 0;
2107 }
2108
2109 map = skb_shinfo(skb)->dma_maps;
2110
2111 buffer_info = &tx_ring->buffer_info[i];
2112 BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2113 buffer_info->length = len;
2114 /* set time_stamp *before* dma to help avoid a possible race */
2115 buffer_info->time_stamp = jiffies;
2116 buffer_info->next_to_watch = i;
2117 buffer_info->dma = map[count];
2118 count++;
2119
2120 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2121 struct skb_frag_struct *frag;
2122
2123 i++;
2124 if (i == tx_ring->count)
2125 i = 0;
2126
2127 frag = &skb_shinfo(skb)->frags[f];
2128 len = frag->size;
2129
2130 buffer_info = &tx_ring->buffer_info[i];
2131 BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2132 buffer_info->length = len;
2133 buffer_info->time_stamp = jiffies;
2134 buffer_info->next_to_watch = i;
2135 buffer_info->dma = map[count];
2136 count++;
2137 }
2138
2139 tx_ring->buffer_info[i].skb = skb;
2140 tx_ring->buffer_info[first].next_to_watch = i;
2141
2142 return count;
2143}
2144
2145static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2146 struct igbvf_ring *tx_ring,
2147 int tx_flags, int count, u32 paylen,
2148 u8 hdr_len)
2149{
2150 union e1000_adv_tx_desc *tx_desc = NULL;
2151 struct igbvf_buffer *buffer_info;
2152 u32 olinfo_status = 0, cmd_type_len;
2153 unsigned int i;
2154
2155 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2156 E1000_ADVTXD_DCMD_DEXT);
2157
2158 if (tx_flags & IGBVF_TX_FLAGS_VLAN)
2159 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2160
2161 if (tx_flags & IGBVF_TX_FLAGS_TSO) {
2162 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2163
2164 /* insert tcp checksum */
2165 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2166
2167 /* insert ip checksum */
2168 if (tx_flags & IGBVF_TX_FLAGS_IPV4)
2169 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2170
2171 } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
2172 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2173 }
2174
2175 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2176
2177 i = tx_ring->next_to_use;
2178 while (count--) {
2179 buffer_info = &tx_ring->buffer_info[i];
2180 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
2181 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2182 tx_desc->read.cmd_type_len =
2183 cpu_to_le32(cmd_type_len | buffer_info->length);
2184 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2185 i++;
2186 if (i == tx_ring->count)
2187 i = 0;
2188 }
2189
2190 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
2191 /* Force memory writes to complete before letting h/w
2192 * know there are new descriptors to fetch. (Only
2193 * applicable for weak-ordered memory model archs,
2194 * such as IA-64). */
2195 wmb();
2196
2197 tx_ring->next_to_use = i;
2198 writel(i, adapter->hw.hw_addr + tx_ring->tail);
2199	/* we need this if more than one processor can write to our tail
2200	 * at a time; it synchronizes IO on IA64/Altix systems */
2201 mmiowb();
2202}
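
/*
 * Illustrative sketch (hypothetical helper): the doorbell sequence that
 * ends igbvf_tx_queue_adv() above -- descriptor writes must be globally
 * visible before the tail register tells hardware to fetch them.
 */
static inline void igbvf_example_bump_tail(struct igbvf_adapter *adapter,
					   struct igbvf_ring *ring,
					   unsigned int i)
{
	wmb();				/* descriptors before doorbell */
	ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + ring->tail);
	mmiowb();			/* order MMIO on IA64/Altix */
}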
2203
2204static int igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2205 struct net_device *netdev,
2206 struct igbvf_ring *tx_ring)
2207{
2208 struct igbvf_adapter *adapter = netdev_priv(netdev);
2209 unsigned int first, tx_flags = 0;
2210 u8 hdr_len = 0;
2211 int count = 0;
2212 int tso = 0;
2213
2214 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2215 dev_kfree_skb_any(skb);
2216 return NETDEV_TX_OK;
2217 }
2218
2219 if (skb->len <= 0) {
2220 dev_kfree_skb_any(skb);
2221 return NETDEV_TX_OK;
2222 }
2223
2224 /*
2225 * need: count + 4 descriptors:
2226 * + 2 desc gap to keep tail from touching head,
2227 * + 1 desc for skb->data,
2228 * + 1 desc for context descriptor;
2229 * if that many are not free, try again next time
2230 */
2231 if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
2232 /* this is a hard error */
2233 return NETDEV_TX_BUSY;
2234 }
2235
2236 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
2237 tx_flags |= IGBVF_TX_FLAGS_VLAN;
2238 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
2239 }
2240
2241 if (skb->protocol == htons(ETH_P_IP))
2242 tx_flags |= IGBVF_TX_FLAGS_IPV4;
2243
2244 first = tx_ring->next_to_use;
2245
2246 tso = skb_is_gso(skb) ?
2247 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
2248 if (unlikely(tso < 0)) {
2249 dev_kfree_skb_any(skb);
2250 return NETDEV_TX_OK;
2251 }
2252
2253 if (tso)
2254 tx_flags |= IGBVF_TX_FLAGS_TSO;
2255 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
2256 (skb->ip_summed == CHECKSUM_PARTIAL))
2257 tx_flags |= IGBVF_TX_FLAGS_CSUM;
2258
2259 /*
2260 * count reflects descriptors mapped; if 0, a mapping error
2261 * has occurred and we need to rewind the descriptor queue
2262 */
2263 count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
2264
2265 if (count) {
2266 igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
2267 skb->len, hdr_len);
2268 netdev->trans_start = jiffies;
2269 /* Make sure there is space in the ring for the next send. */
2270 igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
2271 } else {
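		/* zero descriptors were mapped (DMA mapping failed), so drop
		 * the skb and rewind next_to_use; nothing was handed to the
		 * hardware */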
2272 dev_kfree_skb_any(skb);
2273 tx_ring->buffer_info[first].time_stamp = 0;
2274 tx_ring->next_to_use = first;
2275 }
2276
2277 return NETDEV_TX_OK;
2278}
2279
2280static int igbvf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2281{
2282 struct igbvf_adapter *adapter = netdev_priv(netdev);
2283 struct igbvf_ring *tx_ring;
2284 int retval;
2285
2286 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2287 dev_kfree_skb_any(skb);
2288 return NETDEV_TX_OK;
2289 }
2290
2291 tx_ring = &adapter->tx_ring[0];
2292
2293 retval = igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
2294
2295 return retval;
2296}
2297
2298/**
2299 * igbvf_tx_timeout - Respond to a Tx Hang
2300 * @netdev: network interface device structure
2301 **/
2302static void igbvf_tx_timeout(struct net_device *netdev)
2303{
2304 struct igbvf_adapter *adapter = netdev_priv(netdev);
2305
2306 /* Do the reset outside of interrupt context */
2307 adapter->tx_timeout_count++;
2308 schedule_work(&adapter->reset_task);
2309}
2310
2311static void igbvf_reset_task(struct work_struct *work)
2312{
2313 struct igbvf_adapter *adapter;
2314 adapter = container_of(work, struct igbvf_adapter, reset_task);
2315
2316 igbvf_reinit_locked(adapter);
2317}
2318
2319/**
2320 * igbvf_get_stats - Get System Network Statistics
2321 * @netdev: network interface device structure
2322 *
2323 * Returns the address of the device statistics structure.
2324 * The statistics are actually updated from the timer callback.
2325 **/
2326static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
2327{
2328 struct igbvf_adapter *adapter = netdev_priv(netdev);
2329
2330 /* only return the current stats */
2331 return &adapter->net_stats;
2332}
2333
2334/**
2335 * igbvf_change_mtu - Change the Maximum Transfer Unit
2336 * @netdev: network interface device structure
2337 * @new_mtu: new value for maximum frame size
2338 *
2339 * Returns 0 on success, negative on failure
2340 **/
2341static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
2342{
2343 struct igbvf_adapter *adapter = netdev_priv(netdev);
2344 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2345
2346 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2347 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
2348 return -EINVAL;
2349 }
2350
2351 /* Jumbo frame size limits */
2352 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
2353 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
2354 dev_err(&adapter->pdev->dev,
2355 "Jumbo Frames not supported.\n");
2356 return -EINVAL;
2357 }
2358 }
2359
2360#define MAX_STD_JUMBO_FRAME_SIZE 9234
2361 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2362 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
2363 return -EINVAL;
2364 }
2365
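	/* serialize MTU changes with any other path that resets the part */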
2366 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
2367 msleep(1);
2368 /* igbvf_down has a dependency on max_frame_size */
2369 adapter->max_frame_size = max_frame;
2370 if (netif_running(netdev))
2371 igbvf_down(adapter);
2372
2373 /*
2374 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2375 * means we reserve 2 more; this pushes us to allocate from the next
2376 * larger slab size,
2377 * e.g. RXBUFFER_2048 --> size-4096 slab.
2378 * However, with the new *_jumbo_rx* routines, jumbo receives will use
2379 * fragmented skbs.
2380 */
2381
2382 if (max_frame <= 1024)
2383 adapter->rx_buffer_len = 1024;
2384 else if (max_frame <= 2048)
2385 adapter->rx_buffer_len = 2048;
2386 else
2387#if (PAGE_SIZE / 2) > 16384
2388 adapter->rx_buffer_len = 16384;
2389#else
2390 adapter->rx_buffer_len = PAGE_SIZE / 2;
2391#endif
2392
2393
2394 /* adjust allocation if LPE protects us, and we aren't using SBP */
2395 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
2396 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
2397 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
2398 ETH_FCS_LEN;
2399
2400 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
2401 netdev->mtu, new_mtu);
2402 netdev->mtu = new_mtu;
2403
2404 if (netif_running(netdev))
2405 igbvf_up(adapter);
2406 else
2407 igbvf_reset(adapter);
2408
2409 clear_bit(__IGBVF_RESETTING, &adapter->state);
2410
2411 return 0;
2412}
2413
2414static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2415{
2416 switch (cmd) {
2417 default:
2418 return -EOPNOTSUPP;
2419 }
2420}
2421
2422static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
2423{
2424 struct net_device *netdev = pci_get_drvdata(pdev);
2425 struct igbvf_adapter *adapter = netdev_priv(netdev);
2426#ifdef CONFIG_PM
2427 int retval = 0;
2428#endif
2429
2430 netif_device_detach(netdev);
2431
2432 if (netif_running(netdev)) {
2433 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
2434 igbvf_down(adapter);
2435 igbvf_free_irq(adapter);
2436 }
2437
2438#ifdef CONFIG_PM
2439 retval = pci_save_state(pdev);
2440 if (retval)
2441 return retval;
2442#endif
2443
2444 pci_disable_device(pdev);
2445
2446 return 0;
2447}
2448
2449#ifdef CONFIG_PM
2450static int igbvf_resume(struct pci_dev *pdev)
2451{
2452 struct net_device *netdev = pci_get_drvdata(pdev);
2453 struct igbvf_adapter *adapter = netdev_priv(netdev);
2454 u32 err;
2455
2456 pci_restore_state(pdev);
2457 err = pci_enable_device_mem(pdev);
2458 if (err) {
2459 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2460 return err;
2461 }
2462
2463 pci_set_master(pdev);
2464
2465 if (netif_running(netdev)) {
2466 err = igbvf_request_irq(adapter);
2467 if (err)
2468 return err;
2469 }
2470
2471 igbvf_reset(adapter);
2472
2473 if (netif_running(netdev))
2474 igbvf_up(adapter);
2475
2476 netif_device_attach(netdev);
2477
2478 return 0;
2479}
2480#endif
2481
2482static void igbvf_shutdown(struct pci_dev *pdev)
2483{
2484 igbvf_suspend(pdev, PMSG_SUSPEND);
2485}
2486
2487#ifdef CONFIG_NET_POLL_CONTROLLER
2488/*
2489 * Polling 'interrupt' - used by things like netconsole to send skbs
2490 * without having to re-enable interrupts. It's not called while
2491 * the interrupt routine is executing.
2492 */
2493static void igbvf_netpoll(struct net_device *netdev)
2494{
2495 struct igbvf_adapter *adapter = netdev_priv(netdev);
2496
2497 disable_irq(adapter->pdev->irq);
2498
2499 igbvf_clean_tx_irq(adapter->tx_ring);
2500
2501 enable_irq(adapter->pdev->irq);
2502}
2503#endif
2504
2505/**
2506 * igbvf_io_error_detected - called when PCI error is detected
2507 * @pdev: Pointer to PCI device
2508 * @state: The current pci connection state
2509 *
2510 * This function is called after a PCI bus error affecting
2511 * this device has been detected.
2512 */
2513static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
2514 pci_channel_state_t state)
2515{
2516 struct net_device *netdev = pci_get_drvdata(pdev);
2517 struct igbvf_adapter *adapter = netdev_priv(netdev);
2518
2519 netif_device_detach(netdev);
2520
2521 if (netif_running(netdev))
2522 igbvf_down(adapter);
2523 pci_disable_device(pdev);
2524
2525 /* Request a slot reset. */
2526 return PCI_ERS_RESULT_NEED_RESET;
2527}
2528
2529/**
2530 * igbvf_io_slot_reset - called after the pci bus has been reset.
2531 * @pdev: Pointer to PCI device
2532 *
2533 * Restart the card from scratch, as if from a cold boot. Implementation
2534 * resembles the first half of the igbvf_resume routine.
2535 */
2536static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
2537{
2538 struct net_device *netdev = pci_get_drvdata(pdev);
2539 struct igbvf_adapter *adapter = netdev_priv(netdev);
2540
2541 if (pci_enable_device_mem(pdev)) {
2542 dev_err(&pdev->dev,
2543 "Cannot re-enable PCI device after reset.\n");
2544 return PCI_ERS_RESULT_DISCONNECT;
2545 }
2546 pci_set_master(pdev);
2547
2548 igbvf_reset(adapter);
2549
2550 return PCI_ERS_RESULT_RECOVERED;
2551}
2552
2553/**
2554 * igbvf_io_resume - called when traffic can start flowing again.
2555 * @pdev: Pointer to PCI device
2556 *
2557 * This callback is called when the error recovery driver tells us that
2558 * it is OK to resume normal operation. Implementation resembles the
2559 * second half of the igbvf_resume routine.
2560 */
2561static void igbvf_io_resume(struct pci_dev *pdev)
2562{
2563 struct net_device *netdev = pci_get_drvdata(pdev);
2564 struct igbvf_adapter *adapter = netdev_priv(netdev);
2565
2566 if (netif_running(netdev)) {
2567 if (igbvf_up(adapter)) {
2568 dev_err(&pdev->dev,
2569 "can't bring device back up after reset\n");
2570 return;
2571 }
2572 }
2573
2574 netif_device_attach(netdev);
2575}
2576
2577static void igbvf_print_device_info(struct igbvf_adapter *adapter)
2578{
2579 struct e1000_hw *hw = &adapter->hw;
2580 struct net_device *netdev = adapter->netdev;
2581 struct pci_dev *pdev = adapter->pdev;
2582
2583 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
2584 dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
2585 /* MAC address */
2586 netdev->dev_addr[0], netdev->dev_addr[1],
2587 netdev->dev_addr[2], netdev->dev_addr[3],
2588 netdev->dev_addr[4], netdev->dev_addr[5]);
2589 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
2590}
2591
2592static const struct net_device_ops igbvf_netdev_ops = {
2593 .ndo_open = igbvf_open,
2594 .ndo_stop = igbvf_close,
2595 .ndo_start_xmit = igbvf_xmit_frame,
2596 .ndo_get_stats = igbvf_get_stats,
2597 .ndo_set_multicast_list = igbvf_set_multi,
2598 .ndo_set_mac_address = igbvf_set_mac,
2599 .ndo_change_mtu = igbvf_change_mtu,
2600 .ndo_do_ioctl = igbvf_ioctl,
2601 .ndo_tx_timeout = igbvf_tx_timeout,
2602 .ndo_vlan_rx_register = igbvf_vlan_rx_register,
2603 .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
2604 .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
2605#ifdef CONFIG_NET_POLL_CONTROLLER
2606 .ndo_poll_controller = igbvf_netpoll,
2607#endif
2608};
2609
2610/**
2611 * igbvf_probe - Device Initialization Routine
2612 * @pdev: PCI device information struct
2613 * @ent: entry in igbvf_pci_tbl
2614 *
2615 * Returns 0 on success, negative on failure
2616 *
2617 * igbvf_probe initializes an adapter identified by a pci_dev structure.
2618 * The OS initialization, configuring of the adapter private structure,
2619 * and a hardware reset occur.
2620 **/
2621static int __devinit igbvf_probe(struct pci_dev *pdev,
2622 const struct pci_device_id *ent)
2623{
2624 struct net_device *netdev;
2625 struct igbvf_adapter *adapter;
2626 struct e1000_hw *hw;
2627 const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
2628
2629 static int cards_found;
2630 int err, pci_using_dac;
2631
2632 err = pci_enable_device_mem(pdev);
2633 if (err)
2634 return err;
2635
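	/* prefer 64-bit DMA so high memory need not be bounced; fall back
	 * to a 32-bit mask if the platform cannot provide it */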
2636 pci_using_dac = 0;
2637 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
2638 if (!err) {
2639 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2640 if (!err)
2641 pci_using_dac = 1;
2642 } else {
2643 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2644 if (err) {
2645 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2646 if (err) {
2647 dev_err(&pdev->dev, "No usable DMA "
2648 "configuration, aborting\n");
2649 goto err_dma;
2650 }
2651 }
2652 }
2653
2654 err = pci_request_regions(pdev, igbvf_driver_name);
2655 if (err)
2656 goto err_pci_reg;
2657
2658 pci_set_master(pdev);
2659
2660 err = -ENOMEM;
2661 netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
2662 if (!netdev)
2663 goto err_alloc_etherdev;
2664
2665 SET_NETDEV_DEV(netdev, &pdev->dev);
2666
2667 pci_set_drvdata(pdev, netdev);
2668 adapter = netdev_priv(netdev);
2669 hw = &adapter->hw;
2670 adapter->netdev = netdev;
2671 adapter->pdev = pdev;
2672 adapter->ei = ei;
2673 adapter->pba = ei->pba;
2674 adapter->flags = ei->flags;
2675 adapter->hw.back = adapter;
2676 adapter->hw.mac.type = ei->mac;
2677 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
2678
2679 /* PCI config space info */
2680
2681 hw->vendor_id = pdev->vendor;
2682 hw->device_id = pdev->device;
2683 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2684 hw->subsystem_device_id = pdev->subsystem_device;
2685
2686 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2687
2688 err = -EIO;
2689 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
2690 pci_resource_len(pdev, 0));
2691
2692 if (!adapter->hw.hw_addr)
2693 goto err_ioremap;
2694
2695 if (ei->get_variants) {
2696 err = ei->get_variants(adapter);
2697 if (err)
2698 goto err_ioremap;
2699 }
2700
2701 /* setup adapter struct */
2702 err = igbvf_sw_init(adapter);
2703 if (err)
2704 goto err_sw_init;
2705
2706 /* construct the net_device struct */
2707 netdev->netdev_ops = &igbvf_netdev_ops;
2708
2709 igbvf_set_ethtool_ops(netdev);
2710 netdev->watchdog_timeo = 5 * HZ;
2711 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2712
2713 adapter->bd_number = cards_found++;
2714
2715 netdev->features = NETIF_F_SG |
2716 NETIF_F_IP_CSUM |
2717 NETIF_F_HW_VLAN_TX |
2718 NETIF_F_HW_VLAN_RX |
2719 NETIF_F_HW_VLAN_FILTER;
2720
2721 netdev->features |= NETIF_F_IPV6_CSUM;
2722 netdev->features |= NETIF_F_TSO;
2723 netdev->features |= NETIF_F_TSO6;
2724
2725 if (pci_using_dac)
2726 netdev->features |= NETIF_F_HIGHDMA;
2727
2728 netdev->vlan_features |= NETIF_F_TSO;
2729 netdev->vlan_features |= NETIF_F_TSO6;
2730 netdev->vlan_features |= NETIF_F_IP_CSUM;
2731 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
2732 netdev->vlan_features |= NETIF_F_SG;
2733
2734 /* reset the controller to put the device in a known good state */
2735 err = hw->mac.ops.reset_hw(hw);
2736 if (err) {
2737 dev_info(&pdev->dev,
2738 "PF still in reset state, assigning new address\n");
2739 random_ether_addr(hw->mac.addr);
2740 } else {
2741 err = hw->mac.ops.read_mac_addr(hw);
2742 if (err) {
2743 dev_err(&pdev->dev, "Error reading MAC address\n");
2744 goto err_hw_init;
2745 }
2746 }
2747
2748 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2749 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
2750
2751 if (!is_valid_ether_addr(netdev->perm_addr)) {
2752 dev_err(&pdev->dev, "Invalid MAC Address: "
2753 "%02x:%02x:%02x:%02x:%02x:%02x\n",
2754 netdev->dev_addr[0], netdev->dev_addr[1],
2755 netdev->dev_addr[2], netdev->dev_addr[3],
2756 netdev->dev_addr[4], netdev->dev_addr[5]);
2757 err = -EIO;
2758 goto err_hw_init;
2759 }
2760
2761 setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
2762 (unsigned long) adapter);
2763
2764 INIT_WORK(&adapter->reset_task, igbvf_reset_task);
2765 INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
2766
2767 /* ring size defaults */
2768 adapter->rx_ring->count = 1024;
2769 adapter->tx_ring->count = 1024;
2770
2771 /* reset the hardware with the new settings */
2772 igbvf_reset(adapter);
2773
2774 /* tell the stack to leave us alone until igbvf_open() is called */
2775 netif_carrier_off(netdev);
2776 netif_stop_queue(netdev);
2777
2778 strcpy(netdev->name, "eth%d");
2779 err = register_netdev(netdev);
2780 if (err)
2781 goto err_hw_init;
2782
2783 igbvf_print_device_info(adapter);
2784
2785 igbvf_initialize_last_counter_stats(adapter);
2786
2787 return 0;
2788
2789err_hw_init:
2790 kfree(adapter->tx_ring);
2791 kfree(adapter->rx_ring);
2792err_sw_init:
2793 igbvf_reset_interrupt_capability(adapter);
2794 iounmap(adapter->hw.hw_addr);
2795err_ioremap:
2796 free_netdev(netdev);
2797err_alloc_etherdev:
2798 pci_release_regions(pdev);
2799err_pci_reg:
2800err_dma:
2801 pci_disable_device(pdev);
2802 return err;
2803}
2804
2805/**
2806 * igbvf_remove - Device Removal Routine
2807 * @pdev: PCI device information struct
2808 *
2809 * igbvf_remove is called by the PCI subsystem to alert the driver
2810 * that it should release a PCI device. This could be caused by a
2811 * Hot-Plug event, or because the driver is going to be removed from
2812 * memory.
2813 **/
2814static void __devexit igbvf_remove(struct pci_dev *pdev)
2815{
2816 struct net_device *netdev = pci_get_drvdata(pdev);
2817 struct igbvf_adapter *adapter = netdev_priv(netdev);
2818 struct e1000_hw *hw = &adapter->hw;
2819
2820 /*
2821 * flush_scheduled_work() may reschedule our watchdog task, so
2822 * explicitly prevent the watchdog task from being rescheduled
2823 */
2824 set_bit(__IGBVF_DOWN, &adapter->state);
2825 del_timer_sync(&adapter->watchdog_timer);
2826
2827 flush_scheduled_work();
2828
2829 unregister_netdev(netdev);
2830
2831 igbvf_reset_interrupt_capability(adapter);
2832
2833 /*
2834 * it is important to delete the napi struct prior to freeing the
2835 * rx ring so that you do not end up with NULL pointer dereferences
2836 */
2837 netif_napi_del(&adapter->rx_ring->napi);
2838 kfree(adapter->tx_ring);
2839 kfree(adapter->rx_ring);
2840
2841 iounmap(hw->hw_addr);
2842 if (hw->flash_address)
2843 iounmap(hw->flash_address);
2844 pci_release_regions(pdev);
2845
2846 free_netdev(netdev);
2847
2848 pci_disable_device(pdev);
2849}
2850
2851/* PCI Error Recovery (ERS) */
2852static struct pci_error_handlers igbvf_err_handler = {
2853 .error_detected = igbvf_io_error_detected,
2854 .slot_reset = igbvf_io_slot_reset,
2855 .resume = igbvf_io_resume,
2856};
2857
2858static struct pci_device_id igbvf_pci_tbl[] = {
2859 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
2860 { } /* terminate list */
2861};
2862MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
2863
2864/* PCI Device API Driver */
2865static struct pci_driver igbvf_driver = {
2866 .name = igbvf_driver_name,
2867 .id_table = igbvf_pci_tbl,
2868 .probe = igbvf_probe,
2869 .remove = __devexit_p(igbvf_remove),
2870#ifdef CONFIG_PM
2871 /* Power Management Hooks */
2872 .suspend = igbvf_suspend,
2873 .resume = igbvf_resume,
2874#endif
2875 .shutdown = igbvf_shutdown,
2876 .err_handler = &igbvf_err_handler
2877};
2878
2879/**
2880 * igbvf_init_module - Driver Registration Routine
2881 *
2882 * igbvf_init_module is the first routine called when the driver is
2883 * loaded. All it does is register with the PCI subsystem.
2884 **/
2885static int __init igbvf_init_module(void)
2886{
2887 int ret;
2888 printk(KERN_INFO "%s - version %s\n",
2889 igbvf_driver_string, igbvf_driver_version);
2890 printk(KERN_INFO "%s\n", igbvf_copyright);
2891
2892 ret = pci_register_driver(&igbvf_driver);
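	/* register a CPU DMA latency requirement; PM_QOS_DEFAULT_VALUE
	 * imposes no constraint until the value is updated at run time */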
2893 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name,
2894 PM_QOS_DEFAULT_VALUE);
2895
2896 return ret;
2897}
2898module_init(igbvf_init_module);
2899
2900/**
2901 * igbvf_exit_module - Driver Exit Cleanup Routine
2902 *
2903 * igbvf_exit_module is called just before the driver is removed
2904 * from memory.
2905 **/
2906static void __exit igbvf_exit_module(void)
2907{
2908 pci_unregister_driver(&igbvf_driver);
2909 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name);
2910}
2911module_exit(igbvf_exit_module);
2912
2913
2914MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
2915MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver");
2916MODULE_LICENSE("GPL");
2917MODULE_VERSION(DRV_VERSION);
2918
2919/* netdev.c */
diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h
new file mode 100644
index 000000000000..b9e24ed70d0a
--- /dev/null
+++ b/drivers/net/igbvf/regs.h
@@ -0,0 +1,108 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_REGS_H_
29#define _E1000_REGS_H_
30
31#define E1000_CTRL 0x00000 /* Device Control - RW */
32#define E1000_STATUS 0x00008 /* Device Status - RO */
33#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
34#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
35#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
36#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
37#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
38#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
39#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
40#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
41#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
42#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
43/*
44 * Convenience macros
45 *
46 * Note: "_n" is the queue number of the register to be written to.
47 *
48 * Example usage:
49 * E1000_RDBAL(current_rx_queue)
50 */
51#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
52 (0x0C000 + ((_n) * 0x40)))
53#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
54 (0x0C004 + ((_n) * 0x40)))
55#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
56 (0x0C008 + ((_n) * 0x40)))
57#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
58 (0x0C00C + ((_n) * 0x40)))
59#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
60 (0x0C010 + ((_n) * 0x40)))
61#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
62 (0x0C018 + ((_n) * 0x40)))
63#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
64 (0x0C028 + ((_n) * 0x40)))
65#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
66 (0x0E000 + ((_n) * 0x40)))
67#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
68 (0x0E004 + ((_n) * 0x40)))
69#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
70 (0x0E008 + ((_n) * 0x40)))
71#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
72 (0x0E010 + ((_n) * 0x40)))
73#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
74 (0x0E018 + ((_n) * 0x40)))
75#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
76 (0x0E028 + ((_n) * 0x40)))
77#define E1000_DCA_TXCTRL(_n) (0x03814 + ((_n) << 8))
78#define E1000_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
79#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
80 (0x054E0 + (((_i) - 16) * 8)))
81#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
82 (0x054E4 + (((_i) - 16) * 8)))
83
84/* Statistics registers */
85#define E1000_VFGPRC 0x00F10
86#define E1000_VFGORC 0x00F18
87#define E1000_VFMPRC 0x00F3C
88#define E1000_VFGPTC 0x00F14
89#define E1000_VFGOTC 0x00F34
90#define E1000_VFGOTLBC 0x00F50
91#define E1000_VFGPTLBC 0x00F44
92#define E1000_VFGORLBC 0x00F48
93#define E1000_VFGPRLBC 0x00F40
94
95/* These act per VF so an array friendly macro is used */
96#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
97#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
98
99/* Define macros for handling registers */
100#define er32(reg) readl(hw->hw_addr + E1000_##reg)
101#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg)
102#define array_er32(reg, offset) \
103 readl(hw->hw_addr + E1000_##reg + (offset << 2))
104#define array_ew32(reg, offset, val) \
105 writel((val), hw->hw_addr + E1000_##reg + (offset << 2))
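/* reading STATUS back flushes previously posted MMIO writes to the device */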
106#define e1e_flush() er32(STATUS)
107
108#endif
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
new file mode 100644
index 000000000000..aa246c93279d
--- /dev/null
+++ b/drivers/net/igbvf/vf.c
@@ -0,0 +1,398 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29#include "vf.h"
30
31static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
32static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
33 u16 *duplex);
34static s32 e1000_init_hw_vf(struct e1000_hw *hw);
35static s32 e1000_reset_hw_vf(struct e1000_hw *hw);
36
37static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *,
38 u32, u32, u32);
39static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
40static s32 e1000_read_mac_addr_vf(struct e1000_hw *);
41static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool);
42
43/**
44 * e1000_init_mac_params_vf - Inits MAC params
45 * @hw: pointer to the HW structure
46 **/
47s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
48{
49 struct e1000_mac_info *mac = &hw->mac;
50
51 /* VFs have no MTA registers of their own; mirror the PF's count (128) */
52 mac->mta_reg_count = 128;
53 /* VFs have a single receive address slot, programmed via the PF */
54 mac->rar_entry_count = 1;
55
56 /* Function pointers */
57 /* reset */
58 mac->ops.reset_hw = e1000_reset_hw_vf;
59 /* hw initialization */
60 mac->ops.init_hw = e1000_init_hw_vf;
61 /* check for link */
62 mac->ops.check_for_link = e1000_check_for_link_vf;
63 /* link info */
64 mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
65 /* multicast address update */
66 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
67 /* set mac address */
68 mac->ops.rar_set = e1000_rar_set_vf;
69 /* read mac address */
70 mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
71 /* set vlan filter table array */
72 mac->ops.set_vfta = e1000_set_vfta_vf;
73
74 return E1000_SUCCESS;
75}
76
77/**
78 * e1000_init_function_pointers_vf - Inits function pointers
79 * @hw: pointer to the HW structure
80 **/
81void e1000_init_function_pointers_vf(struct e1000_hw *hw)
82{
83 hw->mac.ops.init_params = e1000_init_mac_params_vf;
84 hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
85}
86
87/**
88 * e1000_get_link_up_info_vf - Gets link info.
89 * @hw: pointer to the HW structure
90 * @speed: pointer to 16 bit value to store link speed.
91 * @duplex: pointer to 16 bit value to store duplex.
92 *
93 * Since we cannot read the PHY and get accurate link info, we must rely upon
94 * the status register's data, which is often stale and inaccurate.
95 **/
96static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
97 u16 *duplex)
98{
99 s32 status;
100
101 status = er32(STATUS);
102 if (status & E1000_STATUS_SPEED_1000)
103 *speed = SPEED_1000;
104 else if (status & E1000_STATUS_SPEED_100)
105 *speed = SPEED_100;
106 else
107 *speed = SPEED_10;
108
109 if (status & E1000_STATUS_FD)
110 *duplex = FULL_DUPLEX;
111 else
112 *duplex = HALF_DUPLEX;
113
114 return E1000_SUCCESS;
115}
116
117/**
118 * e1000_reset_hw_vf - Resets the HW
119 * @hw: pointer to the HW structure
120 *
121 * VFs provide a function-level reset. This is done using bit 26 of ctrl_reg.
122 * This is all the reset we can perform on a VF.
123 **/
124static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
125{
126 struct e1000_mbx_info *mbx = &hw->mbx;
127 u32 timeout = E1000_VF_INIT_TIMEOUT;
128 s32 ret_val = -E1000_ERR_MAC_INIT;
129 u32 msgbuf[3];
130 u8 *addr = (u8 *)(&msgbuf[1]);
131 u32 ctrl;
132
133 /* assert vf queue/interrupt reset */
134 ctrl = er32(CTRL);
135 ew32(CTRL, ctrl | E1000_CTRL_RST);
136
137 /* we cannot initialize while the RSTI / RSTD bits are asserted */
138 while (!mbx->ops.check_for_rst(hw) && timeout) {
139 timeout--;
140 udelay(5);
141 }
142
143 if (timeout) {
144 /* mailbox timeout can now become active */
145 mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
146
147 /* notify pf of vf reset completion */
148 msgbuf[0] = E1000_VF_RESET;
149 mbx->ops.write_posted(hw, msgbuf, 1);
150
151 msleep(10);
152
153 /* set our "perm_addr" based on info provided by PF */
154 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
155 if (!ret_val) {
156 if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
157 memcpy(hw->mac.perm_addr, addr, 6);
158 else
159 ret_val = -E1000_ERR_MAC_INIT;
160 }
161 }
162
163 return ret_val;
164}
165
166/**
167 * e1000_init_hw_vf - Inits the HW
168 * @hw: pointer to the HW structure
169 *
170 * Not much to do here except clear the PF Reset indication if there is one.
171 **/
172static s32 e1000_init_hw_vf(struct e1000_hw *hw)
173{
174 /* attempt to set and restore our mac address */
175 e1000_rar_set_vf(hw, hw->mac.addr, 0);
176
177 return E1000_SUCCESS;
178}
179
180/**
181 * e1000_hash_mc_addr_vf - Generate a multicast hash value
182 * @hw: pointer to the HW structure
183 * @mc_addr: pointer to a multicast address
184 *
185 * Generates a multicast address hash value which is used to determine
186 * the multicast filter table array address and new table value. See
187 * e1000_mta_set_generic()
188 **/
189static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
190{
191 u32 hash_value, hash_mask;
192 u8 bit_shift = 0;
193
194 /* Register count multiplied by bits per register */
195 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
196
197 /*
198 * The bit_shift is the number of left-shifts
199 * where 0xFF would still fall within the hash mask.
200 */
201 while (hash_mask >> bit_shift != 0xFF)
202 bit_shift++;
203
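	/*
	 * Worked example: with mta_reg_count = 128, hash_mask = 0xFFF and
	 * bit_shift = 4, so the hash below is the upper 12 bits of the
	 * 16-bit value formed by the last two bytes of the address.
	 */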
204 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
205 (((u16) mc_addr[5]) << bit_shift)));
206
207 return hash_value;
208}
209
210/**
211 * e1000_update_mc_addr_list_vf - Update Multicast addresses
212 * @hw: pointer to the HW structure
213 * @mc_addr_list: array of multicast addresses to program
214 * @mc_addr_count: number of multicast addresses to program
215 * @rar_used_count: the first RAR register free to program
216 * @rar_count: total number of supported Receive Address Registers
217 *
218 * Updates the Receive Address Registers and Multicast Table Array.
219 * The caller must have a packed mc_addr_list of multicast addresses.
220 * The parameter rar_count will usually be hw->mac.rar_entry_count
221 * unless there are workarounds that change this.
222 **/
223void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
224 u8 *mc_addr_list, u32 mc_addr_count,
225 u32 rar_used_count, u32 rar_count)
226{
227 struct e1000_mbx_info *mbx = &hw->mbx;
228 u32 msgbuf[E1000_VFMAILBOX_SIZE];
229 u16 *hash_list = (u16 *)&msgbuf[1];
230 u32 hash_value;
231 u32 cnt, i;
232
233 /* Each entry in the list uses 1 16 bit word. We have 30
234 * 16 bit words available in our HW msg buffer (minus 1 for the
235 * msg type). That's 30 hash values if we pack 'em right. If
236 * there are more than 30 MC addresses to add then punt the
237 * extras for now and then add code to handle more than 30 later.
239 * It would be unusual for a server to request that many multicast
240 * addresses except in large enterprise network environments.
240 */
241
242 cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
243 msgbuf[0] = E1000_VF_SET_MULTICAST;
244 msgbuf[0] |= cnt << E1000_VT_MSGINFO_SHIFT;
245
246 for (i = 0; i < cnt; i++) {
247 hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list);
248 hash_list[i] = hash_value & 0x0FFFF;
249 mc_addr_list += ETH_ADDR_LEN;
250 }
251
252 mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE);
253}
254
255/**
256 * e1000_set_vfta_vf - Set/Unset vlan filter table address
257 * @hw: pointer to the HW structure
258 * @vid: determines the vfta register and bit to set/unset
259 * @set: if true then set bit, else clear bit
260 **/
261static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
262{
263 struct e1000_mbx_info *mbx = &hw->mbx;
264 u32 msgbuf[2];
265 s32 err;
266
267 msgbuf[0] = E1000_VF_SET_VLAN;
268 msgbuf[1] = vid;
269 /* setting the low bit of the 8-bit MSGINFO field indicates "add" */
270 if (set)
271 msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT;
272
273 mbx->ops.write_posted(hw, msgbuf, 2);
274
275 err = mbx->ops.read_posted(hw, msgbuf, 2);
276
277 /* if nacked the vlan was rejected */
278 if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)))
279 err = -E1000_ERR_MAC_INIT;
280
281 return err;
282}
283
284/**
 * e1000_rlpml_set_vf - Set the maximum receive packet length
285 * @hw: pointer to the HW structure
286 * @max_size: value to assign to max frame size
287 **/
288void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
289{
290 struct e1000_mbx_info *mbx = &hw->mbx;
291 u32 msgbuf[2];
292
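	/* the VF cannot program RLPML itself; ask the PF over the mailbox
	 * to raise the long-packet limit on this VF's behalf */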
293 msgbuf[0] = E1000_VF_SET_LPE;
294 msgbuf[1] = max_size;
295
296 mbx->ops.write_posted(hw, msgbuf, 2);
297}
298
299/**
300 * e1000_rar_set_vf - set device MAC address
301 * @hw: pointer to the HW structure
302 * @addr: pointer to the receive address
303 * @index: receive address array register index
304 **/
305static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index)
306{
307 struct e1000_mbx_info *mbx = &hw->mbx;
308 u32 msgbuf[3];
309 u8 *msg_addr = (u8 *)(&msgbuf[1]);
310 s32 ret_val;
311
312 memset(msgbuf, 0, sizeof(msgbuf));
313 msgbuf[0] = E1000_VF_SET_MAC_ADDR;
314 memcpy(msg_addr, addr, 6);
315 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
316
317 if (!ret_val)
318 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
319
320 /* if nacked the address was rejected, use "perm_addr" */
321 if (!ret_val &&
322 (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
323 e1000_read_mac_addr_vf(hw);
324}
325
326/**
327 * e1000_read_mac_addr_vf - Read device MAC address
328 * @hw: pointer to the HW structure
329 **/
330static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
331{
332 int i;
333
334 for (i = 0; i < ETH_ADDR_LEN; i++)
335 hw->mac.addr[i] = hw->mac.perm_addr[i];
336
337 return E1000_SUCCESS;
338}
339
340/**
341 * e1000_check_for_link_vf - Check for link for a virtual interface
342 * @hw: pointer to the HW structure
343 *
344 * Checks to see if the underlying PF is still talking to the VF and
345 * if it is then it reports the link state to the hardware, otherwise
346 * it reports link down and returns an error.
347 **/
348static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
349{
350 struct e1000_mbx_info *mbx = &hw->mbx;
351 struct e1000_mac_info *mac = &hw->mac;
352 s32 ret_val = E1000_SUCCESS;
353 u32 in_msg = 0;
354
355 /*
356 * We only want to run this if a reset has been asserted.
357 * In that case it could mean a link change, a device reset,
358 * or a virtual function reset.
359 */
360
361 /* If we were hit with a reset drop the link */
362 if (!mbx->ops.check_for_rst(hw))
363 mac->get_link_status = true;
364
365 if (!mac->get_link_status)
366 goto out;
367
368 /* if link status is down no point in checking to see if pf is up */
369 if (!(er32(STATUS) & E1000_STATUS_LU))
370 goto out;
371
372 /* if the read failed it could just be a mailbox collision; best to
373 * wait until we are called again rather than report an error */
374 if (mbx->ops.read(hw, &in_msg, 1))
375 goto out;
376
377 /* if the incoming message isn't CTS, we are waiting on a response */
378 if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
379 /* the message is not CTS but is a NACK: we must have lost CTS status */
380 if (in_msg & E1000_VT_MSGTYPE_NACK)
381 ret_val = -E1000_ERR_MAC_INIT;
382 goto out;
383 }
384
385 /* the PF is talking; if we timed out in the past, reinit */
386 if (!mbx->timeout) {
387 ret_val = -E1000_ERR_MAC_INIT;
388 goto out;
389 }
390
391 /* if we passed all the tests above then the link is up and we no
392 * longer need to check for link */
393 mac->get_link_status = false;
394
395out:
396 return ret_val;
397}
398
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
new file mode 100644
index 000000000000..ec07228f9478
--- /dev/null
+++ b/drivers/net/igbvf/vf.h
@@ -0,0 +1,265 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_VF_H_
29#define _E1000_VF_H_
30
31#include <linux/pci.h>
32#include <linux/delay.h>
33#include <linux/interrupt.h>
34#include <linux/if_ether.h>
35
36#include "regs.h"
37#include "defines.h"
38
39struct e1000_hw;
40
41#define E1000_DEV_ID_82576_VF 0x10CA
42#define E1000_REVISION_0 0
43#define E1000_REVISION_1 1
44#define E1000_REVISION_2 2
45#define E1000_REVISION_3 3
46#define E1000_REVISION_4 4
47
48#define E1000_FUNC_0 0
49#define E1000_FUNC_1 1
50
51/*
52 * Receive Address Register Count
53 * Number of high/low register pairs in the RAR. The RAR (Receive Address
54 * Registers) holds the directed and multicast addresses that we monitor.
55 * These entries are also used for MAC-based filtering.
56 */
57#define E1000_RAR_ENTRIES_VF 1
58
59/* Receive Descriptor - Advanced */
60union e1000_adv_rx_desc {
61 struct {
62 u64 pkt_addr; /* Packet buffer address */
63 u64 hdr_addr; /* Header buffer address */
64 } read;
65 struct {
66 struct {
67 union {
68 u32 data;
69 struct {
70 u16 pkt_info; /* RSS/Packet type */
71 u16 hdr_info; /* Split Header,
72 * hdr buffer length */
73 } hs_rss;
74 } lo_dword;
75 union {
76 u32 rss; /* RSS Hash */
77 struct {
78 u16 ip_id; /* IP id */
79 u16 csum; /* Packet Checksum */
80 } csum_ip;
81 } hi_dword;
82 } lower;
83 struct {
84 u32 status_error; /* ext status/error */
85 u16 length; /* Packet length */
86 u16 vlan; /* VLAN tag */
87 } upper;
88 } wb; /* writeback */
89};
90
91#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
92#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
93
94/* Transmit Descriptor - Advanced */
95union e1000_adv_tx_desc {
96 struct {
97 u64 buffer_addr; /* Address of descriptor's data buf */
98 u32 cmd_type_len;
99 u32 olinfo_status;
100 } read;
101 struct {
102 u64 rsvd; /* Reserved */
103 u32 nxtseq_seed;
104 u32 status;
105 } wb;
106};
107
108/* Adv Transmit Descriptor Config Masks */
109#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
110#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
111#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
112#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
113#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
114#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
115#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
116#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
117#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
118
119/* Context descriptors */
120struct e1000_adv_tx_context_desc {
121 u32 vlan_macip_lens;
122 u32 seqnum_seed;
123 u32 type_tucmd_mlhl;
124 u32 mss_l4len_idx;
125};
126
127#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
128#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
129#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
130#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
131#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
132
133enum e1000_mac_type {
134 e1000_undefined = 0,
135 e1000_vfadapt,
136 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
137};
138
139struct e1000_vf_stats {
140 u64 base_gprc;
141 u64 base_gptc;
142 u64 base_gorc;
143 u64 base_gotc;
144 u64 base_mprc;
145 u64 base_gotlbc;
146 u64 base_gptlbc;
147 u64 base_gorlbc;
148 u64 base_gprlbc;
149
150 u32 last_gprc;
151 u32 last_gptc;
152 u32 last_gorc;
153 u32 last_gotc;
154 u32 last_mprc;
155 u32 last_gotlbc;
156 u32 last_gptlbc;
157 u32 last_gorlbc;
158 u32 last_gprlbc;
159
160 u64 gprc;
161 u64 gptc;
162 u64 gorc;
163 u64 gotc;
164 u64 mprc;
165 u64 gotlbc;
166 u64 gptlbc;
167 u64 gorlbc;
168 u64 gprlbc;
169};
170
171#include "mbx.h"
172
173struct e1000_mac_operations {
174 /* Function pointers for the MAC. */
175 s32 (*init_params)(struct e1000_hw *);
176 s32 (*check_for_link)(struct e1000_hw *);
177 void (*clear_vfta)(struct e1000_hw *);
178 s32 (*get_bus_info)(struct e1000_hw *);
179 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
180 void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
181 s32 (*reset_hw)(struct e1000_hw *);
182 s32 (*init_hw)(struct e1000_hw *);
183 s32 (*setup_link)(struct e1000_hw *);
184 void (*write_vfta)(struct e1000_hw *, u32, u32);
185 void (*mta_set)(struct e1000_hw *, u32);
186 void (*rar_set)(struct e1000_hw *, u8*, u32);
187 s32 (*read_mac_addr)(struct e1000_hw *);
188 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
189};
190
191struct e1000_mac_info {
192 struct e1000_mac_operations ops;
193 u8 addr[6];
194 u8 perm_addr[6];
195
196 enum e1000_mac_type type;
197
198 u16 mta_reg_count;
199 u16 rar_entry_count;
200
201 bool get_link_status;
202};
203
204struct e1000_mbx_operations {
205 s32 (*init_params)(struct e1000_hw *hw);
206 s32 (*read)(struct e1000_hw *, u32 *, u16);
207 s32 (*write)(struct e1000_hw *, u32 *, u16);
208 s32 (*read_posted)(struct e1000_hw *, u32 *, u16);
209 s32 (*write_posted)(struct e1000_hw *, u32 *, u16);
210 s32 (*check_for_msg)(struct e1000_hw *);
211 s32 (*check_for_ack)(struct e1000_hw *);
212 s32 (*check_for_rst)(struct e1000_hw *);
213};
214
215struct e1000_mbx_stats {
216 u32 msgs_tx;
217 u32 msgs_rx;
218
219 u32 acks;
220 u32 reqs;
221 u32 rsts;
222};
223
224struct e1000_mbx_info {
225 struct e1000_mbx_operations ops;
226 struct e1000_mbx_stats stats;
227 u32 timeout;
228 u32 usec_delay;
229 u16 size;
230};
231
232struct e1000_dev_spec_vf {
233 u32 vf_number;
234 u32 v2p_mailbox;
235};
236
237struct e1000_hw {
238 void *back;
239
240 u8 __iomem *hw_addr;
241 u8 __iomem *flash_address;
242 unsigned long io_base;
243
244 struct e1000_mac_info mac;
245 struct e1000_mbx_info mbx;
246
247 union {
248 struct e1000_dev_spec_vf vf;
249 } dev_spec;
250
251 u16 device_id;
252 u16 subsystem_vendor_id;
253 u16 subsystem_device_id;
254 u16 vendor_id;
255
256 u8 revision_id;
257};
258
259/* These functions must be implemented by drivers */
260void e1000_rlpml_set_vf(struct e1000_hw *, u16);
261void e1000_init_function_pointers_vf(struct e1000_hw *hw);
262s32 e1000_init_mac_params_vf(struct e1000_hw *hw);
263
264
265#endif /* _E1000_VF_H_ */