Diffstat (limited to 'drivers/net/ixgb')
-rw-r--r--  drivers/net/ixgb/Makefile          35
-rw-r--r--  drivers/net/ixgb/ixgb.h           200
-rw-r--r--  drivers/net/ixgb/ixgb_ee.c        774
-rw-r--r--  drivers/net/ixgb/ixgb_ee.h        106
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c   704
-rw-r--r--  drivers/net/ixgb/ixgb_hw.c       1202
-rw-r--r--  drivers/net/ixgb/ixgb_hw.h        847
-rw-r--r--  drivers/net/ixgb/ixgb_ids.h        48
-rw-r--r--  drivers/net/ixgb/ixgb_main.c     2166
-rw-r--r--  drivers/net/ixgb/ixgb_osdep.h      96
-rw-r--r--  drivers/net/ixgb/ixgb_param.c     476
11 files changed, 6654 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ixgb/Makefile b/drivers/net/ixgb/Makefile
new file mode 100644
index 000000000000..7c7aff1ea7d5
--- /dev/null
+++ b/drivers/net/ixgb/Makefile
@@ -0,0 +1,35 @@
1################################################################################
2#
3#
4# Copyright(c) 1999 - 2002 Intel Corporation. All rights reserved.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms of the GNU General Public License as published by the Free
8# Software Foundation; either version 2 of the License, or (at your option)
9# any later version.
10#
11# This program is distributed in the hope that it will be useful, but WITHOUT
12# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14# more details.
15#
16# You should have received a copy of the GNU General Public License along with
17# this program; if not, write to the Free Software Foundation, Inc., 59
18# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19#
20# The full GNU General Public License is included in this distribution in the
21# file called LICENSE.
22#
23# Contact Information:
24# Linux NICS <linux.nics@intel.com>
25# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26#
27################################################################################
28
29#
30# Makefile for the Intel(R) PRO/10GbE driver
31#
32
33obj-$(CONFIG_IXGB) += ixgb.o
34
35ixgb-objs := ixgb_main.o ixgb_hw.o ixgb_ee.o ixgb_ethtool.o ixgb_param.o
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
new file mode 100644
index 000000000000..26c4f15f7fc0
--- /dev/null
+++ b/drivers/net/ixgb/ixgb.h
@@ -0,0 +1,200 @@
1/*******************************************************************************
2
3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _IXGB_H_
30#define _IXGB_H_
31
32#include <linux/stddef.h>
33#include <linux/config.h>
34#include <linux/module.h>
35#include <linux/types.h>
36#include <asm/byteorder.h>
37#include <linux/init.h>
38#include <linux/mm.h>
39#include <linux/errno.h>
40#include <linux/ioport.h>
41#include <linux/pci.h>
42#include <linux/kernel.h>
43#include <linux/netdevice.h>
44#include <linux/etherdevice.h>
45#include <linux/skbuff.h>
46#include <linux/delay.h>
47#include <linux/timer.h>
48#include <linux/slab.h>
49#include <linux/vmalloc.h>
50#include <linux/interrupt.h>
51#include <linux/string.h>
52#include <linux/pagemap.h>
53#include <linux/dma-mapping.h>
54#include <linux/bitops.h>
55#include <asm/io.h>
56#include <asm/irq.h>
57#include <linux/capability.h>
58#include <linux/in.h>
59#include <linux/ip.h>
60#include <linux/tcp.h>
61#include <linux/udp.h>
62#include <net/pkt_sched.h>
63#include <linux/list.h>
64#include <linux/reboot.h>
65#ifdef NETIF_F_TSO
66#include <net/checksum.h>
67#endif
68
69#include <linux/ethtool.h>
70#include <linux/if_vlan.h>
71
72#define BAR_0 0
73#define BAR_1 1
74#define BAR_5 5
75
76struct ixgb_adapter;
77#include "ixgb_hw.h"
78#include "ixgb_ee.h"
79#include "ixgb_ids.h"
80
81#ifdef _DEBUG_DRIVER_
82#define IXGB_DBG(args...) printk(KERN_DEBUG "ixgb: " args)
83#else
84#define IXGB_DBG(args...)
85#endif
86
87#define IXGB_ERR(args...) printk(KERN_ERR "ixgb: " args)
88
89/* TX/RX descriptor defines */
90#define DEFAULT_TXD 256
91#define MAX_TXD 4096
92#define MIN_TXD 64
93
94/* The hardware cannot reliably support more than 512 descriptors owned
95 * by its descriptor cache; otherwise the ring may become unreliable
96 * under heavy receive load */
97/* #define DEFAULT_RXD 1024 */
98/* #define MAX_RXD 4096 */
99#define DEFAULT_RXD 512
100#define MAX_RXD 512
101#define MIN_RXD 64
102
103/* Supported Rx Buffer Sizes */
104#define IXGB_RXBUFFER_2048 2048
105#define IXGB_RXBUFFER_4096 4096
106#define IXGB_RXBUFFER_8192 8192
107#define IXGB_RXBUFFER_16384 16384
108
109/* How many Tx Descriptors do we need to call netif_wake_queue? */
110#define IXGB_TX_QUEUE_WAKE 16
111
112/* How many Rx Buffers do we bundle into one write to the hardware ? */
113#define IXGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
114
115/* only works for sizes that are powers of 2 */
116#define IXGB_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
117
118/* wrapper around a pointer to a socket buffer,
119 * so a DMA handle can be stored along with the buffer */
120struct ixgb_buffer {
121 struct sk_buff *skb;
122 uint64_t dma;
123 unsigned long time_stamp;
124 uint16_t length;
125 uint16_t next_to_watch;
126};
127
128struct ixgb_desc_ring {
129 /* pointer to the descriptor ring memory */
130 void *desc;
131 /* physical address of the descriptor ring */
132 dma_addr_t dma;
133 /* length of descriptor ring in bytes */
134 unsigned int size;
135 /* number of descriptors in the ring */
136 unsigned int count;
137 /* next descriptor to associate a buffer with */
138 unsigned int next_to_use;
139 /* next descriptor to check for DD status bit */
140 unsigned int next_to_clean;
141 /* array of buffer information structs */
142 struct ixgb_buffer *buffer_info;
143};
144
145#define IXGB_DESC_UNUSED(R) \
146 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
147 (R)->next_to_clean - (R)->next_to_use - 1)
148
149#define IXGB_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
150#define IXGB_RX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_rx_desc)
151#define IXGB_TX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_tx_desc)
152#define IXGB_CONTEXT_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_context_desc)
153
154/* board specific private data structure */
155
156struct ixgb_adapter {
157 struct timer_list watchdog_timer;
158 struct vlan_group *vlgrp;
159 uint32_t bd_number;
160 uint32_t rx_buffer_len;
161 uint32_t part_num;
162 uint16_t link_speed;
163 uint16_t link_duplex;
164 spinlock_t tx_lock;
165 atomic_t irq_sem;
166 struct work_struct tx_timeout_task;
167
168 struct timer_list blink_timer;
169 unsigned long led_status;
170
171 /* TX */
172 struct ixgb_desc_ring tx_ring;
173 unsigned long timeo_start;
174 uint32_t tx_cmd_type;
175 uint64_t hw_csum_tx_good;
176 uint64_t hw_csum_tx_error;
177 uint32_t tx_int_delay;
178 boolean_t tx_int_delay_enable;
179 boolean_t detect_tx_hung;
180
181 /* RX */
182 struct ixgb_desc_ring rx_ring;
183 uint64_t hw_csum_rx_error;
184 uint64_t hw_csum_rx_good;
185 uint32_t rx_int_delay;
186 boolean_t rx_csum;
187
188 /* OS defined structs */
189 struct net_device *netdev;
190 struct pci_dev *pdev;
191 struct net_device_stats net_stats;
192
193 /* structs defined in ixgb_hw.h */
194 struct ixgb_hw hw;
195 struct ixgb_hw_stats stats;
196#ifdef CONFIG_PCI_MSI
197 boolean_t have_msi;
198#endif
199};
200#endif /* _IXGB_H_ */
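
The IXGB_DESC_UNUSED macro above treats next_to_use and next_to_clean as indices
into a circular ring and always keeps one slot unused so that an empty ring can be
told apart from a full one. The following is a minimal standalone sketch of that
arithmetic only; the demo_* names and the values in main() are made up for
illustration and are not part of the driver.

/* Standalone sketch of the IXGB_DESC_UNUSED arithmetic; illustrative only. */
#include <stdio.h>

struct demo_ring {
	unsigned int count;          /* descriptors in the ring          */
	unsigned int next_to_use;    /* next slot software will fill     */
	unsigned int next_to_clean;  /* next slot hardware has finished  */
};

/* Same expression as IXGB_DESC_UNUSED: number of free slots, always
 * leaving one descriptor unused so head == tail means "empty". */
static unsigned int demo_desc_unused(const struct demo_ring *r)
{
	return ((r->next_to_clean > r->next_to_use ? 0 : r->count) +
		r->next_to_clean - r->next_to_use - 1);
}

int main(void)
{
	struct demo_ring r = { .count = 256, .next_to_use = 10, .next_to_clean = 4 };

	/* clean is behind use: 256 + 4 - 10 - 1 = 249 free */
	printf("unused = %u\n", demo_desc_unused(&r));

	r.next_to_use = 4;
	r.next_to_clean = 10;
	/* clean is ahead of use: 0 + 10 - 4 - 1 = 5 free */
	printf("unused = %u\n", demo_desc_unused(&r));
	return 0;
}
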
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
new file mode 100644
index 000000000000..653e99f919ce
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -0,0 +1,774 @@
1/*******************************************************************************
2
3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "ixgb_hw.h"
30#include "ixgb_ee.h"
31/* Local prototypes */
32static uint16_t ixgb_shift_in_bits(struct ixgb_hw *hw);
33
34static void ixgb_shift_out_bits(struct ixgb_hw *hw,
35 uint16_t data,
36 uint16_t count);
37static void ixgb_standby_eeprom(struct ixgb_hw *hw);
38
39static boolean_t ixgb_wait_eeprom_command(struct ixgb_hw *hw);
40
41static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
42
43/******************************************************************************
44 * Raises the EEPROM's clock input.
45 *
46 * hw - Struct containing variables accessed by shared code
47 * eecd_reg - EECD's current value
48 *****************************************************************************/
49static void
50ixgb_raise_clock(struct ixgb_hw *hw,
51 uint32_t *eecd_reg)
52{
53 /* Raise the clock input to the EEPROM (by setting the SK bit), and then
54 * wait 50 microseconds.
55 */
56 *eecd_reg = *eecd_reg | IXGB_EECD_SK;
57 IXGB_WRITE_REG(hw, EECD, *eecd_reg);
58 udelay(50);
59 return;
60}
61
62/******************************************************************************
63 * Lowers the EEPROM's clock input.
64 *
65 * hw - Struct containing variables accessed by shared code
66 * eecd_reg - EECD's current value
67 *****************************************************************************/
68static void
69ixgb_lower_clock(struct ixgb_hw *hw,
70 uint32_t *eecd_reg)
71{
72 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
73 * wait 50 microseconds.
74 */
75 *eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
76 IXGB_WRITE_REG(hw, EECD, *eecd_reg);
77 udelay(50);
78 return;
79}
80
81/******************************************************************************
82 * Shift data bits out to the EEPROM.
83 *
84 * hw - Struct containing variables accessed by shared code
85 * data - data to send to the EEPROM
86 * count - number of bits to shift out
87 *****************************************************************************/
88static void
89ixgb_shift_out_bits(struct ixgb_hw *hw,
90 uint16_t data,
91 uint16_t count)
92{
93 uint32_t eecd_reg;
94 uint32_t mask;
95
96 /* We need to shift "count" bits out to the EEPROM. So, the value in the
97 * "data" parameter will be shifted out to the EEPROM one bit at a time.
98 * In order to do this, "data" must be broken down into bits.
99 */
100 mask = 0x01 << (count - 1);
101 eecd_reg = IXGB_READ_REG(hw, EECD);
102 eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
103 do {
104 /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
105 * and then raising and then lowering the clock (the SK bit controls
106 * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
107 * by setting "DI" to "0" and then raising and then lowering the clock.
108 */
109 eecd_reg &= ~IXGB_EECD_DI;
110
111 if(data & mask)
112 eecd_reg |= IXGB_EECD_DI;
113
114 IXGB_WRITE_REG(hw, EECD, eecd_reg);
115
116 udelay(50);
117
118 ixgb_raise_clock(hw, &eecd_reg);
119 ixgb_lower_clock(hw, &eecd_reg);
120
121 mask = mask >> 1;
122
123 } while(mask);
124
125 /* We leave the "DI" bit set to "0" when we leave this routine. */
126 eecd_reg &= ~IXGB_EECD_DI;
127 IXGB_WRITE_REG(hw, EECD, eecd_reg);
128 return;
129}
130
131/******************************************************************************
132 * Shift data bits in from the EEPROM
133 *
134 * hw - Struct containing variables accessed by shared code
135 *****************************************************************************/
136static uint16_t
137ixgb_shift_in_bits(struct ixgb_hw *hw)
138{
139 uint32_t eecd_reg;
140 uint32_t i;
141 uint16_t data;
142
143 /* In order to read a register from the EEPROM, we need to shift 16 bits
144 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
145 * the EEPROM (setting the SK bit), and then reading the value of the "DO"
146 * bit. During this "shifting in" process the "DI" bit should always be
147 * clear.
148 */
149
150 eecd_reg = IXGB_READ_REG(hw, EECD);
151
152 eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
153 data = 0;
154
155 for(i = 0; i < 16; i++) {
156 data = data << 1;
157 ixgb_raise_clock(hw, &eecd_reg);
158
159 eecd_reg = IXGB_READ_REG(hw, EECD);
160
161 eecd_reg &= ~(IXGB_EECD_DI);
162 if(eecd_reg & IXGB_EECD_DO)
163 data |= 1;
164
165 ixgb_lower_clock(hw, &eecd_reg);
166 }
167
168 return data;
169}
170
171/******************************************************************************
172 * Prepares EEPROM for access
173 *
174 * hw - Struct containing variables accessed by shared code
175 *
176 * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
177 * function should be called before issuing a command to the EEPROM.
178 *****************************************************************************/
179static void
180ixgb_setup_eeprom(struct ixgb_hw *hw)
181{
182 uint32_t eecd_reg;
183
184 eecd_reg = IXGB_READ_REG(hw, EECD);
185
186 /* Clear SK and DI */
187 eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI);
188 IXGB_WRITE_REG(hw, EECD, eecd_reg);
189
190 /* Set CS */
191 eecd_reg |= IXGB_EECD_CS;
192 IXGB_WRITE_REG(hw, EECD, eecd_reg);
193 return;
194}
195
196/******************************************************************************
197 * Returns EEPROM to a "standby" state
198 *
199 * hw - Struct containing variables accessed by shared code
200 *****************************************************************************/
201static void
202ixgb_standby_eeprom(struct ixgb_hw *hw)
203{
204 uint32_t eecd_reg;
205
206 eecd_reg = IXGB_READ_REG(hw, EECD);
207
208 /* Deselect EEPROM */
209 eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
210 IXGB_WRITE_REG(hw, EECD, eecd_reg);
211 udelay(50);
212
213 /* Clock high */
214 eecd_reg |= IXGB_EECD_SK;
215 IXGB_WRITE_REG(hw, EECD, eecd_reg);
216 udelay(50);
217
218 /* Select EEPROM */
219 eecd_reg |= IXGB_EECD_CS;
220 IXGB_WRITE_REG(hw, EECD, eecd_reg);
221 udelay(50);
222
223 /* Clock low */
224 eecd_reg &= ~IXGB_EECD_SK;
225 IXGB_WRITE_REG(hw, EECD, eecd_reg);
226 udelay(50);
227 return;
228}
229
230/******************************************************************************
231 * Raises then lowers the EEPROM's clock pin
232 *
233 * hw - Struct containing variables accessed by shared code
234 *****************************************************************************/
235static void
236ixgb_clock_eeprom(struct ixgb_hw *hw)
237{
238 uint32_t eecd_reg;
239
240 eecd_reg = IXGB_READ_REG(hw, EECD);
241
242 /* Rising edge of clock */
243 eecd_reg |= IXGB_EECD_SK;
244 IXGB_WRITE_REG(hw, EECD, eecd_reg);
245 udelay(50);
246
247 /* Falling edge of clock */
248 eecd_reg &= ~IXGB_EECD_SK;
249 IXGB_WRITE_REG(hw, EECD, eecd_reg);
250 udelay(50);
251 return;
252}
253
254/******************************************************************************
255 * Terminates a command by lowering the EEPROM's chip select pin
256 *
257 * hw - Struct containing variables accessed by shared code
258 *****************************************************************************/
259static void
260ixgb_cleanup_eeprom(struct ixgb_hw *hw)
261{
262 uint32_t eecd_reg;
263
264 eecd_reg = IXGB_READ_REG(hw, EECD);
265
266 eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI);
267
268 IXGB_WRITE_REG(hw, EECD, eecd_reg);
269
270 ixgb_clock_eeprom(hw);
271 return;
272}
273
274/******************************************************************************
275 * Waits for the EEPROM to finish the current command.
276 *
277 * hw - Struct containing variables accessed by shared code
278 *
279 * The command is done when the EEPROM's data out pin goes high.
280 *
281 * Returns:
282 * TRUE: EEPROM data pin is high before timeout.
283 * FALSE: Time expired.
284 *****************************************************************************/
285static boolean_t
286ixgb_wait_eeprom_command(struct ixgb_hw *hw)
287{
288 uint32_t eecd_reg;
289 uint32_t i;
290
291 /* Toggle the CS line. This in effect tells the EEPROM to actually execute
292 * the command in question.
293 */
294 ixgb_standby_eeprom(hw);
295
296 /* Now read DO repeatedly until it is high (equal to '1'). The EEPROM will
297 * signal that the command has been completed by raising the DO signal.
298 * If DO does not go high in 10 milliseconds, then error out.
299 */
300 for(i = 0; i < 200; i++) {
301 eecd_reg = IXGB_READ_REG(hw, EECD);
302
303 if(eecd_reg & IXGB_EECD_DO)
304 return (TRUE);
305
306 udelay(50);
307 }
308 ASSERT(0);
309 return (FALSE);
310}
311
312/******************************************************************************
313 * Verifies that the EEPROM has a valid checksum
314 *
315 * hw - Struct containing variables accessed by shared code
316 *
317 * Reads the first 64 16 bit words of the EEPROM and sums the values read.
318 * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
319 * valid.
320 *
321 * Returns:
322 * TRUE: Checksum is valid
323 * FALSE: Checksum is not valid.
324 *****************************************************************************/
325boolean_t
326ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
327{
328 uint16_t checksum = 0;
329 uint16_t i;
330
331 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
332 checksum += ixgb_read_eeprom(hw, i);
333
334 if(checksum == (uint16_t) EEPROM_SUM)
335 return (TRUE);
336 else
337 return (FALSE);
338}
339
340/******************************************************************************
341 * Calculates the EEPROM checksum and writes it to the EEPROM
342 *
343 * hw - Struct containing variables accessed by shared code
344 *
345 * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
346 * Writes the difference to word offset 63 of the EEPROM.
347 *****************************************************************************/
348void
349ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
350{
351 uint16_t checksum = 0;
352 uint16_t i;
353
354 for(i = 0; i < EEPROM_CHECKSUM_REG; i++)
355 checksum += ixgb_read_eeprom(hw, i);
356
357 checksum = (uint16_t) EEPROM_SUM - checksum;
358
359 ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
360 return;
361}
362
363/******************************************************************************
364 * Writes a 16 bit word to a given offset in the EEPROM.
365 *
366 * hw - Struct containing variables accessed by shared code
367 * reg - offset within the EEPROM to be written to
368 * data - 16 bit word to be written to the EEPROM
369 *
370 * If ixgb_update_eeprom_checksum is not called after this function, the
371 * EEPROM will most likely contain an invalid checksum.
372 *
373 *****************************************************************************/
374void
375ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
376{
377 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
378
379 /* Prepare the EEPROM for writing */
380 ixgb_setup_eeprom(hw);
381
382 /* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit opcode
383 * plus 4-bit dummy). This puts the EEPROM into write/erase mode.
384 */
385 ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5);
386 ixgb_shift_out_bits(hw, 0, 4);
387
388 /* Prepare the EEPROM */
389 ixgb_standby_eeprom(hw);
390
391 /* Send the Write command (3-bit opcode + 6-bit addr) */
392 ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3);
393 ixgb_shift_out_bits(hw, offset, 6);
394
395 /* Send the data */
396 ixgb_shift_out_bits(hw, data, 16);
397
398 ixgb_wait_eeprom_command(hw);
399
400 /* Recover from write */
401 ixgb_standby_eeprom(hw);
402
403 /* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit
404 * opcode plus 4-bit dummy). This takes the EEPROM out of write/erase
405 * mode.
406 */
407 ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5);
408 ixgb_shift_out_bits(hw, 0, 4);
409
410 /* Done with writing */
411 ixgb_cleanup_eeprom(hw);
412
413 /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
414 ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR;
415
416 return;
417}
418
419/******************************************************************************
420 * Reads a 16 bit word from the EEPROM.
421 *
422 * hw - Struct containing variables accessed by shared code
423 * offset - offset of 16 bit word in the EEPROM to read
424 *
425 * Returns:
426 * The 16-bit value read from the eeprom
427 *****************************************************************************/
428uint16_t
429ixgb_read_eeprom(struct ixgb_hw *hw,
430 uint16_t offset)
431{
432 uint16_t data;
433
434 /* Prepare the EEPROM for reading */
435 ixgb_setup_eeprom(hw);
436
437 /* Send the READ command (opcode + addr) */
438 ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3);
439 /*
440 * We have a 64 word EEPROM, so there are 6 address bits
441 */
442 ixgb_shift_out_bits(hw, offset, 6);
443
444 /* Read the data */
445 data = ixgb_shift_in_bits(hw);
446
447 /* End this read operation */
448 ixgb_standby_eeprom(hw);
449
450 return (data);
451}
452
453/******************************************************************************
454 * Reads eeprom and stores data in shared structure.
455 * Validates eeprom checksum and eeprom signature.
456 *
457 * hw - Struct containing variables accessed by shared code
458 *
459 * Returns:
460 * TRUE: if eeprom read is successful
461 * FALSE: otherwise.
462 *****************************************************************************/
463boolean_t
464ixgb_get_eeprom_data(struct ixgb_hw *hw)
465{
466 uint16_t i;
467 uint16_t checksum = 0;
468 struct ixgb_ee_map_type *ee_map;
469
470 DEBUGFUNC("ixgb_get_eeprom_data");
471
472 ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
473
474 DEBUGOUT("ixgb_ee: Reading eeprom data\n");
475 for(i = 0; i < IXGB_EEPROM_SIZE ; i++) {
476 uint16_t ee_data;
477 ee_data = ixgb_read_eeprom(hw, i);
478 checksum += ee_data;
479 hw->eeprom[i] = le16_to_cpu(ee_data);
480 }
481
482 if (checksum != (uint16_t) EEPROM_SUM) {
483 DEBUGOUT("ixgb_ee: Checksum invalid.\n");
484 /* clear the init_ctrl_reg_1 to signify that the cache is
485 * invalidated */
486 ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR;
487 return (FALSE);
488 }
489
490 if ((ee_map->init_ctrl_reg_1 & le16_to_cpu(EEPROM_ICW1_SIGNATURE_MASK))
491 != le16_to_cpu(EEPROM_ICW1_SIGNATURE_VALID)) {
492 DEBUGOUT("ixgb_ee: Signature invalid.\n");
493 return(FALSE);
494 }
495
496 return(TRUE);
497}
498
499/******************************************************************************
500 * Local function to check if the eeprom signature is good
501 * If the cached eeprom signature is not valid, calls ixgb_get_eeprom_data.
502 *
503 * hw - Struct containing variables accessed by shared code
504 *
505 * Returns:
506 * TRUE: eeprom signature was good and the eeprom read was successful
507 * FALSE: otherwise.
508 ******************************************************************************/
509static boolean_t
510ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
511{
512 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
513
514 if ((ee_map->init_ctrl_reg_1 & le16_to_cpu(EEPROM_ICW1_SIGNATURE_MASK))
515 == le16_to_cpu(EEPROM_ICW1_SIGNATURE_VALID)) {
516 return (TRUE);
517 } else {
518 return ixgb_get_eeprom_data(hw);
519 }
520}
521
522/******************************************************************************
523 * return a word from the eeprom
524 *
525 * hw - Struct containing variables accessed by shared code
526 * index - Offset of eeprom word
527 *
528 * Returns:
529 * Word at indexed offset in eeprom, if valid, 0 otherwise.
530 ******************************************************************************/
531uint16_t
532ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index)
533{
534
535 if ((index < IXGB_EEPROM_SIZE) &&
536 (ixgb_check_and_get_eeprom_data(hw) == TRUE)) {
537 return(hw->eeprom[index]);
538 }
539
540 return(0);
541}
542
543/******************************************************************************
544 * return the mac address from EEPROM
545 *
546 * hw - Struct containing variables accessed by shared code
547 * mac_addr - Ethernet Address if EEPROM contents are valid, 0 otherwise
548 *
549 * Returns: None.
550 ******************************************************************************/
551void
552ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
553 uint8_t *mac_addr)
554{
555 int i;
556 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
557
558 DEBUGFUNC("ixgb_get_ee_mac_addr");
559
560 if (ixgb_check_and_get_eeprom_data(hw) == TRUE) {
561 for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
562 mac_addr[i] = ee_map->mac_addr[i];
563 DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
564 }
565 }
566}
567
568/******************************************************************************
569 * return the compatibility flags from EEPROM
570 *
571 * hw - Struct containing variables accessed by shared code
572 *
573 * Returns:
574 * compatibility flags if EEPROM contents are valid, 0 otherwise
575 ******************************************************************************/
576uint16_t
577ixgb_get_ee_compatibility(struct ixgb_hw *hw)
578{
579 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
580
581 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
582 return(ee_map->compatibility);
583
584 return(0);
585}
586
587/******************************************************************************
588 * return the Printed Board Assembly number from EEPROM
589 *
590 * hw - Struct containing variables accessed by shared code
591 *
592 * Returns:
593 * PBA number if EEPROM contents are valid, 0 otherwise
594 ******************************************************************************/
595uint32_t
596ixgb_get_ee_pba_number(struct ixgb_hw *hw)
597{
598 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
599 return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
600 | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16));
601
602 return(0);
603}
604
605/******************************************************************************
606 * return the Initialization Control Word 1 from EEPROM
607 *
608 * hw - Struct containing variables accessed by shared code
609 *
610 * Returns:
611 * Initialization Control Word 1 if EEPROM contents are valid, 0 otherwise
612 ******************************************************************************/
613uint16_t
614ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
615{
616 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
617
618 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
619 return(ee_map->init_ctrl_reg_1);
620
621 return(0);
622}
623
624/******************************************************************************
625 * return the Initialization Control Word 2 from EEPROM
626 *
627 * hw - Struct containing variables accessed by shared code
628 *
629 * Returns:
630 * Initialization Control Word 2 if EEPROM contents are valid, 0 otherwise
631 ******************************************************************************/
632uint16_t
633ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
634{
635 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
636
637 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
638 return(ee_map->init_ctrl_reg_2);
639
640 return(0);
641}
642
643/******************************************************************************
644 * return the Subsystem Id from EEPROM
645 *
646 * hw - Struct containing variables accessed by shared code
647 *
648 * Returns:
649 * Subsystem Id if EEPROM contents are valid, 0 otherwise
650 ******************************************************************************/
651uint16_t
652ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
653{
654 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
655
656 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
657 return(ee_map->subsystem_id);
658
659 return(0);
660}
661
662/******************************************************************************
663 * return the Sub Vendor Id from EEPROM
664 *
665 * hw - Struct containing variables accessed by shared code
666 *
667 * Returns:
668 * Sub Vendor Id if EEPROM contents are valid, 0 otherwise
669 ******************************************************************************/
670uint16_t
671ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
672{
673 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
674
675 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
676 return(ee_map->subvendor_id);
677
678 return(0);
679}
680
681/******************************************************************************
682 * return the Device Id from EEPROM
683 *
684 * hw - Struct containing variables accessed by shared code
685 *
686 * Returns:
687 * Device Id if EEPROM contents are valid, 0 otherwise
688 ******************************************************************************/
689uint16_t
690ixgb_get_ee_device_id(struct ixgb_hw *hw)
691{
692 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
693
694 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
695 return(ee_map->device_id);
696
697 return(0);
698}
699
700/******************************************************************************
701 * return the Vendor Id from EEPROM
702 *
703 * hw - Struct containing variables accessed by shared code
704 *
705 * Returns:
706 * Vendor Id if EEPROM contents are valid, 0 otherwise
707 ******************************************************************************/
708uint16_t
709ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
710{
711 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
712
713 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
714 return(ee_map->vendor_id);
715
716 return(0);
717}
718
719/******************************************************************************
720 * return the Software Defined Pins Register from EEPROM
721 *
722 * hw - Struct containing variables accessed by shared code
723 *
724 * Returns:
725 * SDP Register if EEPROM contents are valid, 0 otherwise
726 ******************************************************************************/
727uint16_t
728ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
729{
730 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
731
732 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
733 return(ee_map->swdpins_reg);
734
735 return(0);
736}
737
738/******************************************************************************
739 * return the D3 Power Management Bits from EEPROM
740 *
741 * hw - Struct containing variables accessed by shared code
742 *
743 * Returns:
744 * D3 Power Management Bits if EEPROM contents are valid, 0 otherwise
745 ******************************************************************************/
746uint8_t
747ixgb_get_ee_d3_power(struct ixgb_hw *hw)
748{
749 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
750
751 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
752 return(ee_map->d3_power);
753
754 return(0);
755}
756
757/******************************************************************************
758 * return the D0 Power Management Bits from EEPROM
759 *
760 * hw - Struct containing variables accessed by shared code
761 *
762 * Returns:
763 * D0 Power Management Bits if EEPROM contents are valid, 0 otherwise
764 ******************************************************************************/
765uint8_t
766ixgb_get_ee_d0_power(struct ixgb_hw *hw)
767{
768 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
769
770 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
771 return(ee_map->d0_power);
772
773 return(0);
774}
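
The checksum scheme implemented above is simply that all 64 EEPROM words,
including the checksum word at offset 0x3F, must sum to 0xBABA modulo 2^16.
Below is a minimal userspace sketch of that arithmetic; the demo_* names and
the word contents are hypothetical and stand in for a real EEPROM image.

/* Sketch of the EEPROM checksum arithmetic; contents are made up. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_EE_SIZE     64
#define DEMO_SUM         0xBABA
#define DEMO_CSUM_OFFSET 0x3F

static void demo_update_checksum(uint16_t *ee)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < DEMO_CSUM_OFFSET; i++)
		sum += ee[i];                 /* sum words 0..62 */
	ee[DEMO_CSUM_OFFSET] = (uint16_t)(DEMO_SUM - sum);
}

static int demo_checksum_ok(const uint16_t *ee)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < DEMO_EE_SIZE; i++)
		sum += ee[i];                 /* sum all 64 words, mod 2^16 */
	return sum == DEMO_SUM;
}

int main(void)
{
	uint16_t ee[DEMO_EE_SIZE] = { 0x8086, 0x1048, 0x0034 }; /* arbitrary */

	demo_update_checksum(ee);
	printf("checksum word = 0x%04X, valid = %d\n",
	       ee[DEMO_CSUM_OFFSET], demo_checksum_ok(ee));
	return 0;
}
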
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h
new file mode 100644
index 000000000000..5190aa8761a2
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_ee.h
@@ -0,0 +1,106 @@
1/*******************************************************************************
2
3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _IXGB_EE_H_
30#define _IXGB_EE_H_
31
32#define IXGB_EEPROM_SIZE 64 /* Size in words */
33
34#define IXGB_ETH_LENGTH_OF_ADDRESS 6
35
36/* EEPROM Commands */
37#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */
38#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */
39#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */
40#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */
41#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */
42
43/* EEPROM MAP (Word Offsets) */
44#define EEPROM_IA_1_2_REG 0x0000
45#define EEPROM_IA_3_4_REG 0x0001
46#define EEPROM_IA_5_6_REG 0x0002
47#define EEPROM_COMPATIBILITY_REG 0x0003
48#define EEPROM_PBA_1_2_REG 0x0008
49#define EEPROM_PBA_3_4_REG 0x0009
50#define EEPROM_INIT_CONTROL1_REG 0x000A
51#define EEPROM_SUBSYS_ID_REG 0x000B
52#define EEPROM_SUBVEND_ID_REG 0x000C
53#define EEPROM_DEVICE_ID_REG 0x000D
54#define EEPROM_VENDOR_ID_REG 0x000E
55#define EEPROM_INIT_CONTROL2_REG 0x000F
56#define EEPROM_SWDPINS_REG 0x0020
57#define EEPROM_CIRCUIT_CTRL_REG 0x0021
58#define EEPROM_D0_D3_POWER_REG 0x0022
59#define EEPROM_FLASH_VERSION 0x0032
60#define EEPROM_CHECKSUM_REG 0x003F
61
62/* Mask bits for fields in Word 0x0a of the EEPROM */
63
64#define EEPROM_ICW1_SIGNATURE_MASK 0xC000
65#define EEPROM_ICW1_SIGNATURE_VALID 0x4000
66#define EEPROM_ICW1_SIGNATURE_CLEAR 0x0000
67
68/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
69#define EEPROM_SUM 0xBABA
70
71/* EEPROM Map Sizes (Byte Counts) */
72#define PBA_SIZE 4
73
74/* EEPROM Map defines (WORD OFFSETS)*/
75
76/* EEPROM structure */
77struct ixgb_ee_map_type {
78 uint8_t mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
79 uint16_t compatibility;
80 uint16_t reserved1[4];
81 uint32_t pba_number;
82 uint16_t init_ctrl_reg_1;
83 uint16_t subsystem_id;
84 uint16_t subvendor_id;
85 uint16_t device_id;
86 uint16_t vendor_id;
87 uint16_t init_ctrl_reg_2;
88 uint16_t oem_reserved[16];
89 uint16_t swdpins_reg;
90 uint16_t circuit_ctrl_reg;
91 uint8_t d3_power;
92 uint8_t d0_power;
93 uint16_t reserved2[28];
94 uint16_t checksum;
95};
96
97/* EEPROM Functions */
98uint16_t ixgb_read_eeprom(struct ixgb_hw *hw, uint16_t reg);
99
100boolean_t ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
101
102void ixgb_update_eeprom_checksum(struct ixgb_hw *hw);
103
104void ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t reg, uint16_t data);
105
106#endif /* _IXGB_EE_H_ */
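
For reference, the READ transaction that ixgb_read_eeprom() builds from these
opcodes is a 3-bit READ opcode followed by a 6-bit word address, shifted out
MSB-first, after which 16 data bits are clocked back in. The sketch below is
purely illustrative (userspace only, hypothetical demo_* names, no hardware
access) and just prints such a frame.

/* Prints the Microwire-style READ frame assembled from the opcodes above. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_READ_OPCODE 0x6  /* same value as EEPROM_READ_OPCODE */

static void demo_print_bits(uint16_t value, int count)
{
	int i;

	/* MSB first, exactly how ixgb_shift_out_bits() walks its mask */
	for (i = count - 1; i >= 0; i--)
		putchar((value >> i) & 1 ? '1' : '0');
}

int main(void)
{
	uint16_t offset = 0x0D;   /* e.g. EEPROM_DEVICE_ID_REG */

	printf("READ frame for word 0x%02X: ", offset);
	demo_print_bits(DEMO_READ_OPCODE, 3); /* opcode bits: 110 */
	putchar(' ');
	demo_print_bits(offset, 6);           /* 6 address bits    */
	printf("  (then 16 data bits shifted in)\n");
	return 0;
}
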
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
new file mode 100644
index 000000000000..aea10e8aaa72
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -0,0 +1,704 @@
1/*******************************************************************************
2
3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/* ethtool support for ixgb */
30
31#include "ixgb.h"
32
33#include <asm/uaccess.h>
34
35extern char ixgb_driver_name[];
36extern char ixgb_driver_version[];
37
38extern int ixgb_up(struct ixgb_adapter *adapter);
39extern void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
40extern void ixgb_reset(struct ixgb_adapter *adapter);
41extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
42extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
43extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
44extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
45extern void ixgb_update_stats(struct ixgb_adapter *adapter);
46
47struct ixgb_stats {
48 char stat_string[ETH_GSTRING_LEN];
49 int sizeof_stat;
50 int stat_offset;
51};
52
53#define IXGB_STAT(m) sizeof(((struct ixgb_adapter *)0)->m), \
54 offsetof(struct ixgb_adapter, m)
55static struct ixgb_stats ixgb_gstrings_stats[] = {
56 {"rx_packets", IXGB_STAT(net_stats.rx_packets)},
57 {"tx_packets", IXGB_STAT(net_stats.tx_packets)},
58 {"rx_bytes", IXGB_STAT(net_stats.rx_bytes)},
59 {"tx_bytes", IXGB_STAT(net_stats.tx_bytes)},
60 {"rx_errors", IXGB_STAT(net_stats.rx_errors)},
61 {"tx_errors", IXGB_STAT(net_stats.tx_errors)},
62 {"rx_dropped", IXGB_STAT(net_stats.rx_dropped)},
63 {"tx_dropped", IXGB_STAT(net_stats.tx_dropped)},
64 {"multicast", IXGB_STAT(net_stats.multicast)},
65 {"collisions", IXGB_STAT(net_stats.collisions)},
66
67/* { "rx_length_errors", IXGB_STAT(net_stats.rx_length_errors) }, */
68 {"rx_over_errors", IXGB_STAT(net_stats.rx_over_errors)},
69 {"rx_crc_errors", IXGB_STAT(net_stats.rx_crc_errors)},
70 {"rx_frame_errors", IXGB_STAT(net_stats.rx_frame_errors)},
71 {"rx_fifo_errors", IXGB_STAT(net_stats.rx_fifo_errors)},
72 {"rx_missed_errors", IXGB_STAT(net_stats.rx_missed_errors)},
73 {"tx_aborted_errors", IXGB_STAT(net_stats.tx_aborted_errors)},
74 {"tx_carrier_errors", IXGB_STAT(net_stats.tx_carrier_errors)},
75 {"tx_fifo_errors", IXGB_STAT(net_stats.tx_fifo_errors)},
76 {"tx_heartbeat_errors", IXGB_STAT(net_stats.tx_heartbeat_errors)},
77 {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
78 {"tx_deferred_ok", IXGB_STAT(stats.dc)},
79 {"rx_long_length_errors", IXGB_STAT(stats.roc)},
80 {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
81#ifdef NETIF_F_TSO
82 {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
83 {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
84#endif
85 {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
86 {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
87 {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
88 {"tx_flow_control_xoff", IXGB_STAT(stats.xofftxc)},
89 {"rx_csum_offload_good", IXGB_STAT(hw_csum_rx_good)},
90 {"rx_csum_offload_errors", IXGB_STAT(hw_csum_rx_error)},
91 {"tx_csum_offload_good", IXGB_STAT(hw_csum_tx_good)},
92 {"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)}
93};
94
95#define IXGB_STATS_LEN \
96 sizeof(ixgb_gstrings_stats) / sizeof(struct ixgb_stats)
97
98static int
99ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
100{
101 struct ixgb_adapter *adapter = netdev->priv;
102
103 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
104 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
105 ecmd->port = PORT_FIBRE;
106 ecmd->transceiver = XCVR_EXTERNAL;
107
108 if(netif_carrier_ok(adapter->netdev)) {
109 ecmd->speed = SPEED_10000;
110 ecmd->duplex = DUPLEX_FULL;
111 } else {
112 ecmd->speed = -1;
113 ecmd->duplex = -1;
114 }
115
116 ecmd->autoneg = AUTONEG_DISABLE;
117 return 0;
118}
119
120static int
121ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
122{
123 struct ixgb_adapter *adapter = netdev->priv;
124
125 if(ecmd->autoneg == AUTONEG_ENABLE ||
126 ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
127 return -EINVAL;
128
129 if(netif_running(adapter->netdev)) {
130 ixgb_down(adapter, TRUE);
131 ixgb_reset(adapter);
132 ixgb_up(adapter);
133 } else
134 ixgb_reset(adapter);
135
136 return 0;
137}
138
139static void
140ixgb_get_pauseparam(struct net_device *netdev,
141 struct ethtool_pauseparam *pause)
142{
143 struct ixgb_adapter *adapter = netdev->priv;
144 struct ixgb_hw *hw = &adapter->hw;
145
146 pause->autoneg = AUTONEG_DISABLE;
147
148 if(hw->fc.type == ixgb_fc_rx_pause)
149 pause->rx_pause = 1;
150 else if(hw->fc.type == ixgb_fc_tx_pause)
151 pause->tx_pause = 1;
152 else if(hw->fc.type == ixgb_fc_full) {
153 pause->rx_pause = 1;
154 pause->tx_pause = 1;
155 }
156}
157
158static int
159ixgb_set_pauseparam(struct net_device *netdev,
160 struct ethtool_pauseparam *pause)
161{
162 struct ixgb_adapter *adapter = netdev->priv;
163 struct ixgb_hw *hw = &adapter->hw;
164
165 if(pause->autoneg == AUTONEG_ENABLE)
166 return -EINVAL;
167
168 if(pause->rx_pause && pause->tx_pause)
169 hw->fc.type = ixgb_fc_full;
170 else if(pause->rx_pause && !pause->tx_pause)
171 hw->fc.type = ixgb_fc_rx_pause;
172 else if(!pause->rx_pause && pause->tx_pause)
173 hw->fc.type = ixgb_fc_tx_pause;
174 else if(!pause->rx_pause && !pause->tx_pause)
175 hw->fc.type = ixgb_fc_none;
176
177 if(netif_running(adapter->netdev)) {
178 ixgb_down(adapter, TRUE);
179 ixgb_up(adapter);
180 } else
181 ixgb_reset(adapter);
182
183 return 0;
184}
185
186static uint32_t
187ixgb_get_rx_csum(struct net_device *netdev)
188{
189 struct ixgb_adapter *adapter = netdev->priv;
190 return adapter->rx_csum;
191}
192
193static int
194ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
195{
196 struct ixgb_adapter *adapter = netdev->priv;
197 adapter->rx_csum = data;
198
199 if(netif_running(netdev)) {
200 ixgb_down(adapter,TRUE);
201 ixgb_up(adapter);
202 } else
203 ixgb_reset(adapter);
204 return 0;
205}
206
207static uint32_t
208ixgb_get_tx_csum(struct net_device *netdev)
209{
210 return (netdev->features & NETIF_F_HW_CSUM) != 0;
211}
212
213static int
214ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
215{
216 if (data)
217 netdev->features |= NETIF_F_HW_CSUM;
218 else
219 netdev->features &= ~NETIF_F_HW_CSUM;
220
221 return 0;
222}
223
224#ifdef NETIF_F_TSO
225static int
226ixgb_set_tso(struct net_device *netdev, uint32_t data)
227{
228 if(data)
229 netdev->features |= NETIF_F_TSO;
230 else
231 netdev->features &= ~NETIF_F_TSO;
232 return 0;
233}
234#endif /* NETIF_F_TSO */
235
236#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
237
238static int
239ixgb_get_regs_len(struct net_device *netdev)
240{
241#define IXGB_REG_DUMP_LEN 136*sizeof(uint32_t)
242 return IXGB_REG_DUMP_LEN;
243}
244
245static void
246ixgb_get_regs(struct net_device *netdev,
247 struct ethtool_regs *regs, void *p)
248{
249 struct ixgb_adapter *adapter = netdev->priv;
250 struct ixgb_hw *hw = &adapter->hw;
251 uint32_t *reg = p;
252 uint32_t *reg_start = reg;
253 uint8_t i;
254
255 regs->version = (adapter->hw.device_id << 16) | adapter->hw.subsystem_id;
256
257 /* General Registers */
258 *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */
259 *reg++ = IXGB_READ_REG(hw, CTRL1); /* 1 */
260 *reg++ = IXGB_READ_REG(hw, STATUS); /* 2 */
261 *reg++ = IXGB_READ_REG(hw, EECD); /* 3 */
262 *reg++ = IXGB_READ_REG(hw, MFS); /* 4 */
263
264 /* Interrupt */
265 *reg++ = IXGB_READ_REG(hw, ICR); /* 5 */
266 *reg++ = IXGB_READ_REG(hw, ICS); /* 6 */
267 *reg++ = IXGB_READ_REG(hw, IMS); /* 7 */
268 *reg++ = IXGB_READ_REG(hw, IMC); /* 8 */
269
270 /* Receive */
271 *reg++ = IXGB_READ_REG(hw, RCTL); /* 9 */
272 *reg++ = IXGB_READ_REG(hw, FCRTL); /* 10 */
273 *reg++ = IXGB_READ_REG(hw, FCRTH); /* 11 */
274 *reg++ = IXGB_READ_REG(hw, RDBAL); /* 12 */
275 *reg++ = IXGB_READ_REG(hw, RDBAH); /* 13 */
276 *reg++ = IXGB_READ_REG(hw, RDLEN); /* 14 */
277 *reg++ = IXGB_READ_REG(hw, RDH); /* 15 */
278 *reg++ = IXGB_READ_REG(hw, RDT); /* 16 */
279 *reg++ = IXGB_READ_REG(hw, RDTR); /* 17 */
280 *reg++ = IXGB_READ_REG(hw, RXDCTL); /* 18 */
281 *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */
282 *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
283
284 for (i = 0; i < IXGB_RAR_ENTRIES; i++) {
285 *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
286 *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
287 }
288
289 /* Transmit */
290 *reg++ = IXGB_READ_REG(hw, TCTL); /* 53 */
291 *reg++ = IXGB_READ_REG(hw, TDBAL); /* 54 */
292 *reg++ = IXGB_READ_REG(hw, TDBAH); /* 55 */
293 *reg++ = IXGB_READ_REG(hw, TDLEN); /* 56 */
294 *reg++ = IXGB_READ_REG(hw, TDH); /* 57 */
295 *reg++ = IXGB_READ_REG(hw, TDT); /* 58 */
296 *reg++ = IXGB_READ_REG(hw, TIDV); /* 59 */
297 *reg++ = IXGB_READ_REG(hw, TXDCTL); /* 60 */
298 *reg++ = IXGB_READ_REG(hw, TSPMT); /* 61 */
299 *reg++ = IXGB_READ_REG(hw, PAP); /* 62 */
300
301 /* Physical */
302 *reg++ = IXGB_READ_REG(hw, PCSC1); /* 63 */
303 *reg++ = IXGB_READ_REG(hw, PCSC2); /* 64 */
304 *reg++ = IXGB_READ_REG(hw, PCSS1); /* 65 */
305 *reg++ = IXGB_READ_REG(hw, PCSS2); /* 66 */
306 *reg++ = IXGB_READ_REG(hw, XPCSS); /* 67 */
307 *reg++ = IXGB_READ_REG(hw, UCCR); /* 68 */
308 *reg++ = IXGB_READ_REG(hw, XPCSTC); /* 69 */
309 *reg++ = IXGB_READ_REG(hw, MACA); /* 70 */
310 *reg++ = IXGB_READ_REG(hw, APAE); /* 71 */
311 *reg++ = IXGB_READ_REG(hw, ARD); /* 72 */
312 *reg++ = IXGB_READ_REG(hw, AIS); /* 73 */
313 *reg++ = IXGB_READ_REG(hw, MSCA); /* 74 */
314 *reg++ = IXGB_READ_REG(hw, MSRWD); /* 75 */
315
316 /* Statistics */
317 *reg++ = IXGB_GET_STAT(adapter, tprl); /* 76 */
318 *reg++ = IXGB_GET_STAT(adapter, tprh); /* 77 */
319 *reg++ = IXGB_GET_STAT(adapter, gprcl); /* 78 */
320 *reg++ = IXGB_GET_STAT(adapter, gprch); /* 79 */
321 *reg++ = IXGB_GET_STAT(adapter, bprcl); /* 80 */
322 *reg++ = IXGB_GET_STAT(adapter, bprch); /* 81 */
323 *reg++ = IXGB_GET_STAT(adapter, mprcl); /* 82 */
324 *reg++ = IXGB_GET_STAT(adapter, mprch); /* 83 */
325 *reg++ = IXGB_GET_STAT(adapter, uprcl); /* 84 */
326 *reg++ = IXGB_GET_STAT(adapter, uprch); /* 85 */
327 *reg++ = IXGB_GET_STAT(adapter, vprcl); /* 86 */
328 *reg++ = IXGB_GET_STAT(adapter, vprch); /* 87 */
329 *reg++ = IXGB_GET_STAT(adapter, jprcl); /* 88 */
330 *reg++ = IXGB_GET_STAT(adapter, jprch); /* 89 */
331 *reg++ = IXGB_GET_STAT(adapter, gorcl); /* 90 */
332 *reg++ = IXGB_GET_STAT(adapter, gorch); /* 91 */
333 *reg++ = IXGB_GET_STAT(adapter, torl); /* 92 */
334 *reg++ = IXGB_GET_STAT(adapter, torh); /* 93 */
335 *reg++ = IXGB_GET_STAT(adapter, rnbc); /* 94 */
336 *reg++ = IXGB_GET_STAT(adapter, ruc); /* 95 */
337 *reg++ = IXGB_GET_STAT(adapter, roc); /* 96 */
338 *reg++ = IXGB_GET_STAT(adapter, rlec); /* 97 */
339 *reg++ = IXGB_GET_STAT(adapter, crcerrs); /* 98 */
340 *reg++ = IXGB_GET_STAT(adapter, icbc); /* 99 */
341 *reg++ = IXGB_GET_STAT(adapter, ecbc); /* 100 */
342 *reg++ = IXGB_GET_STAT(adapter, mpc); /* 101 */
343 *reg++ = IXGB_GET_STAT(adapter, tptl); /* 102 */
344 *reg++ = IXGB_GET_STAT(adapter, tpth); /* 103 */
345 *reg++ = IXGB_GET_STAT(adapter, gptcl); /* 104 */
346 *reg++ = IXGB_GET_STAT(adapter, gptch); /* 105 */
347 *reg++ = IXGB_GET_STAT(adapter, bptcl); /* 106 */
348 *reg++ = IXGB_GET_STAT(adapter, bptch); /* 107 */
349 *reg++ = IXGB_GET_STAT(adapter, mptcl); /* 108 */
350 *reg++ = IXGB_GET_STAT(adapter, mptch); /* 109 */
351 *reg++ = IXGB_GET_STAT(adapter, uptcl); /* 110 */
352 *reg++ = IXGB_GET_STAT(adapter, uptch); /* 111 */
353 *reg++ = IXGB_GET_STAT(adapter, vptcl); /* 112 */
354 *reg++ = IXGB_GET_STAT(adapter, vptch); /* 113 */
355 *reg++ = IXGB_GET_STAT(adapter, jptcl); /* 114 */
356 *reg++ = IXGB_GET_STAT(adapter, jptch); /* 115 */
357 *reg++ = IXGB_GET_STAT(adapter, gotcl); /* 116 */
358 *reg++ = IXGB_GET_STAT(adapter, gotch); /* 117 */
359 *reg++ = IXGB_GET_STAT(adapter, totl); /* 118 */
360 *reg++ = IXGB_GET_STAT(adapter, toth); /* 119 */
361 *reg++ = IXGB_GET_STAT(adapter, dc); /* 120 */
362 *reg++ = IXGB_GET_STAT(adapter, plt64c); /* 121 */
363 *reg++ = IXGB_GET_STAT(adapter, tsctc); /* 122 */
364 *reg++ = IXGB_GET_STAT(adapter, tsctfc); /* 123 */
365 *reg++ = IXGB_GET_STAT(adapter, ibic); /* 124 */
366 *reg++ = IXGB_GET_STAT(adapter, rfc); /* 125 */
367 *reg++ = IXGB_GET_STAT(adapter, lfc); /* 126 */
368 *reg++ = IXGB_GET_STAT(adapter, pfrc); /* 127 */
369 *reg++ = IXGB_GET_STAT(adapter, pftc); /* 128 */
370 *reg++ = IXGB_GET_STAT(adapter, mcfrc); /* 129 */
371 *reg++ = IXGB_GET_STAT(adapter, mcftc); /* 130 */
372 *reg++ = IXGB_GET_STAT(adapter, xonrxc); /* 131 */
373 *reg++ = IXGB_GET_STAT(adapter, xontxc); /* 132 */
374 *reg++ = IXGB_GET_STAT(adapter, xoffrxc); /* 133 */
375 *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */
376 *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */
377
378 regs->len = (reg - reg_start) * sizeof(uint32_t);
379}
380
381static int
382ixgb_get_eeprom_len(struct net_device *netdev)
383{
384 /* return size in bytes */
385 return (IXGB_EEPROM_SIZE << 1);
386}
387
388static int
389ixgb_get_eeprom(struct net_device *netdev,
390 struct ethtool_eeprom *eeprom, uint8_t *bytes)
391{
392 struct ixgb_adapter *adapter = netdev->priv;
393 struct ixgb_hw *hw = &adapter->hw;
394 uint16_t *eeprom_buff;
395 int i, max_len, first_word, last_word;
396 int ret_val = 0;
397
398 if(eeprom->len == 0) {
399 ret_val = -EINVAL;
400 goto geeprom_error;
401 }
402
403 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
404
405 max_len = ixgb_get_eeprom_len(netdev);
406
407 if(eeprom->offset > eeprom->offset + eeprom->len) {
408 ret_val = -EINVAL;
409 goto geeprom_error;
410 }
411
412 if((eeprom->offset + eeprom->len) > max_len)
413 eeprom->len = (max_len - eeprom->offset);
414
415 first_word = eeprom->offset >> 1;
416 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
417
418 eeprom_buff = kmalloc(sizeof(uint16_t) *
419 (last_word - first_word + 1), GFP_KERNEL);
420 if(!eeprom_buff)
421 return -ENOMEM;
422
423 /* note the eeprom was good because the driver loaded */
424 for(i = 0; i <= (last_word - first_word); i++) {
425 eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
426 }
427
428 memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
429 eeprom->len);
430 kfree(eeprom_buff);
431
432geeprom_error:
433 return ret_val;
434}
435
436static int
437ixgb_set_eeprom(struct net_device *netdev,
438 struct ethtool_eeprom *eeprom, uint8_t *bytes)
439{
440 struct ixgb_adapter *adapter = netdev->priv;
441 struct ixgb_hw *hw = &adapter->hw;
442 uint16_t *eeprom_buff;
443 void *ptr;
444 int max_len, first_word, last_word;
445 uint16_t i;
446
447 if(eeprom->len == 0)
448 return -EINVAL;
449
450 if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
451 return -EFAULT;
452
453 max_len = ixgb_get_eeprom_len(netdev);
454
455 if(eeprom->offset > eeprom->offset + eeprom->len)
456 return -EINVAL;
457
458 if((eeprom->offset + eeprom->len) > max_len)
459 eeprom->len = (max_len - eeprom->offset);
460
461 first_word = eeprom->offset >> 1;
462 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
463 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
464 if(!eeprom_buff)
465 return -ENOMEM;
466
467 ptr = (void *)eeprom_buff;
468
469 if(eeprom->offset & 1) {
470 /* need read/modify/write of first changed EEPROM word */
471 /* only the second byte of the word is being modified */
472 eeprom_buff[0] = ixgb_read_eeprom(hw, first_word);
473 ptr++;
474 }
475 if((eeprom->offset + eeprom->len) & 1) {
476 /* need read/modify/write of last changed EEPROM word */
477 /* only the first byte of the word is being modified */
478 eeprom_buff[last_word - first_word]
479 = ixgb_read_eeprom(hw, last_word);
480 }
481
482 memcpy(ptr, bytes, eeprom->len);
483 for(i = 0; i <= (last_word - first_word); i++)
484 ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]);
485
486 /* Update the checksum over the first part of the EEPROM if needed */
487 if(first_word <= EEPROM_CHECKSUM_REG)
488 ixgb_update_eeprom_checksum(hw);
489
490 kfree(eeprom_buff);
491 return 0;
492}
493
494static void
495ixgb_get_drvinfo(struct net_device *netdev,
496 struct ethtool_drvinfo *drvinfo)
497{
498 struct ixgb_adapter *adapter = netdev->priv;
499
500 strncpy(drvinfo->driver, ixgb_driver_name, 32);
501 strncpy(drvinfo->version, ixgb_driver_version, 32);
502 strncpy(drvinfo->fw_version, "N/A", 32);
503 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
504 drvinfo->n_stats = IXGB_STATS_LEN;
505 drvinfo->regdump_len = ixgb_get_regs_len(netdev);
506 drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
507}
508
509static void
510ixgb_get_ringparam(struct net_device *netdev,
511 struct ethtool_ringparam *ring)
512{
513 struct ixgb_adapter *adapter = netdev->priv;
514 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
515 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
516
517 ring->rx_max_pending = MAX_RXD;
518 ring->tx_max_pending = MAX_TXD;
519 ring->rx_mini_max_pending = 0;
520 ring->rx_jumbo_max_pending = 0;
521 ring->rx_pending = rxdr->count;
522 ring->tx_pending = txdr->count;
523 ring->rx_mini_pending = 0;
524 ring->rx_jumbo_pending = 0;
525}
526
527static int
528ixgb_set_ringparam(struct net_device *netdev,
529 struct ethtool_ringparam *ring)
530{
531 struct ixgb_adapter *adapter = netdev->priv;
532 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
533 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
534 struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new;
535 int err;
536
537 tx_old = adapter->tx_ring;
538 rx_old = adapter->rx_ring;
539
540 if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
541 return -EINVAL;
542
543 if(netif_running(adapter->netdev))
544 ixgb_down(adapter,TRUE);
545
546 rxdr->count = max(ring->rx_pending,(uint32_t)MIN_RXD);
547 rxdr->count = min(rxdr->count,(uint32_t)MAX_RXD);
548 IXGB_ROUNDUP(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
549
550 txdr->count = max(ring->tx_pending,(uint32_t)MIN_TXD);
551 txdr->count = min(txdr->count,(uint32_t)MAX_TXD);
552 IXGB_ROUNDUP(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
553
554 if(netif_running(adapter->netdev)) {
555 /* Try to get new resources before deleting old */
556 if((err = ixgb_setup_rx_resources(adapter)))
557 goto err_setup_rx;
558 if((err = ixgb_setup_tx_resources(adapter)))
559 goto err_setup_tx;
560
561 /* save the new, restore the old in order to free it,
562 * then restore the new back again */
563
564 rx_new = adapter->rx_ring;
565 tx_new = adapter->tx_ring;
566 adapter->rx_ring = rx_old;
567 adapter->tx_ring = tx_old;
568 ixgb_free_rx_resources(adapter);
569 ixgb_free_tx_resources(adapter);
570 adapter->rx_ring = rx_new;
571 adapter->tx_ring = tx_new;
572 if((err = ixgb_up(adapter)))
573 return err;
574 }
575
576 return 0;
577err_setup_tx:
578 ixgb_free_rx_resources(adapter);
579err_setup_rx:
580 adapter->rx_ring = rx_old;
581 adapter->tx_ring = tx_old;
582 ixgb_up(adapter);
583 return err;
584}
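/* Illustrative sketch of the clamp-and-round step above: a request
 * for 1001 RX descriptors is clamped into [MIN_RXD, MAX_RXD] and then
 * rounded up to the next multiple of IXGB_REQ_RX_DESCRIPTOR_MULTIPLE
 * (8), giving 1008.  The (count + 7) & ~7 form below assumes the
 * multiple is 8, as both descriptor-multiple constants in ixgb_hw.h
 * are; the function name is illustrative.
 */
static uint32_t ring_size_example(uint32_t requested, uint32_t lo,
                                  uint32_t hi)
{
	uint32_t count = max(requested, lo);

	count = min(count, hi);
	return (count + 7) & ~7;                /* round up to a multiple of 8 */
}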
585
586/* toggle LED 4 times per second = 2 "blinks" per second */
587#define IXGB_ID_INTERVAL (HZ/4)
588
589/* bit defines for adapter->led_status */
590#define IXGB_LED_ON 0
591
592static void
593ixgb_led_blink_callback(unsigned long data)
594{
595 struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
596
597 if(test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
598 ixgb_led_off(&adapter->hw);
599 else
600 ixgb_led_on(&adapter->hw);
601
602 mod_timer(&adapter->blink_timer, jiffies + IXGB_ID_INTERVAL);
603}
604
605static int
606ixgb_phys_id(struct net_device *netdev, uint32_t data)
607{
608 struct ixgb_adapter *adapter = netdev->priv;
609
610 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
611 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
612
613 if(!adapter->blink_timer.function) {
614 init_timer(&adapter->blink_timer);
615 adapter->blink_timer.function = ixgb_led_blink_callback;
616 adapter->blink_timer.data = (unsigned long)adapter;
617 }
618
619 mod_timer(&adapter->blink_timer, jiffies);
620
621 set_current_state(TASK_INTERRUPTIBLE);
622 if(data)
623 schedule_timeout(data * HZ);
624 else
625 schedule_timeout(MAX_SCHEDULE_TIMEOUT);
626
627 del_timer_sync(&adapter->blink_timer);
628 ixgb_led_off(&adapter->hw);
629 clear_bit(IXGB_LED_ON, &adapter->led_status);
630
631 return 0;
632}
633
634static int
635ixgb_get_stats_count(struct net_device *netdev)
636{
637 return IXGB_STATS_LEN;
638}
639
640static void
641ixgb_get_ethtool_stats(struct net_device *netdev,
642 struct ethtool_stats *stats, uint64_t *data)
643{
644 struct ixgb_adapter *adapter = netdev->priv;
645 int i;
646
647 ixgb_update_stats(adapter);
648 for(i = 0; i < IXGB_STATS_LEN; i++) {
649 char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
650 data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
651 sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
652 }
653}
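/* Assumed shape, hedged: the loop above expects a table pairing each
 * ethtool stat name with the size and byte offset of the backing
 * field in struct ixgb_adapter.  The real table is defined earlier in
 * this file; only the member names stat_string, sizeof_stat and
 * stat_offset come from the code above, the struct and macro names
 * here are illustrative.
 */
struct ixgb_stats_example {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define IXGB_EXAMPLE_STAT(name, member) { name, \
	sizeof(((struct ixgb_adapter *)0)->member), \
	offsetof(struct ixgb_adapter, member) }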
654
655static void
656ixgb_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
657{
658 int i;
659
660 switch(stringset) {
661 case ETH_SS_STATS:
662 for(i=0; i < IXGB_STATS_LEN; i++) {
663 memcpy(data + i * ETH_GSTRING_LEN,
664 ixgb_gstrings_stats[i].stat_string,
665 ETH_GSTRING_LEN);
666 }
667 break;
668 }
669}
670
671struct ethtool_ops ixgb_ethtool_ops = {
672 .get_settings = ixgb_get_settings,
673 .set_settings = ixgb_set_settings,
674 .get_drvinfo = ixgb_get_drvinfo,
675 .get_regs_len = ixgb_get_regs_len,
676 .get_regs = ixgb_get_regs,
677 .get_link = ethtool_op_get_link,
678 .get_eeprom_len = ixgb_get_eeprom_len,
679 .get_eeprom = ixgb_get_eeprom,
680 .set_eeprom = ixgb_set_eeprom,
681 .get_ringparam = ixgb_get_ringparam,
682 .set_ringparam = ixgb_set_ringparam,
683 .get_pauseparam = ixgb_get_pauseparam,
684 .set_pauseparam = ixgb_set_pauseparam,
685 .get_rx_csum = ixgb_get_rx_csum,
686 .set_rx_csum = ixgb_set_rx_csum,
687 .get_tx_csum = ixgb_get_tx_csum,
688 .set_tx_csum = ixgb_set_tx_csum,
689 .get_sg = ethtool_op_get_sg,
690 .set_sg = ethtool_op_set_sg,
691#ifdef NETIF_F_TSO
692 .get_tso = ethtool_op_get_tso,
693 .set_tso = ixgb_set_tso,
694#endif
695 .get_strings = ixgb_get_strings,
696 .phys_id = ixgb_phys_id,
697 .get_stats_count = ixgb_get_stats_count,
698 .get_ethtool_stats = ixgb_get_ethtool_stats,
699};
700
701void ixgb_set_ethtool_ops(struct net_device *netdev)
702{
703 SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
704}
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
new file mode 100644
index 000000000000..69329c73095a
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -0,0 +1,1202 @@
1/*******************************************************************************
2
3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/* ixgb_hw.c
30 * Shared functions for accessing and configuring the adapter
31 */
32
33#include "ixgb_hw.h"
34#include "ixgb_ids.h"
35
36/* Local function prototypes */
37
38static uint32_t ixgb_hash_mc_addr(struct ixgb_hw *hw, uint8_t * mc_addr);
39
40static void ixgb_mta_set(struct ixgb_hw *hw, uint32_t hash_value);
41
42static void ixgb_get_bus_info(struct ixgb_hw *hw);
43
44static boolean_t ixgb_link_reset(struct ixgb_hw *hw);
45
46static void ixgb_optics_reset(struct ixgb_hw *hw);
47
48static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw);
49
50uint32_t ixgb_mac_reset(struct ixgb_hw *hw);
51
52uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
53{
54 uint32_t ctrl_reg;
55
56 ctrl_reg = IXGB_CTRL0_RST |
57 IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
58 IXGB_CTRL0_SDP2_DIR |
59 IXGB_CTRL0_SDP1_DIR |
60 IXGB_CTRL0_SDP0_DIR |
61 IXGB_CTRL0_SDP3 | /* Initial value 1101 */
62 IXGB_CTRL0_SDP2 |
63 IXGB_CTRL0_SDP0;
64
65#ifdef HP_ZX1
66 /* Workaround for 82597EX reset errata */
67 IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg);
68#else
69 IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
70#endif
71
72 /* Delay a few ms just to allow the reset to complete */
73 msec_delay(IXGB_DELAY_AFTER_RESET);
74 ctrl_reg = IXGB_READ_REG(hw, CTRL0);
75#ifdef DBG
76 /* Make sure the self-clearing global reset bit did self clear */
77 ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
78#endif
79
80 if (hw->phy_type == ixgb_phy_type_txn17401) {
81 ixgb_optics_reset(hw);
82 }
83
84 return ctrl_reg;
85}
86
87/******************************************************************************
88 * Reset the transmit and receive units; mask and clear all interrupts.
89 *
90 * hw - Struct containing variables accessed by shared code
91 *****************************************************************************/
92boolean_t
93ixgb_adapter_stop(struct ixgb_hw *hw)
94{
95 uint32_t ctrl_reg;
96 uint32_t icr_reg;
97
98 DEBUGFUNC("ixgb_adapter_stop");
99
100	/* If we are stopped or resetting, exit gracefully and wait to be
101 * started again before accessing the hardware.
102 */
103 if(hw->adapter_stopped) {
104 DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
105 return FALSE;
106 }
107
108 /* Set the Adapter Stopped flag so other driver functions stop
109 * touching the Hardware.
110 */
111 hw->adapter_stopped = TRUE;
112
113 /* Clear interrupt mask to stop board from generating interrupts */
114 DEBUGOUT("Masking off all interrupts\n");
115 IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
116
117 /* Disable the Transmit and Receive units. Then delay to allow
118 * any pending transactions to complete before we hit the MAC with
119 * the global reset.
120 */
121 IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
122 IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
123 msec_delay(IXGB_DELAY_BEFORE_RESET);
124
125 /* Issue a global reset to the MAC. This will reset the chip's
126	 * transmit, receive, DMA, and link units. It will not affect
127 * the current PCI configuration. The global reset bit is self-
128 * clearing, and should clear within a microsecond.
129 */
130 DEBUGOUT("Issuing a global reset to MAC\n");
131
132 ctrl_reg = ixgb_mac_reset(hw);
133
134 /* Clear interrupt mask to stop board from generating interrupts */
135 DEBUGOUT("Masking off all interrupts\n");
136 IXGB_WRITE_REG(hw, IMC, 0xffffffff);
137
138 /* Clear any pending interrupt events. */
139 icr_reg = IXGB_READ_REG(hw, ICR);
140
141 return (ctrl_reg & IXGB_CTRL0_RST);
142}
143
144
145/******************************************************************************
146 * Identifies the vendor of the optics module on the adapter. The SR adapters
147 * support two different types of XPAK optics, so it is necessary to determine
148 * which optics are present before applying any optics-specific workarounds.
149 *
150 * hw - Struct containing variables accessed by shared code.
151 *
152 * Returns: the vendor of the XPAK optics module.
153 *****************************************************************************/
154static ixgb_xpak_vendor
155ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
156{
157 uint32_t i;
158 uint16_t vendor_name[5];
159 ixgb_xpak_vendor xpak_vendor;
160
161 DEBUGFUNC("ixgb_identify_xpak_vendor");
162
163 /* Read the first few bytes of the vendor string from the XPAK NVR
164 * registers. These are standard XENPAK/XPAK registers, so all XPAK
165 * devices should implement them. */
166 for (i = 0; i < 5; i++) {
167 vendor_name[i] = ixgb_read_phy_reg(hw,
168 MDIO_PMA_PMD_XPAK_VENDOR_NAME
169 + i, IXGB_PHY_ADDRESS,
170 MDIO_PMA_PMD_DID);
171 }
172
173 /* Determine the actual vendor */
174 if (vendor_name[0] == 'I' &&
175 vendor_name[1] == 'N' &&
176 vendor_name[2] == 'T' &&
177 vendor_name[3] == 'E' && vendor_name[4] == 'L') {
178 xpak_vendor = ixgb_xpak_vendor_intel;
179 } else {
180 xpak_vendor = ixgb_xpak_vendor_infineon;
181 }
182
183 return (xpak_vendor);
184}
185
186/******************************************************************************
187 * Determine the physical layer module on the adapter.
188 *
189 * hw - Struct containing variables accessed by shared code. The device_id
190 * field must be (correctly) populated before calling this routine.
191 *
192 * Returns: the phy type of the adapter.
193 *****************************************************************************/
194static ixgb_phy_type
195ixgb_identify_phy(struct ixgb_hw *hw)
196{
197 ixgb_phy_type phy_type;
198 ixgb_xpak_vendor xpak_vendor;
199
200 DEBUGFUNC("ixgb_identify_phy");
201
202 /* Infer the transceiver/phy type from the device id */
203 switch (hw->device_id) {
204 case IXGB_DEVICE_ID_82597EX:
205 DEBUGOUT("Identified TXN17401 optics\n");
206 phy_type = ixgb_phy_type_txn17401;
207 break;
208
209 case IXGB_DEVICE_ID_82597EX_SR:
210 /* The SR adapters carry two different types of XPAK optics
211 * modules; read the vendor identifier to determine the exact
212 * type of optics. */
213 xpak_vendor = ixgb_identify_xpak_vendor(hw);
214 if (xpak_vendor == ixgb_xpak_vendor_intel) {
215 DEBUGOUT("Identified TXN17201 optics\n");
216 phy_type = ixgb_phy_type_txn17201;
217 } else {
218 DEBUGOUT("Identified G6005 optics\n");
219 phy_type = ixgb_phy_type_g6005;
220 }
221 break;
222 case IXGB_DEVICE_ID_82597EX_LR:
223 DEBUGOUT("Identified G6104 optics\n");
224 phy_type = ixgb_phy_type_g6104;
225 break;
226 default:
227 DEBUGOUT("Unknown physical layer module\n");
228 phy_type = ixgb_phy_type_unknown;
229 break;
230 }
231
232 return (phy_type);
233}
234
235/******************************************************************************
236 * Performs basic configuration of the adapter.
237 *
238 * hw - Struct containing variables accessed by shared code
239 *
240 * Resets the controller.
241 * Reads and validates the EEPROM.
242 * Initializes the receive address registers.
243 * Initializes the multicast table.
244 * Clears all on-chip counters.
245 * Calls routine to setup flow control settings.
246 * Leaves the transmit and receive units disabled and uninitialized.
247 *
248 * Returns:
249 * TRUE if successful,
250 * FALSE if unrecoverable problems were encountered.
251 *****************************************************************************/
252boolean_t
253ixgb_init_hw(struct ixgb_hw *hw)
254{
255 uint32_t i;
256 uint32_t ctrl_reg;
257 boolean_t status;
258
259 DEBUGFUNC("ixgb_init_hw");
260
261 /* Issue a global reset to the MAC. This will reset the chip's
262	 * transmit, receive, DMA, and link units. It will not affect
263 * the current PCI configuration. The global reset bit is self-
264 * clearing, and should clear within a microsecond.
265 */
266 DEBUGOUT("Issuing a global reset to MAC\n");
267
268 ctrl_reg = ixgb_mac_reset(hw);
269
270 DEBUGOUT("Issuing an EE reset to MAC\n");
271#ifdef HP_ZX1
272 /* Workaround for 82597EX reset errata */
273 IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
274#else
275 IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST);
276#endif
277
278 /* Delay a few ms just to allow the reset to complete */
279 msec_delay(IXGB_DELAY_AFTER_EE_RESET);
280
281 if (ixgb_get_eeprom_data(hw) == FALSE) {
282 return(FALSE);
283 }
284
285 /* Use the device id to determine the type of phy/transceiver. */
286 hw->device_id = ixgb_get_ee_device_id(hw);
287 hw->phy_type = ixgb_identify_phy(hw);
288
289 /* Setup the receive addresses.
290 * Receive Address Registers (RARs 0 - 15).
291 */
292 ixgb_init_rx_addrs(hw);
293
294 /*
295 * Check that a valid MAC address has been set.
296 * If it is not valid, we fail hardware init.
297 */
298 if (!mac_addr_valid(hw->curr_mac_addr)) {
299 DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
300 return(FALSE);
301 }
302
303 /* tell the routines in this file they can access hardware again */
304 hw->adapter_stopped = FALSE;
305
306 /* Fill in the bus_info structure */
307 ixgb_get_bus_info(hw);
308
309 /* Zero out the Multicast HASH table */
310 DEBUGOUT("Zeroing the MTA\n");
311 for(i = 0; i < IXGB_MC_TBL_SIZE; i++)
312 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
313
314 /* Zero out the VLAN Filter Table Array */
315 ixgb_clear_vfta(hw);
316
317 /* Zero all of the hardware counters */
318 ixgb_clear_hw_cntrs(hw);
319
320 /* Call a subroutine to setup flow control. */
321 status = ixgb_setup_fc(hw);
322
323 /* 82597EX errata: Call check-for-link in case lane deskew is locked */
324 ixgb_check_for_link(hw);
325
326 return (status);
327}
328
329/******************************************************************************
330 * Initializes receive address filters.
331 *
332 * hw - Struct containing variables accessed by shared code
333 *
334 * Places the MAC address in receive address register 0 and clears the rest
335	 * of the receive address registers. Clears the multicast table. Assumes
336 * the receiver is in reset when the routine is called.
337 *****************************************************************************/
338void
339ixgb_init_rx_addrs(struct ixgb_hw *hw)
340{
341 uint32_t i;
342
343 DEBUGFUNC("ixgb_init_rx_addrs");
344
345 /*
346 * If the current mac address is valid, assume it is a software override
347 * to the permanent address.
348 * Otherwise, use the permanent address from the eeprom.
349 */
350 if (!mac_addr_valid(hw->curr_mac_addr)) {
351
352 /* Get the MAC address from the eeprom for later reference */
353 ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
354
355 DEBUGOUT3(" Keeping Permanent MAC Addr =%.2X %.2X %.2X ",
356 hw->curr_mac_addr[0],
357 hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
358 DEBUGOUT3("%.2X %.2X %.2X\n",
359 hw->curr_mac_addr[3],
360 hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
361 } else {
362
363 /* Setup the receive address. */
364 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
365 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
366 hw->curr_mac_addr[0],
367 hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
368 DEBUGOUT3("%.2X %.2X %.2X\n",
369 hw->curr_mac_addr[3],
370 hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
371
372 ixgb_rar_set(hw, hw->curr_mac_addr, 0);
373 }
374
375 /* Zero out the other 15 receive addresses. */
376 DEBUGOUT("Clearing RAR[1-15]\n");
377 for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
378 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
379 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
380 }
381
382 return;
383}
384
385/******************************************************************************
386 * Updates the MAC's list of multicast addresses.
387 *
388 * hw - Struct containing variables accessed by shared code
389 * mc_addr_list - the list of new multicast addresses
390 * mc_addr_count - number of addresses
391 * pad - number of bytes between addresses in the list
392 *
393 * The given list replaces any existing list. Clears the last 15 receive
394 * address registers and the multicast table. Uses receive address registers
395 * for the first 15 multicast addresses, and hashes the rest into the
396 * multicast table.
397 *****************************************************************************/
398void
399ixgb_mc_addr_list_update(struct ixgb_hw *hw,
400 uint8_t *mc_addr_list,
401 uint32_t mc_addr_count,
402 uint32_t pad)
403{
404 uint32_t hash_value;
405 uint32_t i;
406 uint32_t rar_used_count = 1; /* RAR[0] is used for our MAC address */
407
408 DEBUGFUNC("ixgb_mc_addr_list_update");
409
410 /* Set the new number of MC addresses that we are being requested to use. */
411 hw->num_mc_addrs = mc_addr_count;
412
413 /* Clear RAR[1-15] */
414 DEBUGOUT(" Clearing RAR[1-15]\n");
415 for(i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
416 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
417 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
418 }
419
420 /* Clear the MTA */
421 DEBUGOUT(" Clearing MTA\n");
422 for(i = 0; i < IXGB_MC_TBL_SIZE; i++) {
423 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
424 }
425
426 /* Add the new addresses */
427 for(i = 0; i < mc_addr_count; i++) {
428 DEBUGOUT(" Adding the multicast addresses:\n");
429 DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
430 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
431 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
432 1],
433 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
434 2],
435 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
436 3],
437 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
438 4],
439 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
440 5]);
441
442		/* Place this multicast address in the RAR if there is room,
443 * else put it in the MTA
444 */
445 if(rar_used_count < IXGB_RAR_ENTRIES) {
446 ixgb_rar_set(hw,
447 mc_addr_list +
448 (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
449 rar_used_count);
450			DEBUGOUT1("Added a multicast address to RAR[%d]\n", rar_used_count);
451 rar_used_count++;
452 } else {
453 hash_value = ixgb_hash_mc_addr(hw,
454 mc_addr_list +
455 (i *
456 (IXGB_ETH_LENGTH_OF_ADDRESS
457 + pad)));
458
459 DEBUGOUT1(" Hash value = 0x%03X\n", hash_value);
460
461 ixgb_mta_set(hw, hash_value);
462 }
463 }
464
465 DEBUGOUT("MC Update Complete\n");
466 return;
467}
468
469/******************************************************************************
470 * Hashes an address to determine its location in the multicast table
471 *
472 * hw - Struct containing variables accessed by shared code
473 * mc_addr - the multicast address to hash
474 *
475 * Returns:
476 * The hash value
477 *****************************************************************************/
478static uint32_t
479ixgb_hash_mc_addr(struct ixgb_hw *hw,
480 uint8_t *mc_addr)
481{
482 uint32_t hash_value = 0;
483
484 DEBUGFUNC("ixgb_hash_mc_addr");
485
486 /* The portion of the address that is used for the hash table is
487 * determined by the mc_filter_type setting.
488 */
489 switch (hw->mc_filter_type) {
490 /* [0] [1] [2] [3] [4] [5]
491 * 01 AA 00 12 34 56
492 * LSB MSB - According to H/W docs */
493 case 0:
494 /* [47:36] i.e. 0x563 for above example address */
495 hash_value =
496 ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
497 break;
498 case 1: /* [46:35] i.e. 0xAC6 for above example address */
499 hash_value =
500 ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
501 break;
502 case 2: /* [45:34] i.e. 0x5D8 for above example address */
503 hash_value =
504 ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
505 break;
506 case 3: /* [43:32] i.e. 0x634 for above example address */
507 hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
508 break;
509 default:
510 /* Invalid mc_filter_type, what should we do? */
511 DEBUGOUT("MC filter type param set incorrectly\n");
512 ASSERT(0);
513 break;
514 }
515
516 hash_value &= 0xFFF;
517 return (hash_value);
518}
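/* Worked example: the filter-type-0 case for the example address
 * quoted in the comment above (01 AA 00 12 34 56):
 *   mc_addr[4] = 0x34, mc_addr[5] = 0x56
 *   (0x34 >> 4)                = 0x003
 *   ((uint16_t)0x56 << 4)      = 0x560
 *   ORed and masked with 0xFFF = 0x563, matching the comment.
 */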
519
520/******************************************************************************
521 * Sets the bit in the multicast table corresponding to the hash value.
522 *
523 * hw - Struct containing variables accessed by shared code
524 * hash_value - Multicast address hash value
525 *****************************************************************************/
526static void
527ixgb_mta_set(struct ixgb_hw *hw,
528 uint32_t hash_value)
529{
530 uint32_t hash_bit, hash_reg;
531 uint32_t mta_reg;
532
533 /* The MTA is a register array of 128 32-bit registers.
534 * It is treated like an array of 4096 bits. We want to set
535 * bit BitArray[hash_value]. So we figure out what register
536 * the bit is in, read it, OR in the new bit, then write
537 * back the new value. The register is determined by the
538 * upper 7 bits of the hash value and the bit within that
539	 * register is determined by the lower 5 bits of the value.
540 */
541 hash_reg = (hash_value >> 5) & 0x7F;
542 hash_bit = hash_value & 0x1F;
543
544 mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg);
545
546 mta_reg |= (1 << hash_bit);
547
548 IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
549
550 return;
551}
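/* Worked example, continuing from the hash routine above: for
 * hash_value = 0x563 the split works out as
 *   hash_reg = (0x563 >> 5) & 0x7F = 0x2B   (MTA register 43)
 *   hash_bit =  0x563 & 0x1F       = 0x03   (bit 3 of that register)
 * so the routine ORs (1 << 3) into MTA[43].
 */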
552
553/******************************************************************************
554 * Puts an ethernet address into a receive address register.
555 *
556 * hw - Struct containing variables accessed by shared code
557 * addr - Address to put into receive address register
558 * index - Receive address register to write
559 *****************************************************************************/
560void
561ixgb_rar_set(struct ixgb_hw *hw,
562 uint8_t *addr,
563 uint32_t index)
564{
565 uint32_t rar_low, rar_high;
566
567 DEBUGFUNC("ixgb_rar_set");
568
569 /* HW expects these in little endian so we reverse the byte order
570 * from network order (big endian) to little endian
571 */
572 rar_low = ((uint32_t) addr[0] |
573 ((uint32_t)addr[1] << 8) |
574 ((uint32_t)addr[2] << 16) |
575 ((uint32_t)addr[3] << 24));
576
577 rar_high = ((uint32_t) addr[4] |
578 ((uint32_t)addr[5] << 8) |
579 IXGB_RAH_AV);
580
581 IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
582 IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
583 return;
584}
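/* Worked packing example for a hypothetical address 00:A0:C9:12:34:56:
 *   rar_low  = 0x12C9A000               (addr[0] in the lowest byte)
 *   rar_high = 0x00005634 | IXGB_RAH_AV (Address Valid bit set)
 * i.e. the bytes are stored little-endian with the valid bit ORed
 * into the high half.
 */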
585
586/******************************************************************************
587 * Writes a value to the specified offset in the VLAN filter table.
588 *
589 * hw - Struct containing variables accessed by shared code
590	 * offset - Offset in VLAN filter table to write
591 * value - Value to write into VLAN filter table
592 *****************************************************************************/
593void
594ixgb_write_vfta(struct ixgb_hw *hw,
595 uint32_t offset,
596 uint32_t value)
597{
598 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
599 return;
600}
601
602/******************************************************************************
603	 * Clears the VLAN filter table
604 *
605 * hw - Struct containing variables accessed by shared code
606 *****************************************************************************/
607void
608ixgb_clear_vfta(struct ixgb_hw *hw)
609{
610 uint32_t offset;
611
612 for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
613 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
614 return;
615}
616
617/******************************************************************************
618 * Configures the flow control settings based on SW configuration.
619 *
620 * hw - Struct containing variables accessed by shared code
621 *****************************************************************************/
622
623boolean_t
624ixgb_setup_fc(struct ixgb_hw *hw)
625{
626 uint32_t ctrl_reg;
627 uint32_t pap_reg = 0; /* by default, assume no pause time */
628 boolean_t status = TRUE;
629
630 DEBUGFUNC("ixgb_setup_fc");
631
632 /* Get the current control reg 0 settings */
633 ctrl_reg = IXGB_READ_REG(hw, CTRL0);
634
635 /* Clear the Receive Pause Enable and Transmit Pause Enable bits */
636 ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
637
638 /* The possible values of the "flow_control" parameter are:
639 * 0: Flow control is completely disabled
640 * 1: Rx flow control is enabled (we can receive pause frames
641 * but not send pause frames).
642 * 2: Tx flow control is enabled (we can send pause frames
643 * but we do not support receiving pause frames).
644 * 3: Both Rx and TX flow control (symmetric) are enabled.
645 * other: Invalid.
646 */
647 switch (hw->fc.type) {
648 case ixgb_fc_none: /* 0 */
649 /* Set CMDC bit to disable Rx Flow control */
650 ctrl_reg |= (IXGB_CTRL0_CMDC);
651 break;
652 case ixgb_fc_rx_pause: /* 1 */
653 /* RX Flow control is enabled, and TX Flow control is
654 * disabled.
655 */
656 ctrl_reg |= (IXGB_CTRL0_RPE);
657 break;
658 case ixgb_fc_tx_pause: /* 2 */
659 /* TX Flow control is enabled, and RX Flow control is
660 * disabled, by a software over-ride.
661 */
662 ctrl_reg |= (IXGB_CTRL0_TPE);
663 pap_reg = hw->fc.pause_time;
664 break;
665 case ixgb_fc_full: /* 3 */
666 /* Flow control (both RX and TX) is enabled by a software
667 * over-ride.
668 */
669 ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
670 pap_reg = hw->fc.pause_time;
671 break;
672 default:
673 /* We should never get here. The value should be 0-3. */
674 DEBUGOUT("Flow control param set incorrectly\n");
675 ASSERT(0);
676 break;
677 }
678
679 /* Write the new settings */
680 IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
681
682 if (pap_reg != 0) {
683 IXGB_WRITE_REG(hw, PAP, pap_reg);
684 }
685
686 /* Set the flow control receive threshold registers. Normally,
687 * these registers will be set to a default threshold that may be
688 * adjusted later by the driver's runtime code. However, if the
689	 * ability to transmit pause frames is not enabled, then these
690 * registers will be set to 0.
691 */
692 if(!(hw->fc.type & ixgb_fc_tx_pause)) {
693 IXGB_WRITE_REG(hw, FCRTL, 0);
694 IXGB_WRITE_REG(hw, FCRTH, 0);
695 } else {
696 /* We need to set up the Receive Threshold high and low water
697 * marks as well as (optionally) enabling the transmission of XON
698 * frames. */
699 if(hw->fc.send_xon) {
700 IXGB_WRITE_REG(hw, FCRTL,
701 (hw->fc.low_water | IXGB_FCRTL_XONE));
702 } else {
703 IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water);
704 }
705 IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
706 }
707 return (status);
708}
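/* Illustrative only: the software-side configuration this routine
 * consumes.  The field names match what ixgb_setup_fc() reads above;
 * the numeric values are placeholders rather than driver defaults,
 * and the function name is illustrative.
 */
static void fc_config_example(struct ixgb_hw *hw)
{
	hw->fc.type = ixgb_fc_full;             /* Rx and Tx pause */
	hw->fc.pause_time = 0x0100;             /* written to PAP */
	hw->fc.send_xon = TRUE;                 /* sets IXGB_FCRTL_XONE */
	hw->fc.low_water = 0x2000;              /* FCRTL watermark */
	hw->fc.high_water = 0x8000;             /* FCRTH watermark */
}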
709
710/******************************************************************************
711 * Reads a word from a device over the Management Data Interface (MDI) bus.
712 * This interface is used to manage Physical layer devices.
713 *
714 * hw - Struct containing variables accessed by hw code
715 * reg_address - Offset of device register being read.
716 * phy_address - Address of device on MDI.
717 *
718 * Returns: Data word (16 bits) from MDI device.
719 *
720 * The 82597EX has support for several MDI access methods. This routine
721 * uses the new protocol MDI Single Command and Address Operation.
722 * This requires that first an address cycle command is sent, followed by a
723 * read command.
724 *****************************************************************************/
725uint16_t
726ixgb_read_phy_reg(struct ixgb_hw *hw,
727 uint32_t reg_address,
728 uint32_t phy_address,
729 uint32_t device_type)
730{
731 uint32_t i;
732 uint32_t data;
733 uint32_t command = 0;
734
735 ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
736 ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
737 ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
738
739 /* Setup and write the address cycle command */
740 command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
741 (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
742 (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
743 (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
744
745 IXGB_WRITE_REG(hw, MSCA, command);
746
747 /**************************************************************
748 ** Check every 10 usec to see if the address cycle completed
749 ** The COMMAND bit will clear when the operation is complete.
750 ** This may take as long as 64 usecs (we'll wait 100 usecs max)
751 ** from the CPU Write to the Ready bit assertion.
752 **************************************************************/
753
754 for(i = 0; i < 10; i++)
755 {
756 udelay(10);
757
758 command = IXGB_READ_REG(hw, MSCA);
759
760 if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
761 break;
762 }
763
764 ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
765
766 /* Address cycle complete, setup and write the read command */
767 command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
768 (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
769 (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
770 (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND));
771
772 IXGB_WRITE_REG(hw, MSCA, command);
773
774 /**************************************************************
775 ** Check every 10 usec to see if the read command completed
776 ** The COMMAND bit will clear when the operation is complete.
777 ** The read may take as long as 64 usecs (we'll wait 100 usecs max)
778 ** from the CPU Write to the Ready bit assertion.
779 **************************************************************/
780
781 for(i = 0; i < 10; i++)
782 {
783 udelay(10);
784
785 command = IXGB_READ_REG(hw, MSCA);
786
787 if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
788 break;
789 }
790
791 ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
792
793 /* Operation is complete, get the data from the MDIO Read/Write Data
794 * register and return.
795 */
796 data = IXGB_READ_REG(hw, MSRWD);
797 data >>= IXGB_MSRWD_READ_DATA_SHIFT;
798 return((uint16_t) data);
799}
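/* Example usage (the wrapper name is illustrative): a caller issues
 * the whole address-cycle-plus-read in one call, exactly as
 * ixgb_optics_reset() does further down for the PMA/PMD Control 1
 * register of the XENPAK module.
 */
static uint16_t read_pma_pmd_cr1_example(struct ixgb_hw *hw)
{
	return ixgb_read_phy_reg(hw, MDIO_PMA_PMD_CR1,
				 IXGB_PHY_ADDRESS, MDIO_PMA_PMD_DID);
}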
800
801/******************************************************************************
802 * Writes a word to a device over the Management Data Interface (MDI) bus.
803 * This interface is used to manage Physical layer devices.
804 *
805 * hw - Struct containing variables accessed by hw code
806 * reg_address - Offset of device register being read.
807 * phy_address - Address of device on MDI.
808 * device_type - Also known as the Device ID or DID.
809 * data - 16-bit value to be written
810 *
811 * Returns: void.
812 *
813 * The 82597EX has support for several MDI access methods. This routine
814 * uses the new protocol MDI Single Command and Address Operation.
815 * This requires that first an address cycle command is sent, followed by a
816 * write command.
817 *****************************************************************************/
818void
819ixgb_write_phy_reg(struct ixgb_hw *hw,
820 uint32_t reg_address,
821 uint32_t phy_address,
822 uint32_t device_type,
823 uint16_t data)
824{
825 uint32_t i;
826 uint32_t command = 0;
827
828 ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
829 ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
830 ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
831
832 /* Put the data in the MDIO Read/Write Data register */
833 IXGB_WRITE_REG(hw, MSRWD, (uint32_t)data);
834
835 /* Setup and write the address cycle command */
836 command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
837 (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
838 (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
839 (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
840
841 IXGB_WRITE_REG(hw, MSCA, command);
842
843 /**************************************************************
844 ** Check every 10 usec to see if the address cycle completed
845 ** The COMMAND bit will clear when the operation is complete.
846 ** This may take as long as 64 usecs (we'll wait 100 usecs max)
847 ** from the CPU Write to the Ready bit assertion.
848 **************************************************************/
849
850 for(i = 0; i < 10; i++)
851 {
852 udelay(10);
853
854 command = IXGB_READ_REG(hw, MSCA);
855
856 if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
857 break;
858 }
859
860 ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
861
862 /* Address cycle complete, setup and write the write command */
863 command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
864 (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
865 (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
866 (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND));
867
868 IXGB_WRITE_REG(hw, MSCA, command);
869
870 /**************************************************************
871	 ** Check every 10 usec to see if the write command completed
872 ** The COMMAND bit will clear when the operation is complete.
873 ** The write may take as long as 64 usecs (we'll wait 100 usecs max)
874 ** from the CPU Write to the Ready bit assertion.
875 **************************************************************/
876
877 for(i = 0; i < 10; i++)
878 {
879 udelay(10);
880
881 command = IXGB_READ_REG(hw, MSCA);
882
883 if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
884 break;
885 }
886
887 ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
888
889 /* Operation is complete, return. */
890}
891
892/******************************************************************************
893 * Checks to see if the link status of the hardware has changed.
894 *
895 * hw - Struct containing variables accessed by hw code
896 *
897 * Called by any function that needs to check the link status of the adapter.
898 *****************************************************************************/
899void
900ixgb_check_for_link(struct ixgb_hw *hw)
901{
902 uint32_t status_reg;
903 uint32_t xpcss_reg;
904
905 DEBUGFUNC("ixgb_check_for_link");
906
907 xpcss_reg = IXGB_READ_REG(hw, XPCSS);
908 status_reg = IXGB_READ_REG(hw, STATUS);
909
910 if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
911 (status_reg & IXGB_STATUS_LU)) {
912 hw->link_up = TRUE;
913 } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
914 (status_reg & IXGB_STATUS_LU)) {
915 DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n");
916 hw->link_up = ixgb_link_reset(hw);
917 } else {
918 /*
919 * 82597EX errata. Since the lane deskew problem may prevent
920 * link, reset the link before reporting link down.
921 */
922 hw->link_up = ixgb_link_reset(hw);
923 }
924 /* Anything else for 10 Gig?? */
925}
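/* Summary of the decision above, restating the code (no additional
 * hardware rule implied):
 *   XPCSS aligned and STATUS.LU set -> link_up = TRUE
 *   not aligned but STATUS.LU set   -> lane deskew slipped, reset link
 *   STATUS.LU clear                 -> 82597EX errata path, reset link
 */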
926
927/******************************************************************************
928	 * Check for a bad link condition that may have occurred.
929 * The indication is that the RFC / LFC registers may be incrementing
930 * continually. A full adapter reset is required to recover.
931 *
932 * hw - Struct containing variables accessed by hw code
933 *
934 * Called by any function that needs to check the link status of the adapter.
935 *****************************************************************************/
936boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
937{
938 uint32_t newLFC, newRFC;
939 boolean_t bad_link_returncode = FALSE;
940
941 if (hw->phy_type == ixgb_phy_type_txn17401) {
942 newLFC = IXGB_READ_REG(hw, LFC);
943 newRFC = IXGB_READ_REG(hw, RFC);
944 if ((hw->lastLFC + 250 < newLFC)
945 || (hw->lastRFC + 250 < newRFC)) {
946 DEBUGOUT
947 ("BAD LINK! too many LFC/RFC since last check\n");
948 bad_link_returncode = TRUE;
949 }
950 hw->lastLFC = newLFC;
951 hw->lastRFC = newRFC;
952 }
953
954 return bad_link_returncode;
955}
956
957/******************************************************************************
958 * Clears all hardware statistics counters.
959 *
960 * hw - Struct containing variables accessed by shared code
961 *****************************************************************************/
962void
963ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
964{
965 volatile uint32_t temp_reg;
966
967 DEBUGFUNC("ixgb_clear_hw_cntrs");
968
969	/* if we are stopped or resetting, exit gracefully */
970 if(hw->adapter_stopped) {
971 DEBUGOUT("Exiting because the adapter is stopped!!!\n");
972 return;
973 }
974
975 temp_reg = IXGB_READ_REG(hw, TPRL);
976 temp_reg = IXGB_READ_REG(hw, TPRH);
977 temp_reg = IXGB_READ_REG(hw, GPRCL);
978 temp_reg = IXGB_READ_REG(hw, GPRCH);
979 temp_reg = IXGB_READ_REG(hw, BPRCL);
980 temp_reg = IXGB_READ_REG(hw, BPRCH);
981 temp_reg = IXGB_READ_REG(hw, MPRCL);
982 temp_reg = IXGB_READ_REG(hw, MPRCH);
983 temp_reg = IXGB_READ_REG(hw, UPRCL);
984 temp_reg = IXGB_READ_REG(hw, UPRCH);
985 temp_reg = IXGB_READ_REG(hw, VPRCL);
986 temp_reg = IXGB_READ_REG(hw, VPRCH);
987 temp_reg = IXGB_READ_REG(hw, JPRCL);
988 temp_reg = IXGB_READ_REG(hw, JPRCH);
989 temp_reg = IXGB_READ_REG(hw, GORCL);
990 temp_reg = IXGB_READ_REG(hw, GORCH);
991 temp_reg = IXGB_READ_REG(hw, TORL);
992 temp_reg = IXGB_READ_REG(hw, TORH);
993 temp_reg = IXGB_READ_REG(hw, RNBC);
994 temp_reg = IXGB_READ_REG(hw, RUC);
995 temp_reg = IXGB_READ_REG(hw, ROC);
996 temp_reg = IXGB_READ_REG(hw, RLEC);
997 temp_reg = IXGB_READ_REG(hw, CRCERRS);
998 temp_reg = IXGB_READ_REG(hw, ICBC);
999 temp_reg = IXGB_READ_REG(hw, ECBC);
1000 temp_reg = IXGB_READ_REG(hw, MPC);
1001 temp_reg = IXGB_READ_REG(hw, TPTL);
1002 temp_reg = IXGB_READ_REG(hw, TPTH);
1003 temp_reg = IXGB_READ_REG(hw, GPTCL);
1004 temp_reg = IXGB_READ_REG(hw, GPTCH);
1005 temp_reg = IXGB_READ_REG(hw, BPTCL);
1006 temp_reg = IXGB_READ_REG(hw, BPTCH);
1007 temp_reg = IXGB_READ_REG(hw, MPTCL);
1008 temp_reg = IXGB_READ_REG(hw, MPTCH);
1009 temp_reg = IXGB_READ_REG(hw, UPTCL);
1010 temp_reg = IXGB_READ_REG(hw, UPTCH);
1011 temp_reg = IXGB_READ_REG(hw, VPTCL);
1012 temp_reg = IXGB_READ_REG(hw, VPTCH);
1013 temp_reg = IXGB_READ_REG(hw, JPTCL);
1014 temp_reg = IXGB_READ_REG(hw, JPTCH);
1015 temp_reg = IXGB_READ_REG(hw, GOTCL);
1016 temp_reg = IXGB_READ_REG(hw, GOTCH);
1017 temp_reg = IXGB_READ_REG(hw, TOTL);
1018 temp_reg = IXGB_READ_REG(hw, TOTH);
1019 temp_reg = IXGB_READ_REG(hw, DC);
1020 temp_reg = IXGB_READ_REG(hw, PLT64C);
1021 temp_reg = IXGB_READ_REG(hw, TSCTC);
1022 temp_reg = IXGB_READ_REG(hw, TSCTFC);
1023 temp_reg = IXGB_READ_REG(hw, IBIC);
1024 temp_reg = IXGB_READ_REG(hw, RFC);
1025 temp_reg = IXGB_READ_REG(hw, LFC);
1026 temp_reg = IXGB_READ_REG(hw, PFRC);
1027 temp_reg = IXGB_READ_REG(hw, PFTC);
1028 temp_reg = IXGB_READ_REG(hw, MCFRC);
1029 temp_reg = IXGB_READ_REG(hw, MCFTC);
1030 temp_reg = IXGB_READ_REG(hw, XONRXC);
1031 temp_reg = IXGB_READ_REG(hw, XONTXC);
1032 temp_reg = IXGB_READ_REG(hw, XOFFRXC);
1033 temp_reg = IXGB_READ_REG(hw, XOFFTXC);
1034 temp_reg = IXGB_READ_REG(hw, RJC);
1035 return;
1036}
1037
1038/******************************************************************************
1039 * Turns on the software controllable LED
1040 *
1041 * hw - Struct containing variables accessed by shared code
1042 *****************************************************************************/
1043void
1044ixgb_led_on(struct ixgb_hw *hw)
1045{
1046 uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
1047
1048 /* To turn on the LED, clear software-definable pin 0 (SDP0). */
1049 ctrl0_reg &= ~IXGB_CTRL0_SDP0;
1050 IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
1051 return;
1052}
1053
1054/******************************************************************************
1055 * Turns off the software controllable LED
1056 *
1057 * hw - Struct containing variables accessed by shared code
1058 *****************************************************************************/
1059void
1060ixgb_led_off(struct ixgb_hw *hw)
1061{
1062 uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
1063
1064 /* To turn off the LED, set software-definable pin 0 (SDP0). */
1065 ctrl0_reg |= IXGB_CTRL0_SDP0;
1066 IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
1067 return;
1068}
1069
1070/******************************************************************************
1071 * Gets the current PCI bus type, speed, and width of the hardware
1072 *
1073 * hw - Struct containing variables accessed by shared code
1074 *****************************************************************************/
1075static void
1076ixgb_get_bus_info(struct ixgb_hw *hw)
1077{
1078 uint32_t status_reg;
1079
1080 status_reg = IXGB_READ_REG(hw, STATUS);
1081
1082 hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ?
1083 ixgb_bus_type_pcix : ixgb_bus_type_pci;
1084
1085 if (hw->bus.type == ixgb_bus_type_pci) {
1086 hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ?
1087 ixgb_bus_speed_66 : ixgb_bus_speed_33;
1088 } else {
1089 switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) {
1090 case IXGB_STATUS_PCIX_SPD_66:
1091 hw->bus.speed = ixgb_bus_speed_66;
1092 break;
1093 case IXGB_STATUS_PCIX_SPD_100:
1094 hw->bus.speed = ixgb_bus_speed_100;
1095 break;
1096 case IXGB_STATUS_PCIX_SPD_133:
1097 hw->bus.speed = ixgb_bus_speed_133;
1098 break;
1099 default:
1100 hw->bus.speed = ixgb_bus_speed_reserved;
1101 break;
1102 }
1103 }
1104
1105 hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
1106 ixgb_bus_width_64 : ixgb_bus_width_32;
1107
1108 return;
1109}
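/* Worked decode using the STATUS masks from ixgb_hw.h: for a status
 * value whose relevant bits are 0x0000B000
 * (PCIX_MODE | PCIX_SPD_133 | BUS64):
 *   IXGB_STATUS_PCIX_MODE set -> bus.type  = ixgb_bus_type_pcix
 *   PCIX_SPD field == SPD_133 -> bus.speed = ixgb_bus_speed_133
 *   IXGB_STATUS_BUS64 set     -> bus.width = ixgb_bus_width_64
 */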
1110
1111/******************************************************************************
1112 * Tests a MAC address to ensure it is a valid Individual Address
1113 *
1114 * mac_addr - pointer to MAC address.
1115 *
1116 *****************************************************************************/
1117boolean_t
1118mac_addr_valid(uint8_t *mac_addr)
1119{
1120 boolean_t is_valid = TRUE;
1121 DEBUGFUNC("mac_addr_valid");
1122
1123 /* Make sure it is not a multicast address */
1124 if (IS_MULTICAST(mac_addr)) {
1125 DEBUGOUT("MAC address is multicast\n");
1126 is_valid = FALSE;
1127 }
1128 /* Not a broadcast address */
1129 else if (IS_BROADCAST(mac_addr)) {
1130 DEBUGOUT("MAC address is broadcast\n");
1131 is_valid = FALSE;
1132 }
1133 /* Reject the zero address */
1134 else if (mac_addr[0] == 0 &&
1135 mac_addr[1] == 0 &&
1136 mac_addr[2] == 0 &&
1137 mac_addr[3] == 0 &&
1138 mac_addr[4] == 0 &&
1139 mac_addr[5] == 0) {
1140 DEBUGOUT("MAC address is all zeros\n");
1141 is_valid = FALSE;
1142 }
1143 return (is_valid);
1144}
1145
1146/******************************************************************************
1147 * Resets the 10GbE link. Waits the settle time and returns the state of
1148 * the link.
1149 *
1150 * hw - Struct containing variables accessed by shared code
1151 *****************************************************************************/
1152boolean_t
1153ixgb_link_reset(struct ixgb_hw *hw)
1154{
1155 boolean_t link_status = FALSE;
1156 uint8_t wait_retries = MAX_RESET_ITERATIONS;
1157 uint8_t lrst_retries = MAX_RESET_ITERATIONS;
1158
1159 do {
1160 /* Reset the link */
1161 IXGB_WRITE_REG(hw, CTRL0,
1162 IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST);
1163
1164 /* Wait for link-up and lane re-alignment */
1165 do {
1166 udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET);
1167 link_status =
1168 ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU)
1169 && (IXGB_READ_REG(hw, XPCSS) &
1170 IXGB_XPCSS_ALIGN_STATUS)) ? TRUE : FALSE;
1171 } while (!link_status && --wait_retries);
1172
1173 } while (!link_status && --lrst_retries);
1174
1175 return link_status;
1176}
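/* Worked timing note on the retry budget above: MAX_RESET_ITERATIONS
 * = 8 polls of IXGB_DELAY_USECS_AFTER_LINK_RESET (13 us) each, i.e.
 * roughly 104 us of polling in total; wait_retries is shared across
 * the (up to 8) LRST attempts rather than being reset per attempt.
 */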
1177
1178/******************************************************************************
1179 * Resets the 10GbE optics module.
1180 *
1181 * hw - Struct containing variables accessed by shared code
1182 *****************************************************************************/
1183void
1184ixgb_optics_reset(struct ixgb_hw *hw)
1185{
1186 if (hw->phy_type == ixgb_phy_type_txn17401) {
1187 uint16_t mdio_reg;
1188
1189 ixgb_write_phy_reg(hw,
1190 MDIO_PMA_PMD_CR1,
1191 IXGB_PHY_ADDRESS,
1192 MDIO_PMA_PMD_DID,
1193 MDIO_PMA_PMD_CR1_RESET);
1194
1195 mdio_reg = ixgb_read_phy_reg( hw,
1196 MDIO_PMA_PMD_CR1,
1197 IXGB_PHY_ADDRESS,
1198 MDIO_PMA_PMD_DID);
1199 }
1200
1201 return;
1202}
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
new file mode 100644
index 000000000000..97898efe7cc8
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -0,0 +1,847 @@
1/*******************************************************************************
2
3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _IXGB_HW_H_
30#define _IXGB_HW_H_
31
32#include "ixgb_osdep.h"
33
34/* Enums */
35typedef enum {
36 ixgb_mac_unknown = 0,
37 ixgb_82597,
38 ixgb_num_macs
39} ixgb_mac_type;
40
41/* Types of physical layer modules */
42typedef enum {
43 ixgb_phy_type_unknown = 0,
44 ixgb_phy_type_g6005, /* 850nm, MM fiber, XPAK transceiver */
45 ixgb_phy_type_g6104, /* 1310nm, SM fiber, XPAK transceiver */
46 ixgb_phy_type_txn17201, /* 850nm, MM fiber, XPAK transceiver */
47 ixgb_phy_type_txn17401 /* 1310nm, SM fiber, XENPAK transceiver */
48} ixgb_phy_type;
49
50/* XPAK transceiver vendors, for the SR adapters */
51typedef enum {
52 ixgb_xpak_vendor_intel,
53 ixgb_xpak_vendor_infineon
54} ixgb_xpak_vendor;
55
56/* Media Types */
57typedef enum {
58 ixgb_media_type_unknown = 0,
59 ixgb_media_type_fiber = 1,
60 ixgb_num_media_types
61} ixgb_media_type;
62
63/* Flow Control Settings */
64typedef enum {
65 ixgb_fc_none = 0,
66 ixgb_fc_rx_pause = 1,
67 ixgb_fc_tx_pause = 2,
68 ixgb_fc_full = 3,
69 ixgb_fc_default = 0xFF
70} ixgb_fc_type;
71
72/* PCI bus types */
73typedef enum {
74 ixgb_bus_type_unknown = 0,
75 ixgb_bus_type_pci,
76 ixgb_bus_type_pcix
77} ixgb_bus_type;
78
79/* PCI bus speeds */
80typedef enum {
81 ixgb_bus_speed_unknown = 0,
82 ixgb_bus_speed_33,
83 ixgb_bus_speed_66,
84 ixgb_bus_speed_100,
85 ixgb_bus_speed_133,
86 ixgb_bus_speed_reserved
87} ixgb_bus_speed;
88
89/* PCI bus widths */
90typedef enum {
91 ixgb_bus_width_unknown = 0,
92 ixgb_bus_width_32,
93 ixgb_bus_width_64
94} ixgb_bus_width;
95
96#define IXGB_ETH_LENGTH_OF_ADDRESS 6
97
98#define IXGB_EEPROM_SIZE 64 /* Size in words */
99
100#define SPEED_10000 10000
101#define FULL_DUPLEX 2
102
103#define MIN_NUMBER_OF_DESCRIPTORS 8
104#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 /* 13 bits in RDLEN/TDLEN, 128B aligned */
105
106#define IXGB_DELAY_BEFORE_RESET 10 /* allow 10ms after idling rx/tx units */
107#define IXGB_DELAY_AFTER_RESET 1 /* allow 1ms after the reset */
108#define IXGB_DELAY_AFTER_EE_RESET 10 /* allow 10ms after the EEPROM reset */
109
110#define IXGB_DELAY_USECS_AFTER_LINK_RESET 13 /* allow 13 microseconds after the reset */
111 /* NOTE: this is MICROSECONDS */
112#define MAX_RESET_ITERATIONS 8 /* number of iterations to get things right */
113
114/* General Registers */
115#define IXGB_CTRL0 0x00000 /* Device Control Register 0 - RW */
116#define IXGB_CTRL1 0x00008 /* Device Control Register 1 - RW */
117#define IXGB_STATUS 0x00010 /* Device Status Register - RO */
118#define IXGB_EECD 0x00018 /* EEPROM/Flash Control/Data Register - RW */
119#define IXGB_MFS 0x00020 /* Maximum Frame Size - RW */
120
121/* Interrupt */
122#define IXGB_ICR 0x00080 /* Interrupt Cause Read - R/clr */
123#define IXGB_ICS 0x00088 /* Interrupt Cause Set - RW */
124#define IXGB_IMS 0x00090 /* Interrupt Mask Set/Read - RW */
125#define IXGB_IMC 0x00098 /* Interrupt Mask Clear - WO */
126
127/* Receive */
128#define IXGB_RCTL 0x00100 /* RX Control - RW */
129#define IXGB_FCRTL 0x00108 /* Flow Control Receive Threshold Low - RW */
130#define IXGB_FCRTH 0x00110 /* Flow Control Receive Threshold High - RW */
131#define IXGB_RDBAL 0x00118 /* RX Descriptor Base Low - RW */
132#define IXGB_RDBAH 0x0011C /* RX Descriptor Base High - RW */
133#define IXGB_RDLEN 0x00120 /* RX Descriptor Length - RW */
134#define IXGB_RDH 0x00128 /* RX Descriptor Head - RW */
135#define IXGB_RDT 0x00130 /* RX Descriptor Tail - RW */
136#define IXGB_RDTR 0x00138 /* RX Delay Timer Ring - RW */
137#define IXGB_RXDCTL 0x00140 /* Receive Descriptor Control - RW */
138#define IXGB_RAIDC 0x00148 /* Receive Adaptive Interrupt Delay Control - RW */
139#define IXGB_RXCSUM 0x00158 /* Receive Checksum Control - RW */
140#define IXGB_RA 0x00180 /* Receive Address Array Base - RW */
141#define IXGB_RAL 0x00180 /* Receive Address Low [0:15] - RW */
142#define IXGB_RAH 0x00184 /* Receive Address High [0:15] - RW */
143#define IXGB_MTA 0x00200 /* Multicast Table Array [0:127] - RW */
144#define IXGB_VFTA 0x00400 /* VLAN Filter Table Array [0:127] - RW */
145#define IXGB_REQ_RX_DESCRIPTOR_MULTIPLE 8
146
147/* Transmit */
148#define IXGB_TCTL 0x00600 /* TX Control - RW */
149#define IXGB_TDBAL 0x00608 /* TX Descriptor Base Low - RW */
150#define IXGB_TDBAH 0x0060C /* TX Descriptor Base High - RW */
151#define IXGB_TDLEN 0x00610 /* TX Descriptor Length - RW */
152#define IXGB_TDH 0x00618 /* TX Descriptor Head - RW */
153#define IXGB_TDT 0x00620 /* TX Descriptor Tail - RW */
154#define IXGB_TIDV 0x00628 /* TX Interrupt Delay Value - RW */
155#define IXGB_TXDCTL 0x00630 /* Transmit Descriptor Control - RW */
156#define IXGB_TSPMT 0x00638 /* TCP Segmentation PAD & Min Threshold - RW */
157#define IXGB_PAP 0x00640 /* Pause and Pace - RW */
158#define IXGB_REQ_TX_DESCRIPTOR_MULTIPLE 8
159
160/* Physical */
161#define IXGB_PCSC1 0x00700 /* PCS Control 1 - RW */
162#define IXGB_PCSC2 0x00708 /* PCS Control 2 - RW */
163#define IXGB_PCSS1 0x00710 /* PCS Status 1 - RO */
164#define IXGB_PCSS2 0x00718 /* PCS Status 2 - RO */
165#define IXGB_XPCSS 0x00720 /* 10GBASE-X PCS Status (or XGXS Lane Status) - RO */
166#define IXGB_UCCR 0x00728 /* Unilink Circuit Control Register */
167#define IXGB_XPCSTC 0x00730 /* 10GBASE-X PCS Test Control */
168#define IXGB_MACA 0x00738 /* MDI Autoscan Command and Address - RW */
169#define IXGB_APAE 0x00740 /* Autoscan PHY Address Enable - RW */
170#define IXGB_ARD 0x00748 /* Autoscan Read Data - RO */
171#define IXGB_AIS 0x00750 /* Autoscan Interrupt Status - RO */
172#define IXGB_MSCA 0x00758 /* MDI Single Command and Address - RW */
173#define IXGB_MSRWD 0x00760 /* MDI Single Read and Write Data - RW, RO */
174
175/* Wake-up */
176#define IXGB_WUFC 0x00808 /* Wake Up Filter Control - RW */
177#define IXGB_WUS 0x00810 /* Wake Up Status - RO */
178#define IXGB_FFLT 0x01000 /* Flexible Filter Length Table - RW */
179#define IXGB_FFMT 0x01020 /* Flexible Filter Mask Table - RW */
180#define IXGB_FTVT 0x01420 /* Flexible Filter Value Table - RW */
181
182/* Statistics */
183#define IXGB_TPRL 0x02000 /* Total Packets Received (Low) */
184#define IXGB_TPRH 0x02004 /* Total Packets Received (High) */
185#define IXGB_GPRCL 0x02008 /* Good Packets Received Count (Low) */
186#define IXGB_GPRCH 0x0200C /* Good Packets Received Count (High) */
187#define IXGB_BPRCL 0x02010 /* Broadcast Packets Received Count (Low) */
188#define IXGB_BPRCH 0x02014 /* Broadcast Packets Received Count (High) */
189#define IXGB_MPRCL 0x02018 /* Multicast Packets Received Count (Low) */
190#define IXGB_MPRCH 0x0201C /* Multicast Packets Received Count (High) */
191#define IXGB_UPRCL 0x02020 /* Unicast Packets Received Count (Low) */
192#define IXGB_UPRCH 0x02024 /* Unicast Packets Received Count (High) */
193#define IXGB_VPRCL 0x02028 /* VLAN Packets Received Count (Low) */
194#define IXGB_VPRCH 0x0202C /* VLAN Packets Received Count (High) */
195#define IXGB_JPRCL 0x02030 /* Jumbo Packets Received Count (Low) */
196#define IXGB_JPRCH 0x02034 /* Jumbo Packets Received Count (High) */
197#define IXGB_GORCL 0x02038 /* Good Octets Received Count (Low) */
198#define IXGB_GORCH 0x0203C /* Good Octets Received Count (High) */
199#define IXGB_TORL 0x02040 /* Total Octets Received (Low) */
200#define IXGB_TORH 0x02044 /* Total Octets Received (High) */
201#define IXGB_RNBC 0x02048 /* Receive No Buffers Count */
202#define IXGB_RUC 0x02050 /* Receive Undersize Count */
203#define IXGB_ROC 0x02058 /* Receive Oversize Count */
204#define IXGB_RLEC 0x02060 /* Receive Length Error Count */
205#define IXGB_CRCERRS 0x02068 /* CRC Error Count */
206#define IXGB_ICBC 0x02070 /* Illegal control byte in mid-packet Count */
207#define IXGB_ECBC 0x02078 /* Error Control byte in mid-packet Count */
208#define IXGB_MPC 0x02080 /* Missed Packets Count */
209#define IXGB_TPTL 0x02100 /* Total Packets Transmitted (Low) */
210#define IXGB_TPTH 0x02104 /* Total Packets Transmitted (High) */
211#define IXGB_GPTCL 0x02108 /* Good Packets Transmitted Count (Low) */
212#define IXGB_GPTCH 0x0210C /* Good Packets Transmitted Count (High) */
213#define IXGB_BPTCL 0x02110 /* Broadcast Packets Transmitted Count (Low) */
214#define IXGB_BPTCH 0x02114 /* Broadcast Packets Transmitted Count (High) */
215#define IXGB_MPTCL 0x02118 /* Multicast Packets Transmitted Count (Low) */
216#define IXGB_MPTCH 0x0211C /* Multicast Packets Transmitted Count (High) */
217#define IXGB_UPTCL 0x02120 /* Unicast Packets Transmitted Count (Low) */
218#define IXGB_UPTCH 0x02124 /* Unicast Packets Transmitted Count (High) */
219#define IXGB_VPTCL 0x02128 /* VLAN Packets Transmitted Count (Low) */
220#define IXGB_VPTCH 0x0212C /* VLAN Packets Transmitted Count (High) */
221#define IXGB_JPTCL 0x02130 /* Jumbo Packets Transmitted Count (Low) */
222#define IXGB_JPTCH 0x02134 /* Jumbo Packets Transmitted Count (High) */
223#define IXGB_GOTCL 0x02138 /* Good Octets Transmitted Count (Low) */
224#define IXGB_GOTCH 0x0213C /* Good Octets Transmitted Count (High) */
225#define IXGB_TOTL 0x02140 /* Total Octets Transmitted Count (Low) */
226#define IXGB_TOTH 0x02144 /* Total Octets Transmitted Count (High) */
227#define IXGB_DC 0x02148 /* Defer Count */
228#define IXGB_PLT64C 0x02150 /* Packet Transmitted was less than 64 bytes Count */
229#define IXGB_TSCTC 0x02170 /* TCP Segmentation Context Transmitted Count */
230#define IXGB_TSCTFC 0x02178 /* TCP Segmentation Context Tx Fail Count */
231#define IXGB_IBIC 0x02180 /* Illegal byte during Idle stream count */
232#define IXGB_RFC 0x02188 /* Remote Fault Count */
233#define IXGB_LFC 0x02190 /* Local Fault Count */
234#define IXGB_PFRC 0x02198 /* Pause Frame Receive Count */
235#define IXGB_PFTC 0x021A0 /* Pause Frame Transmit Count */
236#define IXGB_MCFRC 0x021A8 /* MAC Control Frames (non-Pause) Received Count */
237#define IXGB_MCFTC 0x021B0 /* MAC Control Frames (non-Pause) Transmitted Count */
238#define IXGB_XONRXC 0x021B8 /* XON Received Count */
239#define IXGB_XONTXC 0x021C0 /* XON Transmitted Count */
240#define IXGB_XOFFRXC 0x021C8 /* XOFF Received Count */
241#define IXGB_XOFFTXC 0x021D0 /* XOFF Transmitted Count */
242#define IXGB_RJC 0x021D8 /* Receive Jabber Count */
243
244/* CTRL0 Bit Masks */
245#define IXGB_CTRL0_LRST 0x00000008
246#define IXGB_CTRL0_JFE 0x00000010
247#define IXGB_CTRL0_XLE 0x00000020
248#define IXGB_CTRL0_MDCS 0x00000040
249#define IXGB_CTRL0_CMDC 0x00000080
250#define IXGB_CTRL0_SDP0 0x00040000
251#define IXGB_CTRL0_SDP1 0x00080000
252#define IXGB_CTRL0_SDP2 0x00100000
253#define IXGB_CTRL0_SDP3 0x00200000
254#define IXGB_CTRL0_SDP0_DIR 0x00400000
255#define IXGB_CTRL0_SDP1_DIR 0x00800000
256#define IXGB_CTRL0_SDP2_DIR 0x01000000
257#define IXGB_CTRL0_SDP3_DIR 0x02000000
258#define IXGB_CTRL0_RST 0x04000000
259#define IXGB_CTRL0_RPE 0x08000000
260#define IXGB_CTRL0_TPE 0x10000000
261#define IXGB_CTRL0_VME 0x40000000
262
263/* CTRL1 Bit Masks */
264#define IXGB_CTRL1_GPI0_EN 0x00000001
265#define IXGB_CTRL1_GPI1_EN 0x00000002
266#define IXGB_CTRL1_GPI2_EN 0x00000004
267#define IXGB_CTRL1_GPI3_EN 0x00000008
268#define IXGB_CTRL1_SDP4 0x00000010
269#define IXGB_CTRL1_SDP5 0x00000020
270#define IXGB_CTRL1_SDP6 0x00000040
271#define IXGB_CTRL1_SDP7 0x00000080
272#define IXGB_CTRL1_SDP4_DIR 0x00000100
273#define IXGB_CTRL1_SDP5_DIR 0x00000200
274#define IXGB_CTRL1_SDP6_DIR 0x00000400
275#define IXGB_CTRL1_SDP7_DIR 0x00000800
276#define IXGB_CTRL1_EE_RST 0x00002000
277#define IXGB_CTRL1_RO_DIS 0x00020000
278#define IXGB_CTRL1_PCIXHM_MASK 0x00C00000
279#define IXGB_CTRL1_PCIXHM_1_2 0x00000000
280#define IXGB_CTRL1_PCIXHM_5_8 0x00400000
281#define IXGB_CTRL1_PCIXHM_3_4 0x00800000
282#define IXGB_CTRL1_PCIXHM_7_8 0x00C00000
283
284/* STATUS Bit Masks */
285#define IXGB_STATUS_LU 0x00000002
286#define IXGB_STATUS_AIP 0x00000004
287#define IXGB_STATUS_TXOFF 0x00000010
288#define IXGB_STATUS_XAUIME 0x00000020
289#define IXGB_STATUS_RES 0x00000040
290#define IXGB_STATUS_RIS 0x00000080
291#define IXGB_STATUS_RIE 0x00000100
292#define IXGB_STATUS_RLF 0x00000200
293#define IXGB_STATUS_RRF 0x00000400
294#define IXGB_STATUS_PCI_SPD 0x00000800
295#define IXGB_STATUS_BUS64 0x00001000
296#define IXGB_STATUS_PCIX_MODE 0x00002000
297#define IXGB_STATUS_PCIX_SPD_MASK 0x0000C000
298#define IXGB_STATUS_PCIX_SPD_66 0x00000000
299#define IXGB_STATUS_PCIX_SPD_100 0x00004000
300#define IXGB_STATUS_PCIX_SPD_133 0x00008000
301#define IXGB_STATUS_REV_ID_MASK 0x000F0000
302#define IXGB_STATUS_REV_ID_SHIFT 16
303
304/* EECD Bit Masks */
305#define IXGB_EECD_SK 0x00000001
306#define IXGB_EECD_CS 0x00000002
307#define IXGB_EECD_DI 0x00000004
308#define IXGB_EECD_DO 0x00000008
309#define IXGB_EECD_FWE_MASK 0x00000030
310#define IXGB_EECD_FWE_DIS 0x00000010
311#define IXGB_EECD_FWE_EN 0x00000020
312
313/* MFS */
314#define IXGB_MFS_SHIFT 16
315
316/* Interrupt Register Bit Masks (used for ICR, ICS, IMS, and IMC) */
317#define IXGB_INT_TXDW 0x00000001
318#define IXGB_INT_TXQE 0x00000002
319#define IXGB_INT_LSC 0x00000004
320#define IXGB_INT_RXSEQ 0x00000008
321#define IXGB_INT_RXDMT0 0x00000010
322#define IXGB_INT_RXO 0x00000040
323#define IXGB_INT_RXT0 0x00000080
324#define IXGB_INT_AUTOSCAN 0x00000200
325#define IXGB_INT_GPI0 0x00000800
326#define IXGB_INT_GPI1 0x00001000
327#define IXGB_INT_GPI2 0x00002000
328#define IXGB_INT_GPI3 0x00004000
329
330/* RCTL Bit Masks */
331#define IXGB_RCTL_RXEN 0x00000002
332#define IXGB_RCTL_SBP 0x00000004
333#define IXGB_RCTL_UPE 0x00000008
334#define IXGB_RCTL_MPE 0x00000010
335#define IXGB_RCTL_RDMTS_MASK 0x00000300
336#define IXGB_RCTL_RDMTS_1_2 0x00000000
337#define IXGB_RCTL_RDMTS_1_4 0x00000100
338#define IXGB_RCTL_RDMTS_1_8 0x00000200
339#define IXGB_RCTL_MO_MASK 0x00003000
340#define IXGB_RCTL_MO_47_36 0x00000000
341#define IXGB_RCTL_MO_46_35 0x00001000
342#define IXGB_RCTL_MO_45_34 0x00002000
343#define IXGB_RCTL_MO_43_32 0x00003000
344#define IXGB_RCTL_MO_SHIFT 12
345#define IXGB_RCTL_BAM 0x00008000
346#define IXGB_RCTL_BSIZE_MASK 0x00030000
347#define IXGB_RCTL_BSIZE_2048 0x00000000
348#define IXGB_RCTL_BSIZE_4096 0x00010000
349#define IXGB_RCTL_BSIZE_8192 0x00020000
350#define IXGB_RCTL_BSIZE_16384 0x00030000
351#define IXGB_RCTL_VFE 0x00040000
352#define IXGB_RCTL_CFIEN 0x00080000
353#define IXGB_RCTL_CFI 0x00100000
354#define IXGB_RCTL_RPDA_MASK 0x00600000
355#define IXGB_RCTL_RPDA_MC_MAC 0x00000000
356#define IXGB_RCTL_MC_ONLY 0x00400000
357#define IXGB_RCTL_CFF 0x00800000
358#define IXGB_RCTL_SECRC 0x04000000
359#define IXGB_RDT_FPDB 0x80000000
360
361#define IXGB_RCTL_IDLE_RX_UNIT 0
362
363/* FCRTL Bit Masks */
364#define IXGB_FCRTL_XONE 0x80000000
365
366/* RXDCTL Bit Masks */
367#define IXGB_RXDCTL_PTHRESH_MASK 0x000001FF
368#define IXGB_RXDCTL_PTHRESH_SHIFT 0
369#define IXGB_RXDCTL_HTHRESH_MASK 0x0003FE00
370#define IXGB_RXDCTL_HTHRESH_SHIFT 9
371#define IXGB_RXDCTL_WTHRESH_MASK 0x07FC0000
372#define IXGB_RXDCTL_WTHRESH_SHIFT 18
373
374/* RAIDC Bit Masks */
375#define IXGB_RAIDC_HIGHTHRS_MASK 0x0000003F
376#define IXGB_RAIDC_DELAY_MASK 0x000FF800
377#define IXGB_RAIDC_DELAY_SHIFT 11
378#define IXGB_RAIDC_POLL_MASK 0x1FF00000
379#define IXGB_RAIDC_POLL_SHIFT 20
380#define IXGB_RAIDC_RXT_GATE 0x40000000
381#define IXGB_RAIDC_EN 0x80000000
382
383#define IXGB_RAIDC_POLL_1000_INTERRUPTS_PER_SECOND 1220
384#define IXGB_RAIDC_POLL_5000_INTERRUPTS_PER_SECOND 244
385#define IXGB_RAIDC_POLL_10000_INTERRUPTS_PER_SECOND 122
386#define IXGB_RAIDC_POLL_20000_INTERRUPTS_PER_SECOND 61
387
388/* RXCSUM Bit Masks */
389#define IXGB_RXCSUM_IPOFL 0x00000100
390#define IXGB_RXCSUM_TUOFL 0x00000200
391
392/* RAH Bit Masks */
393#define IXGB_RAH_ASEL_MASK 0x00030000
394#define IXGB_RAH_ASEL_DEST 0x00000000
395#define IXGB_RAH_ASEL_SRC 0x00010000
396#define IXGB_RAH_AV 0x80000000
397
398/* TCTL Bit Masks */
399#define IXGB_TCTL_TCE 0x00000001
400#define IXGB_TCTL_TXEN 0x00000002
401#define IXGB_TCTL_TPDE 0x00000004
402
403#define IXGB_TCTL_IDLE_TX_UNIT 0
404
405/* TXDCTL Bit Masks */
406#define IXGB_TXDCTL_PTHRESH_MASK 0x0000007F
407#define IXGB_TXDCTL_HTHRESH_MASK 0x00007F00
408#define IXGB_TXDCTL_HTHRESH_SHIFT 8
409#define IXGB_TXDCTL_WTHRESH_MASK 0x007F0000
410#define IXGB_TXDCTL_WTHRESH_SHIFT 16
411
412/* TSPMT Bit Masks */
413#define IXGB_TSPMT_TSMT_MASK 0x0000FFFF
414#define IXGB_TSPMT_TSPBP_MASK 0xFFFF0000
415#define IXGB_TSPMT_TSPBP_SHIFT 16
416
417/* PAP Bit Masks */
418#define IXGB_PAP_TXPC_MASK 0x0000FFFF
419#define IXGB_PAP_TXPV_MASK 0x000F0000
420#define IXGB_PAP_TXPV_10G 0x00000000
421#define IXGB_PAP_TXPV_1G 0x00010000
422#define IXGB_PAP_TXPV_2G 0x00020000
423#define IXGB_PAP_TXPV_3G 0x00030000
424#define IXGB_PAP_TXPV_4G 0x00040000
425#define IXGB_PAP_TXPV_5G 0x00050000
426#define IXGB_PAP_TXPV_6G 0x00060000
427#define IXGB_PAP_TXPV_7G 0x00070000
428#define IXGB_PAP_TXPV_8G 0x00080000
429#define IXGB_PAP_TXPV_9G 0x00090000
430#define IXGB_PAP_TXPV_WAN 0x000F0000
431
432/* PCSC1 Bit Masks */
433#define IXGB_PCSC1_LOOPBACK 0x00004000
434
435/* PCSC2 Bit Masks */
436#define IXGB_PCSC2_PCS_TYPE_MASK 0x00000003
437#define IXGB_PCSC2_PCS_TYPE_10GBX 0x00000001
438
439/* PCSS1 Bit Masks */
440#define IXGB_PCSS1_LOCAL_FAULT 0x00000080
441#define IXGB_PCSS1_RX_LINK_STATUS 0x00000004
442
443/* PCSS2 Bit Masks */
444#define IXGB_PCSS2_DEV_PRES_MASK 0x0000C000
445#define IXGB_PCSS2_DEV_PRES 0x00004000
446#define IXGB_PCSS2_TX_LF 0x00000800
447#define IXGB_PCSS2_RX_LF 0x00000400
448#define IXGB_PCSS2_10GBW 0x00000004
449#define IXGB_PCSS2_10GBX 0x00000002
450#define IXGB_PCSS2_10GBR 0x00000001
451
452/* XPCSS Bit Masks */
453#define IXGB_XPCSS_ALIGN_STATUS 0x00001000
454#define IXGB_XPCSS_PATTERN_TEST 0x00000800
455#define IXGB_XPCSS_LANE_3_SYNC 0x00000008
456#define IXGB_XPCSS_LANE_2_SYNC 0x00000004
457#define IXGB_XPCSS_LANE_1_SYNC 0x00000002
458#define IXGB_XPCSS_LANE_0_SYNC 0x00000001
459
460/* XPCSTC Bit Masks */
461#define IXGB_XPCSTC_BERT_TRIG 0x00200000
462#define IXGB_XPCSTC_BERT_SST 0x00100000
463#define IXGB_XPCSTC_BERT_PSZ_MASK 0x000C0000
464#define IXGB_XPCSTC_BERT_PSZ_SHIFT 17
465#define IXGB_XPCSTC_BERT_PSZ_INF 0x00000003
466#define IXGB_XPCSTC_BERT_PSZ_68 0x00000001
467#define IXGB_XPCSTC_BERT_PSZ_1028 0x00000000
468
469/* MSCA bit Masks */
470/* New Protocol Address */
471#define IXGB_MSCA_NP_ADDR_MASK 0x0000FFFF
472#define IXGB_MSCA_NP_ADDR_SHIFT 0
473/* Either Device Type or Register Address, depending on ST_CODE */
474#define IXGB_MSCA_DEV_TYPE_MASK 0x001F0000
475#define IXGB_MSCA_DEV_TYPE_SHIFT 16
476#define IXGB_MSCA_PHY_ADDR_MASK 0x03E00000
477#define IXGB_MSCA_PHY_ADDR_SHIFT 21
478#define IXGB_MSCA_OP_CODE_MASK 0x0C000000
479/* OP_CODE == 00, Address cycle, New Protocol */
480/* OP_CODE == 01, Write operation */
481/* OP_CODE == 10, Read operation */
482/* OP_CODE == 11, Read, auto increment, New Protocol */
483#define IXGB_MSCA_ADDR_CYCLE 0x00000000
484#define IXGB_MSCA_WRITE 0x04000000
485#define IXGB_MSCA_READ 0x08000000
486#define IXGB_MSCA_READ_AUTOINC 0x0C000000
487#define IXGB_MSCA_OP_CODE_SHIFT 26
488#define IXGB_MSCA_ST_CODE_MASK 0x30000000
489/* ST_CODE == 00, New Protocol */
490/* ST_CODE == 01, Old Protocol */
491#define IXGB_MSCA_NEW_PROTOCOL 0x00000000
492#define IXGB_MSCA_OLD_PROTOCOL 0x10000000
493#define IXGB_MSCA_ST_CODE_SHIFT 28
494/* Initiate command, self-clearing when command completes */
495#define IXGB_MSCA_MDI_COMMAND 0x40000000
496/* MDI In Progress Enable. */
497#define IXGB_MSCA_MDI_IN_PROG_EN 0x80000000
498
499/* MSRWD bit masks */
500#define IXGB_MSRWD_WRITE_DATA_MASK 0x0000FFFF
501#define IXGB_MSRWD_WRITE_DATA_SHIFT 0
502#define IXGB_MSRWD_READ_DATA_MASK 0xFFFF0000
503#define IXGB_MSRWD_READ_DATA_SHIFT 16
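/* Illustrative only (a sketch of the idea behind ixgb_read_phy_reg(), not a
 * verbatim copy of it): a "new protocol" MDIO read is built from the MSCA
 * fields above by first issuing an address cycle,
 *
 *	command = (reg_addr    << IXGB_MSCA_NP_ADDR_SHIFT)  |
 *		  (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
 *		  (phy_addr    << IXGB_MSCA_PHY_ADDR_SHIFT) |
 *		  IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_NEW_PROTOCOL |
 *		  IXGB_MSCA_MDI_COMMAND;
 *
 * waiting for IXGB_MSCA_MDI_COMMAND to self-clear, then repeating the write
 * with IXGB_MSCA_READ in place of IXGB_MSCA_ADDR_CYCLE. The result is then
 * taken from MSRWD using IXGB_MSRWD_READ_DATA_MASK and
 * IXGB_MSRWD_READ_DATA_SHIFT. */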
504
505/* Definitions for the optics devices on the MDIO bus. */
506#define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */
507
508/* Standard five-bit Device IDs. See IEEE 802.3ae, clause 45 */
509#define MDIO_PMA_PMD_DID 0x01
510#define MDIO_WIS_DID 0x02
511#define MDIO_PCS_DID 0x03
512#define MDIO_XGXS_DID 0x04
513
514/* Standard PMA/PMD registers and bit definitions. */
515/* Note: This is a very limited set of definitions, */
516/* only implemented features are defined. */
517#define MDIO_PMA_PMD_CR1 0x0000
518#define MDIO_PMA_PMD_CR1_RESET 0x8000
519
520#define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices only */
521
522/* Vendor-specific MDIO registers */
523#define G6XXX_PMA_PMD_VS1 0xC001 /* Vendor-specific register */
524#define G6XXX_XGXS_XAUI_VS2 0x18 /* Vendor-specific register */
525
526#define G6XXX_PMA_PMD_VS1_PLL_RESET 0x80
527#define G6XXX_PMA_PMD_VS1_REMOVE_PLL_RESET 0x00
528#define G6XXX_XGXS_XAUI_VS2_INPUT_MASK 0x0F /* XAUI lanes synchronized */
529
530/* Layout of a single receive descriptor. The controller assumes that this
531 * structure is packed into 16 bytes, which is a safe assumption with most
532 * compilers. However, some compilers may insert padding between the fields,
533 * in which case the structure must be packed in some compiler-specific
534 * manner. */
535struct ixgb_rx_desc {
536 uint64_t buff_addr;
537 uint16_t length;
538 uint16_t reserved;
539 uint8_t status;
540 uint8_t errors;
541 uint16_t special;
542};
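/* Purely illustrative: the 16-byte layout assumed above could be verified at
 * compile time with the kernel's BUILD_BUG_ON() helper, e.g.
 *
 *	BUILD_BUG_ON(sizeof(struct ixgb_rx_desc) != 16);
 *
 * placed early in the driver's init path. */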
543
544#define IXGB_RX_DESC_STATUS_DD 0x01
545#define IXGB_RX_DESC_STATUS_EOP 0x02
546#define IXGB_RX_DESC_STATUS_IXSM 0x04
547#define IXGB_RX_DESC_STATUS_VP 0x08
548#define IXGB_RX_DESC_STATUS_TCPCS 0x20
549#define IXGB_RX_DESC_STATUS_IPCS 0x40
550#define IXGB_RX_DESC_STATUS_PIF 0x80
551
552#define IXGB_RX_DESC_ERRORS_CE 0x01
553#define IXGB_RX_DESC_ERRORS_SE 0x02
554#define IXGB_RX_DESC_ERRORS_P 0x08
555#define IXGB_RX_DESC_ERRORS_TCPE 0x20
556#define IXGB_RX_DESC_ERRORS_IPE 0x40
557#define IXGB_RX_DESC_ERRORS_RXE 0x80
558
559#define IXGB_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
560#define IXGB_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
561#define IXGB_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */
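/* Illustrative helpers (hypothetical names, not part of the original driver)
 * showing how the "special" field masks above are intended to combine: */
static inline uint16_t ixgb_rx_desc_vlan_id(uint16_t special)
{
	return special & IXGB_RX_DESC_SPECIAL_VLAN_MASK;
}

static inline uint8_t ixgb_rx_desc_priority(uint16_t special)
{
	return (special & IXGB_RX_DESC_SPECIAL_PRI_MASK) >>
	       IXGB_RX_DESC_SPECIAL_PRI_SHIFT;
}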
562
563/* Layout of a single transmit descriptor. The controller assumes that this
564 * structure is packed into 16 bytes, which is a safe assumption with most
565 * compilers. However, some compilers may insert padding between the fields,
566 * in which case the structure must be packed in some compiler-specific
567 * manner. */
568struct ixgb_tx_desc {
569 uint64_t buff_addr;
570 uint32_t cmd_type_len;
571 uint8_t status;
572 uint8_t popts;
573 uint16_t vlan;
574};
575
576#define IXGB_TX_DESC_LENGTH_MASK 0x000FFFFF
577#define IXGB_TX_DESC_TYPE_MASK 0x00F00000
578#define IXGB_TX_DESC_TYPE_SHIFT 20
579#define IXGB_TX_DESC_CMD_MASK 0xFF000000
580#define IXGB_TX_DESC_CMD_SHIFT 24
581#define IXGB_TX_DESC_CMD_EOP 0x01000000
582#define IXGB_TX_DESC_CMD_TSE 0x04000000
583#define IXGB_TX_DESC_CMD_RS 0x08000000
584#define IXGB_TX_DESC_CMD_VLE 0x40000000
585#define IXGB_TX_DESC_CMD_IDE 0x80000000
586
587#define IXGB_TX_DESC_TYPE 0x00100000
588
589#define IXGB_TX_DESC_STATUS_DD 0x01
590
591#define IXGB_TX_DESC_POPTS_IXSM 0x01
592#define IXGB_TX_DESC_POPTS_TXSM 0x02
593#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */
594
595struct ixgb_context_desc {
596 uint8_t ipcss;
597 uint8_t ipcso;
598 uint16_t ipcse;
599 uint8_t tucss;
600 uint8_t tucso;
601 uint16_t tucse;
602 uint32_t cmd_type_len;
603 uint8_t status;
604 uint8_t hdr_len;
605 uint16_t mss;
606};
607
608#define IXGB_CONTEXT_DESC_CMD_TCP 0x01000000
609#define IXGB_CONTEXT_DESC_CMD_IP 0x02000000
610#define IXGB_CONTEXT_DESC_CMD_TSE 0x04000000
611#define IXGB_CONTEXT_DESC_CMD_RS 0x08000000
612#define IXGB_CONTEXT_DESC_CMD_IDE 0x80000000
613
614#define IXGB_CONTEXT_DESC_TYPE 0x00000000
615
616#define IXGB_CONTEXT_DESC_STATUS_DD 0x01
617
618/* Filters */
619#define IXGB_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
620#define IXGB_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
621#define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */
622
623#define IXGB_MEMORY_REGISTER_BASE_ADDRESS 0
624#define ENET_HEADER_SIZE 14
625#define ENET_FCS_LENGTH 4
626#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128
627#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60
628#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514
629#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00
630
631/* Phy Addresses */
632#define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */
633#define IXGB_XAUII_PHY_ADDR 0x1 /* XAUI transceiver phy address */
634#define IXGB_DIAG_PHY_ADDR 0x1F /* Diagnostic Device phy address */
635
636/* This structure takes a 64k flash and maps it for identification commands */
637struct ixgb_flash_buffer {
638 uint8_t manufacturer_id;
639 uint8_t device_id;
640 uint8_t filler1[0x2AA8];
641 uint8_t cmd2;
642 uint8_t filler2[0x2AAA];
643 uint8_t cmd1;
644 uint8_t filler3[0xAAAA];
645};
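/* The filler sizes place cmd2 at byte offset 0x2AAA and cmd1 at 0x5555 (the
 * standard JEDEC flash command addresses), and filler3 pads the structure out
 * to exactly 64 KB (0x10000 bytes). */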
646
647/*
648 * This is a little-endian specific check.
649 */
650#define IS_MULTICAST(Address) \
651 (boolean_t)(((uint8_t *)(Address))[0] & ((uint8_t)0x01))
652
653/*
654 * Check whether an address is broadcast.
655 */
656#define IS_BROADCAST(Address) \
657 ((((uint8_t *)(Address))[0] == ((uint8_t)0xff)) && (((uint8_t *)(Address))[1] == ((uint8_t)0xff)))
658
659/* Flow control parameters */
660struct ixgb_fc {
661 uint32_t high_water; /* Flow Control High-water */
662 uint32_t low_water; /* Flow Control Low-water */
663 uint16_t pause_time; /* Flow Control Pause timer */
664 boolean_t send_xon; /* Flow control send XON */
665 ixgb_fc_type type; /* Type of flow control */
666};
667
668/* The historical defaults for the flow control values are given below. */
669#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
670#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
671#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
672
673/* Phy definitions */
674#define IXGB_MAX_PHY_REG_ADDRESS 0xFFFF
675#define IXGB_MAX_PHY_ADDRESS 31
676#define IXGB_MAX_PHY_DEV_TYPE 31
677
678/* Bus parameters */
679struct ixgb_bus {
680 ixgb_bus_speed speed;
681 ixgb_bus_width width;
682 ixgb_bus_type type;
683};
684
685struct ixgb_hw {
686 uint8_t __iomem *hw_addr;/* Base Address of the hardware */
687 void *back; /* Pointer to OS-dependent struct */
688 struct ixgb_fc fc; /* Flow control parameters */
689 struct ixgb_bus bus; /* Bus parameters */
690 uint32_t phy_id; /* Phy Identifier */
691 uint32_t phy_addr; /* XGMII address of Phy */
692 ixgb_mac_type mac_type; /* Identifier for MAC controller */
693 ixgb_phy_type phy_type; /* Transceiver/phy identifier */
694 uint32_t max_frame_size; /* Maximum frame size supported */
695 uint32_t mc_filter_type; /* Multicast filter hash type */
696 uint32_t num_mc_addrs; /* Number of current Multicast addrs */
697 uint8_t curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */
698 uint32_t num_tx_desc; /* Number of Transmit descriptors */
699 uint32_t num_rx_desc; /* Number of Receive descriptors */
700 uint32_t rx_buffer_size; /* Size of Receive buffer */
701 boolean_t link_up; /* TRUE if link is valid */
702 boolean_t adapter_stopped; /* State of adapter */
703 uint16_t device_id; /* device id from PCI configuration space */
704 uint16_t vendor_id; /* vendor id from PCI configuration space */
705 uint8_t revision_id; /* revision id from PCI configuration space */
706 uint16_t subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */
707 uint16_t subsystem_id; /* subsystem id from PCI configuration space */
708 uint32_t bar0; /* Base Address registers */
709 uint32_t bar1;
710 uint32_t bar2;
711 uint32_t bar3;
712 uint16_t pci_cmd_word; /* PCI command register id from PCI configuration space */
713 uint16_t eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */
714 unsigned long io_base; /* Our I/O mapped location */
715 uint32_t lastLFC;
716 uint32_t lastRFC;
717};
718
719/* Statistics reported by the hardware */
720struct ixgb_hw_stats {
721 uint64_t tprl;
722 uint64_t tprh;
723 uint64_t gprcl;
724 uint64_t gprch;
725 uint64_t bprcl;
726 uint64_t bprch;
727 uint64_t mprcl;
728 uint64_t mprch;
729 uint64_t uprcl;
730 uint64_t uprch;
731 uint64_t vprcl;
732 uint64_t vprch;
733 uint64_t jprcl;
734 uint64_t jprch;
735 uint64_t gorcl;
736 uint64_t gorch;
737 uint64_t torl;
738 uint64_t torh;
739 uint64_t rnbc;
740 uint64_t ruc;
741 uint64_t roc;
742 uint64_t rlec;
743 uint64_t crcerrs;
744 uint64_t icbc;
745 uint64_t ecbc;
746 uint64_t mpc;
747 uint64_t tptl;
748 uint64_t tpth;
749 uint64_t gptcl;
750 uint64_t gptch;
751 uint64_t bptcl;
752 uint64_t bptch;
753 uint64_t mptcl;
754 uint64_t mptch;
755 uint64_t uptcl;
756 uint64_t uptch;
757 uint64_t vptcl;
758 uint64_t vptch;
759 uint64_t jptcl;
760 uint64_t jptch;
761 uint64_t gotcl;
762 uint64_t gotch;
763 uint64_t totl;
764 uint64_t toth;
765 uint64_t dc;
766 uint64_t plt64c;
767 uint64_t tsctc;
768 uint64_t tsctfc;
769 uint64_t ibic;
770 uint64_t rfc;
771 uint64_t lfc;
772 uint64_t pfrc;
773 uint64_t pftc;
774 uint64_t mcfrc;
775 uint64_t mcftc;
776 uint64_t xonrxc;
777 uint64_t xontxc;
778 uint64_t xoffrxc;
779 uint64_t xofftxc;
780 uint64_t rjc;
781};
782
783/* Function Prototypes */
784extern boolean_t ixgb_adapter_stop(struct ixgb_hw *hw);
785extern boolean_t ixgb_init_hw(struct ixgb_hw *hw);
786extern boolean_t ixgb_adapter_start(struct ixgb_hw *hw);
787extern void ixgb_init_rx_addrs(struct ixgb_hw *hw);
788extern void ixgb_check_for_link(struct ixgb_hw *hw);
789extern boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw);
790extern boolean_t ixgb_setup_fc(struct ixgb_hw *hw);
791extern void ixgb_clear_hw_cntrs(struct ixgb_hw *hw);
792extern boolean_t mac_addr_valid(uint8_t *mac_addr);
793
794extern uint16_t ixgb_read_phy_reg(struct ixgb_hw *hw,
795 uint32_t reg_addr,
796 uint32_t phy_addr,
797 uint32_t device_type);
798
799extern void ixgb_write_phy_reg(struct ixgb_hw *hw,
800 uint32_t reg_addr,
801 uint32_t phy_addr,
802 uint32_t device_type,
803 uint16_t data);
804
805extern void ixgb_rar_set(struct ixgb_hw *hw,
806 uint8_t *addr,
807 uint32_t index);
808
809
810/* Filters (multicast, vlan, receive) */
811extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
812 uint8_t *mc_addr_list,
813 uint32_t mc_addr_count,
814 uint32_t pad);
815
816/* Vfta functions */
817extern void ixgb_write_vfta(struct ixgb_hw *hw,
818 uint32_t offset,
819 uint32_t value);
820
821extern void ixgb_clear_vfta(struct ixgb_hw *hw);
822
823/* Access functions to eeprom data */
824void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr);
825uint16_t ixgb_get_ee_compatibility(struct ixgb_hw *hw);
826uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw);
827uint16_t ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw);
828uint16_t ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw);
829uint16_t ixgb_get_ee_subsystem_id(struct ixgb_hw *hw);
830uint16_t ixgb_get_ee_subvendor_id(struct ixgb_hw *hw);
831uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw);
832uint16_t ixgb_get_ee_vendor_id(struct ixgb_hw *hw);
833uint16_t ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw);
834uint8_t ixgb_get_ee_d3_power(struct ixgb_hw *hw);
835uint8_t ixgb_get_ee_d0_power(struct ixgb_hw *hw);
836boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw);
837uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
838
839/* Everything else */
840void ixgb_led_on(struct ixgb_hw *hw);
841void ixgb_led_off(struct ixgb_hw *hw);
842void ixgb_write_pci_cfg(struct ixgb_hw *hw,
843 uint32_t reg,
844 uint16_t * value);
845
846
847#endif /* _IXGB_HW_H_ */
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h
new file mode 100644
index 000000000000..aee207eaa287
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_ids.h
@@ -0,0 +1,48 @@
1/*******************************************************************************
2
3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _IXGB_IDS_H_
30#define _IXGB_IDS_H_
31
32/**********************************************************************
33** The Device and Vendor IDs for 10 Gigabit MACs
34**********************************************************************/
35
36#define INTEL_VENDOR_ID 0x8086
37#define INTEL_SUBVENDOR_ID 0x8086
38
39
40#define IXGB_DEVICE_ID_82597EX 0x1048
41#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
42#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
43#define IXGB_SUBDEVICE_ID_A11F 0xA11F
44#define IXGB_SUBDEVICE_ID_A01F 0xA01F
45
46#endif /* #ifndef _IXGB_IDS_H_ */
47
48/* End of File */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
new file mode 100644
index 000000000000..7d26623d8592
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -0,0 +1,2166 @@
1/*******************************************************************************
2
3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "ixgb.h"
30
31/* Change Log
32 * 1.0.88 01/05/05
33 * - include fix to the condition that determines when to quit NAPI - Robert Olsson
34 * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
35 * 1.0.84 10/26/04
36 * - reset buffer_info->dma in Tx resource cleanup logic
37 * 1.0.83 10/12/04
38 * - sparse cleanup - shemminger@osdl.org
39 * - fix tx resource cleanup logic
40 */
41
42char ixgb_driver_name[] = "ixgb";
43char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
44
45#ifndef CONFIG_IXGB_NAPI
46#define DRIVERNAPI
47#else
48#define DRIVERNAPI "-NAPI"
49#endif
50char ixgb_driver_version[] = "1.0.90-k2"DRIVERNAPI;
51char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
52
53/* ixgb_pci_tbl - PCI Device ID Table
54 *
55 * Wildcard entries (PCI_ANY_ID) should come last
56 * Last entry must be all 0s
57 *
58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
59 * Class, Class Mask, private data (not used) }
60 */
61static struct pci_device_id ixgb_pci_tbl[] = {
62 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
63 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
64 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
65 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
66 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
67 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
68
69 /* required last entry */
70 {0,}
71};
72
73MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
74
75/* Local Function Prototypes */
76
77int ixgb_up(struct ixgb_adapter *adapter);
78void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
79void ixgb_reset(struct ixgb_adapter *adapter);
80int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
81int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
82void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
83void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
84void ixgb_update_stats(struct ixgb_adapter *adapter);
85
86static int ixgb_init_module(void);
87static void ixgb_exit_module(void);
88static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
89static void __devexit ixgb_remove(struct pci_dev *pdev);
90static int ixgb_sw_init(struct ixgb_adapter *adapter);
91static int ixgb_open(struct net_device *netdev);
92static int ixgb_close(struct net_device *netdev);
93static void ixgb_configure_tx(struct ixgb_adapter *adapter);
94static void ixgb_configure_rx(struct ixgb_adapter *adapter);
95static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
96static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
97static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
98static void ixgb_set_multi(struct net_device *netdev);
99static void ixgb_watchdog(unsigned long data);
100static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
101static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
102static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
103static int ixgb_set_mac(struct net_device *netdev, void *p);
104static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
105static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
106#ifdef CONFIG_IXGB_NAPI
107static int ixgb_clean(struct net_device *netdev, int *budget);
108static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
109 int *work_done, int work_to_do);
110#else
111static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
112#endif
113static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
114void ixgb_set_ethtool_ops(struct net_device *netdev);
115static void ixgb_tx_timeout(struct net_device *dev);
116static void ixgb_tx_timeout_task(struct net_device *dev);
117static void ixgb_vlan_rx_register(struct net_device *netdev,
118 struct vlan_group *grp);
119static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
120static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
121static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
122
123static int ixgb_notify_reboot(struct notifier_block *, unsigned long event,
124 void *ptr);
125static int ixgb_suspend(struct pci_dev *pdev, uint32_t state);
126
127#ifdef CONFIG_NET_POLL_CONTROLLER
128/* for netdump / net console */
129static void ixgb_netpoll(struct net_device *dev);
130#endif
131
132struct notifier_block ixgb_notifier_reboot = {
133 .notifier_call = ixgb_notify_reboot,
134 .next = NULL,
135 .priority = 0
136};
137
138/* Exported from other modules */
139
140extern void ixgb_check_options(struct ixgb_adapter *adapter);
141
142static struct pci_driver ixgb_driver = {
143 .name = ixgb_driver_name,
144 .id_table = ixgb_pci_tbl,
145 .probe = ixgb_probe,
146 .remove = __devexit_p(ixgb_remove),
147 /* Power Management Hooks */
148 .suspend = NULL,
149 .resume = NULL
150};
151
152MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
153MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
154MODULE_LICENSE("GPL");
155
156/* some defines for controlling descriptor fetches in h/w */
157#define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefetch below this */
158#define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is
159 pushed this many descriptors from head */
160#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
161
162/**
163 * ixgb_init_module - Driver Registration Routine
164 *
165 * ixgb_init_module is the first routine called when the driver is
166 * loaded. All it does is register with the PCI subsystem.
167 **/
168
169static int __init
170ixgb_init_module(void)
171{
172 int ret;
173 printk(KERN_INFO "%s - version %s\n",
174 ixgb_driver_string, ixgb_driver_version);
175
176 printk(KERN_INFO "%s\n", ixgb_copyright);
177
178 ret = pci_module_init(&ixgb_driver);
179 if(ret >= 0) {
180 register_reboot_notifier(&ixgb_notifier_reboot);
181 }
182 return ret;
183}
184
185module_init(ixgb_init_module);
186
187/**
188 * ixgb_exit_module - Driver Exit Cleanup Routine
189 *
190 * ixgb_exit_module is called just before the driver is removed
191 * from memory.
192 **/
193
194static void __exit
195ixgb_exit_module(void)
196{
197 unregister_reboot_notifier(&ixgb_notifier_reboot);
198 pci_unregister_driver(&ixgb_driver);
199}
200
201module_exit(ixgb_exit_module);
202
203/**
204 * ixgb_irq_disable - Mask off interrupt generation on the NIC
205 * @adapter: board private structure
206 **/
207
208static inline void
209ixgb_irq_disable(struct ixgb_adapter *adapter)
210{
211 atomic_inc(&adapter->irq_sem);
212 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
213 IXGB_WRITE_FLUSH(&adapter->hw);
214 synchronize_irq(adapter->pdev->irq);
215}
216
217/**
218 * ixgb_irq_enable - Enable default interrupt generation settings
219 * @adapter: board private structure
220 **/
221
222static inline void
223ixgb_irq_enable(struct ixgb_adapter *adapter)
224{
225 if(atomic_dec_and_test(&adapter->irq_sem)) {
226 IXGB_WRITE_REG(&adapter->hw, IMS,
227 IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
228 IXGB_INT_RXO | IXGB_INT_LSC);
229 IXGB_WRITE_FLUSH(&adapter->hw);
230 }
231}
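/* irq_sem counts nested disable requests: ixgb_irq_disable() always masks
 * interrupts and bumps the count, while ixgb_irq_enable() unmasks them only
 * once the count drops back to zero, so the two calls nest safely. */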
232
233int
234ixgb_up(struct ixgb_adapter *adapter)
235{
236 struct net_device *netdev = adapter->netdev;
237 int err;
238 int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
239 struct ixgb_hw *hw = &adapter->hw;
240
241 /* hardware has been reset, we need to reload some things */
242
243 ixgb_set_multi(netdev);
244
245 ixgb_restore_vlan(adapter);
246
247 ixgb_configure_tx(adapter);
248 ixgb_setup_rctl(adapter);
249 ixgb_configure_rx(adapter);
250 ixgb_alloc_rx_buffers(adapter);
251
252#ifdef CONFIG_PCI_MSI
253 {
254 boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
255 IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE;
256 adapter->have_msi = TRUE;
257
258 if (!pcix)
259 adapter->have_msi = FALSE;
260 else if((err = pci_enable_msi(adapter->pdev))) {
261 printk (KERN_ERR
262 "Unable to allocate MSI interrupt Error: %d\n", err);
263 adapter->have_msi = FALSE;
264 /* proceed to try to request regular interrupt */
265 }
266 }
267
268#endif
269 if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
270 SA_SHIRQ | SA_SAMPLE_RANDOM,
271 netdev->name, netdev)))
272 return err;
273
274 /* disable interrupts and get the hardware into a known state */
275 IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
276
277 if((hw->max_frame_size != max_frame) ||
278 (hw->max_frame_size !=
279 (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
280
281 hw->max_frame_size = max_frame;
282
283 IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
284
285 if(hw->max_frame_size >
286 IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
287 uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);
288
289 if(!(ctrl0 & IXGB_CTRL0_JFE)) {
290 ctrl0 |= IXGB_CTRL0_JFE;
291 IXGB_WRITE_REG(hw, CTRL0, ctrl0);
292 }
293 }
294 }
295
296 mod_timer(&adapter->watchdog_timer, jiffies);
297 ixgb_irq_enable(adapter);
298
299#ifdef CONFIG_IXGB_NAPI
300 netif_poll_enable(netdev);
301#endif
302 return 0;
303}
304
305void
306ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
307{
308 struct net_device *netdev = adapter->netdev;
309
310 ixgb_irq_disable(adapter);
311 free_irq(adapter->pdev->irq, netdev);
312#ifdef CONFIG_PCI_MSI
313 if(adapter->have_msi == TRUE)
314 pci_disable_msi(adapter->pdev);
315
316#endif
317 if(kill_watchdog)
318 del_timer_sync(&adapter->watchdog_timer);
319#ifdef CONFIG_IXGB_NAPI
320 netif_poll_disable(netdev);
321#endif
322 adapter->link_speed = 0;
323 adapter->link_duplex = 0;
324 netif_carrier_off(netdev);
325 netif_stop_queue(netdev);
326
327 ixgb_reset(adapter);
328 ixgb_clean_tx_ring(adapter);
329 ixgb_clean_rx_ring(adapter);
330}
331
332void
333ixgb_reset(struct ixgb_adapter *adapter)
334{
335
336 ixgb_adapter_stop(&adapter->hw);
337 if(!ixgb_init_hw(&adapter->hw))
338 IXGB_DBG("ixgb_init_hw failed.\n");
339}
340
341/**
342 * ixgb_probe - Device Initialization Routine
343 * @pdev: PCI device information struct
344 * @ent: entry in ixgb_pci_tbl
345 *
346 * Returns 0 on success, negative on failure
347 *
348 * ixgb_probe initializes an adapter identified by a pci_dev structure.
349 * The OS initialization, configuring of the adapter private structure,
350 * and a hardware reset occur.
351 **/
352
353static int __devinit
354ixgb_probe(struct pci_dev *pdev,
355 const struct pci_device_id *ent)
356{
357 struct net_device *netdev = NULL;
358 struct ixgb_adapter *adapter;
359 static int cards_found = 0;
360 unsigned long mmio_start;
361 int mmio_len;
362 int pci_using_dac;
363 int i;
364 int err;
365
366 if((err = pci_enable_device(pdev)))
367 return err;
368
369 if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
370 pci_using_dac = 1;
371 } else {
372 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
373 IXGB_ERR("No usable DMA configuration, aborting\n");
374 return err;
375 }
376 pci_using_dac = 0;
377 }
378
379 if((err = pci_request_regions(pdev, ixgb_driver_name)))
380 return err;
381
382 pci_set_master(pdev);
383
384 netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
385 if(!netdev) {
386 err = -ENOMEM;
387 goto err_alloc_etherdev;
388 }
389
390 SET_MODULE_OWNER(netdev);
391 SET_NETDEV_DEV(netdev, &pdev->dev);
392
393 pci_set_drvdata(pdev, netdev);
394 adapter = netdev->priv;
395 adapter->netdev = netdev;
396 adapter->pdev = pdev;
397 adapter->hw.back = adapter;
398
399 mmio_start = pci_resource_start(pdev, BAR_0);
400 mmio_len = pci_resource_len(pdev, BAR_0);
401
402 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
403 if(!adapter->hw.hw_addr) {
404 err = -EIO;
405 goto err_ioremap;
406 }
407
408 for(i = BAR_1; i <= BAR_5; i++) {
409 if(pci_resource_len(pdev, i) == 0)
410 continue;
411 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
412 adapter->hw.io_base = pci_resource_start(pdev, i);
413 break;
414 }
415 }
416
417 netdev->open = &ixgb_open;
418 netdev->stop = &ixgb_close;
419 netdev->hard_start_xmit = &ixgb_xmit_frame;
420 netdev->get_stats = &ixgb_get_stats;
421 netdev->set_multicast_list = &ixgb_set_multi;
422 netdev->set_mac_address = &ixgb_set_mac;
423 netdev->change_mtu = &ixgb_change_mtu;
424 ixgb_set_ethtool_ops(netdev);
425 netdev->tx_timeout = &ixgb_tx_timeout;
426 netdev->watchdog_timeo = HZ;
427#ifdef CONFIG_IXGB_NAPI
428 netdev->poll = &ixgb_clean;
429 netdev->weight = 64;
430#endif
431 netdev->vlan_rx_register = ixgb_vlan_rx_register;
432 netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
433 netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
434#ifdef CONFIG_NET_POLL_CONTROLLER
435 netdev->poll_controller = ixgb_netpoll;
436#endif
437
438 netdev->mem_start = mmio_start;
439 netdev->mem_end = mmio_start + mmio_len;
440 netdev->base_addr = adapter->hw.io_base;
441
442 adapter->bd_number = cards_found;
443 adapter->link_speed = 0;
444 adapter->link_duplex = 0;
445
446 /* setup the private structure */
447
448 if((err = ixgb_sw_init(adapter)))
449 goto err_sw_init;
450
451 netdev->features = NETIF_F_SG |
452 NETIF_F_HW_CSUM |
453 NETIF_F_HW_VLAN_TX |
454 NETIF_F_HW_VLAN_RX |
455 NETIF_F_HW_VLAN_FILTER;
456#ifdef NETIF_F_TSO
457 netdev->features |= NETIF_F_TSO;
458#endif
459
460 if(pci_using_dac)
461 netdev->features |= NETIF_F_HIGHDMA;
462
463 /* make sure the EEPROM is good */
464
465 if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
466 printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
467 err = -EIO;
468 goto err_eeprom;
469 }
470
471 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
472
473 if(!is_valid_ether_addr(netdev->dev_addr)) {
474 err = -EIO;
475 goto err_eeprom;
476 }
477
478 adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
479
480 init_timer(&adapter->watchdog_timer);
481 adapter->watchdog_timer.function = &ixgb_watchdog;
482 adapter->watchdog_timer.data = (unsigned long)adapter;
483
484 INIT_WORK(&adapter->tx_timeout_task,
485 (void (*)(void *))ixgb_tx_timeout_task, netdev);
486
487 if((err = register_netdev(netdev)))
488 goto err_register;
489
490 /* we're going to reset, so assume we have no link for now */
491
492 netif_carrier_off(netdev);
493 netif_stop_queue(netdev);
494
495 printk(KERN_INFO "%s: Intel(R) PRO/10GbE Network Connection\n",
496 netdev->name);
497 ixgb_check_options(adapter);
498 /* reset the hardware with the new settings */
499
500 ixgb_reset(adapter);
501
502 cards_found++;
503 return 0;
504
505err_register:
506err_sw_init:
507err_eeprom:
508 iounmap(adapter->hw.hw_addr);
509err_ioremap:
510 free_netdev(netdev);
511err_alloc_etherdev:
512 pci_release_regions(pdev);
513 return err;
514}
515
516/**
517 * ixgb_remove - Device Removal Routine
518 * @pdev: PCI device information struct
519 *
520 * ixgb_remove is called by the PCI subsystem to alert the driver
521 * that it should release a PCI device. This could be caused by a
522 * Hot-Plug event, or because the driver is going to be removed from
523 * memory.
524 **/
525
526static void __devexit
527ixgb_remove(struct pci_dev *pdev)
528{
529 struct net_device *netdev = pci_get_drvdata(pdev);
530 struct ixgb_adapter *adapter = netdev->priv;
531
532 unregister_netdev(netdev);
533
534 iounmap(adapter->hw.hw_addr);
535 pci_release_regions(pdev);
536
537 free_netdev(netdev);
538}
539
540/**
541 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
542 * @adapter: board private structure to initialize
543 *
544 * ixgb_sw_init initializes the Adapter private data structure.
545 * Fields are initialized based on PCI device information and
546 * OS network device settings (MTU size).
547 **/
548
549static int __devinit
550ixgb_sw_init(struct ixgb_adapter *adapter)
551{
552 struct ixgb_hw *hw = &adapter->hw;
553 struct net_device *netdev = adapter->netdev;
554 struct pci_dev *pdev = adapter->pdev;
555
556 /* PCI config space info */
557
558 hw->vendor_id = pdev->vendor;
559 hw->device_id = pdev->device;
560 hw->subsystem_vendor_id = pdev->subsystem_vendor;
561 hw->subsystem_id = pdev->subsystem_device;
562
563 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
564
565 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
566
567 if((hw->device_id == IXGB_DEVICE_ID_82597EX)
568 ||(hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
569 ||(hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
570 hw->mac_type = ixgb_82597;
571 else {
572 /* should never have loaded on this device */
573 printk(KERN_ERR "ixgb: unsupported device id\n");
574 }
575
576 /* enable flow control to be programmed */
577 hw->fc.send_xon = 1;
578
579 atomic_set(&adapter->irq_sem, 1);
580 spin_lock_init(&adapter->tx_lock);
581
582 return 0;
583}
584
585/**
586 * ixgb_open - Called when a network interface is made active
587 * @netdev: network interface device structure
588 *
589 * Returns 0 on success, negative value on failure
590 *
591 * The open entry point is called when a network interface is made
592 * active by the system (IFF_UP). At this point all resources needed
593 * for transmit and receive operations are allocated, the interrupt
594 * handler is registered with the OS, the watchdog timer is started,
595 * and the stack is notified that the interface is ready.
596 **/
597
598static int
599ixgb_open(struct net_device *netdev)
600{
601 struct ixgb_adapter *adapter = netdev->priv;
602 int err;
603
604 /* allocate transmit descriptors */
605
606 if((err = ixgb_setup_tx_resources(adapter)))
607 goto err_setup_tx;
608
609 /* allocate receive descriptors */
610
611 if((err = ixgb_setup_rx_resources(adapter)))
612 goto err_setup_rx;
613
614 if((err = ixgb_up(adapter)))
615 goto err_up;
616
617 return 0;
618
619err_up:
620 ixgb_free_rx_resources(adapter);
621err_setup_rx:
622 ixgb_free_tx_resources(adapter);
623err_setup_tx:
624 ixgb_reset(adapter);
625
626 return err;
627}
628
629/**
630 * ixgb_close - Disables a network interface
631 * @netdev: network interface device structure
632 *
633 * Returns 0, this is not allowed to fail
634 *
635 * The close entry point is called when an interface is de-activated
636 * by the OS. The hardware is still under the drivers control, but
637 * needs to be disabled. A global MAC reset is issued to stop the
638 * hardware, and all transmit and receive resources are freed.
639 **/
640
641static int
642ixgb_close(struct net_device *netdev)
643{
644 struct ixgb_adapter *adapter = netdev->priv;
645
646 ixgb_down(adapter, TRUE);
647
648 ixgb_free_tx_resources(adapter);
649 ixgb_free_rx_resources(adapter);
650
651 return 0;
652}
653
654/**
655 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
656 * @adapter: board private structure
657 *
658 * Return 0 on success, negative on failure
659 **/
660
661int
662ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
663{
664 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
665 struct pci_dev *pdev = adapter->pdev;
666 int size;
667
668 size = sizeof(struct ixgb_buffer) * txdr->count;
669 txdr->buffer_info = vmalloc(size);
670 if(!txdr->buffer_info) {
671 return -ENOMEM;
672 }
673 memset(txdr->buffer_info, 0, size);
674
675 /* round up to nearest 4K */
676
677 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
678 IXGB_ROUNDUP(txdr->size, 4096);
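	/* e.g. 256 descriptors * sizeof(struct ixgb_tx_desc) = 4096 bytes
	 * stays 4096, while anything from 4097 to 8192 rounds up to 8192 */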
679
680 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
681 if(!txdr->desc) {
682 vfree(txdr->buffer_info);
683 return -ENOMEM;
684 }
685 memset(txdr->desc, 0, txdr->size);
686
687 txdr->next_to_use = 0;
688 txdr->next_to_clean = 0;
689
690 return 0;
691}
692
693/**
694 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
695 * @adapter: board private structure
696 *
697 * Configure the Tx unit of the MAC after a reset.
698 **/
699
700static void
701ixgb_configure_tx(struct ixgb_adapter *adapter)
702{
703 uint64_t tdba = adapter->tx_ring.dma;
704 uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
705 uint32_t tctl;
706 struct ixgb_hw *hw = &adapter->hw;
707
708 /* Setup the Base and Length of the Tx Descriptor Ring
709 * tx_ring.dma can be either a 32 or 64 bit value
710 */
711
712 IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
713 IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
714
715 IXGB_WRITE_REG(hw, TDLEN, tdlen);
716
717 /* Setup the HW Tx Head and Tail descriptor pointers */
718
719 IXGB_WRITE_REG(hw, TDH, 0);
720 IXGB_WRITE_REG(hw, TDT, 0);
721
722 /* don't set up txdctl, it induces performance problems if configured
723 * incorrectly */
724 /* Set the Tx Interrupt Delay register */
725
726 IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
727
728 /* Program the Transmit Control Register */
729
730 tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
731 IXGB_WRITE_REG(hw, TCTL, tctl);
732
733 /* Setup Transmit Descriptor Settings for this adapter */
734 adapter->tx_cmd_type =
735 IXGB_TX_DESC_TYPE
736 | (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
737}
738
739/**
740 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
741 * @adapter: board private structure
742 *
743 * Returns 0 on success, negative on failure
744 **/
745
746int
747ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
748{
749 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
750 struct pci_dev *pdev = adapter->pdev;
751 int size;
752
753 size = sizeof(struct ixgb_buffer) * rxdr->count;
754 rxdr->buffer_info = vmalloc(size);
755 if(!rxdr->buffer_info) {
756 return -ENOMEM;
757 }
758 memset(rxdr->buffer_info, 0, size);
759
760 /* Round up to nearest 4K */
761
762 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
763 IXGB_ROUNDUP(rxdr->size, 4096);
764
765 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
766
767 if(!rxdr->desc) {
768 vfree(rxdr->buffer_info);
769 return -ENOMEM;
770 }
771 memset(rxdr->desc, 0, rxdr->size);
772
773 rxdr->next_to_clean = 0;
774 rxdr->next_to_use = 0;
775
776 return 0;
777}
778
779/**
780 * ixgb_setup_rctl - configure the receive control register
781 * @adapter: Board private structure
782 **/
783
784static void
785ixgb_setup_rctl(struct ixgb_adapter *adapter)
786{
787 uint32_t rctl;
788
789 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
790
791 rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
792
793 rctl |=
794 IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
795 IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
796 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
797
798 rctl |= IXGB_RCTL_SECRC;
799
800 switch (adapter->rx_buffer_len) {
801 case IXGB_RXBUFFER_2048:
802 default:
803 rctl |= IXGB_RCTL_BSIZE_2048;
804 break;
805 case IXGB_RXBUFFER_4096:
806 rctl |= IXGB_RCTL_BSIZE_4096;
807 break;
808 case IXGB_RXBUFFER_8192:
809 rctl |= IXGB_RCTL_BSIZE_8192;
810 break;
811 case IXGB_RXBUFFER_16384:
812 rctl |= IXGB_RCTL_BSIZE_16384;
813 break;
814 }
815
816 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
817}
818
819/**
820 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
821 * @adapter: board private structure
822 *
823 * Configure the Rx unit of the MAC after a reset.
824 **/
825
826static void
827ixgb_configure_rx(struct ixgb_adapter *adapter)
828{
829 uint64_t rdba = adapter->rx_ring.dma;
830 uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
831 struct ixgb_hw *hw = &adapter->hw;
832 uint32_t rctl;
833 uint32_t rxcsum;
834 uint32_t rxdctl;
835
836 /* make sure receives are disabled while setting up the descriptors */
837
838 rctl = IXGB_READ_REG(hw, RCTL);
839 IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
840
841 /* set the Receive Delay Timer Register */
842
843 IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
844
845 /* Setup the Base and Length of the Rx Descriptor Ring */
846
847 IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
848 IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
849
850 IXGB_WRITE_REG(hw, RDLEN, rdlen);
851
852 /* Setup the HW Rx Head and Tail Descriptor Pointers */
853 IXGB_WRITE_REG(hw, RDH, 0);
854 IXGB_WRITE_REG(hw, RDT, 0);
855
856 /* set up pre-fetching of receive buffers so we get some before we
857 * run out (default hardware behavior is to run out before fetching
858 * more). This sets up to fetch if HTHRESH rx descriptors are avail
859 * and the descriptors in hw cache are below PTHRESH. This avoids
860 * the hardware behavior of fetching <=512 descriptors in a single
861 * burst that pre-empts all other activity, usually causing fifo
862 * overflows. */
863 /* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
864 rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
865 RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
866 RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
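	/* with the defaults above (PTHRESH = 128, HTHRESH = 16, WTHRESH = 16)
	 * this evaluates to 0x80 | (0x10 << 9) | (0x10 << 18) = 0x00402080 */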
867 IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
868
869 /* Enable Receive Checksum Offload for TCP and UDP */
870 if(adapter->rx_csum == TRUE) {
871 rxcsum = IXGB_READ_REG(hw, RXCSUM);
872 rxcsum |= IXGB_RXCSUM_TUOFL;
873 IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
874 }
875
876 /* Enable Receives */
877
878 IXGB_WRITE_REG(hw, RCTL, rctl);
879}
880
881/**
882 * ixgb_free_tx_resources - Free Tx Resources
883 * @adapter: board private structure
884 *
885 * Free all transmit software resources
886 **/
887
888void
889ixgb_free_tx_resources(struct ixgb_adapter *adapter)
890{
891 struct pci_dev *pdev = adapter->pdev;
892
893 ixgb_clean_tx_ring(adapter);
894
895 vfree(adapter->tx_ring.buffer_info);
896 adapter->tx_ring.buffer_info = NULL;
897
898 pci_free_consistent(pdev, adapter->tx_ring.size,
899 adapter->tx_ring.desc, adapter->tx_ring.dma);
900
901 adapter->tx_ring.desc = NULL;
902}
903
904static inline void
905ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
906 struct ixgb_buffer *buffer_info)
907{
908 struct pci_dev *pdev = adapter->pdev;
909 if(buffer_info->dma) {
910 pci_unmap_page(pdev,
911 buffer_info->dma,
912 buffer_info->length,
913 PCI_DMA_TODEVICE);
914 buffer_info->dma = 0;
915 }
916 if(buffer_info->skb) {
917 dev_kfree_skb_any(buffer_info->skb);
918 buffer_info->skb = NULL;
919 }
920}
921
922/**
923 * ixgb_clean_tx_ring - Free Tx Buffers
924 * @adapter: board private structure
925 **/
926
927static void
928ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
929{
930 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
931 struct ixgb_buffer *buffer_info;
932 unsigned long size;
933 unsigned int i;
934
935 /* Free all the Tx ring sk_buffs */
936
937 for(i = 0; i < tx_ring->count; i++) {
938 buffer_info = &tx_ring->buffer_info[i];
939 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
940 }
941
942 size = sizeof(struct ixgb_buffer) * tx_ring->count;
943 memset(tx_ring->buffer_info, 0, size);
944
945 /* Zero out the descriptor ring */
946
947 memset(tx_ring->desc, 0, tx_ring->size);
948
949 tx_ring->next_to_use = 0;
950 tx_ring->next_to_clean = 0;
951
952 IXGB_WRITE_REG(&adapter->hw, TDH, 0);
953 IXGB_WRITE_REG(&adapter->hw, TDT, 0);
954}
955
956/**
957 * ixgb_free_rx_resources - Free Rx Resources
958 * @adapter: board private structure
959 *
960 * Free all receive software resources
961 **/
962
963void
964ixgb_free_rx_resources(struct ixgb_adapter *adapter)
965{
966 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
967 struct pci_dev *pdev = adapter->pdev;
968
969 ixgb_clean_rx_ring(adapter);
970
971 vfree(rx_ring->buffer_info);
972 rx_ring->buffer_info = NULL;
973
974 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
975
976 rx_ring->desc = NULL;
977}
978
979/**
980 * ixgb_clean_rx_ring - Free Rx Buffers
981 * @adapter: board private structure
982 **/
983
984static void
985ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
986{
987 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
988 struct ixgb_buffer *buffer_info;
989 struct pci_dev *pdev = adapter->pdev;
990 unsigned long size;
991 unsigned int i;
992
993 /* Free all the Rx ring sk_buffs */
994
995 for(i = 0; i < rx_ring->count; i++) {
996 buffer_info = &rx_ring->buffer_info[i];
997 if(buffer_info->skb) {
998
999 pci_unmap_single(pdev,
1000 buffer_info->dma,
1001 buffer_info->length,
1002 PCI_DMA_FROMDEVICE);
1003
1004 dev_kfree_skb(buffer_info->skb);
1005
1006 buffer_info->skb = NULL;
1007 }
1008 }
1009
1010 size = sizeof(struct ixgb_buffer) * rx_ring->count;
1011 memset(rx_ring->buffer_info, 0, size);
1012
1013 /* Zero out the descriptor ring */
1014
1015 memset(rx_ring->desc, 0, rx_ring->size);
1016
1017 rx_ring->next_to_clean = 0;
1018 rx_ring->next_to_use = 0;
1019
1020 IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1021 IXGB_WRITE_REG(&adapter->hw, RDT, 0);
1022}
1023
1024/**
1025 * ixgb_set_mac - Change the Ethernet Address of the NIC
1026 * @netdev: network interface device structure
1027 * @p: pointer to an address structure
1028 *
1029 * Returns 0 on success, negative on failure
1030 **/
1031
1032static int
1033ixgb_set_mac(struct net_device *netdev, void *p)
1034{
1035 struct ixgb_adapter *adapter = netdev->priv;
1036 struct sockaddr *addr = p;
1037
1038 if(!is_valid_ether_addr(addr->sa_data))
1039 return -EADDRNOTAVAIL;
1040
1041 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1042
1043 ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
1044
1045 return 0;
1046}
1047
1048/**
1049 * ixgb_set_multi - Multicast and Promiscuous mode set
1050 * @netdev: network interface device structure
1051 *
1052 * The set_multi entry point is called whenever the multicast address
1053 * list or the network interface flags are updated. This routine is
1054 * responsible for configuring the hardware for proper multicast,
1055 * promiscuous mode, and all-multi behavior.
1056 **/
1057
1058static void
1059ixgb_set_multi(struct net_device *netdev)
1060{
1061 struct ixgb_adapter *adapter = netdev->priv;
1062 struct ixgb_hw *hw = &adapter->hw;
1063 struct dev_mc_list *mc_ptr;
1064 uint32_t rctl;
1065 int i;
1066
1067 /* Check for Promiscuous and All Multicast modes */
1068
1069 rctl = IXGB_READ_REG(hw, RCTL);
1070
1071 if(netdev->flags & IFF_PROMISC) {
1072 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1073 } else if(netdev->flags & IFF_ALLMULTI) {
1074 rctl |= IXGB_RCTL_MPE;
1075 rctl &= ~IXGB_RCTL_UPE;
1076 } else {
1077 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1078 }
1079
1080 if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
1081 rctl |= IXGB_RCTL_MPE;
1082 IXGB_WRITE_REG(hw, RCTL, rctl);
1083 } else {
1084 uint8_t mta[netdev->mc_count * IXGB_ETH_LENGTH_OF_ADDRESS];
1085
1086 IXGB_WRITE_REG(hw, RCTL, rctl);
1087
1088 for(i = 0, mc_ptr = netdev->mc_list; mc_ptr;
1089 i++, mc_ptr = mc_ptr->next)
1090 memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
1091 mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
1092
1093 ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
1094 }
1095}
1096
1097/**
1098 * ixgb_watchdog - Timer Call-back
1099 * @data: pointer to netdev cast into an unsigned long
1100 **/
1101
1102static void
1103ixgb_watchdog(unsigned long data)
1104{
1105 struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
1106 struct net_device *netdev = adapter->netdev;
1107 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
1108
1109 ixgb_check_for_link(&adapter->hw);
1110
1111 if (ixgb_check_for_bad_link(&adapter->hw)) {
1112 /* force the reset path */
1113 netif_stop_queue(netdev);
1114 }
1115
1116 if(adapter->hw.link_up) {
1117 if(!netif_carrier_ok(netdev)) {
1118 printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n",
1119 netdev->name, 10000, "Full Duplex");
1120 adapter->link_speed = 10000;
1121 adapter->link_duplex = FULL_DUPLEX;
1122 netif_carrier_on(netdev);
1123 netif_wake_queue(netdev);
1124 }
1125 } else {
1126 if(netif_carrier_ok(netdev)) {
1127 adapter->link_speed = 0;
1128 adapter->link_duplex = 0;
1129 printk(KERN_INFO
1130 "ixgb: %s NIC Link is Down\n",
1131 netdev->name);
1132 netif_carrier_off(netdev);
1133 netif_stop_queue(netdev);
1134
1135 }
1136 }
1137
1138 ixgb_update_stats(adapter);
1139
1140 if(!netif_carrier_ok(netdev)) {
1141 if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
1142 /* We've lost link, so the controller stops DMA,
1143 * but we've got queued Tx work that's never going
1144 * to get done, so reset controller to flush Tx.
1145 * (Do the reset outside of interrupt context). */
1146 schedule_work(&adapter->tx_timeout_task);
1147 }
1148 }
1149
1150 /* Force detection of hung controller every watchdog period */
1151 adapter->detect_tx_hung = TRUE;
1152
1153 /* generate an interrupt to force clean up of any stragglers */
1154 IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
1155
1156 /* Reset the timer */
1157 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1158}
1159
1160#define IXGB_TX_FLAGS_CSUM 0x00000001
1161#define IXGB_TX_FLAGS_VLAN 0x00000002
1162#define IXGB_TX_FLAGS_TSO 0x00000004
1163
1164static inline int
1165ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1166{
1167#ifdef NETIF_F_TSO
1168 struct ixgb_context_desc *context_desc;
1169 unsigned int i;
1170 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
1171 uint16_t ipcse, tucse, mss;
1172 int err;
1173
1174 if(likely(skb_shinfo(skb)->tso_size)) {
1175 if (skb_header_cloned(skb)) {
1176 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1177 if (err)
1178 return err;
1179 }
1180
1181 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
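		/* i.e. the MAC + IP headers plus the TCP header; th->doff
		 * counts 32-bit words, hence the shift by two */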
1182 mss = skb_shinfo(skb)->tso_size;
1183 skb->nh.iph->tot_len = 0;
1184 skb->nh.iph->check = 0;
1185 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
1186 skb->nh.iph->daddr,
1187 0, IPPROTO_TCP, 0);
1188 ipcss = skb->nh.raw - skb->data;
1189 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1190 ipcse = skb->h.raw - skb->data - 1;
1191 tucss = skb->h.raw - skb->data;
1192 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
1193 tucse = 0;
1194
1195 i = adapter->tx_ring.next_to_use;
1196 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1197
1198 context_desc->ipcss = ipcss;
1199 context_desc->ipcso = ipcso;
1200 context_desc->ipcse = cpu_to_le16(ipcse);
1201 context_desc->tucss = tucss;
1202 context_desc->tucso = tucso;
1203 context_desc->tucse = cpu_to_le16(tucse);
1204 context_desc->mss = cpu_to_le16(mss);
1205 context_desc->hdr_len = hdr_len;
1206 context_desc->status = 0;
1207 context_desc->cmd_type_len = cpu_to_le32(
1208 IXGB_CONTEXT_DESC_TYPE
1209 | IXGB_CONTEXT_DESC_CMD_TSE
1210 | IXGB_CONTEXT_DESC_CMD_IP
1211 | IXGB_CONTEXT_DESC_CMD_TCP
1212 | IXGB_CONTEXT_DESC_CMD_RS
1213 | IXGB_CONTEXT_DESC_CMD_IDE
1214 | (skb->len - (hdr_len)));
1215
1216 if(++i == adapter->tx_ring.count) i = 0;
1217 adapter->tx_ring.next_to_use = i;
1218
1219 return 1;
1220 }
1221#endif
1222
1223 return 0;
1224}
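
The context descriptor programmed above carries nothing but byte offsets measured from the start of the frame data: ipcss/ipcse bracket the IP header, ipcso points at the IP checksum field, tucss/tucso locate the TCP header and its checksum field, and hdr_len is the header portion replicated in every segment. The stand-alone sketch below reproduces that arithmetic for an assumed untagged Ethernet II + IPv4 (no options) + TCP frame; the constants are standard header sizes, not driver defines.

/* Stand-alone sketch of the offsets programmed into the TSO context
 * descriptor by ixgb_tso().  Assumes an untagged Ethernet II + IPv4 (no
 * options) + TCP frame; the constants are standard header sizes, not
 * driver defines. */
#include <stdio.h>

int main(void)
{
	const unsigned eth_hlen = 14;	/* MAC header */
	const unsigned ip_hlen = 20;	/* IPv4 header, no options */
	const unsigned tcp_doff = 5;	/* TCP data offset, in 32-bit words */

	unsigned ipcss = eth_hlen;			/* IP checksum start */
	unsigned ipcso = eth_hlen + 10;			/* offset of iph->check */
	unsigned ipcse = eth_hlen + ip_hlen - 1;	/* IP checksum end, inclusive */
	unsigned tucss = eth_hlen + ip_hlen;		/* TCP checksum start */
	unsigned tucso = tucss + 16;			/* offset of th->check */
	unsigned hdr_len = tucss + (tcp_doff << 2);	/* headers copied per segment */

	printf("ipcss=%u ipcso=%u ipcse=%u tucss=%u tucso=%u hdr_len=%u\n",
	       ipcss, ipcso, ipcse, tucss, tucso, hdr_len);
	return 0;
}
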
1225
1226static inline boolean_t
1227ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1228{
1229 struct ixgb_context_desc *context_desc;
1230 unsigned int i;
1231 uint8_t css, cso;
1232
1233 if(likely(skb->ip_summed == CHECKSUM_HW)) {
1234 css = skb->h.raw - skb->data;
1235 cso = (skb->h.raw + skb->csum) - skb->data;
1236
1237 i = adapter->tx_ring.next_to_use;
1238 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1239
1240 context_desc->tucss = css;
1241 context_desc->tucso = cso;
1242 context_desc->tucse = 0;
1243 /* zero out any previously existing data in one instruction */
1244 *(uint32_t *)&(context_desc->ipcss) = 0;
1245 context_desc->status = 0;
1246 context_desc->hdr_len = 0;
1247 context_desc->mss = 0;
1248 context_desc->cmd_type_len =
1249 cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
1250 | IXGB_TX_DESC_CMD_RS
1251 | IXGB_TX_DESC_CMD_IDE);
1252
1253 if(++i == adapter->tx_ring.count) i = 0;
1254 adapter->tx_ring.next_to_use = i;
1255
1256 return TRUE;
1257 }
1258
1259 return FALSE;
1260}
1261
1262#define IXGB_MAX_TXD_PWR 14
1263#define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
1264
1265static inline int
1266ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1267 unsigned int first)
1268{
1269 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1270 struct ixgb_buffer *buffer_info;
1271 int len = skb->len;
1272 unsigned int offset = 0, size, count = 0, i;
1273
1274 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1275 unsigned int f;
1276 len -= skb->data_len;
1277
1278 i = tx_ring->next_to_use;
1279
1280 while(len) {
1281 buffer_info = &tx_ring->buffer_info[i];
1282 size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
1283 buffer_info->length = size;
1284 buffer_info->dma =
1285 pci_map_single(adapter->pdev,
1286 skb->data + offset,
1287 size,
1288 PCI_DMA_TODEVICE);
1289 buffer_info->time_stamp = jiffies;
1290
1291 len -= size;
1292 offset += size;
1293 count++;
1294 if(++i == tx_ring->count) i = 0;
1295 }
1296
1297 for(f = 0; f < nr_frags; f++) {
1298 struct skb_frag_struct *frag;
1299
1300 frag = &skb_shinfo(skb)->frags[f];
1301 len = frag->size;
1302 offset = 0;
1303
1304 while(len) {
1305 buffer_info = &tx_ring->buffer_info[i];
1306 size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
1307 buffer_info->length = size;
1308 buffer_info->dma =
1309 pci_map_page(adapter->pdev,
1310 frag->page,
1311 frag->page_offset + offset,
1312 size,
1313 PCI_DMA_TODEVICE);
1314 buffer_info->time_stamp = jiffies;
1315
1316 len -= size;
1317 offset += size;
1318 count++;
1319 if(++i == tx_ring->count) i = 0;
1320 }
1321 }
1322 i = (i == 0) ? tx_ring->count - 1 : i - 1;
1323 tx_ring->buffer_info[i].skb = skb;
1324 tx_ring->buffer_info[first].next_to_watch = i;
1325
1326 return count;
1327}
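
ixgb_tx_map() carves the linear part of the skb, and then each page fragment, into chunks no larger than one descriptor can describe, wrapping the ring index as it goes. Below is a minimal sketch of just that carving and wrapping arithmetic (no DMA mapping or skb handling; the 16 KB limit mirrors IXGB_MAX_DATA_PER_TXD but is hard-coded here).

/* Stand-alone sketch of the length-splitting / index-wrapping pattern used
 * in ixgb_tx_map().  No DMA or skb handling; purely the ring arithmetic. */
#include <stdio.h>

#define MAX_PER_DESC (1 << 14)	/* mirrors IXGB_MAX_DATA_PER_TXD */

static unsigned map_buffer(unsigned len, unsigned ring_count, unsigned *i)
{
	unsigned count = 0;

	while (len) {
		unsigned size = len < MAX_PER_DESC ? len : MAX_PER_DESC;

		/* here the driver fills buffer_info[*i] and maps the chunk */
		len -= size;
		count++;
		if (++*i == ring_count)	/* wrap the ring index */
			*i = 0;
	}
	return count;
}

int main(void)
{
	unsigned i = 254;	/* pretend next_to_use is near the ring end */

	printf("descriptors used: %u, next index: %u\n",
	       map_buffer(40000, 256, &i), i);	/* 40000 bytes -> 3 descriptors */
	return 0;
}
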
1328
1329static inline void
1330ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1331{
1332 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1333 struct ixgb_tx_desc *tx_desc = NULL;
1334 struct ixgb_buffer *buffer_info;
1335 uint32_t cmd_type_len = adapter->tx_cmd_type;
1336 uint8_t status = 0;
1337 uint8_t popts = 0;
1338 unsigned int i;
1339
1340 if(tx_flags & IXGB_TX_FLAGS_TSO) {
1341 cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
1342 popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
1343 }
1344
1345 if(tx_flags & IXGB_TX_FLAGS_CSUM)
1346 popts |= IXGB_TX_DESC_POPTS_TXSM;
1347
1348 if(tx_flags & IXGB_TX_FLAGS_VLAN) {
1349 cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1350 }
1351
1352 i = tx_ring->next_to_use;
1353
1354 while(count--) {
1355 buffer_info = &tx_ring->buffer_info[i];
1356 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1357 tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1358 tx_desc->cmd_type_len =
1359 cpu_to_le32(cmd_type_len | buffer_info->length);
1360 tx_desc->status = status;
1361 tx_desc->popts = popts;
1362 tx_desc->vlan = cpu_to_le16(vlan_id);
1363
1364 if(++i == tx_ring->count) i = 0;
1365 }
1366
1367 tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
1368 | IXGB_TX_DESC_CMD_RS );
1369
1370 /* Force memory writes to complete before letting h/w
1371 * know there are new descriptors to fetch. (Only
1372 * applicable for weak-ordered memory model archs,
1373 * such as IA-64). */
1374 wmb();
1375
1376 tx_ring->next_to_use = i;
1377 IXGB_WRITE_REG(&adapter->hw, TDT, i);
1378}
1379
1380/* Tx Descriptors needed, worst case */
1381#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
1382 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
1383#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
1384 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
1385
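
TXD_USE_COUNT() is a ceiling division by the per-descriptor data limit, and DESC_NEEDED is the worst case for a single frame: one maximally filled descriptor for the linear data, one per possible page fragment, plus a spare. A quick stand-alone check of that arithmetic follows; the page size and MAX_SKB_FRAGS value below are assumptions for a typical 4 KB-page build, not values taken from this diff.

/* Sanity check of the TXD_USE_COUNT()/DESC_NEEDED arithmetic. */
#include <stdio.h>

#define MAX_TXD_PWR	14
#define MAX_DATA	(1u << MAX_TXD_PWR)	/* 16384 bytes per descriptor */
#define USE_COUNT(S)	(((S) >> MAX_TXD_PWR) + (((S) & (MAX_DATA - 1)) ? 1 : 0))

int main(void)
{
	unsigned page_size = 4096;	/* assumption: 4 KB pages */
	unsigned max_frags = 18;	/* assumption: typical MAX_SKB_FRAGS */

	printf("16384 bytes -> %u descriptor(s)\n", (unsigned)USE_COUNT(16384u));
	printf("16385 bytes -> %u descriptor(s)\n", (unsigned)USE_COUNT(16385u));
	printf("worst case for one frame: %u descriptors\n",
	       (unsigned)(USE_COUNT(MAX_DATA) + max_frags * USE_COUNT(page_size) + 1));
	return 0;
}
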
1386static int
1387ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1388{
1389 struct ixgb_adapter *adapter = netdev->priv;
1390 unsigned int first;
1391 unsigned int tx_flags = 0;
1392 unsigned long flags;
1393 int vlan_id = 0;
1394 int tso;
1395
1396 if(skb->len <= 0) {
1397 dev_kfree_skb_any(skb);
1398 return 0;
1399 }
1400
1401 spin_lock_irqsave(&adapter->tx_lock, flags);
1402 if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
1403 netif_stop_queue(netdev);
1404 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1405 return 1;
1406 }
1407 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1408
1409 if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
1410 tx_flags |= IXGB_TX_FLAGS_VLAN;
1411 vlan_id = vlan_tx_tag_get(skb);
1412 }
1413
1414 first = adapter->tx_ring.next_to_use;
1415
1416 tso = ixgb_tso(adapter, skb);
1417 if (tso < 0) {
1418 dev_kfree_skb_any(skb);
1419 return NETDEV_TX_OK;
1420 }
1421
1422 if (tso)
1423 tx_flags |= IXGB_TX_FLAGS_TSO;
1424 else if(ixgb_tx_csum(adapter, skb))
1425 tx_flags |= IXGB_TX_FLAGS_CSUM;
1426
1427 ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
1428 tx_flags);
1429
1430 netdev->trans_start = jiffies;
1431
1432 return 0;
1433}
1434
1435/**
1436 * ixgb_tx_timeout - Respond to a Tx Hang
1437 * @netdev: network interface device structure
1438 **/
1439
1440static void
1441ixgb_tx_timeout(struct net_device *netdev)
1442{
1443 struct ixgb_adapter *adapter = netdev->priv;
1444
1445 /* Do the reset outside of interrupt context */
1446 schedule_work(&adapter->tx_timeout_task);
1447}
1448
1449static void
1450ixgb_tx_timeout_task(struct net_device *netdev)
1451{
1452 struct ixgb_adapter *adapter = netdev->priv;
1453
1454 ixgb_down(adapter, TRUE);
1455 ixgb_up(adapter);
1456}
1457
1458/**
1459 * ixgb_get_stats - Get System Network Statistics
1460 * @netdev: network interface device structure
1461 *
1462 * Returns the address of the device statistics structure.
1463 * The statistics are actually updated from the timer callback.
1464 **/
1465
1466static struct net_device_stats *
1467ixgb_get_stats(struct net_device *netdev)
1468{
1469 struct ixgb_adapter *adapter = netdev->priv;
1470
1471 return &adapter->net_stats;
1472}
1473
1474/**
1475 * ixgb_change_mtu - Change the Maximum Transfer Unit
1476 * @netdev: network interface device structure
1477 * @new_mtu: new value for maximum frame size
1478 *
1479 * Returns 0 on success, negative on failure
1480 **/
1481
1482static int
1483ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1484{
1485 struct ixgb_adapter *adapter = netdev->priv;
1486 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1487 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1488
1489
1490 if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
1491 || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
1492 IXGB_ERR("Invalid MTU setting\n");
1493 return -EINVAL;
1494 }
1495
1496 if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
1497 || (max_frame <= IXGB_RXBUFFER_2048)) {
1498 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
1499
1500 } else if(max_frame <= IXGB_RXBUFFER_4096) {
1501 adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
1502
1503 } else if(max_frame <= IXGB_RXBUFFER_8192) {
1504 adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
1505
1506 } else {
1507 adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
1508 }
1509
1510 netdev->mtu = new_mtu;
1511
1512 if(old_max_frame != max_frame && netif_running(netdev)) {
1513
1514 ixgb_down(adapter, TRUE);
1515 ixgb_up(adapter);
1516 }
1517
1518 return 0;
1519}
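
The chain of tests above simply rounds the new maximum frame size up to the next of the four receive buffer lengths the driver allocates. A simplified sketch of the same mapping (the 14-byte MAC header, 4-byte FCS and the 2048/4096/8192/16384 buffer sizes are written out literally here rather than using the driver's defines):

/* Sketch of the MTU -> receive buffer length selection in ixgb_change_mtu().
 * Header/FCS lengths and buffer sizes are hard-coded assumptions. */
#include <stdio.h>

static int rx_buffer_len(int mtu)
{
	int max_frame = mtu + 14 + 4;	/* MAC header + FCS */

	if (max_frame <= 2048)
		return 2048;
	if (max_frame <= 4096)
		return 4096;
	if (max_frame <= 8192)
		return 8192;
	return 16384;
}

int main(void)
{
	printf("MTU 1500 -> %d byte buffers\n", rx_buffer_len(1500));
	printf("MTU 9000 -> %d byte buffers\n", rx_buffer_len(9000));
	return 0;
}
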
1520
1521/**
1522 * ixgb_update_stats - Update the board statistics counters.
1523 * @adapter: board private structure
1524 **/
1525
1526void
1527ixgb_update_stats(struct ixgb_adapter *adapter)
1528{
1529 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1530 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1531 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1532 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1533 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1534 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1535 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1536 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1537 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1538 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1539 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1540 adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1541 adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1542 adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1543 adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1544 adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1545 adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1546 adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1547 adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1548 adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1549 adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1550 adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1551 adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1552 adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1553 adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1554 adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1555 adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1556 adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1557 adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1558 adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1559 adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1560 adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1561 adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1562 adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1563 adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1564 adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1565 adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1566 adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1567 adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1568 adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1569 adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1570 adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1571 adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1572 adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1573 adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1574 adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1575 adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1576 adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1577 adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1578 adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1579 adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1580 adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1581 adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1582 adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1583 adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1584 adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1585 adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1586 adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1587 adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1588 adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
1589
1590 /* Fill out the OS statistics structure */
1591
1592 adapter->net_stats.rx_packets = adapter->stats.gprcl;
1593 adapter->net_stats.tx_packets = adapter->stats.gptcl;
1594 adapter->net_stats.rx_bytes = adapter->stats.gorcl;
1595 adapter->net_stats.tx_bytes = adapter->stats.gotcl;
1596 adapter->net_stats.multicast = adapter->stats.mprcl;
1597 adapter->net_stats.collisions = 0;
1598
1599 /* ignore RLEC as it reports errors for padded (<64bytes) frames
1600 * with a length in the type/len field */
1601 adapter->net_stats.rx_errors =
1602 /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1603 adapter->stats.ruc +
1604 adapter->stats.roc /*+ adapter->stats.rlec */ +
1605 adapter->stats.icbc +
1606 adapter->stats.ecbc + adapter->stats.mpc;
1607
1608 adapter->net_stats.rx_dropped = adapter->stats.mpc;
1609
1610 /* see above
1611 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
1612 */
1613
1614 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
1615 adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
1616 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
1617 adapter->net_stats.rx_over_errors = adapter->stats.mpc;
1618
1619 adapter->net_stats.tx_errors = 0;
1620 adapter->net_stats.rx_frame_errors = 0;
1621 adapter->net_stats.tx_aborted_errors = 0;
1622 adapter->net_stats.tx_carrier_errors = 0;
1623 adapter->net_stats.tx_fifo_errors = 0;
1624 adapter->net_stats.tx_heartbeat_errors = 0;
1625 adapter->net_stats.tx_window_errors = 0;
1626}
1627
1628#define IXGB_MAX_INTR 10
1629/**
1630 * ixgb_intr - Interrupt Handler
1631 * @irq: interrupt number
1632 * @data: pointer to a network interface device structure
1633 * @regs: CPU registers structure
1634 **/
1635
1636static irqreturn_t
1637ixgb_intr(int irq, void *data, struct pt_regs *regs)
1638{
1639 struct net_device *netdev = data;
1640 struct ixgb_adapter *adapter = netdev->priv;
1641 struct ixgb_hw *hw = &adapter->hw;
1642 uint32_t icr = IXGB_READ_REG(hw, ICR);
1643#ifndef CONFIG_IXGB_NAPI
1644 unsigned int i;
1645#endif
1646
1647 if(unlikely(!icr))
1648 return IRQ_NONE; /* Not our interrupt */
1649
1650 if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
1651 mod_timer(&adapter->watchdog_timer, jiffies);
1652 }
1653
1654#ifdef CONFIG_IXGB_NAPI
1655 if(netif_rx_schedule_prep(netdev)) {
1656
1657 /* Disable interrupts and register for poll. The flush
1658 of the posted write is intentionally left out.
1659 */
1660
1661 atomic_inc(&adapter->irq_sem);
1662 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1663 __netif_rx_schedule(netdev);
1664 }
1665#else
1666	/* yes, that is deliberately a bitwise & so that every pass
1667	 * through this for loop checks both the receive and transmit
1668	 * queues for completed descriptors; this avoids starvation
1669	 * and helps tx/rx fairness. */
1670 for(i = 0; i < IXGB_MAX_INTR; i++)
1671 if(!ixgb_clean_rx_irq(adapter) &
1672 !ixgb_clean_tx_irq(adapter))
1673 break;
1674#endif
1675 return IRQ_HANDLED;
1676}
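
The comment in the non-NAPI path is worth taking literally: the bitwise & guarantees that both cleanup routines run on every pass, whereas a logical && would short-circuit and skip the Tx cleanup whenever the Rx cleanup reported work done. A tiny stand-alone demonstration of that difference, using plain stand-in functions:

/* Why the loop in ixgb_intr() uses '&' rather than '&&': with the bitwise
 * form both cleanup calls are evaluated on every pass; the logical form
 * skips Tx cleanup whenever Rx cleanup reported work done. */
#include <stdio.h>

static int calls_rx, calls_tx;

static int clean_rx(void) { calls_rx++; return 1; }	/* "cleaned something" */
static int clean_tx(void) { calls_tx++; return 0; }	/* "nothing cleaned" */

int main(void)
{
	int done;

	calls_rx = calls_tx = 0;
	done = !clean_rx() & !clean_tx();	/* both always run */
	printf("bitwise &:  rx called %d, tx called %d, done=%d\n",
	       calls_rx, calls_tx, done);

	calls_rx = calls_tx = 0;
	done = !clean_rx() && !clean_tx();	/* tx skipped: !1 short-circuits */
	printf("logical &&: rx called %d, tx called %d, done=%d\n",
	       calls_rx, calls_tx, done);
	return 0;
}
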
1677
1678#ifdef CONFIG_IXGB_NAPI
1679/**
1680 * ixgb_clean - NAPI Rx polling callback
1681 * @netdev: network interface device structure
1682 **/
1683
1684static int
1685ixgb_clean(struct net_device *netdev, int *budget)
1686{
1687 struct ixgb_adapter *adapter = netdev->priv;
1688 int work_to_do = min(*budget, netdev->quota);
1689 int tx_cleaned;
1690 int work_done = 0;
1691
1692 tx_cleaned = ixgb_clean_tx_irq(adapter);
1693 ixgb_clean_rx_irq(adapter, &work_done, work_to_do);
1694
1695 *budget -= work_done;
1696 netdev->quota -= work_done;
1697
1698 /* if no Tx and not enough Rx work done, exit the polling mode */
1699 if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
1700 netif_rx_complete(netdev);
1701 ixgb_irq_enable(adapter);
1702 return 0;
1703 }
1704
1705 return 1;
1706}
1707#endif
1708
1709/**
1710 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
1711 * @adapter: board private structure
1712 **/
1713
1714static boolean_t
1715ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1716{
1717 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1718 struct net_device *netdev = adapter->netdev;
1719 struct ixgb_tx_desc *tx_desc, *eop_desc;
1720 struct ixgb_buffer *buffer_info;
1721 unsigned int i, eop;
1722 boolean_t cleaned = FALSE;
1723
1724 i = tx_ring->next_to_clean;
1725 eop = tx_ring->buffer_info[i].next_to_watch;
1726 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1727
1728 while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1729
1730 for(cleaned = FALSE; !cleaned; ) {
1731 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1732 buffer_info = &tx_ring->buffer_info[i];
1733
1734 if (tx_desc->popts
1735 & (IXGB_TX_DESC_POPTS_TXSM |
1736 IXGB_TX_DESC_POPTS_IXSM))
1737 adapter->hw_csum_tx_good++;
1738
1739 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1740
1741 *(uint32_t *)&(tx_desc->status) = 0;
1742
1743 cleaned = (i == eop);
1744 if(++i == tx_ring->count) i = 0;
1745 }
1746
1747 eop = tx_ring->buffer_info[i].next_to_watch;
1748 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1749 }
1750
1751 tx_ring->next_to_clean = i;
1752
1753 spin_lock(&adapter->tx_lock);
1754 if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
1755 (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
1756
1757 netif_wake_queue(netdev);
1758 }
1759 spin_unlock(&adapter->tx_lock);
1760
1761 if(adapter->detect_tx_hung) {
1762		/* detect a transmit hang in hardware; this serializes the
1763		 * check with the clearing of time_stamp and movement of i */
1764 adapter->detect_tx_hung = FALSE;
1765 if(tx_ring->buffer_info[i].dma &&
1766 time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
1767 && !(IXGB_READ_REG(&adapter->hw, STATUS) &
1768 IXGB_STATUS_TXOFF))
1769 netif_stop_queue(netdev);
1770 }
1771
1772 return cleaned;
1773}
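
Transmit cleanup advances frame by frame: the first buffer of each frame records (in next_to_watch) the index of the frame's last descriptor, the inner loop releases buffers up to and including that slot, and the ring index wraps as needed. A ring-only sketch of that walk, with an array of integers standing in for the descriptors; the "hardware finished slots 0..5" state is invented for the example.

/* Ring-walk pattern of ixgb_clean_tx_irq(): release buffers frame by frame,
 * where the first buffer of each frame records the index of its last one. */
#include <stdio.h>

#define RING 8

int main(void)
{
	int next_to_watch[RING] = { 2, -1, -1, 5, -1, -1, -1, -1 };
	int hw_done_up_to = 5;		/* stand-in for the DD status bit */
	int i = 0;			/* next_to_clean */

	while (i <= hw_done_up_to && next_to_watch[i] >= 0) {
		int eop = next_to_watch[i];
		int cleaned = 0;

		while (!cleaned) {
			printf("release buffer %d\n", i);
			cleaned = (i == eop);
			if (++i == RING)
				i = 0;
		}
	}
	printf("next_to_clean is now %d\n", i);
	return 0;
}
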
1774
1775/**
1776 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
1777 * @adapter: board private structure
1778 * @rx_desc: receive descriptor
1779 * @sk_buff: socket buffer with received data
1780 **/
1781
1782static inline void
1783ixgb_rx_checksum(struct ixgb_adapter *adapter,
1784 struct ixgb_rx_desc *rx_desc,
1785 struct sk_buff *skb)
1786{
1787 /* Ignore Checksum bit is set OR
1788 * TCP Checksum has not been calculated
1789 */
1790 if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1791 (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1792 skb->ip_summed = CHECKSUM_NONE;
1793 return;
1794 }
1795
1796 /* At this point we know the hardware did the TCP checksum */
1797 /* now look at the TCP checksum error bit */
1798 if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1799 /* let the stack verify checksum errors */
1800 skb->ip_summed = CHECKSUM_NONE;
1801 adapter->hw_csum_rx_error++;
1802 } else {
1803 /* TCP checksum is good */
1804 skb->ip_summed = CHECKSUM_UNNECESSARY;
1805 adapter->hw_csum_rx_good++;
1806 }
1807}
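
Stripped of the register bits, the receive checksum logic is a three-way decision on two descriptor fields: if checksums were ignored or no TCP checksum was computed, the stack verifies everything itself; otherwise the TCP checksum-error bit chooses between CHECKSUM_UNNECESSARY and handing the suspect packet back for software verification. A sketch of that decision table with abstract flags (not the real bit positions):

/* Decision table behind ixgb_rx_checksum(); the flags are abstract booleans
 * here, not the actual descriptor bit positions. */
#include <stdio.h>

static const char *rx_csum(int ixsm, int tcpcs, int tcpe)
{
	if (ixsm || !tcpcs)
		return "CHECKSUM_NONE (stack verifies)";
	if (tcpe)
		return "CHECKSUM_NONE (hardware saw an error, stack re-checks)";
	return "CHECKSUM_UNNECESSARY";
}

int main(void)
{
	printf("ixsm=0 tcpcs=1 tcpe=0 -> %s\n", rx_csum(0, 1, 0));
	printf("ixsm=0 tcpcs=1 tcpe=1 -> %s\n", rx_csum(0, 1, 1));
	printf("ixsm=1 tcpcs=1 tcpe=0 -> %s\n", rx_csum(1, 1, 0));
	printf("ixsm=0 tcpcs=0 tcpe=0 -> %s\n", rx_csum(0, 0, 0));
	return 0;
}
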
1808
1809/**
1810 * ixgb_clean_rx_irq - Send received data up the network stack
1811 * @adapter: board private structure
1812 **/
1813
1814static boolean_t
1815#ifdef CONFIG_IXGB_NAPI
1816ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1817#else
1818ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1819#endif
1820{
1821 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1822 struct net_device *netdev = adapter->netdev;
1823 struct pci_dev *pdev = adapter->pdev;
1824 struct ixgb_rx_desc *rx_desc, *next_rxd;
1825 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1826 struct sk_buff *skb, *next_skb;
1827 uint32_t length;
1828 unsigned int i, j;
1829 boolean_t cleaned = FALSE;
1830
1831 i = rx_ring->next_to_clean;
1832 rx_desc = IXGB_RX_DESC(*rx_ring, i);
1833 buffer_info = &rx_ring->buffer_info[i];
1834
1835 while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1836
1837#ifdef CONFIG_IXGB_NAPI
1838 if(*work_done >= work_to_do)
1839 break;
1840
1841 (*work_done)++;
1842#endif
1843 skb = buffer_info->skb;
1844 prefetch(skb->data);
1845
1846 if(++i == rx_ring->count) i = 0;
1847 next_rxd = IXGB_RX_DESC(*rx_ring, i);
1848 prefetch(next_rxd);
1849
1850 if((j = i + 1) == rx_ring->count) j = 0;
1851 next2_buffer = &rx_ring->buffer_info[j];
1852 prefetch(next2_buffer);
1853
1854 next_buffer = &rx_ring->buffer_info[i];
1855 next_skb = next_buffer->skb;
1856 prefetch(next_skb);
1857
1858
1859 cleaned = TRUE;
1860
1861 pci_unmap_single(pdev,
1862 buffer_info->dma,
1863 buffer_info->length,
1864 PCI_DMA_FROMDEVICE);
1865
1866 length = le16_to_cpu(rx_desc->length);
1867
1868 if(unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) {
1869
1870 /* All receives must fit into a single buffer */
1871
1872 IXGB_DBG("Receive packet consumed multiple buffers "
1873 "length<%x>\n", length);
1874
1875 dev_kfree_skb_irq(skb);
1876 rx_desc->status = 0;
1877 buffer_info->skb = NULL;
1878
1879 rx_desc = next_rxd;
1880 buffer_info = next_buffer;
1881 continue;
1882 }
1883
1884 if (unlikely(rx_desc->errors
1885 & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
1886 | IXGB_RX_DESC_ERRORS_P |
1887 IXGB_RX_DESC_ERRORS_RXE))) {
1888
1889 dev_kfree_skb_irq(skb);
1890 rx_desc->status = 0;
1891 buffer_info->skb = NULL;
1892
1893 rx_desc = next_rxd;
1894 buffer_info = next_buffer;
1895 continue;
1896 }
1897
1898 /* Good Receive */
1899 skb_put(skb, length);
1900
1901 /* Receive Checksum Offload */
1902 ixgb_rx_checksum(adapter, rx_desc, skb);
1903
1904 skb->protocol = eth_type_trans(skb, netdev);
1905#ifdef CONFIG_IXGB_NAPI
1906 if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
1907 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
1908 le16_to_cpu(rx_desc->special) &
1909 IXGB_RX_DESC_SPECIAL_VLAN_MASK);
1910 } else {
1911 netif_receive_skb(skb);
1912 }
1913#else /* CONFIG_IXGB_NAPI */
1914 if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
1915 vlan_hwaccel_rx(skb, adapter->vlgrp,
1916 le16_to_cpu(rx_desc->special) &
1917 IXGB_RX_DESC_SPECIAL_VLAN_MASK);
1918 } else {
1919 netif_rx(skb);
1920 }
1921#endif /* CONFIG_IXGB_NAPI */
1922 netdev->last_rx = jiffies;
1923
1924 rx_desc->status = 0;
1925 buffer_info->skb = NULL;
1926
1927 rx_desc = next_rxd;
1928 buffer_info = next_buffer;
1929 }
1930
1931 rx_ring->next_to_clean = i;
1932
1933 ixgb_alloc_rx_buffers(adapter);
1934
1935 return cleaned;
1936}
1937
1938/**
1939 * ixgb_alloc_rx_buffers - Replace used receive buffers
1940 * @adapter: address of board private structure
1941 **/
1942
1943static void
1944ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
1945{
1946 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1947 struct net_device *netdev = adapter->netdev;
1948 struct pci_dev *pdev = adapter->pdev;
1949 struct ixgb_rx_desc *rx_desc;
1950 struct ixgb_buffer *buffer_info;
1951 struct sk_buff *skb;
1952 unsigned int i;
1953 int num_group_tail_writes;
1954 long cleancount;
1955
1956 i = rx_ring->next_to_use;
1957 buffer_info = &rx_ring->buffer_info[i];
1958 cleancount = IXGB_DESC_UNUSED(rx_ring);
1959
1960 num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
1961
1962 /* leave one descriptor unused */
1963 while(--cleancount > 0) {
1964 rx_desc = IXGB_RX_DESC(*rx_ring, i);
1965
1966 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
1967
1968 if(unlikely(!skb)) {
1969 /* Better luck next round */
1970 break;
1971 }
1972
1973 /* Make buffer alignment 2 beyond a 16 byte boundary
1974 * this will result in a 16 byte aligned IP header after
1975 * the 14 byte MAC header is removed
1976 */
1977 skb_reserve(skb, NET_IP_ALIGN);
1978
1979 skb->dev = netdev;
1980
1981 buffer_info->skb = skb;
1982 buffer_info->length = adapter->rx_buffer_len;
1983 buffer_info->dma =
1984 pci_map_single(pdev,
1985 skb->data,
1986 adapter->rx_buffer_len,
1987 PCI_DMA_FROMDEVICE);
1988
1989 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1990
1991		if((i & ~(num_group_tail_writes - 1)) == i) {
1992 /* Force memory writes to complete before letting h/w
1993 * know there are new descriptors to fetch. (Only
1994 * applicable for weak-ordered memory model archs,
1995 * such as IA-64). */
1996 wmb();
1997
1998 IXGB_WRITE_REG(&adapter->hw, RDT, i);
1999 }
2000
2001 if(++i == rx_ring->count) i = 0;
2002 buffer_info = &rx_ring->buffer_info[i];
2003 }
2004
2005 rx_ring->next_to_use = i;
2006}
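
The tail register is only touched every IXGB_RX_BUFFER_WRITE descriptors, and the batching test (i & ~(n - 1)) == i relies on n being a power of two: it is true exactly when the low bits of i are clear, i.e. when i is a multiple of n. A quick stand-alone check of that equivalence, with a batch size chosen arbitrarily for the demo:

/* The batching test in ixgb_alloc_rx_buffers(): for power-of-two n,
 * (i & ~(n - 1)) == i  holds exactly when i is a multiple of n. */
#include <stdio.h>

int main(void)
{
	unsigned n = 16;	/* assumption: some power-of-two batch size */
	unsigned i;

	for (i = 0; i < 64; i++) {
		int mask_test = ((i & ~(n - 1)) == i);
		int mod_test = (i % n == 0);

		if (mask_test != mod_test) {
			printf("mismatch at i=%u\n", i);
			return 1;
		}
	}
	printf("mask test matches (i %% %u == 0) for all i checked\n", n);
	return 0;
}
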
2007
2008/**
2009 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
2010 *
2011 * @netdev: network interface device structure
2012 * @grp: indicates whether to enable or disable tagging/stripping
2013 **/
2014static void
2015ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2016{
2017 struct ixgb_adapter *adapter = netdev->priv;
2018 uint32_t ctrl, rctl;
2019
2020 ixgb_irq_disable(adapter);
2021 adapter->vlgrp = grp;
2022
2023 if(grp) {
2024 /* enable VLAN tag insert/strip */
2025 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2026 ctrl |= IXGB_CTRL0_VME;
2027 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2028
2029 /* enable VLAN receive filtering */
2030
2031 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
2032 rctl |= IXGB_RCTL_VFE;
2033 rctl &= ~IXGB_RCTL_CFIEN;
2034 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
2035 } else {
2036 /* disable VLAN tag insert/strip */
2037
2038 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2039 ctrl &= ~IXGB_CTRL0_VME;
2040 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2041
2042 /* disable VLAN filtering */
2043
2044 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
2045 rctl &= ~IXGB_RCTL_VFE;
2046 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
2047 }
2048
2049 ixgb_irq_enable(adapter);
2050}
2051
2052static void
2053ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2054{
2055 struct ixgb_adapter *adapter = netdev->priv;
2056 uint32_t vfta, index;
2057
2058 /* add VID to filter table */
2059
2060 index = (vid >> 5) & 0x7F;
2061 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2062 vfta |= (1 << (vid & 0x1F));
2063 ixgb_write_vfta(&adapter->hw, index, vfta);
2064}
2065
2066static void
2067ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2068{
2069 struct ixgb_adapter *adapter = netdev->priv;
2070 uint32_t vfta, index;
2071
2072 ixgb_irq_disable(adapter);
2073
2074 if(adapter->vlgrp)
2075 adapter->vlgrp->vlan_devices[vid] = NULL;
2076
2077 ixgb_irq_enable(adapter);
2078
2079	/* remove VID from filter table */
2080
2081 index = (vid >> 5) & 0x7F;
2082 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2083 vfta &= ~(1 << (vid & 0x1F));
2084 ixgb_write_vfta(&adapter->hw, index, vfta);
2085}
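
Both VLAN filter routines address the table the same way: bits 5 and up of the VLAN id select one of the 128 32-bit VFTA words, and the low five bits select the bit within that word. A worked example of the index/bit split and of setting and clearing the bit (pure arithmetic, no register access):

/* VFTA addressing used by ixgb_vlan_rx_add_vid()/_kill_vid(): a VLAN id in
 * 0..4095 selects one of 128 32-bit words and one bit within that word. */
#include <stdio.h>

int main(void)
{
	unsigned vid = 100;			/* example VLAN id */
	unsigned index = (vid >> 5) & 0x7F;	/* -> word 3 */
	unsigned bit = 1u << (vid & 0x1F);	/* -> bit 4, mask 0x10 */
	unsigned vfta = 0;

	vfta |= bit;				/* add the VID ... */
	printf("vid %u -> VFTA[%u], mask 0x%08x, word now 0x%08x\n",
	       vid, index, bit, vfta);

	vfta &= ~bit;				/* ... and remove it again */
	printf("after kill_vid: word is 0x%08x\n", vfta);
	return 0;
}
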
2086
2087static void
2088ixgb_restore_vlan(struct ixgb_adapter *adapter)
2089{
2090 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2091
2092 if(adapter->vlgrp) {
2093 uint16_t vid;
2094 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2095 if(!adapter->vlgrp->vlan_devices[vid])
2096 continue;
2097 ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2098 }
2099 }
2100}
2101
2102/**
2103 * ixgb_notify_reboot - handles OS notification of reboot event.
2104 * @nb: notifier block, unused
2105 * @event: event being passed to the driver to act upon
2106 * @p: a pointer to our net device
2107 **/
2108static int
2109ixgb_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
2110{
2111 struct pci_dev *pdev = NULL;
2112
2113 switch(event) {
2114 case SYS_DOWN:
2115 case SYS_HALT:
2116 case SYS_POWER_OFF:
2117 while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
2118 if (pci_dev_driver(pdev) == &ixgb_driver)
2119 ixgb_suspend(pdev, 3);
2120 }
2121 }
2122 return NOTIFY_DONE;
2123}
2124
2125/**
2126 * ixgb_suspend - driver suspend function called from notify.
2127 * @pdev: PCI device structure of the adapter being suspended
2128 * @state: power state to enter
2129 **/
2130static int
2131ixgb_suspend(struct pci_dev *pdev, uint32_t state)
2132{
2133 struct net_device *netdev = pci_get_drvdata(pdev);
2134 struct ixgb_adapter *adapter = netdev->priv;
2135
2136 netif_device_detach(netdev);
2137
2138 if(netif_running(netdev))
2139 ixgb_down(adapter, TRUE);
2140
2141 pci_save_state(pdev);
2142
2143 state = (state > 0) ? 3 : 0;
2144 pci_set_power_state(pdev, state);
2145 msec_delay(200);
2146
2147 return 0;
2148}
2149
2150#ifdef CONFIG_NET_POLL_CONTROLLER
2151/*
2152 * Polling 'interrupt' - used by things like netconsole to send skbs
2153 * without having to re-enable interrupts. It's not called while
2154 * the interrupt routine is executing.
2155 */
2156
2157static void ixgb_netpoll(struct net_device *dev)
2158{
2159 struct ixgb_adapter *adapter = dev->priv;
2160 disable_irq(adapter->pdev->irq);
2161 ixgb_intr(adapter->pdev->irq, dev, NULL);
2162 enable_irq(adapter->pdev->irq);
2163}
2164#endif
2165
2166/* ixgb_main.c */
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
new file mode 100644
index 000000000000..9eba92891901
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -0,0 +1,96 @@
1/*******************************************************************************
2
3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/* glue for the OS independent part of ixgb
30 * includes register access macros
31 */
32
33#ifndef _IXGB_OSDEP_H_
34#define _IXGB_OSDEP_H_
35
36#include <linux/types.h>
37#include <linux/pci.h>
38#include <linux/delay.h>
39#include <asm/io.h>
40#include <linux/interrupt.h>
41#include <linux/sched.h>
42
43#ifndef msec_delay
44#define msec_delay(x) do { if(in_interrupt()) { \
45 /* Don't mdelay in interrupt context! */ \
46 BUG(); \
47 } else { \
48 set_current_state(TASK_UNINTERRUPTIBLE); \
49 schedule_timeout((x * HZ)/1000 + 2); \
50 } } while(0)
51#endif
52
53#define PCI_COMMAND_REGISTER PCI_COMMAND
54#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
55
56typedef enum {
57#undef FALSE
58 FALSE = 0,
59#undef TRUE
60 TRUE = 1
61} boolean_t;
62
63#undef ASSERT
64#define ASSERT(x) if(!(x)) BUG()
65#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
66
67#ifdef DBG
68#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
69#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
70#else
71#define DEBUGOUT(S)
72#define DEBUGOUT1(S, A...)
73#endif
74
75#define DEBUGFUNC(F) DEBUGOUT(F)
76#define DEBUGOUT2 DEBUGOUT1
77#define DEBUGOUT3 DEBUGOUT2
78#define DEBUGOUT7 DEBUGOUT3
79
80#define IXGB_WRITE_REG(a, reg, value) ( \
81 writel((value), ((a)->hw_addr + IXGB_##reg)))
82
83#define IXGB_READ_REG(a, reg) ( \
84 readl((a)->hw_addr + IXGB_##reg))
85
86#define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) ( \
87 writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2))))
88
89#define IXGB_READ_REG_ARRAY(a, reg, offset) ( \
90 readl((a)->hw_addr + IXGB_##reg + ((offset) << 2)))
91
92#define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS)
93
94#define IXGB_MEMCPY memcpy
95
96#endif /* _IXGB_OSDEP_H_ */
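
The accessor macros above rely on token pasting: IXGB_READ_REG(hw, STATUS) becomes readl(hw->hw_addr + IXGB_STATUS), and the array variants add a 32-bit word offset shifted into bytes. The sketch below imitates that pattern in user space against an ordinary array, with invented register offsets and plain loads/stores standing in for readl/writel:

/* Token-pasting register macros in the style of ixgb_osdep.h, exercised
 * against an ordinary array instead of a memory-mapped BAR.  The offsets
 * and names here are invented for illustration. */
#include <stdio.h>
#include <stdint.h>

#define FAKE_STATUS	0x08
#define FAKE_VFTA	0x100

struct fake_hw { uint8_t *hw_addr; };

#define READ_REG(a, reg) \
	(*(volatile uint32_t *)((a)->hw_addr + FAKE_##reg))
#define WRITE_REG_ARRAY(a, reg, offset, value) \
	(*(volatile uint32_t *)((a)->hw_addr + FAKE_##reg + ((offset) << 2)) = (value))

int main(void)
{
	static uint32_t bar_words[0x80];	/* fake 512-byte register block */
	struct fake_hw hw = { .hw_addr = (uint8_t *)bar_words };

	bar_words[FAKE_STATUS >> 2] = 0xabcd;	/* pretend hardware set STATUS */
	WRITE_REG_ARRAY(&hw, VFTA, 3, 0x10);	/* word 3 of the "table" */

	printf("STATUS = 0x%x, VFTA[3] = 0x%x\n",
	       (unsigned)READ_REG(&hw, STATUS),
	       (unsigned)bar_words[(FAKE_VFTA >> 2) + 3]);
	return 0;
}
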
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
new file mode 100644
index 000000000000..8a83dfdf746d
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -0,0 +1,476 @@
1/*******************************************************************************
2
3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "ixgb.h"
30
31/* This is the only thing that needs to be changed to adjust the
32 * maximum number of ports that the driver can manage.
33 */
34
35#define IXGB_MAX_NIC 8
36
37#define OPTION_UNSET -1
38#define OPTION_DISABLED 0
39#define OPTION_ENABLED 1
40
41/* All parameters are treated the same, as an integer array of values.
42 * This macro just reduces the need to repeat the same declaration code
43 * over and over (plus this helps to avoid typo bugs).
44 */
45
46#define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET }
47#define IXGB_PARAM(X, desc) \
48 static int __devinitdata X[IXGB_MAX_NIC+1] = IXGB_PARAM_INIT; \
49 static int num_##X = 0; \
50 module_param_array_named(X, X, int, &num_##X, 0); \
51 MODULE_PARM_DESC(X, desc);
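
Each IXGB_PARAM() use stamps out a per-board value array preset to OPTION_UNSET, a counter of how many values the user actually supplied, and the module-parameter glue. The user-space imitation below shows the array/counter half of that pattern; module_param_array_named and MODULE_PARM_DESC are left out, and the [0 ... N] range initializer is a GCC extension, as in the driver.

/* User-space imitation of the IXGB_PARAM() pattern: one macro stamps out a
 * value array plus a "how many were supplied" counter per parameter name. */
#include <stdio.h>

#define MAX_NIC		8
#define OPTION_UNSET	-1

#define PARAM(X) \
	static int X[MAX_NIC + 1] = { [0 ... MAX_NIC] = OPTION_UNSET }; \
	static int num_##X

PARAM(TxDescriptors);
PARAM(RxDescriptors);

int main(void)
{
	TxDescriptors[0] = 256;		/* pretend the user set board 0 */
	num_TxDescriptors = 1;

	printf("board 0: Tx=%d (given=%d), Rx=%d (given=%d)\n",
	       TxDescriptors[0], num_TxDescriptors,
	       RxDescriptors[0], num_RxDescriptors);
	return 0;
}
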
52
53/* Transmit Descriptor Count
54 *
55 * Valid Range: 64-4096
56 *
57 * Default Value: 256
58 */
59
60IXGB_PARAM(TxDescriptors, "Number of transmit descriptors");
61
62/* Receive Descriptor Count
63 *
64 * Valid Range: 64-4096
65 *
66 * Default Value: 1024
67 */
68
69IXGB_PARAM(RxDescriptors, "Number of receive descriptors");
70
71/* User Specified Flow Control Override
72 *
73 * Valid Range: 0-3
74 * - 0 - No Flow Control
75 * - 1 - Rx only, respond to PAUSE frames but do not generate them
76 * - 2 - Tx only, generate PAUSE frames but ignore them on receive
77 * - 3 - Full Flow Control Support
78 *
79 * Default Value: Read flow control settings from the EEPROM
80 */
81
82IXGB_PARAM(FlowControl, "Flow Control setting");
83
84/* XsumRX - Receive Checksum Offload Enable/Disable
85 *
86 * Valid Range: 0, 1
87 * - 0 - disables all checksum offload
88 * - 1 - enables receive IP/TCP/UDP checksum offload
89 * on 82597 based NICs
90 *
91 * Default Value: 1
92 */
93
94IXGB_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
95
96/* Transmit Interrupt Delay in units of 0.8192 microseconds
97 *
98 * Valid Range: 0-65535
99 *
100 * Default Value: 32
101 */
102
103IXGB_PARAM(TxIntDelay, "Transmit Interrupt Delay");
104
105/* Receive Interrupt Delay in units of 0.8192 microseconds
106 *
107 * Valid Range: 0-65535
108 *
109 * Default Value: 72
110 */
111
112IXGB_PARAM(RxIntDelay, "Receive Interrupt Delay");
113
114/* Receive Flow control high threshold (when we send a pause frame)
115 * (FCRTH)
116 *
117 * Valid Range: 1,536 - 262,136 (0x600 - 0x3FFF8, 8 byte granularity)
118 *
119 * Default Value: 196,608 (0x30000)
120 */
121
122IXGB_PARAM(RxFCHighThresh, "Receive Flow Control High Threshold");
123
124/* Receive Flow control low threshold (when we send a resume frame)
125 * (FCRTL)
126 *
127 * Valid Range: 64 - 262,136 (0x40 - 0x3FFF8, 8 byte granularity)
128 * must be less than high threshold by at least 8 bytes
129 *
130 * Default Value: 163,840 (0x28000)
131 */
132
133IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
134
135/* Flow control request timeout (how long to pause the link partner's tx)
136 * (PAP 15:0)
137 *
138 * Valid Range: 1 - 65535
139 *
140 * Default Value: 256 (0x100)
141 */
142
143IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout");
144
145/* Interrupt Delay Enable
146 *
147 * Valid Range: 0, 1
148 *
149 * - 0 - disables transmit interrupt delay
150 * - 1 - enables transmit interrupt delay
151 *
152 * Default Value: 1
153 */
154
155IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
156
157
158#define DEFAULT_TIDV 32
159#define MAX_TIDV 0xFFFF
160#define MIN_TIDV 0
161
162#define DEFAULT_RDTR 72
163#define MAX_RDTR 0xFFFF
164#define MIN_RDTR 0
165
166#define XSUMRX_DEFAULT OPTION_ENABLED
167
168#define FLOW_CONTROL_FULL ixgb_fc_full
169#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
170#define DEFAULT_FCRTL 0x28000
171#define DEFAULT_FCRTH 0x30000
172#define MIN_FCRTL 0
173#define MAX_FCRTL 0x3FFE8
174#define MIN_FCRTH 8
175#define MAX_FCRTH 0x3FFF0
176
177#define DEFAULT_FCPAUSE 0x100 /* this may be too long */
178#define MIN_FCPAUSE 1
179#define MAX_FCPAUSE 0xffff
180
181struct ixgb_option {
182 enum { enable_option, range_option, list_option } type;
183 char *name;
184 char *err;
185 int def;
186 union {
187 struct { /* range_option info */
188 int min;
189 int max;
190 } r;
191 struct { /* list_option info */
192 int nr;
193 struct ixgb_opt_list {
194 int i;
195 char *str;
196 } *p;
197 } l;
198 } arg;
199};
200
201static int __devinit
202ixgb_validate_option(int *value, struct ixgb_option *opt)
203{
204 if(*value == OPTION_UNSET) {
205 *value = opt->def;
206 return 0;
207 }
208
209 switch (opt->type) {
210 case enable_option:
211 switch (*value) {
212 case OPTION_ENABLED:
213 printk(KERN_INFO "%s Enabled\n", opt->name);
214 return 0;
215 case OPTION_DISABLED:
216 printk(KERN_INFO "%s Disabled\n", opt->name);
217 return 0;
218 }
219 break;
220 case range_option:
221 if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
222 printk(KERN_INFO "%s set to %i\n", opt->name, *value);
223 return 0;
224 }
225 break;
226 case list_option: {
227 int i;
228 struct ixgb_opt_list *ent;
229
230 for(i = 0; i < opt->arg.l.nr; i++) {
231 ent = &opt->arg.l.p[i];
232 if(*value == ent->i) {
233 if(ent->str[0] != '\0')
234 printk(KERN_INFO "%s\n", ent->str);
235 return 0;
236 }
237 }
238 }
239 break;
240 default:
241 BUG();
242 }
243
244 printk(KERN_INFO "Invalid %s specified (%i) %s\n",
245 opt->name, *value, opt->err);
246 *value = opt->def;
247 return -1;
248}
249
250#define LIST_LEN(l) (sizeof(l) / sizeof(l[0]))
251
252/**
253 * ixgb_check_options - Range Checking for Command Line Parameters
254 * @adapter: board private structure
255 *
256 * This routine checks all command line parameters for valid user
257 * input. If an invalid value is given, or if no user specified
258 * value exists, a default value is used. The final value is stored
259 * in a variable in the adapter structure.
260 **/
261
262void __devinit
263ixgb_check_options(struct ixgb_adapter *adapter)
264{
265 int bd = adapter->bd_number;
266 if(bd >= IXGB_MAX_NIC) {
267 printk(KERN_NOTICE
268 "Warning: no configuration for board #%i\n", bd);
269 printk(KERN_NOTICE "Using defaults for all values\n");
270 }
271
272 { /* Transmit Descriptor Count */
273 struct ixgb_option opt = {
274 .type = range_option,
275 .name = "Transmit Descriptors",
276 .err = "using default of " __MODULE_STRING(DEFAULT_TXD),
277 .def = DEFAULT_TXD,
278 .arg = { .r = { .min = MIN_TXD,
279 .max = MAX_TXD}}
280 };
281 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
282
283 if(num_TxDescriptors > bd) {
284 tx_ring->count = TxDescriptors[bd];
285 ixgb_validate_option(&tx_ring->count, &opt);
286 } else {
287 tx_ring->count = opt.def;
288 }
289 IXGB_ROUNDUP(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
290 }
291 { /* Receive Descriptor Count */
292 struct ixgb_option opt = {
293 .type = range_option,
294 .name = "Receive Descriptors",
295 .err = "using default of " __MODULE_STRING(DEFAULT_RXD),
296 .def = DEFAULT_RXD,
297 .arg = { .r = { .min = MIN_RXD,
298 .max = MAX_RXD}}
299 };
300 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
301
302 if(num_RxDescriptors > bd) {
303 rx_ring->count = RxDescriptors[bd];
304 ixgb_validate_option(&rx_ring->count, &opt);
305 } else {
306 rx_ring->count = opt.def;
307 }
308 IXGB_ROUNDUP(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
309 }
310 { /* Receive Checksum Offload Enable */
311 struct ixgb_option opt = {
312 .type = enable_option,
313 .name = "Receive Checksum Offload",
314 .err = "defaulting to Enabled",
315 .def = OPTION_ENABLED
316 };
317
318 if(num_XsumRX > bd) {
319 int rx_csum = XsumRX[bd];
320 ixgb_validate_option(&rx_csum, &opt);
321 adapter->rx_csum = rx_csum;
322 } else {
323 adapter->rx_csum = opt.def;
324 }
325 }
326 { /* Flow Control */
327
328 struct ixgb_opt_list fc_list[] =
329 {{ ixgb_fc_none, "Flow Control Disabled" },
330 { ixgb_fc_rx_pause,"Flow Control Receive Only" },
331 { ixgb_fc_tx_pause,"Flow Control Transmit Only" },
332 { ixgb_fc_full, "Flow Control Enabled" },
333 { ixgb_fc_default, "Flow Control Hardware Default" }};
334
335 struct ixgb_option opt = {
336 .type = list_option,
337 .name = "Flow Control",
338 .err = "reading default settings from EEPROM",
339 .def = ixgb_fc_full,
340 .arg = { .l = { .nr = LIST_LEN(fc_list),
341 .p = fc_list }}
342 };
343
344 if(num_FlowControl > bd) {
345 int fc = FlowControl[bd];
346 ixgb_validate_option(&fc, &opt);
347 adapter->hw.fc.type = fc;
348 } else {
349 adapter->hw.fc.type = opt.def;
350 }
351 }
352 { /* Receive Flow Control High Threshold */
353 struct ixgb_option opt = {
354 .type = range_option,
355 .name = "Rx Flow Control High Threshold",
356 .err = "using default of " __MODULE_STRING(DEFAULT_FCRTH),
357 .def = DEFAULT_FCRTH,
358 .arg = { .r = { .min = MIN_FCRTH,
359 .max = MAX_FCRTH}}
360 };
361
362 if(num_RxFCHighThresh > bd) {
363 adapter->hw.fc.high_water = RxFCHighThresh[bd];
364 ixgb_validate_option(&adapter->hw.fc.high_water, &opt);
365 } else {
366 adapter->hw.fc.high_water = opt.def;
367 }
368 if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
369 printk (KERN_INFO
370 "Ignoring RxFCHighThresh when no RxFC\n");
371 }
372 { /* Receive Flow Control Low Threshold */
373 struct ixgb_option opt = {
374 .type = range_option,
375 .name = "Rx Flow Control Low Threshold",
376 .err = "using default of " __MODULE_STRING(DEFAULT_FCRTL),
377 .def = DEFAULT_FCRTL,
378 .arg = { .r = { .min = MIN_FCRTL,
379 .max = MAX_FCRTL}}
380 };
381
382 if(num_RxFCLowThresh > bd) {
383 adapter->hw.fc.low_water = RxFCLowThresh[bd];
384 ixgb_validate_option(&adapter->hw.fc.low_water, &opt);
385 } else {
386 adapter->hw.fc.low_water = opt.def;
387 }
388 if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
389 printk (KERN_INFO
390 "Ignoring RxFCLowThresh when no RxFC\n");
391 }
392	{ /* Flow Control Pause Time Request */
393 struct ixgb_option opt = {
394 .type = range_option,
395 .name = "Flow Control Pause Time Request",
396 .err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE),
397 .def = DEFAULT_FCPAUSE,
398 .arg = { .r = { .min = MIN_FCPAUSE,
399 .max = MAX_FCPAUSE}}
400 };
401
402 if(num_FCReqTimeout > bd) {
403 int pause_time = FCReqTimeout[bd];
404 ixgb_validate_option(&pause_time, &opt);
405 adapter->hw.fc.pause_time = pause_time;
406 } else {
407 adapter->hw.fc.pause_time = opt.def;
408 }
409 if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
410 printk (KERN_INFO
411 "Ignoring FCReqTimeout when no RxFC\n");
412 }
413 /* high low and spacing check for rx flow control thresholds */
414 if (adapter->hw.fc.type & ixgb_fc_rx_pause) {
415 /* high must be greater than low */
416 if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
417 /* set defaults */
418 printk (KERN_INFO
419 "RxFCHighThresh must be >= (RxFCLowThresh + 8), "
420 "Using Defaults\n");
421 adapter->hw.fc.high_water = DEFAULT_FCRTH;
422 adapter->hw.fc.low_water = DEFAULT_FCRTL;
423 }
424 }
425 { /* Receive Interrupt Delay */
426 struct ixgb_option opt = {
427 .type = range_option,
428 .name = "Receive Interrupt Delay",
429 .err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
430 .def = DEFAULT_RDTR,
431 .arg = { .r = { .min = MIN_RDTR,
432 .max = MAX_RDTR}}
433 };
434
435 if(num_RxIntDelay > bd) {
436 adapter->rx_int_delay = RxIntDelay[bd];
437 ixgb_validate_option(&adapter->rx_int_delay, &opt);
438 } else {
439 adapter->rx_int_delay = opt.def;
440 }
441 }
442 { /* Transmit Interrupt Delay */
443 struct ixgb_option opt = {
444 .type = range_option,
445 .name = "Transmit Interrupt Delay",
446 .err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
447 .def = DEFAULT_TIDV,
448 .arg = { .r = { .min = MIN_TIDV,
449 .max = MAX_TIDV}}
450 };
451
452 if(num_TxIntDelay > bd) {
453 adapter->tx_int_delay = TxIntDelay[bd];
454 ixgb_validate_option(&adapter->tx_int_delay, &opt);
455 } else {
456 adapter->tx_int_delay = opt.def;
457 }
458 }
459
460 { /* Transmit Interrupt Delay Enable */
461 struct ixgb_option opt = {
462 .type = enable_option,
463 .name = "Tx Interrupt Delay Enable",
464 .err = "defaulting to Enabled",
465 .def = OPTION_ENABLED
466 };
467
468 if(num_IntDelayEnable > bd) {
469 int ide = IntDelayEnable[bd];
470 ixgb_validate_option(&ide, &opt);
471 adapter->tx_int_delay_enable = ide;
472 } else {
473 adapter->tx_int_delay_enable = opt.def;
474 }
475 }
476}