Diffstat (limited to 'drivers/net/ixgb')
-rw-r--r--	drivers/net/ixgb/Makefile       |   2
-rw-r--r--	drivers/net/ixgb/ixgb.h         |  21
-rw-r--r--	drivers/net/ixgb/ixgb_ee.c      |  28
-rw-r--r--	drivers/net/ixgb/ixgb_ee.h      |  12
-rw-r--r--	drivers/net/ixgb/ixgb_ethtool.c | 120
-rw-r--r--	drivers/net/ixgb/ixgb_hw.c      |  40
-rw-r--r--	drivers/net/ixgb/ixgb_hw.h      |   2
-rw-r--r--	drivers/net/ixgb/ixgb_ids.h     |  10
-rw-r--r--	drivers/net/ixgb/ixgb_main.c    | 480
-rw-r--r--	drivers/net/ixgb/ixgb_osdep.h   |   4
-rw-r--r--	drivers/net/ixgb/ixgb_param.c   |  44
11 files changed, 341 insertions(+), 422 deletions(-)
diff --git a/drivers/net/ixgb/Makefile b/drivers/net/ixgb/Makefile
index 838a5084fa00..0b20c5e62ffe 100644
--- a/drivers/net/ixgb/Makefile
+++ b/drivers/net/ixgb/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel PRO/10GbE Linux driver
-# Copyright(c) 1999 - 2006 Intel Corporation.
+# Copyright(c) 1999 - 2008 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 16f9c756aa46..804698fc6a8f 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/10GbE Linux driver
-  Copyright(c) 1999 - 2006 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -89,18 +89,16 @@ struct ixgb_adapter;
 
 
 /* TX/RX descriptor defines */
 #define DEFAULT_TXD	256
 #define MAX_TXD		4096
 #define MIN_TXD		64
 
 /* hardware cannot reliably support more than 512 descriptors owned by
- * hardware descrioptor cache otherwise an unreliable ring under heavy
- * recieve load may result */
-/* #define DEFAULT_RXD 1024 */
-/* #define MAX_RXD 4096 */
-#define DEFAULT_RXD 512
-#define MAX_RXD 512
-#define MIN_RXD 64
+ * hardware descriptor cache otherwise an unreliable ring under heavy
+ * receive load may result */
+#define DEFAULT_RXD	512
+#define MAX_RXD		512
+#define MIN_RXD		64
 
 /* Supported Rx Buffer Sizes */
 #define IXGB_RXBUFFER_2048 2048
@@ -157,7 +155,6 @@ struct ixgb_adapter {
 	u32 part_num;
 	u16 link_speed;
 	u16 link_duplex;
-	spinlock_t tx_lock;
 	struct work_struct tx_timeout_task;
 
 	struct timer_list blink_timer;
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 2f7ed52c7502..89ffa7264a12 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/10GbE Linux driver
-  Copyright(c) 1999 - 2006 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -108,7 +108,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
 		 */
 		eecd_reg &= ~IXGB_EECD_DI;
 
-		if(data & mask)
+		if (data & mask)
 			eecd_reg |= IXGB_EECD_DI;
 
 		IXGB_WRITE_REG(hw, EECD, eecd_reg);
@@ -120,7 +120,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
 
 		mask = mask >> 1;
 
-	} while(mask);
+	} while (mask);
 
 	/* We leave the "DI" bit set to "0" when we leave this routine. */
 	eecd_reg &= ~IXGB_EECD_DI;
@@ -152,14 +152,14 @@ ixgb_shift_in_bits(struct ixgb_hw *hw)
 	eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
 	data = 0;
 
-	for(i = 0; i < 16; i++) {
+	for (i = 0; i < 16; i++) {
 		data = data << 1;
 		ixgb_raise_clock(hw, &eecd_reg);
 
 		eecd_reg = IXGB_READ_REG(hw, EECD);
 
 		eecd_reg &= ~(IXGB_EECD_DI);
-		if(eecd_reg & IXGB_EECD_DO)
+		if (eecd_reg & IXGB_EECD_DO)
 			data |= 1;
 
 		ixgb_lower_clock(hw, &eecd_reg);
@@ -205,7 +205,7 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
 
 	eecd_reg = IXGB_READ_REG(hw, EECD);
 
-	/* Deselct EEPROM */
+	/* Deselect EEPROM */
 	eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
 	IXGB_WRITE_REG(hw, EECD, eecd_reg);
 	udelay(50);
@@ -293,14 +293,14 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
 	 */
 	ixgb_standby_eeprom(hw);
 
-	/* Now read DO repeatedly until is high (equal to '1').  The EEEPROM will
+	/* Now read DO repeatedly until is high (equal to '1').  The EEPROM will
 	 * signal that the command has been completed by raising the DO signal.
 	 * If DO does not go high in 10 milliseconds, then error out.
 	 */
-	for(i = 0; i < 200; i++) {
+	for (i = 0; i < 200; i++) {
 		eecd_reg = IXGB_READ_REG(hw, EECD);
 
-		if(eecd_reg & IXGB_EECD_DO)
+		if (eecd_reg & IXGB_EECD_DO)
 			return (true);
 
 		udelay(50);
@@ -328,10 +328,10 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
 	u16 checksum = 0;
 	u16 i;
 
-	for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
+	for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
 		checksum += ixgb_read_eeprom(hw, i);
 
-	if(checksum == (u16) EEPROM_SUM)
+	if (checksum == (u16) EEPROM_SUM)
 		return (true);
 	else
 		return (false);
@@ -351,7 +351,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
 	u16 checksum = 0;
 	u16 i;
 
-	for(i = 0; i < EEPROM_CHECKSUM_REG; i++)
+	for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
 		checksum += ixgb_read_eeprom(hw, i);
 
 	checksum = (u16) EEPROM_SUM - checksum;
@@ -365,7 +365,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  * reg - offset within the EEPROM to be written to
- * data - 16 bit word to be writen to the EEPROM
+ * data - 16 bit word to be written to the EEPROM
  *
  * If ixgb_update_eeprom_checksum is not called after this function, the
  * EEPROM will most likely contain an invalid checksum.
@@ -472,7 +472,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
 	ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	DEBUGOUT("ixgb_ee: Reading eeprom data\n");
-	for(i = 0; i < IXGB_EEPROM_SIZE ; i++) {
+	for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
 		u16 ee_data;
 		ee_data = ixgb_read_eeprom(hw, i);
 		checksum += ee_data;
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h
index 4b7bd0d4a8a9..7ea12652f471 100644
--- a/drivers/net/ixgb/ixgb_ee.h
+++ b/drivers/net/ixgb/ixgb_ee.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/10GbE Linux driver
-  Copyright(c) 1999 - 2006 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -34,11 +34,11 @@
 #define IXGB_ETH_LENGTH_OF_ADDRESS   6
 
 /* EEPROM Commands */
-#define EEPROM_READ_OPCODE	0x6  /* EERPOM read opcode */
-#define EEPROM_WRITE_OPCODE	0x5  /* EERPOM write opcode */
-#define EEPROM_ERASE_OPCODE	0x7  /* EERPOM erase opcode */
-#define EEPROM_EWEN_OPCODE	0x13 /* EERPOM erase/write enable */
-#define EEPROM_EWDS_OPCODE	0x10 /* EERPOM erast/write disable */
+#define EEPROM_READ_OPCODE	0x6  /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE	0x5  /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE	0x7  /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE	0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE	0x10 /* EEPROM erase/write disable */
 
 /* EEPROM MAP (Word Offsets) */
 #define EEPROM_IA_1_2_REG        0x0000
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 8464d8a013b0..288ee1d0f431 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/10GbE Linux driver
-  Copyright(c) 1999 - 2006 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -95,7 +95,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	ecmd->port = PORT_FIBRE;
 	ecmd->transceiver = XCVR_EXTERNAL;
 
-	if(netif_carrier_ok(adapter->netdev)) {
+	if (netif_carrier_ok(adapter->netdev)) {
 		ecmd->speed = SPEED_10000;
 		ecmd->duplex = DUPLEX_FULL;
 	} else {
@@ -122,11 +122,11 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
-	if(ecmd->autoneg == AUTONEG_ENABLE ||
+	if (ecmd->autoneg == AUTONEG_ENABLE ||
 	   ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
 		return -EINVAL;
 
-	if(netif_running(adapter->netdev)) {
+	if (netif_running(adapter->netdev)) {
 		ixgb_down(adapter, true);
 		ixgb_reset(adapter);
 		ixgb_up(adapter);
@@ -143,14 +143,14 @@ ixgb_get_pauseparam(struct net_device *netdev,
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
 
 	pause->autoneg = AUTONEG_DISABLE;
 
-	if(hw->fc.type == ixgb_fc_rx_pause)
+	if (hw->fc.type == ixgb_fc_rx_pause)
 		pause->rx_pause = 1;
-	else if(hw->fc.type == ixgb_fc_tx_pause)
+	else if (hw->fc.type == ixgb_fc_tx_pause)
 		pause->tx_pause = 1;
-	else if(hw->fc.type == ixgb_fc_full) {
+	else if (hw->fc.type == ixgb_fc_full) {
 		pause->rx_pause = 1;
 		pause->tx_pause = 1;
 	}
@@ -162,26 +162,26 @@ ixgb_set_pauseparam(struct net_device *netdev,
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
 
-	if(pause->autoneg == AUTONEG_ENABLE)
+	if (pause->autoneg == AUTONEG_ENABLE)
 		return -EINVAL;
 
-	if(pause->rx_pause && pause->tx_pause)
+	if (pause->rx_pause && pause->tx_pause)
 		hw->fc.type = ixgb_fc_full;
-	else if(pause->rx_pause && !pause->tx_pause)
+	else if (pause->rx_pause && !pause->tx_pause)
 		hw->fc.type = ixgb_fc_rx_pause;
-	else if(!pause->rx_pause && pause->tx_pause)
+	else if (!pause->rx_pause && pause->tx_pause)
 		hw->fc.type = ixgb_fc_tx_pause;
-	else if(!pause->rx_pause && !pause->tx_pause)
+	else if (!pause->rx_pause && !pause->tx_pause)
 		hw->fc.type = ixgb_fc_none;
 
-	if(netif_running(adapter->netdev)) {
+	if (netif_running(adapter->netdev)) {
 		ixgb_down(adapter, true);
 		ixgb_up(adapter);
 		ixgb_set_speed_duplex(netdev);
 	} else
 		ixgb_reset(adapter);
 
 	return 0;
 }
 
@@ -200,7 +200,7 @@ ixgb_set_rx_csum(struct net_device *netdev, u32 data)
 
 	adapter->rx_csum = data;
 
-	if(netif_running(netdev)) {
+	if (netif_running(netdev)) {
 		ixgb_down(adapter, true);
 		ixgb_up(adapter);
 		ixgb_set_speed_duplex(netdev);
@@ -208,7 +208,7 @@ ixgb_set_rx_csum(struct net_device *netdev, u32 data)
 		ixgb_reset(adapter);
 	return 0;
 }
 
-static u32 
+static u32
 ixgb_get_tx_csum(struct net_device *netdev)
 {
@@ -229,12 +229,12 @@ ixgb_set_tx_csum(struct net_device *netdev, u32 data)
 static int
 ixgb_set_tso(struct net_device *netdev, u32 data)
 {
-	if(data)
+	if (data)
 		netdev->features |= NETIF_F_TSO;
 	else
 		netdev->features &= ~NETIF_F_TSO;
 	return 0;
 }
 
 static u32
 ixgb_get_msglevel(struct net_device *netdev)
@@ -251,7 +251,7 @@ ixgb_set_msglevel(struct net_device *netdev, u32 data)
 }
 #define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
 
-static int 
+static int
 ixgb_get_regs_len(struct net_device *netdev)
 {
 #define IXGB_REG_DUMP_LEN  136*sizeof(u32)
@@ -301,7 +301,7 @@ ixgb_get_regs(struct net_device *netdev,
 	*reg++ = IXGB_READ_REG(hw, RXCSUM);	/*  20 */
 
 	/* there are 16 RAR entries in hardware, we only use 3 */
-	for(i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
+	for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
 		*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
 		*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
 	}
@@ -415,7 +415,7 @@ ixgb_get_eeprom(struct net_device *netdev,
 	int i, max_len, first_word, last_word;
 	int ret_val = 0;
 
-	if(eeprom->len == 0) {
+	if (eeprom->len == 0) {
 		ret_val = -EINVAL;
 		goto geeprom_error;
 	}
@@ -424,12 +424,12 @@ ixgb_get_eeprom(struct net_device *netdev,
 
 	max_len = ixgb_get_eeprom_len(netdev);
 
-	if(eeprom->offset > eeprom->offset + eeprom->len) {
+	if (eeprom->offset > eeprom->offset + eeprom->len) {
 		ret_val = -EINVAL;
 		goto geeprom_error;
 	}
 
-	if((eeprom->offset + eeprom->len) > max_len)
+	if ((eeprom->offset + eeprom->len) > max_len)
 		eeprom->len = (max_len - eeprom->offset);
 
 	first_word = eeprom->offset >> 1;
@@ -437,16 +437,14 @@ ixgb_get_eeprom(struct net_device *netdev,
 
 	eeprom_buff = kmalloc(sizeof(__le16) *
 			(last_word - first_word + 1), GFP_KERNEL);
-	if(!eeprom_buff)
+	if (!eeprom_buff)
 		return -ENOMEM;
 
 	/* note the eeprom was good because the driver loaded */
-	for(i = 0; i <= (last_word - first_word); i++) {
+	for (i = 0; i <= (last_word - first_word); i++)
 		eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
-	}
 
-	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
-	       eeprom->len);
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
 	kfree(eeprom_buff);
 
 geeprom_error:
@@ -464,47 +462,47 @@ ixgb_set_eeprom(struct net_device *netdev,
 	int max_len, first_word, last_word;
 	u16 i;
 
-	if(eeprom->len == 0)
+	if (eeprom->len == 0)
 		return -EINVAL;
 
-	if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
 		return -EFAULT;
 
 	max_len = ixgb_get_eeprom_len(netdev);
 
-	if(eeprom->offset > eeprom->offset + eeprom->len)
+	if (eeprom->offset > eeprom->offset + eeprom->len)
 		return -EINVAL;
 
-	if((eeprom->offset + eeprom->len) > max_len)
+	if ((eeprom->offset + eeprom->len) > max_len)
 		eeprom->len = (max_len - eeprom->offset);
 
 	first_word = eeprom->offset >> 1;
 	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
-	if(!eeprom_buff)
+	if (!eeprom_buff)
 		return -ENOMEM;
 
 	ptr = (void *)eeprom_buff;
 
-	if(eeprom->offset & 1) {
+	if (eeprom->offset & 1) {
 		/* need read/modify/write of first changed EEPROM word */
 		/* only the second byte of the word is being modified */
 		eeprom_buff[0] = ixgb_read_eeprom(hw, first_word);
 		ptr++;
 	}
-	if((eeprom->offset + eeprom->len) & 1) {
+	if ((eeprom->offset + eeprom->len) & 1) {
 		/* need read/modify/write of last changed EEPROM word */
 		/* only the first byte of the word is being modified */
 		eeprom_buff[last_word - first_word]
 			= ixgb_read_eeprom(hw, last_word);
 	}
 
 	memcpy(ptr, bytes, eeprom->len);
-	for(i = 0; i <= (last_word - first_word); i++)
+	for (i = 0; i <= (last_word - first_word); i++)
 		ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]);
 
 	/* Update the checksum over the first part of the EEPROM if needed */
-	if(first_word <= EEPROM_CHECKSUM_REG)
+	if (first_word <= EEPROM_CHECKSUM_REG)
 		ixgb_update_eeprom_checksum(hw);
 
 	kfree(eeprom_buff);
@@ -534,7 +532,7 @@ ixgb_get_ringparam(struct net_device *netdev,
 	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
 	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
 
-	ring->rx_max_pending = MAX_RXD; 
+	ring->rx_max_pending = MAX_RXD;
 	ring->tx_max_pending = MAX_TXD;
 	ring->rx_mini_max_pending = 0;
 	ring->rx_jumbo_max_pending = 0;
@@ -544,7 +542,7 @@ ixgb_get_ringparam(struct net_device *netdev,
 	ring->rx_jumbo_pending = 0;
 }
 
-static int 
+static int
 ixgb_set_ringparam(struct net_device *netdev,
 		struct ethtool_ringparam *ring)
 {
@@ -557,10 +555,10 @@ ixgb_set_ringparam(struct net_device *netdev,
 	tx_old = adapter->tx_ring;
 	rx_old = adapter->rx_ring;
 
-	if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
-	if(netif_running(adapter->netdev))
+	if (netif_running(adapter->netdev))
 		ixgb_down(adapter, true);
 
 	rxdr->count = max(ring->rx_pending,(u32)MIN_RXD);
@@ -571,11 +569,11 @@ ixgb_set_ringparam(struct net_device *netdev,
 	txdr->count = min(txdr->count,(u32)MAX_TXD);
 	txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
 
-	if(netif_running(adapter->netdev)) {
+	if (netif_running(adapter->netdev)) {
 		/* Try to get new resources before deleting old */
-		if((err = ixgb_setup_rx_resources(adapter)))
+		if ((err = ixgb_setup_rx_resources(adapter)))
 			goto err_setup_rx;
-		if((err = ixgb_setup_tx_resources(adapter)))
+		if ((err = ixgb_setup_tx_resources(adapter)))
 			goto err_setup_tx;
 
 		/* save the new, restore the old in order to free it,
@@ -589,7 +587,7 @@ ixgb_set_ringparam(struct net_device *netdev,
 		ixgb_free_tx_resources(adapter);
 		adapter->rx_ring = rx_new;
 		adapter->tx_ring = tx_new;
-		if((err = ixgb_up(adapter)))
+		if ((err = ixgb_up(adapter)))
 			return err;
 		ixgb_set_speed_duplex(netdev);
 	}
@@ -615,7 +613,7 @@ ixgb_led_blink_callback(unsigned long data)
 {
 	struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
 
-	if(test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
+	if (test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
 		ixgb_led_off(&adapter->hw);
 	else
 		ixgb_led_on(&adapter->hw);
@@ -631,7 +629,7 @@ ixgb_phys_id(struct net_device *netdev, u32 data)
 	if (!data)
 		data = INT_MAX;
 
-	if(!adapter->blink_timer.function) {
+	if (!adapter->blink_timer.function) {
 		init_timer(&adapter->blink_timer);
 		adapter->blink_timer.function = ixgb_led_blink_callback;
 		adapter->blink_timer.data = (unsigned long)adapter;
@@ -647,7 +645,7 @@ ixgb_phys_id(struct net_device *netdev, u32 data)
 	return 0;
 }
 
-static int 
+static int
 ixgb_get_sset_count(struct net_device *netdev, int sset)
 {
 	switch (sset) {
@@ -658,30 +656,30 @@ ixgb_get_sset_count(struct net_device *netdev, int sset)
 	}
 }
 
-static void 
+static void
 ixgb_get_ethtool_stats(struct net_device *netdev,
 		struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	int i;
 
 	ixgb_update_stats(adapter);
-	for(i = 0; i < IXGB_STATS_LEN; i++) {
+	for (i = 0; i < IXGB_STATS_LEN; i++) {
 		char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
 		data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 }
 
-static void 
+static void
 ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 {
 	int i;
 
 	switch(stringset) {
 	case ETH_SS_STATS:
-		for(i=0; i < IXGB_STATS_LEN; i++) {
+		for (i = 0; i < IXGB_STATS_LEN; i++) {
 			memcpy(data + i * ETH_GSTRING_LEN,
 			       ixgb_gstrings_stats[i].stat_string,
 			       ETH_GSTRING_LEN);
 		}
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 04d2003e24e1..11dcda0f453e 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/10GbE Linux driver
-  Copyright(c) 1999 - 2006 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -125,7 +125,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
 	/* If we are stopped or resetting exit gracefully and wait to be
 	 * started again before accessing the hardware.
 	 */
-	if(hw->adapter_stopped) {
+	if (hw->adapter_stopped) {
 		DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
 		return false;
 	}
@@ -347,7 +347,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
 
 	/* Zero out the Multicast HASH table */
 	DEBUGOUT("Zeroing the MTA\n");
-	for(i = 0; i < IXGB_MC_TBL_SIZE; i++)
+	for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
 		IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
 
 	/* Zero out the VLAN Filter Table Array */
@@ -371,7 +371,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
  * hw - Struct containing variables accessed by shared code
  *
  * Places the MAC address in receive address register 0 and clears the rest
- * of the receive addresss registers. Clears the multicast table. Assumes
+ * of the receive address registers. Clears the multicast table. Assumes
  * the receiver is in reset when the routine is called.
  *****************************************************************************/
 static void
@@ -413,7 +413,7 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
 
 	/* Zero out the other 15 receive addresses. */
 	DEBUGOUT("Clearing RAR[1-15]\n");
-	for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
+	for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
 		/* Write high reg first to disable the AV bit first */
 		IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
 		IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
@@ -452,19 +452,18 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
 
 	/* Clear RAR[1-15] */
 	DEBUGOUT(" Clearing RAR[1-15]\n");
-	for(i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
+	for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
 		IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
 		IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
 	}
 
 	/* Clear the MTA */
 	DEBUGOUT(" Clearing MTA\n");
-	for(i = 0; i < IXGB_MC_TBL_SIZE; i++) {
+	for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
 		IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
-	}
 
 	/* Add the new addresses */
-	for(i = 0; i < mc_addr_count; i++) {
+	for (i = 0; i < mc_addr_count; i++) {
 		DEBUGOUT(" Adding the multicast addresses:\n");
 		DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
 			mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
@@ -482,7 +481,7 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
 		/* Place this multicast address in the RAR if there is room, *
 		 * else put it in the MTA
 		 */
-		if(rar_used_count < IXGB_RAR_ENTRIES) {
+		if (rar_used_count < IXGB_RAR_ENTRIES) {
 			ixgb_rar_set(hw,
 				mc_addr_list +
 				(i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
@@ -649,7 +648,7 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
 {
 	u32 offset;
 
-	for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
+	for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
 		IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
 	return;
 }
@@ -719,9 +718,8 @@ ixgb_setup_fc(struct ixgb_hw *hw)
 	/* Write the new settings */
 	IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
 
-	if (pap_reg != 0) {
+	if (pap_reg != 0)
 		IXGB_WRITE_REG(hw, PAP, pap_reg);
-	}
 
 	/* Set the flow control receive threshold registers.  Normally,
 	 * these registers will be set to a default threshold that may be
@@ -729,14 +727,14 @@ ixgb_setup_fc(struct ixgb_hw *hw)
 	 * ability to transmit pause frames in not enabled, then these
 	 * registers will be set to 0.
 	 */
-	if(!(hw->fc.type & ixgb_fc_tx_pause)) {
+	if (!(hw->fc.type & ixgb_fc_tx_pause)) {
 		IXGB_WRITE_REG(hw, FCRTL, 0);
 		IXGB_WRITE_REG(hw, FCRTH, 0);
 	} else {
 		/* We need to set up the Receive Threshold high and low water
 		 * marks as well as (optionally) enabling the transmission of XON
 		 * frames. */
-		if(hw->fc.send_xon) {
+		if (hw->fc.send_xon) {
 			IXGB_WRITE_REG(hw, FCRTL,
 				(hw->fc.low_water | IXGB_FCRTL_XONE));
 		} else {
@@ -791,7 +789,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
 	** from the CPU Write to the Ready bit assertion.
 	**************************************************************/
 
-	for(i = 0; i < 10; i++)
+	for (i = 0; i < 10; i++)
 	{
 		udelay(10);
 
@@ -818,7 +816,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
 	** from the CPU Write to the Ready bit assertion.
 	**************************************************************/
 
-	for(i = 0; i < 10; i++)
+	for (i = 0; i < 10; i++)
 	{
 		udelay(10);
 
@@ -887,7 +885,7 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
 	** from the CPU Write to the Ready bit assertion.
 	**************************************************************/
 
-	for(i = 0; i < 10; i++)
+	for (i = 0; i < 10; i++)
 	{
 		udelay(10);
 
@@ -914,7 +912,7 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
 	** from the CPU Write to the Ready bit assertion.
 	**************************************************************/
 
-	for(i = 0; i < 10; i++)
+	for (i = 0; i < 10; i++)
 	{
 		udelay(10);
 
@@ -965,7 +963,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
 }
 
 /******************************************************************************
- * Check for a bad link condition that may have occured.
+ * Check for a bad link condition that may have occurred.
  * The indication is that the RFC / LFC registers may be incrementing
  * continually.  A full adapter reset is required to recover.
  *
@@ -1007,7 +1005,7 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
 	DEBUGFUNC("ixgb_clear_hw_cntrs");
 
 	/* if we are stopped or resetting exit gracefully */
-	if(hw->adapter_stopped) {
+	if (hw->adapter_stopped) {
 		DEBUGOUT("Exiting because the adapter is stopped!!!\n");
 		return;
 	}
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index 39cfa47bea69..831fe0c58b2b 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/10GbE Linux driver
-  Copyright(c) 1999 - 2006 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h
index 180d20e793a5..2a58847f46e8 100644
--- a/drivers/net/ixgb/ixgb_ids.h
+++ b/drivers/net/ixgb/ixgb_ids.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/10GbE Linux driver
-  Copyright(c) 1999 - 2006 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -38,11 +38,11 @@
 #define SUN_VENDOR_ID         0x108E
 #define SUN_SUBVENDOR_ID      0x108E
 
-#define IXGB_DEVICE_ID_82597EX      0x1048
-#define IXGB_DEVICE_ID_82597EX_SR   0x1A48
-#define IXGB_DEVICE_ID_82597EX_LR   0x1B48
-#define IXGB_SUBDEVICE_ID_A11F      0xA11F
-#define IXGB_SUBDEVICE_ID_A01F      0xA01F
+#define IXGB_DEVICE_ID_82597EX      0x1048
+#define IXGB_DEVICE_ID_82597EX_SR   0x1A48
+#define IXGB_DEVICE_ID_82597EX_LR   0x1B48
+#define IXGB_SUBDEVICE_ID_A11F      0xA11F
+#define IXGB_SUBDEVICE_ID_A01F      0xA01F
 
 #define IXGB_DEVICE_ID_82597EX_CX4   0x109E
 #define IXGB_SUBDEVICE_ID_A00C   0xA00C
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 41f3adf5f375..e83feaf830bd 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/10GbE Linux driver
-  Copyright(c) 1999 - 2006 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -31,14 +31,16 @@
 char ixgb_driver_name[] = "ixgb";
 static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 
-#ifndef CONFIG_IXGB_NAPI
-#define DRIVERNAPI
-#else
 #define DRIVERNAPI "-NAPI"
-#endif
-#define DRV_VERSION "1.0.126-k4"DRIVERNAPI
+#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
 const char ixgb_driver_version[] = DRV_VERSION;
-static const char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
+
+#define IXGB_CB_LENGTH 256
+static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+	"Maximum size of packet that is copied to a new buffer on receive");
 
 /* ixgb_pci_tbl - PCI Device ID Table
  *
@@ -55,7 +57,7 @@ static struct pci_device_id ixgb_pci_tbl[] = {
 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
-	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR, 
+	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 
 	/* required last entry */
@@ -65,16 +67,6 @@ static struct pci_device_id ixgb_pci_tbl[] = {
 MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
 
 /* Local Function Prototypes */
-
-int ixgb_up(struct ixgb_adapter *adapter);
-void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
-void ixgb_reset(struct ixgb_adapter *adapter);
-int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-void ixgb_update_stats(struct ixgb_adapter *adapter);
-
 static int ixgb_init_module(void);
 static void ixgb_exit_module(void);
 static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -96,18 +88,15 @@ static int ixgb_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t ixgb_intr(int irq, void *data);
 static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
 
-#ifdef CONFIG_IXGB_NAPI
-static int ixgb_clean(struct napi_struct *napi, int budget);
-static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
-			      int *work_done, int work_to_do);
-#else
-static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
-#endif
-static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
+static int ixgb_clean(struct napi_struct *, int);
+static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
+static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
+
 static void ixgb_tx_timeout(struct net_device *dev);
 static void ixgb_tx_timeout_task(struct work_struct *work);
+
 static void ixgb_vlan_rx_register(struct net_device *netdev,
 				  struct vlan_group *grp);
 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
 static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
@@ -118,7 +107,7 @@ static void ixgb_netpoll(struct net_device *dev);
 #endif
 
 static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
-                              enum pci_channel_state state);
+                             enum pci_channel_state state);
 static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
 static void ixgb_io_resume (struct pci_dev *pdev);
 
@@ -146,14 +135,6 @@ static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
-/* some defines for controlling descriptor fetches in h/w */
-#define RXDCTL_WTHRESH_DEFAULT 15  /* chip writes back at this many or RXT0 */
-#define RXDCTL_PTHRESH_DEFAULT 0   /* chip considers prefech below
-				    * this */
-#define RXDCTL_HTHRESH_DEFAULT 0   /* chip will only prefetch if tail
-				    * is pushed this many descriptors
-				    * from head */
-
 /**
  * ixgb_init_module - Driver Registration Routine
  *
@@ -236,7 +217,7 @@ ixgb_up(struct ixgb_adapter *adapter)
 	ixgb_configure_tx(adapter);
 	ixgb_setup_rctl(adapter);
 	ixgb_configure_rx(adapter);
-	ixgb_alloc_rx_buffers(adapter);
+	ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));
 
 	/* disable interrupts and get the hardware into a known state */
 	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
@@ -261,7 +242,7 @@ ixgb_up(struct ixgb_adapter *adapter)
 		return err;
 	}
 
-	if((hw->max_frame_size != max_frame) ||
+	if ((hw->max_frame_size != max_frame) ||
 	    (hw->max_frame_size !=
 	    (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
 
@@ -269,11 +250,11 @@ ixgb_up(struct ixgb_adapter *adapter)
 
 		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
 
-		if(hw->max_frame_size >
+		if (hw->max_frame_size >
 		   IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
 			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
 
-			if(!(ctrl0 & IXGB_CTRL0_JFE)) {
+			if (!(ctrl0 & IXGB_CTRL0_JFE)) {
 				ctrl0 |= IXGB_CTRL0_JFE;
 				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
 			}
@@ -282,9 +263,7 @@ ixgb_up(struct ixgb_adapter *adapter)
 
 	clear_bit(__IXGB_DOWN, &adapter->flags);
 
-#ifdef CONFIG_IXGB_NAPI
 	napi_enable(&adapter->napi);
-#endif
 	ixgb_irq_enable(adapter);
 
 	mod_timer(&adapter->watchdog_timer, jiffies);
@@ -300,9 +279,7 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
 	/* prevent the interrupt handler from restarting watchdog */
 	set_bit(__IXGB_DOWN, &adapter->flags);
 
-#ifdef CONFIG_IXGB_NAPI
 	napi_disable(&adapter->napi);
-#endif
 	/* waiting for NAPI to complete can re-enable interrupts */
 	ixgb_irq_disable(adapter);
 	free_irq(adapter->pdev->irq, netdev);
@@ -310,7 +287,7 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
 	if (adapter->have_msi)
 		pci_disable_msi(adapter->pdev);
 
-	if(kill_watchdog)
+	if (kill_watchdog)
 		del_timer_sync(&adapter->watchdog_timer);
 
 	adapter->link_speed = 0;
@@ -357,27 +334,25 @@ ixgb_reset(struct ixgb_adapter *adapter)
  **/
 
 static int __devinit
-ixgb_probe(struct pci_dev *pdev,
-	   const struct pci_device_id *ent)
+ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct net_device *netdev = NULL;
 	struct ixgb_adapter *adapter;
 	static int cards_found = 0;
-	unsigned long mmio_start;
-	int mmio_len;
 	int pci_using_dac;
 	int i;
 	int err;
 
-	if((err = pci_enable_device(pdev)))
+	err = pci_enable_device(pdev);
+	if (err)
 		return err;
 
-	if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
+	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
 	    !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
 		pci_using_dac = 1;
 	} else {
-		if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
+		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
 		    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
 			printk(KERN_ERR
 			 "ixgb: No usable DMA configuration, aborting\n");
 			goto err_dma_mask;
@@ -385,13 +360,14 @@ ixgb_probe(struct pci_dev *pdev,
 		pci_using_dac = 0;
 	}
 
-	if((err = pci_request_regions(pdev, ixgb_driver_name)))
+	err = pci_request_regions(pdev, ixgb_driver_name);
+	if (err)
 		goto err_request_regions;
 
 	pci_set_master(pdev);
 
 	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
-	if(!netdev) {
+	if (!netdev) {
 		err = -ENOMEM;
 		goto err_alloc_etherdev;
 	}
@@ -405,19 +381,17 @@ ixgb_probe(struct pci_dev *pdev,
 	adapter->hw.back = adapter;
 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
 
-	mmio_start = pci_resource_start(pdev, BAR_0);
-	mmio_len = pci_resource_len(pdev, BAR_0);
-
-	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
-	if(!adapter->hw.hw_addr) {
+	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
+				      pci_resource_len(pdev, BAR_0));
+	if (!adapter->hw.hw_addr) {
 		err = -EIO;
 		goto err_ioremap;
 	}
 
-	for(i = BAR_1; i <= BAR_5; i++) {
-		if(pci_resource_len(pdev, i) == 0)
+	for (i = BAR_1; i <= BAR_5; i++) {
+		if (pci_resource_len(pdev, i) == 0)
 			continue;
-		if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
 			adapter->hw.io_base = pci_resource_start(pdev, i);
 			break;
 		}
@@ -433,9 +407,7 @@ ixgb_probe(struct pci_dev *pdev,
 	ixgb_set_ethtool_ops(netdev);
 	netdev->tx_timeout = &ixgb_tx_timeout;
 	netdev->watchdog_timeo = 5 * HZ;
-#ifdef CONFIG_IXGB_NAPI
 	netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
-#endif
 	netdev->vlan_rx_register = ixgb_vlan_rx_register;
 	netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
 	netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
@@ -444,9 +416,6 @@ ixgb_probe(struct pci_dev *pdev,
 #endif
 
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
-	netdev->mem_start = mmio_start;
-	netdev->mem_end = mmio_start + mmio_len;
-	netdev->base_addr = adapter->hw.io_base;
 
 	adapter->bd_number = cards_found;
 	adapter->link_speed = 0;
@@ -454,7 +423,8 @@ ixgb_probe(struct pci_dev *pdev,
 
 	/* setup the private structure */
 
-	if((err = ixgb_sw_init(adapter)))
+	err = ixgb_sw_init(adapter);
+	if (err)
 		goto err_sw_init;
 
 	netdev->features = NETIF_F_SG |
@@ -463,16 +433,13 @@ ixgb_probe(struct pci_dev *pdev,
 			   NETIF_F_HW_VLAN_RX |
 			   NETIF_F_HW_VLAN_FILTER;
 	netdev->features |= NETIF_F_TSO;
-#ifdef NETIF_F_LLTX
-	netdev->features |= NETIF_F_LLTX;
-#endif
 
-	if(pci_using_dac)
+	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
 	/* make sure the EEPROM is good */
 
-	if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
+	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
 		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
 		err = -EIO;
 		goto err_eeprom;
@@ -481,7 +448,7 @@ ixgb_probe(struct pci_dev *pdev,
 	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
 	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
 
-	if(!is_valid_ether_addr(netdev->perm_addr)) {
+	if (!is_valid_ether_addr(netdev->perm_addr)) {
 		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
 		err = -EIO;
 		goto err_eeprom;
@@ -496,7 +463,8 @@ ixgb_probe(struct pci_dev *pdev,
 	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
 
 	strcpy(netdev->name, "eth%d");
-	if((err = register_netdev(netdev)))
+	err = register_netdev(netdev);
+	if (err)
 		goto err_register;
 
 	/* we're going to reset, so assume we have no link for now */
@@ -543,6 +511,8 @@ ixgb_remove(struct pci_dev *pdev)
543 struct net_device *netdev = pci_get_drvdata(pdev); 511 struct net_device *netdev = pci_get_drvdata(pdev);
544 struct ixgb_adapter *adapter = netdev_priv(netdev); 512 struct ixgb_adapter *adapter = netdev_priv(netdev);
545 513
514 flush_scheduled_work();
515
546 unregister_netdev(netdev); 516 unregister_netdev(netdev);
547 517
548 iounmap(adapter->hw.hw_addr); 518 iounmap(adapter->hw.hw_addr);
@@ -575,13 +545,13 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
575 hw->subsystem_id = pdev->subsystem_device; 545 hw->subsystem_id = pdev->subsystem_device;
576 546
577 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 547 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
578 adapter->rx_buffer_len = hw->max_frame_size; 548 adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
579 549
580 if((hw->device_id == IXGB_DEVICE_ID_82597EX) 550 if ((hw->device_id == IXGB_DEVICE_ID_82597EX)
581 || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) 551 || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
582 || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) 552 || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
583 || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) 553 || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
584 hw->mac_type = ixgb_82597; 554 hw->mac_type = ixgb_82597;
585 else { 555 else {
586 /* should never have loaded on this device */ 556 /* should never have loaded on this device */
587 DPRINTK(PROBE, ERR, "unsupported device id\n"); 557 DPRINTK(PROBE, ERR, "unsupported device id\n");
@@ -590,8 +560,6 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
590 /* enable flow control to be programmed */ 560 /* enable flow control to be programmed */
591 hw->fc.send_xon = 1; 561 hw->fc.send_xon = 1;
592 562
593 spin_lock_init(&adapter->tx_lock);
594
595 set_bit(__IXGB_DOWN, &adapter->flags); 563 set_bit(__IXGB_DOWN, &adapter->flags);
596 return 0; 564 return 0;
597} 565}
@@ -616,16 +584,18 @@ ixgb_open(struct net_device *netdev)
616 int err; 584 int err;
617 585
618 /* allocate transmit descriptors */ 586 /* allocate transmit descriptors */
619 587 err = ixgb_setup_tx_resources(adapter);
620 if((err = ixgb_setup_tx_resources(adapter))) 588 if (err)
621 goto err_setup_tx; 589 goto err_setup_tx;
622 590
623 /* allocate receive descriptors */ 591 /* allocate receive descriptors */
624 592
625 if((err = ixgb_setup_rx_resources(adapter))) 593 err = ixgb_setup_rx_resources(adapter);
594 if (err)
626 goto err_setup_rx; 595 goto err_setup_rx;
627 596
628 if((err = ixgb_up(adapter))) 597 err = ixgb_up(adapter);
598 if (err)
629 goto err_up; 599 goto err_up;
630 600
631 return 0; 601 return 0;
@@ -681,7 +651,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
681 651
682 size = sizeof(struct ixgb_buffer) * txdr->count; 652 size = sizeof(struct ixgb_buffer) * txdr->count;
683 txdr->buffer_info = vmalloc(size); 653 txdr->buffer_info = vmalloc(size);
684 if(!txdr->buffer_info) { 654 if (!txdr->buffer_info) {
685 DPRINTK(PROBE, ERR, 655 DPRINTK(PROBE, ERR,
686 "Unable to allocate transmit descriptor ring memory\n"); 656 "Unable to allocate transmit descriptor ring memory\n");
687 return -ENOMEM; 657 return -ENOMEM;
@@ -694,7 +664,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
694 txdr->size = ALIGN(txdr->size, 4096); 664 txdr->size = ALIGN(txdr->size, 4096);
695 665
696 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 666 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
697 if(!txdr->desc) { 667 if (!txdr->desc) {
698 vfree(txdr->buffer_info); 668 vfree(txdr->buffer_info);
699 DPRINTK(PROBE, ERR, 669 DPRINTK(PROBE, ERR,
700 "Unable to allocate transmit descriptor memory\n"); 670 "Unable to allocate transmit descriptor memory\n");
@@ -723,8 +693,8 @@ ixgb_configure_tx(struct ixgb_adapter *adapter)
723 u32 tctl; 693 u32 tctl;
724 struct ixgb_hw *hw = &adapter->hw; 694 struct ixgb_hw *hw = &adapter->hw;
725 695
726 /* Setup the Base and Length of the Tx Descriptor Ring 696 /* Setup the Base and Length of the Tx Descriptor Ring
727 * tx_ring.dma can be either a 32 or 64 bit value 697 * tx_ring.dma can be either a 32 or 64 bit value
728 */ 698 */
729 699
730 IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); 700 IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
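The ring base address is a dma_addr_t that may be 32 or 64 bits wide, so it is programmed as two 32-bit register writes. The TDBAH write sits just outside this hunk's context; the full pattern is roughly:

    IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); /* low 32 bits */
    IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));                   /* high 32 bits */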
@@ -750,8 +720,8 @@ ixgb_configure_tx(struct ixgb_adapter *adapter)
750 720
751 /* Setup Transmit Descriptor Settings for this adapter */ 721 /* Setup Transmit Descriptor Settings for this adapter */
752 adapter->tx_cmd_type = 722 adapter->tx_cmd_type =
753 IXGB_TX_DESC_TYPE 723 IXGB_TX_DESC_TYPE |
754 | (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0); 724 (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
755} 725}
756 726
757/** 727/**
@@ -770,7 +740,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
770 740
771 size = sizeof(struct ixgb_buffer) * rxdr->count; 741 size = sizeof(struct ixgb_buffer) * rxdr->count;
772 rxdr->buffer_info = vmalloc(size); 742 rxdr->buffer_info = vmalloc(size);
773 if(!rxdr->buffer_info) { 743 if (!rxdr->buffer_info) {
774 DPRINTK(PROBE, ERR, 744 DPRINTK(PROBE, ERR,
775 "Unable to allocate receive descriptor ring\n"); 745 "Unable to allocate receive descriptor ring\n");
776 return -ENOMEM; 746 return -ENOMEM;
@@ -784,7 +754,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
784 754
785 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 755 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
786 756
787 if(!rxdr->desc) { 757 if (!rxdr->desc) {
788 vfree(rxdr->buffer_info); 758 vfree(rxdr->buffer_info);
789 DPRINTK(PROBE, ERR, 759 DPRINTK(PROBE, ERR,
790 "Unable to allocate receive descriptors\n"); 760 "Unable to allocate receive descriptors\n");
@@ -813,8 +783,8 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter)
813 rctl &= ~(3 << IXGB_RCTL_MO_SHIFT); 783 rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
814 784
815 rctl |= 785 rctl |=
816 IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | 786 IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
817 IXGB_RCTL_RXEN | IXGB_RCTL_CFF | 787 IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
818 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT); 788 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
819 789
820 rctl |= IXGB_RCTL_SECRC; 790 rctl |= IXGB_RCTL_SECRC;
@@ -846,7 +816,6 @@ ixgb_configure_rx(struct ixgb_adapter *adapter)
846 struct ixgb_hw *hw = &adapter->hw; 816 struct ixgb_hw *hw = &adapter->hw;
847 u32 rctl; 817 u32 rctl;
848 u32 rxcsum; 818 u32 rxcsum;
849 u32 rxdctl;
850 819
851 /* make sure receives are disabled while setting up the descriptors */ 820 /* make sure receives are disabled while setting up the descriptors */
852 821
@@ -868,18 +837,12 @@ ixgb_configure_rx(struct ixgb_adapter *adapter)
868 IXGB_WRITE_REG(hw, RDH, 0); 837 IXGB_WRITE_REG(hw, RDH, 0);
869 IXGB_WRITE_REG(hw, RDT, 0); 838 IXGB_WRITE_REG(hw, RDT, 0);
870 839
871 /* set up pre-fetching of receive buffers so we get some before we 840 /* due to the hardware errata with RXDCTL, we are unable to use any of
872 * run out (default hardware behavior is to run out before fetching 841 * the performance enhancing features of it without causing other
873 * more). This sets up to fetch if HTHRESH rx descriptors are avail 842 * subtle bugs, some of the bugs could include receive length
874 * and the descriptors in hw cache are below PTHRESH. This avoids 843 * corruption at high data rates (WTHRESH > 0) and/or receive
875 * the hardware behavior of fetching <=512 descriptors in a single 844 * descriptor ring irregularites (particularly in hardware cache) */
876 * burst that pre-empts all other activity, usually causing fifo 845 IXGB_WRITE_REG(hw, RXDCTL, 0);
877 * overflows. */
878 /* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
879 rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
880 RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
881 RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
882 IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
883 846
884 /* Enable Receive Checksum Offload for TCP and UDP */ 847 /* Enable Receive Checksum Offload for TCP and UDP */
885 if (adapter->rx_csum) { 848 if (adapter->rx_csum) {
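For reference, the lines removed earlier in this hunk packed the three prefetch thresholds into a single RXDCTL value; the shift constants come from ixgb_hw.h. Reassembled from the deleted code:

    /* WTHRESH: write-back burst; HTHRESH/PTHRESH: prefetch trigger points */
    u32 rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
                 RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
                 RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
    IXGB_WRITE_REG(hw, RXDCTL, rxdctl);

The patch replaces all of this with a plain write of 0, i.e. hardware defaults, because any tuning of these fields trips the errata described in the new comment.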
@@ -918,7 +881,7 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
918 881
919static void 882static void
920ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter, 883ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
921 struct ixgb_buffer *buffer_info) 884 struct ixgb_buffer *buffer_info)
922{ 885{
923 struct pci_dev *pdev = adapter->pdev; 886 struct pci_dev *pdev = adapter->pdev;
924 887
@@ -926,8 +889,10 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
926 pci_unmap_page(pdev, buffer_info->dma, buffer_info->length, 889 pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
927 PCI_DMA_TODEVICE); 890 PCI_DMA_TODEVICE);
928 891
892 /* okay to call kfree_skb here instead of kfree_skb_any because
893 * this is never called in interrupt context */
929 if (buffer_info->skb) 894 if (buffer_info->skb)
930 dev_kfree_skb_any(buffer_info->skb); 895 dev_kfree_skb(buffer_info->skb);
931 896
932 buffer_info->skb = NULL; 897 buffer_info->skb = NULL;
933 buffer_info->dma = 0; 898 buffer_info->dma = 0;
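The new comment justifies the swap on the line below it: dev_kfree_skb() must not be used from hard interrupt context or with interrupts disabled, while dev_kfree_skb_any() pays an extra check to be safe anywhere. A sketch of what the core helper does (core networking code, not part of this driver):

    void dev_kfree_skb_any(struct sk_buff *skb)
    {
            if (in_irq() || irqs_disabled())
                    dev_kfree_skb_irq(skb);  /* defer the free to softirq */
            else
                    dev_kfree_skb(skb);      /* free immediately */
    }

Since ixgb_unmap_and_free_tx_resource() is only reached from process or softirq context, the cheaper call suffices.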
@@ -952,7 +917,7 @@ ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
952 917
953 /* Free all the Tx ring sk_buffs */ 918 /* Free all the Tx ring sk_buffs */
954 919
955 for(i = 0; i < tx_ring->count; i++) { 920 for (i = 0; i < tx_ring->count; i++) {
956 buffer_info = &tx_ring->buffer_info[i]; 921 buffer_info = &tx_ring->buffer_info[i];
957 ixgb_unmap_and_free_tx_resource(adapter, buffer_info); 922 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
958 } 923 }
@@ -1010,9 +975,9 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
1010 975
1011 /* Free all the Rx ring sk_buffs */ 976 /* Free all the Rx ring sk_buffs */
1012 977
1013 for(i = 0; i < rx_ring->count; i++) { 978 for (i = 0; i < rx_ring->count; i++) {
1014 buffer_info = &rx_ring->buffer_info[i]; 979 buffer_info = &rx_ring->buffer_info[i];
1015 if(buffer_info->skb) { 980 if (buffer_info->skb) {
1016 981
1017 pci_unmap_single(pdev, 982 pci_unmap_single(pdev,
1018 buffer_info->dma, 983 buffer_info->dma,
@@ -1053,7 +1018,7 @@ ixgb_set_mac(struct net_device *netdev, void *p)
1053 struct ixgb_adapter *adapter = netdev_priv(netdev); 1018 struct ixgb_adapter *adapter = netdev_priv(netdev);
1054 struct sockaddr *addr = p; 1019 struct sockaddr *addr = p;
1055 1020
1056 if(!is_valid_ether_addr(addr->sa_data)) 1021 if (!is_valid_ether_addr(addr->sa_data))
1057 return -EADDRNOTAVAIL; 1022 return -EADDRNOTAVAIL;
1058 1023
1059 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1024 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -1086,16 +1051,16 @@ ixgb_set_multi(struct net_device *netdev)
1086 1051
1087 rctl = IXGB_READ_REG(hw, RCTL); 1052 rctl = IXGB_READ_REG(hw, RCTL);
1088 1053
1089 if(netdev->flags & IFF_PROMISC) { 1054 if (netdev->flags & IFF_PROMISC) {
1090 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); 1055 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1091 } else if(netdev->flags & IFF_ALLMULTI) { 1056 } else if (netdev->flags & IFF_ALLMULTI) {
1092 rctl |= IXGB_RCTL_MPE; 1057 rctl |= IXGB_RCTL_MPE;
1093 rctl &= ~IXGB_RCTL_UPE; 1058 rctl &= ~IXGB_RCTL_UPE;
1094 } else { 1059 } else {
1095 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE); 1060 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1096 } 1061 }
1097 1062
1098 if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { 1063 if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
1099 rctl |= IXGB_RCTL_MPE; 1064 rctl |= IXGB_RCTL_MPE;
1100 IXGB_WRITE_REG(hw, RCTL, rctl); 1065 IXGB_WRITE_REG(hw, RCTL, rctl);
1101 } else { 1066 } else {
@@ -1104,10 +1069,11 @@ ixgb_set_multi(struct net_device *netdev)
1104 1069
1105 IXGB_WRITE_REG(hw, RCTL, rctl); 1070 IXGB_WRITE_REG(hw, RCTL, rctl);
1106 1071
1107 for(i = 0, mc_ptr = netdev->mc_list; mc_ptr; 1072 for (i = 0, mc_ptr = netdev->mc_list;
1108 i++, mc_ptr = mc_ptr->next) 1073 mc_ptr;
1074 i++, mc_ptr = mc_ptr->next)
1109 memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS], 1075 memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
1110 mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS); 1076 mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
1111 1077
1112 ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0); 1078 ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
1113 } 1079 }
@@ -1132,8 +1098,8 @@ ixgb_watchdog(unsigned long data)
1132 netif_stop_queue(netdev); 1098 netif_stop_queue(netdev);
1133 } 1099 }
1134 1100
1135 if(adapter->hw.link_up) { 1101 if (adapter->hw.link_up) {
1136 if(!netif_carrier_ok(netdev)) { 1102 if (!netif_carrier_ok(netdev)) {
1137 DPRINTK(LINK, INFO, 1103 DPRINTK(LINK, INFO,
1138 "NIC Link is Up 10000 Mbps Full Duplex\n"); 1104 "NIC Link is Up 10000 Mbps Full Duplex\n");
1139 adapter->link_speed = 10000; 1105 adapter->link_speed = 10000;
@@ -1142,7 +1108,7 @@ ixgb_watchdog(unsigned long data)
1142 netif_wake_queue(netdev); 1108 netif_wake_queue(netdev);
1143 } 1109 }
1144 } else { 1110 } else {
1145 if(netif_carrier_ok(netdev)) { 1111 if (netif_carrier_ok(netdev)) {
1146 adapter->link_speed = 0; 1112 adapter->link_speed = 0;
1147 adapter->link_duplex = 0; 1113 adapter->link_duplex = 0;
1148 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 1114 DPRINTK(LINK, INFO, "NIC Link is Down\n");
@@ -1154,8 +1120,8 @@ ixgb_watchdog(unsigned long data)
1154 1120
1155 ixgb_update_stats(adapter); 1121 ixgb_update_stats(adapter);
1156 1122
1157 if(!netif_carrier_ok(netdev)) { 1123 if (!netif_carrier_ok(netdev)) {
1158 if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) { 1124 if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
1159 /* We've lost link, so the controller stops DMA, 1125 /* We've lost link, so the controller stops DMA,
1160 * but we've got queued Tx work that's never going 1126 * but we've got queued Tx work that's never going
1161 * to get done, so reset controller to flush Tx. 1127 * to get done, so reset controller to flush Tx.
@@ -1227,7 +1193,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1227 context_desc->hdr_len = hdr_len; 1193 context_desc->hdr_len = hdr_len;
1228 context_desc->status = 0; 1194 context_desc->status = 0;
1229 context_desc->cmd_type_len = cpu_to_le32( 1195 context_desc->cmd_type_len = cpu_to_le32(
1230 IXGB_CONTEXT_DESC_TYPE 1196 IXGB_CONTEXT_DESC_TYPE
1231 | IXGB_CONTEXT_DESC_CMD_TSE 1197 | IXGB_CONTEXT_DESC_CMD_TSE
1232 | IXGB_CONTEXT_DESC_CMD_IP 1198 | IXGB_CONTEXT_DESC_CMD_IP
1233 | IXGB_CONTEXT_DESC_CMD_TCP 1199 | IXGB_CONTEXT_DESC_CMD_TCP
@@ -1235,7 +1201,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1235 | (skb->len - (hdr_len))); 1201 | (skb->len - (hdr_len)));
1236 1202
1237 1203
1238 if(++i == adapter->tx_ring.count) i = 0; 1204 if (++i == adapter->tx_ring.count) i = 0;
1239 adapter->tx_ring.next_to_use = i; 1205 adapter->tx_ring.next_to_use = i;
1240 1206
1241 return 1; 1207 return 1;
@@ -1251,7 +1217,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1251 unsigned int i; 1217 unsigned int i;
1252 u8 css, cso; 1218 u8 css, cso;
1253 1219
1254 if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 1220 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1255 struct ixgb_buffer *buffer_info; 1221 struct ixgb_buffer *buffer_info;
1256 css = skb_transport_offset(skb); 1222 css = skb_transport_offset(skb);
1257 cso = css + skb->csum_offset; 1223 cso = css + skb->csum_offset;
@@ -1273,7 +1239,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1273 cpu_to_le32(IXGB_CONTEXT_DESC_TYPE 1239 cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
1274 | IXGB_TX_DESC_CMD_IDE); 1240 | IXGB_TX_DESC_CMD_IDE);
1275 1241
1276 if(++i == adapter->tx_ring.count) i = 0; 1242 if (++i == adapter->tx_ring.count) i = 0;
1277 adapter->tx_ring.next_to_use = i; 1243 adapter->tx_ring.next_to_use = i;
1278 1244
1279 return true; 1245 return true;
@@ -1302,7 +1268,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1302 1268
1303 i = tx_ring->next_to_use; 1269 i = tx_ring->next_to_use;
1304 1270
1305 while(len) { 1271 while (len) {
1306 buffer_info = &tx_ring->buffer_info[i]; 1272 buffer_info = &tx_ring->buffer_info[i];
1307 size = min(len, IXGB_MAX_DATA_PER_TXD); 1273 size = min(len, IXGB_MAX_DATA_PER_TXD);
1308 /* Workaround for premature desc write-backs 1274 /* Workaround for premature desc write-backs
@@ -1312,28 +1278,28 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1312 1278
1313 buffer_info->length = size; 1279 buffer_info->length = size;
1314 WARN_ON(buffer_info->dma != 0); 1280 WARN_ON(buffer_info->dma != 0);
1281 buffer_info->time_stamp = jiffies;
1315 buffer_info->dma = 1282 buffer_info->dma =
1316 pci_map_single(adapter->pdev, 1283 pci_map_single(adapter->pdev,
1317 skb->data + offset, 1284 skb->data + offset,
1318 size, 1285 size,
1319 PCI_DMA_TODEVICE); 1286 PCI_DMA_TODEVICE);
1320 buffer_info->time_stamp = jiffies;
1321 buffer_info->next_to_watch = 0; 1287 buffer_info->next_to_watch = 0;
1322 1288
1323 len -= size; 1289 len -= size;
1324 offset += size; 1290 offset += size;
1325 count++; 1291 count++;
1326 if(++i == tx_ring->count) i = 0; 1292 if (++i == tx_ring->count) i = 0;
1327 } 1293 }
1328 1294
1329 for(f = 0; f < nr_frags; f++) { 1295 for (f = 0; f < nr_frags; f++) {
1330 struct skb_frag_struct *frag; 1296 struct skb_frag_struct *frag;
1331 1297
1332 frag = &skb_shinfo(skb)->frags[f]; 1298 frag = &skb_shinfo(skb)->frags[f];
1333 len = frag->size; 1299 len = frag->size;
1334 offset = 0; 1300 offset = 0;
1335 1301
1336 while(len) { 1302 while (len) {
1337 buffer_info = &tx_ring->buffer_info[i]; 1303 buffer_info = &tx_ring->buffer_info[i];
1338 size = min(len, IXGB_MAX_DATA_PER_TXD); 1304 size = min(len, IXGB_MAX_DATA_PER_TXD);
1339 1305
@@ -1344,19 +1310,19 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1344 size -= 4; 1310 size -= 4;
1345 1311
1346 buffer_info->length = size; 1312 buffer_info->length = size;
1313 buffer_info->time_stamp = jiffies;
1347 buffer_info->dma = 1314 buffer_info->dma =
1348 pci_map_page(adapter->pdev, 1315 pci_map_page(adapter->pdev,
1349 frag->page, 1316 frag->page,
1350 frag->page_offset + offset, 1317 frag->page_offset + offset,
1351 size, 1318 size,
1352 PCI_DMA_TODEVICE); 1319 PCI_DMA_TODEVICE);
1353 buffer_info->time_stamp = jiffies;
1354 buffer_info->next_to_watch = 0; 1320 buffer_info->next_to_watch = 0;
1355 1321
1356 len -= size; 1322 len -= size;
1357 offset += size; 1323 offset += size;
1358 count++; 1324 count++;
1359 if(++i == tx_ring->count) i = 0; 1325 if (++i == tx_ring->count) i = 0;
1360 } 1326 }
1361 } 1327 }
1362 i = (i == 0) ? tx_ring->count - 1 : i - 1; 1328 i = (i == 0) ? tx_ring->count - 1 : i - 1;
@@ -1377,21 +1343,20 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1377 u8 popts = 0; 1343 u8 popts = 0;
1378 unsigned int i; 1344 unsigned int i;
1379 1345
1380 if(tx_flags & IXGB_TX_FLAGS_TSO) { 1346 if (tx_flags & IXGB_TX_FLAGS_TSO) {
1381 cmd_type_len |= IXGB_TX_DESC_CMD_TSE; 1347 cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
1382 popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM); 1348 popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
1383 } 1349 }
1384 1350
1385 if(tx_flags & IXGB_TX_FLAGS_CSUM) 1351 if (tx_flags & IXGB_TX_FLAGS_CSUM)
1386 popts |= IXGB_TX_DESC_POPTS_TXSM; 1352 popts |= IXGB_TX_DESC_POPTS_TXSM;
1387 1353
1388 if(tx_flags & IXGB_TX_FLAGS_VLAN) { 1354 if (tx_flags & IXGB_TX_FLAGS_VLAN)
1389 cmd_type_len |= IXGB_TX_DESC_CMD_VLE; 1355 cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1390 }
1391 1356
1392 i = tx_ring->next_to_use; 1357 i = tx_ring->next_to_use;
1393 1358
1394 while(count--) { 1359 while (count--) {
1395 buffer_info = &tx_ring->buffer_info[i]; 1360 buffer_info = &tx_ring->buffer_info[i];
1396 tx_desc = IXGB_TX_DESC(*tx_ring, i); 1361 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1397 tx_desc->buff_addr = cpu_to_le64(buffer_info->dma); 1362 tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -1401,11 +1366,11 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1401 tx_desc->popts = popts; 1366 tx_desc->popts = popts;
1402 tx_desc->vlan = cpu_to_le16(vlan_id); 1367 tx_desc->vlan = cpu_to_le16(vlan_id);
1403 1368
1404 if(++i == tx_ring->count) i = 0; 1369 if (++i == tx_ring->count) i = 0;
1405 } 1370 }
1406 1371
1407 tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP 1372 tx_desc->cmd_type_len |=
1408 | IXGB_TX_DESC_CMD_RS ); 1373 cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);
1409 1374
1410 /* Force memory writes to complete before letting h/w 1375 /* Force memory writes to complete before letting h/w
1411 * know there are new descriptors to fetch. (Only 1376 * know there are new descriptors to fetch. (Only
@@ -1461,7 +1426,6 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1461 struct ixgb_adapter *adapter = netdev_priv(netdev); 1426 struct ixgb_adapter *adapter = netdev_priv(netdev);
1462 unsigned int first; 1427 unsigned int first;
1463 unsigned int tx_flags = 0; 1428 unsigned int tx_flags = 0;
1464 unsigned long flags;
1465 int vlan_id = 0; 1429 int vlan_id = 0;
1466 int tso; 1430 int tso;
1467 1431
@@ -1470,51 +1434,31 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1470 return NETDEV_TX_OK; 1434 return NETDEV_TX_OK;
1471 } 1435 }
1472 1436
1473 if(skb->len <= 0) { 1437 if (skb->len <= 0) {
1474 dev_kfree_skb_any(skb); 1438 dev_kfree_skb(skb);
1475 return 0; 1439 return 0;
1476 } 1440 }
1477 1441
1478#ifdef NETIF_F_LLTX
1479 if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
1480 /* Collision - tell upper layer to requeue */
1481 local_irq_restore(flags);
1482 return NETDEV_TX_LOCKED;
1483 }
1484#else
1485 spin_lock_irqsave(&adapter->tx_lock, flags);
1486#endif
1487
1488 if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, 1442 if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
1489 DESC_NEEDED))) { 1443 DESC_NEEDED)))
1490 netif_stop_queue(netdev);
1491 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1492 return NETDEV_TX_BUSY; 1444 return NETDEV_TX_BUSY;
1493 }
1494 1445
1495#ifndef NETIF_F_LLTX 1446 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
1496 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1497#endif
1498
1499 if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
1500 tx_flags |= IXGB_TX_FLAGS_VLAN; 1447 tx_flags |= IXGB_TX_FLAGS_VLAN;
1501 vlan_id = vlan_tx_tag_get(skb); 1448 vlan_id = vlan_tx_tag_get(skb);
1502 } 1449 }
1503 1450
1504 first = adapter->tx_ring.next_to_use; 1451 first = adapter->tx_ring.next_to_use;
1505 1452
1506 tso = ixgb_tso(adapter, skb); 1453 tso = ixgb_tso(adapter, skb);
1507 if (tso < 0) { 1454 if (tso < 0) {
1508 dev_kfree_skb_any(skb); 1455 dev_kfree_skb(skb);
1509#ifdef NETIF_F_LLTX
1510 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1511#endif
1512 return NETDEV_TX_OK; 1456 return NETDEV_TX_OK;
1513 } 1457 }
1514 1458
1515 if (likely(tso)) 1459 if (likely(tso))
1516 tx_flags |= IXGB_TX_FLAGS_TSO; 1460 tx_flags |= IXGB_TX_FLAGS_TSO;
1517 else if(ixgb_tx_csum(adapter, skb)) 1461 else if (ixgb_tx_csum(adapter, skb))
1518 tx_flags |= IXGB_TX_FLAGS_CSUM; 1462 tx_flags |= IXGB_TX_FLAGS_CSUM;
1519 1463
1520 ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id, 1464 ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
@@ -1522,13 +1466,9 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1522 1466
1523 netdev->trans_start = jiffies; 1467 netdev->trans_start = jiffies;
1524 1468
1525#ifdef NETIF_F_LLTX
1526 /* Make sure there is space in the ring for the next send. */ 1469 /* Make sure there is space in the ring for the next send. */
1527 ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED); 1470 ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
1528 1471
1529 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1530
1531#endif
1532 return NETDEV_TX_OK; 1472 return NETDEV_TX_OK;
1533} 1473}
1534 1474
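With NETIF_F_LLTX gone, the core's tx lock now serializes calls into ixgb_xmit_frame(), so the driver-private tx_lock and the NETDEV_TX_LOCKED requeue path both disappear. For contrast, the deleted LLTX entry looked like this (reassembled from the removed lines):

    if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
            /* collision with another CPU: ask the stack to requeue */
            local_irq_restore(flags);
            return NETDEV_TX_LOCKED;
    }
    ...
    spin_unlock_irqrestore(&adapter->tx_lock, flags);

Every exit path had to drop the lock, which is exactly the kind of bookkeeping the conversion removes.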
@@ -1588,21 +1528,25 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1588 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 1528 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1589 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 1529 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1590 1530
1591 1531 /* MTU < 68 is an error for IPv4 traffic, just don't allow it */
1592 if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) 1532 if ((new_mtu < 68) ||
1593 || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) { 1533 (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
1594 DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu); 1534 DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
1595 return -EINVAL; 1535 return -EINVAL;
1596 } 1536 }
1597 1537
1598 adapter->rx_buffer_len = max_frame; 1538 if (old_max_frame == max_frame)
1539 return 0;
1540
1541 if (netif_running(netdev))
1542 ixgb_down(adapter, true);
1543
1544 adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
1599 1545
1600 netdev->mtu = new_mtu; 1546 netdev->mtu = new_mtu;
1601 1547
1602 if ((old_max_frame != max_frame) && netif_running(netdev)) { 1548 if (netif_running(netdev))
1603 ixgb_down(adapter, true);
1604 ixgb_up(adapter); 1549 ixgb_up(adapter);
1605 }
1606 1550
1607 return 0; 1551 return 0;
1608} 1552}
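The reworked ixgb_change_mtu() follows the usual reconfigure sequence: validate, return early when the frame size is unchanged, quiesce the device, resize, then bring it back up. Read as straight code, the new right-hand column is:

    if ((new_mtu < 68) ||
        (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH))
            return -EINVAL;
    if (old_max_frame == max_frame)
            return 0;                        /* nothing to do */
    if (netif_running(netdev))
            ixgb_down(adapter, true);        /* stop traffic, free buffers */
    adapter->rx_buffer_len = max_frame + 8;  /* + 8 for the length errata */
    netdev->mtu = new_mtu;
    if (netif_running(netdev))
            ixgb_up(adapter);                /* reallocate at the new size */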
@@ -1622,21 +1566,21 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1622 if (pci_channel_offline(pdev)) 1566 if (pci_channel_offline(pdev))
1623 return; 1567 return;
1624 1568
1625 if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || 1569 if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1626 (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { 1570 (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1627 u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL); 1571 u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1628 u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL); 1572 u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1629 u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH); 1573 u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1630 u64 bcast = ((u64)bcast_h << 32) | bcast_l; 1574 u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1631 1575
1632 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); 1576 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1633 /* fix up multicast stats by removing broadcasts */ 1577 /* fix up multicast stats by removing broadcasts */
1634 if(multi >= bcast) 1578 if (multi >= bcast)
1635 multi -= bcast; 1579 multi -= bcast;
1636 1580
1637 adapter->stats.mprcl += (multi & 0xFFFFFFFF); 1581 adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1638 adapter->stats.mprch += (multi >> 32); 1582 adapter->stats.mprch += (multi >> 32);
1639 adapter->stats.bprcl += bcast_l; 1583 adapter->stats.bprcl += bcast_l;
1640 adapter->stats.bprch += bcast_h; 1584 adapter->stats.bprch += bcast_h;
1641 } else { 1585 } else {
1642 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); 1586 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
@@ -1751,41 +1695,26 @@ ixgb_intr(int irq, void *data)
1751 struct ixgb_adapter *adapter = netdev_priv(netdev); 1695 struct ixgb_adapter *adapter = netdev_priv(netdev);
1752 struct ixgb_hw *hw = &adapter->hw; 1696 struct ixgb_hw *hw = &adapter->hw;
1753 u32 icr = IXGB_READ_REG(hw, ICR); 1697 u32 icr = IXGB_READ_REG(hw, ICR);
1754#ifndef CONFIG_IXGB_NAPI
1755 unsigned int i;
1756#endif
1757 1698
1758 if(unlikely(!icr)) 1699 if (unlikely(!icr))
1759 return IRQ_NONE; /* Not our interrupt */ 1700 return IRQ_NONE; /* Not our interrupt */
1760 1701
1761 if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) 1702 if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1762 if (!test_bit(__IXGB_DOWN, &adapter->flags)) 1703 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1763 mod_timer(&adapter->watchdog_timer, jiffies); 1704 mod_timer(&adapter->watchdog_timer, jiffies);
1764 1705
1765#ifdef CONFIG_IXGB_NAPI
1766 if (netif_rx_schedule_prep(netdev, &adapter->napi)) { 1706 if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
1767 1707
1768 /* Disable interrupts and register for poll. The flush 1708 /* Disable interrupts and register for poll. The flush
1769 of the posted write is intentionally left out. 1709 of the posted write is intentionally left out.
1770 */ 1710 */
1771 1711
1772 IXGB_WRITE_REG(&adapter->hw, IMC, ~0); 1712 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1773 __netif_rx_schedule(netdev, &adapter->napi); 1713 __netif_rx_schedule(netdev, &adapter->napi);
1774 } 1714 }
1775#else
1776 /* yes, that is actually a & and it is meant to make sure that
1777 * every pass through this for loop checks both receive and
1778 * transmit queues for completed descriptors, intended to
1779 * avoid starvation issues and assist tx/rx fairness. */
1780 for(i = 0; i < IXGB_MAX_INTR; i++)
1781 if(!ixgb_clean_rx_irq(adapter) &
1782 !ixgb_clean_tx_irq(adapter))
1783 break;
1784#endif
1785 return IRQ_HANDLED; 1715 return IRQ_HANDLED;
1786} 1716}
1787 1717
1788#ifdef CONFIG_IXGB_NAPI
1789/** 1718/**
1790 * ixgb_clean - NAPI Rx polling callback 1719 * ixgb_clean - NAPI Rx polling callback
1791 * @adapter: board private structure 1720 * @adapter: board private structure
@@ -1804,12 +1733,12 @@ ixgb_clean(struct napi_struct *napi, int budget)
1804 /* If budget not fully consumed, exit the polling mode */ 1733 /* If budget not fully consumed, exit the polling mode */
1805 if (work_done < budget) { 1734 if (work_done < budget) {
1806 netif_rx_complete(netdev, napi); 1735 netif_rx_complete(netdev, napi);
1807 ixgb_irq_enable(adapter); 1736 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1737 ixgb_irq_enable(adapter);
1808 } 1738 }
1809 1739
1810 return work_done; 1740 return work_done;
1811} 1741}
1812#endif
1813 1742
1814/** 1743/**
1815 * ixgb_clean_tx_irq - Reclaim resources after transmit completes 1744 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
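The hunk above is the tail of ixgb_clean(), the NAPI poll routine that ixgb_intr() schedules after masking the device with IMC. The contract: consume up to budget receive packets; if the ring drains before budget is spent, leave polling mode and re-enable the interrupt, and with this patch only when the adapter is not mid-teardown. A reconstructed outline of the whole routine (only the tail appears in the hunk):

    static int ixgb_clean(struct napi_struct *napi, int budget)
    {
            struct ixgb_adapter *adapter =
                    container_of(napi, struct ixgb_adapter, napi);
            int work_done = 0;

            ixgb_clean_tx_irq(adapter);
            ixgb_clean_rx_irq(adapter, &work_done, budget);

            /* under budget means the ring drained: exit polling mode */
            if (work_done < budget) {
                    netif_rx_complete(adapter->netdev, napi);
                    if (!test_bit(__IXGB_DOWN, &adapter->flags))
                            ixgb_irq_enable(adapter);
            }
            return work_done;
    }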
@@ -1830,15 +1759,15 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1830 eop = tx_ring->buffer_info[i].next_to_watch; 1759 eop = tx_ring->buffer_info[i].next_to_watch;
1831 eop_desc = IXGB_TX_DESC(*tx_ring, eop); 1760 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1832 1761
1833 while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) { 1762 while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1834 1763
1835 for (cleaned = false; !cleaned; ) { 1764 for (cleaned = false; !cleaned; ) {
1836 tx_desc = IXGB_TX_DESC(*tx_ring, i); 1765 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1837 buffer_info = &tx_ring->buffer_info[i]; 1766 buffer_info = &tx_ring->buffer_info[i];
1838 1767
1839 if (tx_desc->popts 1768 if (tx_desc->popts &
1840 & (IXGB_TX_DESC_POPTS_TXSM | 1769 (IXGB_TX_DESC_POPTS_TXSM |
1841 IXGB_TX_DESC_POPTS_IXSM)) 1770 IXGB_TX_DESC_POPTS_IXSM))
1842 adapter->hw_csum_tx_good++; 1771 adapter->hw_csum_tx_good++;
1843 1772
1844 ixgb_unmap_and_free_tx_resource(adapter, buffer_info); 1773 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
@@ -1846,7 +1775,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1846 *(u32 *)&(tx_desc->status) = 0; 1775 *(u32 *)&(tx_desc->status) = 0;
1847 1776
1848 cleaned = (i == eop); 1777 cleaned = (i == eop);
1849 if(++i == tx_ring->count) i = 0; 1778 if (++i == tx_ring->count) i = 0;
1850 } 1779 }
1851 1780
1852 eop = tx_ring->buffer_info[i].next_to_watch; 1781 eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1855,15 +1784,20 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1855 1784
1856 tx_ring->next_to_clean = i; 1785 tx_ring->next_to_clean = i;
1857 1786
1858 if (unlikely(netif_queue_stopped(netdev))) { 1787 if (unlikely(cleaned && netif_carrier_ok(netdev) &&
1859 spin_lock(&adapter->tx_lock); 1788 IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
1860 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) && 1789 /* Make sure that anybody stopping the queue after this
1861 (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) 1790 * sees the new next_to_clean. */
1791 smp_mb();
1792
1793 if (netif_queue_stopped(netdev) &&
1794 !(test_bit(__IXGB_DOWN, &adapter->flags))) {
1862 netif_wake_queue(netdev); 1795 netif_wake_queue(netdev);
1863 spin_unlock(&adapter->tx_lock); 1796 ++adapter->restart_queue;
1797 }
1864 } 1798 }
1865 1799
1866 if(adapter->detect_tx_hung) { 1800 if (adapter->detect_tx_hung) {
1867 /* detect a transmit hang in hardware, this serializes the 1801 /* detect a transmit hang in hardware, this serializes the
1868 * check with the clearing of time_stamp and movement of i */ 1802 * check with the clearing of time_stamp and movement of i */
1869 adapter->detect_tx_hung = false; 1803 adapter->detect_tx_hung = false;
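The rewritten wake-up is the lockless stop/wake handshake that replaces tx_lock. The cleaner publishes the new next_to_clean, then issues smp_mb() so a concurrent transmitter that just stopped the queue is guaranteed to observe the freed descriptors; the stop side (ixgb_maybe_stop_tx, not shown in this excerpt) mirrors it. The pairing, sketched:

    /* transmit path, on a full ring (sketch) */
    netif_stop_queue(netdev);
    smp_mb();                                 /* stop visible before re-check */
    if (IXGB_DESC_UNUSED(tx_ring) >= size)
            netif_start_queue(netdev);        /* cleaner raced us: undo */

    /* cleanup path, as in this hunk */
    tx_ring->next_to_clean = i;
    smp_mb();                                 /* publish before testing stopped */
    if (netif_queue_stopped(netdev) && ...)
            netif_wake_queue(netdev);

Without the barriers, each side could miss the other's update and leave the queue stopped forever.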
@@ -1906,13 +1840,13 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1906 1840
1907static void 1841static void
1908ixgb_rx_checksum(struct ixgb_adapter *adapter, 1842ixgb_rx_checksum(struct ixgb_adapter *adapter,
1909 struct ixgb_rx_desc *rx_desc, 1843 struct ixgb_rx_desc *rx_desc,
1910 struct sk_buff *skb) 1844 struct sk_buff *skb)
1911{ 1845{
1912 /* Ignore Checksum bit is set OR 1846 /* Ignore Checksum bit is set OR
1913 * TCP Checksum has not been calculated 1847 * TCP Checksum has not been calculated
1914 */ 1848 */
1915 if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) || 1849 if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1916 (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) { 1850 (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1917 skb->ip_summed = CHECKSUM_NONE; 1851 skb->ip_summed = CHECKSUM_NONE;
1918 return; 1852 return;
@@ -1920,7 +1854,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
1920 1854
1921 /* At this point we know the hardware did the TCP checksum */ 1855 /* At this point we know the hardware did the TCP checksum */
1922 /* now look at the TCP checksum error bit */ 1856 /* now look at the TCP checksum error bit */
1923 if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) { 1857 if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1924 /* let the stack verify checksum errors */ 1858 /* let the stack verify checksum errors */
1925 skb->ip_summed = CHECKSUM_NONE; 1859 skb->ip_summed = CHECKSUM_NONE;
1926 adapter->hw_csum_rx_error++; 1860 adapter->hw_csum_rx_error++;
@@ -1937,11 +1871,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
1937 **/ 1871 **/
1938 1872
1939static bool 1873static bool
1940#ifdef CONFIG_IXGB_NAPI
1941ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) 1874ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1942#else
1943ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1944#endif
1945{ 1875{
1946 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; 1876 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1947 struct net_device *netdev = adapter->netdev; 1877 struct net_device *netdev = adapter->netdev;
@@ -1950,50 +1880,50 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1950 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; 1880 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1951 u32 length; 1881 u32 length;
1952 unsigned int i, j; 1882 unsigned int i, j;
1883 int cleaned_count = 0;
1953 bool cleaned = false; 1884 bool cleaned = false;
1954 1885
1955 i = rx_ring->next_to_clean; 1886 i = rx_ring->next_to_clean;
1956 rx_desc = IXGB_RX_DESC(*rx_ring, i); 1887 rx_desc = IXGB_RX_DESC(*rx_ring, i);
1957 buffer_info = &rx_ring->buffer_info[i]; 1888 buffer_info = &rx_ring->buffer_info[i];
1958 1889
1959 while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) { 1890 while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1960 struct sk_buff *skb, *next_skb; 1891 struct sk_buff *skb;
1961 u8 status; 1892 u8 status;
1962 1893
1963#ifdef CONFIG_IXGB_NAPI 1894 if (*work_done >= work_to_do)
1964 if(*work_done >= work_to_do)
1965 break; 1895 break;
1966 1896
1967 (*work_done)++; 1897 (*work_done)++;
1968#endif
1969 status = rx_desc->status; 1898 status = rx_desc->status;
1970 skb = buffer_info->skb; 1899 skb = buffer_info->skb;
1971 buffer_info->skb = NULL; 1900 buffer_info->skb = NULL;
1972 1901
1973 prefetch(skb->data); 1902 prefetch(skb->data - NET_IP_ALIGN);
1974 1903
1975 if(++i == rx_ring->count) i = 0; 1904 if (++i == rx_ring->count) i = 0;
1976 next_rxd = IXGB_RX_DESC(*rx_ring, i); 1905 next_rxd = IXGB_RX_DESC(*rx_ring, i);
1977 prefetch(next_rxd); 1906 prefetch(next_rxd);
1978 1907
1979 if((j = i + 1) == rx_ring->count) j = 0; 1908 if ((j = i + 1) == rx_ring->count) j = 0;
1980 next2_buffer = &rx_ring->buffer_info[j]; 1909 next2_buffer = &rx_ring->buffer_info[j];
1981 prefetch(next2_buffer); 1910 prefetch(next2_buffer);
1982 1911
1983 next_buffer = &rx_ring->buffer_info[i]; 1912 next_buffer = &rx_ring->buffer_info[i];
1984 next_skb = next_buffer->skb;
1985 prefetch(next_skb);
1986 1913
1987 cleaned = true; 1914 cleaned = true;
1915 cleaned_count++;
1988 1916
1989 pci_unmap_single(pdev, 1917 pci_unmap_single(pdev,
1990 buffer_info->dma, 1918 buffer_info->dma,
1991 buffer_info->length, 1919 buffer_info->length,
1992 PCI_DMA_FROMDEVICE); 1920 PCI_DMA_FROMDEVICE);
1921 buffer_info->dma = 0;
1993 1922
1994 length = le16_to_cpu(rx_desc->length); 1923 length = le16_to_cpu(rx_desc->length);
1924 rx_desc->length = 0;
1995 1925
1996 if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) { 1926 if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
1997 1927
1998 /* All receives must fit into a single buffer */ 1928 /* All receives must fit into a single buffer */
1999 1929
@@ -2004,11 +1934,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
2004 goto rxdesc_done; 1934 goto rxdesc_done;
2005 } 1935 }
2006 1936
2007 if (unlikely(rx_desc->errors 1937 if (unlikely(rx_desc->errors &
2008 & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE 1938 (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
2009 | IXGB_RX_DESC_ERRORS_P | 1939 IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
2010 IXGB_RX_DESC_ERRORS_RXE))) {
2011
2012 dev_kfree_skb_irq(skb); 1940 dev_kfree_skb_irq(skb);
2013 goto rxdesc_done; 1941 goto rxdesc_done;
2014 } 1942 }
@@ -2016,8 +1944,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
2016 /* code added for copybreak, this should improve 1944 /* code added for copybreak, this should improve
2017 * performance for small packets with large amounts 1945 * performance for small packets with large amounts
2018 * of reassembly being done in the stack */ 1946 * of reassembly being done in the stack */
2019#define IXGB_CB_LENGTH 256 1947 if (length < copybreak) {
2020 if (length < IXGB_CB_LENGTH) {
2021 struct sk_buff *new_skb = 1948 struct sk_buff *new_skb =
2022 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 1949 netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
2023 if (new_skb) { 1950 if (new_skb) {
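copybreak is now a module-scope tunable replacing the hard-coded IXGB_CB_LENGTH of 256. Frames below the threshold are copied into a right-sized skb so the original full-size receive buffer can be recycled in place instead of reallocated. The body elided by this hunk looks roughly like:

    if (length < copybreak) {
            struct sk_buff *new_skb =
                    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
            if (new_skb) {
                    skb_reserve(new_skb, NET_IP_ALIGN); /* align the IP header */
                    memcpy(new_skb->data - NET_IP_ALIGN,
                           skb->data - NET_IP_ALIGN,
                           length + NET_IP_ALIGN);
                    buffer_info->skb = skb;   /* recycle the big buffer */
                    skb = new_skb;            /* hand the copy up the stack */
            }
    }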
@@ -2042,27 +1969,24 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
2042 ixgb_rx_checksum(adapter, rx_desc, skb); 1969 ixgb_rx_checksum(adapter, rx_desc, skb);
2043 1970
2044 skb->protocol = eth_type_trans(skb, netdev); 1971 skb->protocol = eth_type_trans(skb, netdev);
2045#ifdef CONFIG_IXGB_NAPI 1972 if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
2046 if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
2047 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 1973 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
2048 le16_to_cpu(rx_desc->special)); 1974 le16_to_cpu(rx_desc->special));
2049 } else { 1975 } else {
2050 netif_receive_skb(skb); 1976 netif_receive_skb(skb);
2051 } 1977 }
2052#else /* CONFIG_IXGB_NAPI */
2053 if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
2054 vlan_hwaccel_rx(skb, adapter->vlgrp,
2055 le16_to_cpu(rx_desc->special));
2056 } else {
2057 netif_rx(skb);
2058 }
2059#endif /* CONFIG_IXGB_NAPI */
2060 netdev->last_rx = jiffies; 1978 netdev->last_rx = jiffies;
2061 1979
2062rxdesc_done: 1980rxdesc_done:
2063 /* clean up descriptor, might be written over by hw */ 1981 /* clean up descriptor, might be written over by hw */
2064 rx_desc->status = 0; 1982 rx_desc->status = 0;
2065 1983
1984 /* return some buffers to hardware; one at a time is too slow */
1985 if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
1986 ixgb_alloc_rx_buffers(adapter, cleaned_count);
1987 cleaned_count = 0;
1988 }
1989
2066 /* use prefetched values */ 1990 /* use prefetched values */
2067 rx_desc = next_rxd; 1991 rx_desc = next_rxd;
2068 buffer_info = next_buffer; 1992 buffer_info = next_buffer;
@@ -2070,7 +1994,9 @@ rxdesc_done:
2070 1994
2071 rx_ring->next_to_clean = i; 1995 rx_ring->next_to_clean = i;
2072 1996
2073 ixgb_alloc_rx_buffers(adapter); 1997 cleaned_count = IXGB_DESC_UNUSED(rx_ring);
1998 if (cleaned_count)
1999 ixgb_alloc_rx_buffers(adapter, cleaned_count);
2074 2000
2075 return cleaned; 2001 return cleaned;
2076} 2002}
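ixgb_clean_rx_irq() now batches buffer replenishment: inside the loop it refills once every IXGB_RX_BUFFER_WRITE cleaned descriptors, and after the loop it tops up whatever remains. Each ixgb_alloc_rx_buffers() call that places buffers ends with a receive tail (RDT) register write, so batching trades one MMIO write per packet for one per chunk. The new flow, condensed:

    /* inside the clean loop */
    if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
            ixgb_alloc_rx_buffers(adapter, cleaned_count);
            cleaned_count = 0;
    }

    /* after the loop */
    cleaned_count = IXGB_DESC_UNUSED(rx_ring);
    if (cleaned_count)
            ixgb_alloc_rx_buffers(adapter, cleaned_count);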
@@ -2081,7 +2007,7 @@ rxdesc_done:
2081 **/ 2007 **/
2082 2008
2083static void 2009static void
2084ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter) 2010ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2085{ 2011{
2086 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; 2012 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2087 struct net_device *netdev = adapter->netdev; 2013 struct net_device *netdev = adapter->netdev;
@@ -2098,7 +2024,7 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
2098 2024
2099 2025
2100 /* leave three descriptors unused */ 2026 /* leave three descriptors unused */
2101 while(--cleancount > 2) { 2027 while (--cleancount > 2 && cleaned_count--) {
2102 /* recycle! it's good for you */ 2028 /* recycle! it's good for you */
2103 skb = buffer_info->skb; 2029 skb = buffer_info->skb;
2104 if (skb) { 2030 if (skb) {
@@ -2131,12 +2057,12 @@ map_skb:
2131 rx_desc = IXGB_RX_DESC(*rx_ring, i); 2057 rx_desc = IXGB_RX_DESC(*rx_ring, i);
2132 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); 2058 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
2133 /* guarantee DD bit not set now before h/w gets descriptor 2059 /* guarantee DD bit not set now before h/w gets descriptor
2134 * this is the rest of the workaround for h/w double 2060 * this is the rest of the workaround for h/w double
2135 * writeback. */ 2061 * writeback. */
2136 rx_desc->status = 0; 2062 rx_desc->status = 0;
2137 2063
2138 2064
2139 if(++i == rx_ring->count) i = 0; 2065 if (++i == rx_ring->count) i = 0;
2140 buffer_info = &rx_ring->buffer_info[i]; 2066 buffer_info = &rx_ring->buffer_info[i];
2141 } 2067 }
2142 2068
@@ -2156,7 +2082,7 @@ map_skb:
2156 2082
2157/** 2083/**
2158 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping. 2084 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
2159 * 2085 *
2160 * @param netdev network interface device structure 2086 * @param netdev network interface device structure
2161 * @param grp indicates to enable or disable tagging/stripping 2087 * @param grp indicates to enable or disable tagging/stripping
2162 **/ 2088 **/
@@ -2169,7 +2095,7 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2169 ixgb_irq_disable(adapter); 2095 ixgb_irq_disable(adapter);
2170 adapter->vlgrp = grp; 2096 adapter->vlgrp = grp;
2171 2097
2172 if(grp) { 2098 if (grp) {
2173 /* enable VLAN tag insert/strip */ 2099 /* enable VLAN tag insert/strip */
2174 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); 2100 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2175 ctrl |= IXGB_CTRL0_VME; 2101 ctrl |= IXGB_CTRL0_VME;
@@ -2241,10 +2167,10 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
2241{ 2167{
2242 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp); 2168 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2243 2169
2244 if(adapter->vlgrp) { 2170 if (adapter->vlgrp) {
2245 u16 vid; 2171 u16 vid;
2246 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 2172 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2247 if(!vlan_group_get_device(adapter->vlgrp, vid)) 2173 if (!vlan_group_get_device(adapter->vlgrp, vid))
2248 continue; 2174 continue;
2249 ixgb_vlan_rx_add_vid(adapter->netdev, vid); 2175 ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2250 } 2176 }
@@ -2276,13 +2202,13 @@ static void ixgb_netpoll(struct net_device *dev)
2276 * This callback is called by the PCI subsystem whenever 2202 * This callback is called by the PCI subsystem whenever
2277 * a PCI bus error is detected. 2203 * a PCI bus error is detected.
2278 */ 2204 */
2279static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, 2205static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
2280 enum pci_channel_state state) 2206 enum pci_channel_state state)
2281{ 2207{
2282 struct net_device *netdev = pci_get_drvdata(pdev); 2208 struct net_device *netdev = pci_get_drvdata(pdev);
2283 struct ixgb_adapter *adapter = netdev_priv(netdev); 2209 struct ixgb_adapter *adapter = netdev_priv(netdev);
2284 2210
2285 if(netif_running(netdev)) 2211 if (netif_running(netdev))
2286 ixgb_down(adapter, true); 2212 ixgb_down(adapter, true);
2287 2213
2288 pci_disable_device(pdev); 2214 pci_disable_device(pdev);
@@ -2295,17 +2221,17 @@ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
2295 * ixgb_io_slot_reset - called after the pci bus has been reset. 2221 * ixgb_io_slot_reset - called after the pci bus has been reset.
2296 * @pdev pointer to pci device with error 2222 * @pdev pointer to pci device with error
2297 * 2223 *
2298 * This callback is called after the PCI buss has been reset. 2224 * This callback is called after the PCI bus has been reset.
2299 * Basically, this tries to restart the card from scratch. 2225 * Basically, this tries to restart the card from scratch.
2300 * This is a shortened version of the device probe/discovery code, 2226 * This is a shortened version of the device probe/discovery code,
2301 * it resembles the first-half of the ixgb_probe() routine. 2227 * it resembles the first-half of the ixgb_probe() routine.
2302 */ 2228 */
2303static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev) 2229static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
2304{ 2230{
2305 struct net_device *netdev = pci_get_drvdata(pdev); 2231 struct net_device *netdev = pci_get_drvdata(pdev);
2306 struct ixgb_adapter *adapter = netdev_priv(netdev); 2232 struct ixgb_adapter *adapter = netdev_priv(netdev);
2307 2233
2308 if(pci_enable_device(pdev)) { 2234 if (pci_enable_device(pdev)) {
2309 DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n"); 2235 DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
2310 return PCI_ERS_RESULT_DISCONNECT; 2236 return PCI_ERS_RESULT_DISCONNECT;
2311 } 2237 }
@@ -2321,14 +2247,14 @@ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
2321 ixgb_reset(adapter); 2247 ixgb_reset(adapter);
2322 2248
2323 /* Make sure the EEPROM is good */ 2249 /* Make sure the EEPROM is good */
2324 if(!ixgb_validate_eeprom_checksum(&adapter->hw)) { 2250 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
2325 DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n"); 2251 DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
2326 return PCI_ERS_RESULT_DISCONNECT; 2252 return PCI_ERS_RESULT_DISCONNECT;
2327 } 2253 }
2328 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); 2254 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
2329 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); 2255 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2330 2256
2331 if(!is_valid_ether_addr(netdev->perm_addr)) { 2257 if (!is_valid_ether_addr(netdev->perm_addr)) {
2332 DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n"); 2258 DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
2333 return PCI_ERS_RESULT_DISCONNECT; 2259 return PCI_ERS_RESULT_DISCONNECT;
2334 } 2260 }
@@ -2344,15 +2270,15 @@ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
2344 * normal operation. Implementation resembles the second-half 2270 * normal operation. Implementation resembles the second-half
2345 * of the ixgb_probe() routine. 2271 * of the ixgb_probe() routine.
2346 */ 2272 */
2347static void ixgb_io_resume (struct pci_dev *pdev) 2273static void ixgb_io_resume(struct pci_dev *pdev)
2348{ 2274{
2349 struct net_device *netdev = pci_get_drvdata(pdev); 2275 struct net_device *netdev = pci_get_drvdata(pdev);
2350 struct ixgb_adapter *adapter = netdev_priv(netdev); 2276 struct ixgb_adapter *adapter = netdev_priv(netdev);
2351 2277
2352 pci_set_master(pdev); 2278 pci_set_master(pdev);
2353 2279
2354 if(netif_running(netdev)) { 2280 if (netif_running(netdev)) {
2355 if(ixgb_up(adapter)) { 2281 if (ixgb_up(adapter)) {
2356 printk ("ixgb: can't bring device back up after reset\n"); 2282 printk ("ixgb: can't bring device back up after reset\n");
2357 return; 2283 return;
2358 } 2284 }
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index 4be1b273e1b8..d92e72bd627a 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/10GbE Linux driver 3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -40,7 +40,7 @@
40#include <linux/sched.h> 40#include <linux/sched.h>
41 41
42#undef ASSERT 42#undef ASSERT
43#define ASSERT(x) if(!(x)) BUG() 43#define ASSERT(x) if (!(x)) BUG()
44#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B) 44#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
45 45
46#ifdef DBG 46#ifdef DBG
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index 865d14d6e5a7..af35e1ddadd6 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/10GbE Linux driver 3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -136,7 +136,7 @@ IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
136/* Flow control request timeout (how long to pause the link partner's tx) 136/* Flow control request timeout (how long to pause the link partner's tx)
137 * (PAP 15:0) 137 * (PAP 15:0)
138 * 138 *
139 * Valid Range: 1 - 65535 139 * Valid Range: 1 - 65535
140 * 140 *
141 * Default Value: 65535 (0xffff) (we'll send an xon if we recover) 141 * Default Value: 65535 (0xffff) (we'll send an xon if we recover)
142 */ 142 */
@@ -200,7 +200,7 @@ struct ixgb_option {
200static int __devinit 200static int __devinit
201ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) 201ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
202{ 202{
203 if(*value == OPTION_UNSET) { 203 if (*value == OPTION_UNSET) {
204 *value = opt->def; 204 *value = opt->def;
205 return 0; 205 return 0;
206 } 206 }
@@ -217,7 +217,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
217 } 217 }
218 break; 218 break;
219 case range_option: 219 case range_option:
220 if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 220 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
221 printk(KERN_INFO "%s set to %i\n", opt->name, *value); 221 printk(KERN_INFO "%s set to %i\n", opt->name, *value);
222 return 0; 222 return 0;
223 } 223 }
@@ -226,10 +226,10 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
226 int i; 226 int i;
227 struct ixgb_opt_list *ent; 227 struct ixgb_opt_list *ent;
228 228
229 for(i = 0; i < opt->arg.l.nr; i++) { 229 for (i = 0; i < opt->arg.l.nr; i++) {
230 ent = &opt->arg.l.p[i]; 230 ent = &opt->arg.l.p[i];
231 if(*value == ent->i) { 231 if (*value == ent->i) {
232 if(ent->str[0] != '\0') 232 if (ent->str[0] != '\0')
233 printk(KERN_INFO "%s\n", ent->str); 233 printk(KERN_INFO "%s\n", ent->str);
234 return 0; 234 return 0;
235 } 235 }
@@ -260,7 +260,7 @@ void __devinit
260ixgb_check_options(struct ixgb_adapter *adapter) 260ixgb_check_options(struct ixgb_adapter *adapter)
261{ 261{
262 int bd = adapter->bd_number; 262 int bd = adapter->bd_number;
263 if(bd >= IXGB_MAX_NIC) { 263 if (bd >= IXGB_MAX_NIC) {
264 printk(KERN_NOTICE 264 printk(KERN_NOTICE
265 "Warning: no configuration for board #%i\n", bd); 265 "Warning: no configuration for board #%i\n", bd);
266 printk(KERN_NOTICE "Using defaults for all values\n"); 266 printk(KERN_NOTICE "Using defaults for all values\n");
@@ -277,7 +277,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
277 }; 277 };
278 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; 278 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
279 279
280 if(num_TxDescriptors > bd) { 280 if (num_TxDescriptors > bd) {
281 tx_ring->count = TxDescriptors[bd]; 281 tx_ring->count = TxDescriptors[bd];
282 ixgb_validate_option(&tx_ring->count, &opt); 282 ixgb_validate_option(&tx_ring->count, &opt);
283 } else { 283 } else {
@@ -296,7 +296,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
296 }; 296 };
297 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; 297 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
298 298
299 if(num_RxDescriptors > bd) { 299 if (num_RxDescriptors > bd) {
300 rx_ring->count = RxDescriptors[bd]; 300 rx_ring->count = RxDescriptors[bd];
301 ixgb_validate_option(&rx_ring->count, &opt); 301 ixgb_validate_option(&rx_ring->count, &opt);
302 } else { 302 } else {
@@ -312,7 +312,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
312 .def = OPTION_ENABLED 312 .def = OPTION_ENABLED
313 }; 313 };
314 314
315 if(num_XsumRX > bd) { 315 if (num_XsumRX > bd) {
316 unsigned int rx_csum = XsumRX[bd]; 316 unsigned int rx_csum = XsumRX[bd];
317 ixgb_validate_option(&rx_csum, &opt); 317 ixgb_validate_option(&rx_csum, &opt);
318 adapter->rx_csum = rx_csum; 318 adapter->rx_csum = rx_csum;
@@ -338,7 +338,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
338 .p = fc_list }} 338 .p = fc_list }}
339 }; 339 };
340 340
341 if(num_FlowControl > bd) { 341 if (num_FlowControl > bd) {
342 unsigned int fc = FlowControl[bd]; 342 unsigned int fc = FlowControl[bd];
343 ixgb_validate_option(&fc, &opt); 343 ixgb_validate_option(&fc, &opt);
344 adapter->hw.fc.type = fc; 344 adapter->hw.fc.type = fc;
@@ -356,14 +356,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
356 .max = MAX_FCRTH}} 356 .max = MAX_FCRTH}}
357 }; 357 };
358 358
359 if(num_RxFCHighThresh > bd) { 359 if (num_RxFCHighThresh > bd) {
360 adapter->hw.fc.high_water = RxFCHighThresh[bd]; 360 adapter->hw.fc.high_water = RxFCHighThresh[bd];
361 ixgb_validate_option(&adapter->hw.fc.high_water, &opt); 361 ixgb_validate_option(&adapter->hw.fc.high_water, &opt);
362 } else { 362 } else {
363 adapter->hw.fc.high_water = opt.def; 363 adapter->hw.fc.high_water = opt.def;
364 } 364 }
365 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) 365 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
366 printk (KERN_INFO 366 printk(KERN_INFO
367 "Ignoring RxFCHighThresh when no RxFC\n"); 367 "Ignoring RxFCHighThresh when no RxFC\n");
368 } 368 }
369 { /* Receive Flow Control Low Threshold */ 369 { /* Receive Flow Control Low Threshold */
@@ -376,14 +376,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
376 .max = MAX_FCRTL}} 376 .max = MAX_FCRTL}}
377 }; 377 };
378 378
379 if(num_RxFCLowThresh > bd) { 379 if (num_RxFCLowThresh > bd) {
380 adapter->hw.fc.low_water = RxFCLowThresh[bd]; 380 adapter->hw.fc.low_water = RxFCLowThresh[bd];
381 ixgb_validate_option(&adapter->hw.fc.low_water, &opt); 381 ixgb_validate_option(&adapter->hw.fc.low_water, &opt);
382 } else { 382 } else {
383 adapter->hw.fc.low_water = opt.def; 383 adapter->hw.fc.low_water = opt.def;
384 } 384 }
385 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) 385 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
386 printk (KERN_INFO 386 printk(KERN_INFO
387 "Ignoring RxFCLowThresh when no RxFC\n"); 387 "Ignoring RxFCLowThresh when no RxFC\n");
388 } 388 }
389 { /* Flow Control Pause Time Request*/ 389 { /* Flow Control Pause Time Request*/
@@ -396,7 +396,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
396 .max = MAX_FCPAUSE}} 396 .max = MAX_FCPAUSE}}
397 }; 397 };
398 398
399 if(num_FCReqTimeout > bd) { 399 if (num_FCReqTimeout > bd) {
400 unsigned int pause_time = FCReqTimeout[bd]; 400 unsigned int pause_time = FCReqTimeout[bd];
401 ixgb_validate_option(&pause_time, &opt); 401 ixgb_validate_option(&pause_time, &opt);
402 adapter->hw.fc.pause_time = pause_time; 402 adapter->hw.fc.pause_time = pause_time;
@@ -404,7 +404,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
404 adapter->hw.fc.pause_time = opt.def; 404 adapter->hw.fc.pause_time = opt.def;
405 } 405 }
406 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) 406 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
407 printk (KERN_INFO 407 printk(KERN_INFO
408 "Ignoring FCReqTimeout when no RxFC\n"); 408 "Ignoring FCReqTimeout when no RxFC\n");
409 } 409 }
410 /* high low and spacing check for rx flow control thresholds */ 410 /* high low and spacing check for rx flow control thresholds */
@@ -412,7 +412,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
412 /* high must be greater than low */ 412 /* high must be greater than low */
413 if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { 413 if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
414 /* set defaults */ 414 /* set defaults */
415 printk (KERN_INFO 415 printk(KERN_INFO
416 "RxFCHighThresh must be >= (RxFCLowThresh + 8), " 416 "RxFCHighThresh must be >= (RxFCLowThresh + 8), "
417 "Using Defaults\n"); 417 "Using Defaults\n");
418 adapter->hw.fc.high_water = DEFAULT_FCRTH; 418 adapter->hw.fc.high_water = DEFAULT_FCRTH;
@@ -429,7 +429,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
429 .max = MAX_RDTR}} 429 .max = MAX_RDTR}}
430 }; 430 };
431 431
432 if(num_RxIntDelay > bd) { 432 if (num_RxIntDelay > bd) {
433 adapter->rx_int_delay = RxIntDelay[bd]; 433 adapter->rx_int_delay = RxIntDelay[bd];
434 ixgb_validate_option(&adapter->rx_int_delay, &opt); 434 ixgb_validate_option(&adapter->rx_int_delay, &opt);
435 } else { 435 } else {
@@ -446,7 +446,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
446 .max = MAX_TIDV}} 446 .max = MAX_TIDV}}
447 }; 447 };
448 448
449 if(num_TxIntDelay > bd) { 449 if (num_TxIntDelay > bd) {
450 adapter->tx_int_delay = TxIntDelay[bd]; 450 adapter->tx_int_delay = TxIntDelay[bd];
451 ixgb_validate_option(&adapter->tx_int_delay, &opt); 451 ixgb_validate_option(&adapter->tx_int_delay, &opt);
452 } else { 452 } else {
@@ -462,7 +462,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
462 .def = OPTION_ENABLED 462 .def = OPTION_ENABLED
463 }; 463 };
464 464
465 if(num_IntDelayEnable > bd) { 465 if (num_IntDelayEnable > bd) {
466 unsigned int ide = IntDelayEnable[bd]; 466 unsigned int ide = IntDelayEnable[bd];
467 ixgb_validate_option(&ide, &opt); 467 ixgb_validate_option(&ide, &opt);
468 adapter->tx_int_delay_enable = ide; 468 adapter->tx_int_delay_enable = ide;