author	Thomas Gleixner <tglx@linutronix.de>	2011-02-22 12:24:26 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2011-02-22 12:41:48 -0500
commit	695884fb8acd9857e0e7120ccb2150e30f4b8fef (patch)
tree	49aa424c1a021ce432e9fa5ea29d37a23e4e30cc /drivers/net/e1000e/netdev.c
parent	5df91509d324d44cfb11e55d9cb02fe18b53b045 (diff)
parent	04bea68b2f0eeebb089ecc67b618795925268b4a (diff)
Merge branch 'devicetree/for-x86' of git://git.secretlab.ca/git/linux-2.6 into x86/platform
Reason: x86 devicetree support for ce4100 depends on those device tree
changes scheduled for .39.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'drivers/net/e1000e/netdev.c')
-rw-r--r--	drivers/net/e1000e/netdev.c	381
1 files changed, 216 insertions, 165 deletions
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c4ca1629f532..1c18f26b0812 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************

 Intel PRO/1000 Linux driver
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2011 Intel Corporation.

 This program is free software; you can redistribute it and/or modify it
 under the terms and conditions of the GNU General Public License,
@@ -54,7 +54,7 @@

 #define DRV_EXTRAVERSION "-k2"

-#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION
+#define DRV_VERSION "1.2.20" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;

@@ -77,17 +77,17 @@ struct e1000_reg_info {
 char *name;
 };

 #define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
 #define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
 #define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
 #define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
 #define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */

 #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
 #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
 #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
 #define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
 #define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */

 static const struct e1000_reg_info e1000_reg_info_tbl[] = {

@@ -99,7 +99,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
 /* Interrupt Registers */
 {E1000_ICR, "ICR"},

-/* RX Registers */
+/* Rx Registers */
 {E1000_RCTL, "RCTL"},
 {E1000_RDLEN, "RDLEN"},
 {E1000_RDH, "RDH"},
@@ -115,7 +115,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
 {E1000_RDFTS, "RDFTS"},
 {E1000_RDFPC, "RDFPC"},

-/* TX Registers */
+/* Tx Registers */
 {E1000_TCTL, "TCTL"},
 {E1000_TDBAL, "TDBAL"},
 {E1000_TDBAH, "TDBAH"},
@@ -160,7 +160,7 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
 break;
 default:
 printk(KERN_INFO "%-15s %08x\n",
 reginfo->name, __er32(hw, reginfo->ofs));
 return;
 }

@@ -171,9 +171,8 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
 printk(KERN_CONT "\n");
 }

-
 /*
-* e1000e_dump - Print registers, tx-ring and rx-ring
+* e1000e_dump - Print registers, Tx-ring and Rx-ring
 */
 static void e1000e_dump(struct e1000_adapter *adapter)
 {
@@ -182,12 +181,20 @@ static void e1000e_dump(struct e1000_adapter *adapter)
 struct e1000_reg_info *reginfo;
 struct e1000_ring *tx_ring = adapter->tx_ring;
 struct e1000_tx_desc *tx_desc;
-struct my_u0 { u64 a; u64 b; } *u0;
+struct my_u0 {
+u64 a;
+u64 b;
+} *u0;
 struct e1000_buffer *buffer_info;
 struct e1000_ring *rx_ring = adapter->rx_ring;
 union e1000_rx_desc_packet_split *rx_desc_ps;
 struct e1000_rx_desc *rx_desc;
-struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+struct my_u1 {
+u64 a;
+u64 b;
+u64 c;
+u64 d;
+} *u1;
 u32 staterr;
 int i = 0;

@@ -198,12 +205,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
 if (netdev) {
 dev_info(&adapter->pdev->dev, "Net device Info\n");
 printk(KERN_INFO "Device Name state "
 "trans_start last_rx\n");
 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
-netdev->name,
-netdev->state,
-netdev->trans_start,
-netdev->last_rx);
+netdev->name, netdev->state, netdev->trans_start,
+netdev->last_rx);
 }

 /* Print Registers */
@@ -214,26 +219,26 @@ static void e1000e_dump(struct e1000_adapter *adapter)
 e1000_regdump(hw, reginfo);
 }

-/* Print TX Ring Summary */
+/* Print Tx Ring Summary */
 if (!netdev || !netif_running(netdev))
 goto exit;

-dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
 " leng ntw timestamp\n");
 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
 0, tx_ring->next_to_use, tx_ring->next_to_clean,
 (unsigned long long)buffer_info->dma,
 buffer_info->length,
 buffer_info->next_to_watch,
 (unsigned long long)buffer_info->time_stamp);

-/* Print TX Rings */
+/* Print Tx Ring */
 if (!netif_msg_tx_done(adapter))
 goto rx_ring_summary;

-dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
 *
@@ -263,22 +268,22 @@ static void e1000e_dump(struct e1000_adapter *adapter)
 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
 */
 printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
 " [bi->dma ] leng ntw timestamp bi->skb "
 "<-- Legacy format\n");
 printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
 " [bi->dma ] leng ntw timestamp bi->skb "
 "<-- Ext Context format\n");
 printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
 " [bi->dma ] leng ntw timestamp bi->skb "
 "<-- Ext Data format\n");
 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 tx_desc = E1000_TX_DESC(*tx_ring, i);
 buffer_info = &tx_ring->buffer_info[i];
 u0 = (struct my_u0 *)tx_desc;
 printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
 "%04X %3X %016llX %p",
-(!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
-((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+(!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
 (unsigned long long)le64_to_cpu(u0->a),
 (unsigned long long)le64_to_cpu(u0->b),
 (unsigned long long)buffer_info->dma,
@@ -296,22 +301,22 @@ static void e1000e_dump(struct e1000_adapter *adapter)

 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
 16, 1, phys_to_virt(buffer_info->dma),
 buffer_info->length, true);
 }

-/* Print RX Rings Summary */
+/* Print Rx Ring Summary */
 rx_ring_summary:
-dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
 printk(KERN_INFO "Queue [NTU] [NTC]\n");
 printk(KERN_INFO " %5d %5X %5X\n", 0,
 rx_ring->next_to_use, rx_ring->next_to_clean);

-/* Print RX Rings */
+/* Print Rx Ring */
 if (!netif_msg_rx_status(adapter))
 goto exit;

-dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
 switch (adapter->rx_ps_pages) {
 case 1:
 case 2:
@@ -329,7 +334,7 @@ rx_ring_summary:
 * +-----------------------------------------------------+
 */
 printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
 "[buffer 1 63:0 ] "
 "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
 "[bi->skb] <-- Ext Pkt Split format\n");
 /* [Extended] Receive Descriptor (Write-Back) Format
@@ -344,7 +349,7 @@ rx_ring_summary:
 * 63 48 47 32 31 20 19 0
 */
 printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
 "[vl l0 ee es] "
 "[ l3 l2 l1 hs] [reserved ] ---------------- "
 "[bi->skb] <-- Ext Rx Write-Back format\n");
 for (i = 0; i < rx_ring->count; i++) {
@@ -352,26 +357,26 @@ rx_ring_summary:
 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
 u1 = (struct my_u1 *)rx_desc_ps;
 staterr =
 le32_to_cpu(rx_desc_ps->wb.middle.status_error);
 if (staterr & E1000_RXD_STAT_DD) {
 /* Descriptor Done */
 printk(KERN_INFO "RWB[0x%03X] %016llX "
 "%016llX %016llX %016llX "
 "---------------- %p", i,
 (unsigned long long)le64_to_cpu(u1->a),
 (unsigned long long)le64_to_cpu(u1->b),
 (unsigned long long)le64_to_cpu(u1->c),
 (unsigned long long)le64_to_cpu(u1->d),
 buffer_info->skb);
 } else {
 printk(KERN_INFO "R [0x%03X] %016llX "
 "%016llX %016llX %016llX %016llX %p", i,
 (unsigned long long)le64_to_cpu(u1->a),
 (unsigned long long)le64_to_cpu(u1->b),
 (unsigned long long)le64_to_cpu(u1->c),
 (unsigned long long)le64_to_cpu(u1->d),
 (unsigned long long)buffer_info->dma,
 buffer_info->skb);

 if (netif_msg_pktdata(adapter))
 print_hex_dump(KERN_INFO, "",
@@ -400,18 +405,18 @@ rx_ring_summary:
 * 63 48 47 40 39 32 31 16 15 0
 */
 printk(KERN_INFO "Rl[desc] [address 63:0 ] "
 "[vl er S cks ln] [bi->dma ] [bi->skb] "
 "<-- Legacy format\n");
 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
 rx_desc = E1000_RX_DESC(*rx_ring, i);
 buffer_info = &rx_ring->buffer_info[i];
 u0 = (struct my_u0 *)rx_desc;
 printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
 "%016llX %p", i,
 (unsigned long long)le64_to_cpu(u0->a),
 (unsigned long long)le64_to_cpu(u0->b),
 (unsigned long long)buffer_info->dma,
 buffer_info->skb);
 if (i == rx_ring->next_to_use)
 printk(KERN_CONT " NTU\n");
 else if (i == rx_ring->next_to_clean)
@@ -421,9 +426,10 @@ rx_ring_summary:

 if (netif_msg_pktdata(adapter))
 print_hex_dump(KERN_INFO, "",
 DUMP_PREFIX_ADDRESS,
-16, 1, phys_to_virt(buffer_info->dma),
-adapter->rx_buffer_len, true);
+16, 1,
+phys_to_virt(buffer_info->dma),
+adapter->rx_buffer_len, true);
 }
 }

@@ -450,8 +456,7 @@ static int e1000_desc_unused(struct e1000_ring *ring)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
 static void e1000_receive_skb(struct e1000_adapter *adapter,
-struct net_device *netdev,
-struct sk_buff *skb,
+struct net_device *netdev, struct sk_buff *skb,
 u8 status, __le16 vlan)
 {
 skb->protocol = eth_type_trans(skb, netdev);
@@ -464,7 +469,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
 }

 /**
-* e1000_rx_checksum - Receive Checksum Offload for 82543
+* e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
@@ -548,7 +553,7 @@ map_skb:
 adapter->rx_buffer_len,
 DMA_FROM_DEVICE);
 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
-dev_err(&pdev->dev, "RX DMA map failed\n");
+dev_err(&pdev->dev, "Rx DMA map failed\n");
 adapter->rx_dma_failed++;
 break;
 }
@@ -601,7 +606,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 ps_page = &buffer_info->ps_pages[j];
 if (j >= adapter->rx_ps_pages) {
 /* all unused desc entries get hw null ptr */
-rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
+rx_desc->read.buffer_addr[j + 1] =
+~cpu_to_le64(0);
 continue;
 }
 if (!ps_page->page) {
@@ -617,7 +623,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 if (dma_mapping_error(&pdev->dev,
 ps_page->dma)) {
 dev_err(&adapter->pdev->dev,
-"RX DMA page map failed\n");
+"Rx DMA page map failed\n");
 adapter->rx_dma_failed++;
 goto no_buffers;
 }
@@ -627,8 +633,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 * didn't change because each write-back
 * erases this info.
 */
-rx_desc->read.buffer_addr[j+1] =
+rx_desc->read.buffer_addr[j + 1] =
 cpu_to_le64(ps_page->dma);
 }

 skb = netdev_alloc_skb_ip_align(netdev,
@@ -644,7 +650,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 adapter->rx_ps_bsize0,
 DMA_FROM_DEVICE);
 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
-dev_err(&pdev->dev, "RX DMA map failed\n");
+dev_err(&pdev->dev, "Rx DMA map failed\n");
 adapter->rx_dma_failed++;
 /* cleanup skb */
 dev_kfree_skb_any(skb);
@@ -662,7 +668,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 * such as IA-64).
 */
 wmb();
-writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
 }

 i++;
@@ -1106,11 +1112,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 cleaned = 1;
 cleaned_count++;
 dma_unmap_single(&pdev->dev, buffer_info->dma,
-adapter->rx_ps_bsize0,
-DMA_FROM_DEVICE);
+adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
 buffer_info->dma = 0;

-/* see !EOP comment in other rx routine */
+/* see !EOP comment in other Rx routine */
 if (!(staterr & E1000_RXD_STAT_EOP))
 adapter->flags2 |= FLAG2_IS_DISCARDING;

@@ -1325,7 +1330,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 goto next_desc;
 }

-#define rxtop rx_ring->rx_skb_top
+#define rxtop (rx_ring->rx_skb_top)
 if (!(status & E1000_RXD_STAT_EOP)) {
 /* this descriptor is only the beginning (or middle) */
 if (!rxtop) {
@@ -1806,9 +1811,8 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
 err = pci_enable_msix(adapter->pdev,
 adapter->msix_entries,
 adapter->num_vectors);
-if (err == 0) {
+if (err == 0)
 return;
-}
 }
 /* MSI-X failed, so fall through and try MSI */
 e_err("Failed to initialize MSI-X interrupts. "
@@ -1981,15 +1985,15 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
 }

 /**
-* e1000_get_hw_control - get control of the h/w from f/w
+* e1000e_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
-* e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+* e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
-static void e1000_get_hw_control(struct e1000_adapter *adapter)
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
 {
 struct e1000_hw *hw = &adapter->hw;
 u32 ctrl_ext;
@@ -2006,16 +2010,16 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
 }

 /**
-* e1000_release_hw_control - release control of the h/w to f/w
+* e1000e_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
-* e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+* e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573) i
 * of the f/w this means that the network i/f is closed.
 *
 **/
-static void e1000_release_hw_control(struct e1000_adapter *adapter)
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
 {
 struct e1000_hw *hw = &adapter->hw;
 u32 ctrl_ext;
@@ -2059,10 +2063,9 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
 int err = -ENOMEM, size;

 size = sizeof(struct e1000_buffer) * tx_ring->count;
-tx_ring->buffer_info = vmalloc(size);
+tx_ring->buffer_info = vzalloc(size);
 if (!tx_ring->buffer_info)
 goto err;
-memset(tx_ring->buffer_info, 0, size);

 /* round up to nearest 4K */
 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
@@ -2095,10 +2098,9 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
 int i, size, desc_len, err = -ENOMEM;

 size = sizeof(struct e1000_buffer) * rx_ring->count;
-rx_ring->buffer_info = vmalloc(size);
+rx_ring->buffer_info = vzalloc(size);
 if (!rx_ring->buffer_info)
 goto err;
-memset(rx_ring->buffer_info, 0, size);

 for (i = 0; i < rx_ring->count; i++) {
 buffer_info = &rx_ring->buffer_info[i];
@@ -2132,7 +2134,7 @@ err_pages:
 }
 err:
 vfree(rx_ring->buffer_info);
-e_err("Unable to allocate memory for the transmit descriptor ring\n");
+e_err("Unable to allocate memory for the receive descriptor ring\n");
 return err;
 }

@@ -2200,9 +2202,8 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)

 e1000_clean_rx_ring(adapter);

-for (i = 0; i < rx_ring->count; i++) {
+for (i = 0; i < rx_ring->count; i++)
 kfree(rx_ring->buffer_info[i].ps_pages);
-}

 vfree(rx_ring->buffer_info);
 rx_ring->buffer_info = NULL;
@@ -2242,20 +2243,18 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
 /* handle TSO and jumbo frames */
 if (bytes/packets > 8000)
 retval = bulk_latency;
-else if ((packets < 5) && (bytes > 512)) {
+else if ((packets < 5) && (bytes > 512))
 retval = low_latency;
-}
 break;
 case low_latency: /* 50 usec aka 20000 ints/s */
 if (bytes > 10000) {
 /* this if handles the TSO accounting */
-if (bytes/packets > 8000) {
+if (bytes/packets > 8000)
 retval = bulk_latency;
-} else if ((packets < 10) || ((bytes/packets) > 1200)) {
+else if ((packets < 10) || ((bytes/packets) > 1200))
 retval = bulk_latency;
-} else if ((packets > 35)) {
+else if ((packets > 35))
 retval = lowest_latency;
-}
 } else if (bytes/packets > 2000) {
 retval = bulk_latency;
 } else if (packets <= 2 && bytes < 512) {
@@ -2264,9 +2263,8 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
 break;
 case bulk_latency: /* 250 usec aka 4000 ints/s */
 if (bytes > 25000) {
-if (packets > 35) {
+if (packets > 35)
 retval = low_latency;
-}
 } else if (bytes < 6000) {
 retval = low_latency;
 }
@@ -2452,7 +2450,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
 (vid == adapter->mng_vlan_id)) {
 /* release control to f/w */
-e1000_release_hw_control(adapter);
+e1000e_release_hw_control(adapter);
 return;
 }

@@ -2617,7 +2615,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
 }

 /**
-* e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+* e1000_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
@@ -2670,7 +2668,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 * hthresh = 1 ==> prefetch when one or more available
 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
 * BEWARE: this seems to work but should be considered first if
-* there are tx hangs or other tx related bugs
+* there are Tx hangs or other Tx related bugs
 */
 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
 ew32(TXDCTL(0), txdctl);
@@ -2741,6 +2739,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
 else
 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+if (ret_val)
+e_dbg("failed to enable jumbo frame workaround mode\n");
 }

 /* Program MC offset vector base */
@@ -2881,7 +2882,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 if (adapter->rx_ps_pages) {
 /* this is a 32 byte descriptor */
 rdlen = rx_ring->count *
 sizeof(union e1000_rx_desc_packet_split);
 adapter->clean_rx = e1000_clean_rx_irq_ps;
 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
@@ -2904,7 +2905,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 /*
 * set the writeback threshold (only takes effect if the RDTR
 * is set). set GRAN=1 and write back up to 0x4 worth, and
-* enable prefetching of 0x20 rx descriptors
+* enable prefetching of 0x20 Rx descriptors
 * granularity = 01
 * wthresh = 04,
 * hthresh = 04,
@@ -2985,12 +2986,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 * excessive C-state transition latencies result in
 * dropped transactions.
 */
-pm_qos_update_request(
-&adapter->netdev->pm_qos_req, 55);
+pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
 } else {
-pm_qos_update_request(
-&adapter->netdev->pm_qos_req,
-PM_QOS_DEFAULT_VALUE);
+pm_qos_update_request(&adapter->netdev->pm_qos_req,
+PM_QOS_DEFAULT_VALUE);
 }
 }

@@ -3156,7 +3155,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 /* lower 16 bits has Rx packet buffer allocation size in KB */
 pba &= 0xffff;
 /*
-* the Tx fifo also stores 16 bytes of information about the tx
+* the Tx fifo also stores 16 bytes of information about the Tx
 * but don't include ethernet FCS because hardware appends it
 */
 min_tx_space = (adapter->max_frame_size +
@@ -3179,7 +3178,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 pba -= min_tx_space - tx_space;

 /*
-* if short on Rx space, Rx wins and must trump tx
+* if short on Rx space, Rx wins and must trump Tx
 * adjustment or use Early Receive if available
 */
 if ((pba < min_rx_space) &&
@@ -3191,7 +3190,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
 ew32(PBA, pba);
 }

-
 /*
 * flow control settings
 *
@@ -3279,7 +3277,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 * that the network interface is in control
 */
 if (adapter->flags & FLAG_HAS_AMT)
-e1000_get_hw_control(adapter);
+e1000e_get_hw_control(adapter);

 ew32(WUC, 0);

@@ -3292,6 +3290,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
 ew32(VET, ETH_P_8021Q);

 e1000e_reset_adaptive(hw);
+
+if (!netif_running(adapter->netdev) &&
+!test_bit(__E1000_TESTING, &adapter->state)) {
+e1000_power_down_phy(adapter);
+return;
+}
+
 e1000_get_phy_info(hw);

 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
@@ -3577,7 +3582,7 @@ static int e1000_open(struct net_device *netdev)
 * interface is now open and reset the part to a known state.
 */
 if (adapter->flags & FLAG_HAS_AMT) {
-e1000_get_hw_control(adapter);
+e1000e_get_hw_control(adapter);
 e1000e_reset(adapter);
 }

@@ -3641,7 +3646,7 @@ static int e1000_open(struct net_device *netdev)
 return 0;

 err_req_irq:
-e1000_release_hw_control(adapter);
+e1000e_release_hw_control(adapter);
 e1000_power_down_phy(adapter);
 e1000e_free_rx_resources(adapter);
 err_setup_rx:
@@ -3696,8 +3701,9 @@ static int e1000_close(struct net_device *netdev)
 * If AMT is enabled, let the firmware know that the network
 * interface is now closed
 */
-if (adapter->flags & FLAG_HAS_AMT)
-e1000_release_hw_control(adapter);
+if ((adapter->flags & FLAG_HAS_AMT) &&
+!test_bit(__E1000_TESTING, &adapter->state))
+e1000e_release_hw_control(adapter);

 if ((adapter->flags & FLAG_HAS_ERT) ||
 (adapter->hw.mac.type == e1000_pch2lan))
@@ -4036,11 +4042,11 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
 adapter->netdev->name,
 adapter->link_speed,
 (adapter->link_duplex == FULL_DUPLEX) ?
 "Full Duplex" : "Half Duplex",
 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
-"RX/TX" :
-((ctrl & E1000_CTRL_RFCE) ? "RX" :
-((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
+"Rx/Tx" :
+((ctrl & E1000_CTRL_RFCE) ? "Rx" :
+((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
 }

 static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -4335,7 +4341,7 @@ link_up:
 /* Force detection of hung controller every watchdog period */
 adapter->detect_tx_hung = 1;

-/* flush partial descriptors to memory before detecting tx hang */
+/* flush partial descriptors to memory before detecting Tx hang */
 if (adapter->flags2 & FLAG2_DMA_BURST) {
 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
@@ -4475,7 +4481,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
 break;
 }

-css = skb_transport_offset(skb);
+css = skb_checksum_start_offset(skb);

 i = tx_ring->next_to_use;
 buffer_info = &tx_ring->buffer_info[i];
@@ -4526,7 +4532,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 buffer_info->next_to_watch = i;
 buffer_info->dma = dma_map_single(&pdev->dev,
 skb->data + offset,
 size, DMA_TO_DEVICE);
 buffer_info->mapped_as_page = false;
 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 goto dma_error;
@@ -4573,7 +4579,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 }
 }

-segs = skb_shinfo(skb)->gso_segs ?: 1;
+segs = skb_shinfo(skb)->gso_segs ? : 1;
 /* multiply data chunks by size of headers */
 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

@@ -4585,17 +4591,17 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 return count;

 dma_error:
-dev_err(&pdev->dev, "TX DMA map failed\n");
+dev_err(&pdev->dev, "Tx DMA map failed\n");
 buffer_info->dma = 0;
 if (count)
 count--;

 while (count--) {
-if (i==0)
+if (i == 0)
 i += tx_ring->count;
 i--;
 buffer_info = &tx_ring->buffer_info[i];
-e1000_put_txbuf(adapter, buffer_info);;
+e1000_put_txbuf(adapter, buffer_info);
 }

 return 0;
@@ -4631,7 +4637,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,

 i = tx_ring->next_to_use;

-while (count--) {
+do {
 buffer_info = &tx_ring->buffer_info[i];
 tx_desc = E1000_TX_DESC(*tx_ring, i);
 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4642,7 +4648,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 i++;
 if (i == tx_ring->count)
 i = 0;
-}
+} while (--count > 0);

 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

@@ -5216,7 +5222,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
 * Release control of h/w to f/w. If f/w is AMT enabled, this
 * would have already happened in close and is redundant.
 */
-e1000_release_hw_control(adapter);
+e1000e_release_hw_control(adapter);

 pci_disable_device(pdev);

@@ -5373,7 +5379,7 @@ static int __e1000_resume(struct pci_dev *pdev)
 * under the control of the driver.
 */
 if (!(adapter->flags & FLAG_HAS_AMT))
-e1000_get_hw_control(adapter);
+e1000e_get_hw_control(adapter);

 return 0;
 }
@@ -5465,6 +5471,36 @@ static void e1000_shutdown(struct pci_dev *pdev)
 }

 #ifdef CONFIG_NET_POLL_CONTROLLER
+
+static irqreturn_t e1000_intr_msix(int irq, void *data)
+{
+struct net_device *netdev = data;
+struct e1000_adapter *adapter = netdev_priv(netdev);
+int vector, msix_irq;
+
+if (adapter->msix_entries) {
+vector = 0;
+msix_irq = adapter->msix_entries[vector].vector;
+disable_irq(msix_irq);
+e1000_intr_msix_rx(msix_irq, netdev);
+enable_irq(msix_irq);
+
+vector++;
+msix_irq = adapter->msix_entries[vector].vector;
+disable_irq(msix_irq);
+e1000_intr_msix_tx(msix_irq, netdev);
+enable_irq(msix_irq);
+
+vector++;
+msix_irq = adapter->msix_entries[vector].vector;
+disable_irq(msix_irq);
+e1000_msix_other(msix_irq, netdev);
+enable_irq(msix_irq);
+}
+
+return IRQ_HANDLED;
+}
+
 /*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
@@ -5474,10 +5510,21 @@ static void e1000_netpoll(struct net_device *netdev)
 {
 struct e1000_adapter *adapter = netdev_priv(netdev);

-disable_irq(adapter->pdev->irq);
-e1000_intr(adapter->pdev->irq, netdev);
-
-enable_irq(adapter->pdev->irq);
+switch (adapter->int_mode) {
+case E1000E_INT_MODE_MSIX:
+e1000_intr_msix(adapter->pdev->irq, netdev);
+break;
+case E1000E_INT_MODE_MSI:
+disable_irq(adapter->pdev->irq);
+e1000_intr_msi(adapter->pdev->irq, netdev);
+enable_irq(adapter->pdev->irq);
+break;
+default: /* E1000E_INT_MODE_LEGACY */
+disable_irq(adapter->pdev->irq);
+e1000_intr(adapter->pdev->irq, netdev);
+enable_irq(adapter->pdev->irq);
+break;
+}
 }
 #endif

@@ -5579,7 +5626,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
 * under the control of the driver.
 */
 if (!(adapter->flags & FLAG_HAS_AMT))
-e1000_get_hw_control(adapter);
+e1000e_get_hw_control(adapter);

 }

@@ -5587,7 +5634,8 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
 {
 struct e1000_hw *hw = &adapter->hw;
 struct net_device *netdev = adapter->netdev;
-u32 pba_num;
+u32 ret_val;
+u8 pba_str[E1000_PBANUM_LENGTH];

 /* print bus type/speed/width info */
 e_info("(PCI Express:2.5GB/s:%s) %pM\n",
@@ -5598,9 +5646,12 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
 netdev->dev_addr);
 e_info("Intel(R) PRO/%s Network Connection\n",
 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
-e1000e_read_pba_num(hw, &pba_num);
-e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
-hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
+ret_val = e1000_read_pba_string_generic(hw, pba_str,
+E1000_PBANUM_LENGTH);
+if (ret_val)
+strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
+e_info("MAC: %d, PHY: %d, PBA No: %s\n",
+hw->mac.type, hw->phy.type, pba_str);
 }

 static void e1000_eeprom_checks(struct e1000_adapter *adapter)
@@ -5864,6 +5915,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
+INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);

 /* Initialize link parameters. User can change them with ethtool */
 adapter->hw.mac.autoneg = 1;
@@ -5924,9 +5976,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 * under the control of the driver.
 */
 if (!(adapter->flags & FLAG_HAS_AMT))
-e1000_get_hw_control(adapter);
+e1000e_get_hw_control(adapter);

-strcpy(netdev->name, "eth%d");
+strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
 err = register_netdev(netdev);
 if (err)
 goto err_register;
@@ -5943,12 +5995,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,

 err_register:
 if (!(adapter->flags & FLAG_HAS_AMT))
-e1000_release_hw_control(adapter);
+e1000e_release_hw_control(adapter);
 err_eeprom:
 if (!e1000_check_reset_block(&adapter->hw))
 e1000_phy_hw_reset(&adapter->hw);
 err_hw_init:
-
 kfree(adapter->tx_ring);
 kfree(adapter->rx_ring);
 err_sw_init:
@@ -5984,8 +6035,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 bool down = test_bit(__E1000_DOWN, &adapter->state);

 /*
-* flush_scheduled work may reschedule our watchdog task, so
-* explicitly disable watchdog tasks from being rescheduled
+* The timers may be rescheduled, so explicitly disable them
+* from being rescheduled.
 */
 if (!down)
 set_bit(__E1000_DOWN, &adapter->state);
@@ -5996,8 +6047,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 cancel_work_sync(&adapter->watchdog_task);
 cancel_work_sync(&adapter->downshift_task);
 cancel_work_sync(&adapter->update_phy_task);
+cancel_work_sync(&adapter->led_blink_task);
 cancel_work_sync(&adapter->print_hang_task);
-flush_scheduled_work();

 if (!(netdev->flags & IFF_UP))
 e1000_power_down_phy(adapter);
@@ -6014,7 +6065,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 * Release control of h/w to f/w. If f/w is AMT enabled, this
 * would have already happened in close and is redundant.
 */
-e1000_release_hw_control(adapter);
+e1000e_release_hw_control(adapter);

 e1000e_reset_interrupt_capability(adapter);
 kfree(adapter->tx_ring);
@@ -6145,7 +6196,7 @@ static int __init e1000_init_module(void)
 int ret;
 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
 e1000e_driver_version);
-pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
+pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
 ret = pci_register_driver(&e1000_driver);

 return ret;