author		Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 00:04:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 00:04:44 -0400
commit		f8965467f366fd18f01feafb5db10512d7b4422c (patch)
tree		3706a9cd779859271ca61b85c63a1bc3f82d626e /drivers/net/e1000e/netdev.c
parent		a26272e5200765691e67d6780e52b32498fdb659 (diff)
parent		2ec8c6bb5d8f3a62a79f463525054bae1e3d4487 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1674 commits)
  qlcnic: adding co maintainer
  ixgbe: add support for active DA cables
  ixgbe: dcb, do not tag tc_prio_control frames
  ixgbe: fix ixgbe_tx_is_paused logic
  ixgbe: always enable vlan strip/insert when DCB is enabled
  ixgbe: remove some redundant code in setting FCoE FIP filter
  ixgbe: fix wrong offset to fc_frame_header in ixgbe_fcoe_ddp
  ixgbe: fix header len when unsplit packet overflows to data buffer
  ipv6: Never schedule DAD timer on dead address
  ipv6: Use POSTDAD state
  ipv6: Use state_lock to protect ifa state
  ipv6: Replace inet6_ifaddr->dead with state
  cxgb4: notify upper drivers if the device is already up when they load
  cxgb4: keep interrupts available when the ports are brought down
  cxgb4: fix initial addition of MAC address
  cnic: Return SPQ credit to bnx2x after ring setup and shutdown.
  cnic: Convert cnic_local_flags to atomic ops.
  can: Fix SJA1000 command register writes on SMP systems
  bridge: fix build for CONFIG_SYSFS disabled
  ARCNET: Limit com20020 PCI ID matches for SOHARD cards
  ...

Fix up various conflicts with pcmcia tree drivers/net/
{pcmcia/3c589_cs.c, wireless/orinoco/orinoco_cs.c and
wireless/orinoco/spectrum_cs.c} and feature removal
(Documentation/feature-removal-schedule.txt).

Also fix a non-content conflict due to pm_qos_requirement getting
renamed in the PM tree (now pm_qos_request) in net/mac80211/scan.c
Diffstat (limited to 'drivers/net/e1000e/netdev.c')
 drivers/net/e1000e/netdev.c | 844 ++++++++++++++++++++++++++++++++++--------
 1 file changed, 680 insertions(+), 164 deletions(-)
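Most of the mechanical churn below comes from two conversions: the legacy pci_map_*()/PCI_DMA_* wrappers give way to the generic dma_map_*()/DMA_* API operating on &pdev->dev, and the driver's power-management hooks move to dev_pm_ops. As orientation before reading the hunks, here is a minimal sketch of the DMA-API pattern; the helper name and its zero-on-failure convention are illustrative, not taken from this patch:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper showing the new-style calls used throughout this diff. */
static dma_addr_t example_map_rx_buffer(struct pci_dev *pdev, void *buf,
					size_t len)
{
	/* was: pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE) */
	dma_addr_t dma = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);

	/* was: pci_dma_mapping_error(pdev, dma) */
	if (dma_mapping_error(&pdev->dev, dma))
		return 0;	/* illustrative failure convention */

	return dma;
}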
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d5d55c6a373f..24507f3b8b17 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -26,6 +26,8 @@
 
 *******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -45,11 +47,12 @@
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/pm_qos_params.h>
+#include <linux/pm_runtime.h>
 #include <linux/aer.h>
 
 #include "e1000.h"
 
-#define DRV_VERSION "1.0.2-k2"
+#define DRV_VERSION "1.0.2-k4"
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -66,6 +69,361 @@ static const struct e1000_info *e1000_info_tbl[] = {
 	[board_pchlan]		= &e1000_pch_info,
 };
 
+struct e1000_reg_info {
+	u32 ofs;
+	char *name;
+};
+
+#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
+#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */
+
+#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
+#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+
+	/* General Registers */
+	{E1000_CTRL, "CTRL"},
+	{E1000_STATUS, "STATUS"},
+	{E1000_CTRL_EXT, "CTRL_EXT"},
+
+	/* Interrupt Registers */
+	{E1000_ICR, "ICR"},
+
+	/* RX Registers */
+	{E1000_RCTL, "RCTL"},
+	{E1000_RDLEN, "RDLEN"},
+	{E1000_RDH, "RDH"},
+	{E1000_RDT, "RDT"},
+	{E1000_RDTR, "RDTR"},
+	{E1000_RXDCTL(0), "RXDCTL"},
+	{E1000_ERT, "ERT"},
+	{E1000_RDBAL, "RDBAL"},
+	{E1000_RDBAH, "RDBAH"},
+	{E1000_RDFH, "RDFH"},
+	{E1000_RDFT, "RDFT"},
+	{E1000_RDFHS, "RDFHS"},
+	{E1000_RDFTS, "RDFTS"},
+	{E1000_RDFPC, "RDFPC"},
+
+	/* TX Registers */
+	{E1000_TCTL, "TCTL"},
+	{E1000_TDBAL, "TDBAL"},
+	{E1000_TDBAH, "TDBAH"},
+	{E1000_TDLEN, "TDLEN"},
+	{E1000_TDH, "TDH"},
+	{E1000_TDT, "TDT"},
+	{E1000_TIDV, "TIDV"},
+	{E1000_TXDCTL(0), "TXDCTL"},
+	{E1000_TADV, "TADV"},
+	{E1000_TARC(0), "TARC"},
+	{E1000_TDFH, "TDFH"},
+	{E1000_TDFT, "TDFT"},
+	{E1000_TDFHS, "TDFHS"},
+	{E1000_TDFTS, "TDFTS"},
+	{E1000_TDFPC, "TDFPC"},
+
+	/* List Terminator */
+	{}
+};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+	int n = 0;
+	char rname[16];
+	u32 regs[8];
+
+	switch (reginfo->ofs) {
+	case E1000_RXDCTL(0):
+		for (n = 0; n < 2; n++)
+			regs[n] = __er32(hw, E1000_RXDCTL(n));
+		break;
+	case E1000_TXDCTL(0):
+		for (n = 0; n < 2; n++)
+			regs[n] = __er32(hw, E1000_TXDCTL(n));
+		break;
+	case E1000_TARC(0):
+		for (n = 0; n < 2; n++)
+			regs[n] = __er32(hw, E1000_TARC(n));
+		break;
+	default:
+		printk(KERN_INFO "%-15s %08x\n",
+		       reginfo->name, __er32(hw, reginfo->ofs));
+		return;
+	}
+
+	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+	printk(KERN_INFO "%-15s ", rname);
+	for (n = 0; n < 2; n++)
+		printk(KERN_CONT "%08x ", regs[n]);
+	printk(KERN_CONT "\n");
+}
+
+
+/*
+ * e1000e_dump - Print registers, tx-ring and rx-ring
+ */
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_reg_info *reginfo;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_tx_desc *tx_desc;
+	struct my_u0 { u64 a; u64 b; } *u0;
+	struct e1000_buffer *buffer_info;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	union e1000_rx_desc_packet_split *rx_desc_ps;
+	struct e1000_rx_desc *rx_desc;
+	struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+	u32 staterr;
+	int i = 0;
+
+	if (!netif_msg_hw(adapter))
+		return;
+
+	/* Print netdevice Info */
+	if (netdev) {
+		dev_info(&adapter->pdev->dev, "Net device Info\n");
+		printk(KERN_INFO "Device Name     state            "
+		       "trans_start      last_rx\n");
+		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+		       netdev->name,
+		       netdev->state,
+		       netdev->trans_start,
+		       netdev->last_rx);
+	}
+
+	/* Print Registers */
+	dev_info(&adapter->pdev->dev, "Register Dump\n");
+	printk(KERN_INFO " Register Name   Value\n");
+	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		e1000_regdump(hw, reginfo);
+	}
+
+	/* Print TX Ring Summary */
+	if (!netdev || !netif_running(netdev))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
+	       " leng ntw timestamp\n");
+	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
+	       (u64)buffer_info->dma,
+	       buffer_info->length,
+	       buffer_info->next_to_watch,
+	       (u64)buffer_info->time_stamp);
+
+	/* Print TX Rings */
+	if (!netif_msg_tx_done(adapter))
+		goto rx_ring_summary;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+	 *
+	 * Legacy Transmit Descriptor
+	 *   +--------------------------------------------------------------+
+	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
+	 *   +--------------------------------------------------------------+
+	 * 8 | Special  |    CSS     | Status |  CMD   |  CSO   |  Length   |
+	 *   +--------------------------------------------------------------+
+	 *   63       48 47        36 35    32 31    24 23    16 15         0
+	 *
+	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+	 *   63      48 47    40 39       32 31             16 15    8 7      0
+	 *   +----------------------------------------------------------------+
+	 * 0 |  TUCSE  | TUCS0  |   TUCSS  |     IPCSE       | IPCS0  | IPCSS |
+	 *   +----------------------------------------------------------------+
+	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
+	 *   +----------------------------------------------------------------+
+	 *   63      48 47    40 39 36 35 32 31   24 23  20 19               0
+	 *
+	 * Extended Data Descriptor (DTYP=0x1)
+	 *   +----------------------------------------------------------------+
+	 * 0 |                     Buffer Address [63:0]                      |
+	 *   +----------------------------------------------------------------+
+	 * 8 | VLAN tag |  POPTS | Rsvd | Status | Command | DTYP |  DTALEN   |
+	 *   +----------------------------------------------------------------+
+	 *   63       48 47    40 39  36 35    32 31     24 23  20 19         0
+	 */
+	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Legacy format\n");
+	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Ext Context format\n");
+	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Ext Data format\n");
+	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		buffer_info = &tx_ring->buffer_info[i];
+		u0 = (struct my_u0 *)tx_desc;
+		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
+		       "%04X  %3X %016llX %p",
+		       (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
+			((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+		       le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+		       (u64)buffer_info->dma, buffer_info->length,
+		       buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
+		       buffer_info->skb);
+		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+			printk(KERN_CONT " NTC/U\n");
+		else if (i == tx_ring->next_to_use)
+			printk(KERN_CONT " NTU\n");
+		else if (i == tx_ring->next_to_clean)
+			printk(KERN_CONT " NTC\n");
+		else
+			printk(KERN_CONT "\n");
+
+		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+				       16, 1, phys_to_virt(buffer_info->dma),
+				       buffer_info->length, true);
+	}
+
+	/* Print RX Rings Summary */
+rx_ring_summary:
+	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC]\n");
+	printk(KERN_INFO " %5d %5X %5X\n", 0,
+	       rx_ring->next_to_use, rx_ring->next_to_clean);
+
+	/* Print RX Rings */
+	if (!netif_msg_rx_status(adapter))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+	switch (adapter->rx_ps_pages) {
+	case 1:
+	case 2:
+	case 3:
+		/* [Extended] Packet Split Receive Descriptor Format
+		 *
+		 *    +-----------------------------------------------------+
+		 *  0 |                Buffer Address 0 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 *  8 |                Buffer Address 1 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 * 16 |                Buffer Address 2 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 * 24 |                Buffer Address 3 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 */
+		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
+		       "[buffer 1 63:0 ] "
+		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
+		       "[bi->skb] <-- Ext Pkt Split format\n");
+		/* [Extended] Receive Descriptor (Write-Back) Format
+		 *
+		 *   63       48 47    32 31     13 12    8 7    4 3        0
+		 *   +------------------------------------------------------+
+		 * 0 | Packet   |   IP    |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
+		 *   | Checksum |  Ident  |         | Queue |      |  Type   |
+		 *   +------------------------------------------------------+
+		 * 8 | VLAN Tag | Length  | Extended Error | Extended Status |
+		 *   +------------------------------------------------------+
+		 *   63       48 47    32 31             20 19              0
+		 */
+		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
+		       "[vl   l0 ee  es] "
+		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
+		       "[bi->skb] <-- Ext Rx Write-Back format\n");
+		for (i = 0; i < rx_ring->count; i++) {
+			buffer_info = &rx_ring->buffer_info[i];
+			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+			u1 = (struct my_u1 *)rx_desc_ps;
+			staterr =
+			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+			if (staterr & E1000_RXD_STAT_DD) {
+				/* Descriptor Done */
+				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+				       "%016llX %016llX %016llX "
+				       "---------------- %p", i,
+				       le64_to_cpu(u1->a),
+				       le64_to_cpu(u1->b),
+				       le64_to_cpu(u1->c),
+				       le64_to_cpu(u1->d),
+				       buffer_info->skb);
+			} else {
+				printk(KERN_INFO "R  [0x%03X]     %016llX "
+				       "%016llX %016llX %016llX %016llX %p", i,
+				       le64_to_cpu(u1->a),
+				       le64_to_cpu(u1->b),
+				       le64_to_cpu(u1->c),
+				       le64_to_cpu(u1->d),
+				       (u64)buffer_info->dma,
+				       buffer_info->skb);
+
+				if (netif_msg_pktdata(adapter))
+					print_hex_dump(KERN_INFO, "",
+						DUMP_PREFIX_ADDRESS, 16, 1,
+						phys_to_virt(buffer_info->dma),
+						adapter->rx_ps_bsize0, true);
+			}
+
+			if (i == rx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == rx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+		}
+		break;
+	default:
+	case 0:
+		/* Legacy Receive Descriptor Format
+		 *
+		 * +-----------------------------------------------------+
+		 * |                Buffer Address [63:0]                |
+		 * +-----------------------------------------------------+
+		 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
+		 * +-----------------------------------------------------+
+		 * 63       48 47    40 39      32 31         16 15      0
+		 */
+		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
+		       "[vl er S cks ln] [bi->dma       ] [bi->skb] "
+		       "<-- Legacy format\n");
+		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
+			rx_desc = E1000_RX_DESC(*rx_ring, i);
+			buffer_info = &rx_ring->buffer_info[i];
+			u0 = (struct my_u0 *)rx_desc;
+			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
+			       "%016llX %p",
+			       i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+			       (u64)buffer_info->dma, buffer_info->skb);
+			if (i == rx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == rx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+
+			if (netif_msg_pktdata(adapter))
+				print_hex_dump(KERN_INFO, "",
+					       DUMP_PREFIX_ADDRESS,
+					       16, 1,
+					       phys_to_virt(buffer_info->dma),
+					       adapter->rx_buffer_len, true);
+		}
+	}
+
+exit:
+	return;
+}
+
 /**
  * e1000_desc_unused - calculate if we have unused descriptors
  **/
@@ -178,10 +536,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
 		buffer_info->skb = skb;
 map_skb:
-		buffer_info->dma = pci_map_single(pdev, skb->data,
+		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
 						  adapter->rx_buffer_len,
-						  PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+						  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
 			dev_err(&pdev->dev, "RX DMA map failed\n");
 			adapter->rx_dma_failed++;
 			break;
@@ -190,26 +548,23 @@ map_skb:
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 
+		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+			/*
+			 * Force memory writes to complete before letting h/w
+			 * know there are new descriptors to fetch.  (Only
+			 * applicable for weak-ordered memory model archs,
+			 * such as IA-64).
+			 */
+			wmb();
+			writel(i, adapter->hw.hw_addr + rx_ring->tail);
+		}
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
 		buffer_info = &rx_ring->buffer_info[i];
 	}
 
-	if (rx_ring->next_to_use != i) {
-		rx_ring->next_to_use = i;
-		if (i-- == 0)
-			i = (rx_ring->count - 1);
-
-		/*
-		 * Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.  (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64).
-		 */
-		wmb();
-		writel(i, adapter->hw.hw_addr + rx_ring->tail);
-	}
+	rx_ring->next_to_use = i;
 }
 
 /**
@@ -247,11 +602,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
-			ps_page->dma = pci_map_page(pdev,
-						    ps_page->page,
-						    0, PAGE_SIZE,
-						    PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(pdev, ps_page->dma)) {
+			ps_page->dma = dma_map_page(&pdev->dev,
+						    ps_page->page,
+						    0, PAGE_SIZE,
+						    DMA_FROM_DEVICE);
+			if (dma_mapping_error(&pdev->dev,
+					      ps_page->dma)) {
 				dev_err(&adapter->pdev->dev,
 					"RX DMA page map failed\n");
 				adapter->rx_dma_failed++;
@@ -276,10 +632,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 		}
 
 		buffer_info->skb = skb;
-		buffer_info->dma = pci_map_single(pdev, skb->data,
+		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
 						  adapter->rx_ps_bsize0,
-						  PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+						  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
 			dev_err(&pdev->dev, "RX DMA map failed\n");
 			adapter->rx_dma_failed++;
 			/* cleanup skb */
@@ -290,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
 		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 
+		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+			/*
+			 * Force memory writes to complete before letting h/w
+			 * know there are new descriptors to fetch.  (Only
+			 * applicable for weak-ordered memory model archs,
+			 * such as IA-64).
+			 */
+			wmb();
+			writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+		}
+
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
@@ -297,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 	}
 
 no_buffers:
-	if (rx_ring->next_to_use != i) {
-		rx_ring->next_to_use = i;
-
-		if (!(i--))
-			i = (rx_ring->count - 1);
-
-		/*
-		 * Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.  (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64).
-		 */
-		wmb();
-		/*
-		 * Hardware increments by 16 bytes, but packet split
-		 * descriptors are 32 bytes...so we increment tail
-		 * twice as much.
-		 */
-		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
-	}
+	rx_ring->next_to_use = i;
 }
 
 /**
@@ -366,10 +714,10 @@ check_page:
 		}
 
 		if (!buffer_info->dma)
-			buffer_info->dma = pci_map_page(pdev,
+			buffer_info->dma = dma_map_page(&pdev->dev,
 							buffer_info->page, 0,
 							PAGE_SIZE,
-							PCI_DMA_FROMDEVICE);
+							DMA_FROM_DEVICE);
 
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -443,10 +791,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		cleaned = 1;
 		cleaned_count++;
-		pci_unmap_single(pdev,
+		dma_unmap_single(&pdev->dev,
 				 buffer_info->dma,
 				 adapter->rx_buffer_len,
-				 PCI_DMA_FROMDEVICE);
+				 DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
 		length = le16_to_cpu(rx_desc->length);
@@ -547,12 +895,11 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter,
 {
 	if (buffer_info->dma) {
 		if (buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev, buffer_info->dma,
-				       buffer_info->length, PCI_DMA_TODEVICE);
+			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+				       buffer_info->length, DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev, buffer_info->dma,
-					 buffer_info->length,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+					 buffer_info->length, DMA_TO_DEVICE);
 		buffer_info->dma = 0;
 	}
 	if (buffer_info->skb) {
@@ -643,14 +990,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 			cleaned = (i == eop);
 
 			if (cleaned) {
-				struct sk_buff *skb = buffer_info->skb;
-				unsigned int segs, bytecount;
-				segs = skb_shinfo(skb)->gso_segs ?: 1;
-				/* multiply data chunks by size of headers */
-				bytecount = ((segs - 1) * skb_headlen(skb)) +
-					    skb->len;
-				total_tx_packets += segs;
-				total_tx_bytes += bytecount;
+				total_tx_packets += buffer_info->segs;
+				total_tx_bytes += buffer_info->bytecount;
 			}
 
 			e1000_put_txbuf(adapter, buffer_info);
@@ -753,9 +1094,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 
 		cleaned = 1;
 		cleaned_count++;
-		pci_unmap_single(pdev, buffer_info->dma,
+		dma_unmap_single(&pdev->dev, buffer_info->dma,
 				 adapter->rx_ps_bsize0,
-				 PCI_DMA_FROMDEVICE);
+				 DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
 		/* see !EOP comment in other rx routine */
@@ -811,13 +1152,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				 * kmap_atomic, so we can't hold the mapping
 				 * very long
 				 */
-				pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
-					PAGE_SIZE, PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+							PAGE_SIZE, DMA_FROM_DEVICE);
 				vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
 				memcpy(skb_tail_pointer(skb), vaddr, l1);
 				kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
-				pci_dma_sync_single_for_device(pdev, ps_page->dma,
-					PAGE_SIZE, PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+							   PAGE_SIZE, DMA_FROM_DEVICE);
 
 				/* remove the CRC */
 				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -834,8 +1175,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				break;
 
 			ps_page = &buffer_info->ps_pages[j];
-			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
-				       PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
 			ps_page->dma = 0;
 			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
 			ps_page->page = NULL;
@@ -953,8 +1294,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 
 		cleaned = true;
 		cleaned_count++;
-		pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
-			       PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+			       DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
 		length = le16_to_cpu(rx_desc->length);
@@ -1090,17 +1431,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma) {
 			if (adapter->clean_rx == e1000_clean_rx_irq)
-				pci_unmap_single(pdev, buffer_info->dma,
+				dma_unmap_single(&pdev->dev, buffer_info->dma,
 						 adapter->rx_buffer_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
-				pci_unmap_page(pdev, buffer_info->dma,
+				dma_unmap_page(&pdev->dev, buffer_info->dma,
 					       PAGE_SIZE,
-					       PCI_DMA_FROMDEVICE);
+					       DMA_FROM_DEVICE);
 			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
-				pci_unmap_single(pdev, buffer_info->dma,
+				dma_unmap_single(&pdev->dev, buffer_info->dma,
 						 adapter->rx_ps_bsize0,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			buffer_info->dma = 0;
 		}
 
@@ -1118,8 +1459,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 			ps_page = &buffer_info->ps_pages[j];
 			if (!ps_page->page)
 				break;
-			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
-				       PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
 			ps_page->dma = 0;
 			put_page(ps_page->page);
 			ps_page->page = NULL;
@@ -1426,8 +1767,6 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
 		pci_disable_msi(adapter->pdev);
 		adapter->flags &= ~FLAG_MSI_ENABLED;
 	}
-
-	return;
 }
 
 /**
@@ -1479,8 +1818,6 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
 		/* Don't do anything; this is the system default */
 		break;
 	}
-
-	return;
 }
 
 /**
@@ -2185,10 +2522,10 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
 	}
 }
 
-static void e1000_init_manageability(struct e1000_adapter *adapter)
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 manc, manc2h;
+	u32 manc, manc2h, mdef, i, j;
 
 	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
 		return;
@@ -2202,10 +2539,49 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
 	 */
 	manc |= E1000_MANC_EN_MNG2HOST;
 	manc2h = er32(MANC2H);
-#define E1000_MNG2HOST_PORT_623 (1 << 5)
-#define E1000_MNG2HOST_PORT_664 (1 << 6)
-	manc2h |= E1000_MNG2HOST_PORT_623;
-	manc2h |= E1000_MNG2HOST_PORT_664;
+
+	switch (hw->mac.type) {
+	default:
+		manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		/*
+		 * Check if IPMI pass-through decision filter already exists;
+		 * if so, enable it.
+		 */
+		for (i = 0, j = 0; i < 8; i++) {
+			mdef = er32(MDEF(i));
+
+			/* Ignore filters with anything other than IPMI ports */
+			if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+				continue;
+
+			/* Enable this decision filter in MANC2H */
+			if (mdef)
+				manc2h |= (1 << i);
+
+			j |= mdef;
+		}
+
+		if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+			break;
+
+		/* Create new decision filter in an empty filter */
+		for (i = 0, j = 0; i < 8; i++)
+			if (er32(MDEF(i)) == 0) {
+				ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+					       E1000_MDEF_PORT_664));
+				manc2h |= (1 << 1);
+				j++;
+				break;
+			}
+
+		if (!j)
+			e_warn("Unable to create IPMI pass-through filter\n");
+		break;
+	}
+
 	ew32(MANC2H, manc2h);
 	ew32(MANC, manc);
 }
@@ -2565,7 +2941,7 @@ static void e1000_set_multi(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
 	u8  *mta_list;
 	u32 rctl;
 	int i;
@@ -2597,9 +2973,8 @@ static void e1000_set_multi(struct net_device *netdev)
 
 		/* prepare a packed array of only addresses. */
 		i = 0;
-		netdev_for_each_mc_addr(mc_ptr, netdev)
-			memcpy(mta_list + (i++ * ETH_ALEN),
-			       mc_ptr->dmi_addr, ETH_ALEN);
+		netdev_for_each_mc_addr(ha, netdev)
+			memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
 
 		e1000_update_mc_addr_list(hw, mta_list, i);
 		kfree(mta_list);
@@ -2621,7 +2996,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
 	e1000_set_multi(adapter->netdev);
 
 	e1000_restore_vlan(adapter);
-	e1000_init_manageability(adapter);
+	e1000_init_manageability_pt(adapter);
 
 	e1000_configure_tx(adapter);
 	e1000_setup_rctl(adapter);
@@ -2755,6 +3130,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 			fc->high_water = 0x5000;
 			fc->low_water  = 0x3000;
 		}
+		fc->refresh_time = 0x1000;
 	} else {
 		if ((adapter->flags & FLAG_HAS_ERT) &&
 		    (adapter->netdev->mtu > ETH_DATA_LEN))
@@ -2792,10 +3168,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	if (mac->ops.init_hw(hw))
 		e_err("Hardware Error\n");
 
-	/* additional part of the flow-control workaround above */
-	if (hw->mac.type == e1000_pchlan)
-		ew32(FCRTV_PCH, 0x1000);
-
 	e1000_update_mng_vlan(adapter);
 
 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -2841,7 +3213,11 @@ int e1000e_up(struct e1000_adapter *adapter)
 	netif_wake_queue(adapter->netdev);
 
 	/* fire a link change interrupt to start the watchdog */
-	ew32(ICS, E1000_ICS_LSC);
+	if (adapter->msix_entries)
+		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+	else
+		ew32(ICS, E1000_ICS_LSC);
+
 	return 0;
 }
 
@@ -3085,12 +3461,15 @@ static int e1000_open(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
 	int err;
 
 	/* disallow open during test */
 	if (test_bit(__E1000_TESTING, &adapter->state))
 		return -EBUSY;
 
+	pm_runtime_get_sync(&pdev->dev);
+
 	netif_carrier_off(netdev);
 
 	/* allocate transmit descriptors */
@@ -3103,6 +3482,15 @@ static int e1000_open(struct net_device *netdev)
 	if (err)
 		goto err_setup_rx;
 
+	/*
+	 * If AMT is enabled, let the firmware know that the network
+	 * interface is now open and reset the part to a known state.
+	 */
+	if (adapter->flags & FLAG_HAS_AMT) {
+		e1000_get_hw_control(adapter);
+		e1000e_reset(adapter);
+	}
+
 	e1000e_power_up_phy(adapter);
 
 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -3111,13 +3499,6 @@ static int e1000_open(struct net_device *netdev)
 		e1000_update_mng_vlan(adapter);
 
 	/*
-	 * If AMT is enabled, let the firmware know that the network
-	 * interface is now open
-	 */
-	if (adapter->flags & FLAG_HAS_AMT)
-		e1000_get_hw_control(adapter);
-
-	/*
 	 * before we allocate an interrupt, we must be ready to handle it.
 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 	 * as soon as we call pci_request_irq, so we have to setup our
@@ -3151,8 +3532,14 @@ static int e1000_open(struct net_device *netdev)
 
 	netif_start_queue(netdev);
 
+	adapter->idle_check = true;
+	pm_runtime_put(&pdev->dev);
+
 	/* fire a link status change interrupt to start the watchdog */
-	ew32(ICS, E1000_ICS_LSC);
+	if (adapter->msix_entries)
+		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+	else
+		ew32(ICS, E1000_ICS_LSC);
 
 	return 0;
 
@@ -3164,6 +3551,7 @@ err_setup_rx:
 	e1000e_free_tx_resources(adapter);
 err_setup_tx:
 	e1000e_reset(adapter);
+	pm_runtime_put_sync(&pdev->dev);
 
 	return err;
 }
@@ -3182,11 +3570,17 @@ err_setup_tx:
 static int e1000_close(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
 
 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
-	e1000e_down(adapter);
+
+	pm_runtime_get_sync(&pdev->dev);
+
+	if (!test_bit(__E1000_DOWN, &adapter->state)) {
+		e1000e_down(adapter);
+		e1000_free_irq(adapter);
+	}
 	e1000_power_down_phy(adapter);
-	e1000_free_irq(adapter);
 
 	e1000e_free_tx_resources(adapter);
 	e1000e_free_rx_resources(adapter);
@@ -3208,6 +3602,8 @@ static int e1000_close(struct net_device *netdev)
 	if (adapter->flags & FLAG_HAS_AMT)
 		e1000_release_hw_control(adapter);
 
+	pm_runtime_put_sync(&pdev->dev);
+
 	return 0;
 }
 /**
@@ -3552,6 +3948,9 @@ static void e1000_watchdog_task(struct work_struct *work)
 
 	link = e1000e_has_link(adapter);
 	if ((netif_carrier_ok(netdev)) && link) {
+		/* Cancel scheduled suspend requests. */
+		pm_runtime_resume(netdev->dev.parent);
+
 		e1000e_enable_receives(adapter);
 		goto link_up;
 	}
@@ -3563,6 +3962,10 @@ static void e1000_watchdog_task(struct work_struct *work)
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
 			bool txb2b = 1;
+
+			/* Cancel scheduled suspend requests. */
+			pm_runtime_resume(netdev->dev.parent);
+
 			/* update snapshot of PHY registers on LSC */
 			e1000_phy_read_status(adapter);
 			mac->ops.get_link_up_info(&adapter->hw,
@@ -3672,6 +4075,9 @@ static void e1000_watchdog_task(struct work_struct *work)
 
 			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
 				schedule_work(&adapter->reset_task);
+			else
+				pm_schedule_suspend(netdev->dev.parent,
+						    LINK_TIMEOUT);
 		}
 	}
 
@@ -3707,6 +4113,22 @@ link_up:
 		}
 	}
 
+	/* Simple mode for Interrupt Throttle Rate (ITR) */
+	if (adapter->itr_setting == 4) {
+		/*
+		 * Symmetric Tx/Rx gets a reduced ITR=2000;
+		 * Total asymmetrical Tx or Rx gets ITR=8000;
+		 * everyone else is between 2000-8000.
+		 */
+		u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+		u32 dif = (adapter->gotc > adapter->gorc ?
+			    adapter->gotc - adapter->gorc :
+			    adapter->gorc - adapter->gotc) / 10000;
+		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+		ew32(ITR, 1000000000 / (itr * 256));
+	}
+
 	/* Cause software interrupt to ensure Rx ring is cleaned */
 	if (adapter->msix_entries)
 		ew32(ICS, adapter->rx_ring->ims_val);
@@ -3881,7 +4303,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	unsigned int len = skb_headlen(skb);
 	unsigned int offset = 0, size, count = 0, i;
-	unsigned int f;
+	unsigned int f, bytecount, segs;
 
 	i = tx_ring->next_to_use;
 
@@ -3892,10 +4314,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 		buffer_info->length = size;
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
-		buffer_info->dma = pci_map_single(pdev, skb->data + offset,
-						  size, PCI_DMA_TODEVICE);
+		buffer_info->dma = dma_map_single(&pdev->dev,
+						  skb->data + offset,
+						  size, DMA_TO_DEVICE);
 		buffer_info->mapped_as_page = false;
-		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 			goto dma_error;
 
 		len -= size;
@@ -3927,11 +4350,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 			buffer_info->length = size;
 			buffer_info->time_stamp = jiffies;
 			buffer_info->next_to_watch = i;
-			buffer_info->dma = pci_map_page(pdev, frag->page,
+			buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
 							offset, size,
-							PCI_DMA_TODEVICE);
+							DMA_TO_DEVICE);
 			buffer_info->mapped_as_page = true;
-			if (pci_dma_mapping_error(pdev, buffer_info->dma))
+			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 				goto dma_error;
 
 			len -= size;
@@ -3940,7 +4363,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 		}
 	}
 
+	segs = skb_shinfo(skb)->gso_segs ?: 1;
+	/* multiply data chunks by size of headers */
+	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
 	tx_ring->buffer_info[i].skb = skb;
+	tx_ring->buffer_info[i].segs = segs;
+	tx_ring->buffer_info[i].bytecount = bytecount;
 	tx_ring->buffer_info[first].next_to_watch = i;
 
 	return count;
@@ -4107,7 +4536,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	unsigned int max_per_txd = E1000_MAX_PER_TXD;
 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
-	unsigned int len = skb->len - skb->data_len;
+	unsigned int len = skb_headlen(skb);
 	unsigned int nr_frags;
 	unsigned int mss;
 	int count = 0;
@@ -4157,7 +4586,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				dev_kfree_skb_any(skb);
 				return NETDEV_TX_OK;
 			}
-			len = skb->len - skb->data_len;
+			len = skb_headlen(skb);
 		}
 	}
 
@@ -4243,6 +4672,8 @@ static void e1000_reset_task(struct work_struct *work)
 	struct e1000_adapter *adapter;
 	adapter = container_of(work, struct e1000_adapter, reset_task);
 
+	e1000e_dump(adapter);
+	e_err("Reset adapter\n");
 	e1000e_reinit_locked(adapter);
 }
 
@@ -4477,13 +4908,15 @@ out:
 	return retval;
 }
 
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+			    bool runtime)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, ctrl_ext, rctl, status;
-	u32 wufc = adapter->wol;
+	/* Runtime suspend should only enable wakeup for link changes */
+	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
 	int retval = 0;
 
 	netif_device_detach(netdev);
@@ -4653,20 +5086,13 @@ void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
 	__e1000e_disable_aspm(pdev, state);
 }
 
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_OPS
+static bool e1000e_pm_ready(struct e1000_adapter *adapter)
 {
-	int retval;
-	bool wake;
-
-	retval = __e1000_shutdown(pdev, &wake);
-	if (!retval)
-		e1000_complete_shutdown(pdev, true, wake);
-
-	return retval;
+	return !!adapter->tx_ring->buffer_info;
 }
 
-static int e1000_resume(struct pci_dev *pdev)
+static int __e1000_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4679,18 +5105,6 @@ static int e1000_resume(struct pci_dev *pdev)
 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
 		e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
 
-	err = pci_enable_device_mem(pdev);
-	if (err) {
-		dev_err(&pdev->dev,
-			"Cannot enable PCI device from suspend\n");
-		return err;
-	}
-
-	pci_set_master(pdev);
-
-	pci_enable_wake(pdev, PCI_D3hot, 0);
-	pci_enable_wake(pdev, PCI_D3cold, 0);
-
 	e1000e_set_interrupt_capability(adapter);
 	if (netif_running(netdev)) {
 		err = e1000_request_irq(adapter);
@@ -4731,7 +5145,7 @@ static int e1000_resume(struct pci_dev *pdev)
 
 	e1000e_reset(adapter);
 
-	e1000_init_manageability(adapter);
+	e1000_init_manageability_pt(adapter);
 
 	if (netif_running(netdev))
 		e1000e_up(adapter);
@@ -4748,13 +5162,88 @@ static int e1000_resume(struct pci_dev *pdev)
 
 	return 0;
 }
-#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int e1000_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int retval;
+	bool wake;
+
+	retval = __e1000_shutdown(pdev, &wake, false);
+	if (!retval)
+		e1000_complete_shutdown(pdev, true, wake);
+
+	return retval;
+}
+
+static int e1000_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (e1000e_pm_ready(adapter))
+		adapter->idle_check = true;
+
+	return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int e1000_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (e1000e_pm_ready(adapter)) {
+		bool wake;
+
+		__e1000_shutdown(pdev, &wake, true);
+	}
+
+	return 0;
+}
+
+static int e1000_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (!e1000e_pm_ready(adapter))
+		return 0;
+
+	if (adapter->idle_check) {
+		adapter->idle_check = false;
+		if (!e1000e_has_link(adapter))
+			pm_schedule_suspend(dev, MSEC_PER_SEC);
+	}
+
+	return -EBUSY;
+}
+
+static int e1000_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (!e1000e_pm_ready(adapter))
+		return 0;
+
+	adapter->idle_check = !dev->power.runtime_auto;
+	return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM_OPS */
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
 	bool wake = false;
 
-	__e1000_shutdown(pdev, &wake);
+	__e1000_shutdown(pdev, &wake, false);
 
 	if (system_state == SYSTEM_POWER_OFF)
 		e1000_complete_shutdown(pdev, false, wake);
@@ -4828,8 +5317,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
 		result = PCI_ERS_RESULT_DISCONNECT;
 	} else {
 		pci_set_master(pdev);
+		pdev->state_saved = true;
 		pci_restore_state(pdev);
-		pci_save_state(pdev);
 
 		pci_enable_wake(pdev, PCI_D3hot, 0);
 		pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -4857,7 +5346,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	e1000_init_manageability(adapter);
+	e1000_init_manageability_pt(adapter);
 
 	if (netif_running(netdev)) {
 		if (e1000e_up(adapter)) {
@@ -4970,16 +5459,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 		return err;
 
 	pci_using_dac = 0;
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	if (!err) {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (!err)
 			pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
-							  DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 					"configuration, aborting\n");
@@ -5010,6 +5499,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
+	netdev->irq = pdev->irq;
+
 	pci_set_drvdata(pdev, netdev);
 	adapter = netdev_priv(netdev);
 	hw = &adapter->hw;
@@ -5230,6 +5721,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	e1000_print_device_info(adapter);
 
+	if (pci_dev_run_wake(pdev)) {
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
+	pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
+
 	return 0;
 
 err_register:
@@ -5272,12 +5769,16 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+	pm_runtime_get_sync(&pdev->dev);
 
 	/*
 	 * flush_scheduled work may reschedule our watchdog task, so
 	 * explicitly disable watchdog tasks from being rescheduled
 	 */
-	set_bit(__E1000_DOWN, &adapter->state);
+	if (!down)
+		set_bit(__E1000_DOWN, &adapter->state);
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
@@ -5291,8 +5792,17 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 	if (!(netdev->flags & IFF_UP))
 		e1000_power_down_phy(adapter);
 
+	/* Don't lie to e1000_close() down the road. */
+	if (!down)
+		clear_bit(__E1000_DOWN, &adapter->state);
 	unregister_netdev(netdev);
 
+	if (pci_dev_run_wake(pdev)) {
+		pm_runtime_disable(&pdev->dev);
+		pm_runtime_set_suspended(&pdev->dev);
+	}
+	pm_runtime_put_noidle(&pdev->dev);
+
 	/*
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
@@ -5382,6 +5892,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
 
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
@@ -5392,16 +5903,22 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
 
+#ifdef CONFIG_PM_OPS
+static const struct dev_pm_ops e1000_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+	SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
+			   e1000_runtime_resume, e1000_idle)
+};
+#endif
+
 /* PCI Device API Driver */
 static struct pci_driver e1000_driver = {
 	.name     = e1000e_driver_name,
 	.id_table = e1000_pci_tbl,
 	.probe    = e1000_probe,
 	.remove   = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM
-	/* Power Management Hooks */
-	.suspend  = e1000_suspend,
-	.resume   = e1000_resume,
+#ifdef CONFIG_PM_OPS
+	.driver.pm = &e1000_pm_ops,
 #endif
 	.shutdown = e1000_shutdown,
 	.err_handler			= &e1000_err_handler
@@ -5416,10 +5933,9 @@ static struct pci_driver e1000_driver = {
 static int __init e1000_init_module(void)
 {
 	int ret;
-	printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
-	       e1000e_driver_name, e1000e_driver_version);
-	printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
-	       e1000e_driver_name);
+	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+		e1000e_driver_version);
+	pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
 	ret = pci_register_driver(&e1000_driver);
 
 	return ret;
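
For reference, the suspend/resume restructuring above follows the standard dev_pm_ops pattern: instead of filling pci_driver.suspend/.resume with pci_dev-based callbacks, device-based callbacks are written once and bound through driver.pm, which can also carry runtime-PM hooks. A minimal sketch under assumed names (the example_* identifiers are illustrative, not this driver's code):

#include <linux/pci.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);	/* recover the PCI handle */

	pci_save_state(pdev);	/* typically followed by device-specific quiesce */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* ... bring the hardware back up ... */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	/* SET_RUNTIME_PM_OPS(...) would add runtime callbacks, as e1000e does */
};

static struct pci_driver example_driver = {
	.name		= "example",
	.driver.pm	= &example_pm_ops,
	/* .id_table, .probe, .remove as usual */
};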