Diffstat (limited to 'drivers/net/e1000e/netdev.c')
-rw-r--r--    drivers/net/e1000e/netdev.c    520
1 file changed, 269 insertions, 251 deletions
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 0687c6aa4e46..c3105c5087e0 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -65,17 +65,6 @@ static const struct e1000_info *e1000_info_tbl[] = {
         [board_pchlan]          = &e1000_pch_info,
 };
 
-#ifdef DEBUG
-/**
- * e1000_get_hw_dev_name - return device name string
- * used by hardware layer to print debugging information
- **/
-char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
-{
-        return hw->adapter->netdev->name;
-}
-#endif
-
 /**
  * e1000_desc_unused - calculate if we have unused descriptors
  **/
@@ -167,7 +156,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
         struct e1000_buffer *buffer_info;
         struct sk_buff *skb;
         unsigned int i;
-        unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+        unsigned int bufsz = adapter->rx_buffer_len;
 
         i = rx_ring->next_to_use;
         buffer_info = &rx_ring->buffer_info[i];
@@ -179,20 +168,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                         goto map_skb;
                 }
 
-                skb = netdev_alloc_skb(netdev, bufsz);
+                skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                 if (!skb) {
                         /* Better luck next round */
                         adapter->alloc_rx_buff_failed++;
                         break;
                 }
 
-                /*
-                 * Make buffer alignment 2 beyond a 16 byte boundary
-                 * this will result in a 16 byte aligned IP header after
-                 * the 14 byte MAC header is removed
-                 */
-                skb_reserve(skb, NET_IP_ALIGN);
-
                 buffer_info->skb = skb;
 map_skb:
                 buffer_info->dma = pci_map_single(pdev, skb->data,
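Every rx-buffer allocation site in this patch switches from an open-coded netdev_alloc_skb() + skb_reserve(NET_IP_ALIGN) pair to the then-new netdev_alloc_skb_ip_align() helper, which is also why the NET_IP_ALIGN term drops out of the bufsz arithmetic. The helper is roughly equivalent to the following (a sketch of the skbuff.h inline, not driver code):

        static inline struct sk_buff *
        netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length)
        {
                struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

                /* reserve 2 bytes so the IP header lands on a 16-byte
                 * boundary once the 14-byte Ethernet header is pulled */
                if (NET_IP_ALIGN && skb)
                        skb_reserve(skb, NET_IP_ALIGN);
                return skb;
        }

Because the helper adds NET_IP_ALIGN itself, callers pass only the payload size they need, as in the remaining hunks below.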
@@ -284,21 +266,14 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                 cpu_to_le64(ps_page->dma);
                 }
 
-                skb = netdev_alloc_skb(netdev,
-                                       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+                skb = netdev_alloc_skb_ip_align(netdev,
+                                                adapter->rx_ps_bsize0);
 
                 if (!skb) {
                         adapter->alloc_rx_buff_failed++;
                         break;
                 }
 
-                /*
-                 * Make buffer alignment 2 beyond a 16 byte boundary
-                 * this will result in a 16 byte aligned IP header after
-                 * the 14 byte MAC header is removed
-                 */
-                skb_reserve(skb, NET_IP_ALIGN);
-
                 buffer_info->skb = skb;
                 buffer_info->dma = pci_map_single(pdev, skb->data,
                                                   adapter->rx_ps_bsize0,
@@ -359,9 +334,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
         struct e1000_buffer *buffer_info;
         struct sk_buff *skb;
         unsigned int i;
-        unsigned int bufsz = 256 -
-                             16 /* for skb_reserve */ -
-                             NET_IP_ALIGN;
+        unsigned int bufsz = 256 - 16 /* for skb_reserve */;
 
         i = rx_ring->next_to_use;
         buffer_info = &rx_ring->buffer_info[i];
@@ -373,19 +346,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
                         goto check_page;
                 }
 
-                skb = netdev_alloc_skb(netdev, bufsz);
+                skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                 if (unlikely(!skb)) {
                         /* Better luck next round */
                         adapter->alloc_rx_buff_failed++;
                         break;
                 }
 
-                /* Make buffer alignment 2 beyond a 16 byte boundary
-                 * this will result in a 16 byte aligned IP header after
-                 * the 14 byte MAC header is removed
-                 */
-                skb_reserve(skb, NET_IP_ALIGN);
-
                 buffer_info->skb = skb;
 check_page:
                 /* allocate a new page if necessary */
@@ -437,6 +404,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 {
         struct net_device *netdev = adapter->netdev;
         struct pci_dev *pdev = adapter->pdev;
+        struct e1000_hw *hw = &adapter->hw;
         struct e1000_ring *rx_ring = adapter->rx_ring;
         struct e1000_rx_desc *rx_desc, *next_rxd;
         struct e1000_buffer *buffer_info, *next_buffer;
@@ -486,8 +454,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                  * packet, also make sure the frame isn't just CRC only */
                 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
                         /* All receives must fit into a single buffer */
-                        e_dbg("%s: Receive packet consumed multiple buffers\n",
-                              netdev->name);
+                        e_dbg("Receive packet consumed multiple buffers\n");
                         /* recycle */
                         buffer_info->skb = skb;
                         goto next_desc;
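Throughout the patch, e_dbg() callers stop passing netdev->name: the reworked debug macro (changed in a companion patch, and the reason e1000e_get_hw_dev_name() could be deleted above) resolves the device name from a struct e1000_hw pointer that must be in scope. That is why functions such as e1000_clean_rx_irq() gain a local hw variable here. A hypothetical sketch of the macro's shape, not the driver's exact definition:

        /* hypothetical sketch only -- the real e_dbg lives in the e1000e headers */
        #define e_dbg(format, arg...) \
                printk(KERN_DEBUG "%s: " format, hw->adapter->netdev->name, ##arg)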
@@ -513,9 +480,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                  */
                 if (length < copybreak) {
                         struct sk_buff *new_skb =
-                            netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+                            netdev_alloc_skb_ip_align(netdev, length);
                         if (new_skb) {
-                                skb_reserve(new_skb, NET_IP_ALIGN);
                                 skb_copy_to_linear_data_offset(new_skb,
                                                                -NET_IP_ALIGN,
                                                                (skb->data -
@@ -560,33 +526,52 @@ next_desc:
 
         adapter->total_rx_bytes += total_rx_bytes;
         adapter->total_rx_packets += total_rx_packets;
-        adapter->net_stats.rx_bytes += total_rx_bytes;
-        adapter->net_stats.rx_packets += total_rx_packets;
+        netdev->stats.rx_bytes += total_rx_bytes;
+        netdev->stats.rx_packets += total_rx_packets;
         return cleaned;
 }
 
 static void e1000_put_txbuf(struct e1000_adapter *adapter,
                              struct e1000_buffer *buffer_info)
 {
-        buffer_info->dma = 0;
+        if (buffer_info->dma) {
+                if (buffer_info->mapped_as_page)
+                        pci_unmap_page(adapter->pdev, buffer_info->dma,
+                                       buffer_info->length, PCI_DMA_TODEVICE);
+                else
+                        pci_unmap_single(adapter->pdev, buffer_info->dma,
+                                         buffer_info->length,
+                                         PCI_DMA_TODEVICE);
+                buffer_info->dma = 0;
+        }
         if (buffer_info->skb) {
-                skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
-                              DMA_TO_DEVICE);
                 dev_kfree_skb_any(buffer_info->skb);
                 buffer_info->skb = NULL;
         }
         buffer_info->time_stamp = 0;
 }
 
-static void e1000_print_tx_hang(struct e1000_adapter *adapter)
+static void e1000_print_hw_hang(struct work_struct *work)
 {
+        struct e1000_adapter *adapter = container_of(work,
+                                                     struct e1000_adapter,
+                                                     print_hang_task);
         struct e1000_ring *tx_ring = adapter->tx_ring;
         unsigned int i = tx_ring->next_to_clean;
         unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
         struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
+        struct e1000_hw *hw = &adapter->hw;
+        u16 phy_status, phy_1000t_status, phy_ext_status;
+        u16 pci_status;
+
+        e1e_rphy(hw, PHY_STATUS, &phy_status);
+        e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
+        e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
+
+        pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
 
-        /* detected Tx unit hang */
-        e_err("Detected Tx Unit Hang:\n"
+        /* detected Hardware unit hang */
+        e_err("Detected Hardware Unit Hang:\n"
               "  TDH                  <%x>\n"
               "  TDT                  <%x>\n"
               "  next_to_use          <%x>\n"
@@ -595,7 +580,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
               "  time_stamp           <%lx>\n"
               "  next_to_watch        <%x>\n"
               "  jiffies              <%lx>\n"
-              "  next_to_watch.status <%x>\n",
+              "  next_to_watch.status <%x>\n"
+              "MAC Status             <%x>\n"
+              "PHY Status             <%x>\n"
+              "PHY 1000BASE-T Status  <%x>\n"
+              "PHY Extended Status    <%x>\n"
+              "PCI Status             <%x>\n",
               readl(adapter->hw.hw_addr + tx_ring->head),
               readl(adapter->hw.hw_addr + tx_ring->tail),
               tx_ring->next_to_use,
@@ -603,7 +593,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
               tx_ring->buffer_info[eop].time_stamp,
               eop,
               jiffies,
-              eop_desc->upper.fields.status);
+              eop_desc->upper.fields.status,
+              er32(STATUS),
+              phy_status,
+              phy_1000t_status,
+              phy_ext_status,
+              pci_status);
 }
 
 /**
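Reading PHY registers over MDIO (e1e_rphy()) and PCI config space is too slow, and potentially sleeps, for the Tx-clean path that detects the hang, so the diagnostic dump moves into a work item: the hot path only calls schedule_work() and the dump runs later in process context. The pattern, reduced to its parts (all names as introduced by this patch):

        /* in struct e1000_adapter (e1000.h), next to the other work items: */
        struct work_struct print_hang_task;

        /* in e1000_probe(): */
        INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

        /* in e1000_clean_tx_irq(), when a hang is detected: */
        schedule_work(&adapter->print_hang_task);

        /* the handler recovers its adapter via container_of(): */
        static void e1000_print_hw_hang(struct work_struct *work)
        {
                struct e1000_adapter *adapter =
                        container_of(work, struct e1000_adapter, print_hang_task);
                /* process context: PHY and PCI config reads are safe here */
        }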
@@ -677,21 +672,23 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
         }
 
         if (adapter->detect_tx_hung) {
-                /* Detect a transmit hang in hardware, this serializes the
-                 * check with the clearing of time_stamp and movement of i */
+                /*
+                 * Detect a transmit hang in hardware, this serializes the
+                 * check with the clearing of time_stamp and movement of i
+                 */
                 adapter->detect_tx_hung = 0;
                 if (tx_ring->buffer_info[i].time_stamp &&
                     time_after(jiffies, tx_ring->buffer_info[i].time_stamp
-                               + (adapter->tx_timeout_factor * HZ))
-                    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
-                        e1000_print_tx_hang(adapter);
+                               + (adapter->tx_timeout_factor * HZ)) &&
+                    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+                        schedule_work(&adapter->print_hang_task);
                         netif_stop_queue(netdev);
                 }
         }
         adapter->total_tx_bytes += total_tx_bytes;
         adapter->total_tx_packets += total_tx_packets;
-        adapter->net_stats.tx_bytes += total_tx_bytes;
-        adapter->net_stats.tx_packets += total_tx_packets;
+        netdev->stats.tx_bytes += total_tx_bytes;
+        netdev->stats.tx_packets += total_tx_packets;
         return (count < tx_ring->count);
 }
 
@@ -705,6 +702,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                   int *work_done, int work_to_do)
 {
+        struct e1000_hw *hw = &adapter->hw;
         union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
         struct net_device *netdev = adapter->netdev;
         struct pci_dev *pdev = adapter->pdev;
@@ -748,8 +746,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                 buffer_info->dma = 0;
 
                 if (!(staterr & E1000_RXD_STAT_EOP)) {
-                        e_dbg("%s: Packet Split buffers didn't pick up the "
-                              "full packet\n", netdev->name);
+                        e_dbg("Packet Split buffers didn't pick up the full "
+                              "packet\n");
                         dev_kfree_skb_irq(skb);
                         goto next_desc;
                 }
@@ -762,8 +760,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                 length = le16_to_cpu(rx_desc->wb.middle.length0);
 
                 if (!length) {
-                        e_dbg("%s: Last part of the packet spanning multiple "
-                              "descriptors\n", netdev->name);
+                        e_dbg("Last part of the packet spanning multiple "
+                              "descriptors\n");
                         dev_kfree_skb_irq(skb);
                         goto next_desc;
                 }
@@ -871,8 +869,8 @@ next_desc:
 
         adapter->total_rx_bytes += total_rx_bytes;
         adapter->total_rx_packets += total_rx_packets;
-        adapter->net_stats.rx_bytes += total_rx_bytes;
-        adapter->net_stats.rx_packets += total_rx_packets;
+        netdev->stats.rx_bytes += total_rx_bytes;
+        netdev->stats.rx_packets += total_rx_packets;
         return cleaned;
 }
 
@@ -1051,8 +1049,8 @@ next_desc:
 
         adapter->total_rx_bytes += total_rx_bytes;
         adapter->total_rx_packets += total_rx_packets;
-        adapter->net_stats.rx_bytes += total_rx_bytes;
-        adapter->net_stats.rx_packets += total_rx_packets;
+        netdev->stats.rx_bytes += total_rx_bytes;
+        netdev->stats.rx_packets += total_rx_packets;
         return cleaned;
 }
 
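All of these adapter->net_stats updates move to the stats structure embedded in struct net_device, so the driver no longer keeps a private copy of the OS counters. Accordingly, the e1000_get_stats() hunk near the end of the patch shrinks to:

        static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
        {
                /* only return the current stats */
                return &netdev->stats;
        }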
@@ -1199,7 +1197,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
         struct e1000_hw *hw = &adapter->hw;
         u32 rctl, icr = er32(ICR);
 
-        if (!icr)
+        if (!icr || test_bit(__E1000_DOWN, &adapter->state))
                 return IRQ_NONE;  /* Not our interrupt */
 
         /*
@@ -1481,7 +1479,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
         else
                 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
         err = request_irq(adapter->msix_entries[vector].vector,
-                          &e1000_intr_msix_rx, 0, adapter->rx_ring->name,
+                          e1000_intr_msix_rx, 0, adapter->rx_ring->name,
                           netdev);
         if (err)
                 goto out;
@@ -1494,7 +1492,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
         else
                 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
         err = request_irq(adapter->msix_entries[vector].vector,
-                          &e1000_intr_msix_tx, 0, adapter->tx_ring->name,
+                          e1000_intr_msix_tx, 0, adapter->tx_ring->name,
                           netdev);
         if (err)
                 goto out;
@@ -1503,7 +1501,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
         vector++;
 
         err = request_irq(adapter->msix_entries[vector].vector,
-                          &e1000_msix_other, 0, netdev->name, netdev);
+                          e1000_msix_other, 0, netdev->name, netdev);
         if (err)
                 goto out;
 
@@ -1534,7 +1532,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
                 e1000e_set_interrupt_capability(adapter);
         }
         if (adapter->flags & FLAG_MSI_ENABLED) {
-                err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0,
+                err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
                                   netdev->name, netdev);
                 if (!err)
                         return err;
@@ -1544,7 +1542,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
                 adapter->int_mode = E1000E_INT_MODE_LEGACY;
         }
 
-        err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED,
+        err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
                           netdev->name, netdev);
         if (err)
                 e_err("Unable to allocate interrupt, Error: %d\n", err);
@@ -2040,11 +2038,14 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
              E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
             (vid == adapter->mng_vlan_id))
                 return;
+
         /* add VID to filter table */
-        index = (vid >> 5) & 0x7F;
-        vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
-        vfta |= (1 << (vid & 0x1F));
-        e1000e_write_vfta(hw, index, vfta);
+        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+                index = (vid >> 5) & 0x7F;
+                vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+                vfta |= (1 << (vid & 0x1F));
+                hw->mac.ops.write_vfta(hw, index, vfta);
+        }
 }
 
 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -2069,10 +2070,12 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
         }
 
         /* remove VID from filter table */
-        index = (vid >> 5) & 0x7F;
-        vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
-        vfta &= ~(1 << (vid & 0x1F));
-        e1000e_write_vfta(hw, index, vfta);
+        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+                index = (vid >> 5) & 0x7F;
+                vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+                vfta &= ~(1 << (vid & 0x1F));
+                hw->mac.ops.write_vfta(hw, index, vfta);
+        }
 }
 
 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
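The FLAG_HAS_HW_VLAN_FILTER guard keeps MACs that do not implement a VLAN filter table from being poked at these register offsets, and the write now goes through the per-MAC ops table instead of calling e1000e_write_vfta() directly. For reference, the index math treats the VFTA as 128 32-bit registers holding one bit per possible VID; a standalone illustration with a hypothetical VID:

        u16 vid = 1234;
        u32 index = (vid >> 5) & 0x7F;  /* register 38 of VFTA[0..127] */
        u32 mask  = 1 << (vid & 0x1F);  /* bit 18 within that register */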
@@ -2464,8 +2467,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
                 ew32(ITR, 1000000000 / (adapter->itr * 256));
 
         ctrl_ext = er32(CTRL_EXT);
-        /* Reset delay timers after every interrupt */
-        ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
         /* Auto-Mask interrupts upon ICR access */
         ctrl_ext |= E1000_CTRL_EXT_IAME;
         ew32(IAM, 0xffffffff);
@@ -2507,21 +2508,23 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
          * packet size is equal or larger than the specified value (in 8 byte
          * units), e.g. using jumbo frames when setting to E1000_ERT_2048
          */
-        if ((adapter->flags & FLAG_HAS_ERT) &&
-            (adapter->netdev->mtu > ETH_DATA_LEN)) {
-                u32 rxdctl = er32(RXDCTL(0));
-                ew32(RXDCTL(0), rxdctl | 0x3);
-                ew32(ERT, E1000_ERT_2048 | (1 << 13));
-                /*
-                 * With jumbo frames and early-receive enabled, excessive
-                 * C4->C2 latencies result in dropped transactions.
-                 */
-                pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
-                                          e1000e_driver_name, 55);
-        } else {
-                pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
-                                          e1000e_driver_name,
-                                          PM_QOS_DEFAULT_VALUE);
+        if (adapter->flags & FLAG_HAS_ERT) {
+                if (adapter->netdev->mtu > ETH_DATA_LEN) {
+                        u32 rxdctl = er32(RXDCTL(0));
+                        ew32(RXDCTL(0), rxdctl | 0x3);
+                        ew32(ERT, E1000_ERT_2048 | (1 << 13));
+                        /*
+                         * With jumbo frames and early-receive enabled,
+                         * excessive C-state transition latencies result in
+                         * dropped transactions.
+                         */
+                        pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+                                                  adapter->netdev->name, 55);
+                } else {
+                        pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+                                                  adapter->netdev->name,
+                                                  PM_QOS_DEFAULT_VALUE);
+                }
         }
 
         /* Enable Receives */
@@ -2645,18 +2648,8 @@ static void e1000_configure(struct e1000_adapter *adapter)
 **/
 void e1000e_power_up_phy(struct e1000_adapter *adapter)
 {
-        u16 mii_reg = 0;
-
-        /* Just clear the power down bit to wake the phy back up */
-        if (adapter->hw.phy.media_type == e1000_media_type_copper) {
-                /*
-                 * According to the manual, the phy will retain its
-                 * settings across a power-down/up cycle
-                 */
-                e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
-                mii_reg &= ~MII_CR_POWER_DOWN;
-                e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
-        }
+        if (adapter->hw.phy.ops.power_up)
+                adapter->hw.phy.ops.power_up(&adapter->hw);
 
         adapter->hw.mac.ops.setup_link(&adapter->hw);
 }
@@ -2664,35 +2657,17 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
 /**
  * e1000_power_down_phy - Power down the PHY
  *
- * Power down the PHY so no link is implied when interface is down
- * The PHY cannot be powered down is management or WoL is active
+ * Power down the PHY so no link is implied when interface is down.
+ * The PHY cannot be powered down if management or WoL is active.
  */
 static void e1000_power_down_phy(struct e1000_adapter *adapter)
 {
-        struct e1000_hw *hw = &adapter->hw;
-        u16 mii_reg;
-
         /* WoL is enabled */
         if (adapter->wol)
                 return;
 
-        /* non-copper PHY? */
-        if (adapter->hw.phy.media_type != e1000_media_type_copper)
-                return;
-
-        /* reset is blocked because of a SoL/IDER session */
-        if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw))
-                return;
-
-        /* manageability (AMT) is enabled */
-        if (er32(MANC) & E1000_MANC_SMBUS_EN)
-                return;
-
-        /* power down the PHY */
-        e1e_rphy(hw, PHY_CONTROL, &mii_reg);
-        mii_reg |= MII_CR_POWER_DOWN;
-        e1e_wphy(hw, PHY_CONTROL, mii_reg);
-        mdelay(1);
+        if (adapter->hw.phy.ops.power_down)
+                adapter->hw.phy.ops.power_down(&adapter->hw);
 }
 
 /**
@@ -2769,25 +2744,38 @@ void e1000e_reset(struct e1000_adapter *adapter)
         /*
          * flow control settings
          *
-         * The high water mark must be low enough to fit two full frame
+         * The high water mark must be low enough to fit one full frame
          * (or the size used for early receive) above it in the Rx FIFO.
          * Set it to the lower of:
          * - 90% of the Rx FIFO size, and
         * - the full Rx FIFO size minus the early receive size (for parts
          *   with ERT support assuming ERT set to E1000_ERT_2048), or
-         * - the full Rx FIFO size minus two full frames
+         * - the full Rx FIFO size minus one full frame
          */
-        if ((adapter->flags & FLAG_HAS_ERT) &&
-            (adapter->netdev->mtu > ETH_DATA_LEN))
-                hwm = min(((pba << 10) * 9 / 10),
-                          ((pba << 10) - (E1000_ERT_2048 << 3)));
-        else
-                hwm = min(((pba << 10) * 9 / 10),
-                          ((pba << 10) - (2 * adapter->max_frame_size)));
+        if (hw->mac.type == e1000_pchlan) {
+                /*
+                 * Workaround PCH LOM adapter hangs with certain network
+                 * loads.  If hangs persist, try disabling Tx flow control.
+                 */
+                if (adapter->netdev->mtu > ETH_DATA_LEN) {
+                        fc->high_water = 0x3500;
+                        fc->low_water = 0x1500;
+                } else {
+                        fc->high_water = 0x5000;
+                        fc->low_water = 0x3000;
+                }
+        } else {
+                if ((adapter->flags & FLAG_HAS_ERT) &&
+                    (adapter->netdev->mtu > ETH_DATA_LEN))
+                        hwm = min(((pba << 10) * 9 / 10),
+                                  ((pba << 10) - (E1000_ERT_2048 << 3)));
+                else
+                        hwm = min(((pba << 10) * 9 / 10),
+                                  ((pba << 10) - adapter->max_frame_size));
 
-        fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
-        fc->low_water = (fc->high_water - (2 * adapter->max_frame_size));
-        fc->low_water &= E1000_FCRTL_RTL; /* 8-byte granularity */
+                fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+                fc->low_water = fc->high_water - 8;
+        }
 
         if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
                 fc->pause_time = 0xFFFF;
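To make the watermark arithmetic concrete: on a hypothetical non-PCH part with an 8 KB Rx FIFO (pba = 8) at the default 1500-byte MTU (max_frame_size = 1522), the new "one full frame" rule gives

        hwm = min(8192 * 9 / 10, 8192 - 1522)
            = min(7372, 6670) = 6670
        fc->high_water = 6670 rounded down to 8-byte granularity = 6664
        fc->low_water  = 6664 - 8 = 6656

so the low-water mark now sits one 8-byte granule below the high-water mark, instead of two full frames below it as before.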
@@ -2813,6 +2801,10 @@ void e1000e_reset(struct e1000_adapter *adapter)
         if (mac->ops.init_hw(hw))
                 e_err("Hardware Error\n");
 
+        /* additional part of the flow-control workaround above */
+        if (hw->mac.type == e1000_pchlan)
+                ew32(FCRTV_PCH, 0x1000);
+
         e1000_update_mng_vlan(adapter);
 
         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -2839,6 +2831,12 @@ int e1000e_up(struct e1000_adapter *adapter)
 {
         struct e1000_hw *hw = &adapter->hw;
 
+        /* DMA latency requirement to workaround early-receive/jumbo issue */
+        if (adapter->flags & FLAG_HAS_ERT)
+                pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
+                                       adapter->netdev->name,
+                                       PM_QOS_DEFAULT_VALUE);
+
         /* hardware has been reset, we need to reload some things */
         e1000_configure(adapter);
 
@@ -2899,6 +2897,10 @@ void e1000e_down(struct e1000_adapter *adapter)
         e1000_clean_tx_ring(adapter);
         e1000_clean_rx_ring(adapter);
 
+        if (adapter->flags & FLAG_HAS_ERT)
+                pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
+                                          adapter->netdev->name);
+
         /*
          * TODO: for power management, we could drop the link and
          * pci_disable_device here.
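Taken together with the e1000_configure_rx() hunk above and the init/exit-module hunks at the end of the patch, the CPU-DMA-latency constraint moves from one global requirement keyed by the driver name to a per-adapter one keyed by the interface name, using the string-keyed pm_qos API of this kernel generation. The resulting lifecycle, as a sketch:

        /* e1000e_up(): register an (initially unconstrained) requirement */
        pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, netdev->name,
                               PM_QOS_DEFAULT_VALUE);
        /* e1000_configure_rx(): tighten to 55 (microseconds) while early
         * receive plus jumbo frames are active, relax otherwise */
        pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, netdev->name, 55);
        /* e1000e_down(): drop the requirement along with the interface */
        pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, netdev->name);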
@@ -2956,7 +2958,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
         struct e1000_hw *hw = &adapter->hw;
         u32 icr = er32(ICR);
 
-        e_dbg("%s: icr is %08X\n", netdev->name, icr);
+        e_dbg("icr is %08X\n", icr);
         if (icr & E1000_ICR_RXSEQ) {
                 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
                 wmb();
@@ -2993,7 +2995,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
         if (err)
                 goto msi_test_failed;
 
-        err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0,
+        err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
                           netdev->name, netdev);
         if (err) {
                 pci_disable_msi(adapter->pdev);
@@ -3026,7 +3028,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
                 goto msi_test_failed;
 
         /* okay so the test worked, restore settings */
-        e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
+        e_dbg("MSI interrupt test succeeded!\n");
 msi_test_failed:
         e1000e_set_interrupt_capability(adapter);
         e1000_request_irq(adapter);
@@ -3287,6 +3289,7 @@ static void e1000_update_phy_info(unsigned long data)
 **/
 void e1000e_update_stats(struct e1000_adapter *adapter)
 {
+        struct net_device *netdev = adapter->netdev;
         struct e1000_hw *hw = &adapter->hw;
         struct pci_dev *pdev = adapter->pdev;
         u16 phy_data;
@@ -3381,8 +3384,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
         adapter->stats.tsctfc += er32(TSCTFC);
 
         /* Fill out the OS statistics structure */
-        adapter->net_stats.multicast = adapter->stats.mprc;
-        adapter->net_stats.collisions = adapter->stats.colc;
+        netdev->stats.multicast = adapter->stats.mprc;
+        netdev->stats.collisions = adapter->stats.colc;
 
         /* Rx Errors */
 
@@ -3390,22 +3393,22 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
          * RLEC on some newer hardware can be incorrect so build
          * our own version based on RUC and ROC
          */
-        adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+        netdev->stats.rx_errors = adapter->stats.rxerrc +
                 adapter->stats.crcerrs + adapter->stats.algnerrc +
                 adapter->stats.ruc + adapter->stats.roc +
                 adapter->stats.cexterr;
-        adapter->net_stats.rx_length_errors = adapter->stats.ruc +
-                adapter->stats.roc;
-        adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-        adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
-        adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+        netdev->stats.rx_length_errors = adapter->stats.ruc +
+                adapter->stats.roc;
+        netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+        netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
+        netdev->stats.rx_missed_errors = adapter->stats.mpc;
 
         /* Tx Errors */
-        adapter->net_stats.tx_errors = adapter->stats.ecol +
+        netdev->stats.tx_errors = adapter->stats.ecol +
                 adapter->stats.latecol;
-        adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
-        adapter->net_stats.tx_window_errors = adapter->stats.latecol;
-        adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+        netdev->stats.tx_aborted_errors = adapter->stats.ecol;
+        netdev->stats.tx_window_errors = adapter->stats.latecol;
+        netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
 
         /* Tx Dropped needs to be maintained elsewhere */
 
@@ -3610,7 +3613,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                         case SPEED_100:
                                 txb2b = 0;
                                 netdev->tx_queue_len = 100;
-                                /* maybe add some timeout factor ? */
+                                adapter->tx_timeout_factor = 10;
                                 break;
                         }
 
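At 100 Mb/s the wire drains the Tx ring an order of magnitude more slowly than at 1 Gb/s, so without a scaling factor the hang detector could fire on a healthy but busy link. The factor set here is consumed by the check in the e1000_clean_tx_irq() hunk earlier in this patch:

        if (tx_ring->buffer_info[i].time_stamp &&
            time_after(jiffies, tx_ring->buffer_info[i].time_stamp
                       + (adapter->tx_timeout_factor * HZ)) &&
            !(er32(STATUS) & E1000_STATUS_TXOFF))
                schedule_work(&adapter->print_hang_task);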
@@ -3759,68 +3762,64 @@ static int e1000_tso(struct e1000_adapter *adapter,
         u8 ipcss, ipcso, tucss, tucso, hdr_len;
         int err;
 
-        if (skb_is_gso(skb)) {
-                if (skb_header_cloned(skb)) {
-                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-                        if (err)
-                                return err;
-                }
+        if (!skb_is_gso(skb))
+                return 0;
 
-                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-                mss = skb_shinfo(skb)->gso_size;
-                if (skb->protocol == htons(ETH_P_IP)) {
-                        struct iphdr *iph = ip_hdr(skb);
-                        iph->tot_len = 0;
-                        iph->check = 0;
-                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                                 iph->daddr, 0,
-                                                                 IPPROTO_TCP,
-                                                                 0);
-                        cmd_length = E1000_TXD_CMD_IP;
-                        ipcse = skb_transport_offset(skb) - 1;
-                } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
-                        ipv6_hdr(skb)->payload_len = 0;
-                        tcp_hdr(skb)->check =
-                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                 &ipv6_hdr(skb)->daddr,
-                                                 0, IPPROTO_TCP, 0);
-                        ipcse = 0;
-                }
-                ipcss = skb_network_offset(skb);
-                ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
-                tucss = skb_transport_offset(skb);
-                tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
-                tucse = 0;
+        if (skb_header_cloned(skb)) {
+                err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+                if (err)
+                        return err;
+        }
 
-                cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
-                               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+        hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+        mss = skb_shinfo(skb)->gso_size;
+        if (skb->protocol == htons(ETH_P_IP)) {
+                struct iphdr *iph = ip_hdr(skb);
+                iph->tot_len = 0;
+                iph->check = 0;
+                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                         0, IPPROTO_TCP, 0);
+                cmd_length = E1000_TXD_CMD_IP;
+                ipcse = skb_transport_offset(skb) - 1;
+        } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+                ipv6_hdr(skb)->payload_len = 0;
+                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                                       &ipv6_hdr(skb)->daddr,
+                                                       0, IPPROTO_TCP, 0);
+                ipcse = 0;
+        }
+        ipcss = skb_network_offset(skb);
+        ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+        tucss = skb_transport_offset(skb);
+        tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+        tucse = 0;
 
-                i = tx_ring->next_to_use;
-                context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
-                buffer_info = &tx_ring->buffer_info[i];
+        cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+                       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
 
-                context_desc->lower_setup.ip_fields.ipcss = ipcss;
-                context_desc->lower_setup.ip_fields.ipcso = ipcso;
-                context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
-                context_desc->upper_setup.tcp_fields.tucss = tucss;
-                context_desc->upper_setup.tcp_fields.tucso = tucso;
-                context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
-                context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
-                context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
-                context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+        i = tx_ring->next_to_use;
+        context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+        buffer_info = &tx_ring->buffer_info[i];
 
-                buffer_info->time_stamp = jiffies;
-                buffer_info->next_to_watch = i;
+        context_desc->lower_setup.ip_fields.ipcss = ipcss;
+        context_desc->lower_setup.ip_fields.ipcso = ipcso;
+        context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
+        context_desc->upper_setup.tcp_fields.tucss = tucss;
+        context_desc->upper_setup.tcp_fields.tucso = tucso;
+        context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+        context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
+        context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+        context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
-                i++;
-                if (i == tx_ring->count)
-                        i = 0;
-                tx_ring->next_to_use = i;
+        buffer_info->time_stamp = jiffies;
+        buffer_info->next_to_watch = i;
 
-                return 1;
-        }
+        i++;
+        if (i == tx_ring->count)
+                i = 0;
+        tx_ring->next_to_use = i;
 
-        return 0;
+        return 1;
 }
 
 static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
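The body of e1000_tso() is unchanged; the hunk only inverts the skb_is_gso() test into an early return and un-indents everything one level, which is what makes the diff look large. The return convention is preserved: 1 when a TSO context descriptor was queued, 0 when the frame needs no offload, and a negative errno from pskb_expand_head() on failure. A minimal sketch of the resulting shape:

        static int e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
        {
                if (!skb_is_gso(skb))
                        return 0;       /* nothing to offload */

                /* ... set up the TSO context descriptor ... */
                return 1;               /* context descriptor queued */
        }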
@@ -3892,23 +3891,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                         unsigned int mss)
 {
         struct e1000_ring *tx_ring = adapter->tx_ring;
+        struct pci_dev *pdev = adapter->pdev;
         struct e1000_buffer *buffer_info;
         unsigned int len = skb_headlen(skb);
-        unsigned int offset, size, count = 0, i;
+        unsigned int offset = 0, size, count = 0, i;
         unsigned int f;
-        dma_addr_t *map;
 
         i = tx_ring->next_to_use;
 
-        if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-                dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-                adapter->tx_dma_failed++;
-                return 0;
-        }
-
-        map = skb_shinfo(skb)->dma_maps;
-        offset = 0;
-
         while (len) {
                 buffer_info = &tx_ring->buffer_info[i];
                 size = min(len, max_per_txd);
@@ -3916,11 +3906,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                 buffer_info->length = size;
                 buffer_info->time_stamp = jiffies;
                 buffer_info->next_to_watch = i;
-                buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
-                count++;
+                buffer_info->dma = pci_map_single(pdev, skb->data + offset,
+                                                  size, PCI_DMA_TODEVICE);
+                buffer_info->mapped_as_page = false;
+                if (pci_dma_mapping_error(pdev, buffer_info->dma))
+                        goto dma_error;
 
                 len -= size;
                 offset += size;
+                count++;
 
                 if (len) {
                         i++;
@@ -3934,7 +3928,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 
                 frag = &skb_shinfo(skb)->frags[f];
                 len = frag->size;
-                offset = 0;
+                offset = frag->page_offset;
 
                 while (len) {
                         i++;
@@ -3947,7 +3941,12 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                         buffer_info->length = size;
                         buffer_info->time_stamp = jiffies;
                         buffer_info->next_to_watch = i;
-                        buffer_info->dma = map[f] + offset;
+                        buffer_info->dma = pci_map_page(pdev, frag->page,
+                                                        offset, size,
+                                                        PCI_DMA_TODEVICE);
+                        buffer_info->mapped_as_page = true;
+                        if (pci_dma_mapping_error(pdev, buffer_info->dma))
+                                goto dma_error;
 
                         len -= size;
                         offset += size;
@@ -3959,6 +3958,22 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
         tx_ring->buffer_info[first].next_to_watch = i;
 
         return count;
+
+dma_error:
+        dev_err(&pdev->dev, "TX DMA map failed\n");
+        buffer_info->dma = 0;
+        count--;
+
+        while (count >= 0) {
+                count--;
+                i--;
+                if (i < 0)
+                        i += tx_ring->count;
+                buffer_info = &tx_ring->buffer_info[i];
+                e1000_put_txbuf(adapter, buffer_info);
+        }
+
+        return 0;
 }
 
 static void e1000_tx_queue(struct e1000_adapter *adapter,
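With skb_dma_map()/skb_dma_unmap() gone, each Tx descriptor now carries enough state for e1000_put_txbuf() (reworked near the top of this patch) to undo its own mapping: the mapped length, the DMA handle, and whether it came from pci_map_single() or pci_map_page(). The bookkeeping this patch relies on in struct e1000_buffer, sketched with only the fields the hunks actually touch (the full definition lives in the driver headers):

        struct e1000_buffer {
                dma_addr_t dma;                 /* handle to unmap */
                unsigned long time_stamp;
                u16 next_to_watch;
                unsigned int length;            /* bytes mapped */
                bool mapped_as_page;            /* pci_map_page vs pci_map_single */
                struct sk_buff *skb;
                /* ... */
        };

A mapping failure mid-frame jumps to dma_error, which walks the already-mapped descriptors back around the ring and releases each one, so a half-mapped frame is never handed to the hardware.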
@@ -4031,8 +4046,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
         u16 length, offset;
 
         if (vlan_tx_tag_present(skb)) {
-                if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
-                    && (adapter->hw.mng_cookie.status &
+                if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+                    (adapter->hw.mng_cookie.status &
                         E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
                         return 0;
         }
@@ -4254,10 +4269,8 @@ static void e1000_reset_task(struct work_struct *work)
 **/
 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
 {
-        struct e1000_adapter *adapter = netdev_priv(netdev);
-
         /* only return the current stats */
-        return &adapter->net_stats;
+        return &netdev->stats;
 }
 
 /**
@@ -4288,8 +4301,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 
         while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                 msleep(1);
-        /* e1000e_down has a dependency on max_frame_size */
+        /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
         adapter->max_frame_size = max_frame;
+        e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+        netdev->mtu = new_mtu;
         if (netif_running(netdev))
                 e1000e_down(adapter);
 
@@ -4319,9 +4334,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
                                          + ETH_FCS_LEN;
 
-        e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
-        netdev->mtu = new_mtu;
-
         if (netif_running(netdev))
                 e1000e_up(adapter);
         else
@@ -4346,6 +4358,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                 data->phy_id = adapter->hw.phy.addr;
                 break;
         case SIOCGMIIREG:
+                e1000_phy_read_status(adapter);
+
                 switch (data->reg_num & 0x1F) {
                 case MII_BMCR:
                         data->val_out = adapter->phy_regs.bmcr;
@@ -4453,7 +4467,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
         e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
 
         /* activate PHY wakeup */
-        retval = hw->phy.ops.acquire_phy(hw);
+        retval = hw->phy.ops.acquire(hw);
         if (retval) {
                 e_err("Could not acquire PHY\n");
                 return retval;
@@ -4470,7 +4484,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
         if (retval)
                 e_err("Could not set PHY Host Wakeup bit\n");
 out:
-        hw->phy.ops.release_phy(hw);
+        hw->phy.ops.release(hw);
 
         return retval;
 }
@@ -5144,6 +5158,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
         INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
         INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
         INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
+        INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
 
         /* Initialize link parameters. User can change them with ethtool */
         adapter->hw.mac.autoneg = 1;
@@ -5267,19 +5282,24 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
         del_timer_sync(&adapter->watchdog_timer);
         del_timer_sync(&adapter->phy_info_timer);
 
+        cancel_work_sync(&adapter->reset_task);
+        cancel_work_sync(&adapter->watchdog_task);
+        cancel_work_sync(&adapter->downshift_task);
+        cancel_work_sync(&adapter->update_phy_task);
+        cancel_work_sync(&adapter->print_hang_task);
         flush_scheduled_work();
 
+        if (!(netdev->flags & IFF_UP))
+                e1000_power_down_phy(adapter);
+
+        unregister_netdev(netdev);
+
         /*
          * Release control of h/w to f/w.  If f/w is AMT enabled, this
          * would have already happened in close and is redundant.
          */
         e1000_release_hw_control(adapter);
 
-        unregister_netdev(netdev);
-
-        if (!e1000_check_reset_block(&adapter->hw))
-                e1000_phy_hw_reset(&adapter->hw);
-
         e1000e_reset_interrupt_capability(adapter);
         kfree(adapter->tx_ring);
         kfree(adapter->rx_ring);
@@ -5345,6 +5365,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
+        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
 
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
@@ -5398,12 +5419,10 @@ static int __init e1000_init_module(void)
         int ret;
         printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
                e1000e_driver_name, e1000e_driver_version);
-        printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
+        printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
                e1000e_driver_name);
         ret = pci_register_driver(&e1000_driver);
-        pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
-                               PM_QOS_DEFAULT_VALUE);
-
+
         return ret;
 }
 module_init(e1000_init_module);
@@ -5417,7 +5436,6 @@ module_init(e1000_init_module);
 static void __exit e1000_exit_module(void)
 {
         pci_unregister_driver(&e1000_driver);
-        pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
 }
 module_exit(e1000_exit_module);
 