Diffstat (limited to 'drivers/net/e1000e/netdev.c')
-rw-r--r--  drivers/net/e1000e/netdev.c | 679
1 file changed, 339 insertions(+), 340 deletions(-)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fad8f9ea0043..dbf81788bb40 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,7 @@
 #include <linux/netdevice.h>
 #include <linux/tcp.h>
 #include <linux/ipv6.h>
+#include <linux/slab.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
 #include <linux/mii.h>
@@ -65,17 +66,6 @@ static const struct e1000_info *e1000_info_tbl[] = {
 	[board_pchlan]		= &e1000_pch_info,
 };
 
-#ifdef DEBUG
-/**
- * e1000_get_hw_dev_name - return device name string
- * used by hardware layer to print debugging information
- **/
-char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
-{
-	return hw->adapter->netdev->name;
-}
-#endif
-
 /**
  * e1000_desc_unused - calculate if we have unused descriptors
  **/
@@ -167,7 +157,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+	unsigned int bufsz = adapter->rx_buffer_len;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -179,20 +169,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			goto map_skb;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (!skb) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 map_skb:
 		buffer_info->dma = pci_map_single(pdev, skb->data,
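
Note on the change above: netdev_alloc_skb_ip_align() folds the old
two-step allocation pattern into one call. A minimal sketch of the
equivalence (netdev and bufsz stand in for the driver's locals):

    /* old: over-allocate, then shift skb->data so the IP header is
     * 16-byte aligned once the 14-byte MAC header is stripped */
    skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
    if (skb)
        skb_reserve(skb, NET_IP_ALIGN);

    /* new: the helper reserves NET_IP_ALIGN bytes itself */
    skb = netdev_alloc_skb_ip_align(netdev, bufsz);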
@@ -284,21 +267,14 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 				cpu_to_le64(ps_page->dma);
 		}
 
-		skb = netdev_alloc_skb(netdev,
-				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(netdev,
+						adapter->rx_ps_bsize0);
 
 		if (!skb) {
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->dma = pci_map_single(pdev, skb->data,
 						  adapter->rx_ps_bsize0,
@@ -359,9 +335,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = 256 -
-			     16 /* for skb_reserve */ -
-			     NET_IP_ALIGN;
+	unsigned int bufsz = 256 - 16 /* for skb_reserve */;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -373,19 +347,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			goto check_page;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 check_page:
 		/* allocate a new page if necessary */
@@ -437,6 +405,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
 	struct e1000_rx_desc *rx_desc, *next_rxd;
 	struct e1000_buffer *buffer_info, *next_buffer;
@@ -482,14 +451,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		length = le16_to_cpu(rx_desc->length);
 
-		/* !EOP means multiple descriptors were used to store a single
-		 * packet, also make sure the frame isn't just CRC only */
-		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+		/*
+		 * !EOP means multiple descriptors were used to store a single
+		 * packet, if that's the case we need to toss it.  In fact, we
+		 * need to toss every packet with the EOP bit clear and the
+		 * next frame that _does_ have the EOP bit set, as it is by
+		 * definition only a frame fragment
+		 */
+		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+			adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
 			/* All receives must fit into a single buffer */
-			e_dbg("%s: Receive packet consumed multiple buffers\n",
-			      netdev->name);
+			e_dbg("Receive packet consumed multiple buffers\n");
 			/* recycle */
 			buffer_info->skb = skb;
+			if (status & E1000_RXD_STAT_EOP)
+				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 			goto next_desc;
 		}
 
@@ -513,9 +491,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		 */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+			    netdev_alloc_skb_ip_align(netdev, length);
 			if (new_skb) {
-				skb_reserve(new_skb, NET_IP_ALIGN);
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
 							       (skb->data -
@@ -560,33 +537,52 @@ next_desc:
 
 	adapter->total_rx_bytes += total_rx_bytes;
 	adapter->total_rx_packets += total_rx_packets;
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
+	netdev->stats.rx_bytes += total_rx_bytes;
+	netdev->stats.rx_packets += total_rx_packets;
 	return cleaned;
 }
 
 static void e1000_put_txbuf(struct e1000_adapter *adapter,
 			    struct e1000_buffer *buffer_info)
 {
-	buffer_info->dma = 0;
+	if (buffer_info->dma) {
+		if (buffer_info->mapped_as_page)
+			pci_unmap_page(adapter->pdev, buffer_info->dma,
+				       buffer_info->length, PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(adapter->pdev, buffer_info->dma,
+					 buffer_info->length,
+					 PCI_DMA_TODEVICE);
+		buffer_info->dma = 0;
+	}
 	if (buffer_info->skb) {
-		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
-			      DMA_TO_DEVICE);
 		dev_kfree_skb_any(buffer_info->skb);
 		buffer_info->skb = NULL;
 	}
 	buffer_info->time_stamp = 0;
 }
 
-static void e1000_print_tx_hang(struct e1000_adapter *adapter)
+static void e1000_print_hw_hang(struct work_struct *work)
 {
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     print_hang_task);
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	unsigned int i = tx_ring->next_to_clean;
 	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
 	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 phy_status, phy_1000t_status, phy_ext_status;
+	u16 pci_status;
+
+	e1e_rphy(hw, PHY_STATUS, &phy_status);
+	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
+	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
 
-	/* detected Tx unit hang */
-	e_err("Detected Tx Unit Hang:\n"
+	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
+
+	/* detected Hardware unit hang */
+	e_err("Detected Hardware Unit Hang:\n"
 	      " TDH <%x>\n"
 	      " TDT <%x>\n"
 	      " next_to_use <%x>\n"
@@ -595,7 +591,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
595 " time_stamp <%lx>\n" 591 " time_stamp <%lx>\n"
596 " next_to_watch <%x>\n" 592 " next_to_watch <%x>\n"
597 " jiffies <%lx>\n" 593 " jiffies <%lx>\n"
598 " next_to_watch.status <%x>\n", 594 " next_to_watch.status <%x>\n"
595 "MAC Status <%x>\n"
596 "PHY Status <%x>\n"
597 "PHY 1000BASE-T Status <%x>\n"
598 "PHY Extended Status <%x>\n"
599 "PCI Status <%x>\n",
599 readl(adapter->hw.hw_addr + tx_ring->head), 600 readl(adapter->hw.hw_addr + tx_ring->head),
600 readl(adapter->hw.hw_addr + tx_ring->tail), 601 readl(adapter->hw.hw_addr + tx_ring->tail),
601 tx_ring->next_to_use, 602 tx_ring->next_to_use,
@@ -603,7 +604,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
 	      tx_ring->buffer_info[eop].time_stamp,
 	      eop,
 	      jiffies,
-	      eop_desc->upper.fields.status);
+	      eop_desc->upper.fields.status,
+	      er32(STATUS),
+	      phy_status,
+	      phy_1000t_status,
+	      phy_ext_status,
+	      pci_status);
 }
 
 /**
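
Note on e1000_print_hw_hang(): reading PHY registers over MDIO may
sleep, which would not be safe in the atomic context where the Tx hang
is detected, so the report is deferred to process context through a
work item. A sketch of the pattern as this patch wires it up:

    /* probe: bind the work item to its handler */
    INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

    /* Tx cleanup (atomic context): only queue the work */
    schedule_work(&adapter->print_hang_task);

    /* handler: recover the adapter from the embedded work_struct */
    struct e1000_adapter *adapter =
        container_of(work, struct e1000_adapter, print_hang_task);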
@@ -655,6 +661,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 			i = 0;
 		}
 
+		if (i == tx_ring->next_to_use)
+			break;
 		eop = tx_ring->buffer_info[i].next_to_watch;
 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
 	}
@@ -677,21 +685,23 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 	}
 
 	if (adapter->detect_tx_hung) {
-		/* Detect a transmit hang in hardware, this serializes the
-		 * check with the clearing of time_stamp and movement of i */
+		/*
+		 * Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i
+		 */
 		adapter->detect_tx_hung = 0;
 		if (tx_ring->buffer_info[i].time_stamp &&
 		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
-			       + (adapter->tx_timeout_factor * HZ))
-		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
-			e1000_print_tx_hang(adapter);
+			       + (adapter->tx_timeout_factor * HZ)) &&
+		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+			schedule_work(&adapter->print_hang_task);
 			netif_stop_queue(netdev);
 		}
 	}
 	adapter->total_tx_bytes += total_tx_bytes;
 	adapter->total_tx_packets += total_tx_packets;
-	adapter->net_stats.tx_bytes += total_tx_bytes;
-	adapter->net_stats.tx_packets += total_tx_packets;
+	netdev->stats.tx_bytes += total_tx_bytes;
+	netdev->stats.tx_packets += total_tx_packets;
 	return (count < tx_ring->count);
 }
 
@@ -705,6 +715,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				  int *work_done, int work_to_do)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
@@ -747,10 +758,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				 PCI_DMA_FROMDEVICE);
 		buffer_info->dma = 0;
 
-		if (!(staterr & E1000_RXD_STAT_EOP)) {
-			e_dbg("%s: Packet Split buffers didn't pick up the "
-			      "full packet\n", netdev->name);
+		/* see !EOP comment in other rx routine */
+		if (!(staterr & E1000_RXD_STAT_EOP))
+			adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
+			e_dbg("Packet Split buffers didn't pick up the full "
+			      "packet\n");
 			dev_kfree_skb_irq(skb);
+			if (staterr & E1000_RXD_STAT_EOP)
+				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 			goto next_desc;
 		}
 
@@ -762,8 +779,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		length = le16_to_cpu(rx_desc->wb.middle.length0);
 
 		if (!length) {
-			e_dbg("%s: Last part of the packet spanning multiple "
-			      "descriptors\n", netdev->name);
+			e_dbg("Last part of the packet spanning multiple "
+			      "descriptors\n");
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
 		}
@@ -871,8 +888,8 @@ next_desc:
 
 	adapter->total_rx_bytes += total_rx_bytes;
 	adapter->total_rx_packets += total_rx_packets;
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
+	netdev->stats.rx_bytes += total_rx_bytes;
+	netdev->stats.rx_packets += total_rx_packets;
 	return cleaned;
 }
 
@@ -1051,8 +1068,8 @@ next_desc:
 
 	adapter->total_rx_bytes += total_rx_bytes;
 	adapter->total_rx_packets += total_rx_packets;
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
+	netdev->stats.rx_bytes += total_rx_bytes;
+	netdev->stats.rx_packets += total_rx_packets;
 	return cleaned;
 }
 
@@ -1120,6 +1137,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
+	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 
 	writel(0, adapter->hw.hw_addr + rx_ring->head);
 	writel(0, adapter->hw.hw_addr + rx_ring->tail);
@@ -1199,7 +1217,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl, icr = er32(ICR);
 
-	if (!icr)
+	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
 		return IRQ_NONE;  /* Not our interrupt */
 
 	/*
@@ -1481,7 +1499,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
 	else
 		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  &e1000_intr_msix_rx, 0, adapter->rx_ring->name,
+			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
 			  netdev);
 	if (err)
 		goto out;
@@ -1494,7 +1512,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
 	else
 		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  &e1000_intr_msix_tx, 0, adapter->tx_ring->name,
+			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
 			  netdev);
 	if (err)
 		goto out;
@@ -1503,7 +1521,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
 	vector++;
 
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  &e1000_msix_other, 0, netdev->name, netdev);
+			  e1000_msix_other, 0, netdev->name, netdev);
 	if (err)
 		goto out;
 
@@ -1534,7 +1552,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
 		e1000e_set_interrupt_capability(adapter);
 	}
 	if (adapter->flags & FLAG_MSI_ENABLED) {
-		err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0,
+		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
 				  netdev->name, netdev);
 		if (!err)
 			return err;
@@ -1544,7 +1562,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
 		adapter->int_mode = E1000E_INT_MODE_LEGACY;
 	}
 
-	err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED,
+	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
 			  netdev->name, netdev);
 	if (err)
 		e_err("Unable to allocate interrupt, Error: %d\n", err);
@@ -2040,11 +2058,14 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
 	    (vid == adapter->mng_vlan_id))
 		return;
+
 	/* add VID to filter table */
-	index = (vid >> 5) & 0x7F;
-	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
-	vfta |= (1 << (vid & 0x1F));
-	e1000e_write_vfta(hw, index, vfta);
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+		index = (vid >> 5) & 0x7F;
+		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+		vfta |= (1 << (vid & 0x1F));
+		hw->mac.ops.write_vfta(hw, index, vfta);
+	}
 }
 
 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -2069,10 +2090,12 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	}
 
 	/* remove VID from filter table */
-	index = (vid >> 5) & 0x7F;
-	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
-	vfta &= ~(1 << (vid & 0x1F));
-	e1000e_write_vfta(hw, index, vfta);
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+		index = (vid >> 5) & 0x7F;
+		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+		vfta &= ~(1 << (vid & 0x1F));
+		hw->mac.ops.write_vfta(hw, index, vfta);
+	}
 }
 
 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
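
Note on the VFTA arithmetic: the VLAN Filter Table Array is 128 32-bit
registers, so the 12-bit VLAN ID splits into a register index (upper
bits) and a bit position (lower 5 bits). Worked example for vid = 100:

    index = (100 >> 5) & 0x7F;   /* = 3, so VFTA[3] is touched    */
    bit   = 100 & 0x1F;          /* = 4, so the mask is (1 << 4)  */
    /* add_vid ORs the mask in; kill_vid clears it with &= ~mask  */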
@@ -2269,8 +2292,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 	ew32(TCTL, tctl);
 
 	e1000e_config_collision_dist(hw);
-
-	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
 }
 
 /**
@@ -2330,18 +2351,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		rctl &= ~E1000_RCTL_SZ_4096;
 		rctl |= E1000_RCTL_BSEX;
 		switch (adapter->rx_buffer_len) {
-		case 256:
-			rctl |= E1000_RCTL_SZ_256;
-			rctl &= ~E1000_RCTL_BSEX;
-			break;
-		case 512:
-			rctl |= E1000_RCTL_SZ_512;
-			rctl &= ~E1000_RCTL_BSEX;
-			break;
-		case 1024:
-			rctl |= E1000_RCTL_SZ_1024;
-			rctl &= ~E1000_RCTL_BSEX;
-			break;
 		case 2048:
 		default:
 			rctl |= E1000_RCTL_SZ_2048;
@@ -2464,8 +2473,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	ew32(ITR, 1000000000 / (adapter->itr * 256));
 
 	ctrl_ext = er32(CTRL_EXT);
-	/* Reset delay timers after every interrupt */
-	ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
 	/* Auto-Mask interrupts upon ICR access */
 	ctrl_ext |= E1000_CTRL_EXT_IAME;
 	ew32(IAM, 0xffffffff);
@@ -2507,21 +2514,23 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	 * packet size is equal or larger than the specified value (in 8 byte
 	 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
 	 */
-	if ((adapter->flags & FLAG_HAS_ERT) &&
-	    (adapter->netdev->mtu > ETH_DATA_LEN)) {
-		u32 rxdctl = er32(RXDCTL(0));
-		ew32(RXDCTL(0), rxdctl | 0x3);
-		ew32(ERT, E1000_ERT_2048 | (1 << 13));
-		/*
-		 * With jumbo frames and early-receive enabled, excessive
-		 * C4->C2 latencies result in dropped transactions.
-		 */
-		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
-					  e1000e_driver_name, 55);
-	} else {
-		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
-					  e1000e_driver_name,
-					  PM_QOS_DEFAULT_VALUE);
+	if (adapter->flags & FLAG_HAS_ERT) {
+		if (adapter->netdev->mtu > ETH_DATA_LEN) {
+			u32 rxdctl = er32(RXDCTL(0));
+			ew32(RXDCTL(0), rxdctl | 0x3);
+			ew32(ERT, E1000_ERT_2048 | (1 << 13));
+			/*
+			 * With jumbo frames and early-receive enabled,
+			 * excessive C-state transition latencies result in
+			 * dropped transactions.
+			 */
+			pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+						  adapter->netdev->name, 55);
+		} else {
+			pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+						  adapter->netdev->name,
+						  PM_QOS_DEFAULT_VALUE);
+		}
 	}
 
 	/* Enable Receives */
@@ -2533,22 +2542,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
  * @hw: pointer to the HW structure
  * @mc_addr_list: array of multicast addresses to program
  * @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
  *
- * Updates the Receive Address Registers and Multicast Table Array.
+ * Updates the Multicast Table Array.
  * The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this.  Currently no func pointer
- * exists and all implementations are handled in the generic version of this
- * function.
 **/
 static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
-				      u32 mc_addr_count, u32 rar_used_count,
-				      u32 rar_count)
+				      u32 mc_addr_count)
 {
-	hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
-					rar_used_count, rar_count);
+	hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
 }
 
2554/** 2555/**
@@ -2564,7 +2565,6 @@ static void e1000_set_multi(struct net_device *netdev)
2564{ 2565{
2565 struct e1000_adapter *adapter = netdev_priv(netdev); 2566 struct e1000_adapter *adapter = netdev_priv(netdev);
2566 struct e1000_hw *hw = &adapter->hw; 2567 struct e1000_hw *hw = &adapter->hw;
2567 struct e1000_mac_info *mac = &hw->mac;
2568 struct dev_mc_list *mc_ptr; 2568 struct dev_mc_list *mc_ptr;
2569 u8 *mta_list; 2569 u8 *mta_list;
2570 u32 rctl; 2570 u32 rctl;
@@ -2590,31 +2590,25 @@ static void e1000_set_multi(struct net_device *netdev)
 
 	ew32(RCTL, rctl);
 
-	if (netdev->mc_count) {
-		mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
+	if (!netdev_mc_empty(netdev)) {
+		mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
 		if (!mta_list)
 			return;
 
 		/* prepare a packed array of only addresses. */
-		mc_ptr = netdev->mc_list;
-
-		for (i = 0; i < netdev->mc_count; i++) {
-			if (!mc_ptr)
-				break;
-			memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
-			       ETH_ALEN);
-			mc_ptr = mc_ptr->next;
-		}
+		i = 0;
+		netdev_for_each_mc_addr(mc_ptr, netdev)
+			memcpy(mta_list + (i++ * ETH_ALEN),
+			       mc_ptr->dmi_addr, ETH_ALEN);
 
-		e1000_update_mc_addr_list(hw, mta_list, i, 1,
-					  mac->rar_entry_count);
+		e1000_update_mc_addr_list(hw, mta_list, i);
 		kfree(mta_list);
 	} else {
 		/*
 		 * if we're called from probe, we might not have
 		 * anything to do here, so clear out the list
 		 */
-		e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count);
+		e1000_update_mc_addr_list(hw, NULL, 0);
 	}
 }
 
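
Note on the multicast rewrite: the open-coded walk of netdev->mc_list
is replaced by the netdev_mc_* accessors, which hide the list layout.
A sketch of the new iteration pattern used above:

    struct dev_mc_list *mc_ptr;
    int i = 0;

    if (!netdev_mc_empty(netdev)) {          /* anything to program? */
        netdev_for_each_mc_addr(mc_ptr, netdev)
            memcpy(mta_list + (i++ * ETH_ALEN),
                   mc_ptr->dmi_addr, ETH_ALEN);
    }

The manual NULL check and mc_ptr = mc_ptr->next bookkeeping disappear
because the iterator owns the traversal.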
@@ -2645,18 +2639,8 @@ static void e1000_configure(struct e1000_adapter *adapter)
 **/
 void e1000e_power_up_phy(struct e1000_adapter *adapter)
 {
-	u16 mii_reg = 0;
-
-	/* Just clear the power down bit to wake the phy back up */
-	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
-		/*
-		 * According to the manual, the phy will retain its
-		 * settings across a power-down/up cycle
-		 */
-		e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
-		mii_reg &= ~MII_CR_POWER_DOWN;
-		e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
-	}
+	if (adapter->hw.phy.ops.power_up)
+		adapter->hw.phy.ops.power_up(&adapter->hw);
 
 	adapter->hw.mac.ops.setup_link(&adapter->hw);
 }
@@ -2664,35 +2648,17 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
 /**
  * e1000_power_down_phy - Power down the PHY
  *
- * Power down the PHY so no link is implied when interface is down
- * The PHY cannot be powered down is management or WoL is active
+ * Power down the PHY so no link is implied when interface is down.
+ * The PHY cannot be powered down if management or WoL is active.
  */
 static void e1000_power_down_phy(struct e1000_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
-	u16 mii_reg;
-
 	/* WoL is enabled */
 	if (adapter->wol)
 		return;
 
-	/* non-copper PHY? */
-	if (adapter->hw.phy.media_type != e1000_media_type_copper)
-		return;
-
-	/* reset is blocked because of a SoL/IDER session */
-	if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw))
-		return;
-
-	/* manageability (AMT) is enabled */
-	if (er32(MANC) & E1000_MANC_SMBUS_EN)
-		return;
-
-	/* power down the PHY */
-	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
-	mii_reg |= MII_CR_POWER_DOWN;
-	e1e_wphy(hw, PHY_CONTROL, mii_reg);
-	mdelay(1);
+	if (adapter->hw.phy.ops.power_down)
+		adapter->hw.phy.ops.power_down(&adapter->hw);
 }
 
 /**
@@ -2856,6 +2822,12 @@ int e1000e_up(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 
+	/* DMA latency requirement to workaround early-receive/jumbo issue */
+	if (adapter->flags & FLAG_HAS_ERT)
+		pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
+				       adapter->netdev->name,
+				       PM_QOS_DEFAULT_VALUE);
+
 	/* hardware has been reset, we need to reload some things */
 	e1000_configure(adapter);
 
@@ -2906,7 +2878,6 @@ void e1000e_down(struct e1000_adapter *adapter)
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
 	netif_carrier_off(netdev);
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
@@ -2916,6 +2887,10 @@ void e1000e_down(struct e1000_adapter *adapter)
 	e1000_clean_tx_ring(adapter);
 	e1000_clean_rx_ring(adapter);
 
+	if (adapter->flags & FLAG_HAS_ERT)
+		pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
+					  adapter->netdev->name);
+
 	/*
 	 * TODO: for power management, we could drop the link and
 	 * pci_disable_device here.
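
Note on the PM QoS change: the CPU DMA latency requirement now follows
the interface lifetime instead of the module lifetime, and is keyed by
the netdev name rather than the driver name. Its lifecycle under the
string-keyed pm_qos API this patch uses:

    /* ifup (e1000e_up): register, initially at the default value */
    pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
                           adapter->netdev->name, PM_QOS_DEFAULT_VALUE);

    /* jumbo MTU + early-receive (e1000_configure_rx): cap latency,
     * value in microseconds */
    pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
                              adapter->netdev->name, 55);

    /* ifdown (e1000e_down): drop the requirement again */
    pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
                              adapter->netdev->name);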
@@ -2973,7 +2948,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = er32(ICR);
 
-	e_dbg("%s: icr is %08X\n", netdev->name, icr);
+	e_dbg("icr is %08X\n", icr);
 	if (icr & E1000_ICR_RXSEQ) {
 		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
 		wmb();
@@ -3010,7 +2985,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
 	if (err)
 		goto msi_test_failed;
 
-	err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0,
+	err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
 			  netdev->name, netdev);
 	if (err) {
 		pci_disable_msi(adapter->pdev);
@@ -3043,7 +3018,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
 		goto msi_test_failed;
 
 	/* okay so the test worked, restore settings */
-	e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
+	e_dbg("MSI interrupt test succeeded!\n");
 msi_test_failed:
 	e1000e_set_interrupt_capability(adapter);
 	e1000_request_irq(adapter);
@@ -3304,6 +3279,7 @@ static void e1000_update_phy_info(unsigned long data)
 **/
 void e1000e_update_stats(struct e1000_adapter *adapter)
 {
+	struct net_device *netdev = adapter->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	u16 phy_data;
@@ -3329,24 +3305,24 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
-		adapter->stats.scc += phy_data;
+		if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
+			adapter->stats.scc += phy_data;
 
 		e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
-		e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
-		adapter->stats.ecol += phy_data;
+		if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
+			adapter->stats.ecol += phy_data;
 
 		e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
-		adapter->stats.mcc += phy_data;
+		if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
+			adapter->stats.mcc += phy_data;
 
 		e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
-		e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
-		adapter->stats.latecol += phy_data;
+		if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
+			adapter->stats.latecol += phy_data;
 
 		e1e_rphy(hw, HV_DC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_DC_LOWER, &phy_data);
-		adapter->stats.dc += phy_data;
+		if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
+			adapter->stats.dc += phy_data;
 	} else {
 		adapter->stats.scc += er32(SCC);
 		adapter->stats.ecol += er32(ECOL);
@@ -3374,8 +3350,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
-		hw->mac.collision_delta = phy_data;
+		if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
+			hw->mac.collision_delta = phy_data;
 	} else {
 		hw->mac.collision_delta = er32(COLC);
 	}
@@ -3386,8 +3362,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
-		e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
-		adapter->stats.tncrs += phy_data;
+		if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
+			adapter->stats.tncrs += phy_data;
 	} else {
 		if ((hw->mac.type != e1000_82574) &&
 		    (hw->mac.type != e1000_82583))
@@ -3398,8 +3374,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 	adapter->stats.tsctfc += er32(TSCTFC);
 
 	/* Fill out the OS statistics structure */
-	adapter->net_stats.multicast = adapter->stats.mprc;
-	adapter->net_stats.collisions = adapter->stats.colc;
+	netdev->stats.multicast = adapter->stats.mprc;
+	netdev->stats.collisions = adapter->stats.colc;
 
 	/* Rx Errors */
 
@@ -3407,22 +3383,22 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 	 * RLEC on some newer hardware can be incorrect so build
 	 * our own version based on RUC and ROC
 	 */
-	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+	netdev->stats.rx_errors = adapter->stats.rxerrc +
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
 		adapter->stats.ruc + adapter->stats.roc +
 		adapter->stats.cexterr;
-	adapter->net_stats.rx_length_errors = adapter->stats.ruc +
+	netdev->stats.rx_length_errors = adapter->stats.ruc +
 		adapter->stats.roc;
-	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
-	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
+	netdev->stats.rx_missed_errors = adapter->stats.mpc;
 
 	/* Tx Errors */
-	adapter->net_stats.tx_errors = adapter->stats.ecol +
+	netdev->stats.tx_errors = adapter->stats.ecol +
 		adapter->stats.latecol;
-	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
-	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
-	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
+	netdev->stats.tx_window_errors = adapter->stats.latecol;
+	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
 
 	/* Tx Dropped needs to be maintained elsewhere */
 
@@ -3491,7 +3467,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
 	       ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
 }
 
-bool e1000_has_link(struct e1000_adapter *adapter)
+bool e1000e_has_link(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	bool link_active = 0;
@@ -3572,7 +3548,7 @@ static void e1000_watchdog_task(struct work_struct *work)
 	u32 link, tctl;
 	int tx_pending = 0;
 
-	link = e1000_has_link(adapter);
+	link = e1000e_has_link(adapter);
 	if ((netif_carrier_ok(netdev)) && link) {
 		e1000e_enable_receives(adapter);
 		goto link_up;
@@ -3612,21 +3588,15 @@ static void e1000_watchdog_task(struct work_struct *work)
3612 "link gets many collisions.\n"); 3588 "link gets many collisions.\n");
3613 } 3589 }
3614 3590
3615 /* 3591 /* adjust timeout factor according to speed/duplex */
3616 * tweak tx_queue_len according to speed/duplex
3617 * and adjust the timeout factor
3618 */
3619 netdev->tx_queue_len = adapter->tx_queue_len;
3620 adapter->tx_timeout_factor = 1; 3592 adapter->tx_timeout_factor = 1;
3621 switch (adapter->link_speed) { 3593 switch (adapter->link_speed) {
3622 case SPEED_10: 3594 case SPEED_10:
3623 txb2b = 0; 3595 txb2b = 0;
3624 netdev->tx_queue_len = 10;
3625 adapter->tx_timeout_factor = 16; 3596 adapter->tx_timeout_factor = 16;
3626 break; 3597 break;
3627 case SPEED_100: 3598 case SPEED_100:
3628 txb2b = 0; 3599 txb2b = 0;
3629 netdev->tx_queue_len = 100;
3630 adapter->tx_timeout_factor = 10; 3600 adapter->tx_timeout_factor = 10;
3631 break; 3601 break;
3632 } 3602 }
@@ -3776,68 +3746,64 @@ static int e1000_tso(struct e1000_adapter *adapter,
 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
 	int err;
 
-	if (skb_is_gso(skb)) {
-		if (skb_header_cloned(skb)) {
-			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-			if (err)
-				return err;
-		}
+	if (!skb_is_gso(skb))
+		return 0;
 
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-		mss = skb_shinfo(skb)->gso_size;
-		if (skb->protocol == htons(ETH_P_IP)) {
-			struct iphdr *iph = ip_hdr(skb);
-			iph->tot_len = 0;
-			iph->check = 0;
-			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-								 iph->daddr, 0,
-								 IPPROTO_TCP,
-								 0);
-			cmd_length = E1000_TXD_CMD_IP;
-			ipcse = skb_transport_offset(skb) - 1;
-		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
-			ipv6_hdr(skb)->payload_len = 0;
-			tcp_hdr(skb)->check =
-				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-						 &ipv6_hdr(skb)->daddr,
-						 0, IPPROTO_TCP, 0);
-			ipcse = 0;
-		}
-		ipcss = skb_network_offset(skb);
-		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
-		tucss = skb_transport_offset(skb);
-		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
-		tucse = 0;
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err)
+			return err;
+	}
 
-		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
-			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	mss = skb_shinfo(skb)->gso_size;
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+		iph->tot_len = 0;
+		iph->check = 0;
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 0, IPPROTO_TCP, 0);
+		cmd_length = E1000_TXD_CMD_IP;
+		ipcse = skb_transport_offset(skb) - 1;
+	} else if (skb_is_gso_v6(skb)) {
+		ipv6_hdr(skb)->payload_len = 0;
+		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						       &ipv6_hdr(skb)->daddr,
+						       0, IPPROTO_TCP, 0);
+		ipcse = 0;
+	}
+	ipcss = skb_network_offset(skb);
+	ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+	tucss = skb_transport_offset(skb);
+	tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+	tucse = 0;
 
-		i = tx_ring->next_to_use;
-		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
-		buffer_info = &tx_ring->buffer_info[i];
+	cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+		       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
 
-		context_desc->lower_setup.ip_fields.ipcss = ipcss;
-		context_desc->lower_setup.ip_fields.ipcso = ipcso;
-		context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
-		context_desc->upper_setup.tcp_fields.tucss = tucss;
-		context_desc->upper_setup.tcp_fields.tucso = tucso;
-		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
-		context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
-		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
-		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+	i = tx_ring->next_to_use;
+	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+	buffer_info = &tx_ring->buffer_info[i];
 
-		buffer_info->time_stamp = jiffies;
-		buffer_info->next_to_watch = i;
+	context_desc->lower_setup.ip_fields.ipcss = ipcss;
+	context_desc->lower_setup.ip_fields.ipcso = ipcso;
+	context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
+	context_desc->upper_setup.tcp_fields.tucss = tucss;
+	context_desc->upper_setup.tcp_fields.tucso = tucso;
+	context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+	context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
+	context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+	context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
 
-		return 1;
-	}
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+	tx_ring->next_to_use = i;
 
-	return 0;
+	return 1;
 }
 
 static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
@@ -3909,23 +3875,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 			unsigned int mss)
 {
 	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_buffer *buffer_info;
 	unsigned int len = skb_headlen(skb);
-	unsigned int offset, size, count = 0, i;
+	unsigned int offset = 0, size, count = 0, i;
 	unsigned int f;
-	dma_addr_t *map;
 
 	i = tx_ring->next_to_use;
 
-	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-		adapter->tx_dma_failed++;
-		return 0;
-	}
-
-	map = skb_shinfo(skb)->dma_maps;
-	offset = 0;
-
 	while (len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, max_per_txd);
@@ -3933,11 +3890,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 		buffer_info->length = size;
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
-		buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
-		count++;
+		buffer_info->dma = pci_map_single(pdev, skb->data + offset,
+						  size, PCI_DMA_TODEVICE);
+		buffer_info->mapped_as_page = false;
+		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+			goto dma_error;
 
 		len -= size;
 		offset += size;
+		count++;
 
 		if (len) {
 			i++;
@@ -3951,7 +3912,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 
 		frag = &skb_shinfo(skb)->frags[f];
 		len = frag->size;
-		offset = 0;
+		offset = frag->page_offset;
 
 		while (len) {
 			i++;
@@ -3964,7 +3925,12 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 			buffer_info->length = size;
 			buffer_info->time_stamp = jiffies;
 			buffer_info->next_to_watch = i;
-			buffer_info->dma = map[f] + offset;
+			buffer_info->dma = pci_map_page(pdev, frag->page,
+							offset, size,
+							PCI_DMA_TODEVICE);
+			buffer_info->mapped_as_page = true;
+			if (pci_dma_mapping_error(pdev, buffer_info->dma))
+				goto dma_error;
 
 			len -= size;
 			offset += size;
@@ -3976,6 +3942,22 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 	tx_ring->buffer_info[first].next_to_watch = i;
 
 	return count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+	buffer_info->dma = 0;
+	if (count)
+		count--;
+
+	while (count--) {
+		if (i == 0)
+			i += tx_ring->count;
+		i--;
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_put_txbuf(adapter, buffer_info);
+	}
+
+	return 0;
 }
 
 static void e1000_tx_queue(struct e1000_adapter *adapter,
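
Note on the dma_error unwind: because the descriptor ring is circular,
stepping the index backwards has to wrap from slot 0 to the last slot.
The decrement used above is equivalent to:

    /* step back one slot on a ring of tx_ring->count entries */
    i = (i == 0) ? tx_ring->count - 1 : i - 1;
    e1000_put_txbuf(adapter, &tx_ring->buffer_info[i]);

e1000_put_txbuf() does the actual cleanup, using mapped_as_page to pick
pci_unmap_page() or pci_unmap_single() for each descriptor.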
@@ -4048,8 +4030,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
 	u16 length, offset;
 
 	if (vlan_tx_tag_present(skb)) {
-		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
-		    && (adapter->hw.mng_cookie.status &
+		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+		      (adapter->hw.mng_cookie.status &
 		      E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
 			return 0;
 	}
@@ -4271,10 +4253,8 @@ static void e1000_reset_task(struct work_struct *work)
 **/
 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
 {
-	struct e1000_adapter *adapter = netdev_priv(netdev);
-
 	/* only return the current stats */
-	return &adapter->net_stats;
+	return &netdev->stats;
 }
 
 /**
@@ -4303,6 +4283,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 		return -EINVAL;
 	}
 
+	/* 82573 Errata 17 */
+	if (((adapter->hw.mac.type == e1000_82573) ||
+	     (adapter->hw.mac.type == e1000_82574)) &&
+	    (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
+		adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
+		e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
+	}
+
 	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
 		msleep(1);
 	/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -4321,13 +4309,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	 * fragmented skbs
 	 */
 
-	if (max_frame <= 256)
-		adapter->rx_buffer_len = 256;
-	else if (max_frame <= 512)
-		adapter->rx_buffer_len = 512;
-	else if (max_frame <= 1024)
-		adapter->rx_buffer_len = 1024;
-	else if (max_frame <= 2048)
+	if (max_frame <= 2048)
 		adapter->rx_buffer_len = 2048;
 	else
 		adapter->rx_buffer_len = 4096;
@@ -4362,6 +4344,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 		data->phy_id = adapter->hw.phy.addr;
 		break;
 	case SIOCGMIIREG:
+		e1000_phy_read_status(adapter);
+
 		switch (data->reg_num & 0x1F) {
 		case MII_BMCR:
 			data->val_out = adapter->phy_regs.bmcr;
@@ -4469,7 +4453,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
 	e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
 
 	/* activate PHY wakeup */
-	retval = hw->phy.ops.acquire_phy(hw);
+	retval = hw->phy.ops.acquire(hw);
 	if (retval) {
 		e_err("Could not acquire PHY\n");
 		return retval;
@@ -4486,7 +4470,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
 	if (retval)
 		e_err("Could not set PHY Host Wakeup bit\n");
 out:
-	hw->phy.ops.release_phy(hw);
+	hw->phy.ops.release(hw);
 
 	return retval;
 }
@@ -4543,7 +4527,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	    e1000_media_type_internal_serdes) {
 		/* keep the laser running in D3 */
 		ctrl_ext = er32(CTRL_EXT);
-		ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
+		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
 		ew32(CTRL_EXT, ctrl_ext);
 	}
 
@@ -4629,29 +4613,42 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
 	}
 }
 
-static void e1000e_disable_l1aspm(struct pci_dev *pdev)
+#ifdef CONFIG_PCIEASPM
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+	pci_disable_link_state(pdev, state);
+}
+#else
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
 {
 	int pos;
-	u16 val;
+	u16 reg16;
 
 	/*
-	 * 82573 workaround - disable L1 ASPM on mobile chipsets
-	 *
-	 * L1 ASPM on various mobile (ich7) chipsets do not behave properly
-	 * resulting in lost data or garbage information on the pci-e link
-	 * level. This could result in (false) bad EEPROM checksum errors,
-	 * long ping times (up to 2s) or even a system freeze/hang.
-	 *
-	 * Unfortunately this feature saves about 1W power consumption when
-	 * active.
+	 * Both device and parent should have the same ASPM setting.
+	 * Disable ASPM in downstream component first and then upstream.
 	 */
-	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
-	if (val & 0x2) {
-		dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
-		val &= ~0x2;
-		pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val);
-	}
+	pos = pci_pcie_cap(pdev);
+	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+	reg16 &= ~state;
+	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+
+	if (!pdev->bus->self)
+		return;
+
+	pos = pci_pcie_cap(pdev->bus->self);
+	pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
+	reg16 &= ~state;
+	pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
+}
+#endif
+void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+		 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
+		 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
+
+	__e1000e_disable_aspm(pdev, state);
 }
 
 #ifdef CONFIG_PM
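
Note on __e1000e_disable_aspm(): with CONFIG_PCIEASPM the core helper
pci_disable_link_state() is used; otherwise the ASPM enable bits are
cleared by hand in the PCIe Link Control register, downstream device
first, then its upstream bridge. The mask arithmetic works because the
PCIE_LINK_STATE_* values mirror the LNKCTL bit layout (L0s = bit 0,
L1 = bit 1), e.g.:

    u16 reg16 = 0x0003;              /* LNKCTL[1:0]: L0s and L1 on */
    reg16 &= ~PCIE_LINK_STATE_L1;    /* 0x0003 & ~0x0002 = 0x0001  */
                                     /* -> only L0s stays enabled  */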
@@ -4676,7 +4673,9 @@ static int e1000_resume(struct pci_dev *pdev)
4676 4673
4677 pci_set_power_state(pdev, PCI_D0); 4674 pci_set_power_state(pdev, PCI_D0);
4678 pci_restore_state(pdev); 4675 pci_restore_state(pdev);
4679 e1000e_disable_l1aspm(pdev); 4676 pci_save_state(pdev);
4677 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
4678 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4680 4679
4681 err = pci_enable_device_mem(pdev); 4680 err = pci_enable_device_mem(pdev);
4682 if (err) { 4681 if (err) {
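
Note that the resume path now calls pci_save_state() immediately after pci_restore_state(). On kernels of this vintage a restore consumed the saved copy (the device's state_saved flag was cleared), so re-saving keeps a valid snapshot around for the next suspend or for error recovery; the e1000_io_slot_reset() hunk below applies the same idiom. The pattern, reduced to its three steps:

	pci_set_power_state(pdev, PCI_D0);	/* back to full power */
	pci_restore_state(pdev);	/* write saved config space to the device */
	pci_save_state(pdev);		/* re-arm: the restore consumed the copy */
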
@@ -4818,7 +4817,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4818 int err; 4817 int err;
4819 pci_ers_result_t result; 4818 pci_ers_result_t result;
4820 4819
4821 e1000e_disable_l1aspm(pdev); 4820 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
4821 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4822 err = pci_enable_device_mem(pdev); 4822 err = pci_enable_device_mem(pdev);
4823 if (err) { 4823 if (err) {
4824 dev_err(&pdev->dev, 4824 dev_err(&pdev->dev,
@@ -4827,6 +4827,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4827 } else { 4827 } else {
4828 pci_set_master(pdev); 4828 pci_set_master(pdev);
4829 pci_restore_state(pdev); 4829 pci_restore_state(pdev);
4830 pci_save_state(pdev);
4830 4831
4831 pci_enable_wake(pdev, PCI_D3hot, 0); 4832 pci_enable_wake(pdev, PCI_D3hot, 0);
4832 pci_enable_wake(pdev, PCI_D3cold, 0); 4833 pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -4911,13 +4912,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
4911 dev_warn(&adapter->pdev->dev, 4912 dev_warn(&adapter->pdev->dev,
4912 "Warning: detected DSPD enabled in EEPROM\n"); 4913 "Warning: detected DSPD enabled in EEPROM\n");
4913 } 4914 }
4914
4915 ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
4916 if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
4917 /* ASPM enable */
4918 dev_warn(&adapter->pdev->dev,
4919 "Warning: detected ASPM enabled in EEPROM\n");
4920 }
4921} 4915}
4922 4916
4923static const struct net_device_ops e1000e_netdev_ops = { 4917static const struct net_device_ops e1000e_netdev_ops = {
@@ -4966,7 +4960,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4966 u16 eeprom_data = 0; 4960 u16 eeprom_data = 0;
4967 u16 eeprom_apme_mask = E1000_EEPROM_APME; 4961 u16 eeprom_apme_mask = E1000_EEPROM_APME;
4968 4962
4969 e1000e_disable_l1aspm(pdev); 4963 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
4964 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4970 4965
4971 err = pci_enable_device_mem(pdev); 4966 err = pci_enable_device_mem(pdev);
4972 if (err) 4967 if (err)
@@ -5135,7 +5130,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5135 5130
5136 e1000_eeprom_checks(adapter); 5131 e1000_eeprom_checks(adapter);
5137 5132
5138 /* copy the MAC address out of the NVM */ 5133 /* copy the MAC address */
5139 if (e1000e_read_mac_addr(&adapter->hw)) 5134 if (e1000e_read_mac_addr(&adapter->hw))
5140 e_err("NVM Read Error while reading MAC address\n"); 5135 e_err("NVM Read Error while reading MAC address\n");
5141 5136
@@ -5160,6 +5155,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5160 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); 5155 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
5161 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); 5156 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
5162 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); 5157 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
5158 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
5163 5159
5164 /* Initialize link parameters. User can change them with ethtool */ 5160 /* Initialize link parameters. User can change them with ethtool */
5165 adapter->hw.mac.autoneg = 1; 5161 adapter->hw.mac.autoneg = 1;
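
probe also gains a fifth deferred-work item, print_hang_task, bound to e1000_print_hw_hang(). INIT_WORK() only associates a handler with the work item; the handler runs later in process context once the item is queued, typically via schedule_work() from a context that cannot sleep. A sketch of the pattern, with a hypothetical report_hang() handler in place of the driver's real one:

	static void report_hang(struct work_struct *work)
	{
		struct e1000_adapter *adapter = container_of(work,
							     struct e1000_adapter,
							     print_hang_task);

		e_err("Detected Tx Unit Hang\n");	/* runs in process context */
	}

	/* in probe: bind the handler once */
	INIT_WORK(&adapter->print_hang_task, report_hang);
	/* in the Tx clean path: safe to call from atomic context */
	schedule_work(&adapter->print_hang_task);
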
@@ -5283,19 +5279,24 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5283 del_timer_sync(&adapter->watchdog_timer); 5279 del_timer_sync(&adapter->watchdog_timer);
5284 del_timer_sync(&adapter->phy_info_timer); 5280 del_timer_sync(&adapter->phy_info_timer);
5285 5281
5282 cancel_work_sync(&adapter->reset_task);
5283 cancel_work_sync(&adapter->watchdog_task);
5284 cancel_work_sync(&adapter->downshift_task);
5285 cancel_work_sync(&adapter->update_phy_task);
5286 cancel_work_sync(&adapter->print_hang_task);
5286 flush_scheduled_work(); 5287 flush_scheduled_work();
5287 5288
5289 if (!(netdev->flags & IFF_UP))
5290 e1000_power_down_phy(adapter);
5291
5292 unregister_netdev(netdev);
5293
5288 /* 5294 /*
5289 * Release control of h/w to f/w. If f/w is AMT enabled, this 5295 * Release control of h/w to f/w. If f/w is AMT enabled, this
5290 * would have already happened in close and is redundant. 5296 * would have already happened in close and is redundant.
5291 */ 5297 */
5292 e1000_release_hw_control(adapter); 5298 e1000_release_hw_control(adapter);
5293 5299
5294 unregister_netdev(netdev);
5295
5296 if (!e1000_check_reset_block(&adapter->hw))
5297 e1000_phy_hw_reset(&adapter->hw);
5298
5299 e1000e_reset_interrupt_capability(adapter); 5300 e1000e_reset_interrupt_capability(adapter);
5300 kfree(adapter->tx_ring); 5301 kfree(adapter->tx_ring);
5301 kfree(adapter->rx_ring); 5302 kfree(adapter->rx_ring);
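
The reworked remove path is mostly about teardown ordering: stop the timers, drain every work item with cancel_work_sync() (which waits for a running instance to finish), power down the PHY if the interface never came up, and only then unregister the netdev, so no deferred handler can run against a device that is going away. Condensed:

	del_timer_sync(&adapter->watchdog_timer);	/* 1: no new timer fires   */
	cancel_work_sync(&adapter->reset_task);		/* 2: drain each work item */
	cancel_work_sync(&adapter->print_hang_task);	/*    (one call per item)  */
	if (!(netdev->flags & IFF_UP))
		e1000_power_down_phy(adapter);		/* 3: PHY off if never up  */
	unregister_netdev(netdev);			/* 4: detach from the stack */
	e1000_release_hw_control(adapter);		/* 5: hand h/w back to f/w  */
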
@@ -5321,7 +5322,7 @@ static struct pci_error_handlers e1000_err_handler = {
5321 .resume = e1000_io_resume, 5322 .resume = e1000_io_resume,
5322}; 5323};
5323 5324
5324static struct pci_device_id e1000_pci_tbl[] = { 5325static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
5325 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, 5326 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
5326 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, 5327 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
5327 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, 5328 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -5361,6 +5362,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
5361 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, 5362 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
5362 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, 5363 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
5363 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, 5364 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
5365 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
5364 5366
5365 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, 5367 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
5366 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, 5368 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
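
The ID table also switches from a bare struct pci_device_id array to DEFINE_PCI_DEVICE_TABLE(), which supplies the const qualifier and section annotations expected of PCI ID tables on kernels of this era, and it picks up the new 82567V-3 entry. A minimal table in this style, using a hypothetical sketch_pci_tbl:

	static DEFINE_PCI_DEVICE_TABLE(sketch_pci_tbl) = {
		{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
		{ }	/* required all-zero terminator */
	};
	MODULE_DEVICE_TABLE(pci, sketch_pci_tbl);	/* export IDs for autoloading */
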
@@ -5414,12 +5416,10 @@ static int __init e1000_init_module(void)
5414 int ret; 5416 int ret;
5415 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", 5417 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
5416 e1000e_driver_name, e1000e_driver_version); 5418 e1000e_driver_name, e1000e_driver_version);
5417 printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", 5419 printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
5418 e1000e_driver_name); 5420 e1000e_driver_name);
5419 ret = pci_register_driver(&e1000_driver); 5421 ret = pci_register_driver(&e1000_driver);
5420 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name, 5422
5421 PM_QOS_DEFAULT_VALUE);
5422
5423 return ret; 5423 return ret;
5424} 5424}
5425module_init(e1000_init_module); 5425module_init(e1000_init_module);
@@ -5433,7 +5433,6 @@ module_init(e1000_init_module);
5433static void __exit e1000_exit_module(void) 5433static void __exit e1000_exit_module(void)
5434{ 5434{
5435 pci_unregister_driver(&e1000_driver); 5435 pci_unregister_driver(&e1000_driver);
5436 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
5437} 5436}
5438module_exit(e1000_exit_module); 5437module_exit(e1000_exit_module);
5439 5438
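
Finally, the module-scope PM QoS requirement is dropped from both init and exit. Under the old API the two calls had to be paired, keyed by the same QoS class and name string; shown together (the diff splits them across the two hunks above), the removed pairing was:

	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
			       PM_QOS_DEFAULT_VALUE);	/* at module load   */
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
				  e1000e_driver_name);	/* at module unload */
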