author    Linus Torvalds <torvalds@linux-foundation.org>  2008-05-08 22:03:26 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-05-08 22:03:26 -0400
commit    28a4acb48586dc21d2d14a75a7aab7be78b7c83b (patch)
tree      1e95503037a68286ba732dbc0a844dbf0f826223 /drivers/net/e1000e/netdev.c
parent    89f92d6425b099538932e9b881588f87ef9f3184 (diff)
parent    e46b66bc42b6b1430b04cc5c207ecb2b2f4553dc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (32 commits)
  net: Added ASSERT_RTNL() to dev_open() and dev_close().
  can: Fix can_send() handling on dev_queue_xmit() failures
  netns: Fix arbitrary net_device-s corruptions on net_ns stop.
  netfilter: Kconfig: default DCCP/SCTP conntrack support to the protocol config values
  netfilter: nf_conntrack_sip: restrict RTP expect flushing on error to last request
  macvlan: Fix memleak on device removal/crash on module removal
  net/ipv4: correct RFC 1122 section reference in comment
  tcp FRTO: SACK variant is errorneously used with NewReno
  e1000e: don't return half-read eeprom on error
  ucc_geth: Don't use RX clock as TX clock.
  cxgb3: Use CAP_SYS_RAWIO for firmware
  pcnet32: delete non NAPI code from driver.
  fs_enet: Fix a memory leak in fs_enet_mdio_probe
  [netdrvr] eexpress: IPv6 fails - multicast problems
  3c59x: use netstats in net_device structure
  3c980-TX needs EXTRA_PREAMBLE
  fix warning in drivers/net/appletalk/cops.c
  e1000e: Add support for BM PHYs on ICH9
  uli526x: fix endianness issues in the setup frame
  uli526x: initialize the hardware prior to requesting interrupts
  ...
Diffstat (limited to 'drivers/net/e1000e/netdev.c')
-rw-r--r--  drivers/net/e1000e/netdev.c | 330
1 file changed, 322 insertions(+), 8 deletions(-)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 8991ab8911e2..8cbb40f3a506 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -43,10 +43,11 @@
 #include <linux/if_vlan.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/pm_qos_params.h>
 
 #include "e1000.h"
 
-#define DRV_VERSION "0.2.1"
+#define DRV_VERSION "0.3.3.3-k2"
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -341,6 +342,89 @@ no_buffers:
 }
 
 /**
+ * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
+ * @adapter: address of board private structure
+ * @rx_ring: pointer to receive ring structure
+ * @cleaned_count: number of buffers to allocate this pass
+ **/
+
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+					 int cleaned_count)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_buffer *buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+	unsigned int bufsz = 256 -
+			     16 /* for skb_reserve */ -
+			     NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		skb = buffer_info->skb;
+		if (skb) {
+			skb_trim(skb, 0);
+			goto check_page;
+		}
+
+		skb = netdev_alloc_skb(netdev, bufsz);
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+
+		/* Make buffer alignment 2 beyond a 16 byte boundary
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed
+		 */
+		skb_reserve(skb, NET_IP_ALIGN);
+
+		buffer_info->skb = skb;
+check_page:
+		/* allocate a new page if necessary */
+		if (!buffer_info->page) {
+			buffer_info->page = alloc_page(GFP_ATOMIC);
+			if (unlikely(!buffer_info->page)) {
+				adapter->alloc_rx_buff_failed++;
+				break;
+			}
+		}
+
+		if (!buffer_info->dma)
+			buffer_info->dma = pci_map_page(pdev,
+							buffer_info->page, 0,
+							PAGE_SIZE,
+							PCI_DMA_FROMDEVICE);
+
+		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->tail);
+	}
+}
+
+/**
  * e1000_clean_rx_irq - Send received data up the network stack; legacy
  * @adapter: board private structure
  *
@@ -783,6 +867,186 @@ next_desc:
 }
 
 /**
+ * e1000_consume_page - helper function
+ **/
+static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+			       u16 length)
+{
+	bi->page = NULL;
+	skb->len += length;
+	skb->data_len += length;
+	skb->truesize += length;
+}
+
+/**
+ * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done, there
+ * is no guarantee that everything was cleaned
+ **/
+
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+				     int *work_done, int work_to_do)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_buffer *buffer_info, *next_buffer;
+	u32 length;
+	unsigned int i;
+	int cleaned_count = 0;
+	bool cleaned = false;
+	unsigned int total_rx_bytes=0, total_rx_packets=0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb;
+		u8 status;
+
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		++i;
+		if (i == rx_ring->count)
+			i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned = true;
+		cleaned_count++;
+		pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->length);
+
+		/* errors is only valid for DD + EOP descriptors */
+		if (unlikely((status & E1000_RXD_STAT_EOP) &&
+			     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
+			/* recycle both page and skb */
+			buffer_info->skb = skb;
+			/* an error means any chain goes out the window
+			 * too */
+			if (rx_ring->rx_skb_top)
+				dev_kfree_skb(rx_ring->rx_skb_top);
+			rx_ring->rx_skb_top = NULL;
+			goto next_desc;
+		}
+
+#define rxtop rx_ring->rx_skb_top
+		if (!(status & E1000_RXD_STAT_EOP)) {
+			/* this descriptor is only the beginning (or middle) */
+			if (!rxtop) {
+				/* this is the beginning of a chain */
+				rxtop = skb;
+				skb_fill_page_desc(rxtop, 0, buffer_info->page,
+						   0, length);
+			} else {
+				/* this is the middle of a chain */
+				skb_fill_page_desc(rxtop,
+						   skb_shinfo(rxtop)->nr_frags,
+						   buffer_info->page, 0, length);
+				/* re-use the skb, only consumed the page */
+				buffer_info->skb = skb;
+			}
+			e1000_consume_page(buffer_info, rxtop, length);
+			goto next_desc;
+		} else {
+			if (rxtop) {
+				/* end of the chain */
+				skb_fill_page_desc(rxtop,
+						   skb_shinfo(rxtop)->nr_frags,
+						   buffer_info->page, 0, length);
+				/* re-use the current skb, we only consumed the
+				 * page */
+				buffer_info->skb = skb;
+				skb = rxtop;
+				rxtop = NULL;
+				e1000_consume_page(buffer_info, skb, length);
+			} else {
+				/* no chain, got EOP, this buf is the packet
+				 * copybreak to save the put_page/alloc_page */
+				if (length <= copybreak &&
+				    skb_tailroom(skb) >= length) {
+					u8 *vaddr;
+					vaddr = kmap_atomic(buffer_info->page,
+							    KM_SKB_DATA_SOFTIRQ);
+					memcpy(skb_tail_pointer(skb), vaddr,
+					       length);
+					kunmap_atomic(vaddr,
+						      KM_SKB_DATA_SOFTIRQ);
+					/* re-use the page, so don't erase
+					 * buffer_info->page */
+					skb_put(skb, length);
+				} else {
+					skb_fill_page_desc(skb, 0,
+							   buffer_info->page, 0,
+							   length);
+					e1000_consume_page(buffer_info, skb,
+							   length);
+				}
+			}
+		}
+
+		/* Receive Checksum Offload XXX recompute due to CRC strip? */
+		e1000_rx_checksum(adapter,
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		/* eth type trans needs skb->data to point to something */
+		if (!pskb_may_pull(skb, ETH_HLEN)) {
+			ndev_err(netdev, "pskb_may_pull failed.\n");
+			dev_kfree_skb(skb);
+			goto next_desc;
+		}
+
+		e1000_receive_skb(adapter, netdev, skb, status,
+				  rx_desc->special);
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = e1000_desc_unused(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, cleaned_count);
+
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+	return cleaned;
+}
+
+/**
  * e1000_clean_rx_ring - Free Rx Buffers per Queue
  * @adapter: board private structure
  **/
@@ -802,6 +1066,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 			pci_unmap_single(pdev, buffer_info->dma,
 					 adapter->rx_buffer_len,
 					 PCI_DMA_FROMDEVICE);
+		else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
+			pci_unmap_page(pdev, buffer_info->dma,
+				       PAGE_SIZE,
+				       PCI_DMA_FROMDEVICE);
 		else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
 			pci_unmap_single(pdev, buffer_info->dma,
 					 adapter->rx_ps_bsize0,
@@ -809,6 +1077,11 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 			buffer_info->dma = 0;
 		}
 
+		if (buffer_info->page) {
+			put_page(buffer_info->page);
+			buffer_info->page = NULL;
+		}
+
 		if (buffer_info->skb) {
 			dev_kfree_skb(buffer_info->skb);
 			buffer_info->skb = NULL;
@@ -1755,10 +2028,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 	 * a lot of memory, since we allocate 3 pages at all times
 	 * per packet.
 	 */
-	adapter->rx_ps_pages = 0;
 	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
-	if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
+	if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
+	    (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
 		adapter->rx_ps_pages = pages;
+	else
+		adapter->rx_ps_pages = 0;
 
 	if (adapter->rx_ps_pages) {
 		/* Configure extra packet-split registers */
@@ -1819,9 +2094,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 			sizeof(union e1000_rx_desc_packet_split);
 		adapter->clean_rx = e1000_clean_rx_irq_ps;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
+	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
+		rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
+		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
+		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
 	} else {
-		rdlen = rx_ring->count *
-			sizeof(struct e1000_rx_desc);
+		rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
 		adapter->clean_rx = e1000_clean_rx_irq;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
 	}
@@ -1885,8 +2163,21 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
 	 */
 	if ((adapter->flags & FLAG_HAS_ERT) &&
-	    (adapter->netdev->mtu > ETH_DATA_LEN))
-		ew32(ERT, E1000_ERT_2048);
+	    (adapter->netdev->mtu > ETH_DATA_LEN)) {
+		u32 rxdctl = er32(RXDCTL(0));
+		ew32(RXDCTL(0), rxdctl | 0x3);
+		ew32(ERT, E1000_ERT_2048 | (1 << 13));
+		/*
+		 * With jumbo frames and early-receive enabled, excessive
+		 * C4->C2 latencies result in dropped transactions.
+		 */
+		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+					  e1000e_driver_name, 55);
+	} else {
+		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+					  e1000e_driver_name,
+					  PM_QOS_DEFAULT_VALUE);
+	}
 
 	/* Enable Receives */
 	ew32(RCTL, rctl);
@@ -2155,6 +2446,14 @@ void e1000e_reset(struct e1000_adapter *adapter)
 
 	/* Allow time for pending master requests to run */
 	mac->ops.reset_hw(hw);
+
+	/*
+	 * For parts with AMT enabled, let the firmware know
+	 * that the network interface is in control
+	 */
+	if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw))
+		e1000_get_hw_control(adapter);
+
 	ew32(WUC, 0);
 
 	if (mac->ops.init_hw(hw))
@@ -3469,6 +3768,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	 * means we reserve 2 more, this pushes us to allocate from the next
 	 * larger slab size.
 	 * i.e. RXBUFFER_2048 --> size-4096 slab
+	 * However with the new *_jumbo_rx* routines, jumbo receives will use
+	 * fragmented skbs
 	 */
 
 	if (max_frame <= 256)
@@ -3626,6 +3927,9 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 		ew32(CTRL_EXT, ctrl_ext);
 	}
 
+	if (adapter->flags & FLAG_IS_ICH)
+		e1000e_disable_gig_wol_ich8lan(&adapter->hw);
+
 	/* Allow time for pending master requests to run */
 	e1000e_disable_pcie_master(&adapter->hw);
 
@@ -4292,6 +4596,13 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
 
 	{ }	/* terminate list */
 };
@@ -4326,7 +4637,9 @@ static int __init e1000_init_module(void)
 	printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
 	       e1000e_driver_name);
 	ret = pci_register_driver(&e1000_driver);
-
+	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
+			       PM_QOS_DEFAULT_VALUE);
+
 	return ret;
 }
 module_init(e1000_init_module);
@@ -4340,6 +4653,7 @@ module_init(e1000_init_module);
 static void __exit e1000_exit_module(void)
 {
 	pci_unregister_driver(&e1000_driver);
+	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
 }
 module_exit(e1000_exit_module);
 