author    Linus Torvalds <torvalds@g5.osdl.org>  2006-03-01 18:10:45 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-03-01 18:10:45 -0500
commit    e0a5c57848f7690a247bb8af4fa412844b0b00bb (patch)
tree      3cf1fd6858cab9c030e5cd0d98d04fc0b7bd41b3 /drivers/net
parent    fe05f54181db868c5720cc4dc0741227b9ba5a60 (diff)
parent    b5b9d6647c1cd5eee90b58941c55f874c2a7e707 (diff)
Merge branch 'upstream-fixes' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/e1000/e1000.h      |   3
-rw-r--r--  drivers/net/e1000/e1000_main.c | 119
2 files changed, 46 insertions(+), 76 deletions(-)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 27c77306193b..99baf0e099fc 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -225,9 +225,6 @@ struct e1000_rx_ring {
 	struct e1000_ps_page *ps_page;
 	struct e1000_ps_page_dma *ps_page_dma;
 
-	struct sk_buff *rx_skb_top;
-	struct sk_buff *rx_skb_prev;
-
 	/* cpu for rx queue */
 	int cpu;
 
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 31e332935e5a..5b7d0f425af2 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -103,7 +103,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
+#define DRV_VERSION "6.3.9-k4"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 
@@ -1635,8 +1635,6 @@ setup_rx_desc_die:
 
 	rxdr->next_to_clean = 0;
 	rxdr->next_to_use = 0;
-	rxdr->rx_skb_top = NULL;
-	rxdr->rx_skb_prev = NULL;
 
 	return 0;
 }
@@ -1713,8 +1711,23 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 		rctl |= adapter->rx_buffer_len << 0x11;
 	} else {
 		rctl &= ~E1000_RCTL_SZ_4096;
-		rctl &= ~E1000_RCTL_BSEX;
-		rctl |= E1000_RCTL_SZ_2048;
+		rctl |= E1000_RCTL_BSEX;
+		switch (adapter->rx_buffer_len) {
+		case E1000_RXBUFFER_2048:
+		default:
+			rctl |= E1000_RCTL_SZ_2048;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_4096:
+			rctl |= E1000_RCTL_SZ_4096;
+			break;
+		case E1000_RXBUFFER_8192:
+			rctl |= E1000_RCTL_SZ_8192;
+			break;
+		case E1000_RXBUFFER_16384:
+			rctl |= E1000_RCTL_SZ_16384;
+			break;
+		}
 	}
 
 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
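
For context: the hunk above replaces a hard-coded 2 KB receive buffer with a switch over every supported buffer size. The RCTL size field only encodes the small sizes directly; the BSEX (buffer size extension) bit scales the encoded size by 16, which is why the extended sizes keep BSEX set while the 2048-byte case clears it again. A minimal standalone sketch of the same selection, with macro values restated from my reading of e1000_hw.h (illustrative; verify against the header):

#include <stdint.h>

/* Register bits as I recall them from e1000_hw.h -- illustrative only */
#define E1000_RCTL_BSEX     0x02000000	/* buffer size extension (x16) */
#define E1000_RCTL_SZ_2048  0x00000000	/* rx buffer size 2048 */
#define E1000_RCTL_SZ_4096  0x00030000	/* 4096, requires BSEX */
#define E1000_RCTL_SZ_8192  0x00020000	/* 8192, requires BSEX */
#define E1000_RCTL_SZ_16384 0x00010000	/* 16384, requires BSEX */

/* Hypothetical helper mirroring the new e1000_setup_rctl() switch */
static uint32_t rctl_size_bits(unsigned int rx_buffer_len)
{
	uint32_t rctl = E1000_RCTL_BSEX;	/* assume an extended size... */

	switch (rx_buffer_len) {
	case 2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;	/* ...unless it is plain 2 KB */
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}
	return rctl;
}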
@@ -2107,16 +2120,6 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
 		}
 	}
 
-	/* there also may be some cached data in our adapter */
-	if (rx_ring->rx_skb_top) {
-		dev_kfree_skb(rx_ring->rx_skb_top);
-
-		/* rx_skb_prev will be wiped out by rx_skb_top */
-		rx_ring->rx_skb_top = NULL;
-		rx_ring->rx_skb_prev = NULL;
-	}
-
-
 	size = sizeof(struct e1000_buffer) * rx_ring->count;
 	memset(rx_ring->buffer_info, 0, size);
 	size = sizeof(struct e1000_ps_page) * rx_ring->count;
@@ -3106,24 +3109,27 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 		break;
 	}
 
-	/* since the driver code now supports splitting a packet across
-	 * multiple descriptors, most of the fifo related limitations on
-	 * jumbo frame traffic have gone away.
-	 * simply use 2k descriptors for everything.
-	 *
-	 * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
-	 * means we reserve 2 more, this pushes us to allocate from the next
-	 * larger slab size
-	 * i.e. RXBUFFER_2048 --> size-4096 slab */
-
-	/* recent hardware supports 1KB granularity */
+
 	if (adapter->hw.mac_type > e1000_82547_rev_2) {
-		adapter->rx_buffer_len =
-		    ((max_frame < E1000_RXBUFFER_2048) ?
-		     max_frame : E1000_RXBUFFER_2048);
+		adapter->rx_buffer_len = max_frame;
 		E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
-	} else
-		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+	} else {
+		if(unlikely((adapter->hw.mac_type < e1000_82543) &&
+		   (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
+			DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
+						"on 82542\n");
+			return -EINVAL;
+		} else {
+			if(max_frame <= E1000_RXBUFFER_2048)
+				adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+			else if(max_frame <= E1000_RXBUFFER_4096)
+				adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+			else if(max_frame <= E1000_RXBUFFER_8192)
+				adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+			else if(max_frame <= E1000_RXBUFFER_16384)
+				adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+		}
+	}
 
 	netdev->mtu = new_mtu;
 
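
The MTU path above now has to pick one of those buffer sizes instead of always using 2 KB descriptors: MACs newer than 82547 rev 2 accept 1 KB granularity, so the frame size is simply rounded up; older parts fall into the smallest bucket that fits; and pre-82543 parts (the 82542) reject jumbo frames outright. A hedged standalone sketch of that decision, with the rounding macro and the 1518-byte standard frame limit restated from e1000.h as I recall them:

#include <errno.h>

#define MAXIMUM_ETHERNET_FRAME_SIZE 1518	/* per e1000.h, as I recall */
#define E1000_ROUNDUP(size, unit) \
	((size) = (((size) + (unit) - 1) & ~((unit) - 1)))

/* Hypothetical stand-in for the relevant adapter capabilities */
struct mac_caps {
	int newer_than_82547_rev_2;	/* 1 KB buffer granularity */
	int older_than_82543;		/* 82542: no jumbo frames */
};

static int pick_rx_buffer_len(const struct mac_caps *caps,
			      unsigned int max_frame,
			      unsigned int *rx_buffer_len)
{
	if (caps->newer_than_82547_rev_2) {
		unsigned int len = max_frame;

		E1000_ROUNDUP(len, 1024);	/* round up to 1 KB */
		*rx_buffer_len = len;
		return 0;
	}
	if (caps->older_than_82543 && max_frame > MAXIMUM_ETHERNET_FRAME_SIZE)
		return -EINVAL;		/* jumbo frames unsupported */

	if (max_frame <= 2048)
		*rx_buffer_len = 2048;
	else if (max_frame <= 4096)
		*rx_buffer_len = 4096;
	else if (max_frame <= 8192)
		*rx_buffer_len = 8192;
	else
		*rx_buffer_len = 16384;	/* MTU checks cap max_frame here */
	return 0;
}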
@@ -3620,7 +3626,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 	uint8_t last_byte;
 	unsigned int i;
 	int cleaned_count = 0;
-	boolean_t cleaned = FALSE, multi_descriptor = FALSE;
+	boolean_t cleaned = FALSE;
 
 	i = rx_ring->next_to_clean;
 	rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -3652,43 +3658,12 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		length = le16_to_cpu(rx_desc->length);
 
-		skb_put(skb, length);
-
-		if (!(status & E1000_RXD_STAT_EOP)) {
-			if (!rx_ring->rx_skb_top) {
-				rx_ring->rx_skb_top = skb;
-				rx_ring->rx_skb_top->len = length;
-				rx_ring->rx_skb_prev = skb;
-			} else {
-				if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
-					rx_ring->rx_skb_prev->next = skb;
-					skb->prev = rx_ring->rx_skb_prev;
-				} else {
-					skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
-				}
-				rx_ring->rx_skb_prev = skb;
-				rx_ring->rx_skb_top->data_len += length;
-			}
+		if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
+			/* All receives must fit into a single buffer */
+			E1000_DBG("%s: Receive packet consumed multiple"
+				  " buffers\n", netdev->name);
+			dev_kfree_skb_irq(skb);
 			goto next_desc;
-		} else {
-			if (rx_ring->rx_skb_top) {
-				if (skb_shinfo(rx_ring->rx_skb_top)
-							->frag_list) {
-					rx_ring->rx_skb_prev->next = skb;
-					skb->prev = rx_ring->rx_skb_prev;
-				} else
-					skb_shinfo(rx_ring->rx_skb_top)
-							->frag_list = skb;
-
-				rx_ring->rx_skb_top->data_len += length;
-				rx_ring->rx_skb_top->len +=
-					rx_ring->rx_skb_top->data_len;
-
-				skb = rx_ring->rx_skb_top;
-				multi_descriptor = TRUE;
-				rx_ring->rx_skb_top = NULL;
-				rx_ring->rx_skb_prev = NULL;
-			}
 		}
 
 		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
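
For contrast, the removed path accumulated non-EOP buffers onto the first skb's frag_list, bumping data_len per fragment and fixing up len only when EOP arrived; keeping the len/data_len accounting and the prev/next links consistent across the ring is what made the multi-descriptor path fragile. A compressed sketch of that chaining pattern (not the driver's exact code; truesize accounting omitted):

#include <linux/skbuff.h>

/*
 * Sketch: chain a continuation buffer onto an in-progress packet.
 * 'head' is the first skb of the frame; 'frag' holds the next buffer.
 * skb->len must equal the linear length plus data_len, so each
 * fragment bumps both counters together.
 */
static void chain_rx_fragment(struct sk_buff *head, struct sk_buff *frag,
			      unsigned int length)
{
	struct sk_buff *tail = skb_shinfo(head)->frag_list;

	skb_put(frag, length);			/* commit received bytes */
	if (!tail) {
		skb_shinfo(head)->frag_list = frag;	/* first fragment */
	} else {
		while (tail->next)		/* append at the end */
			tail = tail->next;
		tail->next = frag;
	}
	head->data_len += length;
	head->len += length;
}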
@@ -3712,10 +3687,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		 * performance for small packets with large amounts
 		 * of reassembly being done in the stack */
 #define E1000_CB_LENGTH 256
-		if ((length < E1000_CB_LENGTH) &&
-		    !rx_ring->rx_skb_top &&
-		    /* or maybe (status & E1000_RXD_STAT_EOP) && */
-		    !multi_descriptor) {
+		if (length < E1000_CB_LENGTH) {
 			struct sk_buff *new_skb =
 				dev_alloc_skb(length + NET_IP_ALIGN);
 			if (new_skb) {
@@ -3729,7 +3701,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 				skb = new_skb;
 				skb_put(skb, length);
 			}
-		}
+		} else
+			skb_put(skb, length);
 
 		/* end copybreak code */
 
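
With the multi-descriptor state gone, the copybreak test above reduces to a plain length check: every completed receive is a single buffer, so any frame shorter than E1000_CB_LENGTH (256 bytes) can be copied into a fresh, right-sized skb and the original large buffer recycled, which helps small-packet workloads. A hedged sketch of the general copybreak pattern with kernel skb APIs of this era (helper name and calling convention are mine, not the driver's):

#include <linux/skbuff.h>
#include <linux/string.h>

#define E1000_CB_LENGTH 256	/* copybreak threshold from the driver */

/*
 * Sketch: copy a short received frame into a new, right-sized skb so
 * the original (large, DMA-mapped) buffer can be reused for a later
 * receive.  Returns the skb to hand to the stack; falls back to the
 * original buffer on allocation failure.
 */
static struct sk_buff *copybreak_rx_skb(struct sk_buff *skb,
					unsigned int length)
{
	struct sk_buff *new_skb;

	if (length >= E1000_CB_LENGTH) {
		skb_put(skb, length);	/* use the original buffer as-is */
		return skb;
	}

	new_skb = dev_alloc_skb(length + NET_IP_ALIGN);
	if (!new_skb) {
		skb_put(skb, length);	/* allocation failed: keep original */
		return skb;
	}

	skb_reserve(new_skb, NET_IP_ALIGN);	/* align the IP header */
	memcpy(new_skb->data, skb->data, length);
	skb_put(new_skb, length);
	/* caller re-queues the original skb's buffer for rx reuse */
	return new_skb;
}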