author		Jesse Brandeburg <jesse.brandeburg@intel.com>	2009-07-06 06:44:39 -0400
committer	David S. Miller <davem@davemloft.net>	2009-07-06 21:07:51 -0400
commit		edbbb3ca107715067b27a71e6ea7f58750912aa2
tree		4b9772ff2dc239ad61812cce4b4e6249a25512a2 /drivers/net/e1000/e1000_main.c
parent		94c9e5a89349a1f1ebabe0876c059dc387b8b2a0
e1000: implement jumbo receive with partial descriptors
This code is extremely similar to what is already committed in e1000e.
e1000 will no longer request 32kB slab buffers to support jumbo
frames on PCI/PCI-X adapters. This significantly reduces the
likelihood of order:3 allocation failures.
This new code adds support for using pages as receive buffers, and
the driver chains multiple pages together to build a jumbo frame for
the stack to consume.
The hardware is given a power-of-two buffer size and writes as much
of each received frame as it can into one or more of those buffers.
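In outline, the new clean routine keeps a partially assembled frame in
rx_ring->rx_skb_top and attaches each descriptor's page to it as a page
fragment. The following is a condensed, illustrative sketch of that chaining
flow, simplified from the e1000_clean_jumbo_rx_irq code added below (the
e1000_consume_page bookkeeping, error handling, and the copybreak path are
omitted here):

	/* not the literal patch hunk -- a simplified view of the chaining flow */
	if (!(status & E1000_RXD_STAT_EOP)) {
		if (!rx_ring->rx_skb_top) {
			/* first buffer of a multi-descriptor frame: start the chain */
			rx_ring->rx_skb_top = skb;
			skb_fill_page_desc(skb, 0, buffer_info->page, 0, length);
		} else {
			/* middle buffer: append this page as another fragment */
			skb_fill_page_desc(rx_ring->rx_skb_top,
					   skb_shinfo(rx_ring->rx_skb_top)->nr_frags,
					   buffer_info->page, 0, length);
		}
	} else if (rx_ring->rx_skb_top) {
		/* last buffer: append it, then hand the completed chain to the stack */
		skb_fill_page_desc(rx_ring->rx_skb_top,
				   skb_shinfo(rx_ring->rx_skb_top)->nr_frags,
				   buffer_info->page, 0, length);
		skb = rx_ring->rx_skb_top;
		rx_ring->rx_skb_top = NULL;
	}

The full version below also recycles the small header skb for the next
descriptor and drops any partially built chain when a receive error is seen.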
The benefits of applying this are:
1) stop akpm's dissing :-) of this lame e1000 behavior [1]
2) more efficient memory allocation (roughly half) when using jumbo
frames, which also allows much better socket utilization with jumbos,
since the socket is charged for the full allocation of each receive
buffer regardless of how much of it is used.
3) this was a feature request by a customer
4) copybreak for small packets (< 256 bytes) still applies; see the sketch
below the references
[1] http://lkml.org/lkml/2008/7/10/68
http://article.gmane.org/gmane.linux.network/130986
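For item 4, the end-of-packet path in the new routine keeps the existing
copybreak behavior: frames at or below the copybreak threshold are copied
into the small skb so the page can be reused, while larger frames hand the
page off as a fragment. A simplified excerpt of that path, taken from the
e1000_clean_jumbo_rx_irq code added below:

	if (length <= copybreak && skb_tailroom(skb) >= length) {
		/* small frame: copy out of the page so the page can be reused */
		u8 *vaddr = kmap_atomic(buffer_info->page, KM_SKB_DATA_SOFTIRQ);
		memcpy(skb_tail_pointer(skb), vaddr, length);
		kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
		skb_put(skb, length);
	} else {
		/* large frame: attach the page as a fragment and give it up */
		skb_fill_page_desc(skb, 0, buffer_info->page, 0, length);
		e1000_consume_page(buffer_info, skb, length);
	}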
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
CC: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--	drivers/net/e1000/e1000_main.c | 425
1 file changed, 388 insertions(+), 37 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5b8cbdb4b520..f2db9e2069e7 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -137,9 +137,15 @@ static int e1000_clean(struct napi_struct *napi, int budget);
 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			       struct e1000_rx_ring *rx_ring,
 			       int *work_done, int work_to_do);
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+				     struct e1000_rx_ring *rx_ring,
+				     int *work_done, int work_to_do);
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 				   struct e1000_rx_ring *rx_ring,
 				   int cleaned_count);
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+					 struct e1000_rx_ring *rx_ring,
+					 int cleaned_count);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 			   int cmd);
@@ -761,10 +767,7 @@ void e1000_reset(struct e1000_adapter *adapter)
 
 	hw->fc_high_water = fc_high_water_mark;
 	hw->fc_low_water = fc_high_water_mark - 8;
-	if (hw->mac_type == e1000_80003es2lan)
-		hw->fc_pause_time = 0xFFFF;
-	else
-		hw->fc_pause_time = E1000_FC_PAUSE_TIME;
+	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
 	hw->fc_send_xon = 1;
 	hw->fc = hw->original_fc;
 
@@ -1862,6 +1865,7 @@ setup_rx_desc_die:
 
 	rxdr->next_to_clean = 0;
 	rxdr->next_to_use = 0;
+	rxdr->rx_skb_top = NULL;
 
 	return 0;
 }
@@ -1968,10 +1972,17 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rdlen, rctl, rxcsum, ctrl_ext;
 
-	rdlen = adapter->rx_ring[0].count *
-	        sizeof(struct e1000_rx_desc);
-	adapter->clean_rx = e1000_clean_rx_irq;
-	adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+	if (adapter->netdev->mtu > ETH_DATA_LEN) {
+		rdlen = adapter->rx_ring[0].count *
+		        sizeof(struct e1000_rx_desc);
+		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
+		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
+	} else {
+		rdlen = adapter->rx_ring[0].count *
+		        sizeof(struct e1000_rx_desc);
+		adapter->clean_rx = e1000_clean_rx_irq;
+		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+	}
 
 	/* disable receives while setting up the descriptors */
 	rctl = er32(RCTL);
@@ -2185,26 +2196,39 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
-		if (buffer_info->dma) {
-			pci_unmap_single(pdev,
-					 buffer_info->dma,
-					 buffer_info->length,
-					 PCI_DMA_FROMDEVICE);
+		if (buffer_info->dma &&
+		    adapter->clean_rx == e1000_clean_rx_irq) {
+			pci_unmap_single(pdev, buffer_info->dma,
+					 buffer_info->length,
+					 PCI_DMA_FROMDEVICE);
+		} else if (buffer_info->dma &&
+		           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
+			pci_unmap_page(pdev, buffer_info->dma,
+				       buffer_info->length,
+				       PCI_DMA_FROMDEVICE);
 		}
 
 		buffer_info->dma = 0;
-
+		if (buffer_info->page) {
+			put_page(buffer_info->page);
+			buffer_info->page = NULL;
+		}
 		if (buffer_info->skb) {
 			dev_kfree_skb(buffer_info->skb);
 			buffer_info->skb = NULL;
 		}
 	}
 
+	/* there also may be some cached data from a chained receive */
+	if (rx_ring->rx_skb_top) {
+		dev_kfree_skb(rx_ring->rx_skb_top);
+		rx_ring->rx_skb_top = NULL;
+	}
+
 	size = sizeof(struct e1000_buffer) * rx_ring->count;
 	memset(rx_ring->buffer_info, 0, size);
 
 	/* Zero out the descriptor ring */
-
 	memset(rx_ring->desc, 0, rx_ring->size);
 
 	rx_ring->next_to_clean = 0;
@@ -3489,8 +3513,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 
 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
-	 * larger slab size
-	 * i.e. RXBUFFER_2048 --> size-4096 slab */
+	 * larger slab size.
+	 * i.e. RXBUFFER_2048 --> size-4096 slab
+	 * however with the new *_jumbo_rx* routines, jumbo receives will use
+	 * fragmented skbs */
 
 	if (max_frame <= E1000_RXBUFFER_256)
 		adapter->rx_buffer_len = E1000_RXBUFFER_256;
@@ -3500,12 +3526,12 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 		adapter->rx_buffer_len = E1000_RXBUFFER_1024;
 	else if (max_frame <= E1000_RXBUFFER_2048)
 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
-	else if (max_frame <= E1000_RXBUFFER_4096)
-		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
-	else if (max_frame <= E1000_RXBUFFER_8192)
-		adapter->rx_buffer_len = E1000_RXBUFFER_8192;
-	else if (max_frame <= E1000_RXBUFFER_16384)
+	else
+#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
 		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
+		adapter->rx_buffer_len = PAGE_SIZE;
+#endif
 
 	/* adjust allocation if LPE protects us, and we aren't using SBP */
 	if (!hw->tbi_compatibility_on &&
@@ -3987,9 +4013,227 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 }
 
 /**
+ * e1000_consume_page - helper function
+ **/
+static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+			       u16 length)
+{
+	bi->page = NULL;
+	skb->len += length;
+	skb->data_len += length;
+	skb->truesize += length;
+}
+
+/**
+ * e1000_receive_skb - helper function to handle rx indications
+ * @adapter: board private structure
+ * @status: descriptor status field as written by hardware
+ * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @skb: pointer to sk_buff to be indicated to stack
+ */
+static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
+			      __le16 vlan, struct sk_buff *skb)
+{
+	if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) {
+		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+					 le16_to_cpu(vlan) &
+					 E1000_RXD_SPC_VLAN_MASK);
+	} else {
+		netif_receive_skb(skb);
+	}
+}
+
+/**
+ * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ * @rx_ring: ring to clean
+ * @work_done: amount of napi work completed this call
+ * @work_to_do: max amount of work allowed for this call to do
+ *
+ * the return value indicates whether actual cleaning was done, there
+ * is no guarantee that everything was cleaned
+ */
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+				     struct e1000_rx_ring *rx_ring,
+				     int *work_done, int work_to_do)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_buffer *buffer_info, *next_buffer;
+	unsigned long irq_flags;
+	u32 length;
+	unsigned int i;
+	int cleaned_count = 0;
+	bool cleaned = false;
+	unsigned int total_rx_bytes=0, total_rx_packets=0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb;
+		u8 status;
+
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned = true;
+		cleaned_count++;
+		pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
+			       PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->length);
+
+		/* errors is only valid for DD + EOP descriptors */
+		if (unlikely((status & E1000_RXD_STAT_EOP) &&
+		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
+			u8 last_byte = *(skb->data + length - 1);
+			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
+				       last_byte)) {
+				spin_lock_irqsave(&adapter->stats_lock,
+						  irq_flags);
+				e1000_tbi_adjust_stats(hw, &adapter->stats,
+						       length, skb->data);
+				spin_unlock_irqrestore(&adapter->stats_lock,
+						       irq_flags);
+				length--;
+			} else {
+				/* recycle both page and skb */
+				buffer_info->skb = skb;
+				/* an error means any chain goes out the window
+				 * too */
+				if (rx_ring->rx_skb_top)
+					dev_kfree_skb(rx_ring->rx_skb_top);
+				rx_ring->rx_skb_top = NULL;
+				goto next_desc;
+			}
+		}
+
+#define rxtop rx_ring->rx_skb_top
+		if (!(status & E1000_RXD_STAT_EOP)) {
+			/* this descriptor is only the beginning (or middle) */
+			if (!rxtop) {
+				/* this is the beginning of a chain */
+				rxtop = skb;
+				skb_fill_page_desc(rxtop, 0, buffer_info->page,
+						   0, length);
+			} else {
+				/* this is the middle of a chain */
+				skb_fill_page_desc(rxtop,
+				    skb_shinfo(rxtop)->nr_frags,
+				    buffer_info->page, 0, length);
+				/* re-use the skb, only consumed the page */
+				buffer_info->skb = skb;
+			}
+			e1000_consume_page(buffer_info, rxtop, length);
+			goto next_desc;
+		} else {
+			if (rxtop) {
+				/* end of the chain */
+				skb_fill_page_desc(rxtop,
+				    skb_shinfo(rxtop)->nr_frags,
+				    buffer_info->page, 0, length);
+				/* re-use the current skb, we only consumed the
+				 * page */
+				buffer_info->skb = skb;
+				skb = rxtop;
+				rxtop = NULL;
+				e1000_consume_page(buffer_info, skb, length);
+			} else {
+				/* no chain, got EOP, this buf is the packet
+				 * copybreak to save the put_page/alloc_page */
+				if (length <= copybreak &&
+				    skb_tailroom(skb) >= length) {
+					u8 *vaddr;
+					vaddr = kmap_atomic(buffer_info->page,
+							    KM_SKB_DATA_SOFTIRQ);
+					memcpy(skb_tail_pointer(skb), vaddr, length);
+					kunmap_atomic(vaddr,
+						      KM_SKB_DATA_SOFTIRQ);
+					/* re-use the page, so don't erase
+					 * buffer_info->page */
+					skb_put(skb, length);
+				} else {
+					skb_fill_page_desc(skb, 0,
+							   buffer_info->page, 0,
+							   length);
+					e1000_consume_page(buffer_info, skb,
+							   length);
+				}
+			}
+		}
+
+		/* Receive Checksum Offload XXX recompute due to CRC strip? */
+		e1000_rx_checksum(adapter,
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		pskb_trim(skb, skb->len - 4);
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		/* eth type trans needs skb->data to point to something */
+		if (!pskb_may_pull(skb, ETH_HLEN)) {
+			DPRINTK(DRV, ERR, "pskb_may_pull failed.\n");
+			dev_kfree_skb(skb);
+			goto next_desc;
+		}
+
+		skb->protocol = eth_type_trans(skb, netdev);
+
+		e1000_receive_skb(adapter, status, rx_desc->special, skb);
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+	return cleaned;
+}
+
+/**
  * e1000_clean_rx_irq - Send received data up the network stack; legacy
  * @adapter: board private structure
- **/
+ * @rx_ring: ring to clean
+ * @work_done: amount of napi work completed this call
+ * @work_to_do: max amount of work allowed for this call to do
+ */
 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			       struct e1000_rx_ring *rx_ring,
 			       int *work_done, int work_to_do)
@@ -4001,7 +4245,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info, *next_buffer;
 	unsigned long flags;
 	u32 length;
-	u8 last_byte;
 	unsigned int i;
 	int cleaned_count = 0;
 	bool cleaned = false;
@@ -4033,9 +4276,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		cleaned = true;
 		cleaned_count++;
-		pci_unmap_single(pdev,
-				 buffer_info->dma,
-				 buffer_info->length,
+		pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
 				 PCI_DMA_FROMDEVICE);
 		buffer_info->dma = 0;
 
@@ -4052,7 +4293,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		}
 
 		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
-			last_byte = *(skb->data + length - 1);
+			u8 last_byte = *(skb->data + length - 1);
 			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
 				       last_byte)) {
 				spin_lock_irqsave(&adapter->stats_lock, flags);
@@ -4107,13 +4348,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		skb->protocol = eth_type_trans(skb, netdev);
 
-		if (unlikely(adapter->vlgrp &&
-			    (status & E1000_RXD_STAT_VP))) {
-			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
-						 le16_to_cpu(rx_desc->special));
-		} else {
-			netif_receive_skb(skb);
-		}
+		e1000_receive_skb(adapter, status, rx_desc->special, skb);
 
 next_desc:
 		rx_desc->status = 0;
@@ -4142,6 +4377,114 @@ next_desc:
 }
 
 /**
+ * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
+ * @adapter: address of board private structure
+ * @rx_ring: pointer to receive ring structure
+ * @cleaned_count: number of buffers to allocate this pass
+ **/
+
+static void
+e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+			     struct e1000_rx_ring *rx_ring, int cleaned_count)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_buffer *buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+	unsigned int bufsz = 256 -
+			     16 /*for skb_reserve */ -
+			     NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		skb = buffer_info->skb;
+		if (skb) {
+			skb_trim(skb, 0);
+			goto check_page;
+		}
+
+		skb = netdev_alloc_skb(netdev, bufsz);
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+			struct sk_buff *oldskb = skb;
+			DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
+					    "at %p\n", bufsz, skb->data);
+			/* Try again, without freeing the previous */
+			skb = netdev_alloc_skb(netdev, bufsz);
+			/* Failed allocation, critical failure */
+			if (!skb) {
+				dev_kfree_skb(oldskb);
+				adapter->alloc_rx_buff_failed++;
+				break;
+			}
+
+			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+				/* give up */
+				dev_kfree_skb(skb);
+				dev_kfree_skb(oldskb);
+				break; /* while (cleaned_count--) */
+			}
+
+			/* Use new allocation */
+			dev_kfree_skb(oldskb);
+		}
+		/* Make buffer alignment 2 beyond a 16 byte boundary
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed
+		 */
+		skb_reserve(skb, NET_IP_ALIGN);
+
+		buffer_info->skb = skb;
+		buffer_info->length = adapter->rx_buffer_len;
+check_page:
+		/* allocate a new page if necessary */
+		if (!buffer_info->page) {
+			buffer_info->page = alloc_page(GFP_ATOMIC);
+			if (unlikely(!buffer_info->page)) {
+				adapter->alloc_rx_buff_failed++;
+				break;
+			}
+		}
+
+		if (!buffer_info->dma)
+			buffer_info->dma = pci_map_page(pdev,
+							buffer_info->page, 0,
+							buffer_info->length,
+							PCI_DMA_FROMDEVICE);
+
+		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch. (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+	}
+}
+
+/**
  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
  * @adapter: address of board private structure
 **/
@@ -4186,6 +4529,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			/* Failed allocation, critical failure */
 			if (!skb) {
 				dev_kfree_skb(oldskb);
+				adapter->alloc_rx_buff_failed++;
 				break;
 			}
 
@@ -4193,6 +4537,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 				/* give up */
 				dev_kfree_skb(skb);
 				dev_kfree_skb(oldskb);
+				adapter->alloc_rx_buff_failed++;
 				break; /* while !buffer_info->skb */
 			}
 
@@ -4210,9 +4555,14 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 map_skb:
 		buffer_info->dma = pci_map_single(pdev,
 						  skb->data,
-						  adapter->rx_buffer_len,
+						  buffer_info->length,
 						  PCI_DMA_FROMDEVICE);
 
+		/*
+		 * XXX if it was allocated cleanly it will never map to a
+		 * boundary crossing
+		 */
+
 		/* Fix for errata 23, can't cross 64kB boundary */
 		if (!e1000_check_64k_bound(adapter,
 					(void *)(unsigned long)buffer_info->dma,
@@ -4229,6 +4579,7 @@ map_skb:
 					 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
 
+			adapter->alloc_rx_buff_failed++;
 			break; /* while !buffer_info->skb */
 		}
 		rx_desc = E1000_RX_DESC(*rx_ring, i);