author     Eric Dumazet <eric.dumazet@gmail.com>        2009-10-13 01:34:20 -0400
committer  David S. Miller <davem@davemloft.net>        2009-10-13 14:48:18 -0400
commit     89d71a66c40d629e3b1285def543ab1425558cd5
tree       45159e85418170fe36e4e023d9617693625d1740     /drivers/net/e1000e
parent     bff1c09640b3006bca711e18ef08a5fb955ad9b5
net: Use netdev_alloc_skb_ip_align()
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/e1000e')
-rw-r--r--   drivers/net/e1000e/netdev.c   37
1 file changed, 7 insertions(+), 30 deletions(-)
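
For context, netdev_alloc_skb_ip_align() folds the allocate-then-reserve pattern removed at each call site below into a single helper: allocate NET_IP_ALIGN extra bytes, then reserve them so the IP header lands 16-byte aligned once the 14-byte Ethernet header is stripped. A minimal sketch of the equivalent logic follows; the exact in-tree definition (introduced around this series in include/linux/skbuff.h) may differ in detail.

/*
 * Sketch of the helper this patch switches to.  Equivalent to the
 * open-coded netdev_alloc_skb() + skb_reserve(NET_IP_ALIGN) pattern
 * being deleted below; not the authoritative definition.
 */
static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
							 unsigned int length)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
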
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 21af3984e5c2..376924804f3f 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -167,7 +167,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+	unsigned int bufsz = adapter->rx_buffer_len;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -179,20 +179,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			goto map_skb;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (!skb) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 map_skb:
 		buffer_info->dma = pci_map_single(pdev, skb->data,
@@ -284,21 +277,14 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 				cpu_to_le64(ps_page->dma);
 		}
 
-		skb = netdev_alloc_skb(netdev,
-				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(netdev,
+						adapter->rx_ps_bsize0);
 
 		if (!skb) {
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->dma = pci_map_single(pdev, skb->data,
 						  adapter->rx_ps_bsize0,
@@ -359,9 +345,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = 256 -
-			     16 /* for skb_reserve */ -
-			     NET_IP_ALIGN;
+	unsigned int bufsz = 256 - 16 /* for skb_reserve */;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -373,19 +357,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			goto check_page;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 check_page:
 		/* allocate a new page if necessary */
@@ -513,9 +491,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		 */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+			    netdev_alloc_skb_ip_align(netdev, length);
 			if (new_skb) {
-				skb_reserve(new_skb, NET_IP_ALIGN);
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
 							       (skb->data -
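
Note on the copybreak hunk above: the copy starts NET_IP_ALIGN bytes before the received data and targets offset -NET_IP_ALIGN in the new skb, i.e. it writes into the headroom that netdev_alloc_skb_ip_align() just reserved, preserving the alignment padding along with the frame. For reference, skb_copy_to_linear_data_offset() is essentially a memcpy relative to skb->data; a sketch, assuming the usual inline definition:

/*
 * Sketch of skb_copy_to_linear_data_offset(): copy 'len' bytes from
 * 'from' into the skb's linear buffer at skb->data + offset.  A negative
 * offset, as used above, lands in the reserved headroom.
 */
static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						   const int offset,
						   const void *from,
						   const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}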