author		Christoph Hellwig <hch@lst.de>	2006-08-31 17:27:47 -0400
committer	Auke Kok <auke-jan.h.kok@intel.com>	2006-08-31 17:27:47 -0400
commit		ca6f72241966602d254900b9d624cac00df745bf (patch)
tree		f600780a8d0aeb8d7814c0de9237c9c39ffb75ec /drivers/net/e1000/e1000_main.c
parent		5881cde8a38cab3b228a63516ab64f8d79acc4f5 (diff)
e1000: clean up skb allocation code
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--	drivers/net/e1000/e1000_main.c	17
1 file changed, 6 insertions(+), 11 deletions(-)
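The change drops the explicit skb->dev assignments (netdev_alloc_skb() already sets skb->dev on the buffer it returns) and straightens the refill loop in e1000_alloc_rx_buffers() so an existing skb is recycled before a new one is allocated. Below is a minimal sketch of the resulting refill pattern; refill_rx_skb() is a hypothetical helper with simplified arguments, not a function in the driver, and the real code maps the buffer and stores it in buffer_info afterwards.

/*
 * Simplified sketch of the allocation pattern this patch moves to.
 * refill_rx_skb() is hypothetical; it only illustrates the recycle-then-
 * allocate flow shown in the diff below.
 */
static struct sk_buff *refill_rx_skb(struct net_device *netdev,
				     struct sk_buff *old_skb,
				     unsigned int bufsz)
{
	struct sk_buff *skb = old_skb;

	if (skb) {
		/* Recycle the buffer's existing skb: just reset its length. */
		skb_trim(skb, 0);
		return skb;
	}

	/* netdev_alloc_skb() sets skb->dev, so no explicit assignment here. */
	skb = netdev_alloc_skb(netdev, bufsz);
	if (unlikely(!skb))
		return NULL;	/* better luck next round */

	/* Reserve NET_IP_ALIGN bytes so the IP header ends up 16-byte aligned. */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}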
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 3b61b9775e53..b943bed967be 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3741,7 +3741,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 				netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
 			if (new_skb) {
 				skb_reserve(new_skb, NET_IP_ALIGN);
-				new_skb->dev = netdev;
 				memcpy(new_skb->data - NET_IP_ALIGN,
 				       skb->data - NET_IP_ALIGN,
 				       length + NET_IP_ALIGN);
@@ -4008,13 +4007,13 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 	buffer_info = &rx_ring->buffer_info[i];
 
 	while (cleaned_count--) {
-		if (!(skb = buffer_info->skb))
-			skb = netdev_alloc_skb(netdev, bufsz);
-		else {
+		skb = buffer_info->skb;
+		if (skb) {
 			skb_trim(skb, 0);
 			goto map_skb;
 		}
 
+		skb = netdev_alloc_skb(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
@@ -4039,10 +4038,10 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 				dev_kfree_skb(skb);
 				dev_kfree_skb(oldskb);
 				break; /* while !buffer_info->skb */
-			} else {
-				/* Use new allocation */
-				dev_kfree_skb(oldskb);
 			}
+
+			/* Use new allocation */
+			dev_kfree_skb(oldskb);
 		}
 		/* Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
@@ -4050,8 +4049,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 		 */
 		skb_reserve(skb, NET_IP_ALIGN);
 
-		skb->dev = netdev;
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 map_skb:
@@ -4165,8 +4162,6 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 		 */
 		skb_reserve(skb, NET_IP_ALIGN);
 
-		skb->dev = netdev;
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_ps_bsize0;
 		buffer_info->dma = pci_map_single(pdev, skb->data,