diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2009-10-13 01:34:20 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-10-13 14:48:18 -0400 |
commit | 89d71a66c40d629e3b1285def543ab1425558cd5 (patch) | |
tree | 45159e85418170fe36e4e023d9617693625d1740 /drivers/net/dl2k.c | |
parent | bff1c09640b3006bca711e18ef08a5fb955ad9b5 (diff) |
net: Use netdev_alloc_skb_ip_align()
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/dl2k.c')
-rw-r--r-- | drivers/net/dl2k.c | 18 |
1 file changed, 7 insertions, 11 deletions
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 7fa7a907f134..ce8fef184f2c 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -505,7 +505,8 @@ rio_timer (unsigned long data) | |||
505 | entry = np->old_rx % RX_RING_SIZE; | 505 | entry = np->old_rx % RX_RING_SIZE; |
506 | /* Dropped packets don't need to re-allocate */ | 506 | /* Dropped packets don't need to re-allocate */ |
507 | if (np->rx_skbuff[entry] == NULL) { | 507 | if (np->rx_skbuff[entry] == NULL) { |
508 | skb = netdev_alloc_skb (dev, np->rx_buf_sz); | 508 | skb = netdev_alloc_skb_ip_align(dev, |
509 | np->rx_buf_sz); | ||
509 | if (skb == NULL) { | 510 | if (skb == NULL) { |
510 | np->rx_ring[entry].fraginfo = 0; | 511 | np->rx_ring[entry].fraginfo = 0; |
511 | printk (KERN_INFO | 512 | printk (KERN_INFO |
@@ -514,8 +515,6 @@ rio_timer (unsigned long data) | |||
514 | break; | 515 | break; |
515 | } | 516 | } |
516 | np->rx_skbuff[entry] = skb; | 517 | np->rx_skbuff[entry] = skb; |
517 | /* 16 byte align the IP header */ | ||
518 | skb_reserve (skb, 2); | ||
519 | np->rx_ring[entry].fraginfo = | 518 | np->rx_ring[entry].fraginfo = |
520 | cpu_to_le64 (pci_map_single | 519 | cpu_to_le64 (pci_map_single |
521 | (np->pdev, skb->data, np->rx_buf_sz, | 520 | (np->pdev, skb->data, np->rx_buf_sz, |
@@ -576,7 +575,9 @@ alloc_list (struct net_device *dev) | |||
576 | /* Allocate the rx buffers */ | 575 | /* Allocate the rx buffers */ |
577 | for (i = 0; i < RX_RING_SIZE; i++) { | 576 | for (i = 0; i < RX_RING_SIZE; i++) { |
578 | /* Allocated fixed size of skbuff */ | 577 | /* Allocated fixed size of skbuff */ |
579 | struct sk_buff *skb = netdev_alloc_skb (dev, np->rx_buf_sz); | 578 | struct sk_buff *skb; |
579 | |||
580 | skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); | ||
580 | np->rx_skbuff[i] = skb; | 581 | np->rx_skbuff[i] = skb; |
581 | if (skb == NULL) { | 582 | if (skb == NULL) { |
582 | printk (KERN_ERR | 583 | printk (KERN_ERR |
@@ -584,7 +585,6 @@ alloc_list (struct net_device *dev) | |||
584 | dev->name); | 585 | dev->name); |
585 | break; | 586 | break; |
586 | } | 587 | } |
587 | skb_reserve (skb, 2); /* 16 byte align the IP header. */ | ||
588 | /* Rubicon now supports 40 bits of addressing space. */ | 588 | /* Rubicon now supports 40 bits of addressing space. */ |
589 | np->rx_ring[i].fraginfo = | 589 | np->rx_ring[i].fraginfo = |
590 | cpu_to_le64 ( pci_map_single ( | 590 | cpu_to_le64 ( pci_map_single ( |
@@ -871,13 +871,11 @@ receive_packet (struct net_device *dev) | |||
871 | PCI_DMA_FROMDEVICE); | 871 | PCI_DMA_FROMDEVICE); |
872 | skb_put (skb = np->rx_skbuff[entry], pkt_len); | 872 | skb_put (skb = np->rx_skbuff[entry], pkt_len); |
873 | np->rx_skbuff[entry] = NULL; | 873 | np->rx_skbuff[entry] = NULL; |
874 | } else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) { | 874 | } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) { |
875 | pci_dma_sync_single_for_cpu(np->pdev, | 875 | pci_dma_sync_single_for_cpu(np->pdev, |
876 | desc_to_dma(desc), | 876 | desc_to_dma(desc), |
877 | np->rx_buf_sz, | 877 | np->rx_buf_sz, |
878 | PCI_DMA_FROMDEVICE); | 878 | PCI_DMA_FROMDEVICE); |
879 | /* 16 byte align the IP header */ | ||
880 | skb_reserve (skb, 2); | ||
881 | skb_copy_to_linear_data (skb, | 879 | skb_copy_to_linear_data (skb, |
882 | np->rx_skbuff[entry]->data, | 880 | np->rx_skbuff[entry]->data, |
883 | pkt_len); | 881 | pkt_len); |
@@ -907,7 +905,7 @@ receive_packet (struct net_device *dev) | |||
907 | struct sk_buff *skb; | 905 | struct sk_buff *skb; |
908 | /* Dropped packets don't need to re-allocate */ | 906 | /* Dropped packets don't need to re-allocate */ |
909 | if (np->rx_skbuff[entry] == NULL) { | 907 | if (np->rx_skbuff[entry] == NULL) { |
910 | skb = netdev_alloc_skb(dev, np->rx_buf_sz); | 908 | skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); |
911 | if (skb == NULL) { | 909 | if (skb == NULL) { |
912 | np->rx_ring[entry].fraginfo = 0; | 910 | np->rx_ring[entry].fraginfo = 0; |
913 | printk (KERN_INFO | 911 | printk (KERN_INFO |
@@ -917,8 +915,6 @@ receive_packet (struct net_device *dev) | |||
917 | break; | 915 | break; |
918 | } | 916 | } |
919 | np->rx_skbuff[entry] = skb; | 917 | np->rx_skbuff[entry] = skb; |
920 | /* 16 byte align the IP header */ | ||
921 | skb_reserve (skb, 2); | ||
922 | np->rx_ring[entry].fraginfo = | 918 | np->rx_ring[entry].fraginfo = |
923 | cpu_to_le64 (pci_map_single | 919 | cpu_to_le64 (pci_map_single |
924 | (np->pdev, skb->data, np->rx_buf_sz, | 920 | (np->pdev, skb->data, np->rx_buf_sz, |