Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c59x.c              3
-rw-r--r--  drivers/net/8139cp.c             8
-rw-r--r--  drivers/net/8139too.c            3
-rw-r--r--  drivers/net/atl1e/atl1e_main.c   4
-rw-r--r--  drivers/net/atlx/atl1.c         11
-rw-r--r--  drivers/net/atlx/atl2.c          3
-rw-r--r--  drivers/net/bcm63xx_enet.c       5
-rw-r--r--  drivers/net/benet/be_main.c      4
-rw-r--r--  drivers/net/cpmac.c              6
-rw-r--r--  drivers/net/dl2k.c              18
-rw-r--r--  drivers/net/e100.c               5
-rw-r--r--  drivers/net/e1000/e1000_main.c  29
-rw-r--r--  drivers/net/e1000e/netdev.c     37
-rw-r--r--  drivers/net/ehea/ehea_main.c     9
-rw-r--r--  drivers/net/enic/enic_main.c    15
-rw-r--r--  drivers/net/ethoc.c              4
-rw-r--r--  drivers/net/hamachi.c           12
-rw-r--r--  drivers/net/igb/igb_main.c       8
-rw-r--r--  drivers/net/igbvf/netdev.c       8
-rw-r--r--  drivers/net/ipg.c                7
-rw-r--r--  drivers/net/ixgb/ixgb_main.c    12
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  12
-rw-r--r--  drivers/net/ixp2000/ixpdev.c     3
-rw-r--r--  drivers/net/korina.c             5
-rw-r--r--  drivers/net/ks8842.c             5
-rw-r--r--  drivers/net/lib82596.c          11
-rw-r--r--  drivers/net/r8169.c              3
-rw-r--r--  drivers/net/sc92031.c            4
-rw-r--r--  drivers/net/sgiseeq.c            7
-rw-r--r--  drivers/net/sis190.c             3
-rw-r--r--  drivers/net/skge.c               7
-rw-r--r--  drivers/net/sky2.c               3
-rw-r--r--  drivers/net/tlan.c               7
-rw-r--r--  drivers/net/tsi108_eth.c        10
-rw-r--r--  drivers/net/via-rhine.c          8
-rw-r--r--  drivers/net/via-velocity.c       3
-rw-r--r--  drivers/net/virtio_net.c         7
37 files changed, 87 insertions(+), 222 deletions(-)
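
For reference, every hunk below replaces an open-coded netdev_alloc_skb() plus skb_reserve(skb, NET_IP_ALIGN) (or skb_reserve(skb, 2)) pair with a single call to netdev_alloc_skb_ip_align(). A minimal sketch of that helper, assuming the definition in include/linux/skbuff.h that this series relies on (formatting approximate):

	static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
								 unsigned int length)
	{
		/* Ask for NET_IP_ALIGN extra bytes of headroom... */
		struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

		/* ...and reserve them up front, so the IP header lands on a
		 * 16-byte boundary once the 14-byte Ethernet header is pulled. */
		if (NET_IP_ALIGN && skb)
			skb_reserve(skb, NET_IP_ALIGN);
		return skb;
	}

Callers therefore pass the payload length they actually need; the helper handles the alignment headroom, which is why the "+ NET_IP_ALIGN" / "+ 2" terms and the explicit skb_reserve() calls disappear from the drivers.
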
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 975e25b19ebe..32031eaf4910 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2560,7 +2560,7 @@ boomerang_rx(struct net_device *dev)
 		struct sk_buff *skb;
 		entry = vp->dirty_rx % RX_RING_SIZE;
 		if (vp->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
 			if (skb == NULL) {
 				static unsigned long last_jif;
 				if (time_after(jiffies, last_jif + 10 * HZ)) {
@@ -2572,7 +2572,6 @@ boomerang_rx(struct net_device *dev)
 				break;	/* Bad news! */
 			}
 
-			skb_reserve(skb, NET_IP_ALIGN);
 			vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 			vp->rx_skbuff[entry] = skb;
 		}
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 83a1922e68e0..ab451bb8995a 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -549,14 +549,12 @@ rx_status_loop:
 		pr_debug("%s: rx slot %d status 0x%x len %d\n",
 			 dev->name, rx_tail, status, len);
 
-		new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN);
+		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
 		if (!new_skb) {
 			dev->stats.rx_dropped++;
 			goto rx_next;
 		}
 
-		skb_reserve(new_skb, NET_IP_ALIGN);
-
 		dma_unmap_single(&cp->pdev->dev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
 
@@ -1057,12 +1055,10 @@ static int cp_refill_rx(struct cp_private *cp)
 		struct sk_buff *skb;
 		dma_addr_t mapping;
 
-		skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
 		if (!skb)
 			goto err_out;
 
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		cp->rx_skb[i] = skb;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 4a3628755026..7e333f73b228 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2004,9 +2004,8 @@ no_early_rx:
 		/* Malloc up new buffer, compatible with net-2e. */
 		/* Omit the four octet CRC from the length. */
 
-		skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
 		if (likely(skb)) {
-			skb_reserve (skb, NET_IP_ALIGN);	/* 16 byte align the IP fields. */
 #if RX_BUF_IDX == 3
 			wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
 #else
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 955da733c2ad..8b889ab544b0 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1433,14 +1433,12 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
 
 			packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
 					RRS_PKT_SIZE_MASK) - 4; /* CRC */
-			skb = netdev_alloc_skb(netdev,
-					       packet_size + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(netdev, packet_size);
 			if (skb == NULL) {
 				dev_warn(&pdev->dev, "%s: Memory squeeze,"
 					 "deferring packet.\n", netdev->name);
 				goto skip_pkt;
 			}
-			skb_reserve(skb, NET_IP_ALIGN);
 			skb->dev = netdev;
 			memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
 			skb_put(skb, packet_size);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 00569dc1313c..963df502260a 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1864,21 +1864,14 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
 
 		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
 
-		skb = netdev_alloc_skb(adapter->netdev,
-				       adapter->rx_buffer_len + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(adapter->netdev,
+						adapter->rx_buffer_len);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->netdev->stats.rx_dropped++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->alloced = 1;
 		buffer_info->skb = skb;
 		buffer_info->length = (u16) adapter->rx_buffer_len;
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index ab688862093f..0d268075bad5 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -409,7 +409,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
 		if (rxd->status.ok && rxd->status.pkt_size >= 60) {
 			int rx_size = (int)(rxd->status.pkt_size - 4);
 			/* alloc new buffer */
-			skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(netdev, rx_size);
 			if (NULL == skb) {
 				printk(KERN_WARNING
 					"%s: Mem squeeze, deferring packet.\n",
@@ -421,7 +421,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
 				netdev->stats.rx_dropped++;
 				break;
 			}
-			skb_reserve(skb, NET_IP_ALIGN);
 			skb->dev = netdev;
 			memcpy(skb->data, rxd->packet, rx_size);
 			skb_put(skb, rx_size);
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index ba29dc319b34..1f6c5486d715 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -320,16 +320,13 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 		if (len < copybreak) {
 			struct sk_buff *nskb;
 
-			nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
+			nskb = netdev_alloc_skb_ip_align(dev, len);
 			if (!nskb) {
 				/* forget packet, just rearm desc */
 				priv->stats.rx_dropped++;
 				continue;
 			}
 
-			/* since we're copying the data, we can align
-			 * them properly */
-			skb_reserve(nskb, NET_IP_ALIGN);
 			dma_sync_single_for_cpu(kdev, desc->address,
 						len, DMA_FROM_DEVICE);
 			memcpy(nskb->data, skb->data, len);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 0e92a1f055a2..e0f9d6477184 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -756,7 +756,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 	if ((adapter->cap == 0x400) && !vtm)
 		vlanf = 0;
 
-	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
+	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
 	if (!skb) {
 		if (net_ratelimit())
 			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
@@ -764,8 +764,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 		return;
 	}
 
-	skb_reserve(skb, NET_IP_ALIGN);
-
 	skb_fill_rx_data(adapter, skb, rxcp);
 
 	if (do_pkt_csum(rxcp, adapter->rx_csum))
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 61f9da2b4943..678222389407 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -380,9 +380,8 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
 		return NULL;
 	}
 
-	skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
+	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
 	if (likely(skb)) {
-		skb_reserve(skb, 2);
 		skb_put(desc->skb, desc->datalen);
 		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
 		desc->skb->ip_summed = CHECKSUM_NONE;
@@ -991,12 +990,11 @@ static int cpmac_open(struct net_device *dev)
 
 	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
 	for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
-		skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE);
+		skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
 		if (unlikely(!skb)) {
 			res = -ENOMEM;
 			goto fail_desc;
 		}
-		skb_reserve(skb, 2);
 		desc->skb = skb;
 		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
 						    CPMAC_SKB_SIZE,
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 7fa7a907f134..ce8fef184f2c 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -505,7 +505,8 @@ rio_timer (unsigned long data)
 			entry = np->old_rx % RX_RING_SIZE;
 			/* Dropped packets don't need to re-allocate */
 			if (np->rx_skbuff[entry] == NULL) {
-				skb = netdev_alloc_skb (dev, np->rx_buf_sz);
+				skb = netdev_alloc_skb_ip_align(dev,
+								np->rx_buf_sz);
 				if (skb == NULL) {
 					np->rx_ring[entry].fraginfo = 0;
 					printk (KERN_INFO
@@ -514,8 +515,6 @@ rio_timer (unsigned long data)
 					break;
 				}
 				np->rx_skbuff[entry] = skb;
-				/* 16 byte align the IP header */
-				skb_reserve (skb, 2);
 				np->rx_ring[entry].fraginfo =
 				    cpu_to_le64 (pci_map_single
 						 (np->pdev, skb->data, np->rx_buf_sz,
@@ -576,7 +575,9 @@ alloc_list (struct net_device *dev)
 	/* Allocate the rx buffers */
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		/* Allocated fixed size of skbuff */
-		struct sk_buff *skb = netdev_alloc_skb (dev, np->rx_buf_sz);
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL) {
 			printk (KERN_ERR
@@ -584,7 +585,6 @@ alloc_list (struct net_device *dev)
 				dev->name);
 			break;
 		}
-		skb_reserve (skb, 2);	/* 16 byte align the IP header. */
 		/* Rubicon now supports 40 bits of addressing space. */
 		np->rx_ring[i].fraginfo =
 		    cpu_to_le64 ( pci_map_single (
@@ -871,13 +871,11 @@ receive_packet (struct net_device *dev)
 						  PCI_DMA_FROMDEVICE);
 			skb_put (skb = np->rx_skbuff[entry], pkt_len);
 			np->rx_skbuff[entry] = NULL;
-		} else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) {
+		} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
 			pci_dma_sync_single_for_cpu(np->pdev,
 						    desc_to_dma(desc),
 						    np->rx_buf_sz,
 						    PCI_DMA_FROMDEVICE);
-			/* 16 byte align the IP header */
-			skb_reserve (skb, 2);
 			skb_copy_to_linear_data (skb,
 						 np->rx_skbuff[entry]->data,
 						 pkt_len);
@@ -907,7 +905,7 @@ receive_packet (struct net_device *dev)
 		struct sk_buff *skb;
 		/* Dropped packets don't need to re-allocate */
 		if (np->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
+			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
 			if (skb == NULL) {
 				np->rx_ring[entry].fraginfo = 0;
 				printk (KERN_INFO
@@ -917,8 +915,6 @@ receive_packet (struct net_device *dev)
 				break;
 			}
 			np->rx_skbuff[entry] = skb;
-			/* 16 byte align the IP header */
-			skb_reserve (skb, 2);
 			np->rx_ring[entry].fraginfo =
 			    cpu_to_le64 (pci_map_single
 					 (np->pdev, skb->data, np->rx_buf_sz,
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 679965c2bb86..ff83efd47b0d 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1839,11 +1839,10 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 {
-	if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
+	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
 		return -ENOMEM;
 
-	/* Align, init, and map the RFD. */
-	skb_reserve(rx->skb, NET_IP_ALIGN);
+	/* Init, and map the RFD. */
 	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6a6141482979..c938114a34ab 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3866,9 +3866,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		 * of reassembly being done in the stack */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+			    netdev_alloc_skb_ip_align(netdev, length);
 			if (new_skb) {
-				skb_reserve(new_skb, NET_IP_ALIGN);
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
 							       (skb->data -
@@ -3937,9 +3936,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = 256 -
-	                     16 /*for skb_reserve */ -
-	                     NET_IP_ALIGN;
+	unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -3951,7 +3948,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			goto check_page;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
@@ -3964,7 +3961,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
 					     "at %p\n", bufsz, skb->data);
 			/* Try again, without freeing the previous */
-			skb = netdev_alloc_skb(netdev, bufsz);
+			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			/* Failed allocation, critical failure */
 			if (!skb) {
 				dev_kfree_skb(oldskb);
@@ -3982,12 +3979,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			/* Use new allocation */
 			dev_kfree_skb(oldskb);
 		}
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 check_page:
@@ -4044,7 +4035,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+	unsigned int bufsz = adapter->rx_buffer_len;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -4056,7 +4047,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			goto map_skb;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
@@ -4069,7 +4060,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
 					     "at %p\n", bufsz, skb->data);
 			/* Try again, without freeing the previous */
-			skb = netdev_alloc_skb(netdev, bufsz);
+			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			/* Failed allocation, critical failure */
 			if (!skb) {
 				dev_kfree_skb(oldskb);
@@ -4088,12 +4079,6 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			/* Use new allocation */
 			dev_kfree_skb(oldskb);
 		}
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 map_skb:
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 21af3984e5c2..376924804f3f 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -167,7 +167,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+	unsigned int bufsz = adapter->rx_buffer_len;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -179,20 +179,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			goto map_skb;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (!skb) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 map_skb:
 		buffer_info->dma = pci_map_single(pdev, skb->data,
@@ -284,21 +277,14 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 				cpu_to_le64(ps_page->dma);
 		}
 
-		skb = netdev_alloc_skb(netdev,
-				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(netdev,
+						adapter->rx_ps_bsize0);
 
 		if (!skb) {
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/*
-		 * Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->dma = pci_map_single(pdev, skb->data,
 						  adapter->rx_ps_bsize0,
@@ -359,9 +345,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
-	unsigned int bufsz = 256 -
-	                     16 /* for skb_reserve */ -
-	                     NET_IP_ALIGN;
+	unsigned int bufsz = 256 - 16 /* for skb_reserve */;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -373,19 +357,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			goto check_page;
 		}
 
-		skb = netdev_alloc_skb(netdev, bufsz);
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 check_page:
 		/* allocate a new page if necessary */
@@ -513,9 +491,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		 */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+			    netdev_alloc_skb_ip_align(netdev, length);
 			if (new_skb) {
-				skb_reserve(new_skb, NET_IP_ALIGN);
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
 							       (skb->data -
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 41bd7aeafd82..7f8fcc2fa748 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -447,7 +447,9 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
 	max_index_mask = q_skba->len - 1;
 	for (i = 0; i < fill_wqes; i++) {
 		u64 tmp_addr;
-		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(dev, packet_size);
 		if (!skb) {
 			q_skba->os_skbs = fill_wqes - i;
 			if (q_skba->os_skbs == q_skba->len - 2) {
@@ -457,7 +459,6 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
 			}
 			break;
 		}
-		skb_reserve(skb, NET_IP_ALIGN);
 
 		skb_arr[index] = skb;
 		tmp_addr = ehea_map_vaddr(skb->data);
@@ -500,7 +501,7 @@ static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
 {
 	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
 				  nr_of_wqes, EHEA_RWQE2_TYPE,
-				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
+				  EHEA_RQ2_PKT_SIZE);
 }
 
 
@@ -508,7 +509,7 @@ static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
 {
 	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
 				  nr_of_wqes, EHEA_RWQE3_TYPE,
-				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
+				  EHEA_MAX_PACKET_SIZE);
 }
 
 static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index d69d52ed7726..f875751af15e 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -870,19 +870,6 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
 	dev_kfree_skb_any(buf->os_buf);
 }
 
-static inline struct sk_buff *enic_rq_alloc_skb(struct net_device *netdev,
-	unsigned int size)
-{
-	struct sk_buff *skb;
-
-	skb = netdev_alloc_skb(netdev, size + NET_IP_ALIGN);
-
-	if (skb)
-		skb_reserve(skb, NET_IP_ALIGN);
-
-	return skb;
-}
-
 static int enic_rq_alloc_buf(struct vnic_rq *rq)
 {
 	struct enic *enic = vnic_dev_priv(rq->vdev);
@@ -892,7 +879,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 	unsigned int os_buf_index = 0;
 	dma_addr_t dma_addr;
 
-	skb = enic_rq_alloc_skb(netdev, len);
+	skb = netdev_alloc_skb_ip_align(netdev, len);
 	if (!skb)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 34d0c69e67f7..0c229a5fa82a 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -404,10 +404,10 @@ static int ethoc_rx(struct net_device *dev, int limit)
 
 		if (ethoc_update_rx_stats(priv, &bd) == 0) {
 			int size = bd.stat >> 16;
-			struct sk_buff *skb = netdev_alloc_skb(dev, size);
+			struct sk_buff *skb;
 
 			size -= 4; /* strip the CRC */
-			skb_reserve(skb, 2); /* align TCP/IP header */
+			skb = netdev_alloc_skb_ip_align(dev, size);
 
 			if (likely(skb)) {
 				void *src = phys_to_virt(bd.addr);
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 1d5064a09aca..18bd9fe20d77 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -406,10 +406,9 @@ that case.
 /* A few values that may be tweaked. */
 /* Size of each temporary Rx buffer, calculated as:
  * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
- * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum +
- * 2 more because we use skb_reserve.
+ * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum
  */
-#define PKT_BUF_SZ		1538
+#define PKT_BUF_SZ		1536
 
 /* For now, this is going to be set to the maximum size of an ethernet
  * packet.  Eventually, we may want to make it a variable that is
@@ -1151,12 +1150,13 @@ static void hamachi_tx_timeout(struct net_device *dev)
 	}
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz);
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
 		hmp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 
-		skb_reserve(skb, 2); /* 16 byte align the IP header. */
 		hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
 			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 		hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
@@ -1195,7 +1195,7 @@ static void hamachi_init_ring(struct net_device *dev)
 	 * card. -KDU
 	 */
 	hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
-		(((dev->mtu+26+7) & ~7) + 2 + 16));
+		(((dev->mtu+26+7) & ~7) + 16));
 
 	/* Initialize all Rx descriptors. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 428d50475351..2ffe0997b838 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4934,18 +4934,12 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 		}
 
 		if (!buffer_info->skb) {
-			skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
 
-			/* Make buffer alignment 2 beyond a 16 byte boundary
-			 * this will result in a 16 byte aligned IP header after
-			 * the 14 byte MAC header is removed
-			 */
-			skb_reserve(skb, NET_IP_ALIGN);
-
 			buffer_info->skb = skb;
 			buffer_info->dma = pci_map_single(pdev, skb->data,
 							  bufsz,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 91024a3cdad3..fad7f348dd1b 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -170,18 +170,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
 		}
 
 		if (!buffer_info->skb) {
-			skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
+			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
 
-			/* Make buffer alignment 2 beyond a 16 byte boundary
-			 * this will result in a 16 byte aligned IP header after
-			 * the 14 byte MAC header is removed
-			 */
-			skb_reserve(skb, NET_IP_ALIGN);
-
 			buffer_info->skb = skb;
 			buffer_info->dma = pci_map_single(pdev, skb->data,
 							  bufsz,
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 9f7b5d4172b8..63056e7b9e22 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -738,17 +738,12 @@ static int ipg_get_rxbuff(struct net_device *dev, int entry)
 
 	IPG_DEBUG_MSG("_get_rxbuff\n");
 
-	skb = netdev_alloc_skb(dev, sp->rxsupport_size + NET_IP_ALIGN);
+	skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
 	if (!skb) {
 		sp->rx_buff[entry] = NULL;
 		return -ENOMEM;
 	}
 
-	/* Adjust the data start location within the buffer to
-	 * align IP address field to a 16 byte boundary.
-	 */
-	skb_reserve(skb, NET_IP_ALIGN);
-
 	/* Associate the receive buffer with the IPG NIC. */
 	skb->dev = dev;
 
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index f9f633c134bd..1bd0ca1b0465 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1972,9 +1972,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 		 * of reassembly being done in the stack */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-				netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+				netdev_alloc_skb_ip_align(netdev, length);
 			if (new_skb) {
-				skb_reserve(new_skb, NET_IP_ALIGN);
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
 							       (skb->data -
@@ -2057,20 +2056,13 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
 			goto map_skb;
 		}
 
-		skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
-				       + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 map_skb:
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index eb3abd79e4ee..4c8a44919705 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -616,22 +616,14 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 
 		if (!bi->skb) {
 			struct sk_buff *skb;
-			skb = netdev_alloc_skb(adapter->netdev,
-			                       (rx_ring->rx_buf_len +
-			                        NET_IP_ALIGN));
+			skb = netdev_alloc_skb_ip_align(adapter->netdev,
+							rx_ring->rx_buf_len);
 
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
 
-			/*
-			 * Make buffer alignment 2 beyond a 16 byte boundary
-			 * this will result in a 16 byte aligned IP header after
-			 * the 14 byte MAC header is removed
-			 */
-			skb_reserve(skb, NET_IP_ALIGN);
-
 			bi->skb = skb;
 			bi->dma = pci_map_single(pdev, skb->data,
 						 rx_ring->rx_buf_len,
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 127243461a51..6baf3c94b3e8 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -108,9 +108,8 @@ static int ixpdev_rx(struct net_device *dev, int processed, int budget)
 		if (unlikely(!netif_running(nds[desc->channel])))
 			goto err;
 
-		skb = netdev_alloc_skb(dev, desc->pkt_length + 2);
+		skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
 		if (likely(skb != NULL)) {
-			skb_reserve(skb, 2);
 			skb_copy_to_linear_data(skb, buf, desc->pkt_length);
 			skb_put(skb, desc->pkt_length);
 			skb->protocol = eth_type_trans(skb, nds[desc->channel]);
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 03199fa10003..a07a5972b57e 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -400,7 +400,7 @@ static int korina_rx(struct net_device *dev, int limit)
 			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
 
 			/* Malloc up new buffer. */
-			skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
+			skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
 
 			if (!skb_new)
 				break;
@@ -417,9 +417,6 @@ static int korina_rx(struct net_device *dev, int limit)
 			if (devcs & ETH_RX_MP)
 				dev->stats.multicast++;
 
-			/* 16 bit align */
-			skb_reserve(skb_new, 2);
-
 			lp->rx_skb[lp->rx_next_done] = skb_new;
 		}
 
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 99e954167fa6..5c45cb58d023 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -357,7 +357,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
 
 	/* check the status */
 	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
-		struct sk_buff *skb = netdev_alloc_skb(netdev, len + 2);
+		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
 
 		dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
 			__func__, len);
@@ -369,9 +369,6 @@ static void ks8842_rx_frame(struct net_device *netdev,
 			if (status & RXSR_MULTICAST)
 				netdev->stats.multicast++;
 
-			/* Align socket buffer in 4-byte boundary for
-			   better performance. */
-			skb_reserve(skb, 2);
 			data = (u32 *)skb_put(skb, len);
 
 			ks8842_select_bank(adapter, 17);
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 51e11c3e53e1..5b24c67de25e 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -470,11 +470,11 @@ static inline int init_rx_bufs(struct net_device *dev)
 
 	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
 		dma_addr_t dma_addr;
-		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+		struct sk_buff *skb;
 
+		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
 		if (skb == NULL)
 			return -1;
-		skb_reserve(skb, 2);
 		dma_addr = dma_map_single(dev->dev.parent, skb->data,
 					  PKT_BUF_SZ, DMA_FROM_DEVICE);
 		rbd->v_next = rbd+1;
@@ -697,12 +697,12 @@ static inline int i596_rx(struct net_device *dev)
 						    (dma_addr_t)SWAP32(rbd->b_data),
 						    PKT_BUF_SZ, DMA_FROM_DEVICE);
 				/* Get fresh skbuff to replace filled one. */
-				newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+				newskb = netdev_alloc_skb_ip_align(dev,
+								   PKT_BUF_SZ);
 				if (newskb == NULL) {
 					skb = NULL;	/* drop pkt */
 					goto memory_squeeze;
 				}
-				skb_reserve(newskb, 2);
 
 				/* Pass up the skb already on the Rx ring. */
 				skb_put(skb, pkt_len);
@@ -716,7 +716,7 @@ static inline int i596_rx(struct net_device *dev)
 				rbd->b_data = SWAP32(dma_addr);
 				DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
 			} else
-				skb = netdev_alloc_skb(dev, pkt_len + 2);
+				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 memory_squeeze:
 			if (skb == NULL) {
 				/* XXX tulip.c can defer packets here!! */
@@ -730,7 +730,6 @@ memory_squeeze:
 				dma_sync_single_for_cpu(dev->dev.parent,
 							(dma_addr_t)SWAP32(rbd->b_data),
 							PKT_BUF_SZ, DMA_FROM_DEVICE);
-				skb_reserve(skb, 2);
 				memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
 				dma_sync_single_for_device(dev->dev.parent,
 							   (dma_addr_t)SWAP32(rbd->b_data),
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 50c6a3cfe439..97b170448ce6 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3555,13 +3555,12 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
 		if (pkt_size >= rx_copybreak)
 			goto out;
 
-		skb = netdev_alloc_skb(tp->dev, pkt_size + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
 		if (!skb)
 			goto out;
 
 		pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
 					    PCI_DMA_FROMDEVICE);
-		skb_reserve(skb, NET_IP_ALIGN);
 		skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
 		*sk_buff = skb;
 		done = true;
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 8d6030022d14..b7e0eb40a8bd 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -793,7 +793,7 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
 
 		rx_len -= rx_size_align + 4;
 
-		skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
 		if (unlikely(!skb)) {
 			if (printk_ratelimit())
 				printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
@@ -801,8 +801,6 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
 			goto next;
 		}
 
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
 			memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
 				rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index f4dfd1f679a9..6b364a6c6c60 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -365,11 +365,10 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
 			}
 			skb_reserve(newskb, 2);
 		} else {
-			skb = netdev_alloc_skb(dev, len + 2);
-			if (skb) {
-				skb_reserve(skb, 2);
+			skb = netdev_alloc_skb_ip_align(dev, len);
+			if (skb)
 				skb_copy_to_linear_data(skb, rd->skb->data, len);
-			}
+
 			newskb = rd->skb;
 		}
 memory_squeeze:
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 7cc9898f4e00..31233b4c44a0 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -536,13 +536,12 @@ static bool sis190_try_rx_copy(struct sis190_private *tp,
 		if (pkt_size >= rx_copybreak)
 			goto out;
 
-		skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
+		skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
 		if (!skb)
 			goto out;
 
 		pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
 					    PCI_DMA_FROMDEVICE);
-		skb_reserve(skb, 2);
 		skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
 		*sk_buff = skb;
 		done = true;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 01f6811f1324..be28ebb3811c 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3070,11 +3070,10 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 		goto error;
 
 	if (len < RX_COPY_THRESHOLD) {
-		skb = netdev_alloc_skb(dev, len + 2);
+		skb = netdev_alloc_skb_ip_align(dev, len);
 		if (!skb)
 			goto resubmit;
 
-		skb_reserve(skb, 2);
 		pci_dma_sync_single_for_cpu(skge->hw->pdev,
 					    pci_unmap_addr(e, mapaddr),
 					    len, PCI_DMA_FROMDEVICE);
@@ -3085,11 +3084,11 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 		skge_rx_reuse(e, skge->rx_buf_size);
 	} else {
 		struct sk_buff *nskb;
-		nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN);
+
+		nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
 		if (!nskb)
 			goto resubmit;
 
-		skb_reserve(nskb, NET_IP_ALIGN);
 		pci_unmap_single(skge->hw->pdev,
 				 pci_unmap_addr(e, mapaddr),
 				 pci_unmap_len(e, maplen),
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 2ab5c39f33ca..3a449d012d4b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2191,9 +2191,8 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
 {
 	struct sk_buff *skb;
 
-	skb = netdev_alloc_skb(sky2->netdev, length + 2);
+	skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
 	if (likely(skb)) {
-		skb_reserve(skb, 2);
 		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
 					    length, PCI_DMA_FROMDEVICE);
 		skb_copy_from_linear_data(re->skb, skb->data, length);
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 3d31b47332bb..16f23f84920b 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1549,7 +1549,8 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 		if (tmpCStat & TLAN_CSTAT_EOC)
 			eoc = 1;
 
-		new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+		new_skb = netdev_alloc_skb_ip_align(dev,
+						    TLAN_MAX_FRAME_SIZE + 5);
 		if ( !new_skb )
 			goto drop_and_reuse;
 
@@ -1563,7 +1564,6 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 		skb->protocol = eth_type_trans( skb, dev );
 		netif_rx( skb );
 
-		skb_reserve( new_skb, NET_IP_ALIGN );
 		head_list->buffer[0].address = pci_map_single(priv->pciDev,
 							      new_skb->data,
 							      TLAN_MAX_FRAME_SIZE,
@@ -1967,13 +1967,12 @@ static void TLan_ResetLists( struct net_device *dev )
 		list->cStat = TLAN_CSTAT_READY;
 		list->frameSize = TLAN_MAX_FRAME_SIZE;
 		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
-		skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
 		if ( !skb ) {
 			pr_err("TLAN: out of memory for received data.\n" );
 			break;
 		}
 
-		skb_reserve( skb, NET_IP_ALIGN );
 		list->buffer[0].address = pci_map_single(priv->pciDev,
 							 skb->data,
 							 TLAN_MAX_FRAME_SIZE,
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 7030bd5e9848..a69c4a48bab9 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -802,13 +802,11 @@ static int tsi108_refill_rx(struct net_device *dev, int budget)
 		int rx = data->rxhead;
 		struct sk_buff *skb;
 
-		data->rxskbs[rx] = skb = netdev_alloc_skb(dev,
-							  TSI108_RXBUF_SIZE + 2);
+		skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
+		data->rxskbs[rx] = skb;
 		if (!skb)
 			break;
 
-		skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
-
 		data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
 						       TSI108_RX_SKB_SIZE,
 						       DMA_FROM_DEVICE);
@@ -1356,7 +1354,7 @@ static int tsi108_open(struct net_device *dev)
 	for (i = 0; i < TSI108_RXRING_LEN; i++) {
 		struct sk_buff *skb;
 
-		skb = netdev_alloc_skb(dev, TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
 		if (!skb) {
 			/* Bah. No memory for now, but maybe we'll get
 			 * some more later.
@@ -1370,8 +1368,6 @@ static int tsi108_open(struct net_device *dev)
 		}
 
 		data->rxskbs[i] = skb;
-		/* Align the payload on a 4-byte boundary */
-		skb_reserve(skb, 2);
 		data->rxskbs[i] = skb;
 		data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
 		data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 1fd70583be44..4535e89dfff1 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1484,15 +1484,15 @@ static int rhine_rx(struct net_device *dev, int limit)
 				}
 			}
 		} else {
-			struct sk_buff *skb;
+			struct sk_buff *skb = NULL;
 			/* Length should omit the CRC */
 			int pkt_len = data_size - 4;
 
 			/* Check if the packet is long enough to accept without
 			   copying to a minimally-sized skbuff. */
-			if (pkt_len < rx_copybreak &&
-				(skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
-				skb_reserve(skb, NET_IP_ALIGN);	/* 16 byte align the IP header */
+			if (pkt_len < rx_copybreak)
+				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+			if (skb) {
 				pci_dma_sync_single_for_cpu(rp->pdev,
 							    rp->rx_skbuff_dma[entry],
 							    rp->rx_buf_sz,
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index e04e5bee005c..144db6395c95 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1949,10 +1949,9 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *new_skb;
 
-		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
+		new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
 		if (new_skb) {
 			new_skb->ip_summed = rx_skb[0]->ip_summed;
-			skb_reserve(new_skb, 2);
 			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
 			*rx_skb = new_skb;
 			ret = 0;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8d009760277c..556512dc6072 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -283,13 +283,12 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 	do {
 		struct skb_vnet_hdr *hdr;
 
-		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
 		if (unlikely(!skb)) {
 			oom = true;
 			break;
 		}
 
-		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, MAX_PACKET_LEN);
 
 		hdr = skb_vnet_hdr(skb);
@@ -344,14 +343,12 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 	do {
 		skb_frag_t *f;
 
-		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
 		if (unlikely(!skb)) {
 			oom = true;
 			break;
 		}
 
-		skb_reserve(skb, NET_IP_ALIGN);
-
 		f = &skb_shinfo(skb)->frags[0];
 		f->page = get_a_page(vi, gfp);
 		if (!f->page) {