Diffstat (limited to 'drivers/net')
21 files changed, 46 insertions, 51 deletions
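Every hunk below applies the same conversion: a bare dev_alloc_skb() call becomes netdev_alloc_skb(), which takes the owning net_device and records it in skb->dev itself, so the explicit "skb->dev = dev;" assignments that used to follow the allocation can be dropped. A minimal before/after sketch of the pattern (illustrative only; the helper names and rx_buf_sz parameter are generic, not taken from any one driver):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Old pattern: allocation carries no device association. */
	static struct sk_buff *rx_alloc_old(struct net_device *dev, unsigned int rx_buf_sz)
	{
		struct sk_buff *skb = dev_alloc_skb(rx_buf_sz);

		if (skb)
			skb->dev = dev;	/* had to be set by hand */
		return skb;
	}

	/* New pattern: netdev_alloc_skb() sets skb->dev for us. */
	static struct sk_buff *rx_alloc_new(struct net_device *dev, unsigned int rx_buf_sz)
	{
		return netdev_alloc_skb(dev, rx_buf_sz);
	}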
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3c315f46859b..85e044567f68 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -397,7 +397,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
 		   first_frag, last_frag, len);
 
-	skb = dev_alloc_skb(len + RX_OFFSET);
+	skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET);
 	if (!skb) {
 		bp->stats.rx_dropped++;
 		for (frag = first_frag; ; frag = NEXT_RX(frag)) {
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index f328da24c8fa..d5ff93653e4c 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -911,7 +911,7 @@ dma_rx(struct net_device *dev)
 	}
 
 	/* Malloc up new buffer. */
-	skb = dev_alloc_skb(length + 2);
+	skb = netdev_alloc_skb(dev, length + 2);
 	if (skb == NULL) {
 		if (net_debug)	/* I don't think we want to do this to a stressed system */
 			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
@@ -1616,7 +1616,7 @@ net_rx(struct net_device *dev)
 	}
 
 	/* Malloc up new buffer. */
-	skb = dev_alloc_skb(length + 2);
+	skb = netdev_alloc_skb(dev, length + 2);
 	if (skb == NULL) {
 #if 0		/* Again, this seems a cruel thing to do */
 		printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 4317af8d2f0a..c21e5ab8d1ef 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -282,7 +282,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 		if (rstat0 & RSTAT0_CRCI)
 			length -= 4;
 
-		skb = dev_alloc_skb(length + 2);
+		skb = netdev_alloc_skb(dev, length + 2);
 		if (likely(skb != NULL)) {
 			struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
 			skb_reserve(skb, 2);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 493cc6202081..42383ab5227e 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1028,7 +1028,7 @@ dm9000_rx(struct net_device *dev)
 
 		/* Move data from DM9000 */
 		if (GoodPacket &&
-		    ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
+		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
 			skb_reserve(skb, 2);
 			rdptr = (u8 *) skb_put(skb, RxLen - 4);
 
diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
index f9df5e4d0341..1879f84a25a3 100644
--- a/drivers/net/ethernet/dec/ewrk3.c
+++ b/drivers/net/ethernet/dec/ewrk3.c
@@ -986,8 +986,10 @@ static int ewrk3_rx(struct net_device *dev)
 				dev->stats.rx_fifo_errors++;
 			} else {
 				struct sk_buff *skb;
+				skb = netdev_alloc_skb(dev,
+						       pkt_len + 2);
 
-				if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+				if (skb != NULL) {
 					unsigned char *p;
 					skb_reserve(skb, 2);	/* Align to 16 bytes */
 					p = skb_put(skb, pkt_len);
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 1eb46a0bb488..68f1c39184df 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -439,7 +439,7 @@ static void de_rx (struct de_private *de)
 			   rx_tail, status, len, copying_skb);
 
 		buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
-		copy_skb = dev_alloc_skb (buflen);
+		copy_skb = netdev_alloc_skb(de->dev, buflen);
 		if (unlikely(!copy_skb)) {
 			de->net_stats.rx_dropped++;
 			drop = 1;
@@ -1283,12 +1283,10 @@ static int de_refill_rx (struct de_private *de)
 	for (i = 0; i < DE_RX_RING_SIZE; i++) {
 		struct sk_buff *skb;
 
-		skb = dev_alloc_skb(de->rx_buf_sz);
+		skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
 		if (!skb)
 			goto err_out;
 
-		skb->dev = de->dev;
-
 		de->rx_skb[i].mapping = pci_map_single(de->pdev,
 			skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		de->rx_skb[i].skb = skb;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 4d71f5ae20c8..93583408a325 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3598,7 +3598,7 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
     struct sk_buff *ret;
     u_long i=0, tmp;
 
-    p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2);
+    p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
     if (!p) return NULL;
 
     tmp = virt_to_bus(p->data);
@@ -3618,7 +3618,7 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
 #else
     if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
 
-    p = dev_alloc_skb(len + 2);
+    p = netdev_alloc_skb(dev, len + 2);
     if (!p) return NULL;
 
     skb_reserve(p, 2);	                               /* Align */
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index feaee7424bd9..28a5e425fecf 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -69,7 +69,8 @@ int tulip_refill_rx(struct net_device *dev)
 			struct sk_buff *skb;
 			dma_addr_t mapping;
 
-			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
+			skb = tp->rx_buffers[entry].skb =
+				netdev_alloc_skb(dev, PKT_BUF_SZ);
 			if (skb == NULL)
 				break;
 
@@ -77,7 +78,6 @@ int tulip_refill_rx(struct net_device *dev)
 						 PCI_DMA_FROMDEVICE);
 			tp->rx_buffers[entry].mapping = mapping;
 
-			skb->dev = dev;			/* Mark as being used by this device. */
 			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
 			refilled++;
 		}
@@ -202,7 +202,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
 			/* Check if the packet is long enough to accept without copying
 			   to a minimally-sized skbuff. */
 			if (pkt_len < tulip_rx_copybreak &&
-			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 				pci_dma_sync_single_for_cpu(tp->pdev,
 							    tp->rx_buffers[entry].mapping,
@@ -428,7 +428,7 @@ static int tulip_rx(struct net_device *dev)
 		/* Check if the packet is long enough to accept without copying
 		   to a minimally-sized skbuff. */
 		if (pkt_len < tulip_rx_copybreak &&
-		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
 			pci_dma_sync_single_for_cpu(tp->pdev,
 						    tp->rx_buffers[entry].mapping,
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 17ecb18341c9..fea3641d9398 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -636,16 +636,15 @@ static void tulip_init_ring(struct net_device *dev)
 		dma_addr_t mapping;
 
 		/* Note the receive buffer must be longword aligned.
-		   dev_alloc_skb() provides 16 byte alignment. But do *not*
+		   netdev_alloc_skb() provides 16 byte alignment. But do *not*
 		   use skb_reserve() to align the IP header! */
-		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
 		tp->rx_buffers[i].skb = skb;
 		if (skb == NULL)
 			break;
 		mapping = pci_map_single(tp->pdev, skb->data,
 					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 		tp->rx_buffers[i].mapping = mapping;
-		skb->dev = dev;			/* Mark as being used by this device. */
 		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
 		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
 	}
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 52da7b2fe3b6..2ac6fff0363a 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -815,7 +815,7 @@ static void init_rxtx_rings(struct net_device *dev)
 
 	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
@@ -1231,7 +1231,7 @@ static int netdev_rx(struct net_device *dev)
 		/* Check if the packet is long enough to accept without copying
 		   to a minimally-sized skbuff. */
 		if (pkt_len < rx_copybreak &&
-		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
 			pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
 						    np->rx_skbuff[entry]->len,
@@ -1270,7 +1270,7 @@ static int netdev_rx(struct net_device *dev)
 		struct sk_buff *skb;
 		entry = np->dirty_rx % RX_RING_SIZE;
 		if (np->rx_skbuff[entry] == NULL) {
-			skb = dev_alloc_skb(np->rx_buf_sz);
+			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
 			np->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;			/* Better luck next round. */
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index b7c73eefb54b..fdb329fe6e8e 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -1084,7 +1084,7 @@ investigate_read_descriptor(struct net_device *dev, struct xircom_private *card,
 			pkt_len = 1518;
 		}
 
-		skb = dev_alloc_skb(pkt_len + 2);
+		skb = netdev_alloc_skb(dev, pkt_len + 2);
 		if (skb == NULL) {
 			dev->stats.rx_dropped++;
 			goto out;
diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c
index c24fab1e9cbe..682750c052c8 100644
--- a/drivers/net/ethernet/dlink/de600.c
+++ b/drivers/net/ethernet/dlink/de600.c
@@ -335,7 +335,7 @@ static void de600_rx_intr(struct net_device *dev)
 		return;
 	}
 
-	skb = dev_alloc_skb(size+2);
+	skb = netdev_alloc_skb(dev, size + 2);
 	if (skb == NULL) {
 		printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
 		return;
diff --git a/drivers/net/ethernet/dlink/de620.c b/drivers/net/ethernet/dlink/de620.c
index 3b934ab784d3..afc5aaac6b60 100644
--- a/drivers/net/ethernet/dlink/de620.c
+++ b/drivers/net/ethernet/dlink/de620.c
@@ -650,7 +650,7 @@ static int de620_rx_intr(struct net_device *dev)
 		printk(KERN_WARNING "%s: Illegal packet size: %d!\n", dev->name, size);
 	}
 	else { /* Good packet? */
-		skb = dev_alloc_skb(size+2);
+		skb = netdev_alloc_skb(dev, size + 2);
 		if (skb == NULL) { /* Yeah, but no place to put it... */
 			printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
 			dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 28a3a9b50b8b..7227f29ee2ee 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1020,11 +1020,11 @@ static void init_ring(struct net_device *dev)
 
 	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
+		struct sk_buff *skb =
+			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
 			dma_map_single(&np->pci_dev->dev, skb->data,
@@ -1358,7 +1358,7 @@ static void rx_poll(unsigned long data)
 		/* Check if the packet is long enough to accept without copying
 		   to a minimally-sized skbuff. */
 		if (pkt_len < rx_copybreak &&
-		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
 			dma_sync_single_for_cpu(&np->pci_dev->dev,
 					le32_to_cpu(desc->frag[0].addr),
@@ -1411,11 +1411,10 @@ static void refill_rx (struct net_device *dev)
 		struct sk_buff *skb;
 		entry = np->dirty_rx % RX_RING_SIZE;
 		if (np->rx_skbuff[entry] == NULL) {
-			skb = dev_alloc_skb(np->rx_buf_sz + 2);
+			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
 			np->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;		/* Better luck next round. */
-			skb->dev = dev;		/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
 				dma_map_single(&np->pci_dev->dev, skb->data,
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index fe48cb7dde21..8536e376555a 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -421,7 +421,7 @@ static int dnet_poll(struct napi_struct *napi, int budget)
 			printk(KERN_ERR "%s packet receive error %x\n",
 			       __func__, cmd_word);
 
-		skb = dev_alloc_skb(pkt_len + 5);
+		skb = netdev_alloc_skb(dev, pkt_len + 5);
 		if (skb != NULL) {
 			/* Align IP on 16 byte boundaries */
 			skb_reserve(skb, 2);
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index c82d444b582d..1637b9862292 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1070,14 +1070,13 @@ static void allocate_rx_buffers(struct net_device *dev)
 	while (np->really_rx_count != RX_RING_SIZE) {
 		struct sk_buff *skb;
 
-		skb = dev_alloc_skb(np->rx_buf_sz);
+		skb = netdev_alloc_skb(dev, np->rx_buf_sz);
 		if (skb == NULL)
 			break;	/* Better luck next round. */
 
 		while (np->lack_rxbuf->skbuff)
 			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
 
-		skb->dev = dev;	/* Mark as being used by this device. */
 		np->lack_rxbuf->skbuff = skb;
 		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
 			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
@@ -1265,7 +1264,7 @@ static void init_ring(struct net_device *dev)
 
 	/* allocate skb for rx buffers */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
 
 		if (skb == NULL) {
 			np->lack_rxbuf = &np->rx_ring[i];
@@ -1274,7 +1273,6 @@ static void init_ring(struct net_device *dev)
 
 		++np->really_rx_count;
 		np->rx_ring[i].skbuff = skb;
-		skb->dev = dev;	/* Mark as being used by this device. */
 		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
 			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		np->rx_ring[i].status = RXOWN;
@@ -1704,7 +1702,7 @@ static int netdev_rx(struct net_device *dev)
 		/* Check if the packet is long enough to accept without copying
 		   to a minimally-sized skbuff. */
 		if (pkt_len < rx_copybreak &&
-		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
 			pci_dma_sync_single_for_cpu(np->pci_dev,
 						    np->cur_rx->buffer,
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 336edd7e0b78..f976619d1b21 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -711,7 +711,7 @@ fec_enet_rx(struct net_device *ndev)
 		 * include that when passing upstream as it messes up
 		 * bridging applications.
 		 */
-		skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
+		skb = netdev_alloc_skb(dev, pkt_len - 4 + NET_IP_ALIGN);
 
 		if (unlikely(!skb)) {
 			printk("%s: Memory squeeze, dropping packet.\n",
@@ -1210,7 +1210,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 
 	bdp = fep->rx_bd_base;
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
+		skb = netdev_alloc_skb(dev, FEC_ENET_RX_FRSIZE);
 		if (!skb) {
 			fec_enet_free_buffers(ndev);
 			return -ENOMEM;
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 30745b56fe5d..7b34d8c698da 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -160,7 +160,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task
 	struct sk_buff *skb;
 
 	while (!bcom_queue_full(rxtsk)) {
-		skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
+		skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
 		if (!skb)
 			return -EAGAIN;
 
@@ -416,7 +416,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
 
 		/* skbs are allocated on open, so now we allocate a new one,
 		 * and remove the old (with the packet) */
-		skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
+		skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
 		if (!skb) {
 			/* Can't get a new one : reuse the same & drop pkt */
 			dev_notice(&dev->dev, "Low memory - dropped packet.\n");
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 910a8e18a9ae..999638a7c851 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -154,7 +154,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
 
 			if (pkt_len <= fpi->rx_copybreak) {
 				/* +2 to make IP header L1 cache aligned */
-				skbn = dev_alloc_skb(pkt_len + 2);
+				skbn = netdev_alloc_skb(dev, pkt_len + 2);
 				if (skbn != NULL) {
 					skb_reserve(skbn, 2);	/* align IP header */
 					skb_copy_from_linear_data(skb,
@@ -165,7 +165,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
 					skbn = skbt;
 				}
 			} else {
-				skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
 
 				if (skbn)
 					skb_align(skbn, ENET_RX_ALIGN);
@@ -286,7 +286,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
 
 			if (pkt_len <= fpi->rx_copybreak) {
 				/* +2 to make IP header L1 cache aligned */
-				skbn = dev_alloc_skb(pkt_len + 2);
+				skbn = netdev_alloc_skb(dev, pkt_len + 2);
 				if (skbn != NULL) {
 					skb_reserve(skbn, 2);	/* align IP header */
 					skb_copy_from_linear_data(skb,
@@ -297,7 +297,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
 					skbn = skbt;
 				}
 			} else {
-				skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
 
 				if (skbn)
 					skb_align(skbn, ENET_RX_ALIGN);
@@ -504,7 +504,7 @@ void fs_init_bds(struct net_device *dev)
 	 * Initialize the receive buffer descriptors.
 	 */
 	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
-		skb = dev_alloc_skb(ENET_RX_FRSIZE);
+		skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
 		if (skb == NULL) {
 			dev_warn(fep->dev,
 				 "Memory squeeze, unable to allocate skb\n");
@@ -592,7 +592,7 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
 	struct fs_enet_private *fep = netdev_priv(dev);
 
 	/* Alloc new skb */
-	new_skb = dev_alloc_skb(skb->len + 4);
+	new_skb = netdev_alloc_skb(dev, skb->len + 4);
 	if (!new_skb) {
 		if (net_ratelimit()) {
 			dev_warn(fep->dev,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ba2dc083bfc0..ec0905461312 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -214,8 +214,9 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
 
 	skb = __skb_dequeue(&ugeth->rx_recycle);
 	if (!skb)
-		skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
-				    UCC_GETH_RX_DATA_BUF_ALIGNMENT);
+		skb = netdev_alloc_skb(ugeth->ndev,
+				       ugeth->ug_info->uf_info.max_rx_buf_length +
+				       UCC_GETH_RX_DATA_BUF_ALIGNMENT);
 	if (skb == NULL)
 		return NULL;
 
@@ -227,8 +228,6 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
 			  (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
 			  1)));
 
-	skb->dev = ugeth->ndev;
-
 	out_be32(&((struct qe_bd __iomem *)bd)->buf,
 		 dma_map_single(ugeth->dev,
 				skb->data,
diff --git a/drivers/net/ethernet/fujitsu/at1700.c b/drivers/net/ethernet/fujitsu/at1700.c
index 7c6c908bdf02..586b46fd4eed 100644
--- a/drivers/net/ethernet/fujitsu/at1700.c
+++ b/drivers/net/ethernet/fujitsu/at1700.c
@@ -757,7 +757,7 @@ net_rx(struct net_device *dev)
 			dev->stats.rx_errors++;
 			break;
 		}
-		skb = dev_alloc_skb(pkt_len+3);
+		skb = netdev_alloc_skb(dev, pkt_len + 3);
 		if (skb == NULL) {
 			printk("%s: Memory squeeze, dropping packet (len %d).\n",
 			       dev->name, pkt_len);