author		Kevin Lo <kevlo@kevlo.org>		2008-08-26 23:35:09 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-09-03 10:24:04 -0400
commit		b26b555a7b4decf765ade265fa9da3bd6ff3e9e0 (patch)
tree		353b588b86a382d3f68268acea09ebf51b26a8af /drivers
parent		c844d483004df596b0bd6e68a3be8e0cd85536ce (diff)
via-rhine: changed to use netdev_alloc_skb() from dev_alloc_skb
Use netdev_alloc_skb(). This sets skb->dev and allows arch-specific
allocation. Also clean up the alignment code to use NET_IP_ALIGN.
Signed-off-by: Kevin Lo <kevlo@kevlo.org>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
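For context, a minimal sketch (not part of the patch) of the copy-break allocation pattern the driver moves to. The wrapper function rx_copybreak_alloc() is hypothetical; netdev_alloc_skb(), skb_reserve() and NET_IP_ALIGN are the kernel interfaces used in the hunks below.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * Hypothetical wrapper, for illustration only: netdev_alloc_skb() ties the
 * skb to the device (it sets skb->dev) and allows arch-specific allocation,
 * while NET_IP_ALIGN replaces the hard-coded 2 so that the IP header
 * following the 14-byte Ethernet header ends up 16-byte aligned.
 */
static struct sk_buff *rx_copybreak_alloc(struct net_device *dev,
					  unsigned int pkt_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	return skb;
}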
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/via-rhine.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 96dff04334b8..5b7870080c56 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -914,7 +914,7 @@ static void alloc_rbufs(struct net_device *dev)
 
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
+		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
 		rp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
@@ -1473,8 +1473,8 @@ static int rhine_rx(struct net_device *dev, int limit)
 			/* Check if the packet is long enough to accept without
 			   copying to a minimally-sized skbuff. */
 			if (pkt_len < rx_copybreak &&
-			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-				skb_reserve(skb, 2);	/* 16 byte align the IP header */
+			    (skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
+				skb_reserve(skb, NET_IP_ALIGN);	/* 16 byte align the IP header */
 				pci_dma_sync_single_for_cpu(rp->pdev,
 							    rp->rx_skbuff_dma[entry],
 							    rp->rx_buf_sz,
@@ -1518,7 +1518,7 @@ static int rhine_rx(struct net_device *dev, int limit)
 		struct sk_buff *skb;
 		entry = rp->dirty_rx % RX_RING_SIZE;
 		if (rp->rx_skbuff[entry] == NULL) {
-			skb = dev_alloc_skb(rp->rx_buf_sz);
+			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
 			rp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */