about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/adaptec/starfire.c
diff options
context:
space:
mode:
authorPradeep A Dalvi <netdev@pradeepdalvi.com>2012-02-04 21:49:09 -0500
committerDavid S. Miller <davem@davemloft.net>2012-02-06 11:41:20 -0500
commit1d266430546acf01438ae42d0a7370db4817e2ad (patch)
tree149268ba8ad2b3288ae001e5ab28146e4361a6d0 /drivers/net/ethernet/adaptec/starfire.c
parent3238a9be4d7ad95c741bcfe6c147406eeef62d95 (diff)
netdev: ethernet dev_alloc_skb to netdev_alloc_skb
Replaced deprecated dev_alloc_skb with netdev_alloc_skb in drivers/net/ethernet - Removed extra skb->dev = dev after netdev_alloc_skb Signed-off-by: Pradeep A Dalvi <netdev@pradeepdalvi.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/adaptec/starfire.c')
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 11fc2eccb0fd..d896816512ca 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1179,12 +1179,11 @@ static void init_ring(struct net_device *dev)
1179 1179
1180 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 1180 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1181 for (i = 0; i < RX_RING_SIZE; i++) { 1181 for (i = 0; i < RX_RING_SIZE; i++) {
1182 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz); 1182 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1183 np->rx_info[i].skb = skb; 1183 np->rx_info[i].skb = skb;
1184 if (skb == NULL) 1184 if (skb == NULL)
1185 break; 1185 break;
1186 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1186 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1187 skb->dev = dev; /* Mark as being used by this device. */
1188 /* Grrr, we cannot offset to correctly align the IP header. */ 1187 /* Grrr, we cannot offset to correctly align the IP header. */
1189 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); 1188 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1190 } 1189 }
@@ -1472,7 +1471,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1472 /* Check if the packet is long enough to accept without copying 1471 /* Check if the packet is long enough to accept without copying
1473 to a minimally-sized skbuff. */ 1472 to a minimally-sized skbuff. */
1474 if (pkt_len < rx_copybreak && 1473 if (pkt_len < rx_copybreak &&
1475 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1474 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1476 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1475 skb_reserve(skb, 2); /* 16 byte align the IP header */
1477 pci_dma_sync_single_for_cpu(np->pci_dev, 1476 pci_dma_sync_single_for_cpu(np->pci_dev,
1478 np->rx_info[entry].mapping, 1477 np->rx_info[entry].mapping,
@@ -1596,13 +1595,12 @@ static void refill_rx_ring(struct net_device *dev)
1596 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { 1595 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1597 entry = np->dirty_rx % RX_RING_SIZE; 1596 entry = np->dirty_rx % RX_RING_SIZE;
1598 if (np->rx_info[entry].skb == NULL) { 1597 if (np->rx_info[entry].skb == NULL) {
1599 skb = dev_alloc_skb(np->rx_buf_sz); 1598 skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1600 np->rx_info[entry].skb = skb; 1599 np->rx_info[entry].skb = skb;
1601 if (skb == NULL) 1600 if (skb == NULL)
1602 break; /* Better luck next round. */ 1601 break; /* Better luck next round. */
1603 np->rx_info[entry].mapping = 1602 np->rx_info[entry].mapping =
1604 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1603 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1605 skb->dev = dev; /* Mark as being used by this device. */
1606 np->rx_ring[entry].rxaddr = 1604 np->rx_ring[entry].rxaddr =
1607 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); 1605 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1608 } 1606 }