author     Pradeep A. Dalvi <netdev@pradeepdalvi.com>    2012-02-06 06:16:13 -0500
committer  David S. Miller <davem@davemloft.net>         2012-02-08 18:46:38 -0500
commit     dae2e9f430c46c29e3f771110094bd3da3625aa4
tree       b78b2671d2566ea5795349e5dadf8ef786b507f5 /drivers/net/ethernet/packetengines
parent     c4062dfc425e94290ac427a98d6b4721dd2bc91f
netdev: ethernet dev_alloc_skb to netdev_alloc_skb
Replaced the deprecated dev_alloc_skb() with netdev_alloc_skb() in drivers/net/ethernet
  - Removed the now-redundant skb->dev = dev assignments after netdev_alloc_skb(),
    since netdev_alloc_skb() already associates the skb with the device
Signed-off-by: Pradeep A Dalvi <netdev@pradeepdalvi.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
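The conversion applies the same pattern at every call site. Below is a minimal sketch of that pattern, not code from either driver: "priv" is a placeholder for the driver-private struct (hmp in hamachi.c, yp in yellowfin.c), and the surrounding ring-refill loop is elided. The key point is that netdev_alloc_skb() takes the net_device and attaches the allocated skb to it internally, which is what makes the manual skb->dev assignment removable:

	/* Old pattern: dev_alloc_skb() knows nothing about the device,
	 * so the caller has to attach the skb to it by hand. */
	struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_sz + 2);
	if (skb == NULL)
		break;			/* refill loop elided in this sketch */
	skb->dev = dev;			/* manual device association */
	skb_reserve(skb, 2);		/* 16 byte align the IP header */

	/* New pattern: netdev_alloc_skb() receives the net_device and
	 * sets skb->dev itself, so the assignment above becomes dead code. */
	skb = netdev_alloc_skb(dev, priv->rx_buf_sz + 2);
	if (skb == NULL)
		break;
	skb_reserve(skb, 2);		/* 16 byte align the IP header */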
Diffstat (limited to 'drivers/net/ethernet/packetengines')
 drivers/net/ethernet/packetengines/hamachi.c   | 8 +++-----
 drivers/net/ethernet/packetengines/yellowfin.c | 8 +++-----
 2 files changed, 6 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 3458df3780b8..0d29f5f4b8e4 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1188,11 +1188,10 @@ static void hamachi_init_ring(struct net_device *dev)
 	}
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
+		struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
 		hmp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
 			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -1488,7 +1487,7 @@ static int hamachi_rx(struct net_device *dev)
 		/* Check if the packet is long enough to accept without copying
 		   to a minimally-sized skbuff. */
 		if (pkt_len < rx_copybreak &&
-		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 #ifdef RX_CHECKSUM
 			printk(KERN_ERR "%s: rx_copybreak non-zero "
 			       "not good with RX_CHECKSUM\n", dev->name);
@@ -1591,12 +1590,11 @@ static int hamachi_rx(struct net_device *dev)
 		entry = hmp->dirty_rx % RX_RING_SIZE;
 		desc = &(hmp->rx_ring[entry]);
 		if (hmp->rx_skbuff[entry] == NULL) {
-			struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
+			struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
 
 			hmp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;		/* Better luck next round. */
-			skb->dev = dev;		/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
 				skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 4a5774271bd4..7757b80ef924 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -743,11 +743,10 @@ static int yellowfin_init_ring(struct net_device *dev)
 	}
 
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
+		struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
 		yp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -1133,7 +1132,7 @@ static int yellowfin_rx(struct net_device *dev)
 					       PCI_DMA_FROMDEVICE);
 			yp->rx_skbuff[entry] = NULL;
 		} else {
-			skb = dev_alloc_skb(pkt_len + 2);
+			skb = netdev_alloc_skb(dev, pkt_len + 2);
 			if (skb == NULL)
 				break;
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
@@ -1156,11 +1155,10 @@ static int yellowfin_rx(struct net_device *dev)
 	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
 		entry = yp->dirty_rx % RX_RING_SIZE;
 		if (yp->rx_skbuff[entry] == NULL) {
-			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
+			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
 			if (skb == NULL)
 				break;			/* Better luck next round. */
 			yp->rx_skbuff[entry] = skb;
-			skb->dev = dev;		/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));