author		Stephen Hemminger <shemminger@vyatta.com>	2008-04-16 19:37:31 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-05-22 14:12:50 -0400
commit		c73d2589b784098b2bb6e986c1a7b04e9555fbd3 (patch)
tree		44ddff3852079beaac7dd67aedc0bd636f362b6f
parent		47f98c7d4bfc08e5efffd4fe22296044ab4db21e (diff)
via-velocity: use netdev_alloc_skb
Use netdev_alloc_skb for rx buffer allocation. This sets skb->dev
and can be overridden for NUMA machines.
Change the code to return the new buffer rather than passing it back by reference.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
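
For context, a minimal sketch of the two allocation patterns this patch trades between. The helper names rx_alloc_old_style() and rx_alloc_new_style() are hypothetical illustrations, not driver code; only dev_alloc_skb(), netdev_alloc_skb() and skb_reserve() are the actual kernel APIs involved.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Old pattern: dev_alloc_skb() knows nothing about the device, so the
 * driver has to set skb->dev by hand. */
static struct sk_buff *rx_alloc_old_style(struct net_device *dev,
					  unsigned int pkt_size)
{
	struct sk_buff *skb = dev_alloc_skb(pkt_size + 2);

	if (skb) {
		skb->dev = dev;
		skb_reserve(skb, 2);	/* align the IP header */
	}
	return skb;
}

/* New pattern: netdev_alloc_skb() takes the device, sets skb->dev itself,
 * and (per the commit message) lets the allocation be overridden, e.g.
 * for NUMA-aware placement. */
static struct sk_buff *rx_alloc_new_style(struct net_device *dev,
					  unsigned int pkt_size)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_size + 2);

	if (skb)
		skb_reserve(skb, 2);	/* align the IP header */
	return skb;
}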
-rw-r--r--	drivers/net/via-velocity.c	19
1 files changed, 6 insertions, 13 deletions
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 6b8d882d197b..3e94c8fff9e2 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1495,24 +1495,18 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
  * enough. This function returns a negative value if the received
  * packet is too big or if memory is exhausted.
  */
-static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
-				   struct velocity_info *vptr)
+static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
+			    struct velocity_info *vptr)
 {
 	int ret = -1;
-
 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *new_skb;
 
-		new_skb = dev_alloc_skb(pkt_size + 2);
+		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
 		if (new_skb) {
-			new_skb->dev = vptr->dev;
 			new_skb->ip_summed = rx_skb[0]->ip_summed;
-
-			if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
-				skb_reserve(new_skb, 2);
-
-			skb_copy_from_linear_data(rx_skb[0], new_skb->data,
-						  pkt_size);
+			skb_reserve(new_skb, 2);
+			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
 			*rx_skb = new_skb;
 			ret = 0;
 		}
@@ -1629,7 +1623,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	struct rx_desc *rd = &(vptr->rd_ring[idx]);
 	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
 
-	rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
+	rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64);
 	if (rd_info->skb == NULL)
 		return -ENOMEM;
 
@@ -1638,7 +1632,6 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 *	64byte alignment.
 	 */
 	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
-	rd_info->skb->dev = vptr->dev;
 	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*