diff options
author | Lendacky, Thomas <Thomas.Lendacky@amd.com> | 2014-11-04 17:06:44 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-11-05 21:50:12 -0500 |
commit | 08dcc47c06c79de31b9b2c0b4637f6119e5701fa (patch) | |
tree | 55a56cf66275b02c33afb6e4a2d2296b3d1450d8 /drivers/net/ethernet/amd/xgbe/xgbe-drv.c | |
parent | aa96bd3c9fda52b9f57128798b49d662e5d4659c (diff) |
amd-xgbe: Use page allocations for Rx buffers
Use page allocations for Rx buffers instead of pre-allocating skbs
of a set size.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')
-rw-r--r-- | drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 95 |
1 file changed, 61 insertions(+), 34 deletions(-)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 8cb2372f8fa9..d65f5aa8fdce 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c | |||
@@ -218,8 +218,8 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) | |||
218 | } | 218 | } |
219 | 219 | ||
220 | rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; | 220 | rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
221 | if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE) | 221 | rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE); |
222 | rx_buf_size = XGBE_RX_MIN_BUF_SIZE; | 222 | |
223 | rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) & | 223 | rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) & |
224 | ~(XGBE_RX_BUF_ALIGN - 1); | 224 | ~(XGBE_RX_BUF_ALIGN - 1); |
225 | 225 | ||
@@ -546,7 +546,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata) | |||
546 | DBGPR("<--xgbe_init_rx_coalesce\n"); | 546 | DBGPR("<--xgbe_init_rx_coalesce\n"); |
547 | } | 547 | } |
548 | 548 | ||
549 | static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata) | 549 | static void xgbe_free_tx_data(struct xgbe_prv_data *pdata) |
550 | { | 550 | { |
551 | struct xgbe_desc_if *desc_if = &pdata->desc_if; | 551 | struct xgbe_desc_if *desc_if = &pdata->desc_if; |
552 | struct xgbe_channel *channel; | 552 | struct xgbe_channel *channel; |
@@ -554,7 +554,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata) | |||
554 | struct xgbe_ring_data *rdata; | 554 | struct xgbe_ring_data *rdata; |
555 | unsigned int i, j; | 555 | unsigned int i, j; |
556 | 556 | ||
557 | DBGPR("-->xgbe_free_tx_skbuff\n"); | 557 | DBGPR("-->xgbe_free_tx_data\n"); |
558 | 558 | ||
559 | channel = pdata->channel; | 559 | channel = pdata->channel; |
560 | for (i = 0; i < pdata->channel_count; i++, channel++) { | 560 | for (i = 0; i < pdata->channel_count; i++, channel++) { |
@@ -564,14 +564,14 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata) | |||
564 | 564 | ||
565 | for (j = 0; j < ring->rdesc_count; j++) { | 565 | for (j = 0; j < ring->rdesc_count; j++) { |
566 | rdata = XGBE_GET_DESC_DATA(ring, j); | 566 | rdata = XGBE_GET_DESC_DATA(ring, j); |
567 | desc_if->unmap_skb(pdata, rdata); | 567 | desc_if->unmap_rdata(pdata, rdata); |
568 | } | 568 | } |
569 | } | 569 | } |
570 | 570 | ||
571 | DBGPR("<--xgbe_free_tx_skbuff\n"); | 571 | DBGPR("<--xgbe_free_tx_data\n"); |
572 | } | 572 | } |
573 | 573 | ||
574 | static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata) | 574 | static void xgbe_free_rx_data(struct xgbe_prv_data *pdata) |
575 | { | 575 | { |
576 | struct xgbe_desc_if *desc_if = &pdata->desc_if; | 576 | struct xgbe_desc_if *desc_if = &pdata->desc_if; |
577 | struct xgbe_channel *channel; | 577 | struct xgbe_channel *channel; |
@@ -579,7 +579,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata) | |||
579 | struct xgbe_ring_data *rdata; | 579 | struct xgbe_ring_data *rdata; |
580 | unsigned int i, j; | 580 | unsigned int i, j; |
581 | 581 | ||
582 | DBGPR("-->xgbe_free_rx_skbuff\n"); | 582 | DBGPR("-->xgbe_free_rx_data\n"); |
583 | 583 | ||
584 | channel = pdata->channel; | 584 | channel = pdata->channel; |
585 | for (i = 0; i < pdata->channel_count; i++, channel++) { | 585 | for (i = 0; i < pdata->channel_count; i++, channel++) { |
@@ -589,11 +589,11 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata) | |||
589 | 589 | ||
590 | for (j = 0; j < ring->rdesc_count; j++) { | 590 | for (j = 0; j < ring->rdesc_count; j++) { |
591 | rdata = XGBE_GET_DESC_DATA(ring, j); | 591 | rdata = XGBE_GET_DESC_DATA(ring, j); |
592 | desc_if->unmap_skb(pdata, rdata); | 592 | desc_if->unmap_rdata(pdata, rdata); |
593 | } | 593 | } |
594 | } | 594 | } |
595 | 595 | ||
596 | DBGPR("<--xgbe_free_rx_skbuff\n"); | 596 | DBGPR("<--xgbe_free_rx_data\n"); |
597 | } | 597 | } |
598 | 598 | ||
599 | static void xgbe_adjust_link(struct net_device *netdev) | 599 | static void xgbe_adjust_link(struct net_device *netdev) |
@@ -839,8 +839,8 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset) | |||
839 | xgbe_stop(pdata); | 839 | xgbe_stop(pdata); |
840 | synchronize_irq(pdata->irq_number); | 840 | synchronize_irq(pdata->irq_number); |
841 | 841 | ||
842 | xgbe_free_tx_skbuff(pdata); | 842 | xgbe_free_tx_data(pdata); |
843 | xgbe_free_rx_skbuff(pdata); | 843 | xgbe_free_rx_data(pdata); |
844 | 844 | ||
845 | /* Issue software reset to device if requested */ | 845 | /* Issue software reset to device if requested */ |
846 | if (reset) | 846 | if (reset) |
@@ -1609,7 +1609,7 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel) | |||
1609 | struct xgbe_ring *ring = channel->rx_ring; | 1609 | struct xgbe_ring *ring = channel->rx_ring; |
1610 | struct xgbe_ring_data *rdata; | 1610 | struct xgbe_ring_data *rdata; |
1611 | 1611 | ||
1612 | desc_if->realloc_skb(channel); | 1612 | desc_if->realloc_rx_buffer(channel); |
1613 | 1613 | ||
1614 | /* Update the Rx Tail Pointer Register with address of | 1614 | /* Update the Rx Tail Pointer Register with address of |
1615 | * the last cleaned entry */ | 1615 | * the last cleaned entry */ |
@@ -1618,6 +1618,37 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel) | |||
1618 | lower_32_bits(rdata->rdesc_dma)); | 1618 | lower_32_bits(rdata->rdesc_dma)); |
1619 | } | 1619 | } |
1620 | 1620 | ||
1621 | static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, | ||
1622 | struct xgbe_ring_data *rdata, | ||
1623 | unsigned int len) | ||
1624 | { | ||
1625 | struct net_device *netdev = pdata->netdev; | ||
1626 | struct sk_buff *skb; | ||
1627 | u8 *packet; | ||
1628 | unsigned int copy_len; | ||
1629 | |||
1630 | skb = netdev_alloc_skb_ip_align(netdev, XGBE_SKB_ALLOC_SIZE); | ||
1631 | if (!skb) | ||
1632 | return NULL; | ||
1633 | |||
1634 | packet = page_address(rdata->rx_pa.pages) + rdata->rx_pa.pages_offset; | ||
1635 | copy_len = min_t(unsigned int, XGBE_SKB_ALLOC_SIZE, len); | ||
1636 | skb_copy_to_linear_data(skb, packet, copy_len); | ||
1637 | skb_put(skb, copy_len); | ||
1638 | |||
1639 | rdata->rx_pa.pages_offset += copy_len; | ||
1640 | len -= copy_len; | ||
1641 | if (len) | ||
1642 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, | ||
1643 | rdata->rx_pa.pages, | ||
1644 | rdata->rx_pa.pages_offset, | ||
1645 | len, rdata->rx_dma_len); | ||
1646 | else | ||
1647 | put_page(rdata->rx_pa.pages); | ||
1648 | |||
1649 | return skb; | ||
1650 | } | ||
1651 | |||
1621 | static int xgbe_tx_poll(struct xgbe_channel *channel) | 1652 | static int xgbe_tx_poll(struct xgbe_channel *channel) |
1622 | { | 1653 | { |
1623 | struct xgbe_prv_data *pdata = channel->pdata; | 1654 | struct xgbe_prv_data *pdata = channel->pdata; |
@@ -1651,7 +1682,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) | |||
1651 | #endif | 1682 | #endif |
1652 | 1683 | ||
1653 | /* Free the SKB and reset the descriptor for re-use */ | 1684 | /* Free the SKB and reset the descriptor for re-use */ |
1654 | desc_if->unmap_skb(pdata, rdata); | 1685 | desc_if->unmap_rdata(pdata, rdata); |
1655 | hw_if->tx_desc_reset(rdata); | 1686 | hw_if->tx_desc_reset(rdata); |
1656 | 1687 | ||
1657 | processed++; | 1688 | processed++; |
@@ -1726,9 +1757,9 @@ read_again: | |||
1726 | ring->cur++; | 1757 | ring->cur++; |
1727 | ring->dirty++; | 1758 | ring->dirty++; |
1728 | 1759 | ||
1729 | dma_unmap_single(pdata->dev, rdata->skb_dma, | 1760 | dma_sync_single_for_cpu(pdata->dev, rdata->rx_dma, |
1730 | rdata->skb_dma_len, DMA_FROM_DEVICE); | 1761 | rdata->rx_dma_len, |
1731 | rdata->skb_dma = 0; | 1762 | DMA_FROM_DEVICE); |
1732 | 1763 | ||
1733 | incomplete = XGMAC_GET_BITS(packet->attributes, | 1764 | incomplete = XGMAC_GET_BITS(packet->attributes, |
1734 | RX_PACKET_ATTRIBUTES, | 1765 | RX_PACKET_ATTRIBUTES, |
@@ -1753,26 +1784,22 @@ read_again: | |||
1753 | 1784 | ||
1754 | if (!context) { | 1785 | if (!context) { |
1755 | put_len = rdata->len - len; | 1786 | put_len = rdata->len - len; |
1756 | if (skb) { | 1787 | len += put_len; |
1757 | if (pskb_expand_head(skb, 0, put_len, | 1788 | |
1758 | GFP_ATOMIC)) { | 1789 | if (!skb) { |
1759 | DBGPR("pskb_expand_head error\n"); | 1790 | skb = xgbe_create_skb(pdata, rdata, put_len); |
1760 | if (incomplete) { | 1791 | if (!skb) { |
1761 | error = 1; | 1792 | error = 1; |
1762 | goto read_again; | 1793 | goto read_again; |
1763 | } | ||
1764 | |||
1765 | dev_kfree_skb(skb); | ||
1766 | goto next_packet; | ||
1767 | } | 1794 | } |
1768 | memcpy(skb_tail_pointer(skb), rdata->skb->data, | ||
1769 | put_len); | ||
1770 | } else { | 1795 | } else { |
1771 | skb = rdata->skb; | 1796 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, |
1772 | rdata->skb = NULL; | 1797 | rdata->rx_pa.pages, |
1798 | rdata->rx_pa.pages_offset, | ||
1799 | put_len, rdata->rx_dma_len); | ||
1773 | } | 1800 | } |
1774 | skb_put(skb, put_len); | 1801 | |
1775 | len += put_len; | 1802 | rdata->rx_pa.pages = NULL; |
1776 | } | 1803 | } |
1777 | 1804 | ||
1778 | if (incomplete || context_next) | 1805 | if (incomplete || context_next) |