author    Alexander Duyck <alexander.h.duyck@intel.com>  2012-07-20 04:08:18 -0400
committer Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>  2012-08-16 18:44:25 -0400
commit    0549ae20b77d411aefb5271c2c494b9c3f02d972 (patch)
tree      42194614fcaf6384d46f88795c086fd842181c2c /drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
parent    afaa9459de639591ff3318fd215a813c8d794759 (diff)
ixgbe: combine ixgbe_add_rx_frag and ixgbe_can_reuse_page
This patch combines ixgbe_add_rx_frag and ixgbe_can_reuse_page into a
single function. The main motivation behind this is to make better use
of the values so that we don't have to load them from memory and into
registers twice.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
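Background for the diff below: ixgbe recycles Rx pages with a half-page "flip" scheme. Each page backs two Rx buffers, and the driver alternates between the two halves by XOR-ing the buffer offset with the buffer size. The following is a minimal userspace sketch of that arithmetic only; RX_BUFSZ is an illustrative stand-in for ixgbe_rx_bufsz(), assuming the common 4 KiB page split into two 2 KiB buffers:

#include <stdio.h>

#define RX_BUFSZ 2048u  /* stand-in for ixgbe_rx_bufsz(); half of a 4 KiB page */

int main(void)
{
	unsigned int page_offset = 0;

	/* each XOR toggles between the two half-page buffers */
	for (int rx = 0; rx < 4; rx++) {
		printf("receive %d uses bytes %u..%u of the page\n",
		       rx, page_offset, page_offset + RX_BUFSZ - 1);
		page_offset ^= RX_BUFSZ; /* flip to the other half */
	}
	return 0;
}

Before this patch, the flip lived in ixgbe_reuse_rx_page and the reuse check in ixgbe_can_reuse_page; the patch moves both into ixgbe_add_rx_frag, where the page fields are already loaded.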
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 73
1 file changed, 34 insertions(+), 39 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f7351c6fa3b5..6a8c48443676 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1560,33 +1560,17 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
 }
 
 /**
- * ixgbe_can_reuse_page - determine if we can reuse a page
- * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
- *
- * Returns true if page can be reused in another Rx buffer
- **/
-static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
-{
-	struct page *page = rx_buffer->page;
-
-	/* if we are only owner of page and it is local we can reuse it */
-	return likely(page_count(page) == 1) &&
-	       likely(page_to_nid(page) == numa_node_id());
-}
-
-/**
  * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
  * @rx_ring: rx descriptor ring to store buffers on
  * @old_buff: donor buffer to have page reused
  *
- * Syncronizes page for reuse by the adapter
+ * Synchronizes page for reuse by the adapter
  **/
 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 				struct ixgbe_rx_buffer *old_buff)
 {
 	struct ixgbe_rx_buffer *new_buff;
 	u16 nta = rx_ring->next_to_alloc;
-	u16 bufsz = ixgbe_rx_bufsz(rx_ring);
 
 	new_buff = &rx_ring->rx_buffer_info[nta];
 
@@ -1597,17 +1581,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 	/* transfer page from old buffer to new buffer */
 	new_buff->page = old_buff->page;
 	new_buff->dma = old_buff->dma;
-
-	/* flip page offset to other buffer and store to new_buff */
-	new_buff->page_offset = old_buff->page_offset ^ bufsz;
+	new_buff->page_offset = old_buff->page_offset;
 
 	/* sync the buffer for use by the device */
 	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-					 new_buff->page_offset, bufsz,
+					 new_buff->page_offset,
+					 ixgbe_rx_bufsz(rx_ring),
 					 DMA_FROM_DEVICE);
-
-	/* bump ref count on page before it is given to the stack */
-	get_page(new_buff->page);
 }
 
 /**
@@ -1617,20 +1597,38 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
  * @rx_desc: descriptor containing length of buffer written by hardware
  * @skb: sk_buff to place the data into
  *
- * This function is based on skb_add_rx_frag.  I would have used that
- * function however it doesn't handle the truesize case correctly since we
- * are allocating more memory than might be used for a single receive.
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
 **/
-static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 			      struct ixgbe_rx_buffer *rx_buffer,
-			      struct sk_buff *skb, int size)
+			      union ixgbe_adv_rx_desc *rx_desc,
+			      struct sk_buff *skb)
 {
-	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-			   rx_buffer->page, rx_buffer->page_offset,
-			   size);
-	skb->len += size;
-	skb->data_len += size;
-	skb->truesize += ixgbe_rx_bufsz(rx_ring);
+	struct page *page = rx_buffer->page;
+	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+			rx_buffer->page_offset, size, truesize);
+
+	/* if we are only owner of page and it is local we can reuse it */
+	if (unlikely(page_count(page) != 1) ||
+	    unlikely(page_to_nid(page) != numa_node_id()))
+		return false;
+
+	/* flip page offset to other buffer */
+	rx_buffer->page_offset ^= truesize;
+
+	/* bump ref count on page before it is given to the stack */
+	get_page(page);
+
+	return true;
 }
 
 /**
@@ -1731,10 +1729,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	}
 
 	/* pull page into skb */
-	ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
-			  le16_to_cpu(rx_desc->wb.upper.length));
-
-	if (ixgbe_can_reuse_page(rx_buffer)) {
+	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
 		/* hand second half of page back to the ring */
 		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
 	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
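Net effect of the last hunk: the caller now makes one call that both attaches the page fragment and answers the reuse question, instead of a call followed by a separate predicate that re-reads the same fields. Below is a simplified, compilable restatement of that shape; the struct and function are stubs, not the real ixgbe types, and page_refs / page_is_local stand in for page_count() and the page_to_nid() NUMA check:

#include <stdbool.h>
#include <stdio.h>

struct rx_buffer {
	int page_refs;           /* stand-in for page_count(page) */
	bool page_is_local;      /* stand-in for page_to_nid() == numa_node_id() */
	unsigned int page_offset;
};

#define RX_BUFSZ 2048u           /* stand-in for ixgbe_rx_bufsz() */

/* attach the current half page to the skb (elided) and report reusability */
static bool add_rx_frag(struct rx_buffer *rx_buffer)
{
	/* if we are only owner of page and it is local we can reuse it */
	if (rx_buffer->page_refs != 1 || !rx_buffer->page_is_local)
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= RX_BUFSZ;

	/* bump ref count on page before it is given to the stack */
	rx_buffer->page_refs++;

	return true;
}

int main(void)
{
	struct rx_buffer buf = { .page_refs = 1, .page_is_local = true };

	if (add_rx_frag(&buf))   /* one call, one branch, fields read once */
		printf("reuse page: offset now %u, refs %d\n",
		       buf.page_offset, buf.page_refs);
	else
		printf("page cannot be reused; unmap and allocate a new one\n");
	return 0;
}

Because the check, the offset flip, and the ref-count bump all happen where rx_buffer->page is already in a register, the values no longer have to be reloaded in a second function, which is exactly the motivation stated in the commit message.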