author    Jesse Brandeburg <jesse.brandeburg@intel.com>    2008-09-11 22:58:43 -0400
committer Jeff Garzik <jgarzik@redhat.com>                 2008-09-24 18:54:56 -0400
commit    762f4c57105853d1cbad3b96ef18aa23beff3db2
tree      7df0c1a8e5d0ae30e7c4138c4f182d1d1dc2bba4 /drivers/net/ixgbe
parent    ff819cfb5d95c4945811f5e33aa57274885c7527
ixgbe: recycle pages in packet split mode
Most of the time we only need 1500 bytes for a packet, which means we
don't need a whole 4k page for each packet.  Share the allocation by
taking a reference count on the page and giving half of it to each of
two receive descriptors.  This lets us use packet split mode all the
time, due to the performance gained by allocating half as many pages.
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
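The idea in miniature, as a minimal standalone sketch assuming 4K pages:
one page backs two half-page receive buffers, with page_offset toggling
between the halves. The half_page_buf struct and refill() helper below
are illustrative names, not the driver's own code:

/*
 * Illustrative sketch of half-page sharing, assuming PAGE_SIZE == 4096.
 * Not the driver code itself; refcount handling is shown in the patch.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

struct half_page_buf {
	struct page *page;
	unsigned int page_offset;	/* 0 or PAGE_SIZE / 2 */
};

static int refill(struct half_page_buf *b)
{
	if (!b->page) {
		b->page = alloc_page(GFP_ATOMIC);
		if (!b->page)
			return -ENOMEM;
		b->page_offset = 0;
	} else {
		/* re-using the page: hand out the other half */
		b->page_offset ^= PAGE_SIZE / 2;
	}
	/* map PAGE_SIZE / 2 bytes at b->page_offset for DMA here */
	return 0;
}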
Diffstat (limited to 'drivers/net/ixgbe')
 drivers/net/ixgbe/ixgbe.h      |  1
 drivers/net/ixgbe/ixgbe_main.c | 49
 2 files changed, 32 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 064af675a941..71ddac6ac4f4 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -114,6 +114,7 @@ struct ixgbe_rx_buffer {
 	dma_addr_t dma;
 	struct page *page;
 	dma_addr_t page_dma;
+	unsigned int page_offset;
 };
 
 struct ixgbe_queue_stats {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index ef965c996848..5858ab2b48f0 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -493,16 +493,24 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 	while (cleaned_count--) {
 		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
 
-		if (!bi->page &&
+		if (!bi->page_dma &&
 		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
-			bi->page = alloc_page(GFP_ATOMIC);
 			if (!bi->page) {
-				adapter->alloc_rx_page_failed++;
-				goto no_buffers;
+				bi->page = alloc_page(GFP_ATOMIC);
+				if (!bi->page) {
+					adapter->alloc_rx_page_failed++;
+					goto no_buffers;
+				}
+				bi->page_offset = 0;
+			} else {
+				/* use a half page if we're re-using */
+				bi->page_offset ^= (PAGE_SIZE / 2);
 			}
-			bi->page_dma = pci_map_page(pdev, bi->page, 0,
-			                            PAGE_SIZE,
-			                            PCI_DMA_FROMDEVICE);
+
+			bi->page_dma = pci_map_page(pdev, bi->page,
+			                            bi->page_offset,
+			                            (PAGE_SIZE / 2),
+			                            PCI_DMA_FROMDEVICE);
 		}
 
 		if (!bi->skb) {
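Two details in this hunk: the refill test changes from !bi->page to
!bi->page_dma, since a recycled page now legitimately sticks around
without a live DMA mapping, and the XOR walks the mapping back and
forth between the two halves of the page. A small userspace
illustration of the offset sequence (my own sketch, assuming 4K pages):

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int page_offset = 0;	/* as set on first allocation */
	int use;

	for (use = 1; use <= 4; use++) {
		printf("use %d: bytes [%u, %u]\n", use, page_offset,
		       page_offset + PAGE_SIZE / 2 - 1);
		page_offset ^= PAGE_SIZE / 2;	/* flip to the other half */
	}
	return 0;
}

This prints [0, 2047], [2048, 4095], [0, 2047], [2048, 4095]: each
refill of a surviving page hands out the half the hardware is not
currently using.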
@@ -596,7 +604,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
 			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
 			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+			      IXGBE_RXDADV_HDRBUFLEN_SHIFT;
 			if (hdr_info & IXGBE_RXDADV_SPH)
 				adapter->rx_hdr_split++;
 			if (len > IXGBE_RX_HDR_SIZE)
@@ -620,11 +628,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 
 		if (upper_len) {
 			pci_unmap_page(pdev, rx_buffer_info->page_dma,
-			               PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
 			rx_buffer_info->page_dma = 0;
 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-			                   rx_buffer_info->page, 0, upper_len);
-			rx_buffer_info->page = NULL;
+			                   rx_buffer_info->page,
+			                   rx_buffer_info->page_offset,
+			                   upper_len);
+
+			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+			    (page_count(rx_buffer_info->page) != 1))
+				rx_buffer_info->page = NULL;
+			else
+				get_page(rx_buffer_info->page);
 
 			skb->len += upper_len;
 			skb->data_len += upper_len;
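The page_count() test is what makes the recycling safe: alloc_page()
returns a page with a count of 1, and skb_fill_page_desc() attaches a
half to the skb without taking a reference of its own, so a count of
exactly 1 here means no earlier skb still holds the page. The
rx_buf_len guard additionally skips recycling when receive buffers are
too large for half a page. A hedged reading of the decision, with the
comments being my interpretation of the patch:

/* count == 1: only the reference the skb will consume exists */
if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
    (page_count(rx_buffer_info->page) != 1))
	/* half doesn't fit, or an older skb still holds the page:
	 * forget it and allocate a fresh one on the next refill */
	rx_buffer_info->page = NULL;
else
	/* take a reference for ourselves; the skb drops the other
	 * one with put_page() when the packet is consumed */
	get_page(rx_buffer_info->page);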
@@ -647,6 +662,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 			rx_buffer_info->skb = next_buffer->skb;
 			rx_buffer_info->dma = next_buffer->dma;
 			next_buffer->skb = skb;
+			next_buffer->dma = 0;
 			adapter->non_eop_descs++;
 			goto next_desc;
 		}
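Clearing next_buffer->dma matters because the mapping just moved to
rx_buffer_info; if both ring entries kept recording the same bus
address, the cleanup path could unmap it twice. A sketch of the guard
this enables (an assumption on my part, simplified from the
ixgbe_clean_rx_ring() style of cleanup):

/* cleanup only unmaps a nonzero handle, so zeroing the donor
 * entry above prevents a double pci_unmap_single() */
if (rx_buffer_info->dma) {
	pci_unmap_single(pdev, rx_buffer_info->dma,
	                 rx_ring->rx_buf_len, PCI_DMA_FROMDEVICE);
	rx_buffer_info->dma = 0;
}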
@@ -1534,10 +1550,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	int rx_buf_len;
 
 	/* Decide whether to use packet split mode or not */
-	if (netdev->mtu > ETH_DATA_LEN)
-		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
-	else
-		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 
 	/* Set the RX buffer length according to the mode */
 	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2018,12 +2031,12 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 		}
 		if (!rx_buffer_info->page)
 			continue;
-		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
+		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
 		               PCI_DMA_FROMDEVICE);
 		rx_buffer_info->page_dma = 0;
-
 		put_page(rx_buffer_info->page);
 		rx_buffer_info->page = NULL;
+		rx_buffer_info->page_offset = 0;
 	}
 
 	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;