path: root/drivers/net/ethernet/sfc/efx.c
author		Daniel Pieczko <dpieczko@solarflare.com>	2013-02-13 05:54:41 -0500
committer	Ben Hutchings <bhutchings@solarflare.com>	2013-03-07 15:22:15 -0500
commit		1648a23fa159e5c433aac06dc5e0d9db36146016 (patch)
tree		c60a40e25d368f34e73b6abffa9a62a890dc9bbf	/drivers/net/ethernet/sfc/efx.c
parent		179ea7f039f68ae4247a340bfb59fd861e7def12 (diff)
sfc: allocate more RX buffers per page
Allocating 2 buffers per page is insanely inefficient when MTU is 1500
and PAGE_SIZE is 64K (as it usually is on POWER). Allocate as many as
we can fit, and choose the refill batch size at run-time so that we
still always use a whole page at once.

[bwh: Fix loop condition to allow for compound pages; rebase]

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
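The diff below replaces the hard-coded two-buffers-per-page choice in
efx_start_datapath() with a call to efx_rx_config_page_split(), a helper that
lives in rx.c and is not shown in this diff. What follows is a minimal sketch
of that kind of run-time split, assuming the fields named in the new debug
messages (rx_page_buf_step, rx_bufs_per_page, rx_pages_per_batch) and a
preferred refill batch size EFX_RX_PREFERRED_BATCH; treat it as illustrative
rather than the exact upstream helper.

/* Sketch only: a run-time page split of the kind the diff below invokes
 * via efx_rx_config_page_split().  The real helper lives in rx.c and is
 * not part of this diff; EFX_RX_PREFERRED_BATCH and the truesize rule
 * are assumptions made for illustration.
 */
static void efx_rx_config_page_split(struct efx_nic *efx)
{
	/* Bytes consumed by one RX buffer, cache-line aligned */
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN,
				      L1_CACHE_BYTES);

	/* Fit as many buffers as possible after the per-page DMA state;
	 * higher-order allocations keep one buffer per allocation.
	 */
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);

	/* Account each buffer's share of the allocation towards truesize */
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;

	/* Round the preferred refill batch up to whole pages so that a
	 * page is always used at once, as the commit message describes.
	 */
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

With a 64K PAGE_SIZE and a 1500-byte MTU, a cache-line-aligned buffer step of
roughly 1.5-2 KB packs dozens of buffers into each page instead of two, and
rounding the batch up to whole pages keeps refills page-granular.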
Diffstat (limited to 'drivers/net/ethernet/sfc/efx.c')
-rw-r--r--	drivers/net/ethernet/sfc/efx.c	18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index a70c458f3cef..f050248e9fba 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -643,10 +643,6 @@ static void efx_start_datapath(struct efx_nic *efx)
 	if (rx_buf_len <= PAGE_SIZE) {
 		efx->rx_scatter = false;
 		efx->rx_buffer_order = 0;
-		if (rx_buf_len <= PAGE_SIZE / 2)
-			efx->rx_buffer_truesize = PAGE_SIZE / 2;
-		else
-			efx->rx_buffer_truesize = PAGE_SIZE;
 	} else if (efx->type->can_rx_scatter) {
 		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
 			     EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE >
@@ -654,14 +650,22 @@ static void efx_start_datapath(struct efx_nic *efx)
 		efx->rx_scatter = true;
 		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
 		efx->rx_buffer_order = 0;
-		efx->rx_buffer_truesize = PAGE_SIZE / 2;
 	} else {
 		efx->rx_scatter = false;
 		efx->rx_buffer_order = get_order(rx_buf_len);
-		efx->rx_buffer_truesize = PAGE_SIZE << efx->rx_buffer_order;
 	}
 
-	efx->rx_bufs_per_page = (rx_buf_len <= PAGE_SIZE / 2) ? 2 : 1;
+	efx_rx_config_page_split(efx);
+	if (efx->rx_buffer_order)
+		netif_dbg(efx, drv, efx->net_dev,
+			  "RX buf len=%u; page order=%u batch=%u\n",
+			  efx->rx_dma_len, efx->rx_buffer_order,
+			  efx->rx_pages_per_batch);
+	else
+		netif_dbg(efx, drv, efx->net_dev,
+			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
+			  efx->rx_dma_len, efx->rx_page_buf_step,
+			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);
 
 	/* RX filters also have scatter-enabled flags */
 	if (efx->rx_scatter != old_rx_scatter)