author     David Riddoch <driddoch@solarflare.com>     2012-04-11 08:12:41 -0400
committer  Ben Hutchings <bhutchings@solarflare.com>   2012-05-09 21:22:17 -0400
commit     64235187c2b32913fc41dfafc98e3a77ea7c0217 (patch)
tree       4d2faf81428d4c5c3741823c0c5700a39f80db4a
parent     da9ca505829f6c270b239536b1b169644d7cf072 (diff)
sfc: By default refill RX rings as soon as space for a batch
Previously we refilled with much larger batches, which caused large latency spikes. We now have many more, much smaller spikes!

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
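For context, a minimal standalone sketch (not driver code) of the fast-fill trigger computation before and after this change; EFX_RX_BATCH, EFX_RXD_HEAD_ROOM and the ring size below are illustrative values chosen for the example, not the driver's real constants:

/*
 * Standalone sketch of the fast-fill trigger computation before and
 * after this patch.  Constants and ring size are illustrative only.
 */
#include <stdio.h>

#define EFX_RX_BATCH		8U	/* assumed refill batch size */
#define EFX_RXD_HEAD_ROOM	8U	/* assumed descriptor head room */

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int rxq_entries = 512U;	/* illustrative ring size */
	unsigned int rx_refill_threshold = 0U;	/* new default */
	unsigned int max_fill = rxq_entries - EFX_RXD_HEAD_ROOM;
	unsigned int max_trigger = max_fill - EFX_RX_BATCH;
	unsigned int trigger;

	/* Old default: refill only once the fill level drops below 90%. */
	unsigned int old_trigger = max_fill * 90U / 100U;

	/*
	 * New logic: with the threshold left at 0, refill as soon as there
	 * is room for one batch; a non-zero threshold still works but is
	 * clamped so it never exceeds max_fill - EFX_RX_BATCH.
	 */
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min_uint(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	printf("max_fill=%u old_trigger=%u new_trigger=%u\n",
	       max_fill, old_trigger, trigger);
	return 0;
}

With the old 90% default the fill level had to fall well below max_fill before a refill was triggered, so each refill was large; with the new default the trigger sits just one batch below max_fill, giving the many more, much smaller refills described above.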
 drivers/net/ethernet/sfc/rx.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 254fec81894e..243e91f3dff9 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -76,7 +76,7 @@ static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
 /* This is the percentage fill level below which new RX descriptors
  * will be added to the RX descriptor ring.
  */
-static unsigned int rx_refill_threshold = 90;
+static unsigned int rx_refill_threshold;
 
 /*
  * RX maximum head room required.
@@ -363,8 +363,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 	}
 
 	space = rx_queue->max_fill - fill_level;
-	if (space < EFX_RX_BATCH)
-		goto out;
+	EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);
 
 	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
 		   "RX queue %d fast-filling descriptor ring from"
@@ -676,7 +675,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
-	unsigned int max_fill, trigger;
+	unsigned int max_fill, trigger, max_trigger;
 
 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
 		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
@@ -689,7 +688,14 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 
 	/* Initialise limit fields */
 	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
-	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+	max_trigger = max_fill - EFX_RX_BATCH;
+	if (rx_refill_threshold != 0) {
+		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+		if (trigger > max_trigger)
+			trigger = max_trigger;
+	} else {
+		trigger = max_trigger;
+	}
 
 	rx_queue->max_fill = max_fill;
 	rx_queue->fast_fill_trigger = trigger;
@@ -739,5 +745,5 @@ MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
 
 module_param(rx_refill_threshold, uint, 0444);
 MODULE_PARM_DESC(rx_refill_threshold,
-		 "RX descriptor ring fast/slow fill threshold (%)");
+		 "RX descriptor ring refill threshold (%)");
 
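Note that rx_refill_threshold stays available as a module parameter (permissions 0444, so read-only once the driver is loaded); presumably the previous percentage-based behaviour can still be requested at load time, e.g. with something like "modprobe sfc rx_refill_threshold=90", while the new default of 0 means the ring is refilled as soon as there is space for a full EFX_RX_BATCH.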