aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Riddoch <driddoch@solarflare.com>2012-04-11 08:09:24 -0400
committerBen Hutchings <bhutchings@solarflare.com>2012-05-09 21:22:17 -0400
commitda9ca505829f6c270b239536b1b169644d7cf072 (patch)
treea5ec93e6649c972576a60a56b4bd09d09d3b581a
parent3de4e30196f09ff2c096f2f8e17ebc9adda9db8d (diff)
sfc: Fill RX rings completely full, rather than to 95% full
There was no runtime control of the fast_fill_limit in any case, so purge that field.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h3
-rw-r--r--drivers/net/ethernet/sfc/rx.c15
2 files changed, 4 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f0385e1fb2d8..eaca447e2a2b 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -252,8 +252,6 @@ struct efx_rx_page_state {
  * @max_fill: RX descriptor maximum fill level (<= ring size)
  * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
  *	(<= @max_fill)
- * @fast_fill_limit: The level to which a fast fill will fill
- *	(@fast_fill_trigger <= @fast_fill_limit <= @max_fill)
  * @min_fill: RX descriptor minimum non-zero fill level.
  *	This records the minimum fill level observed when a ring
  *	refill was triggered.
@@ -274,7 +272,6 @@ struct efx_rx_queue {
 	int removed_count;
 	unsigned int max_fill;
 	unsigned int fast_fill_trigger;
-	unsigned int fast_fill_limit;
 	unsigned int min_fill;
 	unsigned int min_overfill;
 	unsigned int alloc_page_count;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 763fa2fe1a38..254fec81894e 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -78,11 +78,6 @@ static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
  */
 static unsigned int rx_refill_threshold = 90;
 
-/* This is the percentage fill level to which an RX queue will be refilled
- * when the "RX refill threshold" is reached.
- */
-static unsigned int rx_refill_limit = 95;
-
 /*
  * RX maximum head room required.
  *
@@ -342,7 +337,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
  * @rx_queue:		RX descriptor queue
  * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@fast_fill_limit. If there is insufficient atomic
+ * @rx_queue->@max_fill. If there is insufficient atomic
  * memory to do so, a slow fill will be scheduled.
  *
  * The caller must provide serialisation (none is used here). In practise,
@@ -367,7 +362,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 		rx_queue->min_fill = fill_level;
 	}
 
-	space = rx_queue->fast_fill_limit - fill_level;
+	space = rx_queue->max_fill - fill_level;
 	if (space < EFX_RX_BATCH)
 		goto out;
 
@@ -375,7 +370,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 		  "RX queue %d fast-filling descriptor ring from"
 		  " level %d to level %d using %s allocation\n",
 		  efx_rx_queue_index(rx_queue), fill_level,
-		  rx_queue->fast_fill_limit,
+		  rx_queue->max_fill,
 		  channel->rx_alloc_push_pages ? "page" : "skb");
 
 	do {
@@ -681,7 +676,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
-	unsigned int max_fill, trigger, limit;
+	unsigned int max_fill, trigger;
 
 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
 		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
@@ -695,11 +690,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 	/* Initialise limit fields */
 	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
 	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
-	limit = max_fill * min(rx_refill_limit, 100U) / 100U;
 
 	rx_queue->max_fill = max_fill;
 	rx_queue->fast_fill_trigger = trigger;
-	rx_queue->fast_fill_limit = limit;
 
 	/* Set up RX descriptor ring */
 	rx_queue->enabled = true;