author     Steve Hodgson <shodgson@solarflare.com>    2010-06-01 07:19:39 -0400
committer  David S. Miller <davem@davemloft.net>      2010-06-02 05:21:08 -0400
commit     90d683afd1395016775c8d90508614f8d3000b81
tree       33c239c5cd1e607a22dca1af0fd3615cee584ef9 /drivers/net/sfc/rx.c
parent     d730dc527a5abd4717f6320e82cfce54edc882a3
sfc: Remove efx_rx_queue::add_lock
Ensure that efx_fast_push_rx_descriptors() must only run from
efx_process_channel() [NAPI], or when napi_disable() has been executed.

Reimplement the slow fill by sending an event to the channel, so that
NAPI runs, and hanging the subsequent fast fill off the event handler.
Replace the sfc_refill workqueue and delayed work items with a timer.
We do not need to stop this timer in efx_flush_all() because it's safe
to send the event always; receiving it will be delayed until NAPI is
restarted.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
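For readers unfamiliar with the pattern, the sketch below illustrates the timer-driven slow fill described above: the timer handler allocates nothing itself, it only posts an event so that the refill happens on the next NAPI poll. This is a hypothetical stand-alone sketch, not the sfc code: the my_-prefixed struct and functions and the 100 ms delay are invented for illustration; only the shape (setup_timer()/mod_timer()/del_timer_sync() around an event-posting handler, matching the pre-4.15 timer API this kernel used) follows the patch.

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical queue structure standing in for struct efx_rx_queue */
struct my_rx_queue {
	struct timer_list slow_fill;	/* replaces the old delayed work item */
	/* ... descriptor ring state ... */
};

/* Stand-in for efx_nic_generate_fill_event(): post a channel event so the
 * next NAPI poll calls the fast-fill routine. */
static void my_generate_fill_event(struct my_rx_queue *rx_queue)
{
	/* hardware/event-queue specific */
}

/* Timer handler: runs in softirq context and does no refilling itself */
static void my_rx_slow_fill(unsigned long context)
{
	struct my_rx_queue *rx_queue = (struct my_rx_queue *)context;

	my_generate_fill_event(rx_queue);
}

static void my_rx_queue_init(struct my_rx_queue *rx_queue)
{
	setup_timer(&rx_queue->slow_fill, my_rx_slow_fill,
		    (unsigned long)rx_queue);
}

/* Called when a fast fill fails to allocate buffers; re-arming an already
 * pending timer is harmless, so no extra locking is needed here. */
static void my_schedule_slow_fill(struct my_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static void my_rx_queue_fini(struct my_rx_queue *rx_queue)
{
	/* Mirrors the del_timer_sync() added to efx_fini_rx_queue() below */
	del_timer_sync(&rx_queue->slow_fill);
}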
Diffstat (limited to 'drivers/net/sfc/rx.c')
-rw-r--r--	drivers/net/sfc/rx.c	95
1 file changed, 21 insertions, 74 deletions
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index e308818b9f55..bf1e55e7869e 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -276,28 +276,25 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 /**
  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
  * @rx_queue: RX descriptor queue
- * @retry: Recheck the fill level
  * This will aim to fill the RX descriptor queue up to
  * @rx_queue->@fast_fill_limit. If there is insufficient atomic
- * memory to do so, the caller should retry.
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practise,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
  */
-static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
-					  int retry)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 {
 	struct efx_rx_buffer *rx_buf;
 	unsigned fill_level, index;
 	int i, space, rc = 0;
 
-	/* Calculate current fill level. Do this outside the lock,
-	 * because most of the time we'll end up not wanting to do the
-	 * fill anyway.
-	 */
+	/* Calculate current fill level, and exit if we don't need to fill */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
 	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
-
-	/* Don't fill if we don't need to */
 	if (fill_level >= rx_queue->fast_fill_trigger)
-		return 0;
+		return;
 
 	/* Record minimum fill level */
 	if (unlikely(fill_level < rx_queue->min_fill)) {
@@ -305,20 +302,9 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 		rx_queue->min_fill = fill_level;
 	}
 
-	/* Acquire RX add lock. If this lock is contended, then a fast
-	 * fill must already be in progress (e.g. in the refill
-	 * tasklet), so we don't need to do anything
-	 */
-	if (!spin_trylock_bh(&rx_queue->add_lock))
-		return -1;
-
- retry:
-	/* Recalculate current fill level now that we have the lock */
-	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 	space = rx_queue->fast_fill_limit - fill_level;
 	if (space < EFX_RX_BATCH)
-		goto out_unlock;
+		return;
 
 	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
 		  " level %d to level %d using %s allocation\n",
@@ -330,8 +316,13 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 			index = rx_queue->added_count & EFX_RXQ_MASK;
 			rx_buf = efx_rx_buffer(rx_queue, index);
 			rc = efx_init_rx_buffer(rx_queue, rx_buf);
-			if (unlikely(rc))
+			if (unlikely(rc)) {
+				/* Ensure that we don't leave the rx queue
+				 * empty */
+				if (rx_queue->added_count == rx_queue->removed_count)
+					efx_schedule_slow_fill(rx_queue);
 				goto out;
+			}
 			++rx_queue->added_count;
 		}
 	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
@@ -343,61 +334,16 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
  out:
 	/* Send write pointer to card. */
 	efx_nic_notify_rx_desc(rx_queue);
-
-	/* If the fast fill is running inside from the refill tasklet, then
-	 * for SMP systems it may be running on a different CPU to
-	 * RX event processing, which means that the fill level may now be
-	 * out of date. */
-	if (unlikely(retry && (rc == 0)))
-		goto retry;
-
- out_unlock:
-	spin_unlock_bh(&rx_queue->add_lock);
-
-	return rc;
 }
 
-/**
- * efx_fast_push_rx_descriptors - push new RX descriptors quickly
- * @rx_queue: RX descriptor queue
- *
- * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@fast_fill_limit. If there is insufficient memory to do so,
- * it will schedule a work item to immediately continue the fast fill
- */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
-{
-	int rc;
-
-	rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
-	if (unlikely(rc)) {
-		/* Schedule the work item to run immediately. The hope is
-		 * that work is immediately pending to free some memory
-		 * (e.g. an RX event or TX completion)
-		 */
-		efx_schedule_slow_fill(rx_queue, 0);
-	}
-}
-
-void efx_rx_work(struct work_struct *data)
+void efx_rx_slow_fill(unsigned long context)
 {
-	struct efx_rx_queue *rx_queue;
-	int rc;
-
-	rx_queue = container_of(data, struct efx_rx_queue, work.work);
-
-	if (unlikely(!rx_queue->channel->enabled))
-		return;
-
-	EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
-		  "%d\n", rx_queue->queue, raw_smp_processor_id());
+	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
+	struct efx_channel *channel = rx_queue->channel;
 
+	/* Post an event to cause NAPI to run and refill the queue */
+	efx_nic_generate_fill_event(channel);
 	++rx_queue->slow_fill_count;
-	/* Push new RX descriptors, allowing at least 1 jiffy for
-	 * the kernel to free some more memory. */
-	rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
-	if (rc)
-		efx_schedule_slow_fill(rx_queue, 1);
 }
 
 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
@@ -682,6 +628,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 
 	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
 
+	del_timer_sync(&rx_queue->slow_fill);
 	efx_nic_fini_rx(rx_queue);
 
 	/* Release RX buffers NB start at index 0 not current HW ptr */