diff options
author | Ben Hutchings <bhutchings@solarflare.com> | 2009-10-23 04:30:58 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-10-24 07:27:05 -0400 |
commit | 3ffeabdd2bc62e0ebcb1a51a5d959a86a7a915fc (patch) | |
tree | a3b17cc4b0f8300aca46d67a6f9a362f6b052975 /drivers/net/sfc/rx.c | |
parent | 12d00cadcc45382fc127712aa35bd0c96cbf81d9 (diff) |
sfc: Eliminate indirect lookups of queue size constants
Move size and mask definitions into efx.h; calculate page orders in falcon.c.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/sfc/rx.c')
-rw-r--r-- | drivers/net/sfc/rx.c | 16 |
1 file changed, 6 insertions, 10 deletions
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 01f9432c31ef..ea59ed25b0d8 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
@@ -293,8 +293,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
293 | * fill anyway. | 293 | * fill anyway. |
294 | */ | 294 | */ |
295 | fill_level = (rx_queue->added_count - rx_queue->removed_count); | 295 | fill_level = (rx_queue->added_count - rx_queue->removed_count); |
296 | EFX_BUG_ON_PARANOID(fill_level > | 296 | EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); |
297 | rx_queue->efx->type->rxd_ring_mask + 1); | ||
298 | 297 | ||
299 | /* Don't fill if we don't need to */ | 298 | /* Don't fill if we don't need to */ |
300 | if (fill_level >= rx_queue->fast_fill_trigger) | 299 | if (fill_level >= rx_queue->fast_fill_trigger) |
@@ -316,8 +315,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
316 | retry: | 315 | retry: |
317 | /* Recalculate current fill level now that we have the lock */ | 316 | /* Recalculate current fill level now that we have the lock */ |
318 | fill_level = (rx_queue->added_count - rx_queue->removed_count); | 317 | fill_level = (rx_queue->added_count - rx_queue->removed_count); |
319 | EFX_BUG_ON_PARANOID(fill_level > | 318 | EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); |
320 | rx_queue->efx->type->rxd_ring_mask + 1); | ||
321 | space = rx_queue->fast_fill_limit - fill_level; | 319 | space = rx_queue->fast_fill_limit - fill_level; |
322 | if (space < EFX_RX_BATCH) | 320 | if (space < EFX_RX_BATCH) |
323 | goto out_unlock; | 321 | goto out_unlock; |
@@ -329,8 +327,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
329 | 327 | ||
330 | do { | 328 | do { |
331 | for (i = 0; i < EFX_RX_BATCH; ++i) { | 329 | for (i = 0; i < EFX_RX_BATCH; ++i) { |
332 | index = (rx_queue->added_count & | 330 | index = rx_queue->added_count & EFX_RXQ_MASK; |
333 | rx_queue->efx->type->rxd_ring_mask); | ||
334 | rx_buf = efx_rx_buffer(rx_queue, index); | 331 | rx_buf = efx_rx_buffer(rx_queue, index); |
335 | rc = efx_init_rx_buffer(rx_queue, rx_buf); | 332 | rc = efx_init_rx_buffer(rx_queue, rx_buf); |
336 | if (unlikely(rc)) | 333 | if (unlikely(rc)) |
@@ -629,7 +626,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) | |||
629 | EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue); | 626 | EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue); |
630 | 627 | ||
631 | /* Allocate RX buffers */ | 628 | /* Allocate RX buffers */ |
632 | rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer); | 629 | rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer); |
633 | rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); | 630 | rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); |
634 | if (!rx_queue->buffer) | 631 | if (!rx_queue->buffer) |
635 | return -ENOMEM; | 632 | return -ENOMEM; |
@@ -644,7 +641,6 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) | |||
644 | 641 | ||
645 | void efx_init_rx_queue(struct efx_rx_queue *rx_queue) | 642 | void efx_init_rx_queue(struct efx_rx_queue *rx_queue) |
646 | { | 643 | { |
647 | struct efx_nic *efx = rx_queue->efx; | ||
648 | unsigned int max_fill, trigger, limit; | 644 | unsigned int max_fill, trigger, limit; |
649 | 645 | ||
650 | EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue); | 646 | EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue); |
@@ -657,7 +653,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) | |||
657 | rx_queue->min_overfill = -1U; | 653 | rx_queue->min_overfill = -1U; |
658 | 654 | ||
659 | /* Initialise limit fields */ | 655 | /* Initialise limit fields */ |
660 | max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM; | 656 | max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM; |
661 | trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; | 657 | trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; |
662 | limit = max_fill * min(rx_refill_limit, 100U) / 100U; | 658 | limit = max_fill * min(rx_refill_limit, 100U) / 100U; |
663 | 659 | ||
@@ -680,7 +676,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | |||
680 | 676 | ||
681 | /* Release RX buffers NB start at index 0 not current HW ptr */ | 677 | /* Release RX buffers NB start at index 0 not current HW ptr */ |
682 | if (rx_queue->buffer) { | 678 | if (rx_queue->buffer) { |
683 | for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) { | 679 | for (i = 0; i <= EFX_RXQ_MASK; i++) { |
684 | rx_buf = efx_rx_buffer(rx_queue, i); | 680 | rx_buf = efx_rx_buffer(rx_queue, i); |
685 | efx_fini_rx_buffer(rx_queue, rx_buf); | 681 | efx_fini_rx_buffer(rx_queue, rx_buf); |
686 | } | 682 | } |