author:    Steve Hodgson <shodgson@solarflare.com>    2010-09-10 02:42:22 -0400
committer: David S. Miller <davem@davemloft.net>      2010-09-10 15:27:33 -0400
commit:    ecc910f520ba8f22848982ee816ad75c449b805d
tree:      e934380209532b831b7e7e334ddc33d75db7eef5
parent:    8313aca38b3937947fffebca6e34bac8e24300c8
sfc: Make the dmaq size a run-time setting (rather than compile-time)
- Allow the ring size to be specified in non power-of-two sizes (for
  instance to limit the amount of receive buffers).
- Automatically size the event queue.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
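The idea behind the change is visible in the efx_probe_rx_queue() hunk below:
the requested entry count is rounded up to the smallest power of two that fits,
descriptor indexing wraps with a ptr_mask of entries - 1, and the fill-level
limits keep using the exact (possibly non-power-of-two) rxq_entries. What
follows is a minimal userspace sketch of that indexing scheme, not the driver
code itself; the MIN/MAX bounds and the rounding helper are illustrative
stand-ins for EFX_MIN_DMAQ_SIZE/EFX_MAX_DMAQ_SIZE and roundup_pow_of_two().

#include <assert.h>
#include <stdio.h>

#define MIN_DMAQ_SIZE 512u	/* assumed lower bound, stand-in for EFX_MIN_DMAQ_SIZE */
#define MAX_DMAQ_SIZE 4096u	/* assumed upper bound, stand-in for EFX_MAX_DMAQ_SIZE */

/* Round v up to the next power of two (v >= 1). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int rxq_entries = 1000;	/* run-time, non-power-of-two request */
	unsigned int entries, ptr_mask, added_count;

	/* As in efx_probe_rx_queue(): smallest power-of-two ring that fits. */
	entries = roundup_pow2(rxq_entries);
	if (entries < MIN_DMAQ_SIZE)
		entries = MIN_DMAQ_SIZE;
	assert(entries <= MAX_DMAQ_SIZE);
	ptr_mask = entries - 1;

	/* Descriptor indexing wraps with the mask, mirroring
	 * "added_count & rx_queue->ptr_mask" in the diff below. */
	for (added_count = 0; added_count < 2 * entries + 3; added_count++)
		assert((added_count & ptr_mask) < entries);

	printf("requested %u entries -> ring of %u, ptr_mask %#x\n",
	       rxq_entries, entries, ptr_mask);
	return 0;
}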
Diffstat (limited to 'drivers/net/sfc/rx.c')
-rw-r--r--	drivers/net/sfc/rx.c | 33 ++++++++++++++++++++-------------
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 6651d9364e8f..6d0959b5158e 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -133,7 +133,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 	unsigned index, count;
 
 	for (count = 0; count < EFX_RX_BATCH; ++count) {
-		index = rx_queue->added_count & EFX_RXQ_MASK;
+		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
 
 		rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
@@ -208,7 +208,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 	dma_addr += sizeof(struct efx_rx_page_state);
 
 split:
-	index = rx_queue->added_count & EFX_RXQ_MASK;
+	index = rx_queue->added_count & rx_queue->ptr_mask;
 	rx_buf = efx_rx_buffer(rx_queue, index);
 	rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
 	rx_buf->skb = NULL;
@@ -285,7 +285,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
 	 * we'd like to insert an additional descriptor whilst leaving
 	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
-	if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
+	if (unlikely(fill_level > rx_queue->max_fill)) {
 		/* We could place "state" on a list, and drain the list in
 		 * efx_fast_push_rx_descriptors(). For now, this will do. */
 		return;
@@ -294,7 +294,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
 	++state->refcnt;
 	get_page(rx_buf->page);
 
-	index = rx_queue->added_count & EFX_RXQ_MASK;
+	index = rx_queue->added_count & rx_queue->ptr_mask;
 	new_buf = efx_rx_buffer(rx_queue, index);
 	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
 	new_buf->skb = NULL;
@@ -319,7 +319,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
 	    page_count(rx_buf->page) == 1)
 		efx_resurrect_rx_buffer(rx_queue, rx_buf);
 
-	index = rx_queue->added_count & EFX_RXQ_MASK;
+	index = rx_queue->added_count & rx_queue->ptr_mask;
 	new_buf = efx_rx_buffer(rx_queue, index);
 
 	memcpy(new_buf, rx_buf, sizeof(*new_buf));
@@ -347,7 +347,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 
 	/* Calculate current fill level, and exit if we don't need to fill */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
+	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
 	if (fill_level >= rx_queue->fast_fill_trigger)
 		goto out;
 
@@ -650,15 +650,22 @@ void efx_rx_strategy(struct efx_channel *channel)
 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
-	unsigned int rxq_size;
+	unsigned int entries;
 	int rc;
 
+	/* Create the smallest power-of-two aligned ring */
+	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
+	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+	rx_queue->ptr_mask = entries - 1;
+
 	netif_dbg(efx, probe, efx->net_dev,
-		  "creating RX queue %d\n", efx_rx_queue_index(rx_queue));
+		  "creating RX queue %d size %#x mask %#x\n",
+		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
+		  rx_queue->ptr_mask);
 
 	/* Allocate RX buffers */
-	rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
-	rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
+	rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
+				   GFP_KERNEL);
 	if (!rx_queue->buffer)
 		return -ENOMEM;
 
@@ -672,6 +679,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
+	struct efx_nic *efx = rx_queue->efx;
 	unsigned int max_fill, trigger, limit;
 
 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
@@ -682,10 +690,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 	rx_queue->notified_count = 0;
 	rx_queue->removed_count = 0;
 	rx_queue->min_fill = -1U;
-	rx_queue->min_overfill = -1U;
 
 	/* Initialise limit fields */
-	max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
+	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
 	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
 	limit = max_fill * min(rx_refill_limit, 100U) / 100U;
 
@@ -710,7 +717,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 
 	/* Release RX buffers NB start at index 0 not current HW ptr */
 	if (rx_queue->buffer) {
-		for (i = 0; i <= EFX_RXQ_MASK; i++) {
+		for (i = 0; i <= rx_queue->ptr_mask; i++) {
 			rx_buf = efx_rx_buffer(rx_queue, i);
 			efx_fini_rx_buffer(rx_queue, rx_buf);
 		}
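For the refill limits, the efx_init_rx_queue() hunk above swaps the
compile-time EFX_RXQ_SIZE for the run-time efx->rxq_entries, so the fast-fill
trigger and limit track the requested occupancy even when the underlying ring
was rounded up to a power of two. Below is a hedged userspace sketch of that
arithmetic only; the head-room and percentage constants are assumptions for
illustration, not the driver's EFX_RXD_HEAD_ROOM or the rx_refill_threshold
and rx_refill_limit module-parameter defaults.

#include <stdio.h>

#define RXD_HEAD_ROOM		2u	/* stand-in for EFX_RXD_HEAD_ROOM */
#define RX_REFILL_THRESHOLD	90u	/* assumed refill threshold, percent */
#define RX_REFILL_LIMIT		95u	/* assumed refill limit, percent */

int main(void)
{
	unsigned int rxq_entries = 1000;	/* run-time ring size request */
	unsigned int max_fill, trigger, limit;

	/* Mirrors the limit initialisation in efx_init_rx_queue() above,
	 * now driven by the run-time rxq_entries. */
	max_fill = rxq_entries - RXD_HEAD_ROOM;
	trigger = max_fill * RX_REFILL_THRESHOLD / 100u;
	limit = max_fill * RX_REFILL_LIMIT / 100u;

	printf("max_fill %u, fast_fill_trigger %u, fast_fill_limit %u\n",
	       max_fill, trigger, limit);
	return 0;
}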