author		Ben Hutchings <bhutchings@solarflare.com>	2009-10-23 04:30:58 -0400
committer	David S. Miller <davem@davemloft.net>	2009-10-24 07:27:05 -0400
commit		3ffeabdd2bc62e0ebcb1a51a5d959a86a7a915fc (patch)
tree		a3b17cc4b0f8300aca46d67a6f9a362f6b052975	/drivers/net/sfc/tx.c
parent		12d00cadcc45382fc127712aa35bd0c96cbf81d9 (diff)

sfc: Eliminate indirect lookups of queue size constants

Move size and mask definitions into efx.h; calculate page orders in falcon.c.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/sfc/tx.c')
-rw-r--r--	drivers/net/sfc/tx.c	46
1 file changed, 20 insertions(+), 26 deletions(-)
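Note: the diff below is limited to drivers/net/sfc/tx.c, so the definitions this patch relies on are not shown here. As a rough sketch only, the constants moved into efx.h would need to look something like the following (the 4096-entry ring size is an assumption for illustration, not taken from this diff; the mask form requires a power-of-two size):

/* Illustrative only: size/mask constants assumed to live in efx.h */
#define EFX_TXQ_SIZE	4096			/* assumed power-of-two ring size */
#define EFX_TXQ_MASK	(EFX_TXQ_SIZE - 1)	/* index mask for the ring */

EFX_TXQ_THRESHOLD itself is defined in tx.c by the first hunk below.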
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 489c4de31447..ae554eec0563 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -26,8 +26,7 @@
  * The tx_queue descriptor ring fill-level must fall below this value
  * before we restart the netif queue
  */
-#define EFX_NETDEV_TX_THRESHOLD(_tx_queue)	\
-	(_tx_queue->efx->type->txd_ring_mask / 2u)
+#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
 
 /* We want to be able to nest calls to netif_stop_queue(), since each
  * channel can have an individual stop on the queue.
@@ -171,7 +170,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 	}
 
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-	q_space = efx->type->txd_ring_mask - 1 - fill_level;
+	q_space = EFX_TXQ_MASK - 1 - fill_level;
 
 	/* Map for DMA. Use pci_map_single rather than pci_map_page
 	 * since this is more efficient on machines with sparse
@@ -208,16 +207,14 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 					   &tx_queue->read_count;
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
-			q_space = (efx->type->txd_ring_mask - 1 -
-				   fill_level);
+			q_space = EFX_TXQ_MASK - 1 - fill_level;
 			if (unlikely(q_space-- <= 0))
 				goto stop;
 			smp_mb();
 			--tx_queue->stopped;
 		}
 
-		insert_ptr = (tx_queue->insert_count &
-			      efx->type->txd_ring_mask);
+		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->tsoh);
@@ -289,7 +286,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
-		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
+		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_dequeue_buffer(tx_queue, buffer);
 		buffer->len = 0;
@@ -318,10 +315,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 {
 	struct efx_nic *efx = tx_queue->efx;
 	unsigned int stop_index, read_ptr;
-	unsigned int mask = tx_queue->efx->type->txd_ring_mask;
 
-	stop_index = (index + 1) & mask;
-	read_ptr = tx_queue->read_count & mask;
+	stop_index = (index + 1) & EFX_TXQ_MASK;
+	read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
 
 	while (read_ptr != stop_index) {
 		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -338,7 +334,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 		buffer->len = 0;
 
 		++tx_queue->read_count;
-		read_ptr = tx_queue->read_count & mask;
+		read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
 	}
 }
 
@@ -391,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
 
-	EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);
+	EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);
 
 	efx_dequeue_buffers(tx_queue, index);
 
@@ -401,7 +397,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	smp_mb();
 	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
-		if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
+		if (fill_level < EFX_TXQ_THRESHOLD) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
 
 			/* Do this under netif_tx_lock(), to avoid racing
@@ -425,11 +421,11 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 	EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
 
 	/* Allocate software ring */
-	txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
+	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
 	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
 	if (!tx_queue->buffer)
 		return -ENOMEM;
-	for (i = 0; i <= efx->type->txd_ring_mask; ++i)
+	for (i = 0; i <= EFX_TXQ_MASK; ++i)
 		tx_queue->buffer[i].continuation = true;
 
 	/* Allocate hardware ring */
@@ -468,8 +464,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 	/* Free any buffers left in the ring */
 	while (tx_queue->read_count != tx_queue->write_count) {
-		buffer = &tx_queue->buffer[tx_queue->read_count &
-					   tx_queue->efx->type->txd_ring_mask];
+		buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
 		efx_dequeue_buffer(tx_queue, buffer);
 		buffer->continuation = true;
 		buffer->len = 0;
@@ -715,7 +710,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
 	/* -1 as there is no way to represent all descriptors used */
-	q_space = efx->type->txd_ring_mask - 1 - fill_level;
+	q_space = EFX_TXQ_MASK - 1 - fill_level;
 
 	while (1) {
 		if (unlikely(q_space-- <= 0)) {
@@ -731,7 +726,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 				*(volatile unsigned *)&tx_queue->read_count;
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
-			q_space = efx->type->txd_ring_mask - 1 - fill_level;
+			q_space = EFX_TXQ_MASK - 1 - fill_level;
 			if (unlikely(q_space-- <= 0)) {
 				*final_buffer = NULL;
 				return 1;
@@ -740,13 +735,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			--tx_queue->stopped;
 		}
 
-		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
+		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[insert_ptr];
 		++tx_queue->insert_count;
 
 		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
 				    tx_queue->read_count >
-				    efx->type->txd_ring_mask);
+				    EFX_TXQ_MASK);
 
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->len);
@@ -792,8 +787,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
 {
 	struct efx_tx_buffer *buffer;
 
-	buffer = &tx_queue->buffer[tx_queue->insert_count &
-				   tx_queue->efx->type->txd_ring_mask];
+	buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
 	efx_tsoh_free(tx_queue, buffer);
 	EFX_BUG_ON_PARANOID(buffer->len);
 	EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -818,7 +812,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
 		buffer = &tx_queue->buffer[tx_queue->insert_count &
-					   tx_queue->efx->type->txd_ring_mask];
+					   EFX_TXQ_MASK];
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->skb);
 		buffer->len = 0;
@@ -1135,7 +1129,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
 	unsigned i;
 
 	if (tx_queue->buffer) {
-		for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
+		for (i = 0; i <= EFX_TXQ_MASK; ++i)
 			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
 	}
 
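All of the replacements above follow the same pattern: free-running insert/read counters are reduced to a ring slot with a bitwise AND against EFX_TXQ_MASK, which works only because the ring size is a power of two, and free space is computed as EFX_TXQ_MASK - 1 - fill_level (the -1 because, as the comment in efx_tx_queue_insert notes, a completely full ring cannot be represented). A minimal standalone sketch of that arithmetic, using hypothetical counter values rather than anything taken from the driver:

#include <stdio.h>

#define EFX_TXQ_SIZE 4096		/* assumed power-of-two ring size */
#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)

int main(void)
{
	/* Hypothetical free-running counters: they only ever increment;
	 * masking with EFX_TXQ_MASK recovers the ring slot.
	 */
	unsigned int insert_count = 8200, old_read_count = 4200;

	unsigned int insert_ptr = insert_count & EFX_TXQ_MASK;		/* 8200 mod 4096 = 8 */
	unsigned int fill_level = insert_count - old_read_count;	/* 4000 descriptors in flight */
	unsigned int q_space = EFX_TXQ_MASK - 1 - fill_level;		/* 94 free slots remain */

	printf("slot=%u fill=%u space=%u\n", insert_ptr, fill_level, q_space);
	return 0;
}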