Diffstat (limited to 'drivers/net/sfc/tx.c')

 -rw-r--r--  drivers/net/sfc/tx.c  |  56
 1 file changed, 31 insertions(+), 25 deletions(-)

diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 6a6acc47285c..11726989fe2d 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -28,7 +28,7 @@
  * The tx_queue descriptor ring fill-level must fall below this value
  * before we restart the netif queue
  */
-#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
+#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
 /* We need to be able to nest calls to netif_tx_stop_queue(), partly
  * because of the 2 hardware queues associated with each core queue,
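This first hunk is the crux of the change: the netif-queue wake threshold stops being a compile-time constant derived from EFX_TXQ_MASK and becomes a function of the per-NIC run-time ring size. A minimal userspace model of the new macro follows; the struct is a hypothetical stand-in, not the driver's struct efx_nic, and only the txq_entries field mirrors the diff.

#include <assert.h>

struct efx_nic_model {			/* stand-in, not the real struct efx_nic */
	unsigned int txq_entries;	/* run-time TX ring size */
};

#define TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)

int main(void)
{
	struct efx_nic_model efx = { .txq_entries = 1024 };

	/* A 1024-entry ring restarts the netif queue once its fill
	 * level drops below 512 descriptors. */
	assert(TXQ_THRESHOLD(&efx) == 512);
	return 0;
}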
@@ -207,7 +207,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	}
 
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-	q_space = EFX_TXQ_MASK - 1 - fill_level;
+	q_space = efx->txq_entries - 1 - fill_level;
 
 	/* Map for DMA.  Use pci_map_single rather than pci_map_page
 	 * since this is more efficient on machines with sparse
@@ -244,14 +244,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 				&tx_queue->read_count;
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
-			q_space = EFX_TXQ_MASK - 1 - fill_level;
+			q_space = efx->txq_entries - 1 - fill_level;
 			if (unlikely(q_space-- <= 0))
 				goto stop;
 			smp_mb();
 			--tx_queue->stopped;
 		}
 
-		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
+		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->tsoh);
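Both q_space computations in this function rely on the same ring discipline: insert_count and old_read_count are free-running unsigned counters, so their difference is the fill level even after 32-bit wrap-around, and the slot index is recovered by masking with ptr_mask, which only works because the ring size is a power of two. A self-contained sketch of that accounting, with illustrative names rather than the driver's types:

#include <assert.h>

struct txq_model {
	unsigned int insert_count;	/* producer counter, never masked */
	unsigned int old_read_count;	/* cached consumer counter */
	unsigned int entries;		/* power-of-two ring size */
	unsigned int ptr_mask;		/* entries - 1 */
};

static unsigned int txq_space(const struct txq_model *q)
{
	unsigned int fill_level = q->insert_count - q->old_read_count;

	/* -1: a completely full ring would be indistinguishable from
	 * an empty one, so one slot is always left unused. */
	return q->entries - 1 - fill_level;
}

int main(void)
{
	/* Counters straddling the 32-bit wrap point: the unsigned
	 * subtraction still yields the true fill level of 3. */
	struct txq_model q = {
		.insert_count = 1u,
		.old_read_count = 0xfffffffeu,
		.entries = 512,
		.ptr_mask = 511,
	};

	assert(q.insert_count - q.old_read_count == 3);
	assert(txq_space(&q) == 512 - 1 - 3);
	assert((q.insert_count & q.ptr_mask) == 1);	/* slot index */
	return 0;
}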
@@ -320,7 +320,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
-		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
+		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_dequeue_buffer(tx_queue, buffer);
 		buffer->len = 0;
@@ -350,8 +350,8 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 	struct efx_nic *efx = tx_queue->efx;
 	unsigned int stop_index, read_ptr;
 
-	stop_index = (index + 1) & EFX_TXQ_MASK;
-	read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
+	stop_index = (index + 1) & tx_queue->ptr_mask;
+	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
 
 	while (read_ptr != stop_index) {
 		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -368,7 +368,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 		buffer->len = 0;
 
 		++tx_queue->read_count;
-		read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
+		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
 	}
 }
 
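These two efx_dequeue_buffers() hunks are the consumer side of the same scheme: stop_index is the slot one past the completion index, and read_ptr chases it, re-masking after every increment so the walk wraps cleanly at the end of the ring. A stand-alone model of the loop, with made-up sizes and counts rather than driver values:

#include <assert.h>

int main(void)
{
	unsigned int entries = 8, ptr_mask = entries - 1;
	unsigned int read_count = 6;	/* free-running consumer counter */
	unsigned int index = 1;		/* completion index (a slot number) */
	unsigned int stop_index = (index + 1) & ptr_mask;
	unsigned int read_ptr = read_count & ptr_mask;
	unsigned int freed = 0;

	while (read_ptr != stop_index) {
		freed++;		/* stands in for releasing buffer[read_ptr] */
		++read_count;
		read_ptr = read_count & ptr_mask;
	}

	/* Slots 6, 7, 0 and 1 were released: the walk wrapped past the
	 * end of the ring without any special casing. */
	assert(freed == 4 && read_count == 10);
	return 0;
}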
@@ -402,7 +402,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
 
-	EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);
+	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
 	efx_dequeue_buffers(tx_queue, index);
 
@@ -412,7 +412,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	smp_mb();
 	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
-		if (fill_level < EFX_TXQ_THRESHOLD) {
+		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
 
 			/* Do this under netif_tx_lock(), to avoid racing
@@ -430,18 +430,24 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	unsigned int txq_size;
+	unsigned int entries;
 	int i, rc;
 
-	netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n",
-		  tx_queue->queue);
+	/* Create the smallest power-of-two aligned ring */
+	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
+	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+	tx_queue->ptr_mask = entries - 1;
+
+	netif_dbg(efx, probe, efx->net_dev,
+		  "creating TX queue %d size %#x mask %#x\n",
+		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
 
 	/* Allocate software ring */
-	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
-	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
+	tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+				   GFP_KERNEL);
 	if (!tx_queue->buffer)
 		return -ENOMEM;
-	for (i = 0; i <= EFX_TXQ_MASK; ++i)
+	for (i = 0; i <= tx_queue->ptr_mask; ++i)
 		tx_queue->buffer[i].continuation = true;
 
 	/* Allocate hardware ring */
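efx_probe_tx_queue() now derives the ring geometry at probe time: round the requested entry count up to a power of two, clamp it to the hardware minimum, and store entries - 1 as an all-ones mask so indexing stays a single AND. The sketch below models that logic in plain C; roundup_pow_of_two() is open-coded here (the kernel's lives in <linux/log2.h>), and the MIN/MAX bounds are placeholders, not the real EFX_MIN_DMAQ_SIZE/EFX_MAX_DMAQ_SIZE values.

#include <assert.h>
#include <stdio.h>

#define MIN_DMAQ_SIZE 512u	/* placeholder hardware minimum */
#define MAX_DMAQ_SIZE 4096u	/* placeholder hardware maximum */

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int requested = 700;	/* arbitrary requested ring size */
	unsigned int entries, ptr_mask;

	/* Smallest power of two that holds the request, clamped to
	 * what the hardware supports. */
	entries = roundup_pow_of_two(requested);
	if (entries < MIN_DMAQ_SIZE)
		entries = MIN_DMAQ_SIZE;
	assert(entries <= MAX_DMAQ_SIZE);

	/* A power-of-two size makes entries - 1 an all-ones mask, so
	 * "counter & ptr_mask" replaces a modulo on every ring access. */
	ptr_mask = entries - 1;

	printf("size %#x mask %#x\n", entries, ptr_mask);	/* size 0x400 mask 0x3ff */
	return 0;
}

Note that the physical ring is sized from the rounded-up entries, while the q_space checks elsewhere in the diff still throttle against efx->txq_entries: a request of 700 gets a 1024-slot ring but keeps 700 as its accounting limit.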
@@ -481,7 +487,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 	/* Free any buffers left in the ring */
 	while (tx_queue->read_count != tx_queue->write_count) {
-		buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
+		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
 		efx_dequeue_buffer(tx_queue, buffer);
 		buffer->continuation = true;
 		buffer->len = 0;
@@ -741,7 +747,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
 	/* -1 as there is no way to represent all descriptors used */
-	q_space = EFX_TXQ_MASK - 1 - fill_level;
+	q_space = efx->txq_entries - 1 - fill_level;
 
 	while (1) {
 		if (unlikely(q_space-- <= 0)) {
@@ -757,7 +763,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 				*(volatile unsigned *)&tx_queue->read_count;
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
-			q_space = EFX_TXQ_MASK - 1 - fill_level;
+			q_space = efx->txq_entries - 1 - fill_level;
 			if (unlikely(q_space-- <= 0)) {
 				*final_buffer = NULL;
 				return 1;
@@ -766,13 +772,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			--tx_queue->stopped;
 		}
 
-		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
+		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
 		++tx_queue->insert_count;
 
 		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
-				    tx_queue->read_count >
-				    EFX_TXQ_MASK);
+				    tx_queue->read_count >=
+				    efx->txq_entries);
 
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->len);
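Note the flipped comparison in the assertion above: with a compile-time ring the overflow check was "fill > EFX_TXQ_MASK"; against a run-time size the equivalent test is "fill >= efx->txq_entries", since mask == entries - 1 makes the two predicates identical for unsigned values. A tiny exhaustive check with a stand-in size:

#include <assert.h>

int main(void)
{
	unsigned int entries = 1024, mask = entries - 1;

	/* x > mask and x >= mask + 1 agree for every unsigned x. */
	for (unsigned int x = 0; x <= 2 * entries; x++)
		assert((x > mask) == (x >= entries));
	return 0;
}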
@@ -813,7 +819,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
 {
 	struct efx_tx_buffer *buffer;
 
-	buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
+	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
 	efx_tsoh_free(tx_queue, buffer);
 	EFX_BUG_ON_PARANOID(buffer->len);
 	EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -838,7 +844,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
 		buffer = &tx_queue->buffer[tx_queue->insert_count &
-					   EFX_TXQ_MASK];
+					   tx_queue->ptr_mask];
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->skb);
 		if (buffer->unmap_len) {
@@ -1168,7 +1174,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
 	unsigned i;
 
 	if (tx_queue->buffer) {
-		for (i = 0; i <= EFX_TXQ_MASK; ++i)
+		for (i = 0; i <= tx_queue->ptr_mask; ++i)
 			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
 	}
 