diff options
author | Steve Hodgson <shodgson@solarflare.com> | 2010-09-10 02:42:22 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-09-10 15:27:33 -0400 |
commit | ecc910f520ba8f22848982ee816ad75c449b805d (patch) | |
tree | e934380209532b831b7e7e334ddc33d75db7eef5 /drivers/net/sfc/nic.c | |
parent | 8313aca38b3937947fffebca6e34bac8e24300c8 (diff) |
sfc: Make the dmaq size a run-time setting (rather than compile-time)
- Allow the ring size to be specified in non-power-of-two sizes
  (for instance to limit the amount of receive buffers).
- Automatically size the event queue.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/sfc/nic.c')
-rw-r--r-- | drivers/net/sfc/nic.c | 53 |
1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index 9e3563348eb7..0deb5c38efff 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c | |||
@@ -356,7 +356,7 @@ static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) | |||
356 | unsigned write_ptr; | 356 | unsigned write_ptr; |
357 | efx_dword_t reg; | 357 | efx_dword_t reg; |
358 | 358 | ||
359 | write_ptr = tx_queue->write_count & EFX_TXQ_MASK; | 359 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
360 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); | 360 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); |
361 | efx_writed_page(tx_queue->efx, ®, | 361 | efx_writed_page(tx_queue->efx, ®, |
362 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); | 362 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); |
@@ -377,7 +377,7 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) | |||
377 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | 377 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); |
378 | 378 | ||
379 | do { | 379 | do { |
380 | write_ptr = tx_queue->write_count & EFX_TXQ_MASK; | 380 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
381 | buffer = &tx_queue->buffer[write_ptr]; | 381 | buffer = &tx_queue->buffer[write_ptr]; |
382 | txd = efx_tx_desc(tx_queue, write_ptr); | 382 | txd = efx_tx_desc(tx_queue, write_ptr); |
383 | ++tx_queue->write_count; | 383 | ++tx_queue->write_count; |
@@ -398,10 +398,11 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) | |||
398 | int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) | 398 | int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) |
399 | { | 399 | { |
400 | struct efx_nic *efx = tx_queue->efx; | 400 | struct efx_nic *efx = tx_queue->efx; |
401 | BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 || | 401 | unsigned entries; |
402 | EFX_TXQ_SIZE & EFX_TXQ_MASK); | 402 | |
403 | entries = tx_queue->ptr_mask + 1; | ||
403 | return efx_alloc_special_buffer(efx, &tx_queue->txd, | 404 | return efx_alloc_special_buffer(efx, &tx_queue->txd, |
404 | EFX_TXQ_SIZE * sizeof(efx_qword_t)); | 405 | entries * sizeof(efx_qword_t)); |
405 | } | 406 | } |
406 | 407 | ||
407 | void efx_nic_init_tx(struct efx_tx_queue *tx_queue) | 408 | void efx_nic_init_tx(struct efx_tx_queue *tx_queue) |
@@ -526,30 +527,32 @@ efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) | |||
526 | */ | 527 | */ |
527 | void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) | 528 | void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) |
528 | { | 529 | { |
530 | struct efx_nic *efx = rx_queue->efx; | ||
529 | efx_dword_t reg; | 531 | efx_dword_t reg; |
530 | unsigned write_ptr; | 532 | unsigned write_ptr; |
531 | 533 | ||
532 | while (rx_queue->notified_count != rx_queue->added_count) { | 534 | while (rx_queue->notified_count != rx_queue->added_count) { |
533 | efx_build_rx_desc(rx_queue, | 535 | efx_build_rx_desc( |
534 | rx_queue->notified_count & | 536 | rx_queue, |
535 | EFX_RXQ_MASK); | 537 | rx_queue->notified_count & rx_queue->ptr_mask); |
536 | ++rx_queue->notified_count; | 538 | ++rx_queue->notified_count; |
537 | } | 539 | } |
538 | 540 | ||
539 | wmb(); | 541 | wmb(); |
540 | write_ptr = rx_queue->added_count & EFX_RXQ_MASK; | 542 | write_ptr = rx_queue->added_count & rx_queue->ptr_mask; |
541 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); | 543 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); |
542 | efx_writed_page(rx_queue->efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, | 544 | efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, |
543 | efx_rx_queue_index(rx_queue)); | 545 | efx_rx_queue_index(rx_queue)); |
544 | } | 546 | } |
545 | 547 | ||
546 | int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) | 548 | int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) |
547 | { | 549 | { |
548 | struct efx_nic *efx = rx_queue->efx; | 550 | struct efx_nic *efx = rx_queue->efx; |
549 | BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 || | 551 | unsigned entries; |
550 | EFX_RXQ_SIZE & EFX_RXQ_MASK); | 552 | |
553 | entries = rx_queue->ptr_mask + 1; | ||
551 | return efx_alloc_special_buffer(efx, &rx_queue->rxd, | 554 | return efx_alloc_special_buffer(efx, &rx_queue->rxd, |
552 | EFX_RXQ_SIZE * sizeof(efx_qword_t)); | 555 | entries * sizeof(efx_qword_t)); |
553 | } | 556 | } |
554 | 557 | ||
555 | void efx_nic_init_rx(struct efx_rx_queue *rx_queue) | 558 | void efx_nic_init_rx(struct efx_rx_queue *rx_queue) |
@@ -685,7 +688,7 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | |||
685 | tx_queue = efx_channel_get_tx_queue( | 688 | tx_queue = efx_channel_get_tx_queue( |
686 | channel, tx_ev_q_label % EFX_TXQ_TYPES); | 689 | channel, tx_ev_q_label % EFX_TXQ_TYPES); |
687 | tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & | 690 | tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & |
688 | EFX_TXQ_MASK); | 691 | tx_queue->ptr_mask); |
689 | channel->irq_mod_score += tx_packets; | 692 | channel->irq_mod_score += tx_packets; |
690 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); | 693 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); |
691 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { | 694 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { |
@@ -796,8 +799,8 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) | |||
796 | struct efx_nic *efx = rx_queue->efx; | 799 | struct efx_nic *efx = rx_queue->efx; |
797 | unsigned expected, dropped; | 800 | unsigned expected, dropped; |
798 | 801 | ||
799 | expected = rx_queue->removed_count & EFX_RXQ_MASK; | 802 | expected = rx_queue->removed_count & rx_queue->ptr_mask; |
800 | dropped = (index - expected) & EFX_RXQ_MASK; | 803 | dropped = (index - expected) & rx_queue->ptr_mask; |
801 | netif_info(efx, rx_err, efx->net_dev, | 804 | netif_info(efx, rx_err, efx->net_dev, |
802 | "dropped %d events (index=%d expected=%d)\n", | 805 | "dropped %d events (index=%d expected=%d)\n", |
803 | dropped, index, expected); | 806 | dropped, index, expected); |
@@ -835,7 +838,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) | |||
835 | rx_queue = efx_channel_get_rx_queue(channel); | 838 | rx_queue = efx_channel_get_rx_queue(channel); |
836 | 839 | ||
837 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); | 840 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); |
838 | expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK; | 841 | expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; |
839 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) | 842 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) |
840 | efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); | 843 | efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); |
841 | 844 | ||
@@ -1002,6 +1005,7 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | |||
1002 | 1005 | ||
1003 | int efx_nic_process_eventq(struct efx_channel *channel, int budget) | 1006 | int efx_nic_process_eventq(struct efx_channel *channel, int budget) |
1004 | { | 1007 | { |
1008 | struct efx_nic *efx = channel->efx; | ||
1005 | unsigned int read_ptr; | 1009 | unsigned int read_ptr; |
1006 | efx_qword_t event, *p_event; | 1010 | efx_qword_t event, *p_event; |
1007 | int ev_code; | 1011 | int ev_code; |
@@ -1026,7 +1030,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget) | |||
1026 | EFX_SET_QWORD(*p_event); | 1030 | EFX_SET_QWORD(*p_event); |
1027 | 1031 | ||
1028 | /* Increment read pointer */ | 1032 | /* Increment read pointer */ |
1029 | read_ptr = (read_ptr + 1) & EFX_EVQ_MASK; | 1033 | read_ptr = (read_ptr + 1) & channel->eventq_mask; |
1030 | 1034 | ||
1031 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); | 1035 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); |
1032 | 1036 | ||
@@ -1038,7 +1042,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget) | |||
1038 | break; | 1042 | break; |
1039 | case FSE_AZ_EV_CODE_TX_EV: | 1043 | case FSE_AZ_EV_CODE_TX_EV: |
1040 | tx_packets += efx_handle_tx_event(channel, &event); | 1044 | tx_packets += efx_handle_tx_event(channel, &event); |
1041 | if (tx_packets >= EFX_TXQ_SIZE) { | 1045 | if (tx_packets > efx->txq_entries) { |
1042 | spent = budget; | 1046 | spent = budget; |
1043 | goto out; | 1047 | goto out; |
1044 | } | 1048 | } |
@@ -1073,10 +1077,11 @@ out: | |||
1073 | int efx_nic_probe_eventq(struct efx_channel *channel) | 1077 | int efx_nic_probe_eventq(struct efx_channel *channel) |
1074 | { | 1078 | { |
1075 | struct efx_nic *efx = channel->efx; | 1079 | struct efx_nic *efx = channel->efx; |
1076 | BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 || | 1080 | unsigned entries; |
1077 | EFX_EVQ_SIZE & EFX_EVQ_MASK); | 1081 | |
1082 | entries = channel->eventq_mask + 1; | ||
1078 | return efx_alloc_special_buffer(efx, &channel->eventq, | 1083 | return efx_alloc_special_buffer(efx, &channel->eventq, |
1079 | EFX_EVQ_SIZE * sizeof(efx_qword_t)); | 1084 | entries * sizeof(efx_qword_t)); |
1080 | } | 1085 | } |
1081 | 1086 | ||
1082 | void efx_nic_init_eventq(struct efx_channel *channel) | 1087 | void efx_nic_init_eventq(struct efx_channel *channel) |
@@ -1172,7 +1177,7 @@ static void efx_poll_flush_events(struct efx_nic *efx) | |||
1172 | struct efx_tx_queue *tx_queue; | 1177 | struct efx_tx_queue *tx_queue; |
1173 | struct efx_rx_queue *rx_queue; | 1178 | struct efx_rx_queue *rx_queue; |
1174 | unsigned int read_ptr = channel->eventq_read_ptr; | 1179 | unsigned int read_ptr = channel->eventq_read_ptr; |
1175 | unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK; | 1180 | unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask; |
1176 | 1181 | ||
1177 | do { | 1182 | do { |
1178 | efx_qword_t *event = efx_event(channel, read_ptr); | 1183 | efx_qword_t *event = efx_event(channel, read_ptr); |
@@ -1212,7 +1217,7 @@ static void efx_poll_flush_events(struct efx_nic *efx) | |||
1212 | * it's ok to throw away every non-flush event */ | 1217 | * it's ok to throw away every non-flush event */ |
1213 | EFX_SET_QWORD(*event); | 1218 | EFX_SET_QWORD(*event); |
1214 | 1219 | ||
1215 | read_ptr = (read_ptr + 1) & EFX_EVQ_MASK; | 1220 | read_ptr = (read_ptr + 1) & channel->eventq_mask; |
1216 | } while (read_ptr != end_ptr); | 1221 | } while (read_ptr != end_ptr); |
1217 | 1222 | ||
1218 | channel->eventq_read_ptr = read_ptr; | 1223 | channel->eventq_read_ptr = read_ptr; |