Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/efx.c          15
-rw-r--r--  drivers/net/sfc/efx.h          13
-rw-r--r--  drivers/net/sfc/net_driver.h   13
-rw-r--r--  drivers/net/sfc/nic.c          53
-rw-r--r--  drivers/net/sfc/rx.c           33
-rw-r--r--  drivers/net/sfc/selftest.c      2
-rw-r--r--  drivers/net/sfc/tx.c           56
7 files changed, 109 insertions, 76 deletions
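
The whole patch follows one idiom: every descriptor and event ring keeps its size as a power of two and stores "size - 1" in a per-queue ptr_mask (or eventq_mask), so the compile-time EFX_TXQ_MASK/EFX_RXQ_MASK/EFX_EVQ_MASK constants can go away. A minimal standalone C sketch of that idiom (illustration only, not driver code; the names here are hypothetical):

/* Sketch: power-of-two ring addressed through a per-queue mask. */
#include <stdio.h>

struct ring {
	unsigned int ptr_mask;		/* ring size minus 1 (size is 2^n) */
	unsigned int added_count;	/* free-running producer counter */
	unsigned int removed_count;	/* free-running consumer counter */
};

/* Map a free-running counter to a slot index; wraps because size is 2^n. */
static unsigned int ring_index(const struct ring *r, unsigned int count)
{
	return count & r->ptr_mask;
}

int main(void)
{
	struct ring r = { .ptr_mask = 1024 - 1, .added_count = 1030,
			  .removed_count = 1020 };

	/* Fill level is a plain difference; the slot index wraps to 6. */
	printf("fill level %u, next slot %u\n",
	       r.added_count - r.removed_count,
	       ring_index(&r, r.added_count));
	return 0;
}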
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 4b42e61e3c7d..6166e2207160 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -348,7 +348,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 	napi_disable(&channel->napi_str);
 
 	/* Poll the channel */
-	efx_process_channel(channel, EFX_EVQ_SIZE);
+	efx_process_channel(channel, channel->eventq_mask + 1);
 
 	/* Ack the eventq. This may cause an interrupt to be generated
 	 * when they are reenabled */
@@ -365,9 +365,18 @@ void efx_process_channel_now(struct efx_channel *channel)
  */
 static int efx_probe_eventq(struct efx_channel *channel)
 {
+	struct efx_nic *efx = channel->efx;
+	unsigned long entries;
+
 	netif_dbg(channel->efx, probe, channel->efx->net_dev,
 		  "chan %d create event queue\n", channel->channel);
 
+	/* Build an event queue with room for one event per tx and rx buffer,
+	 * plus some extra for link state events and MCDI completions. */
+	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+	EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
 	return efx_nic_probe_eventq(channel);
 }
 
@@ -1191,6 +1200,7 @@ static int efx_probe_all(struct efx_nic *efx)
 	}
 
 	/* Create channels */
+	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
 	efx_for_each_channel(channel, efx) {
 		rc = efx_probe_channel(channel);
 		if (rc) {
@@ -2101,9 +2111,6 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 
 	efx->type = type;
 
-	/* As close as we can get to guaranteeing that we don't overflow */
-	BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
-
 	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
 	/* Higher numbered interrupt modes are less capable! */
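
The new efx_probe_eventq() above sizes the event queue from the run-time ring sizes. A standalone sketch of that calculation, with roundup_p2() standing in for the kernel's roundup_pow_of_two() (illustrative, not driver code):

/* Sketch: event queue sizing from run-time TX/RX ring sizes. */
#include <assert.h>
#include <stdio.h>

#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL

/* Stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long roundup_p2(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned long evq_mask(unsigned long rxq_entries, unsigned long txq_entries)
{
	/* One event per TX and RX buffer, plus slack for link state
	 * events and MCDI completions, rounded up to a power of two. */
	unsigned long entries = roundup_p2(rxq_entries + txq_entries + 128);

	assert(entries <= EFX_MAX_EVQ_SIZE);
	if (entries < EFX_MIN_EVQ_SIZE)
		entries = EFX_MIN_EVQ_SIZE;
	return entries - 1;
}

int main(void)
{
	/* Default 1024-entry rings give a 4096-entry event queue (mask 0xfff),
	 * matching the old fixed EFX_EVQ_SIZE. */
	printf("eventq_mask = %#lx\n", evq_mask(1024, 1024));
	return 0;
}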
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index d6b172b1a8ef..c15a2d3c2c23 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -37,8 +37,6 @@ efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern void efx_stop_queue(struct efx_channel *channel);
 extern void efx_wake_queue(struct efx_channel *channel);
-#define EFX_TXQ_SIZE 1024
-#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
44extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 42extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -53,13 +51,16 @@ extern void __efx_rx_packet(struct efx_channel *channel,
 extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 			  unsigned int len, bool checksummed, bool discard);
 extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
-#define EFX_RXQ_SIZE 1024
-#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
+
+#define EFX_MAX_DMAQ_SIZE 4096UL
+#define EFX_DEFAULT_DMAQ_SIZE 1024UL
+#define EFX_MIN_DMAQ_SIZE 512UL
+
+#define EFX_MAX_EVQ_SIZE 16384UL
+#define EFX_MIN_EVQ_SIZE 512UL
 
 /* Channels */
 extern void efx_process_channel_now(struct efx_channel *channel);
-#define EFX_EVQ_SIZE 4096
-#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1)
 
 /* Ports */
 extern int efx_reconfigure_port(struct efx_nic *efx);
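
These bounds replace the old fixed EFX_TXQ_SIZE/EFX_RXQ_SIZE/EFX_EVQ_SIZE. A hedged sketch of how a requested descriptor count might be mapped onto them, mirroring the probe-time logic in rx.c and tx.c below (the driver itself asserts on the upper bound rather than clamping, and the "requested" value here is hypothetical):

/* Sketch: turning a requested descriptor count into a ring size and mask. */
#include <stdio.h>

#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
#define EFX_MIN_DMAQ_SIZE 512UL

/* Stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long roundup_p2(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long requested = 700;	/* hypothetical requested size */
	unsigned long entries = roundup_p2(requested);

	if (entries < EFX_MIN_DMAQ_SIZE)
		entries = EFX_MIN_DMAQ_SIZE;
	if (entries > EFX_MAX_DMAQ_SIZE)
		entries = EFX_MAX_DMAQ_SIZE;

	/* 700 rounds up to 1024 entries, giving ptr_mask 0x3ff. */
	printf("requested %lu -> %lu entries, ptr_mask %#lx\n",
	       requested, entries, entries - 1);
	return 0;
}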
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index cfc65f5a3c09..ac622ab72e11 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -137,6 +137,7 @@ struct efx_tx_buffer {
  * @channel: The associated channel
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
  * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *	This is the number of buffers that have been removed from both rings.
@@ -170,6 +171,7 @@ struct efx_tx_queue {
 	struct efx_nic *nic;
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
+	unsigned int ptr_mask;
 	enum efx_flush_state flushed;
 
 	/* Members used mainly on the completion path */
@@ -227,6 +229,7 @@ struct efx_rx_page_state {
  * @efx: The associated Efx NIC
  * @buffer: The software buffer ring
  * @rxd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
  * @added_count: Number of buffers added to the receive queue.
  * @notified_count: Number of buffers given to NIC (<= @added_count).
  * @removed_count: Number of buffers removed from the receive queue.
@@ -238,9 +241,6 @@ struct efx_rx_page_state {
  * @min_fill: RX descriptor minimum non-zero fill level.
  *	This records the minimum fill level observed when a ring
  *	refill was triggered.
- * @min_overfill: RX descriptor minimum overflow fill level.
- *	This records the minimum fill level at which RX queue
- *	overflow was observed. It should never be set.
  * @alloc_page_count: RX allocation strategy counter.
  * @alloc_skb_count: RX allocation strategy counter.
  * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
@@ -250,6 +250,7 @@ struct efx_rx_queue {
 	struct efx_nic *efx;
 	struct efx_rx_buffer *buffer;
 	struct efx_special_buffer rxd;
+	unsigned int ptr_mask;
 
 	int added_count;
 	int notified_count;
@@ -307,6 +308,7 @@ enum efx_rx_alloc_method {
  * @reset_work: Scheduled reset work thread
  * @work_pending: Is work pending via NAPI?
  * @eventq: Event queue buffer
+ * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
  * @magic_count: Event queue test event count
@@ -339,6 +341,7 @@ struct efx_channel {
 	struct napi_struct napi_str;
 	bool work_pending;
 	struct efx_special_buffer eventq;
+	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
 	unsigned int last_eventq_read_ptr;
 	unsigned int magic_count;
@@ -641,6 +644,8 @@ union efx_multicast_hash {
  * @tx_queue: TX DMA queues
  * @rx_queue: RX DMA queues
  * @channel: Channels
+ * @rxq_entries: Size of receive queues requested by user.
+ * @txq_entries: Size of transmit queues requested by user.
  * @next_buffer_table: First available buffer table id
  * @n_channels: Number of channels in use
  * @n_rx_channels: Number of channels used for RX (= number of RX queues)
@@ -726,6 +731,8 @@ struct efx_nic {
 
 	struct efx_channel *channel[EFX_MAX_CHANNELS];
 
+	unsigned rxq_entries;
+	unsigned txq_entries;
 	unsigned next_buffer_table;
 	unsigned n_channels;
 	unsigned n_rx_channels;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 9e3563348eb7..0deb5c38efff 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -356,7 +356,7 @@ static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
 	unsigned write_ptr;
 	efx_dword_t reg;
 
-	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
+	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
 	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
 	efx_writed_page(tx_queue->efx, &reg,
 			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
@@ -377,7 +377,7 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
 	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
 
 	do {
-		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
+		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[write_ptr];
 		txd = efx_tx_desc(tx_queue, write_ptr);
 		++tx_queue->write_count;
@@ -398,10 +398,11 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
 int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
-		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
+	unsigned entries;
+
+	entries = tx_queue->ptr_mask + 1;
 	return efx_alloc_special_buffer(efx, &tx_queue->txd,
-					EFX_TXQ_SIZE * sizeof(efx_qword_t));
+					entries * sizeof(efx_qword_t));
 }
 
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
@@ -526,30 +527,32 @@ efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
  */
 void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
 {
+	struct efx_nic *efx = rx_queue->efx;
 	efx_dword_t reg;
 	unsigned write_ptr;
 
 	while (rx_queue->notified_count != rx_queue->added_count) {
-		efx_build_rx_desc(rx_queue,
-				  rx_queue->notified_count &
-				  EFX_RXQ_MASK);
+		efx_build_rx_desc(
+			rx_queue,
+			rx_queue->notified_count & rx_queue->ptr_mask);
 		++rx_queue->notified_count;
 	}
 
 	wmb();
-	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
+	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
 	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
-	efx_writed_page(rx_queue->efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
+	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
 			efx_rx_queue_index(rx_queue));
 }
 
 int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
-	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
-		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
+	unsigned entries;
+
+	entries = rx_queue->ptr_mask + 1;
 	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
-					EFX_RXQ_SIZE * sizeof(efx_qword_t));
+					entries * sizeof(efx_qword_t));
 }
 
 void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
@@ -685,7 +688,7 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 		tx_queue = efx_channel_get_tx_queue(
 			channel, tx_ev_q_label % EFX_TXQ_TYPES);
 		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
-			      EFX_TXQ_MASK);
+			      tx_queue->ptr_mask);
 		channel->irq_mod_score += tx_packets;
 		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
@@ -796,8 +799,8 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned expected, dropped;
 
-	expected = rx_queue->removed_count & EFX_RXQ_MASK;
-	dropped = (index - expected) & EFX_RXQ_MASK;
+	expected = rx_queue->removed_count & rx_queue->ptr_mask;
+	dropped = (index - expected) & rx_queue->ptr_mask;
 	netif_info(efx, rx_err, efx->net_dev,
 		   "dropped %d events (index=%d expected=%d)\n",
 		   dropped, index, expected);
@@ -835,7 +838,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	rx_queue = efx_channel_get_rx_queue(channel);
 
 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
-	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
+	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
 	if (unlikely(rx_ev_desc_ptr != expected_ptr))
 		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
 
@@ -1002,6 +1005,7 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
 
 int efx_nic_process_eventq(struct efx_channel *channel, int budget)
 {
+	struct efx_nic *efx = channel->efx;
 	unsigned int read_ptr;
 	efx_qword_t event, *p_event;
 	int ev_code;
@@ -1026,7 +1030,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
 		EFX_SET_QWORD(*p_event);
 
 		/* Increment read pointer */
-		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+		read_ptr = (read_ptr + 1) & channel->eventq_mask;
 
 		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
 
@@ -1038,7 +1042,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
 			break;
 		case FSE_AZ_EV_CODE_TX_EV:
 			tx_packets += efx_handle_tx_event(channel, &event);
-			if (tx_packets >= EFX_TXQ_SIZE) {
+			if (tx_packets > efx->txq_entries) {
 				spent = budget;
 				goto out;
 			}
@@ -1073,10 +1077,11 @@ out:
 int efx_nic_probe_eventq(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
-	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
-		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
+	unsigned entries;
+
+	entries = channel->eventq_mask + 1;
 	return efx_alloc_special_buffer(efx, &channel->eventq,
-					EFX_EVQ_SIZE * sizeof(efx_qword_t));
+					entries * sizeof(efx_qword_t));
 }
 
 void efx_nic_init_eventq(struct efx_channel *channel)
@@ -1172,7 +1177,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	unsigned int read_ptr = channel->eventq_read_ptr;
-	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;
+	unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
 
 	do {
 		efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1212,7 +1217,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 		 * it's ok to throw away every non-flush event */
 		EFX_SET_QWORD(*event);
 
-		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+		read_ptr = (read_ptr + 1) & channel->eventq_mask;
 	} while (read_ptr != end_ptr);
 
 	channel->eventq_read_ptr = read_ptr;
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 6651d9364e8f..6d0959b5158e 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -133,7 +133,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 	unsigned index, count;
 
 	for (count = 0; count < EFX_RX_BATCH; ++count) {
-		index = rx_queue->added_count & EFX_RXQ_MASK;
+		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
 
 		rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
@@ -208,7 +208,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 		dma_addr += sizeof(struct efx_rx_page_state);
 
 	split:
-		index = rx_queue->added_count & EFX_RXQ_MASK;
+		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
 		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
 		rx_buf->skb = NULL;
@@ -285,7 +285,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
 	 * we'd like to insert an additional descriptor whilst leaving
 	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
-	if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
+	if (unlikely(fill_level > rx_queue->max_fill)) {
 		/* We could place "state" on a list, and drain the list in
 		 * efx_fast_push_rx_descriptors(). For now, this will do. */
 		return;
@@ -294,7 +294,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
 	++state->refcnt;
 	get_page(rx_buf->page);
 
-	index = rx_queue->added_count & EFX_RXQ_MASK;
+	index = rx_queue->added_count & rx_queue->ptr_mask;
 	new_buf = efx_rx_buffer(rx_queue, index);
 	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
 	new_buf->skb = NULL;
@@ -319,7 +319,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
 	    page_count(rx_buf->page) == 1)
 		efx_resurrect_rx_buffer(rx_queue, rx_buf);
 
-	index = rx_queue->added_count & EFX_RXQ_MASK;
+	index = rx_queue->added_count & rx_queue->ptr_mask;
 	new_buf = efx_rx_buffer(rx_queue, index);
 
 	memcpy(new_buf, rx_buf, sizeof(*new_buf));
@@ -347,7 +347,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 
 	/* Calculate current fill level, and exit if we don't need to fill */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
+	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
 	if (fill_level >= rx_queue->fast_fill_trigger)
 		goto out;
 
@@ -650,15 +650,22 @@ void efx_rx_strategy(struct efx_channel *channel)
 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
-	unsigned int rxq_size;
+	unsigned int entries;
 	int rc;
 
+	/* Create the smallest power-of-two aligned ring */
+	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
+	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+	rx_queue->ptr_mask = entries - 1;
+
 	netif_dbg(efx, probe, efx->net_dev,
-		  "creating RX queue %d\n", efx_rx_queue_index(rx_queue));
+		  "creating RX queue %d size %#x mask %#x\n",
+		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
+		  rx_queue->ptr_mask);
 
 	/* Allocate RX buffers */
-	rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
-	rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
+	rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
+				   GFP_KERNEL);
 	if (!rx_queue->buffer)
 		return -ENOMEM;
 
@@ -672,6 +679,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
+	struct efx_nic *efx = rx_queue->efx;
 	unsigned int max_fill, trigger, limit;
 
 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
@@ -682,10 +690,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 	rx_queue->notified_count = 0;
 	rx_queue->removed_count = 0;
 	rx_queue->min_fill = -1U;
-	rx_queue->min_overfill = -1U;
 
 	/* Initialise limit fields */
-	max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
+	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
 	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
 	limit = max_fill * min(rx_refill_limit, 100U) / 100U;
 
@@ -710,7 +717,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 
 	/* Release RX buffers NB start at index 0 not current HW ptr */
 	if (rx_queue->buffer) {
-		for (i = 0; i <= EFX_RXQ_MASK; i++) {
+		for (i = 0; i <= rx_queue->ptr_mask; i++) {
 			rx_buf = efx_rx_buffer(rx_queue, i);
 			efx_fini_rx_buffer(rx_queue, rx_buf);
 		}
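
The RX refill limits in efx_init_rx_queue() above are now derived from efx->rxq_entries. A standalone sketch of that arithmetic; EFX_RXD_HEAD_ROOM and the refill percentages below are illustrative values, not necessarily the driver's defaults:

/* Sketch: RX refill limits derived from the run-time ring size. */
#include <stdio.h>

#define EFX_RXD_HEAD_ROOM 2U		/* illustrative headroom value */

int main(void)
{
	unsigned int rxq_entries = 1024;	/* efx->rxq_entries */
	unsigned int refill_threshold = 90;	/* illustrative percentages */
	unsigned int refill_limit = 95;
	unsigned int max_fill = rxq_entries - EFX_RXD_HEAD_ROOM;
	unsigned int trigger = max_fill * refill_threshold / 100U;
	unsigned int limit = max_fill * refill_limit / 100U;

	printf("max_fill=%u trigger=%u limit=%u\n", max_fill, trigger, limit);
	return 0;
}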
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 11153d99bc2b..da4473b71058 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -506,7 +506,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
 
 	for (i = 0; i < 3; i++) {
 		/* Determine how many packets to send */
-		state->packet_count = EFX_TXQ_SIZE / 3;
+		state->packet_count = efx->txq_entries / 3;
 		state->packet_count = min(1 << (i << 2), state->packet_count);
 		state->skbs = kzalloc(sizeof(state->skbs[0]) *
 				      state->packet_count, GFP_KERNEL);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 6a6acc47285c..11726989fe2d 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -28,7 +28,7 @@
  * The tx_queue descriptor ring fill-level must fall below this value
  * before we restart the netif queue
  */
-#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
+#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
 /* We need to be able to nest calls to netif_tx_stop_queue(), partly
  * because of the 2 hardware queues associated with each core queue,
@@ -207,7 +207,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	}
 
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-	q_space = EFX_TXQ_MASK - 1 - fill_level;
+	q_space = efx->txq_entries - 1 - fill_level;
 
 	/* Map for DMA. Use pci_map_single rather than pci_map_page
 	 * since this is more efficient on machines with sparse
@@ -244,14 +244,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 				&tx_queue->read_count;
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
-			q_space = EFX_TXQ_MASK - 1 - fill_level;
+			q_space = efx->txq_entries - 1 - fill_level;
 			if (unlikely(q_space-- <= 0))
 				goto stop;
 			smp_mb();
 			--tx_queue->stopped;
 		}
 
-		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
+		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->tsoh);
@@ -320,7 +320,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
-		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
+		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_dequeue_buffer(tx_queue, buffer);
 		buffer->len = 0;
@@ -350,8 +350,8 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 	struct efx_nic *efx = tx_queue->efx;
 	unsigned int stop_index, read_ptr;
 
-	stop_index = (index + 1) & EFX_TXQ_MASK;
-	read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
+	stop_index = (index + 1) & tx_queue->ptr_mask;
+	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
 
 	while (read_ptr != stop_index) {
 		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -368,7 +368,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 		buffer->len = 0;
 
 		++tx_queue->read_count;
-		read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
+		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
 	}
 }
 
@@ -402,7 +402,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
 
-	EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);
+	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
 	efx_dequeue_buffers(tx_queue, index);
 
@@ -412,7 +412,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	smp_mb();
 	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
-		if (fill_level < EFX_TXQ_THRESHOLD) {
+		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
 
 			/* Do this under netif_tx_lock(), to avoid racing
@@ -430,18 +430,24 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	unsigned int txq_size;
+	unsigned int entries;
 	int i, rc;
 
-	netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n",
-		  tx_queue->queue);
+	/* Create the smallest power-of-two aligned ring */
+	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
+	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+	tx_queue->ptr_mask = entries - 1;
+
+	netif_dbg(efx, probe, efx->net_dev,
+		  "creating TX queue %d size %#x mask %#x\n",
+		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
 
 	/* Allocate software ring */
-	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
-	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
+	tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+				   GFP_KERNEL);
 	if (!tx_queue->buffer)
 		return -ENOMEM;
-	for (i = 0; i <= EFX_TXQ_MASK; ++i)
+	for (i = 0; i <= tx_queue->ptr_mask; ++i)
 		tx_queue->buffer[i].continuation = true;
 
 	/* Allocate hardware ring */
@@ -481,7 +487,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 	/* Free any buffers left in the ring */
 	while (tx_queue->read_count != tx_queue->write_count) {
-		buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
+		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
 		efx_dequeue_buffer(tx_queue, buffer);
 		buffer->continuation = true;
 		buffer->len = 0;
@@ -741,7 +747,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
 	/* -1 as there is no way to represent all descriptors used */
-	q_space = EFX_TXQ_MASK - 1 - fill_level;
+	q_space = efx->txq_entries - 1 - fill_level;
 
 	while (1) {
 		if (unlikely(q_space-- <= 0)) {
@@ -757,7 +763,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 				*(volatile unsigned *)&tx_queue->read_count;
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
-			q_space = EFX_TXQ_MASK - 1 - fill_level;
+			q_space = efx->txq_entries - 1 - fill_level;
 			if (unlikely(q_space-- <= 0)) {
 				*final_buffer = NULL;
 				return 1;
@@ -766,13 +772,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			--tx_queue->stopped;
 		}
 
-		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
+		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
 		++tx_queue->insert_count;
 
 		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
-				    tx_queue->read_count >
-				    EFX_TXQ_MASK);
+				    tx_queue->read_count >=
+				    efx->txq_entries);
 
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->len);
@@ -813,7 +819,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
 {
 	struct efx_tx_buffer *buffer;
 
-	buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
+	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
 	efx_tsoh_free(tx_queue, buffer);
 	EFX_BUG_ON_PARANOID(buffer->len);
 	EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -838,7 +844,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
 		buffer = &tx_queue->buffer[tx_queue->insert_count &
-					   EFX_TXQ_MASK];
+					   tx_queue->ptr_mask];
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->skb);
 		if (buffer->unmap_len) {
@@ -1168,7 +1174,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
 	unsigned i;
 
 	if (tx_queue->buffer) {
-		for (i = 0; i <= EFX_TXQ_MASK; ++i)
+		for (i = 0; i <= tx_queue->ptr_mask; ++i)
 			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
 	}
 
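
With txq_entries a run-time value, the TX path's free-space check and restart threshold above become arithmetic on efx->txq_entries. A standalone sketch (illustrative, not driver code):

/* Sketch: TX free-space check and restart threshold with run-time sizing. */
#include <stdio.h>

struct txq {
	unsigned int entries;		/* efx->txq_entries */
	unsigned int insert_count;	/* free-running producer counter */
	unsigned int old_read_count;	/* cached consumer counter */
};

/* Usable descriptors are entries - 1: one slot is sacrificed so that a
 * completely full ring can be told apart from an empty one. */
static int txq_space(const struct txq *q)
{
	unsigned int fill_level = q->insert_count - q->old_read_count;

	return (int)(q->entries - 1 - fill_level);
}

int main(void)
{
	struct txq q = { .entries = 1024, .insert_count = 2000,
			 .old_read_count = 1500 };

	/* 523 descriptors free; the netif queue restarts below 512. */
	printf("q_space = %d, restart below %u\n",
	       txq_space(&q), q.entries / 2u);
	return 0;
}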