author		Ben Hutchings <bhutchings@solarflare.com>	2009-10-23 04:30:58 -0400
committer	David S. Miller <davem@davemloft.net>		2009-10-24 07:27:05 -0400
commit		3ffeabdd2bc62e0ebcb1a51a5d959a86a7a915fc (patch)
tree		a3b17cc4b0f8300aca46d67a6f9a362f6b052975 /drivers/net/sfc
parent		12d00cadcc45382fc127712aa35bd0c96cbf81d9 (diff)
sfc: Eliminate indirect lookups of queue size constants
Move size and mask definitions into efx.h; calculate page orders in falcon.c.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
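The pattern adopted here is worth spelling out: ring sizes become compile-time constants with power-of-two masks, so queue indices wrap with a single AND instead of an indirect load through efx->type. A minimal, self-contained sketch of that idea follows; the names are illustrative, not the driver's own, and the compile-time check only mimics the spirit of BUILD_BUG_ON().

/* Sketch only: fixed power-of-two ring size plus mask, verified at
 * compile time, with index wrap done by a cheap AND. */
#include <assert.h>
#include <stdio.h>

#define TXQ_SIZE 1024			/* must be a power of two */
#define TXQ_MASK (TXQ_SIZE - 1)

/* Fails to compile (negative array size) if the condition is false. */
#define STATIC_ASSERT(cond) typedef char sa_check[(cond) ? 1 : -1]
STATIC_ASSERT((TXQ_SIZE & TXQ_MASK) == 0);	/* power-of-two check */

static unsigned ring_index(unsigned count)
{
	return count & TXQ_MASK;	/* wraps automatically, no struct lookup */
}

int main(void)
{
	assert(ring_index(TXQ_SIZE + 5) == 5);
	printf("index for count %u is %u\n", TXQ_SIZE + 5, ring_index(TXQ_SIZE + 5));
	return 0;
}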
Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/efx.c         14
-rw-r--r--  drivers/net/sfc/efx.h          6
-rw-r--r--  drivers/net/sfc/falcon.c      70
-rw-r--r--  drivers/net/sfc/net_driver.h   6
-rw-r--r--  drivers/net/sfc/rx.c          16
-rw-r--r--  drivers/net/sfc/selftest.c     2
-rw-r--r--  drivers/net/sfc/tx.c          46
7 files changed, 62 insertions, 98 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index cc4b2f99989d..8b67553046e8 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -290,7 +290,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 	napi_disable(&channel->napi_str);
 
 	/* Poll the channel */
-	efx_process_channel(channel, efx->type->evq_size);
+	efx_process_channel(channel, EFX_EVQ_SIZE);
 
 	/* Ack the eventq. This may cause an interrupt to be generated
 	 * when they are reenabled */
@@ -1981,17 +1981,9 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 
 	efx->type = type;
 
-	/* Sanity-check NIC type */
-	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
-			    (efx->type->txd_ring_mask + 1));
-	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
-			    (efx->type->rxd_ring_mask + 1));
-	EFX_BUG_ON_PARANOID(efx->type->evq_size &
-			    (efx->type->evq_size - 1));
 	/* As close as we can get to guaranteeing that we don't overflow */
-	EFX_BUG_ON_PARANOID(efx->type->evq_size <
-			    (efx->type->txd_ring_mask + 1 +
-			     efx->type->rxd_ring_mask + 1));
+	BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
+
 	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
 	/* Higher numbered interrupt modes are less capable! */
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index aecaf62f4929..20c8d62fd6e7 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -25,16 +25,22 @@ extern netdev_tx_t efx_xmit(struct efx_nic *efx,
 			   struct sk_buff *skb);
 extern void efx_stop_queue(struct efx_nic *efx);
 extern void efx_wake_queue(struct efx_nic *efx);
+#define EFX_TXQ_SIZE 1024
+#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
 
 /* RX */
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 			  unsigned int len, bool checksummed, bool discard);
 extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
+#define EFX_RXQ_SIZE 1024
+#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
 
 /* Channels */
 extern void efx_process_channel_now(struct efx_channel *channel);
 extern void efx_flush_queues(struct efx_nic *efx);
+#define EFX_EVQ_SIZE 4096
+#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1)
 
 /* Ports */
 extern void efx_stats_disable(struct efx_nic *efx);
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 759f55ae4b83..3cb7e613ab30 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -108,21 +108,6 @@ static int rx_xon_thresh_bytes = -1;
 module_param(rx_xon_thresh_bytes, int, 0644);
 MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 
-/* TX descriptor ring size - min 512 max 4k */
-#define FALCON_TXD_RING_ORDER FFE_AZ_TX_DESCQ_SIZE_1K
-#define FALCON_TXD_RING_SIZE 1024
-#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
-
-/* RX descriptor ring size - min 512 max 4k */
-#define FALCON_RXD_RING_ORDER FFE_AZ_RX_DESCQ_SIZE_1K
-#define FALCON_RXD_RING_SIZE 1024
-#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
-
-/* Event queue size - max 32k */
-#define FALCON_EVQ_ORDER FFE_AZ_EVQ_SIZE_4K
-#define FALCON_EVQ_SIZE 4096
-#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
-
 /* If FALCON_MAX_INT_ERRORS internal errors occur within
  * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
  * disable it.
@@ -420,7 +405,7 @@ static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
 	unsigned write_ptr;
 	efx_dword_t reg;
 
-	write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
+	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
 	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
 	efx_writed_page(tx_queue->efx, &reg,
 			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
@@ -441,7 +426,7 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue)
 	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
 
 	do {
-		write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
+		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[write_ptr];
 		txd = falcon_tx_desc(tx_queue, write_ptr);
 		++tx_queue->write_count;
@@ -462,9 +447,10 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue)
 int falcon_probe_tx(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
+	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
+		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
 	return falcon_alloc_special_buffer(efx, &tx_queue->txd,
-					   FALCON_TXD_RING_SIZE *
-					   sizeof(efx_qword_t));
+					   EFX_TXQ_SIZE * sizeof(efx_qword_t));
 }
 
 void falcon_init_tx(struct efx_tx_queue *tx_queue)
@@ -487,7 +473,8 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
 			      tx_queue->channel->channel,
 			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
 			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
-			      FRF_AZ_TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
+			      FRF_AZ_TX_DESCQ_SIZE,
+			      __ffs(tx_queue->txd.entries),
 			      FRF_AZ_TX_DESCQ_TYPE, 0,
 			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);
 
@@ -592,12 +579,12 @@ void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
 	while (rx_queue->notified_count != rx_queue->added_count) {
 		falcon_build_rx_desc(rx_queue,
 				     rx_queue->notified_count &
-				     FALCON_RXD_RING_MASK);
+				     EFX_RXQ_MASK);
 		++rx_queue->notified_count;
 	}
 
 	wmb();
-	write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
+	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
 	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
 	efx_writed_page(rx_queue->efx, &reg,
 			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
@@ -606,9 +593,10 @@ void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
 int falcon_probe_rx(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
+	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
+		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
 	return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
-					   FALCON_RXD_RING_SIZE *
-					   sizeof(efx_qword_t));
+					   EFX_RXQ_SIZE * sizeof(efx_qword_t));
 }
 
 void falcon_init_rx(struct efx_rx_queue *rx_queue)
@@ -636,7 +624,8 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
 			      rx_queue->channel->channel,
 			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
 			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
-			      FRF_AZ_RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
+			      FRF_AZ_RX_DESCQ_SIZE,
+			      __ffs(rx_queue->rxd.entries),
 			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
 			      /* For >=B0 this is scatter so disable */
 			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
@@ -741,7 +730,7 @@ static void falcon_handle_tx_event(struct efx_channel *channel,
 		tx_queue = &efx->tx_queue[tx_ev_q_label];
 		channel->irq_mod_score +=
 			(tx_ev_desc_ptr - tx_queue->read_count) &
-			efx->type->txd_ring_mask;
+			EFX_TXQ_MASK;
 		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
 		/* Rewrite the FIFO write pointer */
@@ -848,9 +837,8 @@ static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned expected, dropped;
 
-	expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
-	dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
-		   FALCON_RXD_RING_MASK);
+	expected = rx_queue->removed_count & EFX_RXQ_MASK;
+	dropped = (index - expected) & EFX_RXQ_MASK;
 	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
 		 dropped, index, expected);
 
@@ -887,7 +875,7 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
 	rx_queue = &efx->rx_queue[channel->channel];
 
 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
-	expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
+	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
 	if (unlikely(rx_ev_desc_ptr != expected_ptr))
 		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
 
@@ -1075,7 +1063,7 @@ int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
 		}
 
 		/* Increment read pointer */
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
 
 	} while (rx_packets < rx_quota);
 
@@ -1120,10 +1108,10 @@ void falcon_set_int_moderation(struct efx_channel *channel)
 int falcon_probe_eventq(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
-	unsigned int evq_size;
-
-	evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
-	return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
+	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
+		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
+	return falcon_alloc_special_buffer(efx, &channel->eventq,
+					   EFX_EVQ_SIZE * sizeof(efx_qword_t));
 }
 
 void falcon_init_eventq(struct efx_channel *channel)
@@ -1144,7 +1132,7 @@ void falcon_init_eventq(struct efx_channel *channel)
 	/* Push event queue to card */
 	EFX_POPULATE_OWORD_3(evq_ptr,
 			     FRF_AZ_EVQ_EN, 1,
-			     FRF_AZ_EVQ_SIZE, FALCON_EVQ_ORDER,
+			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
 			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
 	efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
 			 channel->channel);
@@ -1214,7 +1202,7 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	unsigned int read_ptr = channel->eventq_read_ptr;
-	unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK;
+	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;
 
 	do {
 		efx_qword_t *event = falcon_event(channel, read_ptr);
@@ -1252,7 +1240,7 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
 			}
 		}
 
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
 	} while (read_ptr != end_ptr);
 }
 
@@ -3160,9 +3148,6 @@ struct efx_nic_type falcon_a_nic_type = {
 	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
 	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
 	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
-	.txd_ring_mask = FALCON_TXD_RING_MASK,
-	.rxd_ring_mask = FALCON_RXD_RING_MASK,
-	.evq_size = FALCON_EVQ_SIZE,
 	.max_dma_mask = FALCON_DMA_MASK,
 	.tx_dma_mask = FALCON_TX_DMA_MASK,
 	.bug5391_mask = 0xf,
@@ -3184,9 +3169,6 @@ struct efx_nic_type falcon_b_nic_type = {
 	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
 	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
 	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
-	.txd_ring_mask = FALCON_TXD_RING_MASK,
-	.rxd_ring_mask = FALCON_RXD_RING_MASK,
-	.evq_size = FALCON_EVQ_SIZE,
 	.max_dma_mask = FALCON_DMA_MASK,
 	.tx_dma_mask = FALCON_TX_DMA_MASK,
 	.bug5391_mask = 0,
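The __ffs(...entries) arguments above are where the commit message's "calculate page orders in falcon.c" happens: the hardware size field is derived from how many buffer-table pages the descriptor ring occupies, rather than from a hard-coded *_ORDER constant. A rough standalone check of that arithmetic is sketched below; the 4 KB buffer page, the 8-byte descriptor, and the interpretation of the resulting order value are assumptions for illustration and are not taken from this diff.

/* Hypothetical sketch of the page-order calculation; BUF_PAGE_SIZE and
 * DESC_BYTES are assumed values, not quoted from the driver sources. */
#include <stdio.h>

#define BUF_PAGE_SIZE 4096u
#define TXQ_SIZE      1024u			/* descriptors */
#define DESC_BYTES    8u			/* assumed sizeof(efx_qword_t) */

/* Index of the lowest set bit, analogous to the kernel's __ffs(). */
static unsigned lowest_set_bit(unsigned x)
{
	unsigned n = 0;
	while ((x & 1u) == 0) {
		x >>= 1;
		n++;
	}
	return n;
}

int main(void)
{
	unsigned bytes = TXQ_SIZE * DESC_BYTES;		/* 8192 bytes of descriptors */
	unsigned pages = bytes / BUF_PAGE_SIZE;		/* 2 buffer-table pages */
	printf("ring order = %u\n", lowest_set_bit(pages));	/* prints 1 */
	return 0;
}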
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 479a6fe38318..3afadc621a81 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -869,9 +869,6 @@ static inline const char *efx_dev_name(struct efx_nic *efx)
  * @buf_tbl_base: Buffer table base address
  * @evq_ptr_tbl_base: Event queue pointer table base address
  * @evq_rptr_tbl_base: Event queue read-pointer table base address
- * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
- * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
- * @evq_size: Event queue size (must be a power of two)
  * @max_dma_mask: Maximum possible DMA mask
  * @tx_dma_mask: TX DMA mask
  * @bug5391_mask: Address mask for bug 5391 workaround
@@ -890,9 +887,6 @@ struct efx_nic_type {
 	unsigned int evq_ptr_tbl_base;
 	unsigned int evq_rptr_tbl_base;
 
-	unsigned int txd_ring_mask;
-	unsigned int rxd_ring_mask;
-	unsigned int evq_size;
 	u64 max_dma_mask;
 	unsigned int tx_dma_mask;
 	unsigned bug5391_mask;
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 01f9432c31ef..ea59ed25b0d8 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -293,8 +293,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 	 * fill anyway.
 	 */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level >
-			    rx_queue->efx->type->rxd_ring_mask + 1);
+	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 
 	/* Don't fill if we don't need to */
 	if (fill_level >= rx_queue->fast_fill_trigger)
@@ -316,8 +315,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
  retry:
 	/* Recalculate current fill level now that we have the lock */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level >
-			    rx_queue->efx->type->rxd_ring_mask + 1);
+	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 	space = rx_queue->fast_fill_limit - fill_level;
 	if (space < EFX_RX_BATCH)
 		goto out_unlock;
@@ -329,8 +327,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 
 	do {
 		for (i = 0; i < EFX_RX_BATCH; ++i) {
-			index = (rx_queue->added_count &
-				 rx_queue->efx->type->rxd_ring_mask);
+			index = rx_queue->added_count & EFX_RXQ_MASK;
 			rx_buf = efx_rx_buffer(rx_queue, index);
 			rc = efx_init_rx_buffer(rx_queue, rx_buf);
 			if (unlikely(rc))
@@ -629,7 +626,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 	EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
 
 	/* Allocate RX buffers */
-	rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
+	rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
 	rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
 	if (!rx_queue->buffer)
 		return -ENOMEM;
@@ -644,7 +641,6 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
-	struct efx_nic *efx = rx_queue->efx;
 	unsigned int max_fill, trigger, limit;
 
 	EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
@@ -657,7 +653,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 	rx_queue->min_overfill = -1U;
 
 	/* Initialise limit fields */
-	max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
+	max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
 	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
 	limit = max_fill * min(rx_refill_limit, 100U) / 100U;
 
@@ -680,7 +676,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 
 	/* Release RX buffers NB start at index 0 not current HW ptr */
 	if (rx_queue->buffer) {
-		for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
+		for (i = 0; i <= EFX_RXQ_MASK; i++) {
 			rx_buf = efx_rx_buffer(rx_queue, i);
 			efx_fini_rx_buffer(rx_queue, rx_buf);
 		}
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index e5c4c9cd4520..7a9386f97c42 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -526,7 +526,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
 
 	for (i = 0; i < 3; i++) {
 		/* Determine how many packets to send */
-		state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
+		state->packet_count = EFX_TXQ_SIZE / 3;
 		state->packet_count = min(1 << (i << 2), state->packet_count);
 		state->skbs = kzalloc(sizeof(state->skbs[0]) *
 				      state->packet_count, GFP_KERNEL);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 489c4de31447..ae554eec0563 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -26,8 +26,7 @@
  * The tx_queue descriptor ring fill-level must fall below this value
  * before we restart the netif queue
  */
-#define EFX_NETDEV_TX_THRESHOLD(_tx_queue)	\
-	(_tx_queue->efx->type->txd_ring_mask / 2u)
+#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
 
 /* We want to be able to nest calls to netif_stop_queue(), since each
  * channel can have an individual stop on the queue.
@@ -171,7 +170,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 	}
 
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-	q_space = efx->type->txd_ring_mask - 1 - fill_level;
+	q_space = EFX_TXQ_MASK - 1 - fill_level;
 
 	/* Map for DMA. Use pci_map_single rather than pci_map_page
 	 * since this is more efficient on machines with sparse
@@ -208,16 +207,14 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 				&tx_queue->read_count;
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
-			q_space = (efx->type->txd_ring_mask - 1 -
-				   fill_level);
+			q_space = EFX_TXQ_MASK - 1 - fill_level;
 			if (unlikely(q_space-- <= 0))
 				goto stop;
 			smp_mb();
 			--tx_queue->stopped;
 		}
 
-		insert_ptr = (tx_queue->insert_count &
-			      efx->type->txd_ring_mask);
+		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->tsoh);
@@ -289,7 +286,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
-		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
+		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_dequeue_buffer(tx_queue, buffer);
 		buffer->len = 0;
@@ -318,10 +315,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 {
 	struct efx_nic *efx = tx_queue->efx;
 	unsigned int stop_index, read_ptr;
-	unsigned int mask = tx_queue->efx->type->txd_ring_mask;
 
-	stop_index = (index + 1) & mask;
-	read_ptr = tx_queue->read_count & mask;
+	stop_index = (index + 1) & EFX_TXQ_MASK;
+	read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
 
 	while (read_ptr != stop_index) {
 		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -338,7 +334,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 		buffer->len = 0;
 
 		++tx_queue->read_count;
-		read_ptr = tx_queue->read_count & mask;
+		read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
 	}
 }
 
@@ -391,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
 
-	EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);
+	EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);
 
 	efx_dequeue_buffers(tx_queue, index);
 
@@ -401,7 +397,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	smp_mb();
 	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
-		if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
+		if (fill_level < EFX_TXQ_THRESHOLD) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
 
 			/* Do this under netif_tx_lock(), to avoid racing
@@ -425,11 +421,11 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 	EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
 
 	/* Allocate software ring */
-	txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
+	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
 	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
 	if (!tx_queue->buffer)
 		return -ENOMEM;
-	for (i = 0; i <= efx->type->txd_ring_mask; ++i)
+	for (i = 0; i <= EFX_TXQ_MASK; ++i)
 		tx_queue->buffer[i].continuation = true;
 
 	/* Allocate hardware ring */
@@ -468,8 +464,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 	/* Free any buffers left in the ring */
 	while (tx_queue->read_count != tx_queue->write_count) {
-		buffer = &tx_queue->buffer[tx_queue->read_count &
-					   tx_queue->efx->type->txd_ring_mask];
+		buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
 		efx_dequeue_buffer(tx_queue, buffer);
 		buffer->continuation = true;
 		buffer->len = 0;
@@ -715,7 +710,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
 	/* -1 as there is no way to represent all descriptors used */
-	q_space = efx->type->txd_ring_mask - 1 - fill_level;
+	q_space = EFX_TXQ_MASK - 1 - fill_level;
 
 	while (1) {
 		if (unlikely(q_space-- <= 0)) {
@@ -731,7 +726,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 				*(volatile unsigned *)&tx_queue->read_count;
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
-			q_space = efx->type->txd_ring_mask - 1 - fill_level;
+			q_space = EFX_TXQ_MASK - 1 - fill_level;
 			if (unlikely(q_space-- <= 0)) {
 				*final_buffer = NULL;
 				return 1;
@@ -740,13 +735,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			--tx_queue->stopped;
 		}
 
-		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
+		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[insert_ptr];
 		++tx_queue->insert_count;
 
 		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
 				    tx_queue->read_count >
-				    efx->type->txd_ring_mask);
+				    EFX_TXQ_MASK);
 
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->len);
@@ -792,8 +787,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
 {
 	struct efx_tx_buffer *buffer;
 
-	buffer = &tx_queue->buffer[tx_queue->insert_count &
-				   tx_queue->efx->type->txd_ring_mask];
+	buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
 	efx_tsoh_free(tx_queue, buffer);
 	EFX_BUG_ON_PARANOID(buffer->len);
 	EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -818,7 +812,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
 		buffer = &tx_queue->buffer[tx_queue->insert_count &
-					   tx_queue->efx->type->txd_ring_mask];
+					   EFX_TXQ_MASK];
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->skb);
 		buffer->len = 0;
@@ -1135,7 +1129,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
 	unsigned i;
 
 	if (tx_queue->buffer) {
-		for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
+		for (i = 0; i <= EFX_TXQ_MASK; ++i)
 			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
 	}
 