author		Ben Hutchings <bhutchings@solarflare.com>	2009-11-25 11:09:55 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-26 18:59:19 -0500
commit		127e6e10ad17585c48cba8e1dcf30d98b90ee583 (patch)
tree		7c66f03e88b0f1aef5bb6d4f42724bd1fe7b5538 /drivers/net/sfc
parent		44838a447de3b1541cbf845853c4f8999310b0dd (diff)
sfc: Fix bugs in RX queue flushing
Avoid overrunning the hardware limit of 4 concurrent RX queue flushes.
Expand the queue flush state to support this. Make similar changes to
TX flushing to keep the code symmetric.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
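
[Editor's note: as a quick illustration of the scheme this patch describes, here is a minimal, self-contained C sketch of the throttled flush state machine. All names below (RX_FLUSH_LIMIT, NUM_RX_QUEUES, rxq, issue_rx_flushes) are invented stand-ins for this example; the driver itself operates on struct efx_rx_queue and posts real flush commands to the hardware.]

/* Minimal sketch (not driver code) of the throttled flush scheme:
 * at most RX_FLUSH_LIMIT flushes are in flight at once, and failed
 * flushes are re-issued on a later pass rather than immediately. */
#include <stdio.h>

#define RX_FLUSH_LIMIT 4        /* mirrors FALCON_RX_FLUSH_COUNT */
#define NUM_RX_QUEUES  8        /* arbitrary for the example */

enum flush_state { FLUSH_NONE, FLUSH_PENDING, FLUSH_FAILED, FLUSH_DONE };

static enum flush_state rxq[NUM_RX_QUEUES];

/* Count flushes already in flight, then start new ones (or retry
 * failed ones) only while fewer than RX_FLUSH_LIMIT are pending. */
static void issue_rx_flushes(void)
{
        int pending = 0, i;

        for (i = 0; i < NUM_RX_QUEUES; i++)
                if (rxq[i] == FLUSH_PENDING)
                        pending++;

        for (i = 0; i < NUM_RX_QUEUES && pending < RX_FLUSH_LIMIT; i++) {
                if (rxq[i] == FLUSH_NONE || rxq[i] == FLUSH_FAILED) {
                        rxq[i] = FLUSH_PENDING; /* real driver posts a flush command here */
                        pending++;
                }
        }
}

int main(void)
{
        rxq[2] = FLUSH_FAILED;  /* pretend one earlier flush failed */
        issue_rx_flushes();
        for (int i = 0; i < NUM_RX_QUEUES; i++)
                printf("rx queue %d: state %d\n", i, rxq[i]);
        return 0;
}

[After one pass only the first four queues move to FLUSH_PENDING; the rest wait until completions free up slots, which is exactly the hardware limit the patch enforces.]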
Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--	drivers/net/sfc/falcon.c	103
-rw-r--r--	drivers/net/sfc/net_driver.h	11
2 files changed, 73 insertions(+), 41 deletions(-)
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 2f1f1fca0802..e1b9ce30429a 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -109,6 +109,9 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 /* Size and alignment of special buffers (4KB) */
 #define FALCON_BUF_SIZE 4096
 
+/* Depth of RX flush request fifo */
+#define FALCON_RX_FLUSH_COUNT 4
+
 #define FALCON_IS_DUAL_FUNC(efx)		\
 	(falcon_rev(efx) < FALCON_REV_B0)
 
@@ -426,7 +429,7 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
 	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
 
-	tx_queue->flushed = false;
+	tx_queue->flushed = FLUSH_NONE;
 
 	/* Pin TX descriptor ring */
 	falcon_init_special_buffer(efx, &tx_queue->txd);
@@ -476,6 +479,8 @@ static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t tx_flush_descq;
 
+	tx_queue->flushed = FLUSH_PENDING;
+
 	/* Post a flush command */
 	EFX_POPULATE_OWORD_2(tx_flush_descq,
 			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
@@ -489,7 +494,7 @@ void falcon_fini_tx(struct efx_tx_queue *tx_queue)
 	efx_oword_t tx_desc_ptr;
 
 	/* The queue should have been flushed */
-	WARN_ON(!tx_queue->flushed);
+	WARN_ON(tx_queue->flushed != FLUSH_DONE);
 
 	/* Remove TX descriptor ring from card */
 	EFX_ZERO_OWORD(tx_desc_ptr);
@@ -578,7 +583,7 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
 		rx_queue->queue, rx_queue->rxd.index,
 		rx_queue->rxd.index + rx_queue->rxd.entries - 1);
 
-	rx_queue->flushed = false;
+	rx_queue->flushed = FLUSH_NONE;
 
 	/* Pin RX descriptor ring */
 	falcon_init_special_buffer(efx, &rx_queue->rxd);
@@ -607,6 +612,8 @@ static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	efx_oword_t rx_flush_descq;
 
+	rx_queue->flushed = FLUSH_PENDING;
+
 	/* Post a flush command */
 	EFX_POPULATE_OWORD_2(rx_flush_descq,
 			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
@@ -620,7 +627,7 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 
 	/* The queue should already have been flushed */
-	WARN_ON(!rx_queue->flushed);
+	WARN_ON(rx_queue->flushed != FLUSH_DONE);
 
 	/* Remove RX descriptor ring from card */
 	EFX_ZERO_OWORD(rx_desc_ptr);
@@ -1181,7 +1188,7 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
 					     FSF_AZ_DRIVER_EV_SUBDATA);
 		if (ev_queue < EFX_TX_QUEUE_COUNT) {
 			tx_queue = efx->tx_queue + ev_queue;
-			tx_queue->flushed = true;
+			tx_queue->flushed = FLUSH_DONE;
 		}
 	} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
 		   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
@@ -1191,17 +1198,29 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
 				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
 			if (ev_queue < efx->n_rx_queues) {
 				rx_queue = efx->rx_queue + ev_queue;
-
-				/* retry the rx flush */
-				if (ev_failed)
-					falcon_flush_rx_queue(rx_queue);
-				else
-					rx_queue->flushed = true;
+				rx_queue->flushed =
+					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
 			}
 		}
 
+		/* We're about to destroy the queue anyway, so
+		 * it's ok to throw away every non-flush event */
+		EFX_SET_QWORD(*event);
+
 		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
 	} while (read_ptr != end_ptr);
+
+	channel->eventq_read_ptr = read_ptr;
+}
+
+static void falcon_prepare_flush(struct efx_nic *efx)
+{
+	falcon_deconfigure_mac_wrapper(efx);
+
+	/* Wait for the tx and rx fifo's to get to the next packet boundary
+	 * (~1ms without back-pressure), then to drain the remainder of the
+	 * fifo's at data path speeds (negligible), with a healthy margin. */
+	msleep(10);
 }
 
 /* Handle tx and rx flushes at the same time, since they run in
@@ -1211,50 +1230,56 @@ int falcon_flush_queues(struct efx_nic *efx)
 {
 	struct efx_rx_queue *rx_queue;
 	struct efx_tx_queue *tx_queue;
-	int i;
-	bool outstanding;
+	int i, tx_pending, rx_pending;
 
-	/* Issue flush requests */
-	efx_for_each_tx_queue(tx_queue, efx) {
-		tx_queue->flushed = false;
+	falcon_prepare_flush(efx);
+
+	/* Flush all tx queues in parallel */
+	efx_for_each_tx_queue(tx_queue, efx)
 		falcon_flush_tx_queue(tx_queue);
-	}
-	efx_for_each_rx_queue(rx_queue, efx) {
-		rx_queue->flushed = false;
-		falcon_flush_rx_queue(rx_queue);
-	}
 
-	/* Poll the evq looking for flush completions. Since we're not pushing
-	 * any more rx or tx descriptors at this point, we're in no danger of
-	 * overflowing the evq whilst we wait */
+	/* The hardware supports four concurrent rx flushes, each of which may
+	 * need to be retried if there is an outstanding descriptor fetch */
 	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
-		msleep(FALCON_FLUSH_INTERVAL);
-		falcon_poll_flush_events(efx);
+		rx_pending = tx_pending = 0;
+		efx_for_each_rx_queue(rx_queue, efx) {
+			if (rx_queue->flushed == FLUSH_PENDING)
+				++rx_pending;
+		}
+		efx_for_each_rx_queue(rx_queue, efx) {
+			if (rx_pending == FALCON_RX_FLUSH_COUNT)
+				break;
+			if (rx_queue->flushed == FLUSH_FAILED ||
+			    rx_queue->flushed == FLUSH_NONE) {
+				falcon_flush_rx_queue(rx_queue);
+				++rx_pending;
+			}
+		}
+		efx_for_each_tx_queue(tx_queue, efx) {
+			if (tx_queue->flushed != FLUSH_DONE)
+				++tx_pending;
+		}
 
-		/* Check if every queue has been succesfully flushed */
-		outstanding = false;
-		efx_for_each_tx_queue(tx_queue, efx)
-			outstanding |= !tx_queue->flushed;
-		efx_for_each_rx_queue(rx_queue, efx)
-			outstanding |= !rx_queue->flushed;
-		if (!outstanding)
+		if (rx_pending == 0 && tx_pending == 0)
 			return 0;
+
+		msleep(FALCON_FLUSH_INTERVAL);
+		falcon_poll_flush_events(efx);
 	}
 
 	/* Mark the queues as all flushed. We're going to return failure
-	 * leading to a reset, or fake up success anyway. "flushed" now
-	 * indicates that we tried to flush. */
+	 * leading to a reset, or fake up success anyway */
 	efx_for_each_tx_queue(tx_queue, efx) {
-		if (!tx_queue->flushed)
+		if (tx_queue->flushed != FLUSH_DONE)
 			EFX_ERR(efx, "tx queue %d flush command timed out\n",
 				tx_queue->queue);
-		tx_queue->flushed = true;
+		tx_queue->flushed = FLUSH_DONE;
 	}
 	efx_for_each_rx_queue(rx_queue, efx) {
-		if (!rx_queue->flushed)
+		if (rx_queue->flushed != FLUSH_DONE)
 			EFX_ERR(efx, "rx queue %d flush command timed out\n",
 				rx_queue->queue);
-		rx_queue->flushed = true;
+		rx_queue->flushed = FLUSH_DONE;
 	}
 
 	if (EFX_WORKAROUND_7803(efx))
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index ac808d5f24a0..d0755ab056fe 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -113,6 +113,13 @@ struct efx_special_buffer {
 	int entries;
 };
 
+enum efx_flush_state {
+	FLUSH_NONE,
+	FLUSH_PENDING,
+	FLUSH_FAILED,
+	FLUSH_DONE,
+};
+
 /**
  * struct efx_tx_buffer - An Efx TX buffer
  * @skb: The associated socket buffer.
@@ -189,7 +196,7 @@ struct efx_tx_queue {
 	struct efx_nic *nic;
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
-	bool flushed;
+	enum efx_flush_state flushed;
 
 	/* Members used mainly on the completion path */
 	unsigned int read_count ____cacheline_aligned_in_smp;
@@ -284,7 +291,7 @@ struct efx_rx_queue {
 	struct page *buf_page;
 	dma_addr_t buf_dma_addr;
 	char *buf_data;
-	bool flushed;
+	enum efx_flush_state flushed;
 };
 
 /**
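
[Editor's note: to round out the picture, a sketch of the completion side, again with invented names. Where the removed code retried a failed flush directly from the event-poll path, the patched driver only records the failure; the issuing loop in falcon_flush_queues retries it on a later pass while respecting the four-flush hardware limit.]

/* Sketch (not driver code) of the completion side: a flush event moves
 * a queue out of FLUSH_PENDING; a failure is merely recorded so the
 * issuing loop can retry it within the concurrency limit. */
enum flush_state { FLUSH_NONE, FLUSH_PENDING, FLUSH_FAILED, FLUSH_DONE };

#define NUM_RX_QUEUES 8         /* arbitrary for the example */

static enum flush_state rxq[NUM_RX_QUEUES];

static void handle_rx_flush_event(unsigned int queue, int failed)
{
        if (queue < NUM_RX_QUEUES && rxq[queue] == FLUSH_PENDING)
                rxq[queue] = failed ? FLUSH_FAILED : FLUSH_DONE;
}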