author		Ben Hutchings <bhutchings@solarflare.com>	2010-09-10 02:41:47 -0400
committer	David S. Miller <davem@davemloft.net>	2010-09-10 15:27:32 -0400
commit		f7d12cdcbb28207b3bdcf4affbf3935e4c015d03
tree		ab94c3e81e355c8df47102ede2d5d0aa02738945 /drivers/net/sfc/nic.c
parent		ba1e8a35b77f3bc7d109696dbd2a7fd5af208b62
sfc: Refactor channel and queue lookup and iteration
In preparation for changes to the way channels and queue structures
are allocated, revise the macros and functions used to look up and
iterate over them.

- Replace efx_for_each_tx_queue() with iteration over channels then
  TX queues
- Replace efx_for_each_rx_queue() with iteration over channels then
  RX queues (with one exception, shortly to be removed)
- Introduce efx_get_{channel,rx_queue,tx_queue}() functions to look
  up channels and queues by index
- Introduce efx_channel_get_{rx,tx}_queue() functions to look up a
  channel's queues

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
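The new helpers divide into NIC-wide lookups (efx_get_channel(),
efx_get_tx_queue(), efx_get_rx_queue()) and channel-relative ones
(efx_channel_get_tx_queue(), efx_channel_get_rx_queue()). A minimal
sketch of how they combine, with signatures inferred from the call
sites in the diff below; the example_* wrappers are hypothetical and
would need the driver's net_driver.h definitions:

	/* Sketch only: signatures inferred from the call sites in the
	 * diff below; example_* is hypothetical. */
	static struct efx_tx_queue *
	example_tx_lookup(struct efx_nic *efx, unsigned int ev_queue)
	{
		/* NIC-wide lookup: split a hardware queue number into a
		 * channel index and a per-channel TX queue type. */
		return efx_get_tx_queue(efx, ev_queue / EFX_TXQ_TYPES,
					ev_queue % EFX_TXQ_TYPES);
	}

	static struct efx_rx_queue *
	example_rx_lookup(struct efx_nic *efx, unsigned int index)
	{
		/* Channel-relative lookup: fetch the channel by index,
		 * then its RX queue; the diff uses
		 * efx_get_rx_queue(efx, index) for the direct form of
		 * the same lookup. */
		struct efx_channel *channel = efx_get_channel(efx, index);

		return efx_channel_get_rx_queue(channel);
	}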
Diffstat (limited to 'drivers/net/sfc/nic.c')
-rw-r--r--	drivers/net/sfc/nic.c	85
1 file changed, 49 insertions(+), 36 deletions(-)
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index be4d5524054f..9e3563348eb7 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -682,7 +682,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 		/* Transmit completion */
 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-		tx_queue = &efx->tx_queue[tx_ev_q_label];
+		tx_queue = efx_channel_get_tx_queue(
+			channel, tx_ev_q_label % EFX_TXQ_TYPES);
 		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
 			      EFX_TXQ_MASK);
 		channel->irq_mod_score += tx_packets;
@@ -690,7 +691,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
 		/* Rewrite the FIFO write pointer */
 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-		tx_queue = &efx->tx_queue[tx_ev_q_label];
+		tx_queue = efx_channel_get_tx_queue(
+			channel, tx_ev_q_label % EFX_TXQ_TYPES);
 
 		if (efx_dev_registered(efx))
 			netif_tx_lock(efx->net_dev);
@@ -830,7 +832,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
 		channel->channel);
 
-	rx_queue = &efx->rx_queue[channel->channel];
+	rx_queue = efx_channel_get_rx_queue(channel);
 
 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
 	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
@@ -882,7 +884,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
 		/* The queue must be empty, so we won't receive any rx
 		 * events, so efx_process_channel() won't refill the
 		 * queue. Refill it here */
-		efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
 	else
 		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
 			  "generated event "EFX_QWORD_FMT"\n",
@@ -1166,7 +1168,7 @@ void efx_nic_generate_fill_event(struct efx_channel *channel)
 
 static void efx_poll_flush_events(struct efx_nic *efx)
 {
-	struct efx_channel *channel = &efx->channel[0];
+	struct efx_channel *channel = efx_get_channel(efx, 0);
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	unsigned int read_ptr = channel->eventq_read_ptr;
@@ -1188,7 +1190,9 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 			ev_queue = EFX_QWORD_FIELD(*event,
 						   FSF_AZ_DRIVER_EV_SUBDATA);
 			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
-				tx_queue = efx->tx_queue + ev_queue;
+				tx_queue = efx_get_tx_queue(
+					efx, ev_queue / EFX_TXQ_TYPES,
+					ev_queue % EFX_TXQ_TYPES);
 				tx_queue->flushed = FLUSH_DONE;
 			}
 		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
@@ -1198,7 +1202,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 			ev_failed = EFX_QWORD_FIELD(
 				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
 			if (ev_queue < efx->n_rx_channels) {
-				rx_queue = efx->rx_queue + ev_queue;
+				rx_queue = efx_get_rx_queue(efx, ev_queue);
 				rx_queue->flushed =
 					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
 			}
@@ -1219,6 +1223,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
  * serialise them */
 int efx_nic_flush_queues(struct efx_nic *efx)
 {
+	struct efx_channel *channel;
 	struct efx_rx_queue *rx_queue;
 	struct efx_tx_queue *tx_queue;
 	int i, tx_pending, rx_pending;
@@ -1227,29 +1232,35 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	efx->type->prepare_flush(efx);
 
 	/* Flush all tx queues in parallel */
-	efx_for_each_tx_queue(tx_queue, efx)
-		efx_flush_tx_queue(tx_queue);
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_flush_tx_queue(tx_queue);
+	}
 
 	/* The hardware supports four concurrent rx flushes, each of which may
 	 * need to be retried if there is an outstanding descriptor fetch */
 	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
 		rx_pending = tx_pending = 0;
-		efx_for_each_rx_queue(rx_queue, efx) {
-			if (rx_queue->flushed == FLUSH_PENDING)
-				++rx_pending;
-		}
-		efx_for_each_rx_queue(rx_queue, efx) {
-			if (rx_pending == EFX_RX_FLUSH_COUNT)
-				break;
-			if (rx_queue->flushed == FLUSH_FAILED ||
-			    rx_queue->flushed == FLUSH_NONE) {
-				efx_flush_rx_queue(rx_queue);
-				++rx_pending;
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_channel_rx_queue(rx_queue, channel) {
+				if (rx_queue->flushed == FLUSH_PENDING)
+					++rx_pending;
 			}
 		}
-		efx_for_each_tx_queue(tx_queue, efx) {
-			if (tx_queue->flushed != FLUSH_DONE)
-				++tx_pending;
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_channel_rx_queue(rx_queue, channel) {
+				if (rx_pending == EFX_RX_FLUSH_COUNT)
+					break;
+				if (rx_queue->flushed == FLUSH_FAILED ||
+				    rx_queue->flushed == FLUSH_NONE) {
+					efx_flush_rx_queue(rx_queue);
+					++rx_pending;
+				}
+			}
+			efx_for_each_channel_tx_queue(tx_queue, channel) {
+				if (tx_queue->flushed != FLUSH_DONE)
+					++tx_pending;
+			}
 		}
 
 		if (rx_pending == 0 && tx_pending == 0)
@@ -1261,19 +1272,21 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 
 	/* Mark the queues as all flushed. We're going to return failure
 	 * leading to a reset, or fake up success anyway */
-	efx_for_each_tx_queue(tx_queue, efx) {
-		if (tx_queue->flushed != FLUSH_DONE)
-			netif_err(efx, hw, efx->net_dev,
-				  "tx queue %d flush command timed out\n",
-				  tx_queue->queue);
-		tx_queue->flushed = FLUSH_DONE;
-	}
-	efx_for_each_rx_queue(rx_queue, efx) {
-		if (rx_queue->flushed != FLUSH_DONE)
-			netif_err(efx, hw, efx->net_dev,
-				  "rx queue %d flush command timed out\n",
-				  efx_rx_queue_index(rx_queue));
-		rx_queue->flushed = FLUSH_DONE;
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->flushed != FLUSH_DONE)
+				netif_err(efx, hw, efx->net_dev,
+					  "tx queue %d flush command timed out\n",
+					  tx_queue->queue);
+			tx_queue->flushed = FLUSH_DONE;
+		}
+		efx_for_each_channel_rx_queue(rx_queue, channel) {
+			if (rx_queue->flushed != FLUSH_DONE)
+				netif_err(efx, hw, efx->net_dev,
+					  "rx queue %d flush command timed out\n",
+					  efx_rx_queue_index(rx_queue));
+			rx_queue->flushed = FLUSH_DONE;
+		}
 	}
 
 	return -ETIMEDOUT;
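The iteration half of the refactor follows one shape throughout: an
outer efx_for_each_channel() with the per-channel queue macros nested
inside. A minimal sketch of that pattern, using only the macros and
flush calls that appear in the hunks above (example_flush_all() is a
hypothetical wrapper, with the flush bookkeeping elided):

	/* Hypothetical helper: flush every queue, channel by channel,
	 * replacing the old flat efx_for_each_tx_queue() and
	 * efx_for_each_rx_queue() walks. */
	static void example_flush_all(struct efx_nic *efx)
	{
		struct efx_channel *channel;
		struct efx_tx_queue *tx_queue;
		struct efx_rx_queue *rx_queue;

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_flush_tx_queue(tx_queue);
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_flush_rx_queue(rx_queue);
		}
	}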