author	Ben Hutchings <bhutchings@solarflare.com>	2010-09-10 02:41:47 -0400
committer	David S. Miller <davem@davemloft.net>	2010-09-10 15:27:32 -0400
commit	f7d12cdcbb28207b3bdcf4affbf3935e4c015d03 (patch)
tree	ab94c3e81e355c8df47102ede2d5d0aa02738945
parent	ba1e8a35b77f3bc7d109696dbd2a7fd5af208b62 (diff)
sfc: Refactor channel and queue lookup and iteration
In preparation for changes to the way channels and queue structures
are allocated, revise the macros and functions used to look up and
iterate over them.

- Replace efx_for_each_tx_queue() with iteration over channels then
  TX queues
- Replace efx_for_each_rx_queue() with iteration over channels then
  RX queues (with one exception, shortly to be removed)
- Introduce efx_get_{channel,rx_queue,tx_queue}() functions to look up
  channels and queues by index
- Introduce efx_channel_get_{rx,tx}_queue() functions to look up a
  channel's queues

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
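The core of the change is replacing NIC-wide queue iteration with per-channel iteration. As a minimal before/after sketch (lifted from the efx_unregister_netdev() hunk below; the declarations of efx, channel and tx_queue are assumed from the surrounding function):

	/* Before: walk every TX queue in the NIC directly */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	/* After: walk channels, then each channel's TX queues */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_release_tx_buffers(tx_queue);
	}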
-rw-r--r--	drivers/net/sfc/efx.c	28
-rw-r--r--	drivers/net/sfc/ethtool.c	15
-rw-r--r--	drivers/net/sfc/net_driver.h	43
-rw-r--r--	drivers/net/sfc/nic.c	85
-rw-r--r--	drivers/net/sfc/rx.c	2
-rw-r--r--	drivers/net/sfc/selftest.c	5
-rw-r--r--	drivers/net/sfc/tx.c	22
7 files changed, 125 insertions, 75 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index a57604527a42..3dd71aa310cd 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -248,7 +248,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
 
 	efx_rx_strategy(channel);
 
-	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+	efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
 
 	return spent;
 }
@@ -1050,7 +1050,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 				efx->n_rx_channels = efx->n_channels;
 			}
 			for (i = 0; i < n_channels; i++)
-				efx->channel[i].irq = xentries[i].vector;
+				efx_get_channel(efx, i)->irq =
+					xentries[i].vector;
 		} else {
 			/* Fall back to single channel MSI */
 			efx->interrupt_mode = EFX_INT_MODE_MSI;
@@ -1066,7 +1067,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 		efx->n_tx_channels = 1;
 		rc = pci_enable_msi(efx->pci_dev);
 		if (rc == 0) {
-			efx->channel[0].irq = efx->pci_dev->irq;
+			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
 		} else {
 			netif_err(efx, drv, efx->net_dev,
 				  "could not enable MSI\n");
@@ -1355,20 +1356,20 @@ static unsigned irq_mod_ticks(int usecs, int resolution)
 void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
 			     bool rx_adaptive)
 {
-	struct efx_tx_queue *tx_queue;
-	struct efx_rx_queue *rx_queue;
+	struct efx_channel *channel;
 	unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
 	unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
-	efx_for_each_tx_queue(tx_queue, efx)
-		tx_queue->channel->irq_moderation = tx_ticks;
-
 	efx->irq_rx_adaptive = rx_adaptive;
 	efx->irq_rx_moderation = rx_ticks;
-	efx_for_each_rx_queue(rx_queue, efx)
-		rx_queue->channel->irq_moderation = rx_ticks;
+	efx_for_each_channel(channel, efx) {
+		if (efx_channel_get_rx_queue(channel))
+			channel->irq_moderation = rx_ticks;
+		else if (efx_channel_get_tx_queue(channel, 0))
+			channel->irq_moderation = tx_ticks;
+	}
 }
 
 /**************************************************************************
@@ -1767,6 +1768,7 @@ fail_registered:
 
 static void efx_unregister_netdev(struct efx_nic *efx)
 {
+	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
 
 	if (!efx->net_dev)
@@ -1777,8 +1779,10 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 	/* Free up any skbs still remaining. This has to happen before
 	 * we try to unregister the netdev as running their destructors
 	 * may be needed to get the device ref. count to 0. */
-	efx_for_each_tx_queue(tx_queue, efx)
-		efx_release_tx_buffers(tx_queue);
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_release_tx_buffers(tx_queue);
+	}
 
 	if (efx_dev_registered(efx)) {
 		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index fd19d6ab97a2..b9291db023bb 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -328,9 +328,10 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
 				  unsigned int test_index,
 				  struct ethtool_string *strings, u64 *data)
 {
+	struct efx_channel *channel = efx_get_channel(efx, 0);
 	struct efx_tx_queue *tx_queue;
 
-	efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+	efx_for_each_channel_tx_queue(tx_queue, channel) {
 		efx_fill_test(test_index++, strings, data,
 			      &lb_tests->tx_sent[tx_queue->queue],
 			      EFX_TX_QUEUE_NAME(tx_queue),
@@ -673,15 +674,15 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
 				       struct ethtool_coalesce *coalesce)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	struct efx_tx_queue *tx_queue;
 	struct efx_channel *channel;
 
 	memset(coalesce, 0, sizeof(*coalesce));
 
 	/* Find lowest IRQ moderation across all used TX queues */
 	coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
-	efx_for_each_tx_queue(tx_queue, efx) {
-		channel = tx_queue->channel;
+	efx_for_each_channel(channel, efx) {
+		if (!efx_channel_get_tx_queue(channel, 0))
+			continue;
 		if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
 			if (channel->channel < efx->n_rx_channels)
 				coalesce->tx_coalesce_usecs_irq =
@@ -708,7 +709,6 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_channel *channel;
-	struct efx_tx_queue *tx_queue;
 	unsigned tx_usecs, rx_usecs, adaptive;
 
 	if (coalesce->use_adaptive_tx_coalesce)
@@ -725,8 +725,9 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 	adaptive = coalesce->use_adaptive_rx_coalesce;
 
 	/* If the channel is shared only allow RX parameters to be set */
-	efx_for_each_tx_queue(tx_queue, efx) {
-		if ((tx_queue->channel->channel < efx->n_rx_channels) &&
+	efx_for_each_channel(channel, efx) {
+		if (efx_channel_get_rx_queue(channel) &&
+		    efx_channel_get_tx_queue(channel, 0) &&
 		    tx_usecs) {
 			netif_err(efx, drv, efx->net_dev, "Channel is shared. "
 				  "Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 89c6e02c57dd..eb3537529c9c 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -909,18 +909,34 @@ struct efx_nic_type {
  *
  *************************************************************************/
 
+static inline struct efx_channel *
+efx_get_channel(struct efx_nic *efx, unsigned index)
+{
+	EFX_BUG_ON_PARANOID(index >= efx->n_channels);
+	return &efx->channel[index];
+}
+
 /* Iterate over all used channels */
 #define efx_for_each_channel(_channel, _efx)				\
 	for (_channel = &((_efx)->channel[0]);				\
 	     _channel < &((_efx)->channel[(efx)->n_channels]);		\
 	     _channel++)
 
-/* Iterate over all used TX queues */
-#define efx_for_each_tx_queue(_tx_queue, _efx)				\
-	for (_tx_queue = &((_efx)->tx_queue[0]);			\
-	     _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES *		\
-					    (_efx)->n_tx_channels]);	\
-	     _tx_queue++)
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+			    type >= EFX_TXQ_TYPES);
+	return &efx->tx_queue[index * EFX_TXQ_TYPES + type];
+}
+
+static inline struct efx_tx_queue *
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
+{
+	struct efx_tx_queue *tx_queue = channel->tx_queue;
+	EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
+	return tx_queue ? tx_queue + type : NULL;
+}
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
@@ -928,12 +944,27 @@ struct efx_nic_type {
 	     _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
 	     _tx_queue++)
 
+static inline struct efx_rx_queue *
+efx_get_rx_queue(struct efx_nic *efx, unsigned index)
+{
+	EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
+	return &efx->rx_queue[index];
+}
+
 /* Iterate over all used RX queues */
 #define efx_for_each_rx_queue(_rx_queue, _efx)				\
 	for (_rx_queue = &((_efx)->rx_queue[0]);			\
 	     _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]);	\
 	     _rx_queue++)
 
+static inline struct efx_rx_queue *
+efx_channel_get_rx_queue(struct efx_channel *channel)
+{
+	struct efx_rx_queue *rx_queue =
+		&channel->efx->rx_queue[channel->channel];
+	return rx_queue->channel == channel ? rx_queue : NULL;
+}
+
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
 	for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
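As a rough usage sketch of the accessors added above (not part of the patch; a valid efx pointer and driver context are assumed, and EFX_TXQ_TYPE_OFFLOAD selects the checksum-offload TX queue as elsewhere in this driver):

	/* Look up channel 0 and its checksum-offload TX queue */
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue =
		efx_channel_get_tx_queue(channel, EFX_TXQ_TYPE_OFFLOAD);

	/* efx_channel_get_rx_queue() returns NULL for a channel with no RX queue */
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	if (rx_queue)
		efx_fast_push_rx_descriptors(rx_queue);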
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index be4d5524054f..9e3563348eb7 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -682,7 +682,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 		/* Transmit completion */
 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-		tx_queue = &efx->tx_queue[tx_ev_q_label];
+		tx_queue = efx_channel_get_tx_queue(
+			channel, tx_ev_q_label % EFX_TXQ_TYPES);
 		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
 			      EFX_TXQ_MASK);
 		channel->irq_mod_score += tx_packets;
@@ -690,7 +691,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
 		/* Rewrite the FIFO write pointer */
 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-		tx_queue = &efx->tx_queue[tx_ev_q_label];
+		tx_queue = efx_channel_get_tx_queue(
+			channel, tx_ev_q_label % EFX_TXQ_TYPES);
 
 		if (efx_dev_registered(efx))
 			netif_tx_lock(efx->net_dev);
@@ -830,7 +832,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
 		channel->channel);
 
-	rx_queue = &efx->rx_queue[channel->channel];
+	rx_queue = efx_channel_get_rx_queue(channel);
 
 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
 	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
@@ -882,7 +884,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
 		/* The queue must be empty, so we won't receive any rx
 		 * events, so efx_process_channel() won't refill the
 		 * queue. Refill it here */
-		efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
 	else
 		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
 			  "generated event "EFX_QWORD_FMT"\n",
@@ -1166,7 +1168,7 @@ void efx_nic_generate_fill_event(struct efx_channel *channel)
 
 static void efx_poll_flush_events(struct efx_nic *efx)
 {
-	struct efx_channel *channel = &efx->channel[0];
+	struct efx_channel *channel = efx_get_channel(efx, 0);
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	unsigned int read_ptr = channel->eventq_read_ptr;
@@ -1188,7 +1190,9 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 			ev_queue = EFX_QWORD_FIELD(*event,
 						   FSF_AZ_DRIVER_EV_SUBDATA);
 			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
-				tx_queue = efx->tx_queue + ev_queue;
+				tx_queue = efx_get_tx_queue(
+					efx, ev_queue / EFX_TXQ_TYPES,
+					ev_queue % EFX_TXQ_TYPES);
 				tx_queue->flushed = FLUSH_DONE;
 			}
 		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
@@ -1198,7 +1202,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 			ev_failed = EFX_QWORD_FIELD(
 				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
 			if (ev_queue < efx->n_rx_channels) {
-				rx_queue = efx->rx_queue + ev_queue;
+				rx_queue = efx_get_rx_queue(efx, ev_queue);
 				rx_queue->flushed =
 					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
 			}
@@ -1219,6 +1223,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
  * serialise them */
 int efx_nic_flush_queues(struct efx_nic *efx)
 {
+	struct efx_channel *channel;
 	struct efx_rx_queue *rx_queue;
 	struct efx_tx_queue *tx_queue;
 	int i, tx_pending, rx_pending;
@@ -1227,29 +1232,35 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	efx->type->prepare_flush(efx);
 
 	/* Flush all tx queues in parallel */
-	efx_for_each_tx_queue(tx_queue, efx)
-		efx_flush_tx_queue(tx_queue);
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_flush_tx_queue(tx_queue);
+	}
 
 	/* The hardware supports four concurrent rx flushes, each of which may
 	 * need to be retried if there is an outstanding descriptor fetch */
 	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
 		rx_pending = tx_pending = 0;
-		efx_for_each_rx_queue(rx_queue, efx) {
-			if (rx_queue->flushed == FLUSH_PENDING)
-				++rx_pending;
-		}
-		efx_for_each_rx_queue(rx_queue, efx) {
-			if (rx_pending == EFX_RX_FLUSH_COUNT)
-				break;
-			if (rx_queue->flushed == FLUSH_FAILED ||
-			    rx_queue->flushed == FLUSH_NONE) {
-				efx_flush_rx_queue(rx_queue);
-				++rx_pending;
-			}
-		}
-		efx_for_each_tx_queue(tx_queue, efx) {
-			if (tx_queue->flushed != FLUSH_DONE)
-				++tx_pending;
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_channel_rx_queue(rx_queue, channel) {
+				if (rx_queue->flushed == FLUSH_PENDING)
+					++rx_pending;
+			}
+		}
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_channel_rx_queue(rx_queue, channel) {
+				if (rx_pending == EFX_RX_FLUSH_COUNT)
+					break;
+				if (rx_queue->flushed == FLUSH_FAILED ||
+				    rx_queue->flushed == FLUSH_NONE) {
+					efx_flush_rx_queue(rx_queue);
+					++rx_pending;
+				}
+			}
+			efx_for_each_channel_tx_queue(tx_queue, channel) {
+				if (tx_queue->flushed != FLUSH_DONE)
+					++tx_pending;
+			}
 		}
 
 		if (rx_pending == 0 && tx_pending == 0)
@@ -1261,19 +1272,21 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 
 	/* Mark the queues as all flushed. We're going to return failure
 	 * leading to a reset, or fake up success anyway */
-	efx_for_each_tx_queue(tx_queue, efx) {
-		if (tx_queue->flushed != FLUSH_DONE)
-			netif_err(efx, hw, efx->net_dev,
-				  "tx queue %d flush command timed out\n",
-				  tx_queue->queue);
-		tx_queue->flushed = FLUSH_DONE;
-	}
-	efx_for_each_rx_queue(rx_queue, efx) {
-		if (rx_queue->flushed != FLUSH_DONE)
-			netif_err(efx, hw, efx->net_dev,
-				  "rx queue %d flush command timed out\n",
-				  efx_rx_queue_index(rx_queue));
-		rx_queue->flushed = FLUSH_DONE;
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->flushed != FLUSH_DONE)
+				netif_err(efx, hw, efx->net_dev,
+					  "tx queue %d flush command timed out\n",
+					  tx_queue->queue);
+			tx_queue->flushed = FLUSH_DONE;
+		}
+		efx_for_each_channel_rx_queue(rx_queue, channel) {
+			if (rx_queue->flushed != FLUSH_DONE)
+				netif_err(efx, hw, efx->net_dev,
+					  "rx queue %d flush command timed out\n",
+					  efx_rx_queue_index(rx_queue));
+			rx_queue->flushed = FLUSH_DONE;
+		}
 	}
 
 	return -ETIMEDOUT;
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 1e6c8cfa6c0c..6651d9364e8f 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -311,7 +311,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
 				   struct efx_rx_buffer *rx_buf)
 {
 	struct efx_nic *efx = channel->efx;
-	struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
+	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
 	struct efx_rx_buffer *new_buf;
 	unsigned index;
 
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 85f015f005d5..11153d99bc2b 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -567,7 +567,7 @@ static int efx_wait_for_link(struct efx_nic *efx)
 			efx->type->monitor(efx);
 			mutex_unlock(&efx->mac_lock);
 		} else {
-			struct efx_channel *channel = &efx->channel[0];
+			struct efx_channel *channel = efx_get_channel(efx, 0);
 			if (channel->work_pending)
 				efx_process_channel_now(channel);
 		}
@@ -594,6 +594,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 {
 	enum efx_loopback_mode mode;
 	struct efx_loopback_state *state;
+	struct efx_channel *channel = efx_get_channel(efx, 0);
 	struct efx_tx_queue *tx_queue;
 	int rc = 0;
 
@@ -634,7 +635,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 	}
 
 	/* Test both types of TX queue */
-	efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+	efx_for_each_channel_tx_queue(tx_queue, channel) {
 		state->offload_csum = (tx_queue->queue &
 				       EFX_TXQ_TYPE_OFFLOAD);
 		rc = efx_test_loopback(tx_queue,
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index c6942da2c99a..6a6acc47285c 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -37,8 +37,9 @@
 void efx_stop_queue(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
+	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
 
-	if (!channel->tx_queue)
+	if (!tx_queue)
 		return;
 
 	spin_lock_bh(&channel->tx_stop_lock);
@@ -46,9 +47,8 @@ void efx_stop_queue(struct efx_channel *channel)
 
 	atomic_inc(&channel->tx_stop_count);
 	netif_tx_stop_queue(
-		netdev_get_tx_queue(
-			efx->net_dev,
-			channel->tx_queue->queue / EFX_TXQ_TYPES));
+		netdev_get_tx_queue(efx->net_dev,
+				    tx_queue->queue / EFX_TXQ_TYPES));
 
 	spin_unlock_bh(&channel->tx_stop_lock);
 }
@@ -57,8 +57,9 @@ void efx_stop_queue(struct efx_channel *channel)
 void efx_wake_queue(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
+	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
 
-	if (!channel->tx_queue)
+	if (!tx_queue)
 		return;
 
 	local_bh_disable();
@@ -66,9 +67,8 @@ void efx_wake_queue(struct efx_channel *channel)
 				   &channel->tx_stop_lock)) {
 		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
 		netif_tx_wake_queue(
-			netdev_get_tx_queue(
-				efx->net_dev,
-				channel->tx_queue->queue / EFX_TXQ_TYPES));
+			netdev_get_tx_queue(efx->net_dev,
+					    tx_queue->queue / EFX_TXQ_TYPES));
 		spin_unlock(&channel->tx_stop_lock);
 	}
 	local_bh_enable();
@@ -390,9 +390,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 	if (unlikely(efx->port_inhibited))
 		return NETDEV_TX_BUSY;
 
-	tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
-	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
-		tx_queue += EFX_TXQ_TYPE_OFFLOAD;
+	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
+				    skb->ip_summed == CHECKSUM_PARTIAL ?
+				    EFX_TXQ_TYPE_OFFLOAD : 0);
 
 	return efx_enqueue_skb(tx_queue, skb);
 }