author     David S. Miller <davem@davemloft.net>  2012-12-01 11:30:10 -0500
committer  David S. Miller <davem@davemloft.net>  2012-12-01 11:30:10 -0500
commit     abe303dbc22bd16cde6f1a62fc25f63cc254caf7 (patch)
tree       d1f41c1b26d27eeb837050aebe06c16f8af2400e /drivers/net/ethernet
parent     a20da984fb5ddedb9e8e699c04e10fe0ca609440 (diff)
parent     b9cc977d9d4d1866ee83df38815f4b3b34d99dd9 (diff)
Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next
Ben Hutchings says:

====================
1. More workarounds for TX queue flush failures that can occur during
   interface reconfiguration.
2. Fix spurious failure of a firmware request running during a system
   clock change, e.g. ntpd started at the same time as driver load.
3. Fix inconsistent statistics after a firmware upgrade.
4. Fix a variable (non-)initialisation in offline self-test that can
   make it more disruptive than intended.
5. Fix a race that can (at least) cause an assertion failure.
6. Miscellaneous cleanup.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/sfc/efx.c          12
-rw-r--r--  drivers/net/ethernet/sfc/efx.h          13
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c      25
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c        2
-rw-r--r--  drivers/net/ethernet/sfc/io.h           43
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c         23
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h    3
-rw-r--r--  drivers/net/ethernet/sfc/nic.c          81
-rw-r--r--  drivers/net/ethernet/sfc/nic.h           2
-rw-r--r--  drivers/net/ethernet/sfc/rx.c            6
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c      4
-rw-r--r--  drivers/net/ethernet/sfc/siena.c        17
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c   8
13 files changed, 156 insertions, 83 deletions
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 4f86d0cd516a..d858f310b2b2 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -106,8 +106,8 @@ static struct workqueue_struct *reset_workqueue;
  *
  * This is only used in MSI-X interrupt mode
  */
-static unsigned int separate_tx_channels;
-module_param(separate_tx_channels, uint, 0444);
+static bool separate_tx_channels;
+module_param(separate_tx_channels, bool, 0444);
 MODULE_PARM_DESC(separate_tx_channels,
                  "Use separate channels for TX and RX");
 
@@ -160,8 +160,8 @@ static unsigned int rss_cpus;
 module_param(rss_cpus, uint, 0444);
 MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
 
-static int phy_flash_cfg;
-module_param(phy_flash_cfg, int, 0644);
+static bool phy_flash_cfg;
+module_param(phy_flash_cfg, bool, 0644);
 MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
 
 static unsigned irq_adapt_low_thresh = 8000;
@@ -2279,7 +2279,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
         netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
                    RESET_TYPE(method));
 
-        netif_device_detach(efx->net_dev);
+        efx_device_detach_sync(efx);
         efx_reset_down(efx, method);
 
         rc = efx->type->reset(efx, method);
@@ -2758,7 +2758,7 @@ static int efx_pm_freeze(struct device *dev)
         if (efx->state != STATE_DISABLED) {
                 efx->state = STATE_UNINIT;
 
-                netif_device_detach(efx->net_dev);
+                efx_device_detach_sync(efx);
 
                 efx_stop_all(efx);
                 efx_stop_interrupts(efx, false);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index f11170bc48bf..50247dfe8f57 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -163,4 +163,17 @@ extern void efx_link_status_changed(struct efx_nic *efx);
 extern void efx_link_set_advertising(struct efx_nic *efx, u32);
 extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
 
+static inline void efx_device_detach_sync(struct efx_nic *efx)
+{
+        struct net_device *dev = efx->net_dev;
+
+        /* Lock/freeze all TX queues so that we can be sure the
+         * TX scheduler is stopped when we're done and before
+         * netif_device_present() becomes false.
+         */
+        netif_tx_lock(dev);
+        netif_device_detach(dev);
+        netif_tx_unlock(dev);
+}
+
 #endif /* EFX_EFX_H */
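
The new efx_device_detach_sync() helper above is what several of the flush workarounds in this series rely on: holding netif_tx_lock() across netif_device_detach() guarantees that any ndo_start_xmit() call already running on another CPU has finished before netif_device_present() goes false, so no new descriptors can be queued once a flush or reset begins. Callers simply replace netif_device_detach() with the helper, roughly as in this simplified sketch of the reset path (error handling and the efx_reset_up()/re-attach side are abbreviated; this is not the literal efx.c code):

/* Simplified sketch of how the helper slots into a reset sequence. */
static int efx_reset_sketch(struct efx_nic *efx, enum reset_type method)
{
        int rc;

        efx_device_detach_sync(efx);    /* was: netif_device_detach() */
        efx_reset_down(efx, method);

        rc = efx->type->reset(efx, method);
        /* ... efx_reset_up() and netif_device_attach() on the way back up ... */
        return rc;
}
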
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 90f078eff8e6..8e61cd06f66a 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -816,6 +816,9 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
 /* MAC address mask including only MC flag */
 static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
 
+#define IP4_ADDR_FULL_MASK   ((__force __be32)~0)
+#define PORT_FULL_MASK       ((__force __be16)~0)
+
 static int efx_ethtool_get_class_rule(struct efx_nic *efx,
                                       struct ethtool_rx_flow_spec *rule)
 {
@@ -865,12 +868,12 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
                         &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
                         &ip_entry->ip4src, &ip_entry->psrc);
                 EFX_WARN_ON_PARANOID(rc);
-                ip_mask->ip4src = ~0;
-                ip_mask->psrc = ~0;
+                ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+                ip_mask->psrc = PORT_FULL_MASK;
         }
         rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
-        ip_mask->ip4dst = ~0;
-        ip_mask->pdst = ~0;
+        ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+        ip_mask->pdst = PORT_FULL_MASK;
         return rc;
 }
 
@@ -971,7 +974,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
 
         /* Check for unsupported extensions */
         if ((rule->flow_type & FLOW_EXT) &&
-            (rule->m_ext.vlan_etype | rule->m_ext.data[0] |
+            (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
              rule->m_ext.data[1]))
                 return -EINVAL;
 
@@ -986,16 +989,16 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
                                          IPPROTO_TCP : IPPROTO_UDP);
 
                 /* Must match all of destination, */
-                if ((__force u32)~ip_mask->ip4dst |
-                    (__force u16)~ip_mask->pdst)
+                if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK &&
+                      ip_mask->pdst == PORT_FULL_MASK))
                         return -EINVAL;
                 /* all or none of source, */
-                if ((ip_mask->ip4src | ip_mask->psrc) &&
-                    ((__force u32)~ip_mask->ip4src |
-                     (__force u16)~ip_mask->psrc))
+                if ((ip_mask->ip4src || ip_mask->psrc) &&
+                    !(ip_mask->ip4src == IP4_ADDR_FULL_MASK &&
+                      ip_mask->psrc == PORT_FULL_MASK))
                         return -EINVAL;
                 /* and nothing else */
-                if (ip_mask->tos | rule->m_ext.vlan_tci)
+                if (ip_mask->tos || rule->m_ext.vlan_tci)
                         return -EINVAL;
 
                 if (ip_mask->ip4src)
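
The rewritten checks above boil down to a simple predicate on the TCP/UDP IPv4 masks: the destination address and port must be matched exactly, the source address and port must be either both exact or both wildcarded, and no other field may be masked. The helper below is purely illustrative (it is not part of the driver) and assumes the IP4_ADDR_FULL_MASK/PORT_FULL_MASK definitions added in this patch:

/* Illustrative only: true if an ethtool TCP/UDP IPv4 flow-spec mask is
 * one the sfc filter code can express.
 */
static bool ip4_masks_supported(const struct ethtool_tcpip4_spec *m)
{
        /* Must match all of destination, */
        if (m->ip4dst != IP4_ADDR_FULL_MASK || m->pdst != PORT_FULL_MASK)
                return false;
        /* all or none of source, */
        if ((m->ip4src || m->psrc) &&
            (m->ip4src != IP4_ADDR_FULL_MASK || m->psrc != PORT_FULL_MASK))
                return false;
        /* and nothing else. */
        return m->tos == 0;
}
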
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 12b573a8e82b..49bcd196e10d 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1792,6 +1792,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
         .remove_port = falcon_remove_port,
         .handle_global_event = falcon_handle_global_event,
         .prepare_flush = falcon_prepare_flush,
+        .finish_flush = efx_port_dummy_op_void,
         .update_stats = falcon_update_nic_stats,
         .start_stats = falcon_start_nic_stats,
         .stop_stats = falcon_stop_nic_stats,
@@ -1834,6 +1835,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
         .remove_port = falcon_remove_port,
         .handle_global_event = falcon_handle_global_event,
         .prepare_flush = falcon_prepare_flush,
+        .finish_flush = efx_port_dummy_op_void,
         .update_stats = falcon_update_nic_stats,
         .start_stats = falcon_start_nic_stats,
         .stop_stats = falcon_stop_nic_stats,
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 751d1ec112cc..96759aee1c6c 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -22,22 +22,21 @@
  *
  * Notes on locking strategy:
  *
- * Most CSRs are 128-bit (oword) and therefore cannot be read or
- * written atomically.  Access from the host is buffered by the Bus
- * Interface Unit (BIU).  Whenever the host reads from the lowest
- * address of such a register, or from the address of a different such
- * register, the BIU latches the register's value.  Subsequent reads
- * from higher addresses of the same register will read the latched
- * value.  Whenever the host writes part of such a register, the BIU
- * collects the written value and does not write to the underlying
- * register until all 4 dwords have been written.  A similar buffering
- * scheme applies to host access to the NIC's 64-bit SRAM.
+ * Many CSRs are very wide and cannot be read or written atomically.
+ * Writes from the host are buffered by the Bus Interface Unit (BIU)
+ * up to 128 bits.  Whenever the host writes part of such a register,
+ * the BIU collects the written value and does not write to the
+ * underlying register until all 4 dwords have been written.  A
+ * similar buffering scheme applies to host access to the NIC's 64-bit
+ * SRAM.
  *
- * Access to different CSRs and 64-bit SRAM words must be serialised,
- * since interleaved access can result in lost writes or lost
- * information from read-to-clear fields. We use efx_nic::biu_lock
- * for this.  (We could use separate locks for read and write, but
- * this is not normally a performance bottleneck.)
+ * Writes to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes.  We use
+ * efx_nic::biu_lock for this.
+ *
+ * We also serialise reads from 128-bit CSRs and SRAM with the same
+ * spinlock.  This may not be necessary, but it doesn't really matter
+ * as there are no such reads on the fast path.
  *
  * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
  * 128-bit but are special-cased in the BIU to avoid the need for
@@ -204,20 +203,6 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
         efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
 }
 
-/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
-static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
-                                    unsigned int reg, unsigned int index)
-{
-        efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
-}
-
-/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
-static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
-                                   unsigned int reg, unsigned int index)
-{
-        efx_readd(efx, value, reg + index * sizeof(efx_dword_t));
-}
-
 /* Page-mapped register block size */
 #define EFX_PAGE_BLOCK_SIZE 0x2000
 
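
As the revised comment explains, the BIU only commits a wide CSR once all four dwords of the value have arrived, so the dword writes belonging to one register must never interleave with writes to another; that is what efx_nic::biu_lock serialises. A minimal sketch of that pattern, assuming a simplified register layout (this illustrates the locking rule only and is not the driver's actual efx_writeo() implementation):

/* Sketch: write a 128-bit CSR as four dwords, holding biu_lock so the
 * BIU never sees dwords from two different writes interleaved.
 */
static void write_oword_sketch(struct efx_nic *efx, const u32 value[4],
                               unsigned int reg)
{
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&efx->biu_lock, flags);
        for (i = 0; i < 4; i++)
                writel(value[i], efx->membase + reg + 4 * i);
        spin_unlock_irqrestore(&efx->biu_lock, flags);
}
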
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index aea43cbd0520..0095ce95150b 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -22,7 +22,7 @@
 **************************************************************************
 */
 
-#define MCDI_RPC_TIMEOUT 10 /*seconds */
+#define MCDI_RPC_TIMEOUT (10 * HZ)
 
 #define MCDI_PDU(efx)						\
         (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
@@ -120,7 +120,7 @@ static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
 static int efx_mcdi_poll(struct efx_nic *efx)
 {
         struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-        unsigned int time, finish;
+        unsigned long time, finish;
         unsigned int respseq, respcmd, error;
         unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
         unsigned int rc, spins;
@@ -136,7 +136,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
          * and poll once a jiffy (approximately)
          */
         spins = TICK_USEC;
-        finish = get_seconds() + MCDI_RPC_TIMEOUT;
+        finish = jiffies + MCDI_RPC_TIMEOUT;
 
         while (1) {
                 if (spins != 0) {
@@ -146,7 +146,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
                         schedule_timeout_uninterruptible(1);
                 }
 
-                time = get_seconds();
+                time = jiffies;
 
                 rmb();
                 efx_readd(efx, &reg, pdu);
@@ -158,7 +158,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
                     EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
                         break;
 
-                if (time >= finish)
+                if (time_after(time, finish))
                         return -ETIMEDOUT;
         }
 
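
This get_seconds() to jiffies conversion is the fix for item 2 in the merge summary: get_seconds() tracks wall-clock time, so a clock step (ntpd starting while the driver loads, for example) could push the deadline into the past and make an otherwise healthy MCDI request report -ETIMEDOUT. jiffies is monotonic, and time_after() copes with wrap-around. The idiom in isolation, as a hedged sketch rather than the driver's exact loop:

/* Sketch of a monotonic poll-with-timeout; timeout is in jiffies,
 * e.g. 10 * HZ.  time_after() handles jiffies wrap-around correctly.
 */
static int poll_until_done(bool (*done)(void *arg), void *arg,
                           unsigned long timeout)
{
        unsigned long finish = jiffies + timeout;

        while (!done(arg)) {
                if (time_after(jiffies, finish))
                        return -ETIMEDOUT;
                schedule_timeout_uninterruptible(1);    /* sleep ~1 jiffy */
        }
        return 0;
}
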
@@ -207,7 +207,9 @@ out:
         return 0;
 }
 
-/* Test and clear MC-rebooted flag for this port/function */
+/* Test and clear MC-rebooted flag for this port/function; reset
+ * software state as necessary.
+ */
 int efx_mcdi_poll_reboot(struct efx_nic *efx)
 {
         unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
@@ -223,6 +225,11 @@ int efx_mcdi_poll_reboot(struct efx_nic *efx)
         if (value == 0)
                 return 0;
 
+        /* MAC statistics have been cleared on the NIC; clear our copy
+         * so that efx_update_diff_stat() can continue to work.
+         */
+        memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
+
         EFX_ZERO_DWORD(reg);
         efx_writed(efx, &reg, addr);
 
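
The new memset() addresses item 3 in the merge summary. The driver maintains MAC statistics with efx_update_diff_stat(), which only lets a counter move forward so that a racing, slightly stale hardware readout cannot make it step backwards. After an MC reboot (e.g. a firmware upgrade) the NIC's counters restart from zero, so every new readout would look behind the stored value and be ignored unless the host-side copy is zeroed as well. Roughly the idea, as a sketch of the forward-only update (simplified; the driver's efx_update_diff_stat() is the real helper):

/* Sketch: accept a new hardware reading only if it is not behind the
 * stored value; after a firmware reboot the stored copy must be reset
 * to zero or no reading would ever be accepted again.
 */
static void update_diff_stat_sketch(u64 *stat, u64 hw_reading)
{
        if ((s64)(hw_reading - *stat) > 0)
                *stat = hw_reading;
}
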
@@ -250,7 +257,7 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
         if (wait_event_timeout(
                     mcdi->wq,
                     atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
-                    msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
+                    MCDI_RPC_TIMEOUT) == 0)
                 return -ETIMEDOUT;
 
         /* Check if efx_mcdi_set_mode() switched us back to polled completions.
@@ -1216,7 +1223,7 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
 
         rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
                           count * sizeof(*qid), NULL, 0, NULL);
-        WARN_ON(rc > 0);
+        WARN_ON(rc < 0);
 
         kfree(qid);
 
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 2487f582ab04..2d756c1d7142 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -200,6 +200,7 @@ struct efx_tx_queue {
         /* Members shared between paths and sometimes updated */
         unsigned int empty_read_count ____cacheline_aligned_in_smp;
 #define EFX_EMPTY_COUNT_VALID 0x80000000
+        atomic_t flush_outstanding;
 };
 
 /**
@@ -907,6 +908,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
  * @remove_port: Free resources allocated by probe_port()
  * @handle_global_event: Handle a "global" event (may be %NULL)
  * @prepare_flush: Prepare the hardware for flushing the DMA queues
+ * @finish_flush: Clean up after flushing the DMA queues
  * @update_stats: Update statistics not provided by event handling
  * @start_stats: Start the regular fetching of statistics
  * @stop_stats: Stop the regular fetching of statistics
@@ -954,6 +956,7 @@ struct efx_nic_type {
         void (*remove_port)(struct efx_nic *efx);
         bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
         void (*prepare_flush)(struct efx_nic *efx);
+        void (*finish_flush)(struct efx_nic *efx);
         void (*update_stats)(struct efx_nic *efx);
         void (*start_stats)(struct efx_nic *efx);
         void (*stop_stats)(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index aab7cacb2e34..0ad790cc473c 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -73,6 +73,8 @@
         _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
                            (_tx_queue)->queue)
 
+static void efx_magic_event(struct efx_channel *channel, u32 magic);
+
 /**************************************************************************
  *
  * Solarstorm hardware access
@@ -255,9 +257,6 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
         buffer->entries = len / EFX_BUF_SIZE;
         BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
 
-        /* All zeros is a potentially valid event so memset to 0xff */
-        memset(buffer->addr, 0xff, len);
-
         /* Select new buffer ID */
         buffer->index = efx->next_buffer_table;
         efx->next_buffer_table += buffer->entries;
@@ -494,6 +493,9 @@ static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
         struct efx_nic *efx = tx_queue->efx;
         efx_oword_t tx_flush_descq;
 
+        WARN_ON(atomic_read(&tx_queue->flush_outstanding));
+        atomic_set(&tx_queue->flush_outstanding, 1);
+
         EFX_POPULATE_OWORD_2(tx_flush_descq,
                              FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
                              FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
@@ -669,6 +671,47 @@ static bool efx_flush_wake(struct efx_nic *efx)
                 && atomic_read(&efx->rxq_flush_pending) > 0));
 }
 
+static bool efx_check_tx_flush_complete(struct efx_nic *efx)
+{
+        bool i = true;
+        efx_oword_t txd_ptr_tbl;
+        struct efx_channel *channel;
+        struct efx_tx_queue *tx_queue;
+
+        efx_for_each_channel(channel, efx) {
+                efx_for_each_channel_tx_queue(tx_queue, channel) {
+                        efx_reado_table(efx, &txd_ptr_tbl,
+                                        FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
+                        if (EFX_OWORD_FIELD(txd_ptr_tbl,
+                                            FRF_AZ_TX_DESCQ_FLUSH) ||
+                            EFX_OWORD_FIELD(txd_ptr_tbl,
+                                            FRF_AZ_TX_DESCQ_EN)) {
+                                netif_dbg(efx, hw, efx->net_dev,
+                                          "flush did not complete on TXQ %d\n",
+                                          tx_queue->queue);
+                                i = false;
+                        } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
+                                                  1, 0)) {
+                                /* The flush is complete, but we didn't
+                                 * receive a flush completion event
+                                 */
+                                netif_dbg(efx, hw, efx->net_dev,
+                                          "flush complete on TXQ %d, so drain "
+                                          "the queue\n", tx_queue->queue);
+                                /* Don't need to increment drain_pending as it
+                                 * has already been incremented for the queues
+                                 * which did not drain
+                                 */
+                                efx_magic_event(channel,
+                                                EFX_CHANNEL_MAGIC_TX_DRAIN(
+                                                        tx_queue));
+                        }
+                }
+        }
+
+        return i;
+}
+
 /* Flush all the transmit queues, and continue flushing receive queues until
  * they're all flushed. Wait for the DRAIN events to be recieved so that there
  * are no more RX and TX events left on any channel. */
@@ -680,7 +723,6 @@ int efx_nic_flush_queues(struct efx_nic *efx)
         struct efx_tx_queue *tx_queue;
         int rc = 0;
 
-        efx->fc_disable++;
         efx->type->prepare_flush(efx);
 
         efx_for_each_channel(channel, efx) {
@@ -730,7 +772,8 @@ int efx_nic_flush_queues(struct efx_nic *efx)
                                      timeout);
         }
 
-        if (atomic_read(&efx->drain_pending)) {
+        if (atomic_read(&efx->drain_pending) &&
+            !efx_check_tx_flush_complete(efx)) {
                 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
                           "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
                           atomic_read(&efx->rxq_flush_outstanding),
@@ -742,7 +785,7 @@ int efx_nic_flush_queues(struct efx_nic *efx)
                 atomic_set(&efx->rxq_flush_outstanding, 0);
         }
 
-        efx->fc_disable--;
+        efx->type->finish_flush(efx);
 
         return rc;
 }
@@ -766,8 +809,13 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
 
         EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
                              channel->eventq_read_ptr & channel->eventq_mask);
-        efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
-                         channel->channel);
+
+        /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
+         * of 4 bytes, but it is really 16 bytes just like later revisions.
+         */
+        efx_writed(efx, &reg,
+                   efx->type->evq_rptr_tbl_base +
+                   FR_BZ_EVQ_RPTR_STEP * channel->channel);
 }
 
 /* Use HW to insert a SW defined event */
@@ -1017,9 +1065,10 @@ efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
         if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
                 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
                                             qid % EFX_TXQ_TYPES);
-
-                efx_magic_event(tx_queue->channel,
-                                EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+                if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+                        efx_magic_event(tx_queue->channel,
+                                        EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+                }
         }
 }
 
@@ -1565,7 +1614,9 @@ void efx_nic_push_rx_indir_table(struct efx_nic *efx)
         for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
                 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
                                      efx->rx_indir_table[i]);
-                efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
+                efx_writed(efx, &dword,
+                           FR_BZ_RX_INDIRECTION_TBL +
+                           FR_BZ_RX_INDIRECTION_TBL_STEP * i);
         }
 }
 
@@ -2029,15 +2080,15 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
 
                 for (i = 0; i < table->rows; i++) {
                         switch (table->step) {
-                        case 4: /* 32-bit register or SRAM */
-                                efx_readd_table(efx, buf, table->offset, i);
+                        case 4: /* 32-bit SRAM */
+                                efx_readd(efx, buf, table->offset + 4 * i);
                                 break;
                         case 8: /* 64-bit SRAM */
                                 efx_sram_readq(efx,
                                                efx->membase + table->offset,
                                                buf, i);
                                 break;
-                        case 16: /* 128-bit register */
+                        case 16: /* 128-bit-readable register */
                                 efx_reado_table(efx, buf, table->offset, i);
                                 break;
                         case 32: /* 128-bit register, interleaved */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 7a9647a3c565..1b0003323498 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -344,6 +344,8 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
 
 /* Global Resources */
 extern int efx_nic_flush_queues(struct efx_nic *efx);
+extern void siena_prepare_flush(struct efx_nic *efx);
+extern void siena_finish_flush(struct efx_nic *efx);
 extern void falcon_start_nic_stats(struct efx_nic *efx);
 extern void falcon_stop_nic_stats(struct efx_nic *efx);
 extern void falcon_setup_xaui(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 9e0ad1b75c33..d780a0d096b4 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -187,7 +187,6 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
         struct efx_nic *efx = rx_queue->efx;
         struct efx_rx_buffer *rx_buf;
         struct page *page;
-        void *page_addr;
         struct efx_rx_page_state *state;
         dma_addr_t dma_addr;
         unsigned index, count;
@@ -207,12 +206,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                         __free_pages(page, efx->rx_buffer_order);
                         return -EIO;
                 }
-                page_addr = page_address(page);
-                state = page_addr;
+                state = page_address(page);
                 state->refcnt = 0;
                 state->dma_addr = dma_addr;
 
-                page_addr += sizeof(struct efx_rx_page_state);
                 dma_addr += sizeof(struct efx_rx_page_state);
 
         split:
@@ -230,7 +227,6 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                         /* Use the second half of the page */
                         get_page(page);
                         dma_addr += (PAGE_SIZE >> 1);
-                        page_addr += (PAGE_SIZE >> 1);
                         ++count;
                         goto split;
                 }
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index ce72ae4f399f..2069f51b2aa9 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -373,7 +373,7 @@ static void efx_iterate_state(struct efx_nic *efx)
         /* saddr set later and used as incrementing count */
         payload->ip.daddr = htonl(INADDR_LOOPBACK);
         payload->ip.ihl = 5;
-        payload->ip.check = htons(0xdead);
+        payload->ip.check = (__force __sum16) htons(0xdead);
         payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
         payload->ip.version = IPVERSION;
         payload->ip.protocol = IPPROTO_UDP;
@@ -722,7 +722,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
         /* Detach the device so the kernel doesn't transmit during the
          * loopback test and the watchdog timeout doesn't fire.
          */
-        netif_device_detach(efx->net_dev);
+        efx_device_detach_sync(efx);
 
         if (efx->type->test_chip) {
                 rc_reset = efx->type->test_chip(efx, tests);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 84b41bf08a38..ba40f67e4f05 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -127,6 +127,18 @@ static void siena_remove_port(struct efx_nic *efx)
         efx_nic_free_buffer(efx, &efx->stats_buffer);
 }
 
+void siena_prepare_flush(struct efx_nic *efx)
+{
+        if (efx->fc_disable++ == 0)
+                efx_mcdi_set_mac(efx);
+}
+
+void siena_finish_flush(struct efx_nic *efx)
+{
+        if (--efx->fc_disable == 0)
+                efx_mcdi_set_mac(efx);
+}
+
 static const struct efx_nic_register_test siena_register_tests[] = {
         { FR_AZ_ADR_REGION,
           EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
@@ -158,7 +170,7 @@ static const struct efx_nic_register_test siena_register_tests[] = {
 
 static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
 {
-        enum reset_type reset_method = reset_method;
+        enum reset_type reset_method = RESET_TYPE_ALL;
         int rc, rc2;
 
         efx_reset_down(efx, reset_method);
@@ -659,7 +671,8 @@ const struct efx_nic_type siena_a0_nic_type = {
         .reset = siena_reset_hw,
         .probe_port = siena_probe_port,
         .remove_port = siena_remove_port,
-        .prepare_flush = efx_port_dummy_op_void,
+        .prepare_flush = siena_prepare_flush,
+        .finish_flush = siena_finish_flush,
         .update_stats = siena_update_nic_stats,
         .start_stats = siena_start_nic_stats,
         .stop_stats = siena_stop_nic_stats,
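
siena_prepare_flush()/siena_finish_flush() give a name to the fc_disable reference count that efx_nic_flush_queues() and the SR-IOV VFDI path (see siena_sriov.c below) previously open-coded: the MAC is reconfigured via MCDI only on the 0-to-1 and 1-to-0 transitions, so flush sections can nest as long as the calls are balanced. A hedged usage sketch; serialisation of fc_disable is assumed to come from whatever the callers already hold, such as the RTNL lock in the VFDI path:

/* Sketch: nested flush sections; only the outermost pair talks to the MC. */
static void flush_section_sketch(struct efx_nic *efx)
{
        siena_prepare_flush(efx);       /* fc_disable 0 -> 1: MAC reconfigured */

        siena_prepare_flush(efx);       /* nested: counter bump only */
        /* ... flush some queues ... */
        siena_finish_flush(efx);        /* 2 -> 1: no MCDI call */

        siena_finish_flush(efx);        /* 1 -> 0: flow control settings restored */
}
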
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index d49b53dc2a50..90f8d1604f5f 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -695,8 +695,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
                 return VFDI_RC_ENOMEM;
 
         rtnl_lock();
-        if (efx->fc_disable++ == 0)
-                efx_mcdi_set_mac(efx);
+        siena_prepare_flush(efx);
         rtnl_unlock();
 
         /* Flush all the initialized queues */
@@ -733,8 +732,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
         }
 
         rtnl_lock();
-        if (--efx->fc_disable == 0)
-                efx_mcdi_set_mac(efx);
+        siena_finish_flush(efx);
         rtnl_unlock();
 
         /* Irrespective of success/failure, fini the queues */
@@ -995,7 +993,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
                              FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
         efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
         EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
-        efx_writed_table(efx, &ptr, FR_BZ_EVQ_RPTR, abs_evq);
+        efx_writed(efx, &ptr, FR_BZ_EVQ_RPTR + FR_BZ_EVQ_RPTR_STEP * abs_evq);
 
         mutex_unlock(&vf->status_lock);
 }