author     David S. Miller <davem@davemloft.net>   2013-08-31 22:24:24 -0400
committer  David S. Miller <davem@davemloft.net>   2013-08-31 22:24:24 -0400
commit     ae5dbf1ad83cb90921a400d15fb18afae66e1e56 (patch)
tree       92492ce47c6caf7851c2a6e5abda31286fc2421b
parent     34aedd3f3b289edba118e66450e95790ccab5091 (diff)
parent     f7a6d2c4427790cc8695401576dc594fcce8fc80 (diff)
Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next
Ben Hutchings says:

====================
1. A little more refactoring.
2. Remove the unnecessary use of atomic_t that you pointed out.
3. Add support for starting or queueing firmware requests from atomic context.
4. Add hwmon support for additional sensors found on some new boards.
5. Add support for the EF10 controller architecture, the SFC9100 family and
   specifically the SFC9120 controller.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/sfc/Makefile | 4
-rw-r--r--  drivers/net/ethernet/sfc/bitfield.h | 8
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 3043
-rw-r--r--  drivers/net/ethernet/sfc/ef10_regs.h | 415
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 175
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 26
-rw-r--r--  drivers/net/ethernet/sfc/enum.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/falcon_boards.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 28
-rw-r--r--  drivers/net/ethernet/sfc/farch_regs.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/filter.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/io.h | 24
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 461
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 45
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_mon.c | 125
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c | 25
-rw-r--r--  drivers/net/ethernet/sfc/mdio_10g.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/mdio_10g.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/mtd.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 33
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 89
-rw-r--r--  drivers/net/ethernet/sfc/phy.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/qt202x_phy.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 46
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/selftest.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 28
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/tenxpress.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/txc43128_phy.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/vfdi.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/workarounds.h | 10
39 files changed, 4404 insertions(+), 271 deletions(-)
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 4136ccc4a954..8b7152565c5e 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,5 +1,5 @@
 config SFC
-	tristate "Solarflare SFC4000/SFC9000-family support"
+	tristate "Solarflare SFC4000/SFC9000/SFC9100-family support"
 	depends on PCI
 	select MDIO
 	select CRC32
@@ -8,12 +8,13 @@ config SFC
 	select PTP_1588_CLOCK
 	---help---
 	  This driver supports 10-gigabit Ethernet cards based on
-	  the Solarflare SFC4000 and SFC9000-family controllers.
+	  the Solarflare SFC4000, SFC9000-family and SFC9100-family
+	  controllers.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called sfc.
 config SFC_MTD
-	bool "Solarflare SFC4000/SFC9000-family MTD support"
+	bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support"
 	depends on SFC && MTD && !(SFC=y && MTD=m)
 	default y
 	---help---
@@ -21,7 +22,7 @@ config SFC_MTD
 	  (e.g. /dev/mtd1). This is required to update the firmware or
 	  the boot configuration under Linux.
 config SFC_MCDI_MON
-	bool "Solarflare SFC9000-family hwmon support"
+	bool "Solarflare SFC9000/SFC9100-family hwmon support"
 	depends on SFC && HWMON && !(SFC=y && HWMON=m)
 	default y
 	---help---
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index a61272661a73..3a83c0dca8e6 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,5 +1,5 @@
-sfc-y			+= efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \
-			   selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
+sfc-y			+= efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
+			   rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
 			   tenxpress.o txc43128_phy.o falcon_boards.o \
 			   mcdi.o mcdi_port.o mcdi_mon.o ptp.o
 sfc-$(CONFIG_SFC_MTD)	+= mtd.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 5400a33f254f..17d83f37fbf2 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -1,7 +1,7 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,10 @@
 /* Lowest bit numbers and widths */
 #define EFX_DUMMY_FIELD_LBN 0
 #define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
 #define EFX_DWORD_0_LBN 0
 #define EFX_DWORD_0_WIDTH 32
 #define EFX_DWORD_1_LBN 32
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
new file mode 100644
index 000000000000..5f42313b4965
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -0,0 +1,3043 @@
1/****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2012-2013 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "ef10_regs.h"
12#include "io.h"
13#include "mcdi.h"
14#include "mcdi_pcol.h"
15#include "nic.h"
16#include "workarounds.h"
17#include <linux/in.h>
18#include <linux/jhash.h>
19#include <linux/wait.h>
20#include <linux/workqueue.h>
21
22/* Hardware control for EF10 architecture including 'Huntington'. */
23
24#define EFX_EF10_DRVGEN_EV 7
25enum {
26 EFX_EF10_TEST = 1,
27 EFX_EF10_REFILL,
28};
29
30/* The reserved RSS context value */
31#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
32
33/* The filter table(s) are managed by firmware and we have write-only
34 * access. When removing filters we must identify them to the
35 * firmware by a 64-bit handle, but this is too wide for Linux kernel
36 * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
37 * be able to tell in advance whether a requested insertion will
38 * replace an existing filter. Therefore we maintain a software hash
39 * table, which should be at least as large as the hardware hash
40 * table.
41 *
42 * Huntington has a single 8K filter table shared between all filter
43 * types and both ports.
44 */
45#define HUNT_FILTER_TBL_ROWS 8192
46
47struct efx_ef10_filter_table {
48/* The RX match field masks supported by this fw & hw, in order of priority */
49 enum efx_filter_match_flags rx_match_flags[
50 MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
51 unsigned int rx_match_count;
52
53 struct {
54 unsigned long spec; /* pointer to spec plus flag bits */
55/* BUSY flag indicates that an update is in progress. STACK_OLD is
56 * used to mark and sweep stack-owned MAC filters.
57 */
58#define EFX_EF10_FILTER_FLAG_BUSY 1UL
59#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL
60#define EFX_EF10_FILTER_FLAGS 3UL
61 u64 handle; /* firmware handle */
62 } *entry;
63 wait_queue_head_t waitq;
64/* Shadow of net_device address lists, guarded by mac_lock */
65#define EFX_EF10_FILTER_STACK_UC_MAX 32
66#define EFX_EF10_FILTER_STACK_MC_MAX 256
67 struct {
68 u8 addr[ETH_ALEN];
69 u16 id;
70 } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
71 stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
72 int stack_uc_count; /* negative for PROMISC */
73 int stack_mc_count; /* negative for PROMISC/ALLMULTI */
74};
75
76/* An arbitrary search limit for the software hash table */
77#define EFX_EF10_FILTER_SEARCH_LIMIT 200
78
79static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
80static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
81static void efx_ef10_filter_table_remove(struct efx_nic *efx);
82
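/* Read the MC warm boot count from the BIU soft status register. The value
 * is only valid while the upper 16 bits contain the 0xb007 magic; otherwise
 * return -EIO (the MC is presumably rebooting).
 */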
83static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
84{
85 efx_dword_t reg;
86
87 efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
88 return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
89 EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
90}
91
92static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
93{
94 return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
95}
96
97static int efx_ef10_init_capabilities(struct efx_nic *efx)
98{
99 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
100 struct efx_ef10_nic_data *nic_data = efx->nic_data;
101 size_t outlen;
102 int rc;
103
104 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
105
106 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
107 outbuf, sizeof(outbuf), &outlen);
108 if (rc)
109 return rc;
110
111 if (outlen >= sizeof(outbuf)) {
112 nic_data->datapath_caps =
113 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
114 if (!(nic_data->datapath_caps &
115 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
116 netif_err(efx, drv, efx->net_dev,
117 "Capabilities don't indicate TSO support.\n");
118 return -ENODEV;
119 }
120 }
121
122 return 0;
123}
124
125static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
126{
127 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
128 int rc;
129
130 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
131 outbuf, sizeof(outbuf), NULL);
132 if (rc)
133 return rc;
134 rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
135 return rc > 0 ? rc : -ERANGE;
136}
137
138static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
139{
140 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
141 size_t outlen;
142 int rc;
143
144 BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
145
146 rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
147 outbuf, sizeof(outbuf), &outlen);
148 if (rc)
149 return rc;
150 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
151 return -EIO;
152
153 memcpy(mac_address,
154 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
155 return 0;
156}
157
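/* One-time probe: allocate the per-NIC data and MCDI buffer, wait for a
 * valid warm boot count, reset the function, then read the datapath
 * capabilities, port number, MAC address and system clock frequency, and
 * enable the bug 35388 workaround where the firmware supports it.
 */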
158static int efx_ef10_probe(struct efx_nic *efx)
159{
160 struct efx_ef10_nic_data *nic_data;
161 int i, rc;
162
163 /* We can have one VI for each 8K region. However we need
164 * multiple TX queues per channel.
165 */
166 efx->max_channels =
167 min_t(unsigned int,
168 EFX_MAX_CHANNELS,
169 resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
170 (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
171 BUG_ON(efx->max_channels == 0);
172
173 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
174 if (!nic_data)
175 return -ENOMEM;
176 efx->nic_data = nic_data;
177
178 rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
179 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
180 if (rc)
181 goto fail1;
182
183 /* Get the MC's warm boot count. In case it's rebooting right
184 * now, be prepared to retry.
185 */
186 i = 0;
187 for (;;) {
188 rc = efx_ef10_get_warm_boot_count(efx);
189 if (rc >= 0)
190 break;
191 if (++i == 5)
192 goto fail2;
193 ssleep(1);
194 }
195 nic_data->warm_boot_count = rc;
196
197 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
198
199 /* In case we're recovering from a crash (kexec), we want to
200 * cancel any outstanding request by the previous user of this
201 * function. We send a special message using the least
202 * significant bits of the 'high' (doorbell) register.
203 */
204 _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
205
206 rc = efx_mcdi_init(efx);
207 if (rc)
208 goto fail2;
209
210 /* Reset (most) configuration for this function */
211 rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
212 if (rc)
213 goto fail3;
214
215 /* Enable event logging */
216 rc = efx_mcdi_log_ctrl(efx, true, false, 0);
217 if (rc)
218 goto fail3;
219
220 rc = efx_ef10_init_capabilities(efx);
221 if (rc < 0)
222 goto fail3;
223
224 efx->rx_packet_len_offset =
225 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
226
227 if (!(nic_data->datapath_caps &
228 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
229 netif_err(efx, probe, efx->net_dev,
230 "current firmware does not support an RX prefix\n");
231 rc = -ENODEV;
232 goto fail3;
233 }
234
235 rc = efx_mcdi_port_get_number(efx);
236 if (rc < 0)
237 goto fail3;
238 efx->port_num = rc;
239
240 rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
241 if (rc)
242 goto fail3;
243
244 rc = efx_ef10_get_sysclk_freq(efx);
245 if (rc < 0)
246 goto fail3;
247 efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
248
249 /* Check whether firmware supports bug 35388 workaround */
250 rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
251 if (rc == 0)
252 nic_data->workaround_35388 = true;
253 else if (rc != -ENOSYS && rc != -ENOENT)
254 goto fail3;
255 netif_dbg(efx, probe, efx->net_dev,
256 "workaround for bug 35388 is %sabled\n",
257 nic_data->workaround_35388 ? "en" : "dis");
258
259 rc = efx_mcdi_mon_probe(efx);
260 if (rc)
261 goto fail3;
262
263 efx_ptp_probe(efx);
264
265 return 0;
266
267fail3:
268 efx_mcdi_fini(efx);
269fail2:
270 efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
271fail1:
272 kfree(nic_data);
273 efx->nic_data = NULL;
274 return rc;
275}
276
277static int efx_ef10_free_vis(struct efx_nic *efx)
278{
279 int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
280
281 /* -EALREADY means nothing to free, so ignore */
282 if (rc == -EALREADY)
283 rc = 0;
284 return rc;
285}
286
287static void efx_ef10_remove(struct efx_nic *efx)
288{
289 struct efx_ef10_nic_data *nic_data = efx->nic_data;
290 int rc;
291
292 efx_mcdi_mon_remove(efx);
293
294 /* This needs to be after efx_ptp_remove_channel() with no filters */
295 efx_ef10_rx_free_indir_table(efx);
296
297 rc = efx_ef10_free_vis(efx);
298 WARN_ON(rc != 0);
299
300 efx_mcdi_fini(efx);
301 efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
302 kfree(nic_data);
303}
304
305static int efx_ef10_alloc_vis(struct efx_nic *efx,
306 unsigned int min_vis, unsigned int max_vis)
307{
308 MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
309 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
310 struct efx_ef10_nic_data *nic_data = efx->nic_data;
311 size_t outlen;
312 int rc;
313
314 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
315 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
316 rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
317 outbuf, sizeof(outbuf), &outlen);
318 if (rc != 0)
319 return rc;
320
321 if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
322 return -EIO;
323
324 netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
325 MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
326
327 nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
328 nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
329 return 0;
330}
331
332static int efx_ef10_dimension_resources(struct efx_nic *efx)
333{
334 unsigned int n_vis =
335 max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
336
337 return efx_ef10_alloc_vis(efx, n_vis, n_vis);
338}
339
340static int efx_ef10_init_nic(struct efx_nic *efx)
341{
342 struct efx_ef10_nic_data *nic_data = efx->nic_data;
343 int rc;
344
345 if (nic_data->must_realloc_vis) {
346 /* We cannot let the number of VIs change now */
347 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
348 nic_data->n_allocated_vis);
349 if (rc)
350 return rc;
351 nic_data->must_realloc_vis = false;
352 }
353
354 efx_ef10_rx_push_indir_table(efx);
355 return 0;
356}
357
358static int efx_ef10_map_reset_flags(u32 *flags)
359{
360 enum {
361 EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
362 ETH_RESET_SHARED_SHIFT),
363 EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
364 ETH_RESET_OFFLOAD | ETH_RESET_MAC |
365 ETH_RESET_PHY | ETH_RESET_MGMT) <<
366 ETH_RESET_SHARED_SHIFT)
367 };
368
369 /* We assume for now that our PCI function is permitted to
370 * reset everything.
371 */
372
373 if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
374 *flags &= ~EF10_RESET_MC;
375 return RESET_TYPE_WORLD;
376 }
377
378 if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
379 *flags &= ~EF10_RESET_PORT;
380 return RESET_TYPE_ALL;
381 }
382
383 /* no invisible reset implemented */
384
385 return -EINVAL;
386}
387
388#define EF10_DMA_STAT(ext_name, mcdi_name) \
389 [EF10_STAT_ ## ext_name] = \
390 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
391#define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \
392 [EF10_STAT_ ## int_name] = \
393 { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
394#define EF10_OTHER_STAT(ext_name) \
395 [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
396
397static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
398 EF10_DMA_STAT(tx_bytes, TX_BYTES),
399 EF10_DMA_STAT(tx_packets, TX_PKTS),
400 EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
401 EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
402 EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
403 EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
404 EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
405 EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
406 EF10_DMA_STAT(tx_64, TX_64_PKTS),
407 EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
408 EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
409 EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
410 EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
411 EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
412 EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
413 EF10_DMA_STAT(rx_bytes, RX_BYTES),
414 EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
415 EF10_OTHER_STAT(rx_good_bytes),
416 EF10_OTHER_STAT(rx_bad_bytes),
417 EF10_DMA_STAT(rx_packets, RX_PKTS),
418 EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
419 EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
420 EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
421 EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
422 EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
423 EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
424 EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
425 EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
426 EF10_DMA_STAT(rx_64, RX_64_PKTS),
427 EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
428 EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
429 EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
430 EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
431 EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
432 EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
433 EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
434 EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
435 EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
436 EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
437 EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
438 EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
439};
440
441#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
442 (1ULL << EF10_STAT_tx_packets) | \
443 (1ULL << EF10_STAT_tx_pause) | \
444 (1ULL << EF10_STAT_tx_unicast) | \
445 (1ULL << EF10_STAT_tx_multicast) | \
446 (1ULL << EF10_STAT_tx_broadcast) | \
447 (1ULL << EF10_STAT_rx_bytes) | \
448 (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
449 (1ULL << EF10_STAT_rx_good_bytes) | \
450 (1ULL << EF10_STAT_rx_bad_bytes) | \
451 (1ULL << EF10_STAT_rx_packets) | \
452 (1ULL << EF10_STAT_rx_good) | \
453 (1ULL << EF10_STAT_rx_bad) | \
454 (1ULL << EF10_STAT_rx_pause) | \
455 (1ULL << EF10_STAT_rx_control) | \
456 (1ULL << EF10_STAT_rx_unicast) | \
457 (1ULL << EF10_STAT_rx_multicast) | \
458 (1ULL << EF10_STAT_rx_broadcast) | \
459 (1ULL << EF10_STAT_rx_lt64) | \
460 (1ULL << EF10_STAT_rx_64) | \
461 (1ULL << EF10_STAT_rx_65_to_127) | \
462 (1ULL << EF10_STAT_rx_128_to_255) | \
463 (1ULL << EF10_STAT_rx_256_to_511) | \
464 (1ULL << EF10_STAT_rx_512_to_1023) | \
465 (1ULL << EF10_STAT_rx_1024_to_15xx) | \
466 (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \
467 (1ULL << EF10_STAT_rx_gtjumbo) | \
468 (1ULL << EF10_STAT_rx_bad_gtjumbo) | \
469 (1ULL << EF10_STAT_rx_overflow) | \
470 (1ULL << EF10_STAT_rx_nodesc_drops))
471
472/* These statistics are only provided by the 10G MAC. For a 10G/40G
473 * switchable port we do not expose these because they might not
474 * include all the packets they should.
475 */
476#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \
477 (1ULL << EF10_STAT_tx_lt64) | \
478 (1ULL << EF10_STAT_tx_64) | \
479 (1ULL << EF10_STAT_tx_65_to_127) | \
480 (1ULL << EF10_STAT_tx_128_to_255) | \
481 (1ULL << EF10_STAT_tx_256_to_511) | \
482 (1ULL << EF10_STAT_tx_512_to_1023) | \
483 (1ULL << EF10_STAT_tx_1024_to_15xx) | \
484 (1ULL << EF10_STAT_tx_15xx_to_jumbo))
485
486/* These statistics are only provided by the 40G MAC. For a 10G/40G
487 * switchable port we do expose these because the errors will otherwise
488 * be silent.
489 */
490#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
491 (1ULL << EF10_STAT_rx_length_error))
492
493#if BITS_PER_LONG == 64
494#define STAT_MASK_BITMAP(bits) (bits)
495#else
496#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
497#endif
498
499static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
500{
501 static const unsigned long hunt_40g_stat_mask[] = {
502 STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
503 HUNT_40G_EXTRA_STAT_MASK)
504 };
505 static const unsigned long hunt_10g_only_stat_mask[] = {
506 STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
507 HUNT_10G_ONLY_STAT_MASK)
508 };
509 u32 port_caps = efx_mcdi_phy_get_caps(efx);
510
511 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
512 return hunt_40g_stat_mask;
513 else
514 return hunt_10g_only_stat_mask;
515}
516
517static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
518{
519 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
520 efx_ef10_stat_mask(efx), names);
521}
522
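/* Copy the DMA'd MAC statistics into the driver's stats array. The firmware
 * brackets the statistics block with a generation count; if the counts read
 * before and after the copy differ, a DMA update was in progress and the
 * caller should retry (-EAGAIN).
 */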
523static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
524{
525 struct efx_ef10_nic_data *nic_data = efx->nic_data;
526 const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
527 __le64 generation_start, generation_end;
528 u64 *stats = nic_data->stats;
529 __le64 *dma_stats;
530
531 dma_stats = efx->stats_buffer.addr;
532 nic_data = efx->nic_data;
533
534 generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
535 if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
536 return 0;
537 rmb();
538 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
539 stats, efx->stats_buffer.addr, false);
540 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
541 if (generation_end != generation_start)
542 return -EAGAIN;
543
544 /* Update derived statistics */
545 stats[EF10_STAT_rx_good_bytes] =
546 stats[EF10_STAT_rx_bytes] -
547 stats[EF10_STAT_rx_bytes_minus_good_bytes];
548 efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
549 stats[EF10_STAT_rx_bytes_minus_good_bytes]);
550
551 return 0;
552}
553
554
555static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
556 struct rtnl_link_stats64 *core_stats)
557{
558 const unsigned long *mask = efx_ef10_stat_mask(efx);
559 struct efx_ef10_nic_data *nic_data = efx->nic_data;
560 u64 *stats = nic_data->stats;
561 size_t stats_count = 0, index;
562 int retry;
563
564 /* If we're unlucky enough to read statistics during the DMA, wait
565 * up to 10ms for it to finish (typically takes <500us)
566 */
567 for (retry = 0; retry < 100; ++retry) {
568 if (efx_ef10_try_update_nic_stats(efx) == 0)
569 break;
570 udelay(100);
571 }
572
573 if (full_stats) {
574 for_each_set_bit(index, mask, EF10_STAT_COUNT) {
575 if (efx_ef10_stat_desc[index].name) {
576 *full_stats++ = stats[index];
577 ++stats_count;
578 }
579 }
580 }
581
582 if (core_stats) {
583 core_stats->rx_packets = stats[EF10_STAT_rx_packets];
584 core_stats->tx_packets = stats[EF10_STAT_tx_packets];
585 core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
586 core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
587 core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
588 core_stats->multicast = stats[EF10_STAT_rx_multicast];
589 core_stats->rx_length_errors =
590 stats[EF10_STAT_rx_gtjumbo] +
591 stats[EF10_STAT_rx_length_error];
592 core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
593 core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
594 core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
595 core_stats->rx_errors = (core_stats->rx_length_errors +
596 core_stats->rx_crc_errors +
597 core_stats->rx_frame_errors);
598 }
599
600 return stats_count;
601}
602
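/* Program the event queue timer with the channel's interrupt moderation
 * setting, using the indirect timer register when the bug 35388 workaround
 * is in effect.
 */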
603static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
604{
605 struct efx_nic *efx = channel->efx;
606 unsigned int mode, value;
607 efx_dword_t timer_cmd;
608
609 if (channel->irq_moderation) {
610 mode = 3;
611 value = channel->irq_moderation - 1;
612 } else {
613 mode = 0;
614 value = 0;
615 }
616
617 if (EFX_EF10_WORKAROUND_35388(efx)) {
618 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
619 EFE_DD_EVQ_IND_TIMER_FLAGS,
620 ERF_DD_EVQ_IND_TIMER_MODE, mode,
621 ERF_DD_EVQ_IND_TIMER_VAL, value);
622 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
623 channel->channel);
624 } else {
625 EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
626 ERF_DZ_TC_TIMER_VAL, value);
627 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
628 channel->channel);
629 }
630}
631
632static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
633{
634 wol->supported = 0;
635 wol->wolopts = 0;
636 memset(&wol->sopass, 0, sizeof(wol->sopass));
637}
638
639static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
640{
641 if (type != 0)
642 return -EINVAL;
643 return 0;
644}
645
646static void efx_ef10_mcdi_request(struct efx_nic *efx,
647 const efx_dword_t *hdr, size_t hdr_len,
648 const efx_dword_t *sdu, size_t sdu_len)
649{
650 struct efx_ef10_nic_data *nic_data = efx->nic_data;
651 u8 *pdu = nic_data->mcdi_buf.addr;
652
653 memcpy(pdu, hdr, hdr_len);
654 memcpy(pdu + hdr_len, sdu, sdu_len);
655 wmb();
656
657 /* The hardware provides 'low' and 'high' (doorbell) registers
658 * for passing the 64-bit address of an MCDI request to
659 * firmware. However the dwords are swapped by firmware. The
660 * least significant bits of the doorbell are then 0 for all
661 * MCDI requests due to alignment.
662 */
663 _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
664 ER_DZ_MC_DB_LWRD);
665 _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
666 ER_DZ_MC_DB_HWRD);
667}
668
669static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
670{
671 struct efx_ef10_nic_data *nic_data = efx->nic_data;
672 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
673
674 rmb();
675 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
676}
677
678static void
679efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
680 size_t offset, size_t outlen)
681{
682 struct efx_ef10_nic_data *nic_data = efx->nic_data;
683 const u8 *pdu = nic_data->mcdi_buf.addr;
684
685 memcpy(outbuf, pdu + offset, outlen);
686}
687
688static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
689{
690 struct efx_ef10_nic_data *nic_data = efx->nic_data;
691 int rc;
692
693 rc = efx_ef10_get_warm_boot_count(efx);
694 if (rc < 0) {
695 /* The firmware is presumably in the process of
696 * rebooting. However, we are supposed to report each
697 * reboot just once, so we must only do that once we
698 * can read and store the updated warm boot count.
699 */
700 return 0;
701 }
702
703 if (rc == nic_data->warm_boot_count)
704 return 0;
705
706 nic_data->warm_boot_count = rc;
707
708 /* All our allocations have been reset */
709 nic_data->must_realloc_vis = true;
710 nic_data->must_restore_filters = true;
711 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
712
713 return -EIO;
714}
715
716/* Handle an MSI interrupt
717 *
718 * Handle an MSI hardware interrupt. This routine schedules event
719 * queue processing. No interrupt acknowledgement cycle is necessary.
720 * Also, we never need to check that the interrupt is for us, since
721 * MSI interrupts cannot be shared.
722 */
723static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
724{
725 struct efx_msi_context *context = dev_id;
726 struct efx_nic *efx = context->efx;
727
728 netif_vdbg(efx, intr, efx->net_dev,
729 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
730
731 if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
732 /* Note test interrupts */
733 if (context->index == efx->irq_level)
734 efx->last_irq_cpu = raw_smp_processor_id();
735
736 /* Schedule processing of the channel */
737 efx_schedule_channel_irq(efx->channel[context->index]);
738 }
739
740 return IRQ_HANDLED;
741}
742
743static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
744{
745 struct efx_nic *efx = dev_id;
746 bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
747 struct efx_channel *channel;
748 efx_dword_t reg;
749 u32 queues;
750
751 /* Read the ISR which also ACKs the interrupts */
752 efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
753 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
754
755 if (queues == 0)
756 return IRQ_NONE;
757
758 if (likely(soft_enabled)) {
759 /* Note test interrupts */
760 if (queues & (1U << efx->irq_level))
761 efx->last_irq_cpu = raw_smp_processor_id();
762
763 efx_for_each_channel(channel, efx) {
764 if (queues & 1)
765 efx_schedule_channel_irq(channel);
766 queues >>= 1;
767 }
768 }
769
770 netif_vdbg(efx, intr, efx->net_dev,
771 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
772 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
773
774 return IRQ_HANDLED;
775}
776
777static void efx_ef10_irq_test_generate(struct efx_nic *efx)
778{
779 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
780
781 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
782
783 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
784 (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
785 inbuf, sizeof(inbuf), NULL, 0, NULL);
786}
787
788static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
789{
790 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
791 (tx_queue->ptr_mask + 1) *
792 sizeof(efx_qword_t),
793 GFP_KERNEL);
794}
795
796/* This writes to the TX_DESC_WPTR and also pushes data */
797static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
798 const efx_qword_t *txd)
799{
800 unsigned int write_ptr;
801 efx_oword_t reg;
802
803 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
804 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
805 reg.qword[0] = *txd;
806 efx_writeo_page(tx_queue->efx, &reg,
807 ER_DZ_TX_DESC_UPD, tx_queue->queue);
808}
809
810static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
811{
812 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
813 EFX_BUF_SIZE));
814 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
815 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
816 size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
817 struct efx_channel *channel = tx_queue->channel;
818 struct efx_nic *efx = tx_queue->efx;
819 size_t inlen, outlen;
820 dma_addr_t dma_addr;
821 efx_qword_t *txd;
822 int rc;
823 int i;
824
825 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
826 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
827 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
828 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
829 MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
830 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
831 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
832 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
833 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
834
835 dma_addr = tx_queue->txd.buf.dma_addr;
836
837 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
838 tx_queue->queue, entries, (u64)dma_addr);
839
840 for (i = 0; i < entries; ++i) {
841 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
842 dma_addr += EFX_BUF_SIZE;
843 }
844
845 inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
846
847 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
848 outbuf, sizeof(outbuf), &outlen);
849 if (rc)
850 goto fail;
851
852 /* A previous user of this TX queue might have set us up the
853 * bomb by writing a descriptor to the TX push collector but
854 * not the doorbell. (Each collector belongs to a port, not a
855 * queue or function, so cannot easily be reset.) We must
856 * attempt to push a no-op descriptor in its place.
857 */
858 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
859 tx_queue->insert_count = 1;
860 txd = efx_tx_desc(tx_queue, 0);
861 EFX_POPULATE_QWORD_4(*txd,
862 ESF_DZ_TX_DESC_IS_OPT, true,
863 ESF_DZ_TX_OPTION_TYPE,
864 ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
865 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
866 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
867 tx_queue->write_count = 1;
868 wmb();
869 efx_ef10_push_tx_desc(tx_queue, txd);
870
871 return;
872
873fail:
874 WARN_ON(true);
875 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
876}
877
878static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
879{
880 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
881 MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
882 struct efx_nic *efx = tx_queue->efx;
883 size_t outlen;
884 int rc;
885
886 MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
887 tx_queue->queue);
888
889 rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
890 outbuf, sizeof(outbuf), &outlen);
891
892 if (rc && rc != -EALREADY)
893 goto fail;
894
895 return;
896
897fail:
898 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
899}
900
901static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
902{
903 efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
904}
905
906/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
907static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
908{
909 unsigned int write_ptr;
910 efx_dword_t reg;
911
912 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
913 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
914 efx_writed_page(tx_queue->efx, &reg,
915 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
916}
917
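/* Write out all descriptors queued since the last doorbell, then either
 * push the first new descriptor together with the write pointer (when
 * allowed) or simply notify the hardware of the new write pointer.
 */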
918static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
919{
920 unsigned int old_write_count = tx_queue->write_count;
921 struct efx_tx_buffer *buffer;
922 unsigned int write_ptr;
923 efx_qword_t *txd;
924
925 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
926
927 do {
928 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
929 buffer = &tx_queue->buffer[write_ptr];
930 txd = efx_tx_desc(tx_queue, write_ptr);
931 ++tx_queue->write_count;
932
933 /* Create TX descriptor ring entry */
934 if (buffer->flags & EFX_TX_BUF_OPTION) {
935 *txd = buffer->option;
936 } else {
937 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
938 EFX_POPULATE_QWORD_3(
939 *txd,
940 ESF_DZ_TX_KER_CONT,
941 buffer->flags & EFX_TX_BUF_CONT,
942 ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
943 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
944 }
945 } while (tx_queue->write_count != tx_queue->insert_count);
946
947 wmb(); /* Ensure descriptors are written before they are fetched */
948
949 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
950 txd = efx_tx_desc(tx_queue,
951 old_write_count & tx_queue->ptr_mask);
952 efx_ef10_push_tx_desc(tx_queue, txd);
953 ++tx_queue->pushes;
954 } else {
955 efx_ef10_notify_tx_desc(tx_queue);
956 }
957}
958
959static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
960{
961 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
962 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
963 size_t outlen;
964 int rc;
965
966 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
967 EVB_PORT_ID_ASSIGNED);
968 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
969 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
970 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
971 EFX_MAX_CHANNELS);
972
973 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
974 outbuf, sizeof(outbuf), &outlen);
975 if (rc != 0)
976 return rc;
977
978 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
979 return -EIO;
980
981 *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
982
983 return 0;
984}
985
986static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
987{
988 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
989 int rc;
990
991 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
992 context);
993
994 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
995 NULL, 0, NULL);
996 WARN_ON(rc != 0);
997}
998
999static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
1000{
1001 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
1002 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
1003 int i, rc;
1004
1005 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
1006 context);
1007 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1008 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
1009
1010 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
1011 MCDI_PTR(tablebuf,
1012 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
1013 (u8) efx->rx_indir_table[i];
1014
1015 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
1016 sizeof(tablebuf), NULL, 0, NULL);
1017 if (rc != 0)
1018 return rc;
1019
1020 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
1021 context);
1022 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
1023 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
1024 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
1025 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
1026 efx->rx_hash_key[i];
1027
1028 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
1029 sizeof(keybuf), NULL, 0, NULL);
1030}
1031
1032static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
1033{
1034 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1035
1036 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
1037 efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
1038 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
1039}
1040
1041static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
1042{
1043 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1044 int rc;
1045
1046 netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
1047
1048 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
1049 rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
1050 if (rc != 0)
1051 goto fail;
1052 }
1053
1054 rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
1055 if (rc != 0)
1056 goto fail;
1057
1058 return;
1059
1060fail:
1061 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1062}
1063
1064static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
1065{
1066 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
1067 (rx_queue->ptr_mask + 1) *
1068 sizeof(efx_qword_t),
1069 GFP_KERNEL);
1070}
1071
1072static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
1073{
1074 MCDI_DECLARE_BUF(inbuf,
1075 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
1076 EFX_BUF_SIZE));
1077 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
1078 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
1079 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
1080 struct efx_nic *efx = rx_queue->efx;
1081 size_t inlen, outlen;
1082 dma_addr_t dma_addr;
1083 int rc;
1084 int i;
1085
1086 rx_queue->scatter_n = 0;
1087 rx_queue->scatter_len = 0;
1088
1089 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
1090 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
1091 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
1092 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
1093 efx_rx_queue_index(rx_queue));
1094 MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
1095 INIT_RXQ_IN_FLAG_PREFIX, 1);
1096 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
1097 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1098
1099 dma_addr = rx_queue->rxd.buf.dma_addr;
1100
1101 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
1102 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
1103
1104 for (i = 0; i < entries; ++i) {
1105 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
1106 dma_addr += EFX_BUF_SIZE;
1107 }
1108
1109 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
1110
1111 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
1112 outbuf, sizeof(outbuf), &outlen);
1113 if (rc)
1114 goto fail;
1115
1116 return;
1117
1118fail:
1119 WARN_ON(true);
1120 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1121}
1122
1123static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
1124{
1125 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
1126 MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
1127 struct efx_nic *efx = rx_queue->efx;
1128 size_t outlen;
1129 int rc;
1130
1131 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
1132 efx_rx_queue_index(rx_queue));
1133
1134 rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
1135 outbuf, sizeof(outbuf), &outlen);
1136
1137 if (rc && rc != -EALREADY)
1138 goto fail;
1139
1140 return;
1141
1142fail:
1143 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1144}
1145
1146static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
1147{
1148 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
1149}
1150
1151/* This creates an entry in the RX descriptor queue */
1152static inline void
1153efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
1154{
1155 struct efx_rx_buffer *rx_buf;
1156 efx_qword_t *rxd;
1157
1158 rxd = efx_rx_desc(rx_queue, index);
1159 rx_buf = efx_rx_buffer(rx_queue, index);
1160 EFX_POPULATE_QWORD_2(*rxd,
1161 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
1162 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
1163}
1164
1165static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
1166{
1167 struct efx_nic *efx = rx_queue->efx;
1168 unsigned int write_count;
1169 efx_dword_t reg;
1170
1171 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
1172 write_count = rx_queue->added_count & ~7;
1173 if (rx_queue->notified_count == write_count)
1174 return;
1175
1176 do
1177 efx_ef10_build_rx_desc(
1178 rx_queue,
1179 rx_queue->notified_count & rx_queue->ptr_mask);
1180 while (++rx_queue->notified_count != write_count);
1181
1182 wmb();
1183 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
1184 write_count & rx_queue->ptr_mask);
1185 efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
1186 efx_rx_queue_index(rx_queue));
1187}
1188
1189static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
1190
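/* Ask the MC to deliver a driver-generated event on this channel's event
 * queue; the event handler then refills the RX queue from event-processing
 * context.
 */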
1191static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
1192{
1193 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
1194 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
1195 efx_qword_t event;
1196
1197 EFX_POPULATE_QWORD_2(event,
1198 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
1199 ESF_DZ_EV_DATA, EFX_EF10_REFILL);
1200
1201 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
1202
1203 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
1204 * already swapped the data to little-endian order.
1205 */
1206 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
1207 sizeof(efx_qword_t));
1208
1209 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
1210 inbuf, sizeof(inbuf), 0,
1211 efx_ef10_rx_defer_refill_complete, 0);
1212}
1213
1214static void
1215efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
1216 int rc, efx_dword_t *outbuf,
1217 size_t outlen_actual)
1218{
1219 /* nothing to do */
1220}
1221
1222static int efx_ef10_ev_probe(struct efx_channel *channel)
1223{
1224 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
1225 (channel->eventq_mask + 1) *
1226 sizeof(efx_qword_t),
1227 GFP_KERNEL);
1228}
1229
1230static int efx_ef10_ev_init(struct efx_channel *channel)
1231{
1232 MCDI_DECLARE_BUF(inbuf,
1233 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
1234 EFX_BUF_SIZE));
1235 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
1236 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
1237 struct efx_nic *efx = channel->efx;
1238 struct efx_ef10_nic_data *nic_data;
1239 bool supports_rx_merge;
1240 size_t inlen, outlen;
1241 dma_addr_t dma_addr;
1242 int rc;
1243 int i;
1244
1245 nic_data = efx->nic_data;
1246 supports_rx_merge =
1247 !!(nic_data->datapath_caps &
1248 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
1249
1250 /* Fill event queue with all ones (i.e. empty events) */
1251 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1252
1253 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
1254 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
1255 /* INIT_EVQ expects index in vector table, not absolute */
1256 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
1257 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
1258 INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
1259 INIT_EVQ_IN_FLAG_RX_MERGE, 1,
1260 INIT_EVQ_IN_FLAG_TX_MERGE, 1,
1261 INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
1262 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
1263 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
1264 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
1265 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
1266 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
1267 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
1268 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
1269
1270 dma_addr = channel->eventq.buf.dma_addr;
1271 for (i = 0; i < entries; ++i) {
1272 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
1273 dma_addr += EFX_BUF_SIZE;
1274 }
1275
1276 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
1277
1278 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
1279 outbuf, sizeof(outbuf), &outlen);
1280 if (rc)
1281 goto fail;
1282
1283 /* IRQ return is ignored */
1284
1285 return 0;
1286
1287fail:
1288 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1289 return rc;
1290}
1291
1292static void efx_ef10_ev_fini(struct efx_channel *channel)
1293{
1294 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
1295 MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
1296 struct efx_nic *efx = channel->efx;
1297 size_t outlen;
1298 int rc;
1299
1300 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
1301
1302 rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
1303 outbuf, sizeof(outbuf), &outlen);
1304
1305 if (rc && rc != -EALREADY)
1306 goto fail;
1307
1308 return;
1309
1310fail:
1311 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1312}
1313
1314static void efx_ef10_ev_remove(struct efx_channel *channel)
1315{
1316 efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
1317}
1318
1319static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
1320 unsigned int rx_queue_label)
1321{
1322 struct efx_nic *efx = rx_queue->efx;
1323
1324 netif_info(efx, hw, efx->net_dev,
1325 "rx event arrived on queue %d labeled as queue %u\n",
1326 efx_rx_queue_index(rx_queue), rx_queue_label);
1327
1328 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1329}
1330
1331static void
1332efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
1333 unsigned int actual, unsigned int expected)
1334{
1335 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
1336 struct efx_nic *efx = rx_queue->efx;
1337
1338 netif_info(efx, hw, efx->net_dev,
1339 "dropped %d events (index=%d expected=%d)\n",
1340 dropped, actual, expected);
1341
1342 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1343}
1344
1345/* partially received RX was aborted. clean up. */
1346static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
1347{
1348 unsigned int rx_desc_ptr;
1349
1350 WARN_ON(rx_queue->scatter_n == 0);
1351
1352 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
1353 "scattered RX aborted (dropping %u buffers)\n",
1354 rx_queue->scatter_n);
1355
1356 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
1357
1358 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
1359 0, EFX_RX_PKT_DISCARD);
1360
1361 rx_queue->removed_count += rx_queue->scatter_n;
1362 rx_queue->scatter_n = 0;
1363 rx_queue->scatter_len = 0;
1364 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
1365}
1366
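/* Handle an RX completion event. An event may describe one buffer of a
 * scattered packet, the final buffer of a packet, or a merged completion
 * covering several whole packets; returns the number of packets passed to
 * the RX path.
 */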
1367static int efx_ef10_handle_rx_event(struct efx_channel *channel,
1368 const efx_qword_t *event)
1369{
1370 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
1371 unsigned int n_descs, n_packets, i;
1372 struct efx_nic *efx = channel->efx;
1373 struct efx_rx_queue *rx_queue;
1374 bool rx_cont;
1375 u16 flags = 0;
1376
1377 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1378 return 0;
1379
1380 /* Basic packet information */
1381 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
1382 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
1383 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
1384 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
1385 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
1386
1387 WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
1388
1389 rx_queue = efx_channel_get_rx_queue(channel);
1390
1391 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
1392 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
1393
1394 n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
1395 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
1396
1397 if (n_descs != rx_queue->scatter_n + 1) {
1398 /* detect rx abort */
1399 if (unlikely(n_descs == rx_queue->scatter_n)) {
1400 WARN_ON(rx_bytes != 0);
1401 efx_ef10_handle_rx_abort(rx_queue);
1402 return 0;
1403 }
1404
1405 if (unlikely(rx_queue->scatter_n != 0)) {
1406 /* Scattered packet completions cannot be
1407 * merged, so something has gone wrong.
1408 */
1409 efx_ef10_handle_rx_bad_lbits(
1410 rx_queue, next_ptr_lbits,
1411 (rx_queue->removed_count +
1412 rx_queue->scatter_n + 1) &
1413 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
1414 return 0;
1415 }
1416
1417 /* Merged completion for multiple non-scattered packets */
1418 rx_queue->scatter_n = 1;
1419 rx_queue->scatter_len = 0;
1420 n_packets = n_descs;
1421 ++channel->n_rx_merge_events;
1422 channel->n_rx_merge_packets += n_packets;
1423 flags |= EFX_RX_PKT_PREFIX_LEN;
1424 } else {
1425 ++rx_queue->scatter_n;
1426 rx_queue->scatter_len += rx_bytes;
1427 if (rx_cont)
1428 return 0;
1429 n_packets = 1;
1430 }
1431
1432 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
1433 flags |= EFX_RX_PKT_DISCARD;
1434
1435 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
1436 channel->n_rx_ip_hdr_chksum_err += n_packets;
1437 } else if (unlikely(EFX_QWORD_FIELD(*event,
1438 ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
1439 channel->n_rx_tcp_udp_chksum_err += n_packets;
1440 } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
1441 rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
1442 flags |= EFX_RX_PKT_CSUMMED;
1443 }
1444
1445 if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
1446 flags |= EFX_RX_PKT_TCP;
1447
1448 channel->irq_mod_score += 2 * n_packets;
1449
1450 /* Handle received packet(s) */
1451 for (i = 0; i < n_packets; i++) {
1452 efx_rx_packet(rx_queue,
1453 rx_queue->removed_count & rx_queue->ptr_mask,
1454 rx_queue->scatter_n, rx_queue->scatter_len,
1455 flags);
1456 rx_queue->removed_count += rx_queue->scatter_n;
1457 }
1458
1459 rx_queue->scatter_n = 0;
1460 rx_queue->scatter_len = 0;
1461
1462 return n_packets;
1463}
1464
1465static int
1466efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
1467{
1468 struct efx_nic *efx = channel->efx;
1469 struct efx_tx_queue *tx_queue;
1470 unsigned int tx_ev_desc_ptr;
1471 unsigned int tx_ev_q_label;
1472 int tx_descs = 0;
1473
1474 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1475 return 0;
1476
1477 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
1478 return 0;
1479
1480 /* Transmit completion */
1481 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
1482 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
1483 tx_queue = efx_channel_get_tx_queue(channel,
1484 tx_ev_q_label % EFX_TXQ_TYPES);
1485 tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
1486 tx_queue->ptr_mask);
1487 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
1488
1489 return tx_descs;
1490}
1491
1492static void
1493efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1494{
1495 struct efx_nic *efx = channel->efx;
1496 int subcode;
1497
1498 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
1499
1500 switch (subcode) {
1501 case ESE_DZ_DRV_TIMER_EV:
1502 case ESE_DZ_DRV_WAKE_UP_EV:
1503 break;
1504 case ESE_DZ_DRV_START_UP_EV:
1505 /* event queue init complete. ok. */
1506 break;
1507 default:
1508 netif_err(efx, hw, efx->net_dev,
1509 "channel %d unknown driver event type %d"
1510 " (data " EFX_QWORD_FMT ")\n",
1511 channel->channel, subcode,
1512 EFX_QWORD_VAL(*event));
1513
1514 }
1515}
1516
1517static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
1518 efx_qword_t *event)
1519{
1520 struct efx_nic *efx = channel->efx;
1521 u32 subcode;
1522
1523 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
1524
1525 switch (subcode) {
1526 case EFX_EF10_TEST:
1527 channel->event_test_cpu = raw_smp_processor_id();
1528 break;
1529 case EFX_EF10_REFILL:
1530 /* The queue must be empty, so we won't receive any rx
1531 * events, so efx_process_channel() won't refill the
1532 * queue. Refill it here
1533 */
1534 efx_fast_push_rx_descriptors(&channel->rx_queue);
1535 break;
1536 default:
1537 netif_err(efx, hw, efx->net_dev,
1538 "channel %d unknown driver event type %u"
1539 " (data " EFX_QWORD_FMT ")\n",
1540 channel->channel, (unsigned) subcode,
1541 EFX_QWORD_VAL(*event));
1542 }
1543}
1544
1545static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
1546{
1547 struct efx_nic *efx = channel->efx;
1548 efx_qword_t event, *p_event;
1549 unsigned int read_ptr;
1550 int ev_code;
1551 int tx_descs = 0;
1552 int spent = 0;
1553
1554 read_ptr = channel->eventq_read_ptr;
1555
1556 for (;;) {
1557 p_event = efx_event(channel, read_ptr);
1558 event = *p_event;
1559
1560 if (!efx_event_present(&event))
1561 break;
1562
1563 EFX_SET_QWORD(*p_event);
1564
1565 ++read_ptr;
1566
1567 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
1568
1569 netif_vdbg(efx, drv, efx->net_dev,
1570 "processing event on %d " EFX_QWORD_FMT "\n",
1571 channel->channel, EFX_QWORD_VAL(event));
1572
1573 switch (ev_code) {
1574 case ESE_DZ_EV_CODE_MCDI_EV:
1575 efx_mcdi_process_event(channel, &event);
1576 break;
1577 case ESE_DZ_EV_CODE_RX_EV:
1578 spent += efx_ef10_handle_rx_event(channel, &event);
1579 if (spent >= quota) {
1580 /* XXX can we split a merged event to
1581 * avoid going over-quota?
1582 */
1583 spent = quota;
1584 goto out;
1585 }
1586 break;
1587 case ESE_DZ_EV_CODE_TX_EV:
1588 tx_descs += efx_ef10_handle_tx_event(channel, &event);
1589 if (tx_descs > efx->txq_entries) {
1590 spent = quota;
1591 goto out;
1592 } else if (++spent == quota) {
1593 goto out;
1594 }
1595 break;
1596 case ESE_DZ_EV_CODE_DRIVER_EV:
1597 efx_ef10_handle_driver_event(channel, &event);
1598 if (++spent == quota)
1599 goto out;
1600 break;
1601 case EFX_EF10_DRVGEN_EV:
1602 efx_ef10_handle_driver_generated_event(channel, &event);
1603 break;
1604 default:
1605 netif_err(efx, hw, efx->net_dev,
1606 "channel %d unknown event type %d"
1607 " (data " EFX_QWORD_FMT ")\n",
1608 channel->channel, ev_code,
1609 EFX_QWORD_VAL(event));
1610 }
1611 }
1612
1613out:
1614 channel->eventq_read_ptr = read_ptr;
1615 return spent;
1616}
1617
1618static void efx_ef10_ev_read_ack(struct efx_channel *channel)
1619{
1620 struct efx_nic *efx = channel->efx;
1621 efx_dword_t rptr;
1622
1623 if (EFX_EF10_WORKAROUND_35388(efx)) {
1624 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
1625 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
1626 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
1627 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
1628
1629 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
1630 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
1631 ERF_DD_EVQ_IND_RPTR,
1632 (channel->eventq_read_ptr &
1633 channel->eventq_mask) >>
1634 ERF_DD_EVQ_IND_RPTR_WIDTH);
1635 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
1636 channel->channel);
1637 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
1638 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
1639 ERF_DD_EVQ_IND_RPTR,
1640 channel->eventq_read_ptr &
1641 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
1642 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
1643 channel->channel);
1644 } else {
1645 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
1646 channel->eventq_read_ptr &
1647 channel->eventq_mask);
1648 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
1649 }
1650}
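
When workaround 35388 is in effect, the function above pushes the event-queue
read pointer through the indirect doorbell as two dword writes: the bits above
ERF_DD_EVQ_IND_RPTR_WIDTH first (flagged HIGH), then the remaining low bits
(flagged LOW). The standalone sketch below shows only that split; the 12-bit
width and the 8192-entry mask are illustrative values, not taken from this
patch.

	#include <assert.h>

	/* Illustrative width only; the driver takes it from ef10_regs.h */
	#define IND_RPTR_WIDTH 12

	static void split_rptr(unsigned int rptr, unsigned int evq_mask,
			       unsigned int *high, unsigned int *low)
	{
		*high = (rptr & evq_mask) >> IND_RPTR_WIDTH;	/* written first */
		*low  = rptr & ((1u << IND_RPTR_WIDTH) - 1);	/* written second */
	}

	int main(void)
	{
		unsigned int high, low;

		split_rptr(0x1abc, 0x1fff, &high, &low);	/* 8192-entry queue */
		assert(high == 0x1 && low == 0xabc);
		/* Reassembling the halves gives back the masked pointer */
		assert(((high << IND_RPTR_WIDTH) | low) == (0x1abc & 0x1fff));
		return 0;
	}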
1651
1652static void efx_ef10_ev_test_generate(struct efx_channel *channel)
1653{
1654 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
1655 struct efx_nic *efx = channel->efx;
1656 efx_qword_t event;
1657 int rc;
1658
1659 EFX_POPULATE_QWORD_2(event,
1660 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
1661 ESF_DZ_EV_DATA, EFX_EF10_TEST);
1662
1663 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
1664
1665 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
1666 * already swapped the data to little-endian order.
1667 */
1668 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
1669 sizeof(efx_qword_t));
1670
1671 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
1672 NULL, 0, NULL);
1673 if (rc != 0)
1674 goto fail;
1675
1676 return;
1677
1678fail:
1679 WARN_ON(true);
1680 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1681}
1682
1683void efx_ef10_handle_drain_event(struct efx_nic *efx)
1684{
1685 if (atomic_dec_and_test(&efx->active_queues))
1686 wake_up(&efx->flush_wq);
1687
1688 WARN_ON(atomic_read(&efx->active_queues) < 0);
1689}
1690
1691static int efx_ef10_fini_dmaq(struct efx_nic *efx)
1692{
1693 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1694 struct efx_channel *channel;
1695 struct efx_tx_queue *tx_queue;
1696 struct efx_rx_queue *rx_queue;
1697 int pending;
1698
1699 /* If the MC has just rebooted, the TX/RX queues will have already been
1700 * torn down, but efx->active_queues needs to be set to zero.
1701 */
1702 if (nic_data->must_realloc_vis) {
1703 atomic_set(&efx->active_queues, 0);
1704 return 0;
1705 }
1706
1707 /* Do not attempt to write to the NIC during EEH recovery */
1708 if (efx->state != STATE_RECOVERY) {
1709 efx_for_each_channel(channel, efx) {
1710 efx_for_each_channel_rx_queue(rx_queue, channel)
1711 efx_ef10_rx_fini(rx_queue);
1712 efx_for_each_channel_tx_queue(tx_queue, channel)
1713 efx_ef10_tx_fini(tx_queue);
1714 }
1715
1716 wait_event_timeout(efx->flush_wq,
1717 atomic_read(&efx->active_queues) == 0,
1718 msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
1719 pending = atomic_read(&efx->active_queues);
1720 if (pending) {
1721 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
1722 pending);
1723 return -ETIMEDOUT;
1724 }
1725 }
1726
1727 return 0;
1728}
1729
1730static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
1731 const struct efx_filter_spec *right)
1732{
1733 if ((left->match_flags ^ right->match_flags) |
1734 ((left->flags ^ right->flags) &
1735 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
1736 return false;
1737
1738 return memcmp(&left->outer_vid, &right->outer_vid,
1739 sizeof(struct efx_filter_spec) -
1740 offsetof(struct efx_filter_spec, outer_vid)) == 0;
1741}
1742
1743static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
1744{
1745 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
1746 return jhash2((const u32 *)&spec->outer_vid,
1747 (sizeof(struct efx_filter_spec) -
1748 offsetof(struct efx_filter_spec, outer_vid)) / 4,
1749 0);
1750 /* XXX should we randomise the initval? */
1751}
1752
1753/* Decide whether a filter should be exclusive or else should allow
1754 * delivery to additional recipients. Currently we decide that
1755 * filters for specific local unicast MAC and IP addresses are
1756 * exclusive.
1757 */
1758static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
1759{
1760 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
1761 !is_multicast_ether_addr(spec->loc_mac))
1762 return true;
1763
1764 if ((spec->match_flags &
1765 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
1766 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
1767 if (spec->ether_type == htons(ETH_P_IP) &&
1768 !ipv4_is_multicast(spec->loc_host[0]))
1769 return true;
1770 if (spec->ether_type == htons(ETH_P_IPV6) &&
1771 ((const u8 *)spec->loc_host)[0] != 0xff)
1772 return true;
1773 }
1774
1775 return false;
1776}
1777
1778static struct efx_filter_spec *
1779efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
1780 unsigned int filter_idx)
1781{
1782 return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
1783 ~EFX_EF10_FILTER_FLAGS);
1784}
1785
1786static unsigned int
1787efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
1788 unsigned int filter_idx)
1789{
1790 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
1791}
1792
1793static void
1794efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
1795 unsigned int filter_idx,
1796 const struct efx_filter_spec *spec,
1797 unsigned int flags)
1798{
1799 table->entry[filter_idx].spec = (unsigned long)spec | flags;
1800}
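
The three helpers above keep each table entry's efx_filter_spec pointer and its
private flag bits in a single unsigned long, relying on the kmalloc()ed spec
being aligned so the low address bits are free to carry flags such as
EFX_EF10_FILTER_FLAG_BUSY. A minimal sketch of that tagged-pointer idiom,
outside the driver and with made-up names:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Made-up names; the driver uses EFX_EF10_FILTER_FLAG_* instead */
	#define ENTRY_FLAG_BUSY	0x1UL
	#define ENTRY_FLAGS	0x3UL	/* low pointer bits reused as flags */

	struct spec { int priority; };

	int main(void)
	{
		struct spec *spec = malloc(sizeof(*spec));
		uintptr_t entry;

		/* Store the pointer and mark the entry busy in one word */
		entry = (uintptr_t)spec | ENTRY_FLAG_BUSY;

		/* Recover the pointer and the flags independently */
		struct spec *saved = (struct spec *)(entry & ~ENTRY_FLAGS);
		unsigned long flags = entry & ENTRY_FLAGS;

		printf("busy=%lu same=%d\n", flags & ENTRY_FLAG_BUSY, saved == spec);
		free(spec);
		return 0;
	}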
1801
1802static void efx_ef10_filter_push_prep(struct efx_nic *efx,
1803 const struct efx_filter_spec *spec,
1804 efx_dword_t *inbuf, u64 handle,
1805 bool replacing)
1806{
1807 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1808
1809 memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
1810
1811 if (replacing) {
1812 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
1813 MC_CMD_FILTER_OP_IN_OP_REPLACE);
1814 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
1815 } else {
1816 u32 match_fields = 0;
1817
1818 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
1819 efx_ef10_filter_is_exclusive(spec) ?
1820 MC_CMD_FILTER_OP_IN_OP_INSERT :
1821 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
1822
1823 /* Convert match flags and values. Unlike almost
1824 * everything else in MCDI, these fields are in
1825 * network byte order.
1826 */
1827 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
1828 match_fields |=
1829 is_multicast_ether_addr(spec->loc_mac) ?
1830 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
1831 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
1832#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
1833 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
1834 match_fields |= \
1835 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
1836 mcdi_field ## _LBN; \
1837 BUILD_BUG_ON( \
1838 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
1839 sizeof(spec->gen_field)); \
1840 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
1841 &spec->gen_field, sizeof(spec->gen_field)); \
1842 }
1843 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
1844 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
1845 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
1846 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
1847 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
1848 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
1849 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
1850 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
1851 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
1852 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
1853#undef COPY_FIELD
1854 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
1855 match_fields);
1856 }
1857
1858 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1859 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
1860 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
1861 MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
1862 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
1863 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
1864 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
1865 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
1866 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
1867 (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
1868 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
1869 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
1870 if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
1871 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
1872 spec->rss_context !=
1873 EFX_FILTER_RSS_CONTEXT_DEFAULT ?
1874 spec->rss_context : nic_data->rx_rss_context);
1875}
1876
1877static int efx_ef10_filter_push(struct efx_nic *efx,
1878 const struct efx_filter_spec *spec,
1879 u64 *handle, bool replacing)
1880{
1881 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
1882 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
1883 int rc;
1884
1885 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
1886 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
1887 outbuf, sizeof(outbuf), NULL);
1888 if (rc == 0)
1889 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
1890 return rc;
1891}
1892
1893static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
1894 enum efx_filter_match_flags match_flags)
1895{
1896 unsigned int match_pri;
1897
1898 for (match_pri = 0;
1899 match_pri < table->rx_match_count;
1900 match_pri++)
1901 if (table->rx_match_flags[match_pri] == match_flags)
1902 return match_pri;
1903
1904 return -EPROTONOSUPPORT;
1905}
1906
1907static s32 efx_ef10_filter_insert(struct efx_nic *efx,
1908 struct efx_filter_spec *spec,
1909 bool replace_equal)
1910{
1911 struct efx_ef10_filter_table *table = efx->filter_state;
1912 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
1913 struct efx_filter_spec *saved_spec;
1914 unsigned int match_pri, hash;
1915 unsigned int priv_flags;
1916 bool replacing = false;
1917 int ins_index = -1;
1918 DEFINE_WAIT(wait);
1919 bool is_mc_recip;
1920 s32 rc;
1921
1922 /* For now, only support RX filters */
1923 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
1924 EFX_FILTER_FLAG_RX)
1925 return -EINVAL;
1926
1927 rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
1928 if (rc < 0)
1929 return rc;
1930 match_pri = rc;
1931
1932 hash = efx_ef10_filter_hash(spec);
1933 is_mc_recip = efx_filter_is_mc_recipient(spec);
1934 if (is_mc_recip)
1935 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
1936
1937 /* Find any existing filters with the same match tuple or
1938 * else a free slot to insert at. If any of them are busy,
1939 * we have to wait and retry.
1940 */
1941 for (;;) {
1942 unsigned int depth = 1;
1943 unsigned int i;
1944
1945 spin_lock_bh(&efx->filter_lock);
1946
1947 for (;;) {
1948 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
1949 saved_spec = efx_ef10_filter_entry_spec(table, i);
1950
1951 if (!saved_spec) {
1952 if (ins_index < 0)
1953 ins_index = i;
1954 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
1955 if (table->entry[i].spec &
1956 EFX_EF10_FILTER_FLAG_BUSY)
1957 break;
1958 if (spec->priority < saved_spec->priority &&
1959 !(saved_spec->priority ==
1960 EFX_FILTER_PRI_REQUIRED &&
1961 saved_spec->flags &
1962 EFX_FILTER_FLAG_RX_STACK)) {
1963 rc = -EPERM;
1964 goto out_unlock;
1965 }
1966 if (!is_mc_recip) {
1967 /* This is the only one */
1968 if (spec->priority ==
1969 saved_spec->priority &&
1970 !replace_equal) {
1971 rc = -EEXIST;
1972 goto out_unlock;
1973 }
1974 ins_index = i;
1975 goto found;
1976 } else if (spec->priority >
1977 saved_spec->priority ||
1978 (spec->priority ==
1979 saved_spec->priority &&
1980 replace_equal)) {
1981 if (ins_index < 0)
1982 ins_index = i;
1983 else
1984 __set_bit(depth, mc_rem_map);
1985 }
1986 }
1987
1988 /* Once we reach the maximum search depth, use
1989 * the first suitable slot or return -EBUSY if
1990 * there was none
1991 */
1992 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
1993 if (ins_index < 0) {
1994 rc = -EBUSY;
1995 goto out_unlock;
1996 }
1997 goto found;
1998 }
1999
2000 ++depth;
2001 }
2002
2003 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2004 spin_unlock_bh(&efx->filter_lock);
2005 schedule();
2006 }
2007
2008found:
2009 /* Create a software table entry if necessary, and mark it
2010 * busy. We might yet fail to insert, but any attempt to
2011 * insert a conflicting filter while we're waiting for the
2012 * firmware must find the busy entry.
2013 */
2014 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2015 if (saved_spec) {
2016 if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
2017 /* Just make sure it won't be removed */
2018 saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
2019 table->entry[ins_index].spec &=
2020 ~EFX_EF10_FILTER_FLAG_STACK_OLD;
2021 rc = ins_index;
2022 goto out_unlock;
2023 }
2024 replacing = true;
2025 priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
2026 } else {
2027 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2028 if (!saved_spec) {
2029 rc = -ENOMEM;
2030 goto out_unlock;
2031 }
2032 *saved_spec = *spec;
2033 priv_flags = 0;
2034 }
2035 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2036 priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
2037
2038 /* Mark lower-priority multicast recipients busy prior to removal */
2039 if (is_mc_recip) {
2040 unsigned int depth, i;
2041
2042 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
2043 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2044 if (test_bit(depth, mc_rem_map))
2045 table->entry[i].spec |=
2046 EFX_EF10_FILTER_FLAG_BUSY;
2047 }
2048 }
2049
2050 spin_unlock_bh(&efx->filter_lock);
2051
2052 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
2053 replacing);
2054
2055 /* Finalise the software table entry */
2056 spin_lock_bh(&efx->filter_lock);
2057 if (rc == 0) {
2058 if (replacing) {
2059 /* Update the fields that may differ */
2060 saved_spec->priority = spec->priority;
2061 saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
2062 saved_spec->flags |= spec->flags;
2063 saved_spec->rss_context = spec->rss_context;
2064 saved_spec->dmaq_id = spec->dmaq_id;
2065 }
2066 } else if (!replacing) {
2067 kfree(saved_spec);
2068 saved_spec = NULL;
2069 }
2070 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
2071
2072 /* Remove and finalise entries for lower-priority multicast
2073 * recipients
2074 */
2075 if (is_mc_recip) {
2076 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2077 unsigned int depth, i;
2078
2079 memset(inbuf, 0, sizeof(inbuf));
2080
2081 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
2082 if (!test_bit(depth, mc_rem_map))
2083 continue;
2084
2085 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2086 saved_spec = efx_ef10_filter_entry_spec(table, i);
2087 priv_flags = efx_ef10_filter_entry_flags(table, i);
2088
2089 if (rc == 0) {
2090 spin_unlock_bh(&efx->filter_lock);
2091 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2092 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2093 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2094 table->entry[i].handle);
2095 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2096 inbuf, sizeof(inbuf),
2097 NULL, 0, NULL);
2098 spin_lock_bh(&efx->filter_lock);
2099 }
2100
2101 if (rc == 0) {
2102 kfree(saved_spec);
2103 saved_spec = NULL;
2104 priv_flags = 0;
2105 } else {
2106 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
2107 }
2108 efx_ef10_filter_set_entry(table, i, saved_spec,
2109 priv_flags);
2110 }
2111 }
2112
2113 /* If successful, return the inserted filter ID */
2114 if (rc == 0)
2115 rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
2116
2117 wake_up_all(&table->waitq);
2118out_unlock:
2119 spin_unlock_bh(&efx->filter_lock);
2120 finish_wait(&table->waitq, &wait);
2121 return rc;
2122}
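
On success the function above returns a filter ID computed as
match_pri * HUNT_FILTER_TBL_ROWS + ins_index; the remove and get paths below
recover the row with % and check the match priority with /. A self-contained
round-trip check of just that encoding, using an illustrative table size
rather than the driver's constant:

	#include <assert.h>

	#define TBL_ROWS 8192u	/* stands in for HUNT_FILTER_TBL_ROWS */

	int main(void)
	{
		unsigned int match_pri = 3, index = 157;
		unsigned int id = match_pri * TBL_ROWS + index;

		assert(id % TBL_ROWS == index);		/* row within the table */
		assert(id / TBL_ROWS == match_pri);	/* RX match priority */
		return 0;
	}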
2123
2124void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
2125{
2126 /* no need to do anything here on EF10 */
2127}
2128
2129/* Remove a filter.
2130 * If !stack_requested, remove by ID
2131 * If stack_requested, remove by index
2132 * Filter ID may come from userland and must be range-checked.
2133 */
2134static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
2135 enum efx_filter_priority priority,
2136 u32 filter_id, bool stack_requested)
2137{
2138 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2139 struct efx_ef10_filter_table *table = efx->filter_state;
2140 MCDI_DECLARE_BUF(inbuf,
2141 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2142 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2143 struct efx_filter_spec *spec;
2144 DEFINE_WAIT(wait);
2145 int rc;
2146
2147 /* Find the software table entry and mark it busy. Don't
2148 * remove it yet; any attempt to update while we're waiting
2149 * for the firmware must find the busy entry.
2150 */
2151 for (;;) {
2152 spin_lock_bh(&efx->filter_lock);
2153 if (!(table->entry[filter_idx].spec &
2154 EFX_EF10_FILTER_FLAG_BUSY))
2155 break;
2156 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2157 spin_unlock_bh(&efx->filter_lock);
2158 schedule();
2159 }
2160 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2161 if (!spec || spec->priority > priority ||
2162 (!stack_requested &&
2163 efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
2164 filter_id / HUNT_FILTER_TBL_ROWS)) {
2165 rc = -ENOENT;
2166 goto out_unlock;
2167 }
2168 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2169 spin_unlock_bh(&efx->filter_lock);
2170
2171 if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
2172 /* Reset steering of a stack-owned filter */
2173
2174 struct efx_filter_spec new_spec = *spec;
2175
2176 new_spec.priority = EFX_FILTER_PRI_REQUIRED;
2177 new_spec.flags = (EFX_FILTER_FLAG_RX |
2178 EFX_FILTER_FLAG_RX_RSS |
2179 EFX_FILTER_FLAG_RX_STACK);
2180 new_spec.dmaq_id = 0;
2181 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
2182 rc = efx_ef10_filter_push(efx, &new_spec,
2183 &table->entry[filter_idx].handle,
2184 true);
2185
2186 spin_lock_bh(&efx->filter_lock);
2187 if (rc == 0)
2188 *spec = new_spec;
2189 } else {
2190 /* Really remove the filter */
2191
2192 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2193 efx_ef10_filter_is_exclusive(spec) ?
2194 MC_CMD_FILTER_OP_IN_OP_REMOVE :
2195 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2196 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2197 table->entry[filter_idx].handle);
2198 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2199 inbuf, sizeof(inbuf), NULL, 0, NULL);
2200
2201 spin_lock_bh(&efx->filter_lock);
2202 if (rc == 0) {
2203 kfree(spec);
2204 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2205 }
2206 }
2207 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2208 wake_up_all(&table->waitq);
2209out_unlock:
2210 spin_unlock_bh(&efx->filter_lock);
2211 finish_wait(&table->waitq, &wait);
2212 return rc;
2213}
2214
2215static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
2216 enum efx_filter_priority priority,
2217 u32 filter_id)
2218{
2219 return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
2220}
2221
2222static int efx_ef10_filter_get_safe(struct efx_nic *efx,
2223 enum efx_filter_priority priority,
2224 u32 filter_id, struct efx_filter_spec *spec)
2225{
2226 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2227 struct efx_ef10_filter_table *table = efx->filter_state;
2228 const struct efx_filter_spec *saved_spec;
2229 int rc;
2230
2231 spin_lock_bh(&efx->filter_lock);
2232 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
2233 if (saved_spec && saved_spec->priority == priority &&
2234 efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
2235 filter_id / HUNT_FILTER_TBL_ROWS) {
2236 *spec = *saved_spec;
2237 rc = 0;
2238 } else {
2239 rc = -ENOENT;
2240 }
2241 spin_unlock_bh(&efx->filter_lock);
2242 return rc;
2243}
2244
2245static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
2246 enum efx_filter_priority priority)
2247{
2248 /* TODO */
2249}
2250
2251static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
2252 enum efx_filter_priority priority)
2253{
2254 struct efx_ef10_filter_table *table = efx->filter_state;
2255 unsigned int filter_idx;
2256 s32 count = 0;
2257
2258 spin_lock_bh(&efx->filter_lock);
2259 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2260 if (table->entry[filter_idx].spec &&
2261 efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
2262 priority)
2263 ++count;
2264 }
2265 spin_unlock_bh(&efx->filter_lock);
2266 return count;
2267}
2268
2269static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
2270{
2271 struct efx_ef10_filter_table *table = efx->filter_state;
2272
2273 return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
2274}
2275
2276static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
2277 enum efx_filter_priority priority,
2278 u32 *buf, u32 size)
2279{
2280 struct efx_ef10_filter_table *table = efx->filter_state;
2281 struct efx_filter_spec *spec;
2282 unsigned int filter_idx;
2283 s32 count = 0;
2284
2285 spin_lock_bh(&efx->filter_lock);
2286 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2287 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2288 if (spec && spec->priority == priority) {
2289 if (count == size) {
2290 count = -EMSGSIZE;
2291 break;
2292 }
2293 buf[count++] = (efx_ef10_filter_rx_match_pri(
2294 table, spec->match_flags) *
2295 HUNT_FILTER_TBL_ROWS +
2296 filter_idx);
2297 }
2298 }
2299 spin_unlock_bh(&efx->filter_lock);
2300 return count;
2301}
2302
2303#ifdef CONFIG_RFS_ACCEL
2304
2305static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
2306
2307static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
2308 struct efx_filter_spec *spec)
2309{
2310 struct efx_ef10_filter_table *table = efx->filter_state;
2311 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2312 struct efx_filter_spec *saved_spec;
2313 unsigned int hash, i, depth = 1;
2314 bool replacing = false;
2315 int ins_index = -1;
2316 u64 cookie;
2317 s32 rc;
2318
2319 /* Must be an RX filter without RSS and not for a multicast
2320 * destination address (RFS only works for connected sockets).
2321 * These restrictions allow us to pass only a tiny amount of
2322 * data through to the completion function.
2323 */
2324 EFX_WARN_ON_PARANOID(spec->flags !=
2325 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
2326 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
2327 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
2328
2329 hash = efx_ef10_filter_hash(spec);
2330
2331 spin_lock_bh(&efx->filter_lock);
2332
2333 /* Find any existing filter with the same match tuple or else
2334 * a free slot to insert at. If an existing filter is busy,
2335 * we have to give up.
2336 */
2337 for (;;) {
2338 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2339 saved_spec = efx_ef10_filter_entry_spec(table, i);
2340
2341 if (!saved_spec) {
2342 if (ins_index < 0)
2343 ins_index = i;
2344 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
2345 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
2346 rc = -EBUSY;
2347 goto fail_unlock;
2348 }
2349 EFX_WARN_ON_PARANOID(saved_spec->flags &
2350 EFX_FILTER_FLAG_RX_STACK);
2351 if (spec->priority < saved_spec->priority) {
2352 rc = -EPERM;
2353 goto fail_unlock;
2354 }
2355 ins_index = i;
2356 break;
2357 }
2358
2359 /* Once we reach the maximum search depth, use the
2360 * first suitable slot or return -EBUSY if there was
2361 * none
2362 */
2363 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
2364 if (ins_index < 0) {
2365 rc = -EBUSY;
2366 goto fail_unlock;
2367 }
2368 break;
2369 }
2370
2371 ++depth;
2372 }
2373
2374 /* Create a software table entry if necessary, and mark it
2375 * busy. We might yet fail to insert, but any attempt to
2376 * insert a conflicting filter while we're waiting for the
2377 * firmware must find the busy entry.
2378 */
2379 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2380 if (saved_spec) {
2381 replacing = true;
2382 } else {
2383 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2384 if (!saved_spec) {
2385 rc = -ENOMEM;
2386 goto fail_unlock;
2387 }
2388 *saved_spec = *spec;
2389 }
2390 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2391 EFX_EF10_FILTER_FLAG_BUSY);
2392
2393 spin_unlock_bh(&efx->filter_lock);
2394
2395 /* Pack up the variables needed on completion */
2396 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
2397
2398 efx_ef10_filter_push_prep(efx, spec, inbuf,
2399 table->entry[ins_index].handle, replacing);
2400 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2401 MC_CMD_FILTER_OP_OUT_LEN,
2402 efx_ef10_filter_rfs_insert_complete, cookie);
2403
2404 return ins_index;
2405
2406fail_unlock:
2407 spin_unlock_bh(&efx->filter_lock);
2408 return rc;
2409}
2410
2411static void
2412efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
2413 int rc, efx_dword_t *outbuf,
2414 size_t outlen_actual)
2415{
2416 struct efx_ef10_filter_table *table = efx->filter_state;
2417 unsigned int ins_index, dmaq_id;
2418 struct efx_filter_spec *spec;
2419 bool replacing;
2420
2421 /* Unpack the cookie */
2422 replacing = cookie >> 31;
2423 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
2424 dmaq_id = cookie & 0xffff;
2425
2426 spin_lock_bh(&efx->filter_lock);
2427 spec = efx_ef10_filter_entry_spec(table, ins_index);
2428 if (rc == 0) {
2429 table->entry[ins_index].handle =
2430 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
2431 if (replacing)
2432 spec->dmaq_id = dmaq_id;
2433 } else if (!replacing) {
2434 kfree(spec);
2435 spec = NULL;
2436 }
2437 efx_ef10_filter_set_entry(table, ins_index, spec, 0);
2438 spin_unlock_bh(&efx->filter_lock);
2439
2440 wake_up_all(&table->waitq);
2441}
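
The cookie that carries state from efx_ef10_filter_rfs_insert() to the
completion above packs three values into one word: the replacing flag in
bit 31, the table index in bits 16 and up, and the RX queue ID in the low
16 bits. A standalone round-trip check of that layout, again with an
illustrative table size standing in for HUNT_FILTER_TBL_ROWS:

	#include <assert.h>
	#include <stdint.h>

	#define TBL_ROWS 8192u	/* illustrative only */

	int main(void)
	{
		unsigned int replacing = 1, ins_index = 1234, dmaq_id = 7;
		uint64_t cookie;

		/* Pack as the insert path does */
		cookie = (uint64_t)replacing << 31 |
			 (uint64_t)ins_index << 16 | dmaq_id;

		/* Unpack as the completion does */
		assert((cookie >> 31) == replacing);
		assert(((cookie >> 16) & (TBL_ROWS - 1)) == ins_index);
		assert((cookie & 0xffff) == dmaq_id);
		return 0;
	}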
2442
2443static void
2444efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2445 unsigned long filter_idx,
2446 int rc, efx_dword_t *outbuf,
2447 size_t outlen_actual);
2448
2449static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2450 unsigned int filter_idx)
2451{
2452 struct efx_ef10_filter_table *table = efx->filter_state;
2453 struct efx_filter_spec *spec =
2454 efx_ef10_filter_entry_spec(table, filter_idx);
2455 MCDI_DECLARE_BUF(inbuf,
2456 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2457 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2458
2459 if (!spec ||
2460 (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
2461 spec->priority != EFX_FILTER_PRI_HINT ||
2462 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
2463 flow_id, filter_idx))
2464 return false;
2465
2466 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2467 MC_CMD_FILTER_OP_IN_OP_REMOVE);
2468 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2469 table->entry[filter_idx].handle);
2470 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
2471 efx_ef10_filter_rfs_expire_complete, filter_idx))
2472 return false;
2473
2474 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2475 return true;
2476}
2477
2478static void
2479efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2480 unsigned long filter_idx,
2481 int rc, efx_dword_t *outbuf,
2482 size_t outlen_actual)
2483{
2484 struct efx_ef10_filter_table *table = efx->filter_state;
2485 struct efx_filter_spec *spec =
2486 efx_ef10_filter_entry_spec(table, filter_idx);
2487
2488 spin_lock_bh(&efx->filter_lock);
2489 if (rc == 0) {
2490 kfree(spec);
2491 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2492 }
2493 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2494 wake_up_all(&table->waitq);
2495 spin_unlock_bh(&efx->filter_lock);
2496}
2497
2498#endif /* CONFIG_RFS_ACCEL */
2499
2500static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
2501{
2502 int match_flags = 0;
2503
2504#define MAP_FLAG(gen_flag, mcdi_field) { \
2505 u32 old_mcdi_flags = mcdi_flags; \
2506 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
2507 mcdi_field ## _LBN); \
2508 if (mcdi_flags != old_mcdi_flags) \
2509 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
2510 }
2511 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
2512 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
2513 MAP_FLAG(REM_HOST, SRC_IP);
2514 MAP_FLAG(LOC_HOST, DST_IP);
2515 MAP_FLAG(REM_MAC, SRC_MAC);
2516 MAP_FLAG(REM_PORT, SRC_PORT);
2517 MAP_FLAG(LOC_MAC, DST_MAC);
2518 MAP_FLAG(LOC_PORT, DST_PORT);
2519 MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
2520 MAP_FLAG(INNER_VID, INNER_VLAN);
2521 MAP_FLAG(OUTER_VID, OUTER_VLAN);
2522 MAP_FLAG(IP_PROTO, IP_PROTO);
2523#undef MAP_FLAG
2524
2525 /* Did we map them all? */
2526 if (mcdi_flags)
2527 return -EINVAL;
2528
2529 return match_flags;
2530}
2531
2532static int efx_ef10_filter_table_probe(struct efx_nic *efx)
2533{
2534 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
2535 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
2536 unsigned int pd_match_pri, pd_match_count;
2537 struct efx_ef10_filter_table *table;
2538 size_t outlen;
2539 int rc;
2540
2541 table = kzalloc(sizeof(*table), GFP_KERNEL);
2542 if (!table)
2543 return -ENOMEM;
2544
2545 /* Find out which RX filter types are supported, and their priorities */
2546 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
2547 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
2548 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
2549 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
2550 &outlen);
2551 if (rc)
2552 goto fail;
2553 pd_match_count = MCDI_VAR_ARRAY_LEN(
2554 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
2555 table->rx_match_count = 0;
2556
2557 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
2558 u32 mcdi_flags =
2559 MCDI_ARRAY_DWORD(
2560 outbuf,
2561 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
2562 pd_match_pri);
2563 rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
2564 if (rc < 0) {
2565 netif_dbg(efx, probe, efx->net_dev,
2566 "%s: fw flags %#x pri %u not supported in driver\n",
2567 __func__, mcdi_flags, pd_match_pri);
2568 } else {
2569 netif_dbg(efx, probe, efx->net_dev,
2570 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
2571 __func__, mcdi_flags, pd_match_pri,
2572 rc, table->rx_match_count);
2573 table->rx_match_flags[table->rx_match_count++] = rc;
2574 }
2575 }
2576
2577 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
2578 if (!table->entry) {
2579 rc = -ENOMEM;
2580 goto fail;
2581 }
2582
2583 efx->filter_state = table;
2584 init_waitqueue_head(&table->waitq);
2585 return 0;
2586
2587fail:
2588 kfree(table);
2589 return rc;
2590}
2591
2592static void efx_ef10_filter_table_restore(struct efx_nic *efx)
2593{
2594 struct efx_ef10_filter_table *table = efx->filter_state;
2595 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2596 struct efx_filter_spec *spec;
2597 unsigned int filter_idx;
2598 bool failed = false;
2599 int rc;
2600
2601 if (!nic_data->must_restore_filters)
2602 return;
2603
2604 spin_lock_bh(&efx->filter_lock);
2605
2606 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2607 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2608 if (!spec)
2609 continue;
2610
2611 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2612 spin_unlock_bh(&efx->filter_lock);
2613
2614 rc = efx_ef10_filter_push(efx, spec,
2615 &table->entry[filter_idx].handle,
2616 false);
2617 if (rc)
2618 failed = true;
2619
2620 spin_lock_bh(&efx->filter_lock);
2621 if (rc) {
2622 kfree(spec);
2623 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2624 } else {
2625 table->entry[filter_idx].spec &=
2626 ~EFX_EF10_FILTER_FLAG_BUSY;
2627 }
2628 }
2629
2630 spin_unlock_bh(&efx->filter_lock);
2631
2632 if (failed)
2633 netif_err(efx, hw, efx->net_dev,
2634 "unable to restore all filters\n");
2635 else
2636 nic_data->must_restore_filters = false;
2637}
2638
2639static void efx_ef10_filter_table_remove(struct efx_nic *efx)
2640{
2641 struct efx_ef10_filter_table *table = efx->filter_state;
2642 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2643 struct efx_filter_spec *spec;
2644 unsigned int filter_idx;
2645 int rc;
2646
2647 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2648 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2649 if (!spec)
2650 continue;
2651
2652 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2653 efx_ef10_filter_is_exclusive(spec) ?
2654 MC_CMD_FILTER_OP_IN_OP_REMOVE :
2655 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2656 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2657 table->entry[filter_idx].handle);
2658 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2659 NULL, 0, NULL);
2660
2661 WARN_ON(rc != 0);
2662 kfree(spec);
2663 }
2664
2665 vfree(table->entry);
2666 kfree(table);
2667}
2668
2669static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
2670{
2671 struct efx_ef10_filter_table *table = efx->filter_state;
2672 struct net_device *net_dev = efx->net_dev;
2673 struct efx_filter_spec spec;
2674 bool remove_failed = false;
2675 struct netdev_hw_addr *uc;
2676 struct netdev_hw_addr *mc;
2677 unsigned int filter_idx;
2678 int i, n, rc;
2679
2680 if (!efx_dev_registered(efx))
2681 return;
2682
2683 /* Mark old filters that may need to be removed */
2684 spin_lock_bh(&efx->filter_lock);
2685 n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
2686 for (i = 0; i < n; i++) {
2687 filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
2688 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
2689 }
2690 n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
2691 for (i = 0; i < n; i++) {
2692 filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
2693 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
2694 }
2695 spin_unlock_bh(&efx->filter_lock);
2696
2697 /* Copy/convert the address lists; add the primary station
2698 * address and broadcast address
2699 */
2700 netif_addr_lock_bh(net_dev);
2701 if (net_dev->flags & IFF_PROMISC ||
2702 netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
2703 table->stack_uc_count = -1;
2704 } else {
2705 table->stack_uc_count = 1 + netdev_uc_count(net_dev);
2706 memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
2707 ETH_ALEN);
2708 i = 1;
2709 netdev_for_each_uc_addr(uc, net_dev) {
2710 memcpy(table->stack_uc_list[i].addr,
2711 uc->addr, ETH_ALEN);
2712 i++;
2713 }
2714 }
2715 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
2716 netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
2717 table->stack_mc_count = -1;
2718 } else {
2719 table->stack_mc_count = 1 + netdev_mc_count(net_dev);
2720 eth_broadcast_addr(table->stack_mc_list[0].addr);
2721 i = 1;
2722 netdev_for_each_mc_addr(mc, net_dev) {
2723 memcpy(table->stack_mc_list[i].addr,
2724 mc->addr, ETH_ALEN);
2725 i++;
2726 }
2727 }
2728 netif_addr_unlock_bh(net_dev);
2729
2730 /* Insert/renew unicast filters */
2731 if (table->stack_uc_count >= 0) {
2732 for (i = 0; i < table->stack_uc_count; i++) {
2733 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2734 EFX_FILTER_FLAG_RX_RSS |
2735 EFX_FILTER_FLAG_RX_STACK,
2736 0);
2737 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
2738 table->stack_uc_list[i].addr);
2739 rc = efx_ef10_filter_insert(efx, &spec, true);
2740 if (rc < 0) {
2741 /* Fall back to unicast-promisc */
2742 while (i--)
2743 efx_ef10_filter_remove_safe(
2744 efx, EFX_FILTER_PRI_REQUIRED,
2745 table->stack_uc_list[i].id);
2746 table->stack_uc_count = -1;
2747 break;
2748 }
2749 table->stack_uc_list[i].id = rc;
2750 }
2751 }
2752 if (table->stack_uc_count < 0) {
2753 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2754 EFX_FILTER_FLAG_RX_RSS |
2755 EFX_FILTER_FLAG_RX_STACK,
2756 0);
2757 efx_filter_set_uc_def(&spec);
2758 rc = efx_ef10_filter_insert(efx, &spec, true);
2759 if (rc < 0) {
2760 WARN_ON(1);
2761 table->stack_uc_count = 0;
2762 } else {
2763 table->stack_uc_list[0].id = rc;
2764 }
2765 }
2766
2767 /* Insert/renew multicast filters */
2768 if (table->stack_mc_count >= 0) {
2769 for (i = 0; i < table->stack_mc_count; i++) {
2770 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2771 EFX_FILTER_FLAG_RX_RSS |
2772 EFX_FILTER_FLAG_RX_STACK,
2773 0);
2774 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
2775 table->stack_mc_list[i].addr);
2776 rc = efx_ef10_filter_insert(efx, &spec, true);
2777 if (rc < 0) {
2778 /* Fall back to multicast-promisc */
2779 while (i--)
2780 efx_ef10_filter_remove_safe(
2781 efx, EFX_FILTER_PRI_REQUIRED,
2782 table->stack_mc_list[i].id);
2783 table->stack_mc_count = -1;
2784 break;
2785 }
2786 table->stack_mc_list[i].id = rc;
2787 }
2788 }
2789 if (table->stack_mc_count < 0) {
2790 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2791 EFX_FILTER_FLAG_RX_RSS |
2792 EFX_FILTER_FLAG_RX_STACK,
2793 0);
2794 efx_filter_set_mc_def(&spec);
2795 rc = efx_ef10_filter_insert(efx, &spec, true);
2796 if (rc < 0) {
2797 WARN_ON(1);
2798 table->stack_mc_count = 0;
2799 } else {
2800 table->stack_mc_list[0].id = rc;
2801 }
2802 }
2803
2804 /* Remove filters that weren't renewed. Since nothing else
2805 * changes the STACK_OLD flag or removes these filters, we
2806 * don't need to hold the filter_lock while scanning for
2807 * these filters.
2808 */
2809 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
2810 if (ACCESS_ONCE(table->entry[i].spec) &
2811 EFX_EF10_FILTER_FLAG_STACK_OLD) {
2812 if (efx_ef10_filter_remove_internal(efx,
2813 EFX_FILTER_PRI_REQUIRED,
2814 i, true) < 0)
2815 remove_failed = true;
2816 }
2817 }
2818 WARN_ON(remove_failed);
2819}
2820
2821static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
2822{
2823 efx_ef10_filter_sync_rx_mode(efx);
2824
2825 return efx_mcdi_set_mac(efx);
2826}
2827
2828#ifdef CONFIG_SFC_MTD
2829
2830struct efx_ef10_nvram_type_info {
2831 u16 type, type_mask;
2832 u8 port;
2833 const char *name;
2834};
2835
2836static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
2837 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
2838 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
2839 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
2840 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
2841 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
2842 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
2843 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
2844 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
2845 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
2846 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
2847};
2848
2849static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
2850 struct efx_mcdi_mtd_partition *part,
2851 unsigned int type)
2852{
2853 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
2854 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
2855 const struct efx_ef10_nvram_type_info *info;
2856 size_t size, erase_size, outlen;
2857 bool protected;
2858 int rc;
2859
2860 for (info = efx_ef10_nvram_types; ; info++) {
2861 if (info ==
2862 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
2863 return -ENODEV;
2864 if ((type & ~info->type_mask) == info->type)
2865 break;
2866 }
2867 if (info->port != efx_port_num(efx))
2868 return -ENODEV;
2869
2870 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
2871 if (rc)
2872 return rc;
2873 if (protected)
2874 return -ENODEV; /* hide it */
2875
2876 part->nvram_type = type;
2877
2878 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
2879 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
2880 outbuf, sizeof(outbuf), &outlen);
2881 if (rc)
2882 return rc;
2883 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
2884 return -EIO;
2885 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
2886 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
2887 part->fw_subtype = MCDI_DWORD(outbuf,
2888 NVRAM_METADATA_OUT_SUBTYPE);
2889
2890 part->common.dev_type_name = "EF10 NVRAM manager";
2891 part->common.type_name = info->name;
2892
2893 part->common.mtd.type = MTD_NORFLASH;
2894 part->common.mtd.flags = MTD_CAP_NORFLASH;
2895 part->common.mtd.size = size;
2896 part->common.mtd.erasesize = erase_size;
2897
2898 return 0;
2899}
2900
2901static int efx_ef10_mtd_probe(struct efx_nic *efx)
2902{
2903 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
2904 struct efx_mcdi_mtd_partition *parts;
2905 size_t outlen, n_parts_total, i, n_parts;
2906 unsigned int type;
2907 int rc;
2908
2909 ASSERT_RTNL();
2910
2911 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
2912 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
2913 outbuf, sizeof(outbuf), &outlen);
2914 if (rc)
2915 return rc;
2916 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
2917 return -EIO;
2918
2919 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
2920 if (n_parts_total >
2921 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
2922 return -EIO;
2923
2924 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
2925 if (!parts)
2926 return -ENOMEM;
2927
2928 n_parts = 0;
2929 for (i = 0; i < n_parts_total; i++) {
2930 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
2931 i);
2932 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
2933 if (rc == 0)
2934 n_parts++;
2935 else if (rc != -ENODEV)
2936 goto fail;
2937 }
2938
2939 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
2940fail:
2941 if (rc)
2942 kfree(parts);
2943 return rc;
2944}
2945
2946#endif /* CONFIG_SFC_MTD */
2947
2948static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
2949{
2950 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
2951}
2952
2953const struct efx_nic_type efx_hunt_a0_nic_type = {
2954 .mem_map_size = efx_ef10_mem_map_size,
2955 .probe = efx_ef10_probe,
2956 .remove = efx_ef10_remove,
2957 .dimension_resources = efx_ef10_dimension_resources,
2958 .init = efx_ef10_init_nic,
2959 .fini = efx_port_dummy_op_void,
2960 .map_reset_reason = efx_mcdi_map_reset_reason,
2961 .map_reset_flags = efx_ef10_map_reset_flags,
2962 .reset = efx_mcdi_reset,
2963 .probe_port = efx_mcdi_port_probe,
2964 .remove_port = efx_mcdi_port_remove,
2965 .fini_dmaq = efx_ef10_fini_dmaq,
2966 .describe_stats = efx_ef10_describe_stats,
2967 .update_stats = efx_ef10_update_stats,
2968 .start_stats = efx_mcdi_mac_start_stats,
2969 .stop_stats = efx_mcdi_mac_stop_stats,
2970 .set_id_led = efx_mcdi_set_id_led,
2971 .push_irq_moderation = efx_ef10_push_irq_moderation,
2972 .reconfigure_mac = efx_ef10_mac_reconfigure,
2973 .check_mac_fault = efx_mcdi_mac_check_fault,
2974 .reconfigure_port = efx_mcdi_port_reconfigure,
2975 .get_wol = efx_ef10_get_wol,
2976 .set_wol = efx_ef10_set_wol,
2977 .resume_wol = efx_port_dummy_op_void,
2978 /* TODO: test_chip */
2979 .test_nvram = efx_mcdi_nvram_test_all,
2980 .mcdi_request = efx_ef10_mcdi_request,
2981 .mcdi_poll_response = efx_ef10_mcdi_poll_response,
2982 .mcdi_read_response = efx_ef10_mcdi_read_response,
2983 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
2984 .irq_enable_master = efx_port_dummy_op_void,
2985 .irq_test_generate = efx_ef10_irq_test_generate,
2986 .irq_disable_non_ev = efx_port_dummy_op_void,
2987 .irq_handle_msi = efx_ef10_msi_interrupt,
2988 .irq_handle_legacy = efx_ef10_legacy_interrupt,
2989 .tx_probe = efx_ef10_tx_probe,
2990 .tx_init = efx_ef10_tx_init,
2991 .tx_remove = efx_ef10_tx_remove,
2992 .tx_write = efx_ef10_tx_write,
2993 .rx_push_indir_table = efx_ef10_rx_push_indir_table,
2994 .rx_probe = efx_ef10_rx_probe,
2995 .rx_init = efx_ef10_rx_init,
2996 .rx_remove = efx_ef10_rx_remove,
2997 .rx_write = efx_ef10_rx_write,
2998 .rx_defer_refill = efx_ef10_rx_defer_refill,
2999 .ev_probe = efx_ef10_ev_probe,
3000 .ev_init = efx_ef10_ev_init,
3001 .ev_fini = efx_ef10_ev_fini,
3002 .ev_remove = efx_ef10_ev_remove,
3003 .ev_process = efx_ef10_ev_process,
3004 .ev_read_ack = efx_ef10_ev_read_ack,
3005 .ev_test_generate = efx_ef10_ev_test_generate,
3006 .filter_table_probe = efx_ef10_filter_table_probe,
3007 .filter_table_restore = efx_ef10_filter_table_restore,
3008 .filter_table_remove = efx_ef10_filter_table_remove,
3009 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
3010 .filter_insert = efx_ef10_filter_insert,
3011 .filter_remove_safe = efx_ef10_filter_remove_safe,
3012 .filter_get_safe = efx_ef10_filter_get_safe,
3013 .filter_clear_rx = efx_ef10_filter_clear_rx,
3014 .filter_count_rx_used = efx_ef10_filter_count_rx_used,
3015 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
3016 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
3017#ifdef CONFIG_RFS_ACCEL
3018 .filter_rfs_insert = efx_ef10_filter_rfs_insert,
3019 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
3020#endif
3021#ifdef CONFIG_SFC_MTD
3022 .mtd_probe = efx_ef10_mtd_probe,
3023 .mtd_rename = efx_mcdi_mtd_rename,
3024 .mtd_read = efx_mcdi_mtd_read,
3025 .mtd_erase = efx_mcdi_mtd_erase,
3026 .mtd_write = efx_mcdi_mtd_write,
3027 .mtd_sync = efx_mcdi_mtd_sync,
3028#endif
3029 .ptp_write_host_time = efx_ef10_ptp_write_host_time,
3030
3031 .revision = EFX_REV_HUNT_A0,
3032 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
3033 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
3034 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
3035 .can_rx_scatter = true,
3036 .always_rx_scatter = true,
3037 .max_interrupt_mode = EFX_INT_MODE_MSIX,
3038 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
3039 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3040 NETIF_F_RXHASH | NETIF_F_NTUPLE),
3041 .mcdi_max_ver = 2,
3042 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
3043};
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
new file mode 100644
index 000000000000..b3f4e3755fd9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -0,0 +1,415 @@
1/****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2012-2013 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_EF10_REGS_H
11#define EFX_EF10_REGS_H
12
13/* EF10 hardware architecture definitions have a name prefix following
14 * the format:
15 *
16 * E<type>_<min-rev><max-rev>_
17 *
18 * The following <type> strings are used:
19 *
 20 *              MMIO register  Host memory structure
 21 * -------------------------------------------------------------
 22 * Address      R
 23 * Bitfield     RF             SF
 24 * Enumerator   FE             SE
25 *
26 * <min-rev> is the first revision to which the definition applies:
27 *
28 * D: Huntington A0
29 *
30 * If the definition has been changed or removed in later revisions
31 * then <max-rev> is the last revision to which the definition applies;
32 * otherwise it is "Z".
33 */
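
As a worked reading of this convention, using definitions that appear later in
this header: ER_DZ_EVQ_RPTR is a register address ("R") valid from Huntington
A0 ("D") onwards ("Z"), ERF_DZ_EVQ_RPTR is the bitfield inside it, and
ESF_DZ_RX_BYTES and ESE_DZ_EV_CODE_RX_EV are a bitfield and an enumerator of
host-memory structures. The sketch below shows how an LBN/WIDTH pair is
typically consumed; the driver itself uses its EFX_QWORD_FIELD-style macros
for this, so get_field() here is only an illustration.

	#include <stdint.h>
	#include <stdio.h>

	/* Field definitions copied from the RX_EVENT section of this header */
	#define ESF_DZ_RX_QLABEL_LBN	16
	#define ESF_DZ_RX_QLABEL_WIDTH	5
	#define ESF_DZ_RX_BYTES_LBN	0
	#define ESF_DZ_RX_BYTES_WIDTH	14

	/* Extract an LBN/WIDTH field from a 64-bit event or descriptor word */
	static uint64_t get_field(uint64_t word, unsigned int lbn,
				  unsigned int width)
	{
		return (word >> lbn) & ((1ULL << width) - 1);
	}

	int main(void)
	{
		/* An RX event with queue label 3 and a 1460-byte payload */
		uint64_t event = (3ULL << ESF_DZ_RX_QLABEL_LBN) | 1460;

		printf("qlabel=%llu bytes=%llu\n",
		       (unsigned long long)get_field(event, ESF_DZ_RX_QLABEL_LBN,
						     ESF_DZ_RX_QLABEL_WIDTH),
		       (unsigned long long)get_field(event, ESF_DZ_RX_BYTES_LBN,
						     ESF_DZ_RX_BYTES_WIDTH));
		return 0;
	}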
34
35/**************************************************************************
36 *
37 * EF10 registers and descriptors
38 *
39 **************************************************************************
40 */
41
42/* BIU_HW_REV_ID_REG: */
43#define ER_DZ_BIU_HW_REV_ID 0x00000000
44#define ERF_DZ_HW_REV_ID_LBN 0
45#define ERF_DZ_HW_REV_ID_WIDTH 32
46
47/* BIU_MC_SFT_STATUS_REG: */
48#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010
49#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4
50#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8
51#define ERF_DZ_MC_SFT_STATUS_LBN 0
52#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
53
54/* BIU_INT_ISR_REG: */
55#define ER_DZ_BIU_INT_ISR 0x00000090
56#define ERF_DZ_ISR_REG_LBN 0
57#define ERF_DZ_ISR_REG_WIDTH 32
58
59/* MC_DB_LWRD_REG: */
60#define ER_DZ_MC_DB_LWRD 0x00000200
61#define ERF_DZ_MC_DOORBELL_L_LBN 0
62#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
63
64/* MC_DB_HWRD_REG: */
65#define ER_DZ_MC_DB_HWRD 0x00000204
66#define ERF_DZ_MC_DOORBELL_H_LBN 0
67#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
68
69/* EVQ_RPTR_REG: */
70#define ER_DZ_EVQ_RPTR 0x00000400
71#define ER_DZ_EVQ_RPTR_STEP 8192
72#define ER_DZ_EVQ_RPTR_ROWS 2048
73#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
74#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
75#define ERF_DZ_EVQ_RPTR_LBN 0
76#define ERF_DZ_EVQ_RPTR_WIDTH 15
77
78/* EVQ_TMR_REG: */
79#define ER_DZ_EVQ_TMR 0x00000420
80#define ER_DZ_EVQ_TMR_STEP 8192
81#define ER_DZ_EVQ_TMR_ROWS 2048
82#define ERF_DZ_TC_TIMER_MODE_LBN 14
83#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
84#define ERF_DZ_TC_TIMER_VAL_LBN 0
85#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
86
87/* RX_DESC_UPD_REG: */
88#define ER_DZ_RX_DESC_UPD 0x00000830
89#define ER_DZ_RX_DESC_UPD_STEP 8192
90#define ER_DZ_RX_DESC_UPD_ROWS 2048
91#define ERF_DZ_RX_DESC_WPTR_LBN 0
92#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
93
94/* TX_DESC_UPD_REG: */
95#define ER_DZ_TX_DESC_UPD 0x00000a10
96#define ER_DZ_TX_DESC_UPD_STEP 8192
97#define ER_DZ_TX_DESC_UPD_ROWS 2048
98#define ERF_DZ_RSVD_LBN 76
99#define ERF_DZ_RSVD_WIDTH 20
100#define ERF_DZ_TX_DESC_WPTR_LBN 64
101#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
102#define ERF_DZ_TX_DESC_HWORD_LBN 32
103#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
104#define ERF_DZ_TX_DESC_LWORD_LBN 0
105#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
106
107/* DRIVER_EV */
108#define ESF_DZ_DRV_CODE_LBN 60
109#define ESF_DZ_DRV_CODE_WIDTH 4
110#define ESF_DZ_DRV_SUB_CODE_LBN 56
111#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
112#define ESE_DZ_DRV_TIMER_EV 3
113#define ESE_DZ_DRV_START_UP_EV 2
114#define ESE_DZ_DRV_WAKE_UP_EV 1
115#define ESF_DZ_DRV_SUB_DATA_LBN 0
116#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
117#define ESF_DZ_DRV_EVQ_ID_LBN 0
118#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
119#define ESF_DZ_DRV_TMR_ID_LBN 0
120#define ESF_DZ_DRV_TMR_ID_WIDTH 14
121
122/* EVENT_ENTRY */
123#define ESF_DZ_EV_CODE_LBN 60
124#define ESF_DZ_EV_CODE_WIDTH 4
125#define ESE_DZ_EV_CODE_MCDI_EV 12
126#define ESE_DZ_EV_CODE_DRIVER_EV 5
127#define ESE_DZ_EV_CODE_TX_EV 2
128#define ESE_DZ_EV_CODE_RX_EV 0
129#define ESE_DZ_OTHER other
130#define ESF_DZ_EV_DATA_LBN 0
131#define ESF_DZ_EV_DATA_WIDTH 60
132
133/* MC_EVENT */
134#define ESF_DZ_MC_CODE_LBN 60
135#define ESF_DZ_MC_CODE_WIDTH 4
136#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
137#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
138#define ESF_DZ_MC_DROP_EVENT_LBN 58
139#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
140#define ESF_DZ_MC_SOFT_LBN 0
141#define ESF_DZ_MC_SOFT_WIDTH 58
142
143/* RX_EVENT */
144#define ESF_DZ_RX_CODE_LBN 60
145#define ESF_DZ_RX_CODE_WIDTH 4
146#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
147#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
148#define ESF_DZ_RX_DROP_EVENT_LBN 58
149#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
150#define ESF_DZ_RX_EV_RSVD2_LBN 54
151#define ESF_DZ_RX_EV_RSVD2_WIDTH 4
152#define ESF_DZ_RX_EV_SOFT2_LBN 52
153#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
154#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
155#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
156#define ESF_DZ_RX_L4_CLASS_LBN 45
157#define ESF_DZ_RX_L4_CLASS_WIDTH 3
158#define ESE_DZ_L4_CLASS_RSVD7 7
159#define ESE_DZ_L4_CLASS_RSVD6 6
160#define ESE_DZ_L4_CLASS_RSVD5 5
161#define ESE_DZ_L4_CLASS_RSVD4 4
162#define ESE_DZ_L4_CLASS_RSVD3 3
163#define ESE_DZ_L4_CLASS_UDP 2
164#define ESE_DZ_L4_CLASS_TCP 1
165#define ESE_DZ_L4_CLASS_UNKNOWN 0
166#define ESF_DZ_RX_L3_CLASS_LBN 42
167#define ESF_DZ_RX_L3_CLASS_WIDTH 3
168#define ESE_DZ_L3_CLASS_RSVD7 7
169#define ESE_DZ_L3_CLASS_IP6_FRAG 6
170#define ESE_DZ_L3_CLASS_ARP 5
171#define ESE_DZ_L3_CLASS_IP4_FRAG 4
172#define ESE_DZ_L3_CLASS_FCOE 3
173#define ESE_DZ_L3_CLASS_IP6 2
174#define ESE_DZ_L3_CLASS_IP4 1
175#define ESE_DZ_L3_CLASS_UNKNOWN 0
176#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
177#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
178#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
179#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
180#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
181#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
182#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
183#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
184#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
185#define ESE_DZ_ETH_TAG_CLASS_NONE 0
186#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
187#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
188#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
189#define ESE_DZ_ETH_BASE_CLASS_LLC 1
190#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
191#define ESF_DZ_RX_MAC_CLASS_LBN 35
192#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
193#define ESE_DZ_MAC_CLASS_MCAST 1
194#define ESE_DZ_MAC_CLASS_UCAST 0
195#define ESF_DZ_RX_EV_SOFT1_LBN 32
196#define ESF_DZ_RX_EV_SOFT1_WIDTH 3
197#define ESF_DZ_RX_EV_RSVD1_LBN 31
198#define ESF_DZ_RX_EV_RSVD1_WIDTH 1
199#define ESF_DZ_RX_ABORT_LBN 30
200#define ESF_DZ_RX_ABORT_WIDTH 1
201#define ESF_DZ_RX_ECC_ERR_LBN 29
202#define ESF_DZ_RX_ECC_ERR_WIDTH 1
203#define ESF_DZ_RX_CRC1_ERR_LBN 28
204#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
205#define ESF_DZ_RX_CRC0_ERR_LBN 27
206#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
207#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
208#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
209#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
210#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
211#define ESF_DZ_RX_ECRC_ERR_LBN 24
212#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
213#define ESF_DZ_RX_QLABEL_LBN 16
214#define ESF_DZ_RX_QLABEL_WIDTH 5
215#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
216#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
217#define ESF_DZ_RX_CONT_LBN 14
218#define ESF_DZ_RX_CONT_WIDTH 1
219#define ESF_DZ_RX_BYTES_LBN 0
220#define ESF_DZ_RX_BYTES_WIDTH 14
221
222/* RX_KER_DESC */
223#define ESF_DZ_RX_KER_RESERVED_LBN 62
224#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
225#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
226#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
227#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
228#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
229
230/* RX_USER_DESC */
231#define ESF_DZ_RX_USR_RESERVED_LBN 62
232#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
233#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
234#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
235#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
236#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
237#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
238#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
239#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
240#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
241#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
242#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
243#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
244#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
245#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
246#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
247#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
248#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
249#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
250#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
251#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
252#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
253#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
254#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
255#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
256#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
257#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
258#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
259
260/* TX_CSUM_TSTAMP_DESC */
261#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
262#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
263#define ESF_DZ_TX_OPTION_TYPE_LBN 60
264#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
265#define ESE_DZ_TX_OPTION_DESC_TSO 7
266#define ESE_DZ_TX_OPTION_DESC_VLAN 6
267#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
268#define ESF_DZ_TX_TIMESTAMP_LBN 5
269#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
270#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
271#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
272#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
273#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
274#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
275#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
276#define ESE_DZ_TX_OPTION_CRC_FCOE 1
277#define ESE_DZ_TX_OPTION_CRC_OFF 0
278#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
279#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
280#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
281#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
282
283/* TX_EVENT */
284#define ESF_DZ_TX_CODE_LBN 60
285#define ESF_DZ_TX_CODE_WIDTH 4
286#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
287#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
288#define ESF_DZ_TX_DROP_EVENT_LBN 58
289#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
290#define ESF_DZ_TX_EV_RSVD_LBN 48
291#define ESF_DZ_TX_EV_RSVD_WIDTH 10
292#define ESF_DZ_TX_SOFT2_LBN 32
293#define ESF_DZ_TX_SOFT2_WIDTH 16
294#define ESF_DZ_TX_CAN_MERGE_LBN 31
295#define ESF_DZ_TX_CAN_MERGE_WIDTH 1
296#define ESF_DZ_TX_SOFT1_LBN 24
297#define ESF_DZ_TX_SOFT1_WIDTH 7
298#define ESF_DZ_TX_QLABEL_LBN 16
299#define ESF_DZ_TX_QLABEL_WIDTH 5
300#define ESF_DZ_TX_DESCR_INDX_LBN 0
301#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
302
303/* TX_KER_DESC */
304#define ESF_DZ_TX_KER_TYPE_LBN 63
305#define ESF_DZ_TX_KER_TYPE_WIDTH 1
306#define ESF_DZ_TX_KER_CONT_LBN 62
307#define ESF_DZ_TX_KER_CONT_WIDTH 1
308#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
309#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
310#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
311#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
312
313/* TX_PIO_DESC */
314#define ESF_DZ_TX_PIO_TYPE_LBN 63
315#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
316#define ESF_DZ_TX_PIO_OPT_LBN 60
317#define ESF_DZ_TX_PIO_OPT_WIDTH 3
318#define ESF_DZ_TX_PIO_CONT_LBN 59
319#define ESF_DZ_TX_PIO_CONT_WIDTH 1
320#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
321#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
322#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
323#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
324
325/* TX_TSO_DESC */
326#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
327#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
328#define ESF_DZ_TX_OPTION_TYPE_LBN 60
329#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
330#define ESE_DZ_TX_OPTION_DESC_TSO 7
331#define ESE_DZ_TX_OPTION_DESC_VLAN 6
332#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
333#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
334#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
335#define ESF_DZ_TX_TSO_IP_ID_LBN 32
336#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
337#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
338#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
339
340/* TX_USER_DESC */
341#define ESF_DZ_TX_USR_TYPE_LBN 63
342#define ESF_DZ_TX_USR_TYPE_WIDTH 1
343#define ESF_DZ_TX_USR_CONT_LBN 62
344#define ESF_DZ_TX_USR_CONT_WIDTH 1
345#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
346#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
347#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
348#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
349#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
350#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
351#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
352#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
353#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
354#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
355#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
356#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
357#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
358#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
359#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
360#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
361#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
362#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
363#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
364#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
365#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
366#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
367#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
368#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
369#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
370#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
371/*************************************************************************/
372
373/* TX_DESC_UPD_REG: Transmit descriptor update register.
374 * We may write just one dword of these registers.
375 */
376#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4)
377#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32)
378#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH
379
380/* The workaround for bug 35388 requires multiplexing writes through
381 * the TX_DESC_UPD_DWORD address.
382 * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
383 * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
384 * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
385 */
386#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD
387#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
388#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
389#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
390#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
391#define ERF_DD_EVQ_IND_RPTR_LBN 0
392#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
393#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
394#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
395#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
396#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
397#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
398#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
399#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
400
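The split EVQ_RPTR encoding described in the bug 35388 workaround comment above can be hard to picture from the field definitions alone. The standalone C sketch below (not driver code) shows how a read pointer might be folded into the two flagged dword writes; the write_reg() helper, the printf-based "MMIO", and the assumption that the pointer is 16 bits wide are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN  8
#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8   /* binary 1000 */
#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW  9   /* binary 1001 */
#define ERF_DD_EVQ_IND_RPTR_LBN        0

/* Placeholder for the driver's MMIO write; here it only prints the value. */
static void write_reg(uint32_t value)
{
    printf("EVQ-indirect dword write: 0x%03x\n", value);
}

/* Fold a read pointer into two dword writes: flags 1000 with the high
 * eight bits of the pointer, then flags 1001 with the low eight bits.
 */
static void evq_rptr_indirect_write(uint16_t read_ptr)
{
    write_reg((EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH << ERF_DD_EVQ_IND_RPTR_FLAGS_LBN) |
              (((read_ptr >> 8) & 0xff) << ERF_DD_EVQ_IND_RPTR_LBN));
    write_reg((EFE_DD_EVQ_IND_RPTR_FLAGS_LOW << ERF_DD_EVQ_IND_RPTR_FLAGS_LBN) |
              ((read_ptr & 0xff) << ERF_DD_EVQ_IND_RPTR_LBN));
}

int main(void)
{
    evq_rptr_indirect_write(0x1234);   /* arbitrary example pointer */
    return 0;
}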
401/* TX_PIOBUF
402 * PIO buffer aperture (paged)
403 */
404#define ER_DZ_TX_PIOBUF 4096
405#define ER_DZ_TX_PIOBUF_SIZE 2048
406
407/* RX packet prefix */
408#define ES_DZ_RX_PREFIX_HASH_OFST 0
409#define ES_DZ_RX_PREFIX_VLAN1_OFST 4
410#define ES_DZ_RX_PREFIX_VLAN2_OFST 6
411#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8
412#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10
413#define ES_DZ_RX_PREFIX_SIZE 14
414
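The RX prefix offsets above fix where each field starts but not its width or byte order. The short user-space sketch below parses a captured 14-byte prefix assuming little-endian fields, with a 32-bit hash and timestamp and 16-bit VLAN tags and packet length; those widths are inferred from the spacing of the offsets, not stated by the header, so treat this as illustrative only.

#include <stdint.h>
#include <stdio.h>

#define ES_DZ_RX_PREFIX_HASH_OFST   0
#define ES_DZ_RX_PREFIX_VLAN1_OFST  4
#define ES_DZ_RX_PREFIX_VLAN2_OFST  6
#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8
#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10
#define ES_DZ_RX_PREFIX_SIZE        14

static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (uint16_t)p[1] << 8);
}

static uint32_t get_le32(const uint8_t *p)
{
    return p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
    uint8_t prefix[ES_DZ_RX_PREFIX_SIZE] = { 0 };   /* made-up sample data */

    printf("hash   = 0x%08x\n", get_le32(prefix + ES_DZ_RX_PREFIX_HASH_OFST));
    printf("vlan1  = 0x%04x\n", get_le16(prefix + ES_DZ_RX_PREFIX_VLAN1_OFST));
    printf("vlan2  = 0x%04x\n", get_le16(prefix + ES_DZ_RX_PREFIX_VLAN2_OFST));
    printf("pktlen = %u\n",     get_le16(prefix + ES_DZ_RX_PREFIX_PKTLEN_OFST));
    printf("tstamp = 0x%08x\n", get_le32(prefix + ES_DZ_RX_PREFIX_TSTAMP_OFST));
    return 0;
}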
415#endif /* EFX_EF10_REGS_H */
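Every field in the header above is described by an _LBN/_WIDTH pair: LBN is the lowest bit number of the field within its descriptor or event word, and WIDTH is the number of bits it occupies. The minimal standalone sketch below shows how such a pair selects a field from a 64-bit RX event; the extract_field() helper and the sample event value are assumptions for illustration, while the driver itself does this through the macros in bitfield.h.

#include <stdint.h>
#include <stdio.h>

#define ESF_DZ_RX_BYTES_LBN    0
#define ESF_DZ_RX_BYTES_WIDTH  14
#define ESF_DZ_RX_CONT_LBN     14
#define ESF_DZ_RX_CONT_WIDTH   1
#define ESF_DZ_RX_QLABEL_LBN   16
#define ESF_DZ_RX_QLABEL_WIDTH 5

/* Extract WIDTH bits starting at bit LBN from a 64-bit event word. */
static uint64_t extract_field(uint64_t event, unsigned int lbn, unsigned int width)
{
    return (event >> lbn) & ((1ULL << width) - 1);
}

int main(void)
{
    uint64_t rx_event = 0x0000000000054321ULL;   /* made-up sample value */

    printf("bytes  = %llu\n",
           (unsigned long long)extract_field(rx_event, ESF_DZ_RX_BYTES_LBN,
                                             ESF_DZ_RX_BYTES_WIDTH));
    printf("cont   = %llu\n",
           (unsigned long long)extract_field(rx_event, ESF_DZ_RX_CONT_LBN,
                                             ESF_DZ_RX_CONT_WIDTH));
    printf("qlabel = %llu\n",
           (unsigned long long)extract_field(rx_event, ESF_DZ_RX_QLABEL_LBN,
                                             ESF_DZ_RX_QLABEL_WIDTH));
    return 0;
}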
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index db2f119d7fec..07c9bc4c61bc 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2011 Solarflare Communications Inc. 4 * Copyright 2005-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -189,7 +189,7 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
189 * 189 *
190 *************************************************************************/ 190 *************************************************************************/
191 191
192static void efx_soft_enable_interrupts(struct efx_nic *efx); 192static int efx_soft_enable_interrupts(struct efx_nic *efx);
193static void efx_soft_disable_interrupts(struct efx_nic *efx); 193static void efx_soft_disable_interrupts(struct efx_nic *efx);
194static void efx_remove_channel(struct efx_channel *channel); 194static void efx_remove_channel(struct efx_channel *channel);
195static void efx_remove_channels(struct efx_nic *efx); 195static void efx_remove_channels(struct efx_nic *efx);
@@ -329,15 +329,23 @@ static int efx_probe_eventq(struct efx_channel *channel)
329} 329}
330 330
331/* Prepare channel's event queue */ 331/* Prepare channel's event queue */
332static void efx_init_eventq(struct efx_channel *channel) 332static int efx_init_eventq(struct efx_channel *channel)
333{ 333{
334 netif_dbg(channel->efx, drv, channel->efx->net_dev, 334 struct efx_nic *efx = channel->efx;
335 "chan %d init event queue\n", channel->channel); 335 int rc;
336
337 EFX_WARN_ON_PARANOID(channel->eventq_init);
336 338
337 channel->eventq_read_ptr = 0; 339 netif_dbg(efx, drv, efx->net_dev,
340 "chan %d init event queue\n", channel->channel);
338 341
339 efx_nic_init_eventq(channel); 342 rc = efx_nic_init_eventq(channel);
340 channel->eventq_init = true; 343 if (rc == 0) {
344 efx->type->push_irq_moderation(channel);
345 channel->eventq_read_ptr = 0;
346 channel->eventq_init = true;
347 }
348 return rc;
341} 349}
342 350
343/* Enable event queue processing and NAPI */ 351/* Enable event queue processing and NAPI */
@@ -579,7 +587,7 @@ static void efx_start_datapath(struct efx_nic *efx)
579 rx_buf_len = (sizeof(struct efx_rx_page_state) + 587 rx_buf_len = (sizeof(struct efx_rx_page_state) +
580 NET_IP_ALIGN + efx->rx_dma_len); 588 NET_IP_ALIGN + efx->rx_dma_len);
581 if (rx_buf_len <= PAGE_SIZE) { 589 if (rx_buf_len <= PAGE_SIZE) {
582 efx->rx_scatter = false; 590 efx->rx_scatter = efx->type->always_rx_scatter;
583 efx->rx_buffer_order = 0; 591 efx->rx_buffer_order = 0;
584 } else if (efx->type->can_rx_scatter) { 592 } else if (efx->type->can_rx_scatter) {
585 BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); 593 BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
@@ -607,7 +615,7 @@ static void efx_start_datapath(struct efx_nic *efx)
607 efx->rx_dma_len, efx->rx_page_buf_step, 615 efx->rx_dma_len, efx->rx_page_buf_step,
608 efx->rx_bufs_per_page, efx->rx_pages_per_batch); 616 efx->rx_bufs_per_page, efx->rx_pages_per_batch);
609 617
610 /* RX filters also have scatter-enabled flags */ 618 /* RX filters may also have scatter-enabled flags */
611 if (efx->rx_scatter != old_rx_scatter) 619 if (efx->rx_scatter != old_rx_scatter)
612 efx->type->filter_update_rx_scatter(efx); 620 efx->type->filter_update_rx_scatter(efx);
613 621
@@ -623,11 +631,14 @@ static void efx_start_datapath(struct efx_nic *efx)
623 631
624 /* Initialise the channels */ 632 /* Initialise the channels */
625 efx_for_each_channel(channel, efx) { 633 efx_for_each_channel(channel, efx) {
626 efx_for_each_channel_tx_queue(tx_queue, channel) 634 efx_for_each_channel_tx_queue(tx_queue, channel) {
627 efx_init_tx_queue(tx_queue); 635 efx_init_tx_queue(tx_queue);
636 atomic_inc(&efx->active_queues);
637 }
628 638
629 efx_for_each_channel_rx_queue(rx_queue, channel) { 639 efx_for_each_channel_rx_queue(rx_queue, channel) {
630 efx_init_rx_queue(rx_queue); 640 efx_init_rx_queue(rx_queue);
641 atomic_inc(&efx->active_queues);
631 efx_nic_generate_fill_event(rx_queue); 642 efx_nic_generate_fill_event(rx_queue);
632 } 643 }
633 644
@@ -722,7 +733,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
722 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; 733 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
723 u32 old_rxq_entries, old_txq_entries; 734 u32 old_rxq_entries, old_txq_entries;
724 unsigned i, next_buffer_table = 0; 735 unsigned i, next_buffer_table = 0;
725 int rc; 736 int rc, rc2;
726 737
727 rc = efx_check_disabled(efx); 738 rc = efx_check_disabled(efx);
728 if (rc) 739 if (rc)
@@ -802,9 +813,16 @@ out:
802 } 813 }
803 } 814 }
804 815
805 efx_soft_enable_interrupts(efx); 816 rc2 = efx_soft_enable_interrupts(efx);
806 efx_start_all(efx); 817 if (rc2) {
807 netif_device_attach(efx->net_dev); 818 rc = rc ? rc : rc2;
819 netif_err(efx, drv, efx->net_dev,
820 "unable to restart interrupts on channel reallocation\n");
821 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
822 } else {
823 efx_start_all(efx);
824 netif_device_attach(efx->net_dev);
825 }
808 return rc; 826 return rc;
809 827
810rollback: 828rollback:
@@ -1327,9 +1345,10 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1327 return 0; 1345 return 0;
1328} 1346}
1329 1347
1330static void efx_soft_enable_interrupts(struct efx_nic *efx) 1348static int efx_soft_enable_interrupts(struct efx_nic *efx)
1331{ 1349{
1332 struct efx_channel *channel; 1350 struct efx_channel *channel, *end_channel;
1351 int rc;
1333 1352
1334 BUG_ON(efx->state == STATE_DISABLED); 1353 BUG_ON(efx->state == STATE_DISABLED);
1335 1354
@@ -1337,12 +1356,28 @@ static void efx_soft_enable_interrupts(struct efx_nic *efx)
1337 smp_wmb(); 1356 smp_wmb();
1338 1357
1339 efx_for_each_channel(channel, efx) { 1358 efx_for_each_channel(channel, efx) {
1340 if (!channel->type->keep_eventq) 1359 if (!channel->type->keep_eventq) {
1341 efx_init_eventq(channel); 1360 rc = efx_init_eventq(channel);
1361 if (rc)
1362 goto fail;
1363 }
1342 efx_start_eventq(channel); 1364 efx_start_eventq(channel);
1343 } 1365 }
1344 1366
1345 efx_mcdi_mode_event(efx); 1367 efx_mcdi_mode_event(efx);
1368
1369 return 0;
1370fail:
1371 end_channel = channel;
1372 efx_for_each_channel(channel, efx) {
1373 if (channel == end_channel)
1374 break;
1375 efx_stop_eventq(channel);
1376 if (!channel->type->keep_eventq)
1377 efx_fini_eventq(channel);
1378 }
1379
1380 return rc;
1346} 1381}
1347 1382
1348static void efx_soft_disable_interrupts(struct efx_nic *efx) 1383static void efx_soft_disable_interrupts(struct efx_nic *efx)
@@ -1368,11 +1403,15 @@ static void efx_soft_disable_interrupts(struct efx_nic *efx)
1368 if (!channel->type->keep_eventq) 1403 if (!channel->type->keep_eventq)
1369 efx_fini_eventq(channel); 1404 efx_fini_eventq(channel);
1370 } 1405 }
1406
1407 /* Flush the asynchronous MCDI request queue */
1408 efx_mcdi_flush_async(efx);
1371} 1409}
1372 1410
1373static void efx_enable_interrupts(struct efx_nic *efx) 1411static int efx_enable_interrupts(struct efx_nic *efx)
1374{ 1412{
1375 struct efx_channel *channel; 1413 struct efx_channel *channel, *end_channel;
1414 int rc;
1376 1415
1377 BUG_ON(efx->state == STATE_DISABLED); 1416 BUG_ON(efx->state == STATE_DISABLED);
1378 1417
@@ -1384,11 +1423,31 @@ static void efx_enable_interrupts(struct efx_nic *efx)
1384 efx->type->irq_enable_master(efx); 1423 efx->type->irq_enable_master(efx);
1385 1424
1386 efx_for_each_channel(channel, efx) { 1425 efx_for_each_channel(channel, efx) {
1426 if (channel->type->keep_eventq) {
1427 rc = efx_init_eventq(channel);
1428 if (rc)
1429 goto fail;
1430 }
1431 }
1432
1433 rc = efx_soft_enable_interrupts(efx);
1434 if (rc)
1435 goto fail;
1436
1437 return 0;
1438
1439fail:
1440 end_channel = channel;
1441 efx_for_each_channel(channel, efx) {
1442 if (channel == end_channel)
1443 break;
1387 if (channel->type->keep_eventq) 1444 if (channel->type->keep_eventq)
1388 efx_init_eventq(channel); 1445 efx_fini_eventq(channel);
1389 } 1446 }
1390 1447
1391 efx_soft_enable_interrupts(efx); 1448 efx->type->irq_disable_non_ev(efx);
1449
1450 return rc;
1392} 1451}
1393 1452
1394static void efx_disable_interrupts(struct efx_nic *efx) 1453static void efx_disable_interrupts(struct efx_nic *efx)
@@ -1459,9 +1518,11 @@ static int efx_probe_nic(struct efx_nic *efx)
1459 * in MSI-X interrupts. */ 1518 * in MSI-X interrupts. */
1460 rc = efx_probe_interrupts(efx); 1519 rc = efx_probe_interrupts(efx);
1461 if (rc) 1520 if (rc)
1462 goto fail; 1521 goto fail1;
1463 1522
1464 efx->type->dimension_resources(efx); 1523 rc = efx->type->dimension_resources(efx);
1524 if (rc)
1525 goto fail2;
1465 1526
1466 if (efx->n_channels > 1) 1527 if (efx->n_channels > 1)
1467 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); 1528 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1479,7 +1540,9 @@ static int efx_probe_nic(struct efx_nic *efx)
1479 1540
1480 return 0; 1541 return 0;
1481 1542
1482fail: 1543fail2:
1544 efx_remove_interrupts(efx);
1545fail1:
1483 efx->type->remove(efx); 1546 efx->type->remove(efx);
1484 return rc; 1547 return rc;
1485} 1548}
@@ -2012,7 +2075,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
2012 return 0; 2075 return 0;
2013} 2076}
2014 2077
2015static const struct net_device_ops efx_netdev_ops = { 2078static const struct net_device_ops efx_farch_netdev_ops = {
2016 .ndo_open = efx_net_open, 2079 .ndo_open = efx_net_open,
2017 .ndo_stop = efx_net_stop, 2080 .ndo_stop = efx_net_stop,
2018 .ndo_get_stats64 = efx_net_stats, 2081 .ndo_get_stats64 = efx_net_stats,
@@ -2039,6 +2102,26 @@ static const struct net_device_ops efx_netdev_ops = {
2039#endif 2102#endif
2040}; 2103};
2041 2104
2105static const struct net_device_ops efx_ef10_netdev_ops = {
2106 .ndo_open = efx_net_open,
2107 .ndo_stop = efx_net_stop,
2108 .ndo_get_stats64 = efx_net_stats,
2109 .ndo_tx_timeout = efx_watchdog,
2110 .ndo_start_xmit = efx_hard_start_xmit,
2111 .ndo_validate_addr = eth_validate_addr,
2112 .ndo_do_ioctl = efx_ioctl,
2113 .ndo_change_mtu = efx_change_mtu,
2114 .ndo_set_mac_address = efx_set_mac_address,
2115 .ndo_set_rx_mode = efx_set_rx_mode,
2116 .ndo_set_features = efx_set_features,
2117#ifdef CONFIG_NET_POLL_CONTROLLER
2118 .ndo_poll_controller = efx_netpoll,
2119#endif
2120#ifdef CONFIG_RFS_ACCEL
2121 .ndo_rx_flow_steer = efx_filter_rfs,
2122#endif
2123};
2124
2042static void efx_update_name(struct efx_nic *efx) 2125static void efx_update_name(struct efx_nic *efx)
2043{ 2126{
2044 strcpy(efx->name, efx->net_dev->name); 2127 strcpy(efx->name, efx->net_dev->name);
@@ -2051,7 +2134,8 @@ static int efx_netdev_event(struct notifier_block *this,
2051{ 2134{
2052 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); 2135 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2053 2136
2054 if (net_dev->netdev_ops == &efx_netdev_ops && 2137 if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
2138 net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
2055 event == NETDEV_CHANGENAME) 2139 event == NETDEV_CHANGENAME)
2056 efx_update_name(netdev_priv(net_dev)); 2140 efx_update_name(netdev_priv(net_dev));
2057 2141
@@ -2078,7 +2162,12 @@ static int efx_register_netdev(struct efx_nic *efx)
2078 2162
2079 net_dev->watchdog_timeo = 5 * HZ; 2163 net_dev->watchdog_timeo = 5 * HZ;
2080 net_dev->irq = efx->pci_dev->irq; 2164 net_dev->irq = efx->pci_dev->irq;
2081 net_dev->netdev_ops = &efx_netdev_ops; 2165 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
2166 net_dev->netdev_ops = &efx_ef10_netdev_ops;
2167 net_dev->priv_flags |= IFF_UNICAST_FLT;
2168 } else {
2169 net_dev->netdev_ops = &efx_farch_netdev_ops;
2170 }
2082 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 2171 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
2083 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; 2172 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2084 2173
@@ -2202,7 +2291,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2202 "could not restore PHY settings\n"); 2291 "could not restore PHY settings\n");
2203 } 2292 }
2204 2293
2205 efx_enable_interrupts(efx); 2294 rc = efx_enable_interrupts(efx);
2295 if (rc)
2296 goto fail;
2206 efx_restore_filters(efx); 2297 efx_restore_filters(efx);
2207 efx_sriov_reset(efx); 2298 efx_sriov_reset(efx);
2208 2299
@@ -2398,6 +2489,8 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
2398 .driver_data = (unsigned long) &siena_a0_nic_type}, 2489 .driver_data = (unsigned long) &siena_a0_nic_type},
2399 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ 2490 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
2400 .driver_data = (unsigned long) &siena_a0_nic_type}, 2491 .driver_data = (unsigned long) &siena_a0_nic_type},
2492 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
2493 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2401 {0} /* end of list */ 2494 {0} /* end of list */
2402}; 2495};
2403 2496
@@ -2646,10 +2739,14 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2646 rc = efx_nic_init_interrupt(efx); 2739 rc = efx_nic_init_interrupt(efx);
2647 if (rc) 2740 if (rc)
2648 goto fail5; 2741 goto fail5;
2649 efx_enable_interrupts(efx); 2742 rc = efx_enable_interrupts(efx);
2743 if (rc)
2744 goto fail6;
2650 2745
2651 return 0; 2746 return 0;
2652 2747
2748 fail6:
2749 efx_nic_fini_interrupt(efx);
2653 fail5: 2750 fail5:
2654 efx_fini_port(efx); 2751 efx_fini_port(efx);
2655 fail4: 2752 fail4:
@@ -2777,12 +2874,15 @@ static int efx_pm_freeze(struct device *dev)
2777 2874
2778static int efx_pm_thaw(struct device *dev) 2875static int efx_pm_thaw(struct device *dev)
2779{ 2876{
2877 int rc;
2780 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2878 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2781 2879
2782 rtnl_lock(); 2880 rtnl_lock();
2783 2881
2784 if (efx->state != STATE_DISABLED) { 2882 if (efx->state != STATE_DISABLED) {
2785 efx_enable_interrupts(efx); 2883 rc = efx_enable_interrupts(efx);
2884 if (rc)
2885 goto fail;
2786 2886
2787 mutex_lock(&efx->mac_lock); 2887 mutex_lock(&efx->mac_lock);
2788 efx->phy_op->reconfigure(efx); 2888 efx->phy_op->reconfigure(efx);
@@ -2803,6 +2903,11 @@ static int efx_pm_thaw(struct device *dev)
2803 queue_work(reset_workqueue, &efx->reset_work); 2903 queue_work(reset_workqueue, &efx->reset_work);
2804 2904
2805 return 0; 2905 return 0;
2906
2907fail:
2908 rtnl_unlock();
2909
2910 return rc;
2806} 2911}
2807 2912
2808static int efx_pm_poweroff(struct device *dev) 2913static int efx_pm_poweroff(struct device *dev)
@@ -2839,8 +2944,8 @@ static int efx_pm_resume(struct device *dev)
2839 rc = efx->type->init(efx); 2944 rc = efx->type->init(efx);
2840 if (rc) 2945 if (rc)
2841 return rc; 2946 return rc;
2842 efx_pm_thaw(dev); 2947 rc = efx_pm_thaw(dev);
2843 return 0; 2948 return rc;
2844} 2949}
2845 2950
2846static int efx_pm_suspend(struct device *dev) 2951static int efx_pm_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 3bbc047baea2..34d00f5771fe 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -79,13 +79,20 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
79 * On success, return the filter ID. 79 * On success, return the filter ID.
80 * On failure, return a negative error code. 80 * On failure, return a negative error code.
81 * 81 *
82 * If an existing filter has equal match values to the new filter 82 * If existing filters have equal match values to the new filter spec,
83 * spec, then the new filter might replace it, depending on the 83 * then the new filter might replace them or the function might fail,
84 * relative priorities. If the existing filter has lower priority, or 84 * as follows.
85 * if @replace_equal is set and it has equal priority, then it is 85 *
86 * replaced. Otherwise the function fails, returning -%EPERM if 86 * 1. If the existing filters have lower priority, or @replace_equal
87 * the existing filter has higher priority or -%EEXIST if it has 87 * is set and they have equal priority, replace them.
88 * equal priority. 88 *
89 * 2. If the existing filters have higher priority, return -%EPERM.
90 *
91 * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
92 * support delivery to multiple recipients, return -%EEXIST.
93 *
94 * This implies that filters for multiple multicast recipients must
95 * all be inserted with the same priority and @replace_equal = %false.
89 */ 96 */
90static inline s32 efx_filter_insert_filter(struct efx_nic *efx, 97static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
91 struct efx_filter_spec *spec, 98 struct efx_filter_spec *spec,
@@ -169,6 +176,7 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
169static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} 176static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
170#define efx_filter_rfs_enabled() 0 177#define efx_filter_rfs_enabled() 0
171#endif 178#endif
179extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
172 180
173/* Channels */ 181/* Channels */
174extern int efx_channel_dummy_op_int(struct efx_channel *channel); 182extern int efx_channel_dummy_op_int(struct efx_channel *channel);
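The numbered rules in the rewritten efx_filter_insert_filter() kernel-doc above reduce to a priority comparison plus the replace_equal flag. The standalone model below (not driver code) mirrors rules 1 and 2 and the plain -EEXIST outcome of rule 3; the multicast-recipient exception, where an equal-priority insert can still succeed if the NIC can deliver to multiple recipients, is deliberately left out to keep the sketch short.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of the documented decision: lower-priority conflicting filters are
 * replaced, equal priority is replaced only when replace_equal is set,
 * higher priority wins with -EPERM, and an unresolved equal-priority clash
 * yields -EEXIST.
 */
static int model_insert(int existing_prio, int new_prio, bool replace_equal)
{
    if (existing_prio < new_prio ||
        (existing_prio == new_prio && replace_equal))
        return 0;           /* existing filter replaced */
    if (existing_prio > new_prio)
        return -EPERM;      /* higher-priority filter stays */
    return -EEXIST;         /* equal priority, no replacement */
}

int main(void)
{
    printf("%d %d %d\n",
           model_insert(1, 2, false),   /* replace: 0 */
           model_insert(2, 1, false),   /* refuse: -EPERM */
           model_insert(2, 2, false));  /* clash:  -EEXIST */
    return 0;
}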
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 8665921d7170..7fdfee019092 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 63546930f954..5b471cf5c323 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -77,6 +77,8 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
77 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), 77 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
78 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), 78 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
79 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc), 79 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
80 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
81 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
80}; 82};
81 83
82#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc) 84#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index ec77611a52e4..8685f99d872a 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -2174,10 +2174,11 @@ out:
2174 return rc; 2174 return rc;
2175} 2175}
2176 2176
2177static void falcon_dimension_resources(struct efx_nic *efx) 2177static int falcon_dimension_resources(struct efx_nic *efx)
2178{ 2178{
2179 efx->rx_dc_base = 0x20000; 2179 efx->rx_dc_base = 0x20000;
2180 efx->tx_dc_base = 0x26000; 2180 efx->tx_dc_base = 0x26000;
2181 return 0;
2181} 2182}
2182 2183
2183/* Probe all SPI devices on the NIC */ 2184/* Probe all SPI devices on the NIC */
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index ec1e99d0dcad..1736f4b806af 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2007-2010 Solarflare Communications Inc. 3 * Copyright 2007-2012 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index d21483dfea40..c0907d884d75 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2011 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -325,6 +325,8 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
325 txd = efx_tx_desc(tx_queue, write_ptr); 325 txd = efx_tx_desc(tx_queue, write_ptr);
326 ++tx_queue->write_count; 326 ++tx_queue->write_count;
327 327
328 EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
329
328 /* Create TX descriptor ring entry */ 330 /* Create TX descriptor ring entry */
329 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); 331 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
330 EFX_POPULATE_QWORD_4(*txd, 332 EFX_POPULATE_QWORD_4(*txd,
@@ -594,7 +596,7 @@ static bool efx_farch_flush_wake(struct efx_nic *efx)
594 /* Ensure that all updates are visible to efx_farch_flush_queues() */ 596 /* Ensure that all updates are visible to efx_farch_flush_queues() */
595 smp_mb(); 597 smp_mb();
596 598
597 return (atomic_read(&efx->drain_pending) == 0 || 599 return (atomic_read(&efx->active_queues) == 0 ||
598 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT 600 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
599 && atomic_read(&efx->rxq_flush_pending) > 0)); 601 && atomic_read(&efx->rxq_flush_pending) > 0));
600} 602}
@@ -626,7 +628,7 @@ static bool efx_check_tx_flush_complete(struct efx_nic *efx)
626 netif_dbg(efx, hw, efx->net_dev, 628 netif_dbg(efx, hw, efx->net_dev,
627 "flush complete on TXQ %d, so drain " 629 "flush complete on TXQ %d, so drain "
628 "the queue\n", tx_queue->queue); 630 "the queue\n", tx_queue->queue);
629 /* Don't need to increment drain_pending as it 631 /* Don't need to increment active_queues as it
630 * has already been incremented for the queues 632 * has already been incremented for the queues
631 * which did not drain 633 * which did not drain
632 */ 634 */
@@ -653,17 +655,15 @@ static int efx_farch_do_flush(struct efx_nic *efx)
653 655
654 efx_for_each_channel(channel, efx) { 656 efx_for_each_channel(channel, efx) {
655 efx_for_each_channel_tx_queue(tx_queue, channel) { 657 efx_for_each_channel_tx_queue(tx_queue, channel) {
656 atomic_inc(&efx->drain_pending);
657 efx_farch_flush_tx_queue(tx_queue); 658 efx_farch_flush_tx_queue(tx_queue);
658 } 659 }
659 efx_for_each_channel_rx_queue(rx_queue, channel) { 660 efx_for_each_channel_rx_queue(rx_queue, channel) {
660 atomic_inc(&efx->drain_pending);
661 rx_queue->flush_pending = true; 661 rx_queue->flush_pending = true;
662 atomic_inc(&efx->rxq_flush_pending); 662 atomic_inc(&efx->rxq_flush_pending);
663 } 663 }
664 } 664 }
665 665
666 while (timeout && atomic_read(&efx->drain_pending) > 0) { 666 while (timeout && atomic_read(&efx->active_queues) > 0) {
667 /* If SRIOV is enabled, then offload receive queue flushing to 667 /* If SRIOV is enabled, then offload receive queue flushing to
668 * the firmware (though we will still have to poll for 668 * the firmware (though we will still have to poll for
669 * completion). If that fails, fall back to the old scheme. 669 * completion). If that fails, fall back to the old scheme.
@@ -699,15 +699,15 @@ static int efx_farch_do_flush(struct efx_nic *efx)
699 timeout); 699 timeout);
700 } 700 }
701 701
702 if (atomic_read(&efx->drain_pending) && 702 if (atomic_read(&efx->active_queues) &&
703 !efx_check_tx_flush_complete(efx)) { 703 !efx_check_tx_flush_complete(efx)) {
704 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " 704 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
705 "(rx %d+%d)\n", atomic_read(&efx->drain_pending), 705 "(rx %d+%d)\n", atomic_read(&efx->active_queues),
706 atomic_read(&efx->rxq_flush_outstanding), 706 atomic_read(&efx->rxq_flush_outstanding),
707 atomic_read(&efx->rxq_flush_pending)); 707 atomic_read(&efx->rxq_flush_pending));
708 rc = -ETIMEDOUT; 708 rc = -ETIMEDOUT;
709 709
710 atomic_set(&efx->drain_pending, 0); 710 atomic_set(&efx->active_queues, 0);
711 atomic_set(&efx->rxq_flush_pending, 0); 711 atomic_set(&efx->rxq_flush_pending, 0);
712 atomic_set(&efx->rxq_flush_outstanding, 0); 712 atomic_set(&efx->rxq_flush_outstanding, 0);
713 } 713 }
@@ -1123,8 +1123,8 @@ efx_farch_handle_drain_event(struct efx_channel *channel)
1123{ 1123{
1124 struct efx_nic *efx = channel->efx; 1124 struct efx_nic *efx = channel->efx;
1125 1125
1126 WARN_ON(atomic_read(&efx->drain_pending) == 0); 1126 WARN_ON(atomic_read(&efx->active_queues) == 0);
1127 atomic_dec(&efx->drain_pending); 1127 atomic_dec(&efx->active_queues);
1128 if (efx_farch_flush_wake(efx)) 1128 if (efx_farch_flush_wake(efx))
1129 wake_up(&efx->flush_wq); 1129 wake_up(&efx->flush_wq);
1130} 1130}
@@ -1325,7 +1325,7 @@ int efx_farch_ev_probe(struct efx_channel *channel)
1325 entries * sizeof(efx_qword_t)); 1325 entries * sizeof(efx_qword_t));
1326} 1326}
1327 1327
1328void efx_farch_ev_init(struct efx_channel *channel) 1328int efx_farch_ev_init(struct efx_channel *channel)
1329{ 1329{
1330 efx_oword_t reg; 1330 efx_oword_t reg;
1331 struct efx_nic *efx = channel->efx; 1331 struct efx_nic *efx = channel->efx;
@@ -1357,7 +1357,7 @@ void efx_farch_ev_init(struct efx_channel *channel)
1357 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, 1357 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1358 channel->channel); 1358 channel->channel);
1359 1359
1360 efx->type->push_irq_moderation(channel); 1360 return 0;
1361} 1361}
1362 1362
1363void efx_farch_ev_fini(struct efx_channel *channel) 1363void efx_farch_ev_fini(struct efx_channel *channel)
diff --git a/drivers/net/ethernet/sfc/farch_regs.h b/drivers/net/ethernet/sfc/farch_regs.h
index 491b1039006b..7019a712e799 100644
--- a/drivers/net/ethernet/sfc/farch_regs.h
+++ b/drivers/net/ethernet/sfc/farch_regs.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2012 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index e459e43a2798..63c77a557178 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc. 3 * Copyright 2005-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 19e8b95b7af6..96ce507d8602 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -20,7 +20,7 @@
20 * 20 *
21 ************************************************************************** 21 **************************************************************************
22 * 22 *
23 * Notes on locking strategy: 23 * Notes on locking strategy for the Falcon architecture:
24 * 24 *
25 * Many CSRs are very wide and cannot be read or written atomically. 25 * Many CSRs are very wide and cannot be read or written atomically.
26 * Writes from the host are buffered by the Bus Interface Unit (BIU) 26 * Writes from the host are buffered by the Bus Interface Unit (BIU)
@@ -54,6 +54,12 @@
54 * register while the collector already holds values for some other 54 * register while the collector already holds values for some other
55 * register, the write is discarded and the collector maintains its 55 * register, the write is discarded and the collector maintains its
56 * current state. 56 * current state.
57 *
58 * The EF10 architecture exposes very few registers to the host and
59 * most of them are only 32 bits wide. The only exceptions are the MC
60 * doorbell register pair, which has its own latching, and
61 * TX_DESC_UPD, which works in a similar way to the Falcon
62 * architecture.
57 */ 63 */
58 64
59#if BITS_PER_LONG == 64 65#if BITS_PER_LONG == 64
@@ -237,8 +243,8 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
237 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \ 243 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
238 page) 244 page)
239 245
240/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of 246/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
241 * RX_DESC_UPD or TX_DESC_UPD) 247 * high bits of RX_DESC_UPD or TX_DESC_UPD)
242 */ 248 */
243static inline void 249static inline void
244_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value, 250_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
@@ -249,8 +255,12 @@ _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
249#define efx_writed_page(efx, value, reg, page) \ 255#define efx_writed_page(efx, value, reg, page) \
250 _efx_writed_page(efx, value, \ 256 _efx_writed_page(efx, value, \
251 reg + \ 257 reg + \
252 BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \ 258 BUILD_BUG_ON_ZERO((reg) != 0x400 && \
253 && (reg) != 0xa1c), \ 259 (reg) != 0x420 && \
260 (reg) != 0x830 && \
261 (reg) != 0x83c && \
262 (reg) != 0xa18 && \
263 (reg) != 0xa1c), \
254 page) 264 page)
255 265
256/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug 266/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 63121adbc3bb..128d7cdf9eb2 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2008-2011 Solarflare Communications Inc. 3 * Copyright 2008-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <asm/cmpxchg.h>
11#include "net_driver.h" 12#include "net_driver.h"
12#include "nic.h" 13#include "nic.h"
13#include "io.h" 14#include "io.h"
@@ -36,6 +37,20 @@
36#define SEQ_MASK \ 37#define SEQ_MASK \
37 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) 38 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
38 39
40struct efx_mcdi_async_param {
41 struct list_head list;
42 unsigned int cmd;
43 size_t inlen;
44 size_t outlen;
45 efx_mcdi_async_completer *complete;
46 unsigned long cookie;
47 /* followed by request/response buffer */
48};
49
50static void efx_mcdi_timeout_async(unsigned long context);
51static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
52 bool *was_attached_out);
53
39static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) 54static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
40{ 55{
41 EFX_BUG_ON_PARANOID(!efx->mcdi); 56 EFX_BUG_ON_PARANOID(!efx->mcdi);
@@ -45,40 +60,76 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
45int efx_mcdi_init(struct efx_nic *efx) 60int efx_mcdi_init(struct efx_nic *efx)
46{ 61{
47 struct efx_mcdi_iface *mcdi; 62 struct efx_mcdi_iface *mcdi;
63 bool already_attached;
64 int rc;
48 65
49 efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); 66 efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
50 if (!efx->mcdi) 67 if (!efx->mcdi)
51 return -ENOMEM; 68 return -ENOMEM;
52 69
53 mcdi = efx_mcdi(efx); 70 mcdi = efx_mcdi(efx);
71 mcdi->efx = efx;
54 init_waitqueue_head(&mcdi->wq); 72 init_waitqueue_head(&mcdi->wq);
55 spin_lock_init(&mcdi->iface_lock); 73 spin_lock_init(&mcdi->iface_lock);
56 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); 74 mcdi->state = MCDI_STATE_QUIESCENT;
57 mcdi->mode = MCDI_MODE_POLL; 75 mcdi->mode = MCDI_MODE_POLL;
76 spin_lock_init(&mcdi->async_lock);
77 INIT_LIST_HEAD(&mcdi->async_list);
78 setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
79 (unsigned long)mcdi);
58 80
59 (void) efx_mcdi_poll_reboot(efx); 81 (void) efx_mcdi_poll_reboot(efx);
60 mcdi->new_epoch = true; 82 mcdi->new_epoch = true;
61 83
62 /* Recover from a failed assertion before probing */ 84 /* Recover from a failed assertion before probing */
63 return efx_mcdi_handle_assertion(efx); 85 rc = efx_mcdi_handle_assertion(efx);
86 if (rc)
87 return rc;
88
89 /* Let the MC (and BMC, if this is a LOM) know that the driver
90 * is loaded. We should do this before we reset the NIC.
91 */
92 rc = efx_mcdi_drv_attach(efx, true, &already_attached);
93 if (rc) {
94 netif_err(efx, probe, efx->net_dev,
95 "Unable to register driver with MCPU\n");
96 return rc;
97 }
98 if (already_attached)
99 /* Not a fatal error */
100 netif_err(efx, probe, efx->net_dev,
101 "Host already registered with MCPU\n");
102
103 return 0;
64} 104}
65 105
66void efx_mcdi_fini(struct efx_nic *efx) 106void efx_mcdi_fini(struct efx_nic *efx)
67{ 107{
68 BUG_ON(efx->mcdi && 108 if (!efx->mcdi)
69 atomic_read(&efx->mcdi->iface.state) != MCDI_STATE_QUIESCENT); 109 return;
110
111 BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
112
113 /* Relinquish the device (back to the BMC, if this is a LOM) */
114 efx_mcdi_drv_attach(efx, false, NULL);
115
70 kfree(efx->mcdi); 116 kfree(efx->mcdi);
71} 117}
72 118
73static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, 119static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
74 const efx_dword_t *inbuf, size_t inlen) 120 const efx_dword_t *inbuf, size_t inlen)
75{ 121{
76 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
77 efx_dword_t hdr[2]; 123 efx_dword_t hdr[2];
78 size_t hdr_len; 124 size_t hdr_len;
79 u32 xflags, seqno; 125 u32 xflags, seqno;
80 126
81 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 127 BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
128
129 /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
130 spin_lock_bh(&mcdi->iface_lock);
131 ++mcdi->seqno;
132 spin_unlock_bh(&mcdi->iface_lock);
82 133
83 seqno = mcdi->seqno & SEQ_MASK; 134 seqno = mcdi->seqno & SEQ_MASK;
84 xflags = 0; 135 xflags = 0;
@@ -114,6 +165,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
114 } 165 }
115 166
116 efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); 167 efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
168
169 mcdi->new_epoch = false;
117} 170}
118 171
119static int efx_mcdi_errno(unsigned int mcdi_err) 172static int efx_mcdi_errno(unsigned int mcdi_err)
@@ -246,25 +299,30 @@ int efx_mcdi_poll_reboot(struct efx_nic *efx)
246 return efx->type->mcdi_poll_reboot(efx); 299 return efx->type->mcdi_poll_reboot(efx);
247} 300}
248 301
249static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) 302static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
303{
304 return cmpxchg(&mcdi->state,
305 MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
306 MCDI_STATE_QUIESCENT;
307}
308
309static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
250{ 310{
251 /* Wait until the interface becomes QUIESCENT and we win the race 311 /* Wait until the interface becomes QUIESCENT and we win the race
252 * to mark it RUNNING. */ 312 * to mark it RUNNING_SYNC.
313 */
253 wait_event(mcdi->wq, 314 wait_event(mcdi->wq,
254 atomic_cmpxchg(&mcdi->state, 315 cmpxchg(&mcdi->state,
255 MCDI_STATE_QUIESCENT, 316 MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
256 MCDI_STATE_RUNNING) 317 MCDI_STATE_QUIESCENT);
257 == MCDI_STATE_QUIESCENT);
258} 318}
259 319
260static int efx_mcdi_await_completion(struct efx_nic *efx) 320static int efx_mcdi_await_completion(struct efx_nic *efx)
261{ 321{
262 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 322 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
263 323
264 if (wait_event_timeout( 324 if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
265 mcdi->wq, 325 MCDI_RPC_TIMEOUT) == 0)
266 atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
267 MCDI_RPC_TIMEOUT) == 0)
268 return -ETIMEDOUT; 326 return -ETIMEDOUT;
269 327
270 /* Check if efx_mcdi_set_mode() switched us back to polled completions. 328 /* Check if efx_mcdi_set_mode() switched us back to polled completions.
@@ -281,17 +339,14 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
281 return 0; 339 return 0;
282} 340}
283 341
284static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) 342/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
343 * requester. Return whether this was done. Does not take any locks.
344 */
345static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
285{ 346{
286 /* If the interface is RUNNING, then move to COMPLETED and wake any 347 if (cmpxchg(&mcdi->state,
287 * waiters. If the interface isn't in RUNNING then we've received a 348 MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
288 * duplicate completion after we've already transitioned back to 349 MCDI_STATE_RUNNING_SYNC) {
289 * QUIESCENT. [A subsequent invocation would increment seqno, so would
290 * have failed the seqno check].
291 */
292 if (atomic_cmpxchg(&mcdi->state,
293 MCDI_STATE_RUNNING,
294 MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
295 wake_up(&mcdi->wq); 350 wake_up(&mcdi->wq);
296 return true; 351 return true;
297 } 352 }
@@ -301,10 +356,91 @@ static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
301 356
302static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) 357static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
303{ 358{
304 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); 359 if (mcdi->mode == MCDI_MODE_EVENTS) {
360 struct efx_mcdi_async_param *async;
361 struct efx_nic *efx = mcdi->efx;
362
363 /* Process the asynchronous request queue */
364 spin_lock_bh(&mcdi->async_lock);
365 async = list_first_entry_or_null(
366 &mcdi->async_list, struct efx_mcdi_async_param, list);
367 if (async) {
368 mcdi->state = MCDI_STATE_RUNNING_ASYNC;
369 efx_mcdi_send_request(efx, async->cmd,
370 (const efx_dword_t *)(async + 1),
371 async->inlen);
372 mod_timer(&mcdi->async_timer,
373 jiffies + MCDI_RPC_TIMEOUT);
374 }
375 spin_unlock_bh(&mcdi->async_lock);
376
377 if (async)
378 return;
379 }
380
381 mcdi->state = MCDI_STATE_QUIESCENT;
305 wake_up(&mcdi->wq); 382 wake_up(&mcdi->wq);
306} 383}
307 384
385/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
386 * asynchronous completion function, and release the interface.
387 * Return whether this was done. Must be called in bh-disabled
388 * context. Will take iface_lock and async_lock.
389 */
390static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
391{
392 struct efx_nic *efx = mcdi->efx;
393 struct efx_mcdi_async_param *async;
394 size_t hdr_len, data_len;
395 efx_dword_t *outbuf;
396 int rc;
397
398 if (cmpxchg(&mcdi->state,
399 MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
400 MCDI_STATE_RUNNING_ASYNC)
401 return false;
402
403 spin_lock(&mcdi->iface_lock);
404 if (timeout) {
405 /* Ensure that if the completion event arrives later,
406 * the seqno check in efx_mcdi_ev_cpl() will fail
407 */
408 ++mcdi->seqno;
409 ++mcdi->credits;
410 rc = -ETIMEDOUT;
411 hdr_len = 0;
412 data_len = 0;
413 } else {
414 rc = mcdi->resprc;
415 hdr_len = mcdi->resp_hdr_len;
416 data_len = mcdi->resp_data_len;
417 }
418 spin_unlock(&mcdi->iface_lock);
419
420 /* Stop the timer. In case the timer function is running, we
421 * must wait for it to return so that there is no possibility
422 * of it aborting the next request.
423 */
424 if (!timeout)
425 del_timer_sync(&mcdi->async_timer);
426
427 spin_lock(&mcdi->async_lock);
428 async = list_first_entry(&mcdi->async_list,
429 struct efx_mcdi_async_param, list);
430 list_del(&async->list);
431 spin_unlock(&mcdi->async_lock);
432
433 outbuf = (efx_dword_t *)(async + 1);
434 efx->type->mcdi_read_response(efx, outbuf, hdr_len,
435 min(async->outlen, data_len));
436 async->complete(efx, async->cookie, rc, outbuf, data_len);
437 kfree(async);
438
439 efx_mcdi_release(mcdi);
440
441 return true;
442}
443
308static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, 444static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
309 unsigned int datalen, unsigned int mcdi_err) 445 unsigned int datalen, unsigned int mcdi_err)
310{ 446{
@@ -336,8 +472,40 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
336 472
337 spin_unlock(&mcdi->iface_lock); 473 spin_unlock(&mcdi->iface_lock);
338 474
339 if (wake) 475 if (wake) {
340 efx_mcdi_complete(mcdi); 476 if (!efx_mcdi_complete_async(mcdi, false))
477 (void) efx_mcdi_complete_sync(mcdi);
478
479 /* If the interface isn't RUNNING_ASYNC or
480 * RUNNING_SYNC then we've received a duplicate
481 * completion after we've already transitioned back to
482 * QUIESCENT. [A subsequent invocation would increment
483 * seqno, so would have failed the seqno check].
484 */
485 }
486}
487
488static void efx_mcdi_timeout_async(unsigned long context)
489{
490 struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
491
492 efx_mcdi_complete_async(mcdi, true);
493}
494
495static int
496efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
497{
498 if (efx->type->mcdi_max_ver < 0 ||
499 (efx->type->mcdi_max_ver < 2 &&
500 cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
501 return -EINVAL;
502
503 if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
504 (efx->type->mcdi_max_ver < 2 &&
505 inlen > MCDI_CTL_SDU_LEN_MAX_V1))
506 return -EMSGSIZE;
507
508 return 0;
341} 509}
342 510
343int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, 511int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
@@ -358,27 +526,84 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
358 const efx_dword_t *inbuf, size_t inlen) 526 const efx_dword_t *inbuf, size_t inlen)
359{ 527{
360 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 528 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
529 int rc;
361 530
362 if (efx->type->mcdi_max_ver < 0 || 531 rc = efx_mcdi_check_supported(efx, cmd, inlen);
363 (efx->type->mcdi_max_ver < 2 && 532 if (rc)
364 cmd > MC_CMD_CMD_SPACE_ESCAPE_7)) 533 return rc;
365 return -EINVAL;
366 534
367 if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 || 535 efx_mcdi_acquire_sync(mcdi);
368 (efx->type->mcdi_max_ver < 2 && 536 efx_mcdi_send_request(efx, cmd, inbuf, inlen);
369 inlen > MCDI_CTL_SDU_LEN_MAX_V1)) 537 return 0;
370 return -EMSGSIZE; 538}
539
540/**
541 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
542 * @efx: NIC through which to issue the command
543 * @cmd: Command type number
544 * @inbuf: Command parameters
545 * @inlen: Length of command parameters, in bytes
546 * @outlen: Length to allocate for response buffer, in bytes
547 * @complete: Function to be called on completion or cancellation.
548 * @cookie: Arbitrary value to be passed to @complete.
549 *
550 * This function does not sleep and therefore may be called in atomic
551 * context. It will fail if event queues are disabled or if MCDI
552 * event completions have been disabled due to an error.
553 *
554 * If it succeeds, the @complete function will be called exactly once
555 * in atomic context, when one of the following occurs:
556 * (a) the completion event is received (in NAPI context)
557 * (b) event queues are disabled (in the process that disables them)
558 * (c) the request times-out (in timer context)
559 */
560int
561efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
562 const efx_dword_t *inbuf, size_t inlen, size_t outlen,
563 efx_mcdi_async_completer *complete, unsigned long cookie)
564{
565 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
566 struct efx_mcdi_async_param *async;
567 int rc;
371 568
372 efx_mcdi_acquire(mcdi); 569 rc = efx_mcdi_check_supported(efx, cmd, inlen);
570 if (rc)
571 return rc;
373 572
374 /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ 573 async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
375 spin_lock_bh(&mcdi->iface_lock); 574 GFP_ATOMIC);
376 ++mcdi->seqno; 575 if (!async)
377 spin_unlock_bh(&mcdi->iface_lock); 576 return -ENOMEM;
378 577
379 efx_mcdi_copyin(efx, cmd, inbuf, inlen); 578 async->cmd = cmd;
380 mcdi->new_epoch = false; 579 async->inlen = inlen;
381 return 0; 580 async->outlen = outlen;
581 async->complete = complete;
582 async->cookie = cookie;
583 memcpy(async + 1, inbuf, inlen);
584
585 spin_lock_bh(&mcdi->async_lock);
586
587 if (mcdi->mode == MCDI_MODE_EVENTS) {
588 list_add_tail(&async->list, &mcdi->async_list);
589
590 /* If this is at the front of the queue, try to start it
591 * immediately
592 */
593 if (mcdi->async_list.next == &async->list &&
594 efx_mcdi_acquire_async(mcdi)) {
595 efx_mcdi_send_request(efx, cmd, inbuf, inlen);
596 mod_timer(&mcdi->async_timer,
597 jiffies + MCDI_RPC_TIMEOUT);
598 }
599 } else {
600 kfree(async);
601 rc = -ENETDOWN;
602 }
603
604 spin_unlock_bh(&mcdi->async_lock);
605
606 return rc;
382} 607}
383 608
384int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, 609int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
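As a usage illustration for the efx_mcdi_rpc_async() kernel-doc above, the fragment below sketches how a caller inside the driver might queue an asynchronous MCDI request and observe its completion. Only the efx_mcdi_rpc_async() prototype and the completer signature are taken from this patch; the command number, buffer handling and the example_* names are placeholders, so this is a hedged sketch rather than code from the series.

/* Hedged sketch only: example_* names and the zero command/cookie values
 * are placeholders, not part of the patch.
 */
static void example_complete(struct efx_nic *efx, unsigned long cookie,
                             int rc, efx_dword_t *outbuf, size_t outlen_actual)
{
    /* Called exactly once, in atomic context: on the completion event,
     * on timeout (rc == -ETIMEDOUT), or when event queues are disabled
     * and the queue is flushed (rc == -ENETDOWN).
     */
    if (rc)
        netif_warn(efx, hw, efx->net_dev,
                   "async MCDI request (cookie %lu) failed: %d\n",
                   cookie, rc);
}

static int example_start_request(struct efx_nic *efx,
                                 const efx_dword_t *inbuf, size_t inlen,
                                 size_t outlen)
{
    /* Safe from atomic context; the request is queued and issued to
     * the MC when the MCDI interface becomes free.
     */
    return efx_mcdi_rpc_async(efx, 0 /* placeholder command */,
                              inbuf, inlen, outlen,
                              example_complete, 0 /* cookie */);
}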
@@ -448,6 +673,10 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
448 return rc; 673 return rc;
449} 674}
450 675
676/* Switch to polled MCDI completions. This can be called in various
677 * error conditions with various locks held, so it must be lockless.
678 * Caller is responsible for flushing asynchronous requests later.
679 */
451void efx_mcdi_mode_poll(struct efx_nic *efx) 680void efx_mcdi_mode_poll(struct efx_nic *efx)
452{ 681{
453 struct efx_mcdi_iface *mcdi; 682 struct efx_mcdi_iface *mcdi;
@@ -465,11 +694,50 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
465 * efx_mcdi_await_completion() will then call efx_mcdi_poll(). 694 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
466 * 695 *
467 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), 696 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
468 * which efx_mcdi_complete() provides for us. 697 * which efx_mcdi_complete_sync() provides for us.
469 */ 698 */
470 mcdi->mode = MCDI_MODE_POLL; 699 mcdi->mode = MCDI_MODE_POLL;
471 700
472 efx_mcdi_complete(mcdi); 701 efx_mcdi_complete_sync(mcdi);
702}
703
704/* Flush any running or queued asynchronous requests, after event processing
705 * is stopped
706 */
707void efx_mcdi_flush_async(struct efx_nic *efx)
708{
709 struct efx_mcdi_async_param *async, *next;
710 struct efx_mcdi_iface *mcdi;
711
712 if (!efx->mcdi)
713 return;
714
715 mcdi = efx_mcdi(efx);
716
717 /* We must be in polling mode so no more requests can be queued */
718 BUG_ON(mcdi->mode != MCDI_MODE_POLL);
719
720 del_timer_sync(&mcdi->async_timer);
721
722 /* If a request is still running, make sure we give the MC
723 * time to complete it so that the response won't overwrite our
724 * next request.
725 */
726 if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
727 efx_mcdi_poll(efx);
728 mcdi->state = MCDI_STATE_QUIESCENT;
729 }
730
731 /* Nothing else will access the async list now, so it is safe
732 * to walk it without holding async_lock. If we hold it while
733 * calling a completer then lockdep may warn that we have
734 * acquired locks in the wrong order.
735 */
736 list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
737 async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
738 list_del(&async->list);
739 kfree(async);
740 }
473} 741}
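The contract above implies a fixed ordering at teardown: the driver must switch to polled completions before flushing, so that no further asynchronous requests can be queued. A sketch of that ordering (the wrapper function itself is illustrative):

static void example_mcdi_quiesce(struct efx_nic *efx)
{
        /* After this, efx_mcdi_rpc_async() returns -ENETDOWN */
        efx_mcdi_mode_poll(efx);
        /* Queued completers are now called with rc = -ENETDOWN */
        efx_mcdi_flush_async(efx);
}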
474 742
475void efx_mcdi_mode_event(struct efx_nic *efx) 743void efx_mcdi_mode_event(struct efx_nic *efx)
@@ -491,7 +759,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
491 * write memory barrier ensure that efx_mcdi_rpc() sees it, which 759 * write memory barrier ensure that efx_mcdi_rpc() sees it, which
492 * efx_mcdi_acquire() provides. 760 * efx_mcdi_acquire() provides.
493 */ 761 */
494 efx_mcdi_acquire(mcdi); 762 efx_mcdi_acquire_sync(mcdi);
495 mcdi->mode = MCDI_MODE_EVENTS; 763 mcdi->mode = MCDI_MODE_EVENTS;
496 efx_mcdi_release(mcdi); 764 efx_mcdi_release(mcdi);
497} 765}
@@ -508,16 +776,21 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
508 * are sent to the same queue, we can't be racing with 776 * are sent to the same queue, we can't be racing with
509 * efx_mcdi_ev_cpl()] 777 * efx_mcdi_ev_cpl()]
510 * 778 *
511 * There's a race here with efx_mcdi_rpc(), because we might receive 779 * If there is an outstanding asynchronous request, we can't
512 * a REBOOT event *before* the request has been copied out. In polled 780 * complete it now (efx_mcdi_complete() would deadlock). The
513 * mode (during startup) this is irrelevant, because efx_mcdi_complete() 781 * reset process will take care of this.
514 * is ignored. In event mode, this condition is just an edge-case of 782 *
515 * receiving a REBOOT event after posting the MCDI request. Did the mc 783 * There's a race here with efx_mcdi_send_request(), because
516 * reboot before or after the copyout? The best we can do always is 784 * we might receive a REBOOT event *before* the request has
517 * just return failure. 785 * been copied out. In polled mode (during startup) this is
786 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
787 * event mode, this condition is just an edge-case of
788 * receiving a REBOOT event after posting the MCDI
789 * request. Did the mc reboot before or after the copyout? The
790 * best we can do always is just return failure.
518 */ 791 */
519 spin_lock(&mcdi->iface_lock); 792 spin_lock(&mcdi->iface_lock);
520 if (efx_mcdi_complete(mcdi)) { 793 if (efx_mcdi_complete_sync(mcdi)) {
521 if (mcdi->mode == MCDI_MODE_EVENTS) { 794 if (mcdi->mode == MCDI_MODE_EVENTS) {
522 mcdi->resprc = rc; 795 mcdi->resprc = rc;
523 mcdi->resp_hdr_len = 0; 796 mcdi->resp_hdr_len = 0;
@@ -579,6 +852,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
579 "MC Scheduler error address=0x%x\n", data); 852 "MC Scheduler error address=0x%x\n", data);
580 break; 853 break;
581 case MCDI_EVENT_CODE_REBOOT: 854 case MCDI_EVENT_CODE_REBOOT:
855 case MCDI_EVENT_CODE_MC_REBOOT:
582 netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); 856 netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
583 efx_mcdi_ev_death(efx, -EIO); 857 efx_mcdi_ev_death(efx, -EIO);
584 break; 858 break;
@@ -593,7 +867,19 @@ void efx_mcdi_process_event(struct efx_channel *channel,
593 case MCDI_EVENT_CODE_PTP_PPS: 867 case MCDI_EVENT_CODE_PTP_PPS:
594 efx_ptp_event(efx, event); 868 efx_ptp_event(efx, event);
595 break; 869 break;
596 870 case MCDI_EVENT_CODE_TX_FLUSH:
871 case MCDI_EVENT_CODE_RX_FLUSH:
872 /* Two flush events will be sent: one to the same event
873 * queue as completions, and one to event queue 0.
874 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
875 * flag will be set, and we should ignore the event
876 * because we want to wait for all completions.
877 */
878 BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
879 MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
880 if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
881 efx_ef10_handle_drain_event(efx);
882 break;
597 case MCDI_EVENT_CODE_TX_ERR: 883 case MCDI_EVENT_CODE_TX_ERR:
598 case MCDI_EVENT_CODE_RX_ERR: 884 case MCDI_EVENT_CODE_RX_ERR:
599 netif_err(efx, hw, efx->net_dev, 885 netif_err(efx, hw, efx->net_dev,
@@ -617,27 +903,55 @@ void efx_mcdi_process_event(struct efx_channel *channel,
617 903
618void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) 904void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
619{ 905{
620 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN); 906 MCDI_DECLARE_BUF(outbuf,
907 max(MC_CMD_GET_VERSION_OUT_LEN,
908 MC_CMD_GET_CAPABILITIES_OUT_LEN));
621 size_t outlength; 909 size_t outlength;
622 const __le16 *ver_words; 910 const __le16 *ver_words;
911 size_t offset;
623 int rc; 912 int rc;
624 913
625 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); 914 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
626
627 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, 915 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
628 outbuf, sizeof(outbuf), &outlength); 916 outbuf, sizeof(outbuf), &outlength);
629 if (rc) 917 if (rc)
630 goto fail; 918 goto fail;
631
632 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { 919 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
633 rc = -EIO; 920 rc = -EIO;
634 goto fail; 921 goto fail;
635 } 922 }
636 923
637 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); 924 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
638 snprintf(buf, len, "%u.%u.%u.%u", 925 offset = snprintf(buf, len, "%u.%u.%u.%u",
639 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), 926 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
640 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); 927 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
928
929 /* EF10 may have multiple datapath firmware variants within a
930 * single version. Report which variants are running.
931 */
932 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
933 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
934 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
935 outbuf, sizeof(outbuf), &outlength);
936 if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
937 offset += snprintf(
938 buf + offset, len - offset, " rx? tx?");
939 else
940 offset += snprintf(
941 buf + offset, len - offset, " rx%x tx%x",
942 MCDI_WORD(outbuf,
943 GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
944 MCDI_WORD(outbuf,
945 GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
946
947 /* It's theoretically possible for the string to exceed 31
948 * characters, though in practice the first three version
949 * components are short enough that this doesn't happen.
950 */
951 if (WARN_ON(offset >= len))
952 buf[0] = 0;
953 }
954
641 return; 955 return;
642 956
643fail: 957fail:
@@ -645,8 +959,8 @@ fail:
645 buf[0] = 0; 959 buf[0] = 0;
646} 960}
647 961
648int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 962static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
649 bool *was_attached) 963 bool *was_attached)
650{ 964{
651 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); 965 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
652 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN); 966 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
@@ -1157,6 +1471,17 @@ fail:
1157 return rc; 1471 return rc;
1158} 1472}
1159 1473
1474int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
1475{
1476 MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
1477
1478 BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
1479 MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
1480 MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
1481 return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
1482 NULL, 0, NULL);
1483}
1484
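A sketch of how an EF10 probe path might use this helper to request a firmware workaround; the constant MC_CMD_WORKAROUND_BUG35388 is assumed to exist in mcdi_pcol.h (matching the bug-35388 flag documented in nic.h), and the error handling shown is only one plausible policy:

static int example_enable_bug35388(struct efx_nic *efx,
                                   struct efx_ef10_nic_data *nic_data)
{
        /* MC_CMD_WORKAROUND_BUG35388 is an assumed constant name */
        int rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388,
                                         true);

        if (rc == 0)
                nic_data->workaround_35388 = true;
        else if (rc != -ENOSYS)
                return rc;      /* treat -ENOSYS as "not implemented" */
        return 0;
}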
1160#ifdef CONFIG_SFC_MTD 1485#ifdef CONFIG_SFC_MTD
1161 1486
1162#define EFX_MCDI_NVRAM_LEN_MAX 128 1487#define EFX_MCDI_NVRAM_LEN_MAX 128
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 303d9e88a27f..c34d0d4e10ee 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2008-2010 Solarflare Communications Inc. 3 * Copyright 2008-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -14,15 +14,17 @@
14 * enum efx_mcdi_state - MCDI request handling state 14 * enum efx_mcdi_state - MCDI request handling state
15 * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the 15 * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
16 * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING 16 * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
17 * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that 17 * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
18 * moved into this state is allowed to move out of it. 18 * Only the thread that moved into this state is allowed to move out of it.
19 * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
19 * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread 20 * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
20 * has not yet consumed the result. For all other threads, equivalent to 21 * has not yet consumed the result. For all other threads, equivalent to
21 * %MCDI_STATE_RUNNING. 22 * %MCDI_STATE_RUNNING.
22 */ 23 */
23enum efx_mcdi_state { 24enum efx_mcdi_state {
24 MCDI_STATE_QUIESCENT, 25 MCDI_STATE_QUIESCENT,
25 MCDI_STATE_RUNNING, 26 MCDI_STATE_RUNNING_SYNC,
27 MCDI_STATE_RUNNING_ASYNC,
26 MCDI_STATE_COMPLETED, 28 MCDI_STATE_COMPLETED,
27}; 29};
28 30
@@ -33,20 +35,26 @@ enum efx_mcdi_mode {
33 35
34/** 36/**
35 * struct efx_mcdi_iface - MCDI protocol context 37 * struct efx_mcdi_iface - MCDI protocol context
38 * @efx: The associated NIC.
36 * @state: Request handling state. Waited for by @wq. 39 * @state: Request handling state. Waited for by @wq.
37 * @mode: Poll for mcdi completion, or wait for an mcdi_event. 40 * @mode: Poll for mcdi completion, or wait for an mcdi_event.
38 * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING 41 * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
39 * @new_epoch: Indicates start of day or start of MC reboot recovery 42 * @new_epoch: Indicates start of day or start of MC reboot recovery
40 * @iface_lock: Serialises access to all the following fields 43 * @iface_lock: Serialises access to @seqno, @credits and response metadata
41 * @seqno: The next sequence number to use for mcdi requests. 44 * @seqno: The next sequence number to use for mcdi requests.
42 * @credits: Number of spurious MCDI completion events allowed before we 45 * @credits: Number of spurious MCDI completion events allowed before we
43 * trigger a fatal error 46 * trigger a fatal error
44 * @resprc: Response error/success code (Linux numbering) 47 * @resprc: Response error/success code (Linux numbering)
45 * @resp_hdr_len: Response header length 48 * @resp_hdr_len: Response header length
46 * @resp_data_len: Response data (SDU or error) length 49 * @resp_data_len: Response data (SDU or error) length
50 * @async_lock: Serialises access to @async_list while event processing is
51 * enabled
52 * @async_list: Queue of asynchronous requests
53 * @async_timer: Timer for asynchronous request timeout
47 */ 54 */
48struct efx_mcdi_iface { 55struct efx_mcdi_iface {
49 atomic_t state; 56 struct efx_nic *efx;
57 enum efx_mcdi_state state;
50 enum efx_mcdi_mode mode; 58 enum efx_mcdi_mode mode;
51 wait_queue_head_t wq; 59 wait_queue_head_t wq;
52 spinlock_t iface_lock; 60 spinlock_t iface_lock;
@@ -56,6 +64,9 @@ struct efx_mcdi_iface {
56 int resprc; 64 int resprc;
57 size_t resp_hdr_len; 65 size_t resp_hdr_len;
58 size_t resp_data_len; 66 size_t resp_data_len;
67 spinlock_t async_lock;
68 struct list_head async_list;
69 struct timer_list async_timer;
59}; 70};
60 71
61struct efx_mcdi_mon { 72struct efx_mcdi_mon {
@@ -70,7 +81,7 @@ struct efx_mcdi_mon {
70struct efx_mcdi_mtd_partition { 81struct efx_mcdi_mtd_partition {
71 struct efx_mtd_partition common; 82 struct efx_mtd_partition common;
72 bool updating; 83 bool updating;
73 u8 nvram_type; 84 u16 nvram_type;
74 u16 fw_subtype; 85 u16 fw_subtype;
75}; 86};
76 87
@@ -111,10 +122,20 @@ extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
111 efx_dword_t *outbuf, size_t outlen, 122 efx_dword_t *outbuf, size_t outlen,
112 size_t *outlen_actual); 123 size_t *outlen_actual);
113 124
125typedef void efx_mcdi_async_completer(struct efx_nic *efx,
126 unsigned long cookie, int rc,
127 efx_dword_t *outbuf,
128 size_t outlen_actual);
129extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
130 const efx_dword_t *inbuf, size_t inlen,
131 size_t outlen,
132 efx_mcdi_async_completer *complete,
133 unsigned long cookie);
114 134
115extern int efx_mcdi_poll_reboot(struct efx_nic *efx); 135extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
116extern void efx_mcdi_mode_poll(struct efx_nic *efx); 136extern void efx_mcdi_mode_poll(struct efx_nic *efx);
117extern void efx_mcdi_mode_event(struct efx_nic *efx); 137extern void efx_mcdi_mode_event(struct efx_nic *efx);
138extern void efx_mcdi_flush_async(struct efx_nic *efx);
118 139
119extern void efx_mcdi_process_event(struct efx_channel *channel, 140extern void efx_mcdi_process_event(struct efx_channel *channel,
120 efx_qword_t *event); 141 efx_qword_t *event);
@@ -136,6 +157,9 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
136#define _MCDI_DWORD(_buf, _field) \ 157#define _MCDI_DWORD(_buf, _field) \
137 ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2)) 158 ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
138 159
160#define MCDI_WORD(_buf, _field) \
161 ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
162 le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
139#define MCDI_SET_DWORD(_buf, _field, _value) \ 163#define MCDI_SET_DWORD(_buf, _field, _value) \
140 EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) 164 EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
141#define MCDI_DWORD(_buf, _field) \ 165#define MCDI_DWORD(_buf, _field) \
@@ -252,8 +276,6 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
252 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) 276 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
253 277
254extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); 278extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
255extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
256 bool *was_attached_out);
257extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 279extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
258 u16 *fw_subtype_list, u32 *capabilities); 280 u16 *fw_subtype_list, u32 *capabilities);
259extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, 281extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
@@ -274,6 +296,8 @@ extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
274extern int efx_mcdi_port_probe(struct efx_nic *efx); 296extern int efx_mcdi_port_probe(struct efx_nic *efx);
275extern void efx_mcdi_port_remove(struct efx_nic *efx); 297extern void efx_mcdi_port_remove(struct efx_nic *efx);
276extern int efx_mcdi_port_reconfigure(struct efx_nic *efx); 298extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
299extern int efx_mcdi_port_get_number(struct efx_nic *efx);
300extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
277extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); 301extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
278extern int efx_mcdi_set_mac(struct efx_nic *efx); 302extern int efx_mcdi_set_mac(struct efx_nic *efx);
279#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) 303#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
@@ -282,6 +306,7 @@ extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
282extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx); 306extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
283extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); 307extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
284extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); 308extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
309extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
285 310
286#ifdef CONFIG_SFC_MCDI_MON 311#ifdef CONFIG_SFC_MCDI_MON
287extern int efx_mcdi_mon_probe(struct efx_nic *efx); 312extern int efx_mcdi_mon_probe(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index d7d45662d684..4cc5d95b2a5a 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2011 Solarflare Communications Inc. 3 * Copyright 2011-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -21,7 +21,9 @@ enum efx_hwmon_type {
21 EFX_HWMON_UNKNOWN, 21 EFX_HWMON_UNKNOWN,
22 EFX_HWMON_TEMP, /* temperature */ 22 EFX_HWMON_TEMP, /* temperature */
23 EFX_HWMON_COOL, /* cooling device, probably a heatsink */ 23 EFX_HWMON_COOL, /* cooling device, probably a heatsink */
24 EFX_HWMON_IN /* input voltage */ 24 EFX_HWMON_IN, /* voltage */
25 EFX_HWMON_CURR, /* current */
26 EFX_HWMON_POWER, /* power */
25}; 27};
26 28
27static const struct { 29static const struct {
@@ -29,23 +31,52 @@ static const struct {
29 enum efx_hwmon_type hwmon_type; 31 enum efx_hwmon_type hwmon_type;
30 int port; 32 int port;
31} efx_mcdi_sensor_type[] = { 33} efx_mcdi_sensor_type[] = {
32#define SENSOR(name, label, hwmon_type, port) \ 34#define SENSOR(name, label, hwmon_type, port) \
33 [MC_CMD_SENSOR_##name] = { label, hwmon_type, port } 35 [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
34 SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1), 36 SENSOR(CONTROLLER_TEMP, "Controller ext. temp.", TEMP, -1),
35 SENSOR(PHY_COMMON_TEMP, "PHY temp.", EFX_HWMON_TEMP, -1), 37 SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
36 SENSOR(CONTROLLER_COOLING, "Controller cooling", EFX_HWMON_COOL, -1), 38 SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1),
37 SENSOR(PHY0_TEMP, "PHY temp.", EFX_HWMON_TEMP, 0), 39 SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
38 SENSOR(PHY0_COOLING, "PHY cooling", EFX_HWMON_COOL, 0), 40 SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0),
39 SENSOR(PHY1_TEMP, "PHY temp.", EFX_HWMON_TEMP, 1), 41 SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
40 SENSOR(PHY1_COOLING, "PHY cooling", EFX_HWMON_COOL, 1), 42 SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1),
41 SENSOR(IN_1V0, "1.0V supply", EFX_HWMON_IN, -1), 43 SENSOR(IN_1V0, "1.0V supply", IN, -1),
42 SENSOR(IN_1V2, "1.2V supply", EFX_HWMON_IN, -1), 44 SENSOR(IN_1V2, "1.2V supply", IN, -1),
43 SENSOR(IN_1V8, "1.8V supply", EFX_HWMON_IN, -1), 45 SENSOR(IN_1V8, "1.8V supply", IN, -1),
44 SENSOR(IN_2V5, "2.5V supply", EFX_HWMON_IN, -1), 46 SENSOR(IN_2V5, "2.5V supply", IN, -1),
45 SENSOR(IN_3V3, "3.3V supply", EFX_HWMON_IN, -1), 47 SENSOR(IN_3V3, "3.3V supply", IN, -1),
46 SENSOR(IN_12V0, "12.0V supply", EFX_HWMON_IN, -1), 48 SENSOR(IN_12V0, "12.0V supply", IN, -1),
47 SENSOR(IN_1V2A, "1.2V analogue supply", EFX_HWMON_IN, -1), 49 SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
48 SENSOR(IN_VREF, "ref. voltage", EFX_HWMON_IN, -1), 50 SENSOR(IN_VREF, "ref. voltage", IN, -1),
51 SENSOR(OUT_VAOE, "AOE power supply", IN, -1),
52 SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1),
53 SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1),
54 SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1),
55 SENSOR(FAN_0, NULL, COOL, -1),
56 SENSOR(FAN_1, NULL, COOL, -1),
57 SENSOR(FAN_2, NULL, COOL, -1),
58 SENSOR(FAN_3, NULL, COOL, -1),
59 SENSOR(FAN_4, NULL, COOL, -1),
60 SENSOR(IN_VAOE, "AOE input supply", IN, -1),
61 SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
62 SENSOR(IN_IAOE, "AOE input current", CURR, -1),
63 SENSOR(NIC_POWER, "Board power use", POWER, -1),
64 SENSOR(IN_0V9, "0.9V supply", IN, -1),
65 SENSOR(IN_I0V9, "0.9V input current", CURR, -1),
66 SENSOR(IN_I1V2, "1.2V input current", CURR, -1),
67 SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1),
68 SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1),
69 SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1),
70 SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
71 SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
72 SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1),
73 SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1),
74 SENSOR(CONTROLLER_VPTAT_EXTADC,
75 "Controller int. temp. raw (at ADC)", IN, -1),
76 SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
77 "Controller int. temp. (via ADC)", TEMP, -1),
78 SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
79 SENSOR(AIRFLOW, "Air flow raw", IN, -1),
49#undef SENSOR 80#undef SENSOR
50}; 81};
51 82
@@ -160,9 +191,19 @@ static ssize_t efx_mcdi_mon_show_value(struct device *dev,
160 191
161 value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); 192 value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
162 193
163 /* Convert temperature from degrees to milli-degrees Celsius */ 194 switch (mon_attr->hwmon_type) {
164 if (mon_attr->hwmon_type == EFX_HWMON_TEMP) 195 case EFX_HWMON_TEMP:
196 /* Convert temperature from degrees to milli-degrees Celsius */
165 value *= 1000; 197 value *= 1000;
198 break;
199 case EFX_HWMON_POWER:
200 /* Convert power from watts to microwatts */
201 value *= 1000000;
202 break;
203 default:
204 /* No conversion needed */
205 break;
206 }
166 207
167 return sprintf(buf, "%u\n", value); 208 return sprintf(buf, "%u\n", value);
168} 209}
@@ -177,9 +218,19 @@ static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
177 218
178 value = mon_attr->limit_value; 219 value = mon_attr->limit_value;
179 220
180 /* Convert temperature from degrees to milli-degrees Celsius */ 221 switch (mon_attr->hwmon_type) {
181 if (mon_attr->hwmon_type == EFX_HWMON_TEMP) 222 case EFX_HWMON_TEMP:
223 /* Convert temperature from degrees to milli-degrees Celsius */
182 value *= 1000; 224 value *= 1000;
225 break;
226 case EFX_HWMON_POWER:
227 /* Convert power from watts to microwatts */
228 value *= 1000000;
229 break;
230 default:
231 /* No conversion needed */
232 break;
233 }
183 234
184 return sprintf(buf, "%u\n", value); 235 return sprintf(buf, "%u\n", value);
185} 236}
@@ -243,8 +294,8 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
243 294
244int efx_mcdi_mon_probe(struct efx_nic *efx) 295int efx_mcdi_mon_probe(struct efx_nic *efx)
245{ 296{
297 unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
246 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 298 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
247 unsigned int n_temp = 0, n_cool = 0, n_in = 0;
248 MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN); 299 MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
249 MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX); 300 MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
250 unsigned int n_pages, n_sensors, n_attrs, page; 301 unsigned int n_pages, n_sensors, n_attrs, page;
@@ -380,6 +431,14 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
380 hwmon_prefix = "in"; 431 hwmon_prefix = "in";
381 hwmon_index = n_in++; /* 0-based */ 432 hwmon_index = n_in++; /* 0-based */
382 break; 433 break;
434 case EFX_HWMON_CURR:
435 hwmon_prefix = "curr";
436 hwmon_index = ++n_curr; /* 1-based */
437 break;
438 case EFX_HWMON_POWER:
439 hwmon_prefix = "power";
440 hwmon_index = ++n_power; /* 1-based */
441 break;
383 } 442 }
384 443
385 min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, 444 min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
@@ -399,13 +458,15 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
399 if (rc) 458 if (rc)
400 goto fail; 459 goto fail;
401 460
402 snprintf(name, sizeof(name), "%s%u_min", 461 if (hwmon_type != EFX_HWMON_POWER) {
403 hwmon_prefix, hwmon_index); 462 snprintf(name, sizeof(name), "%s%u_min",
404 rc = efx_mcdi_mon_add_attr( 463 hwmon_prefix, hwmon_index);
405 efx, name, efx_mcdi_mon_show_limit, 464 rc = efx_mcdi_mon_add_attr(
406 i, type, min1); 465 efx, name, efx_mcdi_mon_show_limit,
407 if (rc) 466 i, type, min1);
408 goto fail; 467 if (rc)
468 goto fail;
469 }
409 470
410 snprintf(name, sizeof(name), "%s%u_max", 471 snprintf(name, sizeof(name), "%s%u_max",
411 hwmon_prefix, hwmon_index); 472 hwmon_prefix, hwmon_index);
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 9e824f74e8a1..b5cf62492f8e 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -3799,6 +3799,10 @@
3799#define NVRAM_PARTITION_TYPE_DUMP 0x800 3799#define NVRAM_PARTITION_TYPE_DUMP 0x800
3800/* enum: Application license key storage partition */ 3800/* enum: Application license key storage partition */
3801#define NVRAM_PARTITION_TYPE_LICENSE 0x900 3801#define NVRAM_PARTITION_TYPE_LICENSE 0x900
3802/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
3803#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
3804/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
3805#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
3802/* enum: Start of reserved value range (firmware may use for any purpose) */ 3806/* enum: Start of reserved value range (firmware may use for any purpose) */
3803#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 3807#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
3804/* enum: End of reserved value range (firmware may use for any purpose) */ 3808/* enum: End of reserved value range (firmware may use for any purpose) */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 42d52f34ad79..8d33da6697fb 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2009-2010 Solarflare Communications Inc. 3 * Copyright 2009-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -830,6 +830,13 @@ static const struct efx_phy_operations efx_mcdi_phy_ops = {
830 .get_module_info = efx_mcdi_phy_get_module_info, 830 .get_module_info = efx_mcdi_phy_get_module_info,
831}; 831};
832 832
833u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
834{
835 struct efx_mcdi_phy_data *phy_data = efx->phy_data;
836
837 return phy_data->supported_cap;
838}
839
833static unsigned int efx_mcdi_event_link_speed[] = { 840static unsigned int efx_mcdi_event_link_speed[] = {
834 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, 841 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
835 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, 842 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
@@ -1004,3 +1011,17 @@ void efx_mcdi_port_remove(struct efx_nic *efx)
1004 efx->phy_op->remove(efx); 1011 efx->phy_op->remove(efx);
1005 efx_nic_free_buffer(efx, &efx->stats_buffer); 1012 efx_nic_free_buffer(efx, &efx->stats_buffer);
1006} 1013}
1014
1015/* Get physical port number (EF10 only; on Siena it is the same as the PF number) */
1016int efx_mcdi_port_get_number(struct efx_nic *efx)
1017{
1018 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
1019 int rc;
1020
1021 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
1022 outbuf, sizeof(outbuf), NULL);
1023 if (rc)
1024 return rc;
1025
1026 return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
1027}
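Because the helper returns either a negative error code or the physical port number, callers are expected to test the sign of the result. A short illustrative sketch (the wrapper and the use of efx->port_num are assumptions, not part of this patch):

static int example_record_port(struct efx_nic *efx)
{
        int rc = efx_mcdi_port_get_number(efx);

        if (rc < 0)
                return rc;              /* MCDI error */
        efx->port_num = rc;             /* ->port_num assumed to exist */
        return 0;
}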
diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c
index 9acfd6696ffb..8ff954c59efa 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/mdio_10g.c
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2011 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index a97dbbd2de99..16824fecc5ee 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2011 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 8be9a69a61e1..a77a8bd2dd70 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index e85bed09b5e9..b172ed133055 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2011 Solarflare Communications Inc. 4 * Copyright 2005-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -39,7 +39,7 @@
39 * 39 *
40 **************************************************************************/ 40 **************************************************************************/
41 41
42#define EFX_DRIVER_VERSION "3.2" 42#define EFX_DRIVER_VERSION "4.0"
43 43
44#ifdef DEBUG 44#ifdef DEBUG
45#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 45#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -135,6 +135,7 @@ struct efx_special_buffer {
135 * freed when descriptor completes 135 * freed when descriptor completes
136 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be 136 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
137 * freed when descriptor completes. 137 * freed when descriptor completes.
138 * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
138 * @dma_addr: DMA address of the fragment. 139 * @dma_addr: DMA address of the fragment.
139 * @flags: Flags for allocation and DMA mapping type 140 * @flags: Flags for allocation and DMA mapping type
140 * @len: Length of this fragment. 141 * @len: Length of this fragment.
@@ -146,7 +147,10 @@ struct efx_tx_buffer {
146 const struct sk_buff *skb; 147 const struct sk_buff *skb;
147 void *heap_buf; 148 void *heap_buf;
148 }; 149 };
149 dma_addr_t dma_addr; 150 union {
151 efx_qword_t option;
152 dma_addr_t dma_addr;
153 };
150 unsigned short flags; 154 unsigned short flags;
151 unsigned short len; 155 unsigned short len;
152 unsigned short unmap_len; 156 unsigned short unmap_len;
@@ -155,6 +159,7 @@ struct efx_tx_buffer {
155#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */ 159#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
156#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */ 160#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
157#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */ 161#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
162#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
158 163
159/** 164/**
160 * struct efx_tx_queue - An Efx TX queue 165 * struct efx_tx_queue - An Efx TX queue
@@ -297,7 +302,8 @@ struct efx_rx_page_state {
297 * @added_count: Number of buffers added to the receive queue. 302 * @added_count: Number of buffers added to the receive queue.
298 * @notified_count: Number of buffers given to NIC (<= @added_count). 303 * @notified_count: Number of buffers given to NIC (<= @added_count).
299 * @removed_count: Number of buffers removed from the receive queue. 304 * @removed_count: Number of buffers removed from the receive queue.
300 * @scatter_n: Number of buffers used by current packet 305 * @scatter_n: Used by NIC specific receive code.
306 * @scatter_len: Used by NIC specific receive code.
301 * @page_ring: The ring to store DMA mapped pages for reuse. 307 * @page_ring: The ring to store DMA mapped pages for reuse.
302 * @page_add: Counter to calculate the write pointer for the recycle ring. 308 * @page_add: Counter to calculate the write pointer for the recycle ring.
303 * @page_remove: Counter to calculate the read pointer for the recycle ring. 309 * @page_remove: Counter to calculate the read pointer for the recycle ring.
@@ -329,6 +335,7 @@ struct efx_rx_queue {
329 unsigned int notified_count; 335 unsigned int notified_count;
330 unsigned int removed_count; 336 unsigned int removed_count;
331 unsigned int scatter_n; 337 unsigned int scatter_n;
338 unsigned int scatter_len;
332 struct page **page_ring; 339 struct page **page_ring;
333 unsigned int page_add; 340 unsigned int page_add;
334 unsigned int page_remove; 341 unsigned int page_remove;
@@ -382,6 +389,8 @@ enum efx_rx_alloc_method {
382 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 389 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
383 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to 390 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
384 * lack of descriptors 391 * lack of descriptors
392 * @n_rx_merge_events: Number of RX merged completion events
393 * @n_rx_merge_packets: Number of RX packets completed by merged events
385 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by 394 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
386 * __efx_rx_packet(), or zero if there is none 395 * __efx_rx_packet(), or zero if there is none
387 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered 396 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -418,6 +427,8 @@ struct efx_channel {
418 unsigned n_rx_overlength; 427 unsigned n_rx_overlength;
419 unsigned n_skbuff_leaks; 428 unsigned n_skbuff_leaks;
420 unsigned int n_rx_nodesc_trunc; 429 unsigned int n_rx_nodesc_trunc;
430 unsigned int n_rx_merge_events;
431 unsigned int n_rx_merge_packets;
421 432
422 unsigned int rx_pkt_n_frags; 433 unsigned int rx_pkt_n_frags;
423 unsigned int rx_pkt_index; 434 unsigned int rx_pkt_index;
@@ -721,7 +732,7 @@ struct vfdi_status;
721 * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, 732 * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
722 * indexed by filter ID 733 * indexed by filter ID
723 * @rps_expire_index: Next index to check for expiry in @rps_flow_id 734 * @rps_expire_index: Next index to check for expiry in @rps_flow_id
724 * @drain_pending: Count of RX and TX queues that haven't been flushed and drained. 735 * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
725 * @rxq_flush_pending: Count of number of receive queues that need to be flushed. 736 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
726 * Decremented when the efx_flush_rx_queue() is called. 737 * Decremented when the efx_flush_rx_queue() is called.
727 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet 738 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
@@ -862,7 +873,7 @@ struct efx_nic {
862 unsigned int rps_expire_index; 873 unsigned int rps_expire_index;
863#endif 874#endif
864 875
865 atomic_t drain_pending; 876 atomic_t active_queues;
866 atomic_t rxq_flush_pending; 877 atomic_t rxq_flush_pending;
867 atomic_t rxq_flush_outstanding; 878 atomic_t rxq_flush_outstanding;
868 wait_queue_head_t flush_wq; 879 wait_queue_head_t flush_wq;
@@ -1023,7 +1034,8 @@ struct efx_mtd_partition {
1023 * @rx_prefix_size: Size of RX prefix before packet data 1034 * @rx_prefix_size: Size of RX prefix before packet data
1024 * @rx_hash_offset: Offset of RX flow hash within prefix 1035 * @rx_hash_offset: Offset of RX flow hash within prefix
1025 * @rx_buffer_padding: Size of padding at end of RX packet 1036 * @rx_buffer_padding: Size of padding at end of RX packet
1026 * @can_rx_scatter: NIC is able to scatter packet to multiple buffers 1037 * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
1038 * @always_rx_scatter: NIC will always scatter packets to multiple buffers
1027 * @max_interrupt_mode: Highest capability interrupt mode supported 1039 * @max_interrupt_mode: Highest capability interrupt mode supported
1028 * from &enum efx_init_mode. 1040 * from &enum efx_init_mode.
1029 * @timer_period_max: Maximum period of interrupt timer (in ticks) 1041 * @timer_period_max: Maximum period of interrupt timer (in ticks)
@@ -1036,7 +1048,7 @@ struct efx_nic_type {
1036 int (*probe)(struct efx_nic *efx); 1048 int (*probe)(struct efx_nic *efx);
1037 void (*remove)(struct efx_nic *efx); 1049 void (*remove)(struct efx_nic *efx);
1038 int (*init)(struct efx_nic *efx); 1050 int (*init)(struct efx_nic *efx);
1039 void (*dimension_resources)(struct efx_nic *efx); 1051 int (*dimension_resources)(struct efx_nic *efx);
1040 void (*fini)(struct efx_nic *efx); 1052 void (*fini)(struct efx_nic *efx);
1041 void (*monitor)(struct efx_nic *efx); 1053 void (*monitor)(struct efx_nic *efx);
1042 enum reset_type (*map_reset_reason)(enum reset_type reason); 1054 enum reset_type (*map_reset_reason)(enum reset_type reason);
@@ -1087,7 +1099,7 @@ struct efx_nic_type {
1087 void (*rx_write)(struct efx_rx_queue *rx_queue); 1099 void (*rx_write)(struct efx_rx_queue *rx_queue);
1088 void (*rx_defer_refill)(struct efx_rx_queue *rx_queue); 1100 void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
1089 int (*ev_probe)(struct efx_channel *channel); 1101 int (*ev_probe)(struct efx_channel *channel);
1090 void (*ev_init)(struct efx_channel *channel); 1102 int (*ev_init)(struct efx_channel *channel);
1091 void (*ev_fini)(struct efx_channel *channel); 1103 void (*ev_fini)(struct efx_channel *channel);
1092 void (*ev_remove)(struct efx_channel *channel); 1104 void (*ev_remove)(struct efx_channel *channel);
1093 int (*ev_process)(struct efx_channel *channel, int quota); 1105 int (*ev_process)(struct efx_channel *channel, int quota);
@@ -1142,6 +1154,7 @@ struct efx_nic_type {
1142 unsigned int rx_hash_offset; 1154 unsigned int rx_hash_offset;
1143 unsigned int rx_buffer_padding; 1155 unsigned int rx_buffer_padding;
1144 bool can_rx_scatter; 1156 bool can_rx_scatter;
1157 bool always_rx_scatter;
1145 unsigned int max_interrupt_mode; 1158 unsigned int max_interrupt_mode;
1146 unsigned int timer_period_max; 1159 unsigned int timer_period_max;
1147 netdev_features_t offload_features; 1160 netdev_features_t offload_features;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 78d413312052..e7dbd2dd202e 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2011 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 9afbf3616b4b..4b1e188f7a2f 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2011 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -17,15 +17,12 @@
17#include "efx.h" 17#include "efx.h"
18#include "mcdi.h" 18#include "mcdi.h"
19 19
20/*
21 * Falcon hardware control
22 */
23
24enum { 20enum {
25 EFX_REV_FALCON_A0 = 0, 21 EFX_REV_FALCON_A0 = 0,
26 EFX_REV_FALCON_A1 = 1, 22 EFX_REV_FALCON_A1 = 1,
27 EFX_REV_FALCON_B0 = 2, 23 EFX_REV_FALCON_B0 = 2,
28 EFX_REV_SIENA_A0 = 3, 24 EFX_REV_SIENA_A0 = 3,
25 EFX_REV_HUNT_A0 = 4,
29}; 26};
30 27
31static inline int efx_nic_rev(struct efx_nic *efx) 28static inline int efx_nic_rev(struct efx_nic *efx)
@@ -347,6 +344,78 @@ struct siena_nic_data {
347 u64 stats[SIENA_STAT_COUNT]; 344 u64 stats[SIENA_STAT_COUNT];
348}; 345};
349 346
347enum {
348 EF10_STAT_tx_bytes,
349 EF10_STAT_tx_packets,
350 EF10_STAT_tx_pause,
351 EF10_STAT_tx_control,
352 EF10_STAT_tx_unicast,
353 EF10_STAT_tx_multicast,
354 EF10_STAT_tx_broadcast,
355 EF10_STAT_tx_lt64,
356 EF10_STAT_tx_64,
357 EF10_STAT_tx_65_to_127,
358 EF10_STAT_tx_128_to_255,
359 EF10_STAT_tx_256_to_511,
360 EF10_STAT_tx_512_to_1023,
361 EF10_STAT_tx_1024_to_15xx,
362 EF10_STAT_tx_15xx_to_jumbo,
363 EF10_STAT_rx_bytes,
364 EF10_STAT_rx_bytes_minus_good_bytes,
365 EF10_STAT_rx_good_bytes,
366 EF10_STAT_rx_bad_bytes,
367 EF10_STAT_rx_packets,
368 EF10_STAT_rx_good,
369 EF10_STAT_rx_bad,
370 EF10_STAT_rx_pause,
371 EF10_STAT_rx_control,
372 EF10_STAT_rx_unicast,
373 EF10_STAT_rx_multicast,
374 EF10_STAT_rx_broadcast,
375 EF10_STAT_rx_lt64,
376 EF10_STAT_rx_64,
377 EF10_STAT_rx_65_to_127,
378 EF10_STAT_rx_128_to_255,
379 EF10_STAT_rx_256_to_511,
380 EF10_STAT_rx_512_to_1023,
381 EF10_STAT_rx_1024_to_15xx,
382 EF10_STAT_rx_15xx_to_jumbo,
383 EF10_STAT_rx_gtjumbo,
384 EF10_STAT_rx_bad_gtjumbo,
385 EF10_STAT_rx_overflow,
386 EF10_STAT_rx_align_error,
387 EF10_STAT_rx_length_error,
388 EF10_STAT_rx_nodesc_drops,
389 EF10_STAT_COUNT
390};
391
392/**
393 * struct efx_ef10_nic_data - EF10 architecture NIC state
394 * @mcdi_buf: DMA buffer for MCDI
395 * @warm_boot_count: Last seen MC warm boot count
396 * @vi_base: Absolute index of first VI in this function
397 * @n_allocated_vis: Number of VIs allocated to this function
398 * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
399 * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
400 * @rx_rss_context: Firmware handle for our RSS context
401 * @stats: Hardware statistics
402 * @workaround_35388: Flag: firmware supports workaround for bug 35388
403 * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
404 * %MC_CMD_GET_CAPABILITIES response)
405 */
406struct efx_ef10_nic_data {
407 struct efx_buffer mcdi_buf;
408 u16 warm_boot_count;
409 unsigned int vi_base;
410 unsigned int n_allocated_vis;
411 bool must_realloc_vis;
412 bool must_restore_filters;
413 u32 rx_rss_context;
414 u64 stats[EF10_STAT_COUNT];
415 bool workaround_35388;
416 u32 datapath_caps;
417};
418
350/* 419/*
351 * On the SFC9000 family each port is associated with 1 PCI physical 420 * On the SFC9000 family each port is associated with 1 PCI physical
352 * function (PF) handled by sfc and a configurable number of virtual 421 * function (PF) handled by sfc and a configurable number of virtual
@@ -448,6 +517,7 @@ extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
448extern const struct efx_nic_type falcon_a1_nic_type; 517extern const struct efx_nic_type falcon_a1_nic_type;
449extern const struct efx_nic_type falcon_b0_nic_type; 518extern const struct efx_nic_type falcon_b0_nic_type;
450extern const struct efx_nic_type siena_a0_nic_type; 519extern const struct efx_nic_type siena_a0_nic_type;
520extern const struct efx_nic_type efx_hunt_a0_nic_type;
451 521
452/************************************************************************** 522/**************************************************************************
453 * 523 *
@@ -503,9 +573,9 @@ static inline int efx_nic_probe_eventq(struct efx_channel *channel)
503{ 573{
504 return channel->efx->type->ev_probe(channel); 574 return channel->efx->type->ev_probe(channel);
505} 575}
506static inline void efx_nic_init_eventq(struct efx_channel *channel) 576static inline int efx_nic_init_eventq(struct efx_channel *channel)
507{ 577{
508 channel->efx->type->ev_init(channel); 578 return channel->efx->type->ev_init(channel);
509} 579}
510static inline void efx_nic_fini_eventq(struct efx_channel *channel) 580static inline void efx_nic_fini_eventq(struct efx_channel *channel)
511{ 581{
@@ -539,7 +609,7 @@ extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
539extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue); 609extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
540extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); 610extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
541extern int efx_farch_ev_probe(struct efx_channel *channel); 611extern int efx_farch_ev_probe(struct efx_channel *channel);
542extern void efx_farch_ev_init(struct efx_channel *channel); 612extern int efx_farch_ev_init(struct efx_channel *channel);
543extern void efx_farch_ev_fini(struct efx_channel *channel); 613extern void efx_farch_ev_fini(struct efx_channel *channel);
544extern void efx_farch_ev_remove(struct efx_channel *channel); 614extern void efx_farch_ev_remove(struct efx_channel *channel);
545extern int efx_farch_ev_process(struct efx_channel *channel, int quota); 615extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
@@ -627,6 +697,7 @@ extern void falcon_stop_nic_stats(struct efx_nic *efx);
627extern int falcon_reset_xaui(struct efx_nic *efx); 697extern int falcon_reset_xaui(struct efx_nic *efx);
628extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); 698extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
629extern void efx_farch_init_common(struct efx_nic *efx); 699extern void efx_farch_init_common(struct efx_nic *efx);
700extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
630static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx) 701static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
631{ 702{
632 efx->type->rx_push_indir_table(efx); 703 efx->type->rx_push_indir_table(efx);
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 4f6eb8177a6d..45eeb7075156 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2007-2010 Solarflare Communications Inc. 3 * Copyright 2007-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index c60cabb6ff05..03acf57df045 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2011 Solarflare Communications Inc. 3 * Copyright 2011-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 326a28637f3c..efa3612affca 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2010 Solarflare Communications Inc. 3 * Copyright 2006-2012 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 864b6ffc9cb2..4a596725023f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2011 Solarflare Communications Inc. 4 * Copyright 2005-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -529,8 +529,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
529 if (!(flags & EFX_RX_PKT_PREFIX_LEN)) 529 if (!(flags & EFX_RX_PKT_PREFIX_LEN))
530 efx_rx_packet__check_len(rx_queue, rx_buf, len); 530 efx_rx_packet__check_len(rx_queue, rx_buf, len);
531 } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) || 531 } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
532 unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) || 532 unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
533 unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) || 533 unlikely(len > n_frags * efx->rx_dma_len) ||
534 unlikely(!efx->rx_scatter)) { 534 unlikely(!efx->rx_scatter)) {
535 /* If this isn't an explicit discard request, either 535 /* If this isn't an explicit discard request, either
536 * the hardware or the driver is broken. 536 * the hardware or the driver is broken.
@@ -581,9 +581,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
581 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); 581 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
582 if (--tail_frags == 0) 582 if (--tail_frags == 0)
583 break; 583 break;
584 efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE); 584 efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
585 } 585 }
586 rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE; 586 rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
587 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); 587 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
588 } 588 }
589 589
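
The two hunks above replace the compile-time EFX_RX_USR_BUF_SIZE with the per-NIC field efx->rx_dma_len when a scattered packet is validated and its buffers are synced: every fragment except the last is expected to be filled to the DMA buffer length the driver actually configured, and the final fragment carries the remainder. A minimal standalone sketch of that length bookkeeping (plain C, with made-up sizes standing in for efx->rx_dma_len and the fragment count; not the driver's code):

    #include <assert.h>
    #include <stdio.h>

    /* Illustration only: splitting a scattered RX packet of total length
     * "len" across "n_frags" buffers of "rx_dma_len" bytes, mirroring the
     * checks and the final-fragment length computed in efx_rx_packet(). */
    int main(void)
    {
            unsigned int rx_dma_len = 1824;   /* hypothetical efx->rx_dma_len */
            unsigned int n_frags = 3;
            unsigned int len = 4000;          /* total packet length */
            unsigned int i;

            /* Same sanity checks as the hunk at old line 532/533: every
             * fragment but the last must be full, and the last must fit. */
            assert(len > (n_frags - 1) * rx_dma_len);
            assert(len <= n_frags * rx_dma_len);

            for (i = 0; i < n_frags - 1; i++)
                    printf("frag %u: %u bytes\n", i, rx_dma_len);
            printf("frag %u: %u bytes (tail)\n", n_frags - 1,
                   len - (n_frags - 1) * rx_dma_len);
            return 0;
    }
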
@@ -903,3 +903,37 @@ bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
903} 903}
904 904
905#endif /* CONFIG_RFS_ACCEL */ 905#endif /* CONFIG_RFS_ACCEL */
906
907/**
908 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
909 * @spec: Specification to test
910 *
911 * Return: %true if the specification is a non-drop RX filter that
912 * matches a local MAC address I/G bit value of 1 or matches a local
913 * IPv4 or IPv6 address value in the respective multicast address
914 * range. Otherwise %false.
915 */
916bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
917{
918 if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
919 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
920 return false;
921
922 if (spec->match_flags &
923 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
924 is_multicast_ether_addr(spec->loc_mac))
925 return true;
926
927 if ((spec->match_flags &
928 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
929 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
930 if (spec->ether_type == htons(ETH_P_IP) &&
931 ipv4_is_multicast(spec->loc_host[0]))
932 return true;
933 if (spec->ether_type == htons(ETH_P_IPV6) &&
934 ((const u8 *)spec->loc_host)[0] == 0xff)
935 return true;
936 }
937
938 return false;
939}
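
The new efx_filter_is_mc_recipient() classifies an RX filter as a multicast recipient either by the I/G bit of the local MAC address or by the destination IPv4/IPv6 address range. A self-contained sketch of the address tests it relies on, written without the kernel's is_multicast_ether_addr()/ipv4_is_multicast() helpers (helper names and sample values below are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* I/G bit: the lowest bit of the first MAC octet is 1 for group addresses. */
    static bool mac_is_multicast(const uint8_t mac[6])
    {
            return mac[0] & 0x01;
    }

    /* IPv4 multicast is 224.0.0.0/4, i.e. the top nibble of the first octet is 0xe. */
    static bool ipv4_addr_is_multicast(const uint8_t addr[4])
    {
            return (addr[0] & 0xf0) == 0xe0;
    }

    /* IPv6 multicast is ff00::/8, hence the single-byte test in the hunk above. */
    static bool ipv6_addr_is_multicast(const uint8_t addr[16])
    {
            return addr[0] == 0xff;
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
            const uint8_t v4[4]  = { 224, 0, 0, 251 };
            const uint8_t v6[16] = { 0xff, 0x02, [15] = 0x01 };

            printf("mac %d ipv4 %d ipv6 %d\n", mac_is_multicast(mac),
                   ipv4_addr_is_multicast(v4), ipv6_addr_is_multicast(v6));
            return 0;
    }

In the driver the same tests sit behind the EFX_FILTER_MATCH_* flag checks, so the addresses are only inspected when the filter actually matches on those fields.
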
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 716cff9d0160..144bbff5a4ae 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2012 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index aed24b736059..87698ae0bf75 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2012 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 89180d475367..d034bcd124ef 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -177,13 +177,14 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
177 return rc; 177 return rc;
178} 178}
179 179
180static void siena_dimension_resources(struct efx_nic *efx) 180static int siena_dimension_resources(struct efx_nic *efx)
181{ 181{
182 /* Each port has a small block of internal SRAM dedicated to 182 /* Each port has a small block of internal SRAM dedicated to
183 * the buffer table and descriptor caches. In theory we can 183 * the buffer table and descriptor caches. In theory we can
184 * map both blocks to one port, but we don't. 184 * map both blocks to one port, but we don't.
185 */ 185 */
186 efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); 186 efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
187 return 0;
187} 188}
188 189
189static unsigned int siena_mem_map_size(struct efx_nic *efx) 190static unsigned int siena_mem_map_size(struct efx_nic *efx)
@@ -195,7 +196,6 @@ static unsigned int siena_mem_map_size(struct efx_nic *efx)
195static int siena_probe_nic(struct efx_nic *efx) 196static int siena_probe_nic(struct efx_nic *efx)
196{ 197{
197 struct siena_nic_data *nic_data; 198 struct siena_nic_data *nic_data;
198 bool already_attached = false;
199 efx_oword_t reg; 199 efx_oword_t reg;
200 int rc; 200 int rc;
201 201
@@ -221,19 +221,6 @@ static int siena_probe_nic(struct efx_nic *efx)
221 if (rc) 221 if (rc)
222 goto fail1; 222 goto fail1;
223 223
224 /* Let the BMC know that the driver is now in charge of link and
225 * filter settings. We must do this before we reset the NIC */
226 rc = efx_mcdi_drv_attach(efx, true, &already_attached);
227 if (rc) {
228 netif_err(efx, probe, efx->net_dev,
229 "Unable to register driver with MCPU\n");
230 goto fail2;
231 }
232 if (already_attached)
233 /* Not a fatal error */
234 netif_err(efx, probe, efx->net_dev,
235 "Host already registered with MCPU\n");
236
237 /* Now we can reset the NIC */ 224 /* Now we can reset the NIC */
238 rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); 225 rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
239 if (rc) { 226 if (rc) {
@@ -280,8 +267,6 @@ fail5:
280 efx_nic_free_buffer(efx, &efx->irq_status); 267 efx_nic_free_buffer(efx, &efx->irq_status);
281fail4: 268fail4:
282fail3: 269fail3:
283 efx_mcdi_drv_attach(efx, false, NULL);
284fail2:
285 efx_mcdi_fini(efx); 270 efx_mcdi_fini(efx);
286fail1: 271fail1:
287 kfree(efx->nic_data); 272 kfree(efx->nic_data);
@@ -370,14 +355,11 @@ static void siena_remove_nic(struct efx_nic *efx)
370 355
371 efx_mcdi_reset(efx, RESET_TYPE_ALL); 356 efx_mcdi_reset(efx, RESET_TYPE_ALL);
372 357
373 /* Relinquish the device back to the BMC */ 358 efx_mcdi_fini(efx);
374 efx_mcdi_drv_attach(efx, false, NULL);
375 359
376 /* Tear down the private nic state */ 360 /* Tear down the private nic state */
377 kfree(efx->nic_data); 361 kfree(efx->nic_data);
378 efx->nic_data = NULL; 362 efx->nic_data = NULL;
379
380 efx_mcdi_fini(efx);
381} 363}
382 364
383#define SIENA_DMA_STAT(ext_name, mcdi_name) \ 365#define SIENA_DMA_STAT(ext_name, mcdi_name) \
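
The efx_mcdi_drv_attach() calls disappear from siena_probe_nic() and siena_remove_nic(); judging by the MCDI changes elsewhere in this series, the attach/detach handshake with the management controller appears to move into the shared efx_mcdi_init()/efx_mcdi_fini() path so that Siena and EF10 both get it without duplicating the calls. A small standalone sketch of that refactoring pattern, hoisting a paired register/unregister out of each backend into the common init/fini (all names below are illustrative):

    #include <stdio.h>

    /* Before: every backend's probe attached to the management controller and
     * every remove detached.  After: the common mcdi_init()/mcdi_fini() pair
     * owns the handshake, so backends only do backend-specific work. */

    static int mc_attach(void)  { printf("attach to MC\n");  return 0; }
    static void mc_detach(void) { printf("detach from MC\n"); }

    static int mcdi_init(void)
    {
            /* shared path: set up MCDI state, then register with the MC */
            return mc_attach();
    }

    static void mcdi_fini(void)
    {
            /* shared path: relinquish the device, then tear down MCDI state */
            mc_detach();
    }

    static int backend_probe(void)
    {
            int rc = mcdi_init();
            if (rc)
                    return rc;
            /* backend-specific probing only; no mc_attach() here any more */
            return 0;
    }

    static void backend_remove(void)
    {
            /* backend-specific teardown, then the shared fini */
            mcdi_fini();
    }

    int main(void)
    {
            if (backend_probe() == 0)
                    backend_remove();
            return 0;
    }
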
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 4b8eef962faa..0c38f926871e 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2010-2011 Solarflare Communications Inc. 3 * Copyright 2010-2012 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c
index d37cb5017129..2c90e6b31575 100644
--- a/drivers/net/ethernet/sfc/tenxpress.c
+++ b/drivers/net/ethernet/sfc/tenxpress.c
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2007-2011 Solarflare Communications Inc. 3 * Copyright 2007-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 85ee647b28ad..2ac91c5b5eea 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2010 Solarflare Communications Inc. 4 * Copyright 2005-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -306,7 +306,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
306 306
307 while (read_ptr != stop_index) { 307 while (read_ptr != stop_index) {
308 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; 308 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
309 if (unlikely(buffer->len == 0)) { 309
310 if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
311 unlikely(buffer->len == 0)) {
310 netif_err(efx, tx_err, efx->net_dev, 312 netif_err(efx, tx_err, efx->net_dev,
311 "TX queue %d spurious TX completion id %x\n", 313 "TX queue %d spurious TX completion id %x\n",
312 tx_queue->queue, read_ptr); 314 tx_queue->queue, read_ptr);
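
On the EF10 architecture a TX queue can contain option descriptors, which legitimately have a zero data length, so the dequeue path above now reports a spurious completion only for a zero-length buffer that is not flagged EFX_TX_BUF_OPTION. A standalone sketch of the test (flag value and struct layout are illustrative, not copied from net_driver.h):

    #include <stdbool.h>
    #include <stdio.h>

    #define TX_BUF_OPTION 0x10   /* illustrative stand-in for EFX_TX_BUF_OPTION */

    struct tx_buffer {
            unsigned int len;
            unsigned int flags;
    };

    /* Mirrors the new test: only a zero-length *data* buffer is suspicious. */
    static bool completion_is_spurious(const struct tx_buffer *buf)
    {
            return !(buf->flags & TX_BUF_OPTION) && buf->len == 0;
    }

    int main(void)
    {
            struct tx_buffer option = { .len = 0, .flags = TX_BUF_OPTION };
            struct tx_buffer empty  = { .len = 0, .flags = 0 };

            printf("option descriptor spurious? %d\n",
                   completion_is_spurious(&option));
            printf("empty data buffer spurious? %d\n",
                   completion_is_spurious(&empty));
            return 0;
    }
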
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 29bb3f9941c0..3d5ee3259885 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2011 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h
index 225557caaf5a..ae044f44936a 100644
--- a/drivers/net/ethernet/sfc/vfdi.h
+++ b/drivers/net/ethernet/sfc/vfdi.h
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2010-2012 Solarflare Communications Inc. 3 * Copyright 2010-2012 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index 7e5be1d873a7..2310b75d4ec2 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2010 Solarflare Communications Inc. 3 * Copyright 2006-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -44,4 +44,10 @@
44/* Leak overlength packets rather than free */ 44/* Leak overlength packets rather than free */
45#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A 45#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
46 46
47/* Lockup when writing event block registers at gen2/gen3 */
48#define EFX_EF10_WORKAROUND_35388(efx) \
49 (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
50#define EFX_WORKAROUND_35388(efx) \
51 (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
52
47#endif /* EFX_WORKAROUNDS_H */ 53#endif /* EFX_WORKAROUNDS_H */
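
The new macros follow the existing convention in this header: EFX_WORKAROUND_35388(efx) is true only when the controller is a Hunt A0 part and the per-NIC workaround_35388 flag, filled in from firmware, says the erratum applies, so call sites can guard the affected event-register writes with a single test. A hedged sketch of such a guarded call site (helper names and the alternative path are illustrative, not taken from the driver):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustration of the workaround pattern: detect the erratum once per NIC,
     * then choose a safe path at each affected register write. */
    struct nic {
            bool is_hunt_a0;        /* stands in for efx_nic_rev() == EFX_REV_HUNT_A0 */
            bool fw_reports_35388;  /* stands in for nic_data->workaround_35388 */
    };

    static bool workaround_35388(const struct nic *nic)
    {
            return nic->is_hunt_a0 && nic->fw_reports_35388;
    }

    static void write_evq_register(const struct nic *nic, unsigned int value)
    {
            if (workaround_35388(nic))
                    printf("0x%x via the workaround path\n", value);
            else
                    printf("0x%x via a direct write\n", value);
    }

    int main(void)
    {
            struct nic hunt = { .is_hunt_a0 = true, .fw_reports_35388 = true };

            write_evq_register(&hunt, 0x1234);
            return 0;
    }
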