-rw-r--r--  drivers/net/ethernet/sfc/Makefile       |    3
-rw-r--r--  drivers/net/ethernet/sfc/efx.c          |    4
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c       |   58
-rw-r--r--  drivers/net/ethernet/sfc/farch.c        | 1781
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h   |   50
-rw-r--r--  drivers/net/ethernet/sfc/nic.c          | 1811
-rw-r--r--  drivers/net/ethernet/sfc/nic.h          |  200
-rw-r--r--  drivers/net/ethernet/sfc/siena.c        |   34
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c  |    5
9 files changed, 2088 insertions(+), 1858 deletions(-)
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 5b31d8a4ae5e..ef7410f014d6 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,4 +1,5 @@
1 | sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \ | 1 | sfc-y += efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \ |
2 | filter.o \ | ||
2 | selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ | 3 | selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ |
3 | tenxpress.o txc43128_phy.o falcon_boards.o \ | 4 | tenxpress.o txc43128_phy.o falcon_boards.o \ |
4 | mcdi.o mcdi_port.o mcdi_mon.o ptp.o | 5 | mcdi.o mcdi_port.o mcdi_mon.o ptp.o |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 9c6555c12acf..872b9f5b38a3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1386,7 +1386,7 @@ static void efx_enable_interrupts(struct efx_nic *efx)
1386 | efx->eeh_disabled_legacy_irq = false; | 1386 | efx->eeh_disabled_legacy_irq = false; |
1387 | } | 1387 | } |
1388 | 1388 | ||
1389 | efx_nic_enable_interrupts(efx); | 1389 | efx->type->irq_enable_master(efx); |
1390 | 1390 | ||
1391 | efx_for_each_channel(channel, efx) { | 1391 | efx_for_each_channel(channel, efx) { |
1392 | if (channel->type->keep_eventq) | 1392 | if (channel->type->keep_eventq) |
@@ -1407,7 +1407,7 @@ static void efx_disable_interrupts(struct efx_nic *efx)
1407 | efx_fini_eventq(channel); | 1407 | efx_fini_eventq(channel); |
1408 | } | 1408 | } |
1409 | 1409 | ||
1410 | efx_nic_disable_interrupts(efx); | 1410 | efx->type->irq_disable_non_ev(efx); |
1411 | } | 1411 | } |
1412 | 1412 | ||
1413 | static void efx_remove_interrupts(struct efx_nic *efx) | 1413 | static void efx_remove_interrupts(struct efx_nic *efx) |
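The efx.c hunk above captures the point of the refactoring: the core driver no longer calls Falcon-architecture helpers directly, but dispatches through per-NIC-type hooks in struct efx_nic_type (populated in the falcon.c hunk below). A minimal sketch of that pattern, with simplified declarations rather than the driver's real ones:

	/* Simplified sketch of the method-table dispatch; the real
	 * struct efx_nic_type carries many more hooks than shown here. */
	struct efx_nic;
	struct efx_channel;

	struct efx_nic_type {
		void (*irq_enable_master)(struct efx_nic *efx);
		void (*irq_disable_non_ev)(struct efx_nic *efx);
		int (*ev_process)(struct efx_channel *channel, int budget);
	};

	struct efx_nic {
		const struct efx_nic_type *type;
	};

	static void example_enable_interrupts(struct efx_nic *efx)
	{
		/* Was: efx_nic_enable_interrupts(efx);
		 * the Falcon/Siena types now point this hook at
		 * efx_farch_irq_enable_master(). */
		efx->type->irq_enable_master(efx);
	}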
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index c8efcb0efded..fe83c26c4b8a 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -346,7 +346,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
346 | } | 346 | } |
347 | 347 | ||
348 | 348 | ||
349 | irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | 349 | static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) |
350 | { | 350 | { |
351 | struct efx_nic *efx = dev_id; | 351 | struct efx_nic *efx = dev_id; |
352 | efx_oword_t *int_ker = efx->irq_status.addr; | 352 | efx_oword_t *int_ker = efx->irq_status.addr; |
@@ -373,7 +373,7 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
373 | /* Check to see if we have a serious error condition */ | 373 | /* Check to see if we have a serious error condition */ |
374 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | 374 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
375 | if (unlikely(syserr)) | 375 | if (unlikely(syserr)) |
376 | return efx_nic_fatal_interrupt(efx); | 376 | return efx_farch_fatal_interrupt(efx); |
377 | 377 | ||
378 | /* Determine interrupting queues, clear interrupt status | 378 | /* Determine interrupting queues, clear interrupt status |
379 | * register and acknowledge the device interrupt. | 379 | * register and acknowledge the device interrupt. |
@@ -1558,7 +1558,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
1558 | return falcon_read_nvram(efx, NULL); | 1558 | return falcon_read_nvram(efx, NULL); |
1559 | } | 1559 | } |
1560 | 1560 | ||
1561 | static const struct efx_nic_register_test falcon_b0_register_tests[] = { | 1561 | static const struct efx_farch_register_test falcon_b0_register_tests[] = { |
1562 | { FR_AZ_ADR_REGION, | 1562 | { FR_AZ_ADR_REGION, |
1563 | EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, | 1563 | EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, |
1564 | { FR_AZ_RX_CFG, | 1564 | { FR_AZ_RX_CFG, |
@@ -1618,8 +1618,8 @@ falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
1618 | efx_reset_down(efx, reset_method); | 1618 | efx_reset_down(efx, reset_method); |
1619 | 1619 | ||
1620 | tests->registers = | 1620 | tests->registers = |
1621 | efx_nic_test_registers(efx, falcon_b0_register_tests, | 1621 | efx_farch_test_registers(efx, falcon_b0_register_tests, |
1622 | ARRAY_SIZE(falcon_b0_register_tests)) | 1622 | ARRAY_SIZE(falcon_b0_register_tests)) |
1623 | ? -1 : 1; | 1623 | ? -1 : 1; |
1624 | 1624 | ||
1625 | rc = falcon_reset_hw(efx, reset_method); | 1625 | rc = falcon_reset_hw(efx, reset_method); |
@@ -1984,7 +1984,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
1984 | 1984 | ||
1985 | rc = -ENODEV; | 1985 | rc = -ENODEV; |
1986 | 1986 | ||
1987 | if (efx_nic_fpga_ver(efx) != 0) { | 1987 | if (efx_farch_fpga_ver(efx) != 0) { |
1988 | netif_err(efx, probe, efx->net_dev, | 1988 | netif_err(efx, probe, efx->net_dev, |
1989 | "Falcon FPGA not supported\n"); | 1989 | "Falcon FPGA not supported\n"); |
1990 | goto fail1; | 1990 | goto fail1; |
@@ -2218,7 +2218,7 @@ static int falcon_init_nic(struct efx_nic *efx)
2218 | efx_writeo(efx, &temp, FR_BZ_DP_CTRL); | 2218 | efx_writeo(efx, &temp, FR_BZ_DP_CTRL); |
2219 | } | 2219 | } |
2220 | 2220 | ||
2221 | efx_nic_init_common(efx); | 2221 | efx_farch_init_common(efx); |
2222 | 2222 | ||
2223 | return 0; | 2223 | return 0; |
2224 | } | 2224 | } |
@@ -2367,6 +2367,28 @@ const struct efx_nic_type falcon_a1_nic_type = {
2367 | .set_wol = falcon_set_wol, | 2367 | .set_wol = falcon_set_wol, |
2368 | .resume_wol = efx_port_dummy_op_void, | 2368 | .resume_wol = efx_port_dummy_op_void, |
2369 | .test_nvram = falcon_test_nvram, | 2369 | .test_nvram = falcon_test_nvram, |
2370 | .irq_enable_master = efx_farch_irq_enable_master, | ||
2371 | .irq_test_generate = efx_farch_irq_test_generate, | ||
2372 | .irq_disable_non_ev = efx_farch_irq_disable_master, | ||
2373 | .irq_handle_msi = efx_farch_msi_interrupt, | ||
2374 | .irq_handle_legacy = falcon_legacy_interrupt_a1, | ||
2375 | .tx_probe = efx_farch_tx_probe, | ||
2376 | .tx_init = efx_farch_tx_init, | ||
2377 | .tx_remove = efx_farch_tx_remove, | ||
2378 | .tx_write = efx_farch_tx_write, | ||
2379 | .rx_push_indir_table = efx_farch_rx_push_indir_table, | ||
2380 | .rx_probe = efx_farch_rx_probe, | ||
2381 | .rx_init = efx_farch_rx_init, | ||
2382 | .rx_remove = efx_farch_rx_remove, | ||
2383 | .rx_write = efx_farch_rx_write, | ||
2384 | .rx_defer_refill = efx_farch_rx_defer_refill, | ||
2385 | .ev_probe = efx_farch_ev_probe, | ||
2386 | .ev_init = efx_farch_ev_init, | ||
2387 | .ev_fini = efx_farch_ev_fini, | ||
2388 | .ev_remove = efx_farch_ev_remove, | ||
2389 | .ev_process = efx_farch_ev_process, | ||
2390 | .ev_read_ack = efx_farch_ev_read_ack, | ||
2391 | .ev_test_generate = efx_farch_ev_test_generate, | ||
2370 | 2392 | ||
2371 | .revision = EFX_REV_FALCON_A1, | 2393 | .revision = EFX_REV_FALCON_A1, |
2372 | .mem_map_size = 0x20000, | 2394 | .mem_map_size = 0x20000, |
@@ -2414,6 +2436,28 @@ const struct efx_nic_type falcon_b0_nic_type = {
2414 | .resume_wol = efx_port_dummy_op_void, | 2436 | .resume_wol = efx_port_dummy_op_void, |
2415 | .test_chip = falcon_b0_test_chip, | 2437 | .test_chip = falcon_b0_test_chip, |
2416 | .test_nvram = falcon_test_nvram, | 2438 | .test_nvram = falcon_test_nvram, |
2439 | .irq_enable_master = efx_farch_irq_enable_master, | ||
2440 | .irq_test_generate = efx_farch_irq_test_generate, | ||
2441 | .irq_disable_non_ev = efx_farch_irq_disable_master, | ||
2442 | .irq_handle_msi = efx_farch_msi_interrupt, | ||
2443 | .irq_handle_legacy = efx_farch_legacy_interrupt, | ||
2444 | .tx_probe = efx_farch_tx_probe, | ||
2445 | .tx_init = efx_farch_tx_init, | ||
2446 | .tx_remove = efx_farch_tx_remove, | ||
2447 | .tx_write = efx_farch_tx_write, | ||
2448 | .rx_push_indir_table = efx_farch_rx_push_indir_table, | ||
2449 | .rx_probe = efx_farch_rx_probe, | ||
2450 | .rx_init = efx_farch_rx_init, | ||
2451 | .rx_remove = efx_farch_rx_remove, | ||
2452 | .rx_write = efx_farch_rx_write, | ||
2453 | .rx_defer_refill = efx_farch_rx_defer_refill, | ||
2454 | .ev_probe = efx_farch_ev_probe, | ||
2455 | .ev_init = efx_farch_ev_init, | ||
2456 | .ev_fini = efx_farch_ev_fini, | ||
2457 | .ev_remove = efx_farch_ev_remove, | ||
2458 | .ev_process = efx_farch_ev_process, | ||
2459 | .ev_read_ack = efx_farch_ev_read_ack, | ||
2460 | .ev_test_generate = efx_farch_ev_test_generate, | ||
2417 | 2461 | ||
2418 | .revision = EFX_REV_FALCON_B0, | 2462 | .revision = EFX_REV_FALCON_B0, |
2419 | /* Map everything up to and including the RSS indirection | 2463 | /* Map everything up to and including the RSS indirection |
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
new file mode 100644
index 000000000000..c3d07c556569
--- /dev/null
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -0,0 +1,1781 @@
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2011 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/bitops.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include "net_driver.h" | ||
18 | #include "bitfield.h" | ||
19 | #include "efx.h" | ||
20 | #include "nic.h" | ||
21 | #include "farch_regs.h" | ||
22 | #include "io.h" | ||
23 | #include "workarounds.h" | ||
24 | |||
25 | /* Falcon-architecture (SFC4000 and SFC9000-family) support */ | ||
26 | |||
27 | /************************************************************************** | ||
28 | * | ||
29 | * Configurable values | ||
30 | * | ||
31 | ************************************************************************** | ||
32 | */ | ||
33 | |||
34 | /* This is set to 16 for a good reason. In summary, if larger than | ||
35 | * 16, the descriptor cache holds more than a default socket | ||
36 | * buffer's worth of packets (for UDP we can only have at most one | ||
37 | * socket buffer's worth outstanding). This combined with the fact | ||
38 | * that we only get 1 TX event per descriptor cache means the NIC | ||
39 | * goes idle. | ||
40 | */ | ||
41 | #define TX_DC_ENTRIES 16 | ||
42 | #define TX_DC_ENTRIES_ORDER 1 | ||
43 | |||
44 | #define RX_DC_ENTRIES 64 | ||
45 | #define RX_DC_ENTRIES_ORDER 3 | ||
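The _ORDER values here are consistent with the descriptor-cache size being encoded as 8 << order (16 = 8 << 1, 64 = 8 << 3). A hypothetical compile-time check, not part of this patch, would read:

	/* Assumed relationship between the size and _ORDER macros above;
	 * BUILD_BUG_ON() comes from <linux/bug.h>. */
	static inline void check_dc_entries(void)
	{
		BUILD_BUG_ON(TX_DC_ENTRIES != 8 << TX_DC_ENTRIES_ORDER);
		BUILD_BUG_ON(RX_DC_ENTRIES != 8 << RX_DC_ENTRIES_ORDER);
	}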
46 | |||
47 | /* If EFX_MAX_INT_ERRORS internal errors occur within | ||
48 | * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and | ||
49 | * disable it. | ||
50 | */ | ||
51 | #define EFX_INT_ERROR_EXPIRE 3600 | ||
52 | #define EFX_MAX_INT_ERRORS 5 | ||
53 | |||
54 | /* Depth of RX flush request fifo */ | ||
55 | #define EFX_RX_FLUSH_COUNT 4 | ||
56 | |||
57 | /* Driver generated events */ | ||
58 | #define _EFX_CHANNEL_MAGIC_TEST 0x000101 | ||
59 | #define _EFX_CHANNEL_MAGIC_FILL 0x000102 | ||
60 | #define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103 | ||
61 | #define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104 | ||
62 | |||
63 | #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) | ||
64 | #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) | ||
65 | |||
66 | #define EFX_CHANNEL_MAGIC_TEST(_channel) \ | ||
67 | _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) | ||
68 | #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ | ||
69 | _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ | ||
70 | efx_rx_queue_index(_rx_queue)) | ||
71 | #define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ | ||
72 | _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \ | ||
73 | efx_rx_queue_index(_rx_queue)) | ||
74 | #define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ | ||
75 | _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ | ||
76 | (_tx_queue)->queue) | ||
77 | |||
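A worked example of the encoding above (plain arithmetic on the macros, not code from the patch): the magic word packs the event code into the upper bits and the channel or queue index into the low byte.

	EFX_CHANNEL_MAGIC_TEST() for channel 2
		= (_EFX_CHANNEL_MAGIC_TEST << 8) | 2
		= (0x000101 << 8) | 2
		= 0x00010102
	_EFX_CHANNEL_MAGIC_CODE(0x00010102) = 0x00010102 >> 8 = 0x000101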
78 | static void efx_farch_magic_event(struct efx_channel *channel, u32 magic); | ||
79 | |||
80 | /************************************************************************** | ||
81 | * | ||
82 | * Hardware access | ||
83 | * | ||
84 | **************************************************************************/ | ||
85 | |||
86 | static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, | ||
87 | unsigned int index) | ||
88 | { | ||
89 | efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, | ||
90 | value, index); | ||
91 | } | ||
92 | |||
93 | static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, | ||
94 | const efx_oword_t *mask) | ||
95 | { | ||
96 | return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || | ||
97 | ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); | ||
98 | } | ||
99 | |||
100 | int efx_farch_test_registers(struct efx_nic *efx, | ||
101 | const struct efx_farch_register_test *regs, | ||
102 | size_t n_regs) | ||
103 | { | ||
104 | unsigned address = 0, i, j; | ||
105 | efx_oword_t mask, imask, original, reg, buf; | ||
106 | |||
107 | for (i = 0; i < n_regs; ++i) { | ||
108 | address = regs[i].address; | ||
109 | mask = imask = regs[i].mask; | ||
110 | EFX_INVERT_OWORD(imask); | ||
111 | |||
112 | efx_reado(efx, &original, address); | ||
113 | |||
114 | /* bit sweep on and off */ | ||
115 | for (j = 0; j < 128; j++) { | ||
116 | if (!EFX_EXTRACT_OWORD32(mask, j, j)) | ||
117 | continue; | ||
118 | |||
119 | /* Test this testable bit can be set in isolation */ | ||
120 | EFX_AND_OWORD(reg, original, mask); | ||
121 | EFX_SET_OWORD32(reg, j, j, 1); | ||
122 | |||
123 | efx_writeo(efx, ®, address); | ||
124 | efx_reado(efx, &buf, address); | ||
125 | |||
126 | if (efx_masked_compare_oword(®, &buf, &mask)) | ||
127 | goto fail; | ||
128 | |||
129 | /* Test this testable bit can be cleared in isolation */ | ||
130 | EFX_OR_OWORD(reg, original, mask); | ||
131 | EFX_SET_OWORD32(reg, j, j, 0); | ||
132 | |||
133 | efx_writeo(efx, ®, address); | ||
134 | efx_reado(efx, &buf, address); | ||
135 | |||
136 | if (efx_masked_compare_oword(®, &buf, &mask)) | ||
137 | goto fail; | ||
138 | } | ||
139 | |||
140 | efx_writeo(efx, &original, address); | ||
141 | } | ||
142 | |||
143 | return 0; | ||
144 | |||
145 | fail: | ||
146 | netif_err(efx, hw, efx->net_dev, | ||
147 | "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT | ||
148 | " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), | ||
149 | EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); | ||
150 | return -EIO; | ||
151 | } | ||
152 | |||
153 | /************************************************************************** | ||
154 | * | ||
155 | * Special buffer handling | ||
156 | * Special buffers are used for event queues and the TX and RX | ||
157 | * descriptor rings. | ||
158 | * | ||
159 | *************************************************************************/ | ||
160 | |||
161 | /* | ||
162 | * Initialise a special buffer | ||
163 | * | ||
164 | * This will define a buffer (previously allocated via | ||
165 | * efx_alloc_special_buffer()) in the buffer table, allowing | ||
166 | * it to be used for event queues, descriptor rings etc. | ||
167 | */ | ||
168 | static void | ||
169 | efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
170 | { | ||
171 | efx_qword_t buf_desc; | ||
172 | unsigned int index; | ||
173 | dma_addr_t dma_addr; | ||
174 | int i; | ||
175 | |||
176 | EFX_BUG_ON_PARANOID(!buffer->buf.addr); | ||
177 | |||
178 | /* Write buffer descriptors to NIC */ | ||
179 | for (i = 0; i < buffer->entries; i++) { | ||
180 | index = buffer->index + i; | ||
181 | dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE); | ||
182 | netif_dbg(efx, probe, efx->net_dev, | ||
183 | "mapping special buffer %d at %llx\n", | ||
184 | index, (unsigned long long)dma_addr); | ||
185 | EFX_POPULATE_QWORD_3(buf_desc, | ||
186 | FRF_AZ_BUF_ADR_REGION, 0, | ||
187 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, | ||
188 | FRF_AZ_BUF_OWNER_ID_FBUF, 0); | ||
189 | efx_write_buf_tbl(efx, &buf_desc, index); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | /* Unmaps a buffer and clears the buffer table entries */ | ||
194 | static void | ||
195 | efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
196 | { | ||
197 | efx_oword_t buf_tbl_upd; | ||
198 | unsigned int start = buffer->index; | ||
199 | unsigned int end = (buffer->index + buffer->entries - 1); | ||
200 | |||
201 | if (!buffer->entries) | ||
202 | return; | ||
203 | |||
204 | netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", | ||
205 | buffer->index, buffer->index + buffer->entries - 1); | ||
206 | |||
207 | EFX_POPULATE_OWORD_4(buf_tbl_upd, | ||
208 | FRF_AZ_BUF_UPD_CMD, 0, | ||
209 | FRF_AZ_BUF_CLR_CMD, 1, | ||
210 | FRF_AZ_BUF_CLR_END_ID, end, | ||
211 | FRF_AZ_BUF_CLR_START_ID, start); | ||
212 | efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); | ||
213 | } | ||
214 | |||
215 | /* | ||
216 | * Allocate a new special buffer | ||
217 | * | ||
218 | * This allocates memory for a new buffer, clears it and allocates a | ||
219 | * new buffer ID range. It does not write into the buffer table. | ||
220 | * | ||
221 | * This call will allocate 4KB buffers, since 8KB buffers can't be | ||
222 | * used for event queues and descriptor rings. | ||
223 | */ | ||
224 | static int efx_alloc_special_buffer(struct efx_nic *efx, | ||
225 | struct efx_special_buffer *buffer, | ||
226 | unsigned int len) | ||
227 | { | ||
228 | len = ALIGN(len, EFX_BUF_SIZE); | ||
229 | |||
230 | if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) | ||
231 | return -ENOMEM; | ||
232 | buffer->entries = len / EFX_BUF_SIZE; | ||
233 | BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1)); | ||
234 | |||
235 | /* Select new buffer ID */ | ||
236 | buffer->index = efx->next_buffer_table; | ||
237 | efx->next_buffer_table += buffer->entries; | ||
238 | #ifdef CONFIG_SFC_SRIOV | ||
239 | BUG_ON(efx_sriov_enabled(efx) && | ||
240 | efx->vf_buftbl_base < efx->next_buffer_table); | ||
241 | #endif | ||
242 | |||
243 | netif_dbg(efx, probe, efx->net_dev, | ||
244 | "allocating special buffers %d-%d at %llx+%x " | ||
245 | "(virt %p phys %llx)\n", buffer->index, | ||
246 | buffer->index + buffer->entries - 1, | ||
247 | (u64)buffer->buf.dma_addr, len, | ||
248 | buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); | ||
249 | |||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | static void | ||
254 | efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
255 | { | ||
256 | if (!buffer->buf.addr) | ||
257 | return; | ||
258 | |||
259 | netif_dbg(efx, hw, efx->net_dev, | ||
260 | "deallocating special buffers %d-%d at %llx+%x " | ||
261 | "(virt %p phys %llx)\n", buffer->index, | ||
262 | buffer->index + buffer->entries - 1, | ||
263 | (u64)buffer->buf.dma_addr, buffer->buf.len, | ||
264 | buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); | ||
265 | |||
266 | efx_nic_free_buffer(efx, &buffer->buf); | ||
267 | buffer->entries = 0; | ||
268 | } | ||
269 | |||
270 | /************************************************************************** | ||
271 | * | ||
272 | * TX path | ||
273 | * | ||
274 | **************************************************************************/ | ||
275 | |||
276 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ | ||
277 | static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue) | ||
278 | { | ||
279 | unsigned write_ptr; | ||
280 | efx_dword_t reg; | ||
281 | |||
282 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | ||
283 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); | ||
284 | efx_writed_page(tx_queue->efx, ®, | ||
285 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); | ||
286 | } | ||
287 | |||
288 | /* Write pointer and first descriptor for TX descriptor ring */ | ||
289 | static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue, | ||
290 | const efx_qword_t *txd) | ||
291 | { | ||
292 | unsigned write_ptr; | ||
293 | efx_oword_t reg; | ||
294 | |||
295 | BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); | ||
296 | BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); | ||
297 | |||
298 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | ||
299 | EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, | ||
300 | FRF_AZ_TX_DESC_WPTR, write_ptr); | ||
301 | reg.qword[0] = *txd; | ||
302 | efx_writeo_page(tx_queue->efx, ®, | ||
303 | FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); | ||
304 | } | ||
305 | |||
306 | |||
307 | /* For each entry inserted into the software descriptor ring, create a | ||
308 | * descriptor in the hardware TX descriptor ring (in host memory), and | ||
309 | * write a doorbell. | ||
310 | */ | ||
311 | void efx_farch_tx_write(struct efx_tx_queue *tx_queue) | ||
312 | { | ||
313 | |||
314 | struct efx_tx_buffer *buffer; | ||
315 | efx_qword_t *txd; | ||
316 | unsigned write_ptr; | ||
317 | unsigned old_write_count = tx_queue->write_count; | ||
318 | |||
319 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | ||
320 | |||
321 | do { | ||
322 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | ||
323 | buffer = &tx_queue->buffer[write_ptr]; | ||
324 | txd = efx_tx_desc(tx_queue, write_ptr); | ||
325 | ++tx_queue->write_count; | ||
326 | |||
327 | /* Create TX descriptor ring entry */ | ||
328 | BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); | ||
329 | EFX_POPULATE_QWORD_4(*txd, | ||
330 | FSF_AZ_TX_KER_CONT, | ||
331 | buffer->flags & EFX_TX_BUF_CONT, | ||
332 | FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, | ||
333 | FSF_AZ_TX_KER_BUF_REGION, 0, | ||
334 | FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); | ||
335 | } while (tx_queue->write_count != tx_queue->insert_count); | ||
336 | |||
337 | wmb(); /* Ensure descriptors are written before they are fetched */ | ||
338 | |||
339 | if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { | ||
340 | txd = efx_tx_desc(tx_queue, | ||
341 | old_write_count & tx_queue->ptr_mask); | ||
342 | efx_farch_push_tx_desc(tx_queue, txd); | ||
343 | ++tx_queue->pushes; | ||
344 | } else { | ||
345 | efx_farch_notify_tx_desc(tx_queue); | ||
346 | } | ||
347 | } | ||
348 | |||
349 | /* Allocate hardware resources for a TX queue */ | ||
350 | int efx_farch_tx_probe(struct efx_tx_queue *tx_queue) | ||
351 | { | ||
352 | struct efx_nic *efx = tx_queue->efx; | ||
353 | unsigned entries; | ||
354 | |||
355 | entries = tx_queue->ptr_mask + 1; | ||
356 | return efx_alloc_special_buffer(efx, &tx_queue->txd, | ||
357 | entries * sizeof(efx_qword_t)); | ||
358 | } | ||
359 | |||
360 | void efx_farch_tx_init(struct efx_tx_queue *tx_queue) | ||
361 | { | ||
362 | struct efx_nic *efx = tx_queue->efx; | ||
363 | efx_oword_t reg; | ||
364 | |||
365 | /* Pin TX descriptor ring */ | ||
366 | efx_init_special_buffer(efx, &tx_queue->txd); | ||
367 | |||
368 | /* Push TX descriptor ring to card */ | ||
369 | EFX_POPULATE_OWORD_10(reg, | ||
370 | FRF_AZ_TX_DESCQ_EN, 1, | ||
371 | FRF_AZ_TX_ISCSI_DDIG_EN, 0, | ||
372 | FRF_AZ_TX_ISCSI_HDIG_EN, 0, | ||
373 | FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, | ||
374 | FRF_AZ_TX_DESCQ_EVQ_ID, | ||
375 | tx_queue->channel->channel, | ||
376 | FRF_AZ_TX_DESCQ_OWNER_ID, 0, | ||
377 | FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, | ||
378 | FRF_AZ_TX_DESCQ_SIZE, | ||
379 | __ffs(tx_queue->txd.entries), | ||
380 | FRF_AZ_TX_DESCQ_TYPE, 0, | ||
381 | FRF_BZ_TX_NON_IP_DROP_DIS, 1); | ||
382 | |||
383 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | ||
384 | int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; | ||
385 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); | ||
386 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, | ||
387 | !csum); | ||
388 | } | ||
389 | |||
390 | efx_writeo_table(efx, ®, efx->type->txd_ptr_tbl_base, | ||
391 | tx_queue->queue); | ||
392 | |||
393 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { | ||
394 | /* Only 128 bits in this register */ | ||
395 | BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); | ||
396 | |||
397 | efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); | ||
398 | if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) | ||
399 | __clear_bit_le(tx_queue->queue, ®); | ||
400 | else | ||
401 | __set_bit_le(tx_queue->queue, ®); | ||
402 | efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); | ||
403 | } | ||
404 | |||
405 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | ||
406 | EFX_POPULATE_OWORD_1(reg, | ||
407 | FRF_BZ_TX_PACE, | ||
408 | (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? | ||
409 | FFE_BZ_TX_PACE_OFF : | ||
410 | FFE_BZ_TX_PACE_RESERVED); | ||
411 | efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, | ||
412 | tx_queue->queue); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue) | ||
417 | { | ||
418 | struct efx_nic *efx = tx_queue->efx; | ||
419 | efx_oword_t tx_flush_descq; | ||
420 | |||
421 | WARN_ON(atomic_read(&tx_queue->flush_outstanding)); | ||
422 | atomic_set(&tx_queue->flush_outstanding, 1); | ||
423 | |||
424 | EFX_POPULATE_OWORD_2(tx_flush_descq, | ||
425 | FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, | ||
426 | FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); | ||
427 | efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); | ||
428 | } | ||
429 | |||
430 | void efx_farch_tx_fini(struct efx_tx_queue *tx_queue) | ||
431 | { | ||
432 | struct efx_nic *efx = tx_queue->efx; | ||
433 | efx_oword_t tx_desc_ptr; | ||
434 | |||
435 | /* Remove TX descriptor ring from card */ | ||
436 | EFX_ZERO_OWORD(tx_desc_ptr); | ||
437 | efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | ||
438 | tx_queue->queue); | ||
439 | |||
440 | /* Unpin TX descriptor ring */ | ||
441 | efx_fini_special_buffer(efx, &tx_queue->txd); | ||
442 | } | ||
443 | |||
444 | /* Free buffers backing TX queue */ | ||
445 | void efx_farch_tx_remove(struct efx_tx_queue *tx_queue) | ||
446 | { | ||
447 | efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); | ||
448 | } | ||
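Taken together, the TX-path functions above follow a probe/init/write/fini/remove lifecycle, driven by the core driver through the efx_nic_type hooks wired up in falcon.c. An informal outline (not code from the patch):

	efx->type->tx_probe(tx_queue);   /* efx_farch_tx_probe(): allocate the txd special buffer */
	efx->type->tx_init(tx_queue);    /* efx_farch_tx_init(): pin the ring, push TX_DESC_PTR_TBL */
	efx->type->tx_write(tx_queue);   /* efx_farch_tx_write(): per batch, write descriptors + doorbell */
	/* efx_farch_tx_fini() is invoked from efx_farch_fini_dmaq() after flushing */
	efx->type->tx_remove(tx_queue);  /* efx_farch_tx_remove(): free the special buffer */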
449 | |||
450 | /************************************************************************** | ||
451 | * | ||
452 | * RX path | ||
453 | * | ||
454 | **************************************************************************/ | ||
455 | |||
456 | /* This creates an entry in the RX descriptor queue */ | ||
457 | static inline void | ||
458 | efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) | ||
459 | { | ||
460 | struct efx_rx_buffer *rx_buf; | ||
461 | efx_qword_t *rxd; | ||
462 | |||
463 | rxd = efx_rx_desc(rx_queue, index); | ||
464 | rx_buf = efx_rx_buffer(rx_queue, index); | ||
465 | EFX_POPULATE_QWORD_3(*rxd, | ||
466 | FSF_AZ_RX_KER_BUF_SIZE, | ||
467 | rx_buf->len - | ||
468 | rx_queue->efx->type->rx_buffer_padding, | ||
469 | FSF_AZ_RX_KER_BUF_REGION, 0, | ||
470 | FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); | ||
471 | } | ||
472 | |||
473 | /* This writes to the RX_DESC_WPTR register for the specified receive | ||
474 | * descriptor ring. | ||
475 | */ | ||
476 | void efx_farch_rx_write(struct efx_rx_queue *rx_queue) | ||
477 | { | ||
478 | struct efx_nic *efx = rx_queue->efx; | ||
479 | efx_dword_t reg; | ||
480 | unsigned write_ptr; | ||
481 | |||
482 | while (rx_queue->notified_count != rx_queue->added_count) { | ||
483 | efx_farch_build_rx_desc( | ||
484 | rx_queue, | ||
485 | rx_queue->notified_count & rx_queue->ptr_mask); | ||
486 | ++rx_queue->notified_count; | ||
487 | } | ||
488 | |||
489 | wmb(); | ||
490 | write_ptr = rx_queue->added_count & rx_queue->ptr_mask; | ||
491 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); | ||
492 | efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, | ||
493 | efx_rx_queue_index(rx_queue)); | ||
494 | } | ||
495 | |||
496 | int efx_farch_rx_probe(struct efx_rx_queue *rx_queue) | ||
497 | { | ||
498 | struct efx_nic *efx = rx_queue->efx; | ||
499 | unsigned entries; | ||
500 | |||
501 | entries = rx_queue->ptr_mask + 1; | ||
502 | return efx_alloc_special_buffer(efx, &rx_queue->rxd, | ||
503 | entries * sizeof(efx_qword_t)); | ||
504 | } | ||
505 | |||
506 | void efx_farch_rx_init(struct efx_rx_queue *rx_queue) | ||
507 | { | ||
508 | efx_oword_t rx_desc_ptr; | ||
509 | struct efx_nic *efx = rx_queue->efx; | ||
510 | bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; | ||
511 | bool iscsi_digest_en = is_b0; | ||
512 | bool jumbo_en; | ||
513 | |||
514 | /* For kernel-mode queues in Falcon A1, the JUMBO flag enables | ||
515 | * DMA to continue after a PCIe page boundary (and scattering | ||
516 | * is not possible). In Falcon B0 and Siena, it enables | ||
517 | * scatter. | ||
518 | */ | ||
519 | jumbo_en = !is_b0 || efx->rx_scatter; | ||
520 | |||
521 | netif_dbg(efx, hw, efx->net_dev, | ||
522 | "RX queue %d ring in special buffers %d-%d\n", | ||
523 | efx_rx_queue_index(rx_queue), rx_queue->rxd.index, | ||
524 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); | ||
525 | |||
526 | rx_queue->scatter_n = 0; | ||
527 | |||
528 | /* Pin RX descriptor ring */ | ||
529 | efx_init_special_buffer(efx, &rx_queue->rxd); | ||
530 | |||
531 | /* Push RX descriptor ring to card */ | ||
532 | EFX_POPULATE_OWORD_10(rx_desc_ptr, | ||
533 | FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, | ||
534 | FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, | ||
535 | FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, | ||
536 | FRF_AZ_RX_DESCQ_EVQ_ID, | ||
537 | efx_rx_queue_channel(rx_queue)->channel, | ||
538 | FRF_AZ_RX_DESCQ_OWNER_ID, 0, | ||
539 | FRF_AZ_RX_DESCQ_LABEL, | ||
540 | efx_rx_queue_index(rx_queue), | ||
541 | FRF_AZ_RX_DESCQ_SIZE, | ||
542 | __ffs(rx_queue->rxd.entries), | ||
543 | FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , | ||
544 | FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, | ||
545 | FRF_AZ_RX_DESCQ_EN, 1); | ||
546 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
547 | efx_rx_queue_index(rx_queue)); | ||
548 | } | ||
549 | |||
550 | static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue) | ||
551 | { | ||
552 | struct efx_nic *efx = rx_queue->efx; | ||
553 | efx_oword_t rx_flush_descq; | ||
554 | |||
555 | EFX_POPULATE_OWORD_2(rx_flush_descq, | ||
556 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, | ||
557 | FRF_AZ_RX_FLUSH_DESCQ, | ||
558 | efx_rx_queue_index(rx_queue)); | ||
559 | efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); | ||
560 | } | ||
561 | |||
562 | void efx_farch_rx_fini(struct efx_rx_queue *rx_queue) | ||
563 | { | ||
564 | efx_oword_t rx_desc_ptr; | ||
565 | struct efx_nic *efx = rx_queue->efx; | ||
566 | |||
567 | /* Remove RX descriptor ring from card */ | ||
568 | EFX_ZERO_OWORD(rx_desc_ptr); | ||
569 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
570 | efx_rx_queue_index(rx_queue)); | ||
571 | |||
572 | /* Unpin RX descriptor ring */ | ||
573 | efx_fini_special_buffer(efx, &rx_queue->rxd); | ||
574 | } | ||
575 | |||
576 | /* Free buffers backing RX queue */ | ||
577 | void efx_farch_rx_remove(struct efx_rx_queue *rx_queue) | ||
578 | { | ||
579 | efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); | ||
580 | } | ||
581 | |||
582 | /************************************************************************** | ||
583 | * | ||
584 | * Flush handling | ||
585 | * | ||
586 | **************************************************************************/ | ||
587 | |||
588 | /* efx_farch_flush_queues() must be woken up when all flushes are completed, | ||
589 | * or more RX flushes can be kicked off. | ||
590 | */ | ||
591 | static bool efx_farch_flush_wake(struct efx_nic *efx) | ||
592 | { | ||
593 | /* Ensure that all updates are visible to efx_farch_flush_queues() */ | ||
594 | smp_mb(); | ||
595 | |||
596 | return (atomic_read(&efx->drain_pending) == 0 || | ||
597 | (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT | ||
598 | && atomic_read(&efx->rxq_flush_pending) > 0)); | ||
599 | } | ||
600 | |||
601 | static bool efx_check_tx_flush_complete(struct efx_nic *efx) | ||
602 | { | ||
603 | bool i = true; | ||
604 | efx_oword_t txd_ptr_tbl; | ||
605 | struct efx_channel *channel; | ||
606 | struct efx_tx_queue *tx_queue; | ||
607 | |||
608 | efx_for_each_channel(channel, efx) { | ||
609 | efx_for_each_channel_tx_queue(tx_queue, channel) { | ||
610 | efx_reado_table(efx, &txd_ptr_tbl, | ||
611 | FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); | ||
612 | if (EFX_OWORD_FIELD(txd_ptr_tbl, | ||
613 | FRF_AZ_TX_DESCQ_FLUSH) || | ||
614 | EFX_OWORD_FIELD(txd_ptr_tbl, | ||
615 | FRF_AZ_TX_DESCQ_EN)) { | ||
616 | netif_dbg(efx, hw, efx->net_dev, | ||
617 | "flush did not complete on TXQ %d\n", | ||
618 | tx_queue->queue); | ||
619 | i = false; | ||
620 | } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, | ||
621 | 1, 0)) { | ||
622 | /* The flush is complete, but we didn't | ||
623 | * receive a flush completion event | ||
624 | */ | ||
625 | netif_dbg(efx, hw, efx->net_dev, | ||
626 | "flush complete on TXQ %d, so drain " | ||
627 | "the queue\n", tx_queue->queue); | ||
628 | /* Don't need to increment drain_pending as it | ||
629 | * has already been incremented for the queues | ||
630 | * which did not drain | ||
631 | */ | ||
632 | efx_farch_magic_event(channel, | ||
633 | EFX_CHANNEL_MAGIC_TX_DRAIN( | ||
634 | tx_queue)); | ||
635 | } | ||
636 | } | ||
637 | } | ||
638 | |||
639 | return i; | ||
640 | } | ||
641 | |||
642 | /* Flush all the transmit queues, and continue flushing receive queues until | ||
643 | * they're all flushed. Wait for the DRAIN events to be received so that there | ||
644 | * are no more RX and TX events left on any channel. */ | ||
645 | static int efx_farch_do_flush(struct efx_nic *efx) | ||
646 | { | ||
647 | unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ | ||
648 | struct efx_channel *channel; | ||
649 | struct efx_rx_queue *rx_queue; | ||
650 | struct efx_tx_queue *tx_queue; | ||
651 | int rc = 0; | ||
652 | |||
653 | efx_for_each_channel(channel, efx) { | ||
654 | efx_for_each_channel_tx_queue(tx_queue, channel) { | ||
655 | atomic_inc(&efx->drain_pending); | ||
656 | efx_farch_flush_tx_queue(tx_queue); | ||
657 | } | ||
658 | efx_for_each_channel_rx_queue(rx_queue, channel) { | ||
659 | atomic_inc(&efx->drain_pending); | ||
660 | rx_queue->flush_pending = true; | ||
661 | atomic_inc(&efx->rxq_flush_pending); | ||
662 | } | ||
663 | } | ||
664 | |||
665 | while (timeout && atomic_read(&efx->drain_pending) > 0) { | ||
666 | /* If SRIOV is enabled, then offload receive queue flushing to | ||
667 | * the firmware (though we will still have to poll for | ||
668 | * completion). If that fails, fall back to the old scheme. | ||
669 | */ | ||
670 | if (efx_sriov_enabled(efx)) { | ||
671 | rc = efx_mcdi_flush_rxqs(efx); | ||
672 | if (!rc) | ||
673 | goto wait; | ||
674 | } | ||
675 | |||
676 | /* The hardware supports four concurrent rx flushes, each of | ||
677 | * which may need to be retried if there is an outstanding | ||
678 | * descriptor fetch | ||
679 | */ | ||
680 | efx_for_each_channel(channel, efx) { | ||
681 | efx_for_each_channel_rx_queue(rx_queue, channel) { | ||
682 | if (atomic_read(&efx->rxq_flush_outstanding) >= | ||
683 | EFX_RX_FLUSH_COUNT) | ||
684 | break; | ||
685 | |||
686 | if (rx_queue->flush_pending) { | ||
687 | rx_queue->flush_pending = false; | ||
688 | atomic_dec(&efx->rxq_flush_pending); | ||
689 | atomic_inc(&efx->rxq_flush_outstanding); | ||
690 | efx_farch_flush_rx_queue(rx_queue); | ||
691 | } | ||
692 | } | ||
693 | } | ||
694 | |||
695 | wait: | ||
696 | timeout = wait_event_timeout(efx->flush_wq, | ||
697 | efx_farch_flush_wake(efx), | ||
698 | timeout); | ||
699 | } | ||
700 | |||
701 | if (atomic_read(&efx->drain_pending) && | ||
702 | !efx_check_tx_flush_complete(efx)) { | ||
703 | netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " | ||
704 | "(rx %d+%d)\n", atomic_read(&efx->drain_pending), | ||
705 | atomic_read(&efx->rxq_flush_outstanding), | ||
706 | atomic_read(&efx->rxq_flush_pending)); | ||
707 | rc = -ETIMEDOUT; | ||
708 | |||
709 | atomic_set(&efx->drain_pending, 0); | ||
710 | atomic_set(&efx->rxq_flush_pending, 0); | ||
711 | atomic_set(&efx->rxq_flush_outstanding, 0); | ||
712 | } | ||
713 | |||
714 | return rc; | ||
715 | } | ||
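The flush bookkeeping above rests on three counters; the invariants implied by the code (informal, nothing here is asserted by the patch) are:

	/* drain_pending:         TX + RX queues that still owe a drain event
	 * rxq_flush_pending:     RX queues marked flush_pending but not yet issued
	 * rxq_flush_outstanding: RX flushes in flight, capped at EFX_RX_FLUSH_COUNT (4)
	 * efx->flush_wq is woken (see efx_farch_flush_wake()) when drain_pending
	 * reaches zero or another RX flush can be issued. */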
716 | |||
717 | int efx_farch_fini_dmaq(struct efx_nic *efx) | ||
718 | { | ||
719 | struct efx_channel *channel; | ||
720 | struct efx_tx_queue *tx_queue; | ||
721 | struct efx_rx_queue *rx_queue; | ||
722 | int rc = 0; | ||
723 | |||
724 | /* Do not attempt to write to the NIC during EEH recovery */ | ||
725 | if (efx->state != STATE_RECOVERY) { | ||
726 | /* Only perform flush if DMA is enabled */ | ||
727 | if (efx->pci_dev->is_busmaster) { | ||
728 | efx->type->prepare_flush(efx); | ||
729 | rc = efx_farch_do_flush(efx); | ||
730 | efx->type->finish_flush(efx); | ||
731 | } | ||
732 | |||
733 | efx_for_each_channel(channel, efx) { | ||
734 | efx_for_each_channel_rx_queue(rx_queue, channel) | ||
735 | efx_farch_rx_fini(rx_queue); | ||
736 | efx_for_each_channel_tx_queue(tx_queue, channel) | ||
737 | efx_farch_tx_fini(tx_queue); | ||
738 | } | ||
739 | } | ||
740 | |||
741 | return rc; | ||
742 | } | ||
743 | |||
744 | /************************************************************************** | ||
745 | * | ||
746 | * Event queue processing | ||
747 | * Event queues are processed by per-channel tasklets. | ||
748 | * | ||
749 | **************************************************************************/ | ||
750 | |||
751 | /* Update a channel's event queue's read pointer (RPTR) register | ||
752 | * | ||
753 | * This writes the EVQ_RPTR_REG register for the specified channel's | ||
754 | * event queue. | ||
755 | */ | ||
756 | void efx_farch_ev_read_ack(struct efx_channel *channel) | ||
757 | { | ||
758 | efx_dword_t reg; | ||
759 | struct efx_nic *efx = channel->efx; | ||
760 | |||
761 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, | ||
762 | channel->eventq_read_ptr & channel->eventq_mask); | ||
763 | |||
764 | /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size | ||
765 | * of 4 bytes, but it is really 16 bytes just like later revisions. | ||
766 | */ | ||
767 | efx_writed(efx, ®, | ||
768 | efx->type->evq_rptr_tbl_base + | ||
769 | FR_BZ_EVQ_RPTR_STEP * channel->channel); | ||
770 | } | ||
771 | |||
772 | /* Use HW to insert a SW defined event */ | ||
773 | void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, | ||
774 | efx_qword_t *event) | ||
775 | { | ||
776 | efx_oword_t drv_ev_reg; | ||
777 | |||
778 | BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || | ||
779 | FRF_AZ_DRV_EV_DATA_WIDTH != 64); | ||
780 | drv_ev_reg.u32[0] = event->u32[0]; | ||
781 | drv_ev_reg.u32[1] = event->u32[1]; | ||
782 | drv_ev_reg.u32[2] = 0; | ||
783 | drv_ev_reg.u32[3] = 0; | ||
784 | EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); | ||
785 | efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); | ||
786 | } | ||
787 | |||
788 | static void efx_farch_magic_event(struct efx_channel *channel, u32 magic) | ||
789 | { | ||
790 | efx_qword_t event; | ||
791 | |||
792 | EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, | ||
793 | FSE_AZ_EV_CODE_DRV_GEN_EV, | ||
794 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); | ||
795 | efx_farch_generate_event(channel->efx, channel->channel, &event); | ||
796 | } | ||
797 | |||
798 | /* Handle a transmit completion event | ||
799 | * | ||
800 | * The NIC batches TX completion events; the message we receive is of | ||
801 | * the form "complete all TX events up to this index". | ||
802 | */ | ||
803 | static int | ||
804 | efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | ||
805 | { | ||
806 | unsigned int tx_ev_desc_ptr; | ||
807 | unsigned int tx_ev_q_label; | ||
808 | struct efx_tx_queue *tx_queue; | ||
809 | struct efx_nic *efx = channel->efx; | ||
810 | int tx_packets = 0; | ||
811 | |||
812 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | ||
813 | return 0; | ||
814 | |||
815 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { | ||
816 | /* Transmit completion */ | ||
817 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); | ||
818 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); | ||
819 | tx_queue = efx_channel_get_tx_queue( | ||
820 | channel, tx_ev_q_label % EFX_TXQ_TYPES); | ||
821 | tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & | ||
822 | tx_queue->ptr_mask); | ||
823 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); | ||
824 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { | ||
825 | /* Rewrite the FIFO write pointer */ | ||
826 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); | ||
827 | tx_queue = efx_channel_get_tx_queue( | ||
828 | channel, tx_ev_q_label % EFX_TXQ_TYPES); | ||
829 | |||
830 | netif_tx_lock(efx->net_dev); | ||
831 | efx_farch_notify_tx_desc(tx_queue); | ||
832 | netif_tx_unlock(efx->net_dev); | ||
833 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && | ||
834 | EFX_WORKAROUND_10727(efx)) { | ||
835 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | ||
836 | } else { | ||
837 | netif_err(efx, tx_err, efx->net_dev, | ||
838 | "channel %d unexpected TX event " | ||
839 | EFX_QWORD_FMT"\n", channel->channel, | ||
840 | EFX_QWORD_VAL(*event)); | ||
841 | } | ||
842 | |||
843 | return tx_packets; | ||
844 | } | ||
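Because completions arrive as "everything up to this descriptor index", the count is modular arithmetic on the ring pointer. A worked example with illustrative numbers, assuming a 1024-entry ring (ptr_mask = 1023):

	tx_packets = (tx_ev_desc_ptr - tx_queue->read_count) & tx_queue->ptr_mask;
	/* e.g. read_count = 1020, tx_ev_desc_ptr = 4:
	 *   (4 - 1020) & 1023 = 8  ->  8 descriptors completed across the wrap */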
845 | |||
846 | /* Detect errors included in the rx_evt_pkt_ok bit. */ | ||
847 | static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | ||
848 | const efx_qword_t *event) | ||
849 | { | ||
850 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | ||
851 | struct efx_nic *efx = rx_queue->efx; | ||
852 | bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; | ||
853 | bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; | ||
854 | bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; | ||
855 | bool rx_ev_other_err, rx_ev_pause_frm; | ||
856 | bool rx_ev_hdr_type, rx_ev_mcast_pkt; | ||
857 | unsigned rx_ev_pkt_type; | ||
858 | |||
859 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); | ||
860 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); | ||
861 | rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); | ||
862 | rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); | ||
863 | rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, | ||
864 | FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); | ||
865 | rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, | ||
866 | FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); | ||
867 | rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, | ||
868 | FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); | ||
869 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); | ||
870 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); | ||
871 | rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? | ||
872 | 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); | ||
873 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); | ||
874 | |||
875 | /* Every error apart from tobe_disc and pause_frm */ | ||
876 | rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | | ||
877 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | | ||
878 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); | ||
879 | |||
880 | /* Count errors that are not in MAC stats. Ignore expected | ||
881 | * checksum errors during self-test. */ | ||
882 | if (rx_ev_frm_trunc) | ||
883 | ++channel->n_rx_frm_trunc; | ||
884 | else if (rx_ev_tobe_disc) | ||
885 | ++channel->n_rx_tobe_disc; | ||
886 | else if (!efx->loopback_selftest) { | ||
887 | if (rx_ev_ip_hdr_chksum_err) | ||
888 | ++channel->n_rx_ip_hdr_chksum_err; | ||
889 | else if (rx_ev_tcp_udp_chksum_err) | ||
890 | ++channel->n_rx_tcp_udp_chksum_err; | ||
891 | } | ||
892 | |||
893 | /* TOBE_DISC is expected on unicast mismatches; don't print out an | ||
894 | * error message. FRM_TRUNC indicates RXDP dropped the packet due | ||
895 | * to a FIFO overflow. | ||
896 | */ | ||
897 | #ifdef DEBUG | ||
898 | if (rx_ev_other_err && net_ratelimit()) { | ||
899 | netif_dbg(efx, rx_err, efx->net_dev, | ||
900 | " RX queue %d unexpected RX event " | ||
901 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", | ||
902 | efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), | ||
903 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", | ||
904 | rx_ev_ip_hdr_chksum_err ? | ||
905 | " [IP_HDR_CHKSUM_ERR]" : "", | ||
906 | rx_ev_tcp_udp_chksum_err ? | ||
907 | " [TCP_UDP_CHKSUM_ERR]" : "", | ||
908 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", | ||
909 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", | ||
910 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", | ||
911 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", | ||
912 | rx_ev_pause_frm ? " [PAUSE]" : ""); | ||
913 | } | ||
914 | #endif | ||
915 | |||
916 | /* The frame must be discarded if any of these are true. */ | ||
917 | return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | | ||
918 | rx_ev_tobe_disc | rx_ev_pause_frm) ? | ||
919 | EFX_RX_PKT_DISCARD : 0; | ||
920 | } | ||
921 | |||
922 | /* Handle receive events that are not in-order. Return true if this | ||
923 | * can be handled as a partial packet discard, false if it's more | ||
924 | * serious. | ||
925 | */ | ||
926 | static bool | ||
927 | efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) | ||
928 | { | ||
929 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | ||
930 | struct efx_nic *efx = rx_queue->efx; | ||
931 | unsigned expected, dropped; | ||
932 | |||
933 | if (rx_queue->scatter_n && | ||
934 | index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & | ||
935 | rx_queue->ptr_mask)) { | ||
936 | ++channel->n_rx_nodesc_trunc; | ||
937 | return true; | ||
938 | } | ||
939 | |||
940 | expected = rx_queue->removed_count & rx_queue->ptr_mask; | ||
941 | dropped = (index - expected) & rx_queue->ptr_mask; | ||
942 | netif_info(efx, rx_err, efx->net_dev, | ||
943 | "dropped %d events (index=%d expected=%d)\n", | ||
944 | dropped, index, expected); | ||
945 | |||
946 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? | ||
947 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | ||
948 | return false; | ||
949 | } | ||
950 | |||
951 | /* Handle a packet received event | ||
952 | * | ||
953 | * The NIC gives a "discard" flag if it's a unicast packet with the | ||
954 | * wrong destination address | ||
955 | * Also "is multicast" and "matches multicast filter" flags can be used to | ||
956 | * discard non-matching multicast packets. | ||
957 | */ | ||
958 | static void | ||
959 | efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) | ||
960 | { | ||
961 | unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; | ||
962 | unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; | ||
963 | unsigned expected_ptr; | ||
964 | bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; | ||
965 | u16 flags; | ||
966 | struct efx_rx_queue *rx_queue; | ||
967 | struct efx_nic *efx = channel->efx; | ||
968 | |||
969 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | ||
970 | return; | ||
971 | |||
972 | rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); | ||
973 | rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); | ||
974 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != | ||
975 | channel->channel); | ||
976 | |||
977 | rx_queue = efx_channel_get_rx_queue(channel); | ||
978 | |||
979 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); | ||
980 | expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & | ||
981 | rx_queue->ptr_mask); | ||
982 | |||
983 | /* Check for partial drops and other errors */ | ||
984 | if (unlikely(rx_ev_desc_ptr != expected_ptr) || | ||
985 | unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { | ||
986 | if (rx_ev_desc_ptr != expected_ptr && | ||
987 | !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) | ||
988 | return; | ||
989 | |||
990 | /* Discard all pending fragments */ | ||
991 | if (rx_queue->scatter_n) { | ||
992 | efx_rx_packet( | ||
993 | rx_queue, | ||
994 | rx_queue->removed_count & rx_queue->ptr_mask, | ||
995 | rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); | ||
996 | rx_queue->removed_count += rx_queue->scatter_n; | ||
997 | rx_queue->scatter_n = 0; | ||
998 | } | ||
999 | |||
1000 | /* Return if there is no new fragment */ | ||
1001 | if (rx_ev_desc_ptr != expected_ptr) | ||
1002 | return; | ||
1003 | |||
1004 | /* Discard new fragment if not SOP */ | ||
1005 | if (!rx_ev_sop) { | ||
1006 | efx_rx_packet( | ||
1007 | rx_queue, | ||
1008 | rx_queue->removed_count & rx_queue->ptr_mask, | ||
1009 | 1, 0, EFX_RX_PKT_DISCARD); | ||
1010 | ++rx_queue->removed_count; | ||
1011 | return; | ||
1012 | } | ||
1013 | } | ||
1014 | |||
1015 | ++rx_queue->scatter_n; | ||
1016 | if (rx_ev_cont) | ||
1017 | return; | ||
1018 | |||
1019 | rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); | ||
1020 | rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); | ||
1021 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); | ||
1022 | |||
1023 | if (likely(rx_ev_pkt_ok)) { | ||
1024 | /* If packet is marked as OK then we can rely on the | ||
1025 | * hardware checksum and classification. | ||
1026 | */ | ||
1027 | flags = 0; | ||
1028 | switch (rx_ev_hdr_type) { | ||
1029 | case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: | ||
1030 | flags |= EFX_RX_PKT_TCP; | ||
1031 | /* fall through */ | ||
1032 | case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: | ||
1033 | flags |= EFX_RX_PKT_CSUMMED; | ||
1034 | /* fall through */ | ||
1035 | case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: | ||
1036 | case FSE_AZ_RX_EV_HDR_TYPE_OTHER: | ||
1037 | break; | ||
1038 | } | ||
1039 | } else { | ||
1040 | flags = efx_farch_handle_rx_not_ok(rx_queue, event); | ||
1041 | } | ||
1042 | |||
1043 | /* Detect multicast packets that didn't match the filter */ | ||
1044 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); | ||
1045 | if (rx_ev_mcast_pkt) { | ||
1046 | unsigned int rx_ev_mcast_hash_match = | ||
1047 | EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); | ||
1048 | |||
1049 | if (unlikely(!rx_ev_mcast_hash_match)) { | ||
1050 | ++channel->n_rx_mcast_mismatch; | ||
1051 | flags |= EFX_RX_PKT_DISCARD; | ||
1052 | } | ||
1053 | } | ||
1054 | |||
1055 | channel->irq_mod_score += 2; | ||
1056 | |||
1057 | /* Handle received packet */ | ||
1058 | efx_rx_packet(rx_queue, | ||
1059 | rx_queue->removed_count & rx_queue->ptr_mask, | ||
1060 | rx_queue->scatter_n, rx_ev_byte_cnt, flags); | ||
1061 | rx_queue->removed_count += rx_queue->scatter_n; | ||
1062 | rx_queue->scatter_n = 0; | ||
1063 | } | ||
1064 | |||
1065 | /* If this flush done event corresponds to a &struct efx_tx_queue, then | ||
1066 | * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue | ||
1067 | * of all transmit completions. | ||
1068 | */ | ||
1069 | static void | ||
1070 | efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) | ||
1071 | { | ||
1072 | struct efx_tx_queue *tx_queue; | ||
1073 | int qid; | ||
1074 | |||
1075 | qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); | ||
1076 | if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { | ||
1077 | tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, | ||
1078 | qid % EFX_TXQ_TYPES); | ||
1079 | if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) { | ||
1080 | efx_farch_magic_event(tx_queue->channel, | ||
1081 | EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); | ||
1082 | } | ||
1083 | } | ||
1084 | } | ||
1085 | |||
1086 | /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush | ||
1087 | * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add | ||
1088 | * the RX queue back to the mask of RX queues in need of flushing. | ||
1089 | */ | ||
1090 | static void | ||
1091 | efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) | ||
1092 | { | ||
1093 | struct efx_channel *channel; | ||
1094 | struct efx_rx_queue *rx_queue; | ||
1095 | int qid; | ||
1096 | bool failed; | ||
1097 | |||
1098 | qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); | ||
1099 | failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); | ||
1100 | if (qid >= efx->n_channels) | ||
1101 | return; | ||
1102 | channel = efx_get_channel(efx, qid); | ||
1103 | if (!efx_channel_has_rx_queue(channel)) | ||
1104 | return; | ||
1105 | rx_queue = efx_channel_get_rx_queue(channel); | ||
1106 | |||
1107 | if (failed) { | ||
1108 | netif_info(efx, hw, efx->net_dev, | ||
1109 | "RXQ %d flush retry\n", qid); | ||
1110 | rx_queue->flush_pending = true; | ||
1111 | atomic_inc(&efx->rxq_flush_pending); | ||
1112 | } else { | ||
1113 | efx_farch_magic_event(efx_rx_queue_channel(rx_queue), | ||
1114 | EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); | ||
1115 | } | ||
1116 | atomic_dec(&efx->rxq_flush_outstanding); | ||
1117 | if (efx_farch_flush_wake(efx)) | ||
1118 | wake_up(&efx->flush_wq); | ||
1119 | } | ||
1120 | |||
1121 | static void | ||
1122 | efx_farch_handle_drain_event(struct efx_channel *channel) | ||
1123 | { | ||
1124 | struct efx_nic *efx = channel->efx; | ||
1125 | |||
1126 | WARN_ON(atomic_read(&efx->drain_pending) == 0); | ||
1127 | atomic_dec(&efx->drain_pending); | ||
1128 | if (efx_farch_flush_wake(efx)) | ||
1129 | wake_up(&efx->flush_wq); | ||
1130 | } | ||
1131 | |||
1132 | static void efx_farch_handle_generated_event(struct efx_channel *channel, | ||
1133 | efx_qword_t *event) | ||
1134 | { | ||
1135 | struct efx_nic *efx = channel->efx; | ||
1136 | struct efx_rx_queue *rx_queue = | ||
1137 | efx_channel_has_rx_queue(channel) ? | ||
1138 | efx_channel_get_rx_queue(channel) : NULL; | ||
1139 | unsigned magic, code; | ||
1140 | |||
1141 | magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); | ||
1142 | code = _EFX_CHANNEL_MAGIC_CODE(magic); | ||
1143 | |||
1144 | if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { | ||
1145 | channel->event_test_cpu = raw_smp_processor_id(); | ||
1146 | } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { | ||
1147 | /* The queue must be empty, so we won't receive any rx | ||
1148 | * events, so efx_process_channel() won't refill the | ||
1149 | * queue. Refill it here */ | ||
1150 | efx_fast_push_rx_descriptors(rx_queue); | ||
1151 | } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { | ||
1152 | efx_farch_handle_drain_event(channel); | ||
1153 | } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { | ||
1154 | efx_farch_handle_drain_event(channel); | ||
1155 | } else { | ||
1156 | netif_dbg(efx, hw, efx->net_dev, "channel %d received " | ||
1157 | "generated event "EFX_QWORD_FMT"\n", | ||
1158 | channel->channel, EFX_QWORD_VAL(*event)); | ||
1159 | } | ||
1160 | } | ||
1161 | |||
1162 | static void | ||
1163 | efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | ||
1164 | { | ||
1165 | struct efx_nic *efx = channel->efx; | ||
1166 | unsigned int ev_sub_code; | ||
1167 | unsigned int ev_sub_data; | ||
1168 | |||
1169 | ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); | ||
1170 | ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); | ||
1171 | |||
1172 | switch (ev_sub_code) { | ||
1173 | case FSE_AZ_TX_DESCQ_FLS_DONE_EV: | ||
1174 | netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", | ||
1175 | channel->channel, ev_sub_data); | ||
1176 | efx_farch_handle_tx_flush_done(efx, event); | ||
1177 | efx_sriov_tx_flush_done(efx, event); | ||
1178 | break; | ||
1179 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: | ||
1180 | netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", | ||
1181 | channel->channel, ev_sub_data); | ||
1182 | efx_farch_handle_rx_flush_done(efx, event); | ||
1183 | efx_sriov_rx_flush_done(efx, event); | ||
1184 | break; | ||
1185 | case FSE_AZ_EVQ_INIT_DONE_EV: | ||
1186 | netif_dbg(efx, hw, efx->net_dev, | ||
1187 | "channel %d EVQ %d initialised\n", | ||
1188 | channel->channel, ev_sub_data); | ||
1189 | break; | ||
1190 | case FSE_AZ_SRM_UPD_DONE_EV: | ||
1191 | netif_vdbg(efx, hw, efx->net_dev, | ||
1192 | "channel %d SRAM update done\n", channel->channel); | ||
1193 | break; | ||
1194 | case FSE_AZ_WAKE_UP_EV: | ||
1195 | netif_vdbg(efx, hw, efx->net_dev, | ||
1196 | "channel %d RXQ %d wakeup event\n", | ||
1197 | channel->channel, ev_sub_data); | ||
1198 | break; | ||
1199 | case FSE_AZ_TIMER_EV: | ||
1200 | netif_vdbg(efx, hw, efx->net_dev, | ||
1201 | "channel %d RX queue %d timer expired\n", | ||
1202 | channel->channel, ev_sub_data); | ||
1203 | break; | ||
1204 | case FSE_AA_RX_RECOVER_EV: | ||
1205 | netif_err(efx, rx_err, efx->net_dev, | ||
1206 | "channel %d seen DRIVER RX_RESET event. " | ||
1207 | "Resetting.\n", channel->channel); | ||
1208 | atomic_inc(&efx->rx_reset); | ||
1209 | efx_schedule_reset(efx, | ||
1210 | EFX_WORKAROUND_6555(efx) ? | ||
1211 | RESET_TYPE_RX_RECOVERY : | ||
1212 | RESET_TYPE_DISABLE); | ||
1213 | break; | ||
1214 | case FSE_BZ_RX_DSC_ERROR_EV: | ||
1215 | if (ev_sub_data < EFX_VI_BASE) { | ||
1216 | netif_err(efx, rx_err, efx->net_dev, | ||
1217 | "RX DMA Q %d reports descriptor fetch error." | ||
1218 | " RX Q %d is disabled.\n", ev_sub_data, | ||
1219 | ev_sub_data); | ||
1220 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); | ||
1221 | } else | ||
1222 | efx_sriov_desc_fetch_err(efx, ev_sub_data); | ||
1223 | break; | ||
1224 | case FSE_BZ_TX_DSC_ERROR_EV: | ||
1225 | if (ev_sub_data < EFX_VI_BASE) { | ||
1226 | netif_err(efx, tx_err, efx->net_dev, | ||
1227 | "TX DMA Q %d reports descriptor fetch error." | ||
1228 | " TX Q %d is disabled.\n", ev_sub_data, | ||
1229 | ev_sub_data); | ||
1230 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | ||
1231 | } else | ||
1232 | efx_sriov_desc_fetch_err(efx, ev_sub_data); | ||
1233 | break; | ||
1234 | default: | ||
1235 | netif_vdbg(efx, hw, efx->net_dev, | ||
1236 | "channel %d unknown driver event code %d " | ||
1237 | "data %04x\n", channel->channel, ev_sub_code, | ||
1238 | ev_sub_data); | ||
1239 | break; | ||
1240 | } | ||
1241 | } | ||
1242 | |||
1243 | int efx_farch_ev_process(struct efx_channel *channel, int budget) | ||
1244 | { | ||
1245 | struct efx_nic *efx = channel->efx; | ||
1246 | unsigned int read_ptr; | ||
1247 | efx_qword_t event, *p_event; | ||
1248 | int ev_code; | ||
1249 | int tx_packets = 0; | ||
1250 | int spent = 0; | ||
1251 | |||
1252 | read_ptr = channel->eventq_read_ptr; | ||
1253 | |||
1254 | for (;;) { | ||
1255 | p_event = efx_event(channel, read_ptr); | ||
1256 | event = *p_event; | ||
1257 | |||
1258 | if (!efx_event_present(&event)) | ||
1259 | /* End of events */ | ||
1260 | break; | ||
1261 | |||
1262 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, | ||
1263 | "channel %d event is "EFX_QWORD_FMT"\n", | ||
1264 | channel->channel, EFX_QWORD_VAL(event)); | ||
1265 | |||
1266 | /* Clear this event by marking it all ones */ | ||
1267 | EFX_SET_QWORD(*p_event); | ||
1268 | |||
1269 | ++read_ptr; | ||
1270 | |||
1271 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); | ||
1272 | |||
1273 | switch (ev_code) { | ||
1274 | case FSE_AZ_EV_CODE_RX_EV: | ||
1275 | efx_farch_handle_rx_event(channel, &event); | ||
1276 | if (++spent == budget) | ||
1277 | goto out; | ||
1278 | break; | ||
1279 | case FSE_AZ_EV_CODE_TX_EV: | ||
1280 | tx_packets += efx_farch_handle_tx_event(channel, | ||
1281 | &event); | ||
1282 | if (tx_packets > efx->txq_entries) { | ||
1283 | spent = budget; | ||
1284 | goto out; | ||
1285 | } | ||
1286 | break; | ||
1287 | case FSE_AZ_EV_CODE_DRV_GEN_EV: | ||
1288 | efx_farch_handle_generated_event(channel, &event); | ||
1289 | break; | ||
1290 | case FSE_AZ_EV_CODE_DRIVER_EV: | ||
1291 | efx_farch_handle_driver_event(channel, &event); | ||
1292 | break; | ||
1293 | case FSE_CZ_EV_CODE_USER_EV: | ||
1294 | efx_sriov_event(channel, &event); | ||
1295 | break; | ||
1296 | case FSE_CZ_EV_CODE_MCDI_EV: | ||
1297 | efx_mcdi_process_event(channel, &event); | ||
1298 | break; | ||
1299 | case FSE_AZ_EV_CODE_GLOBAL_EV: | ||
1300 | if (efx->type->handle_global_event && | ||
1301 | efx->type->handle_global_event(channel, &event)) | ||
1302 | break; | ||
1303 | /* else fall through */ | ||
1304 | default: | ||
1305 | netif_err(channel->efx, hw, channel->efx->net_dev, | ||
1306 | "channel %d unknown event type %d (data " | ||
1307 | EFX_QWORD_FMT ")\n", channel->channel, | ||
1308 | ev_code, EFX_QWORD_VAL(event)); | ||
1309 | } | ||
1310 | } | ||
1311 | |||
1312 | out: | ||
1313 | channel->eventq_read_ptr = read_ptr; | ||
1314 | return spent; | ||
1315 | } | ||
1316 | |||
1317 | /* Allocate buffer table entries for event queue */ | ||
1318 | int efx_farch_ev_probe(struct efx_channel *channel) | ||
1319 | { | ||
1320 | struct efx_nic *efx = channel->efx; | ||
1321 | unsigned entries; | ||
1322 | |||
1323 | entries = channel->eventq_mask + 1; | ||
1324 | return efx_alloc_special_buffer(efx, &channel->eventq, | ||
1325 | entries * sizeof(efx_qword_t)); | ||
1326 | } | ||
1327 | |||
1328 | void efx_farch_ev_init(struct efx_channel *channel) | ||
1329 | { | ||
1330 | efx_oword_t reg; | ||
1331 | struct efx_nic *efx = channel->efx; | ||
1332 | |||
1333 | netif_dbg(efx, hw, efx->net_dev, | ||
1334 | "channel %d event queue in special buffers %d-%d\n", | ||
1335 | channel->channel, channel->eventq.index, | ||
1336 | channel->eventq.index + channel->eventq.entries - 1); | ||
1337 | |||
1338 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { | ||
1339 | EFX_POPULATE_OWORD_3(reg, | ||
1340 | FRF_CZ_TIMER_Q_EN, 1, | ||
1341 | FRF_CZ_HOST_NOTIFY_MODE, 0, | ||
1342 | FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); | ||
1343 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); | ||
1344 | } | ||
1345 | |||
1346 | /* Pin event queue buffer */ | ||
1347 | efx_init_special_buffer(efx, &channel->eventq); | ||
1348 | |||
1349 | /* Fill event queue with all ones (i.e. empty events) */ | ||
1350 | memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); | ||
1351 | |||
1352 | /* Push event queue to card */ | ||
1353 | EFX_POPULATE_OWORD_3(reg, | ||
1354 | FRF_AZ_EVQ_EN, 1, | ||
1355 | FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), | ||
1356 | FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); | ||
1357 | efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, | ||
1358 | channel->channel); | ||
1359 | |||
1360 | efx->type->push_irq_moderation(channel); | ||
1361 | } | ||
1362 | |||
1363 | void efx_farch_ev_fini(struct efx_channel *channel) | ||
1364 | { | ||
1365 | efx_oword_t reg; | ||
1366 | struct efx_nic *efx = channel->efx; | ||
1367 | |||
1368 | /* Remove event queue from card */ | ||
1369 | EFX_ZERO_OWORD(reg); | ||
1370 | efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, | ||
1371 | channel->channel); | ||
1372 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | ||
1373 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); | ||
1374 | |||
1375 | /* Unpin event queue */ | ||
1376 | efx_fini_special_buffer(efx, &channel->eventq); | ||
1377 | } | ||
1378 | |||
1379 | /* Free buffers backing event queue */ | ||
1380 | void efx_farch_ev_remove(struct efx_channel *channel) | ||
1381 | { | ||
1382 | efx_free_special_buffer(channel->efx, &channel->eventq); | ||
1383 | } | ||
1384 | |||
1385 | |||
1386 | void efx_farch_ev_test_generate(struct efx_channel *channel) | ||
1387 | { | ||
1388 | efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); | ||
1389 | } | ||
1390 | |||
1391 | void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue) | ||
1392 | { | ||
1393 | efx_farch_magic_event(efx_rx_queue_channel(rx_queue), | ||
1394 | EFX_CHANNEL_MAGIC_FILL(rx_queue)); | ||
1395 | } | ||
1396 | |||
1397 | /************************************************************************** | ||
1398 | * | ||
1399 | * Hardware interrupts | ||
1400 | * The hardware interrupt handler does very little work; all the event | ||
1401 | * queue processing is carried out by per-channel tasklets. | ||
1402 | * | ||
1403 | **************************************************************************/ | ||
1404 | |||
1405 | /* Enable/disable/generate interrupts */ | ||
1406 | static inline void efx_farch_interrupts(struct efx_nic *efx, | ||
1407 | bool enabled, bool force) | ||
1408 | { | ||
1409 | efx_oword_t int_en_reg_ker; | ||
1410 | |||
1411 | EFX_POPULATE_OWORD_3(int_en_reg_ker, | ||
1412 | FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, | ||
1413 | FRF_AZ_KER_INT_KER, force, | ||
1414 | FRF_AZ_DRV_INT_EN_KER, enabled); | ||
1415 | efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); | ||
1416 | } | ||
1417 | |||
1418 | void efx_farch_irq_enable_master(struct efx_nic *efx) | ||
1419 | { | ||
1420 | EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); | ||
1421 | wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ | ||
1422 | |||
1423 | efx_farch_interrupts(efx, true, false); | ||
1424 | } | ||
1425 | |||
1426 | void efx_farch_irq_disable_master(struct efx_nic *efx) | ||
1427 | { | ||
1428 | /* Disable interrupts */ | ||
1429 | efx_farch_interrupts(efx, false, false); | ||
1430 | } | ||
1431 | |||
1432 | /* Generate a test interrupt | ||
1433 | * Interrupt must already have been enabled, otherwise nasty things | ||
1434 | * may happen. | ||
1435 | */ | ||
1436 | void efx_farch_irq_test_generate(struct efx_nic *efx) | ||
1437 | { | ||
1438 | efx_farch_interrupts(efx, true, true); | ||
1439 | } | ||
1440 | |||
1441 | /* Process a fatal interrupt | ||
1442 | * Disable bus mastering ASAP and schedule a reset | ||
1443 | */ | ||
1444 | irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) | ||
1445 | { | ||
1446 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
1447 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1448 | efx_oword_t fatal_intr; | ||
1449 | int error, mem_perr; | ||
1450 | |||
1451 | efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); | ||
1452 | error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); | ||
1453 | |||
1454 | netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " | ||
1455 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), | ||
1456 | EFX_OWORD_VAL(fatal_intr), | ||
1457 | error ? "disabling bus mastering" : "no recognised error"); | ||
1458 | |||
1459 | /* If this is a memory parity error dump which blocks are offending */ | ||
1460 | mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || | ||
1461 | EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); | ||
1462 | if (mem_perr) { | ||
1463 | efx_oword_t reg; | ||
1464 | efx_reado(efx, ®, FR_AZ_MEM_STAT); | ||
1465 | netif_err(efx, hw, efx->net_dev, | ||
1466 | "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", | ||
1467 | EFX_OWORD_VAL(reg)); | ||
1468 | } | ||
1469 | |||
1470 | /* Disable both devices */ | ||
1471 | pci_clear_master(efx->pci_dev); | ||
1472 | if (efx_nic_is_dual_func(efx)) | ||
1473 | pci_clear_master(nic_data->pci_dev2); | ||
1474 | efx_farch_irq_disable_master(efx); | ||
1475 | |||
1476 | /* Count errors and reset or disable the NIC accordingly */ | ||
1477 | if (efx->int_error_count == 0 || | ||
1478 | time_after(jiffies, efx->int_error_expire)) { | ||
1479 | efx->int_error_count = 0; | ||
1480 | efx->int_error_expire = | ||
1481 | jiffies + EFX_INT_ERROR_EXPIRE * HZ; | ||
1482 | } | ||
1483 | if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { | ||
1484 | netif_err(efx, hw, efx->net_dev, | ||
1485 | "SYSTEM ERROR - reset scheduled\n"); | ||
1486 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); | ||
1487 | } else { | ||
1488 | netif_err(efx, hw, efx->net_dev, | ||
1489 | 			  "SYSTEM ERROR - max number of errors seen. " | ||
1490 | 			  "NIC will be disabled\n"); | ||
1491 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | ||
1492 | } | ||
1493 | |||
1494 | return IRQ_HANDLED; | ||
1495 | } | ||
1496 | |||
1497 | /* Handle a legacy interrupt | ||
1498 |  * Acknowledges the interrupt and schedules event queue processing. | ||
1499 | */ | ||
1500 | irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) | ||
1501 | { | ||
1502 | struct efx_nic *efx = dev_id; | ||
1503 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | ||
1504 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1505 | irqreturn_t result = IRQ_NONE; | ||
1506 | struct efx_channel *channel; | ||
1507 | efx_dword_t reg; | ||
1508 | u32 queues; | ||
1509 | int syserr; | ||
1510 | |||
1511 | /* Read the ISR which also ACKs the interrupts */ | ||
1512 | efx_readd(efx, ®, FR_BZ_INT_ISR0); | ||
1513 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); | ||
1514 | |||
1515 | /* Legacy interrupts are disabled too late by the EEH kernel | ||
1516 | * code. Disable them earlier. | ||
1517 | * If an EEH error occurred, the read will have returned all ones. | ||
1518 | */ | ||
1519 | if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && | ||
1520 | !efx->eeh_disabled_legacy_irq) { | ||
1521 | disable_irq_nosync(efx->legacy_irq); | ||
1522 | efx->eeh_disabled_legacy_irq = true; | ||
1523 | } | ||
1524 | |||
1525 | /* Handle non-event-queue sources */ | ||
1526 | if (queues & (1U << efx->irq_level) && soft_enabled) { | ||
1527 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | ||
1528 | if (unlikely(syserr)) | ||
1529 | return efx_farch_fatal_interrupt(efx); | ||
1530 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1531 | } | ||
1532 | |||
1533 | if (queues != 0) { | ||
1534 | if (EFX_WORKAROUND_15783(efx)) | ||
1535 | efx->irq_zero_count = 0; | ||
1536 | |||
1537 | /* Schedule processing of any interrupting queues */ | ||
1538 | if (likely(soft_enabled)) { | ||
1539 | efx_for_each_channel(channel, efx) { | ||
1540 | if (queues & 1) | ||
1541 | efx_schedule_channel_irq(channel); | ||
1542 | queues >>= 1; | ||
1543 | } | ||
1544 | } | ||
1545 | result = IRQ_HANDLED; | ||
1546 | |||
1547 | } else if (EFX_WORKAROUND_15783(efx)) { | ||
1548 | efx_qword_t *event; | ||
1549 | |||
1550 | /* We can't return IRQ_HANDLED more than once on seeing ISR=0 | ||
1551 | * because this might be a shared interrupt. */ | ||
1552 | if (efx->irq_zero_count++ == 0) | ||
1553 | result = IRQ_HANDLED; | ||
1554 | |||
1555 | /* Ensure we schedule or rearm all event queues */ | ||
1556 | if (likely(soft_enabled)) { | ||
1557 | efx_for_each_channel(channel, efx) { | ||
1558 | event = efx_event(channel, | ||
1559 | channel->eventq_read_ptr); | ||
1560 | if (efx_event_present(event)) | ||
1561 | efx_schedule_channel_irq(channel); | ||
1562 | else | ||
1563 | efx_farch_ev_read_ack(channel); | ||
1564 | } | ||
1565 | } | ||
1566 | } | ||
1567 | |||
1568 | if (result == IRQ_HANDLED) | ||
1569 | netif_vdbg(efx, intr, efx->net_dev, | ||
1570 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | ||
1571 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | ||
1572 | |||
1573 | return result; | ||
1574 | } | ||
1575 | |||
1576 | /* Handle an MSI interrupt | ||
1577 | * | ||
1578 | * Handle an MSI hardware interrupt. This routine schedules event | ||
1579 | * queue processing. No interrupt acknowledgement cycle is necessary. | ||
1580 | * Also, we never need to check that the interrupt is for us, since | ||
1581 | * MSI interrupts cannot be shared. | ||
1582 | */ | ||
1583 | irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) | ||
1584 | { | ||
1585 | struct efx_msi_context *context = dev_id; | ||
1586 | struct efx_nic *efx = context->efx; | ||
1587 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1588 | int syserr; | ||
1589 | |||
1590 | netif_vdbg(efx, intr, efx->net_dev, | ||
1591 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | ||
1592 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | ||
1593 | |||
1594 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | ||
1595 | return IRQ_HANDLED; | ||
1596 | |||
1597 | /* Handle non-event-queue sources */ | ||
1598 | if (context->index == efx->irq_level) { | ||
1599 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | ||
1600 | if (unlikely(syserr)) | ||
1601 | return efx_farch_fatal_interrupt(efx); | ||
1602 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1603 | } | ||
1604 | |||
1605 | /* Schedule processing of the channel */ | ||
1606 | efx_schedule_channel_irq(efx->channel[context->index]); | ||
1607 | |||
1608 | return IRQ_HANDLED; | ||
1609 | } | ||
1610 | |||
1611 | |||
1612 | /* Setup RSS indirection table. | ||
1613 | * This maps from the hash value of the packet to RXQ | ||
1614 | */ | ||
1615 | void efx_farch_rx_push_indir_table(struct efx_nic *efx) | ||
1616 | { | ||
1617 | size_t i = 0; | ||
1618 | efx_dword_t dword; | ||
1619 | |||
1620 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | ||
1621 | return; | ||
1622 | |||
1623 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != | ||
1624 | FR_BZ_RX_INDIRECTION_TBL_ROWS); | ||
1625 | |||
1626 | for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { | ||
1627 | EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, | ||
1628 | efx->rx_indir_table[i]); | ||
1629 | efx_writed(efx, &dword, | ||
1630 | FR_BZ_RX_INDIRECTION_TBL + | ||
1631 | FR_BZ_RX_INDIRECTION_TBL_STEP * i); | ||
1632 | } | ||
1633 | } | ||
1634 | |||
1635 | /* Looks at available SRAM resources and works out how many queues we | ||
1636 | * can support, and where things like descriptor caches should live. | ||
1637 | * | ||
1638 | * SRAM is split up as follows: | ||
1639 | * 0 buftbl entries for channels | ||
1640 | * efx->vf_buftbl_base buftbl entries for SR-IOV | ||
1641 | * efx->rx_dc_base RX descriptor caches | ||
1642 | * efx->tx_dc_base TX descriptor caches | ||
1643 | */ | ||
1644 | void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) | ||
1645 | { | ||
1646 | unsigned vi_count, buftbl_min; | ||
1647 | |||
1648 | /* Account for the buffer table entries backing the datapath channels | ||
1649 | * and the descriptor caches for those channels. | ||
1650 | */ | ||
1651 | buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + | ||
1652 | efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + | ||
1653 | efx->n_channels * EFX_MAX_EVQ_SIZE) | ||
1654 | * sizeof(efx_qword_t) / EFX_BUF_SIZE); | ||
1655 | vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); | ||
1656 | |||
1657 | #ifdef CONFIG_SFC_SRIOV | ||
1658 | if (efx_sriov_wanted(efx)) { | ||
1659 | unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; | ||
1660 | |||
1661 | efx->vf_buftbl_base = buftbl_min; | ||
1662 | |||
1663 | vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; | ||
1664 | vi_count = max(vi_count, EFX_VI_BASE); | ||
1665 | buftbl_free = (sram_lim_qw - buftbl_min - | ||
1666 | vi_count * vi_dc_entries); | ||
1667 | |||
1668 | entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) * | ||
1669 | efx_vf_size(efx)); | ||
1670 | vf_limit = min(buftbl_free / entries_per_vf, | ||
1671 | (1024U - EFX_VI_BASE) >> efx->vi_scale); | ||
1672 | |||
1673 | if (efx->vf_count > vf_limit) { | ||
1674 | netif_err(efx, probe, efx->net_dev, | ||
1675 | 				  "Reducing VF count from %d to %d\n", | ||
1676 | efx->vf_count, vf_limit); | ||
1677 | efx->vf_count = vf_limit; | ||
1678 | } | ||
1679 | vi_count += efx->vf_count * efx_vf_size(efx); | ||
1680 | } | ||
1681 | #endif | ||
1682 | |||
1683 | efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; | ||
1684 | efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; | ||
1685 | } | ||
1686 | |||
1687 | u32 efx_farch_fpga_ver(struct efx_nic *efx) | ||
1688 | { | ||
1689 | efx_oword_t altera_build; | ||
1690 | efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); | ||
1691 | return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); | ||
1692 | } | ||
1693 | |||
1694 | void efx_farch_init_common(struct efx_nic *efx) | ||
1695 | { | ||
1696 | efx_oword_t temp; | ||
1697 | |||
1698 | /* Set positions of descriptor caches in SRAM. */ | ||
1699 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); | ||
1700 | efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); | ||
1701 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); | ||
1702 | efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); | ||
1703 | |||
1704 | /* Set TX descriptor cache size. */ | ||
1705 | BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); | ||
1706 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); | ||
1707 | efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); | ||
1708 | |||
1709 | /* Set RX descriptor cache size. Set low watermark to size-8, as | ||
1710 | * this allows most efficient prefetching. | ||
1711 | */ | ||
1712 | BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); | ||
1713 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); | ||
1714 | efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); | ||
1715 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); | ||
1716 | efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); | ||
1717 | |||
1718 | /* Program INT_KER address */ | ||
1719 | EFX_POPULATE_OWORD_2(temp, | ||
1720 | FRF_AZ_NORM_INT_VEC_DIS_KER, | ||
1721 | EFX_INT_MODE_USE_MSI(efx), | ||
1722 | FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); | ||
1723 | efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); | ||
1724 | |||
1725 | if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) | ||
1726 | /* Use an interrupt level unused by event queues */ | ||
1727 | efx->irq_level = 0x1f; | ||
1728 | else | ||
1729 | /* Use a valid MSI-X vector */ | ||
1730 | efx->irq_level = 0; | ||
1731 | |||
1732 | /* Enable all the genuinely fatal interrupts. (They are still | ||
1733 | * masked by the overall interrupt mask, controlled by | ||
1734 |  * efx_farch_interrupts()). | ||
1735 | * | ||
1736 | * Note: All other fatal interrupts are enabled | ||
1737 | */ | ||
1738 | EFX_POPULATE_OWORD_3(temp, | ||
1739 | FRF_AZ_ILL_ADR_INT_KER_EN, 1, | ||
1740 | FRF_AZ_RBUF_OWN_INT_KER_EN, 1, | ||
1741 | FRF_AZ_TBUF_OWN_INT_KER_EN, 1); | ||
1742 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | ||
1743 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); | ||
1744 | EFX_INVERT_OWORD(temp); | ||
1745 | efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); | ||
1746 | |||
1747 | efx_farch_rx_push_indir_table(efx); | ||
1748 | |||
1749 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be | ||
1750 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. | ||
1751 | */ | ||
1752 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); | ||
1753 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); | ||
1754 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); | ||
1755 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); | ||
1756 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); | ||
1757 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); | ||
1758 | /* Enable SW_EV to inherit in char driver - assume harmless here */ | ||
1759 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); | ||
1760 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | ||
1761 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); | ||
1762 | /* Disable hardware watchdog which can misfire */ | ||
1763 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); | ||
1764 | /* Squash TX of packets of 16 bytes or less */ | ||
1765 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | ||
1766 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); | ||
1767 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); | ||
1768 | |||
1769 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | ||
1770 | EFX_POPULATE_OWORD_4(temp, | ||
1771 | /* Default values */ | ||
1772 | FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, | ||
1773 | FRF_BZ_TX_PACE_SB_AF, 0xb, | ||
1774 | FRF_BZ_TX_PACE_FB_BASE, 0, | ||
1775 | /* Allow large pace values in the | ||
1776 | * fast bin. */ | ||
1777 | FRF_BZ_TX_PACE_BIN_TH, | ||
1778 | FFE_BZ_TX_PACE_RESERVED); | ||
1779 | efx_writeo(efx, &temp, FR_BZ_TX_PACE); | ||
1780 | } | ||
1781 | } | ||
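The Falcon-architecture handlers above are not called directly by the core driver; they are reached through the per-architecture hooks added to struct efx_nic_type (see the net_driver.h hunk below). As a rough, illustrative sketch of the expected dispatch, assuming the generic NAPI poll path simply indirects through efx->type (efx_poll_sketch is a hypothetical name, not code from this patch):

	static int efx_poll_sketch(struct napi_struct *napi, int budget)
	{
		struct efx_channel *channel =
			container_of(napi, struct efx_channel, napi_str);
		struct efx_nic *efx = channel->efx;
		int spent;

		/* Bounded by the NAPI budget; resolves to
		 * efx_farch_ev_process() on Falcon/Siena hardware.
		 */
		spent = efx->type->ev_process(channel, budget);

		if (spent < budget) {
			/* Quota not exhausted: stop polling and re-arm the
			 * event queue IRQ by writing back the read pointer.
			 */
			napi_complete(napi);
			efx->type->ev_read_ack(channel);
		}
		return spent;
	}
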
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index b382895901b1..7283cc1a90fe 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h | |||
@@ -971,7 +971,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) | |||
971 | * @get_wol: Get WoL configuration from driver state | 971 | * @get_wol: Get WoL configuration from driver state |
972 | * @set_wol: Push WoL configuration to the NIC | 972 | * @set_wol: Push WoL configuration to the NIC |
973 | * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) | 973 | * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) |
974 | * @test_chip: Test registers. Should use efx_nic_test_registers(), and is | 974 | * @test_chip: Test registers. May use efx_farch_test_registers(), and is |
975 | * expected to reset the NIC. | 975 | * expected to reset the NIC. |
976 | * @test_nvram: Test validity of NVRAM contents | 976 | * @test_nvram: Test validity of NVRAM contents |
977 | * @mcdi_request: Send an MCDI request with the given header and SDU. | 977 | * @mcdi_request: Send an MCDI request with the given header and SDU. |
@@ -985,6 +985,32 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) | |||
985 | * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so, | 985 | * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so, |
986 | * return an appropriate error code for aborting any current | 986 | * return an appropriate error code for aborting any current |
987 | * request; otherwise return 0. | 987 | * request; otherwise return 0. |
988 | * @irq_enable_master: Enable IRQs on the NIC. Each event queue must | ||
989 | * be separately enabled after this. | ||
990 | * @irq_test_generate: Generate a test IRQ | ||
991 | * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event | ||
992 | * queue must be separately disabled before this. | ||
993 | * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is | ||
994 | * a pointer to the &struct efx_msi_context for the channel. | ||
995 | * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument | ||
996 | * is a pointer to the &struct efx_nic. | ||
997 | * @tx_probe: Allocate resources for TX queue | ||
998 | * @tx_init: Initialise TX queue on the NIC | ||
999 | * @tx_remove: Free resources for TX queue | ||
1000 | * @tx_write: Write TX descriptors and doorbell | ||
1001 | * @rx_push_indir_table: Write RSS indirection table to the NIC | ||
1002 | * @rx_probe: Allocate resources for RX queue | ||
1003 | * @rx_init: Initialise RX queue on the NIC | ||
1004 | * @rx_remove: Free resources for RX queue | ||
1005 | * @rx_write: Write RX descriptors and doorbell | ||
1006 | * @rx_defer_refill: Generate a refill reminder event | ||
1007 | * @ev_probe: Allocate resources for event queue | ||
1008 | * @ev_init: Initialise event queue on the NIC | ||
1009 | * @ev_fini: Deinitialise event queue on the NIC | ||
1010 | * @ev_remove: Free resources for event queue | ||
1011 | * @ev_process: Process events for a queue, up to the given NAPI quota | ||
1012 | * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ | ||
1013 | * @ev_test_generate: Generate a test event | ||
988 | * @revision: Hardware architecture revision | 1014 | * @revision: Hardware architecture revision |
989 | * @mem_map_size: Memory BAR mapped size | 1015 | * @mem_map_size: Memory BAR mapped size |
990 | * @txd_ptr_tbl_base: TX descriptor ring base address | 1016 | * @txd_ptr_tbl_base: TX descriptor ring base address |
@@ -1041,6 +1067,28 @@ struct efx_nic_type { | |||
1041 | void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu, | 1067 | void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu, |
1042 | size_t pdu_offset, size_t pdu_len); | 1068 | size_t pdu_offset, size_t pdu_len); |
1043 | int (*mcdi_poll_reboot)(struct efx_nic *efx); | 1069 | int (*mcdi_poll_reboot)(struct efx_nic *efx); |
1070 | void (*irq_enable_master)(struct efx_nic *efx); | ||
1071 | void (*irq_test_generate)(struct efx_nic *efx); | ||
1072 | void (*irq_disable_non_ev)(struct efx_nic *efx); | ||
1073 | irqreturn_t (*irq_handle_msi)(int irq, void *dev_id); | ||
1074 | irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id); | ||
1075 | int (*tx_probe)(struct efx_tx_queue *tx_queue); | ||
1076 | void (*tx_init)(struct efx_tx_queue *tx_queue); | ||
1077 | void (*tx_remove)(struct efx_tx_queue *tx_queue); | ||
1078 | void (*tx_write)(struct efx_tx_queue *tx_queue); | ||
1079 | void (*rx_push_indir_table)(struct efx_nic *efx); | ||
1080 | int (*rx_probe)(struct efx_rx_queue *rx_queue); | ||
1081 | void (*rx_init)(struct efx_rx_queue *rx_queue); | ||
1082 | void (*rx_remove)(struct efx_rx_queue *rx_queue); | ||
1083 | void (*rx_write)(struct efx_rx_queue *rx_queue); | ||
1084 | void (*rx_defer_refill)(struct efx_rx_queue *rx_queue); | ||
1085 | int (*ev_probe)(struct efx_channel *channel); | ||
1086 | void (*ev_init)(struct efx_channel *channel); | ||
1087 | void (*ev_fini)(struct efx_channel *channel); | ||
1088 | void (*ev_remove)(struct efx_channel *channel); | ||
1089 | int (*ev_process)(struct efx_channel *channel, int quota); | ||
1090 | void (*ev_read_ack)(struct efx_channel *channel); | ||
1091 | void (*ev_test_generate)(struct efx_channel *channel); | ||
1044 | 1092 | ||
1045 | int revision; | 1093 | int revision; |
1046 | unsigned int mem_map_size; | 1094 | unsigned int mem_map_size; |
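Each NIC type is expected to point these new hooks at its own implementations; for Falcon-architecture NICs that is the efx_farch_* family above. A hedged sketch of the wiring, field subset only (example_farch_nic_type is a made-up name; the real initialisers are updated in the falcon.c and siena.c parts of this patch):

	const struct efx_nic_type example_farch_nic_type = {
		.irq_enable_master	= efx_farch_irq_enable_master,
		.irq_test_generate	= efx_farch_irq_test_generate,
		.irq_disable_non_ev	= efx_farch_irq_disable_master,
		.irq_handle_msi		= efx_farch_msi_interrupt,
		.irq_handle_legacy	= efx_farch_legacy_interrupt,
		.rx_push_indir_table	= efx_farch_rx_push_indir_table,
		.rx_defer_refill	= efx_farch_rx_defer_refill,
		.ev_probe		= efx_farch_ev_probe,
		.ev_init		= efx_farch_ev_init,
		.ev_fini		= efx_farch_ev_fini,
		.ev_remove		= efx_farch_ev_remove,
		.ev_process		= efx_farch_ev_process,
		.ev_read_ack		= efx_farch_ev_read_ack,
		.ev_test_generate	= efx_farch_ev_test_generate,
		/* tx_*, rx_* descriptor ops and the remaining fields omitted */
	};
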
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 7c52691e9d26..66c71ede3a98 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c | |||
@@ -25,273 +25,6 @@ | |||
25 | 25 | ||
26 | /************************************************************************** | 26 | /************************************************************************** |
27 | * | 27 | * |
28 | * Configurable values | ||
29 | * | ||
30 | ************************************************************************** | ||
31 | */ | ||
32 | |||
33 | /* This is set to 16 for a good reason. In summary, if larger than | ||
34 | * 16, the descriptor cache holds more than a default socket | ||
35 | * buffer's worth of packets (for UDP we can only have at most one | ||
36 | * socket buffer's worth outstanding). This combined with the fact | ||
37 | * that we only get 1 TX event per descriptor cache means the NIC | ||
38 | * goes idle. | ||
39 | */ | ||
40 | #define TX_DC_ENTRIES 16 | ||
41 | #define TX_DC_ENTRIES_ORDER 1 | ||
42 | |||
43 | #define RX_DC_ENTRIES 64 | ||
44 | #define RX_DC_ENTRIES_ORDER 3 | ||
45 | |||
46 | /* If EFX_MAX_INT_ERRORS internal errors occur within | ||
47 | * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and | ||
48 | * disable it. | ||
49 | */ | ||
50 | #define EFX_INT_ERROR_EXPIRE 3600 | ||
51 | #define EFX_MAX_INT_ERRORS 5 | ||
52 | |||
53 | /* Depth of RX flush request fifo */ | ||
54 | #define EFX_RX_FLUSH_COUNT 4 | ||
55 | |||
56 | /* Driver generated events */ | ||
57 | #define _EFX_CHANNEL_MAGIC_TEST 0x000101 | ||
58 | #define _EFX_CHANNEL_MAGIC_FILL 0x000102 | ||
59 | #define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103 | ||
60 | #define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104 | ||
61 | |||
62 | #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) | ||
63 | #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) | ||
64 | |||
65 | #define EFX_CHANNEL_MAGIC_TEST(_channel) \ | ||
66 | _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) | ||
67 | #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ | ||
68 | _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ | ||
69 | efx_rx_queue_index(_rx_queue)) | ||
70 | #define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ | ||
71 | _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \ | ||
72 | efx_rx_queue_index(_rx_queue)) | ||
73 | #define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ | ||
74 | _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ | ||
75 | (_tx_queue)->queue) | ||
76 | |||
77 | static void efx_magic_event(struct efx_channel *channel, u32 magic); | ||
78 | |||
79 | /************************************************************************** | ||
80 | * | ||
81 | * Solarstorm hardware access | ||
82 | * | ||
83 | **************************************************************************/ | ||
84 | |||
85 | static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, | ||
86 | unsigned int index) | ||
87 | { | ||
88 | efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, | ||
89 | value, index); | ||
90 | } | ||
91 | |||
92 | /* Read the current event from the event queue */ | ||
93 | static inline efx_qword_t *efx_event(struct efx_channel *channel, | ||
94 | unsigned int index) | ||
95 | { | ||
96 | return ((efx_qword_t *) (channel->eventq.buf.addr)) + | ||
97 | (index & channel->eventq_mask); | ||
98 | } | ||
99 | |||
100 | /* See if an event is present | ||
101 | * | ||
102 | * We check both the high and low dword of the event for all ones. We | ||
103 | * wrote all ones when we cleared the event, and no valid event can | ||
104 | * have all ones in either its high or low dwords. This approach is | ||
105 | * robust against reordering. | ||
106 | * | ||
107 | * Note that using a single 64-bit comparison is incorrect; even | ||
108 | * though the CPU read will be atomic, the DMA write may not be. | ||
109 | */ | ||
110 | static inline int efx_event_present(efx_qword_t *event) | ||
111 | { | ||
112 | return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | | ||
113 | EFX_DWORD_IS_ALL_ONES(event->dword[1])); | ||
114 | } | ||
115 | |||
116 | static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, | ||
117 | const efx_oword_t *mask) | ||
118 | { | ||
119 | return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || | ||
120 | ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); | ||
121 | } | ||
122 | |||
123 | int efx_nic_test_registers(struct efx_nic *efx, | ||
124 | const struct efx_nic_register_test *regs, | ||
125 | size_t n_regs) | ||
126 | { | ||
127 | unsigned address = 0, i, j; | ||
128 | efx_oword_t mask, imask, original, reg, buf; | ||
129 | |||
130 | for (i = 0; i < n_regs; ++i) { | ||
131 | address = regs[i].address; | ||
132 | mask = imask = regs[i].mask; | ||
133 | EFX_INVERT_OWORD(imask); | ||
134 | |||
135 | efx_reado(efx, &original, address); | ||
136 | |||
137 | /* bit sweep on and off */ | ||
138 | for (j = 0; j < 128; j++) { | ||
139 | if (!EFX_EXTRACT_OWORD32(mask, j, j)) | ||
140 | continue; | ||
141 | |||
142 | /* Test this testable bit can be set in isolation */ | ||
143 | EFX_AND_OWORD(reg, original, mask); | ||
144 | EFX_SET_OWORD32(reg, j, j, 1); | ||
145 | |||
146 | efx_writeo(efx, ®, address); | ||
147 | efx_reado(efx, &buf, address); | ||
148 | |||
149 | if (efx_masked_compare_oword(®, &buf, &mask)) | ||
150 | goto fail; | ||
151 | |||
152 | /* Test this testable bit can be cleared in isolation */ | ||
153 | EFX_OR_OWORD(reg, original, mask); | ||
154 | EFX_SET_OWORD32(reg, j, j, 0); | ||
155 | |||
156 | efx_writeo(efx, ®, address); | ||
157 | efx_reado(efx, &buf, address); | ||
158 | |||
159 | if (efx_masked_compare_oword(®, &buf, &mask)) | ||
160 | goto fail; | ||
161 | } | ||
162 | |||
163 | efx_writeo(efx, &original, address); | ||
164 | } | ||
165 | |||
166 | return 0; | ||
167 | |||
168 | fail: | ||
169 | netif_err(efx, hw, efx->net_dev, | ||
170 | "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT | ||
171 | " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), | ||
172 | EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); | ||
173 | return -EIO; | ||
174 | } | ||
175 | |||
176 | /************************************************************************** | ||
177 | * | ||
178 | * Special buffer handling | ||
179 | * Special buffers are used for event queues and the TX and RX | ||
180 | * descriptor rings. | ||
181 | * | ||
182 | *************************************************************************/ | ||
183 | |||
184 | /* | ||
185 | * Initialise a special buffer | ||
186 | * | ||
187 | * This will define a buffer (previously allocated via | ||
188 | * efx_alloc_special_buffer()) in the buffer table, allowing | ||
189 | * it to be used for event queues, descriptor rings etc. | ||
190 | */ | ||
191 | static void | ||
192 | efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
193 | { | ||
194 | efx_qword_t buf_desc; | ||
195 | unsigned int index; | ||
196 | dma_addr_t dma_addr; | ||
197 | int i; | ||
198 | |||
199 | EFX_BUG_ON_PARANOID(!buffer->buf.addr); | ||
200 | |||
201 | /* Write buffer descriptors to NIC */ | ||
202 | for (i = 0; i < buffer->entries; i++) { | ||
203 | index = buffer->index + i; | ||
204 | dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE); | ||
205 | netif_dbg(efx, probe, efx->net_dev, | ||
206 | "mapping special buffer %d at %llx\n", | ||
207 | index, (unsigned long long)dma_addr); | ||
208 | EFX_POPULATE_QWORD_3(buf_desc, | ||
209 | FRF_AZ_BUF_ADR_REGION, 0, | ||
210 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, | ||
211 | FRF_AZ_BUF_OWNER_ID_FBUF, 0); | ||
212 | efx_write_buf_tbl(efx, &buf_desc, index); | ||
213 | } | ||
214 | } | ||
215 | |||
216 | /* Unmaps a buffer and clears the buffer table entries */ | ||
217 | static void | ||
218 | efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
219 | { | ||
220 | efx_oword_t buf_tbl_upd; | ||
221 | unsigned int start = buffer->index; | ||
222 | unsigned int end = (buffer->index + buffer->entries - 1); | ||
223 | |||
224 | if (!buffer->entries) | ||
225 | return; | ||
226 | |||
227 | netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", | ||
228 | buffer->index, buffer->index + buffer->entries - 1); | ||
229 | |||
230 | EFX_POPULATE_OWORD_4(buf_tbl_upd, | ||
231 | FRF_AZ_BUF_UPD_CMD, 0, | ||
232 | FRF_AZ_BUF_CLR_CMD, 1, | ||
233 | FRF_AZ_BUF_CLR_END_ID, end, | ||
234 | FRF_AZ_BUF_CLR_START_ID, start); | ||
235 | efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); | ||
236 | } | ||
237 | |||
238 | /* | ||
239 | * Allocate a new special buffer | ||
240 | * | ||
241 | * This allocates memory for a new buffer, clears it and allocates a | ||
242 | * new buffer ID range. It does not write into the buffer table. | ||
243 | * | ||
244 | * This call will allocate 4KB buffers, since 8KB buffers can't be | ||
245 | * used for event queues and descriptor rings. | ||
246 | */ | ||
247 | static int efx_alloc_special_buffer(struct efx_nic *efx, | ||
248 | struct efx_special_buffer *buffer, | ||
249 | unsigned int len) | ||
250 | { | ||
251 | len = ALIGN(len, EFX_BUF_SIZE); | ||
252 | |||
253 | if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) | ||
254 | return -ENOMEM; | ||
255 | buffer->entries = len / EFX_BUF_SIZE; | ||
256 | BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1)); | ||
257 | |||
258 | /* Select new buffer ID */ | ||
259 | buffer->index = efx->next_buffer_table; | ||
260 | efx->next_buffer_table += buffer->entries; | ||
261 | #ifdef CONFIG_SFC_SRIOV | ||
262 | BUG_ON(efx_sriov_enabled(efx) && | ||
263 | efx->vf_buftbl_base < efx->next_buffer_table); | ||
264 | #endif | ||
265 | |||
266 | netif_dbg(efx, probe, efx->net_dev, | ||
267 | "allocating special buffers %d-%d at %llx+%x " | ||
268 | "(virt %p phys %llx)\n", buffer->index, | ||
269 | buffer->index + buffer->entries - 1, | ||
270 | (u64)buffer->buf.dma_addr, len, | ||
271 | buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static void | ||
277 | efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
278 | { | ||
279 | if (!buffer->buf.addr) | ||
280 | return; | ||
281 | |||
282 | netif_dbg(efx, hw, efx->net_dev, | ||
283 | "deallocating special buffers %d-%d at %llx+%x " | ||
284 | "(virt %p phys %llx)\n", buffer->index, | ||
285 | buffer->index + buffer->entries - 1, | ||
286 | (u64)buffer->buf.dma_addr, buffer->buf.len, | ||
287 | buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); | ||
288 | |||
289 | efx_nic_free_buffer(efx, &buffer->buf); | ||
290 | buffer->entries = 0; | ||
291 | } | ||
292 | |||
293 | /************************************************************************** | ||
294 | * | ||
295 | * Generic buffer handling | 28 | * Generic buffer handling |
296 | * These buffers are used for interrupt status, MAC stats, etc. | 29 | * These buffers are used for interrupt status, MAC stats, etc. |
297 | * | 30 | * |
@@ -318,1079 +51,6 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) | |||
318 | } | 51 | } |
319 | } | 52 | } |
320 | 53 | ||
321 | /************************************************************************** | ||
322 | * | ||
323 | * TX path | ||
324 | * | ||
325 | **************************************************************************/ | ||
326 | |||
327 | /* Returns a pointer to the specified transmit descriptor in the TX | ||
328 | * descriptor queue belonging to the specified channel. | ||
329 | */ | ||
330 | static inline efx_qword_t * | ||
331 | efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) | ||
332 | { | ||
333 | return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; | ||
334 | } | ||
335 | |||
336 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ | ||
337 | static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) | ||
338 | { | ||
339 | unsigned write_ptr; | ||
340 | efx_dword_t reg; | ||
341 | |||
342 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | ||
343 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); | ||
344 | efx_writed_page(tx_queue->efx, ®, | ||
345 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); | ||
346 | } | ||
347 | |||
348 | /* Write pointer and first descriptor for TX descriptor ring */ | ||
349 | static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue, | ||
350 | const efx_qword_t *txd) | ||
351 | { | ||
352 | unsigned write_ptr; | ||
353 | efx_oword_t reg; | ||
354 | |||
355 | BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); | ||
356 | BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); | ||
357 | |||
358 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | ||
359 | EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, | ||
360 | FRF_AZ_TX_DESC_WPTR, write_ptr); | ||
361 | reg.qword[0] = *txd; | ||
362 | efx_writeo_page(tx_queue->efx, ®, | ||
363 | FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); | ||
364 | } | ||
365 | |||
366 | static inline bool | ||
367 | efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) | ||
368 | { | ||
369 | unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); | ||
370 | |||
371 | if (empty_read_count == 0) | ||
372 | return false; | ||
373 | |||
374 | tx_queue->empty_read_count = 0; | ||
375 | return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 | ||
376 | && tx_queue->write_count - write_count == 1; | ||
377 | } | ||
378 | |||
379 | /* For each entry inserted into the software descriptor ring, create a | ||
380 | * descriptor in the hardware TX descriptor ring (in host memory), and | ||
381 | * write a doorbell. | ||
382 | */ | ||
383 | void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) | ||
384 | { | ||
385 | |||
386 | struct efx_tx_buffer *buffer; | ||
387 | efx_qword_t *txd; | ||
388 | unsigned write_ptr; | ||
389 | unsigned old_write_count = tx_queue->write_count; | ||
390 | |||
391 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | ||
392 | |||
393 | do { | ||
394 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | ||
395 | buffer = &tx_queue->buffer[write_ptr]; | ||
396 | txd = efx_tx_desc(tx_queue, write_ptr); | ||
397 | ++tx_queue->write_count; | ||
398 | |||
399 | /* Create TX descriptor ring entry */ | ||
400 | BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); | ||
401 | EFX_POPULATE_QWORD_4(*txd, | ||
402 | FSF_AZ_TX_KER_CONT, | ||
403 | buffer->flags & EFX_TX_BUF_CONT, | ||
404 | FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, | ||
405 | FSF_AZ_TX_KER_BUF_REGION, 0, | ||
406 | FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); | ||
407 | } while (tx_queue->write_count != tx_queue->insert_count); | ||
408 | |||
409 | wmb(); /* Ensure descriptors are written before they are fetched */ | ||
410 | |||
411 | if (efx_may_push_tx_desc(tx_queue, old_write_count)) { | ||
412 | txd = efx_tx_desc(tx_queue, | ||
413 | old_write_count & tx_queue->ptr_mask); | ||
414 | efx_push_tx_desc(tx_queue, txd); | ||
415 | ++tx_queue->pushes; | ||
416 | } else { | ||
417 | efx_notify_tx_desc(tx_queue); | ||
418 | } | ||
419 | } | ||
420 | |||
421 | /* Allocate hardware resources for a TX queue */ | ||
422 | int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) | ||
423 | { | ||
424 | struct efx_nic *efx = tx_queue->efx; | ||
425 | unsigned entries; | ||
426 | |||
427 | entries = tx_queue->ptr_mask + 1; | ||
428 | return efx_alloc_special_buffer(efx, &tx_queue->txd, | ||
429 | entries * sizeof(efx_qword_t)); | ||
430 | } | ||
431 | |||
432 | void efx_nic_init_tx(struct efx_tx_queue *tx_queue) | ||
433 | { | ||
434 | struct efx_nic *efx = tx_queue->efx; | ||
435 | efx_oword_t reg; | ||
436 | |||
437 | /* Pin TX descriptor ring */ | ||
438 | efx_init_special_buffer(efx, &tx_queue->txd); | ||
439 | |||
440 | /* Push TX descriptor ring to card */ | ||
441 | EFX_POPULATE_OWORD_10(reg, | ||
442 | FRF_AZ_TX_DESCQ_EN, 1, | ||
443 | FRF_AZ_TX_ISCSI_DDIG_EN, 0, | ||
444 | FRF_AZ_TX_ISCSI_HDIG_EN, 0, | ||
445 | FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, | ||
446 | FRF_AZ_TX_DESCQ_EVQ_ID, | ||
447 | tx_queue->channel->channel, | ||
448 | FRF_AZ_TX_DESCQ_OWNER_ID, 0, | ||
449 | FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, | ||
450 | FRF_AZ_TX_DESCQ_SIZE, | ||
451 | __ffs(tx_queue->txd.entries), | ||
452 | FRF_AZ_TX_DESCQ_TYPE, 0, | ||
453 | FRF_BZ_TX_NON_IP_DROP_DIS, 1); | ||
454 | |||
455 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | ||
456 | int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; | ||
457 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); | ||
458 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, | ||
459 | !csum); | ||
460 | } | ||
461 | |||
462 | efx_writeo_table(efx, ®, efx->type->txd_ptr_tbl_base, | ||
463 | tx_queue->queue); | ||
464 | |||
465 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { | ||
466 | /* Only 128 bits in this register */ | ||
467 | BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); | ||
468 | |||
469 | efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); | ||
470 | if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) | ||
471 | __clear_bit_le(tx_queue->queue, ®); | ||
472 | else | ||
473 | __set_bit_le(tx_queue->queue, ®); | ||
474 | efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); | ||
475 | } | ||
476 | |||
477 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | ||
478 | EFX_POPULATE_OWORD_1(reg, | ||
479 | FRF_BZ_TX_PACE, | ||
480 | (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? | ||
481 | FFE_BZ_TX_PACE_OFF : | ||
482 | FFE_BZ_TX_PACE_RESERVED); | ||
483 | efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, | ||
484 | tx_queue->queue); | ||
485 | } | ||
486 | } | ||
487 | |||
488 | static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) | ||
489 | { | ||
490 | struct efx_nic *efx = tx_queue->efx; | ||
491 | efx_oword_t tx_flush_descq; | ||
492 | |||
493 | WARN_ON(atomic_read(&tx_queue->flush_outstanding)); | ||
494 | atomic_set(&tx_queue->flush_outstanding, 1); | ||
495 | |||
496 | EFX_POPULATE_OWORD_2(tx_flush_descq, | ||
497 | FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, | ||
498 | FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); | ||
499 | efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); | ||
500 | } | ||
501 | |||
502 | void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) | ||
503 | { | ||
504 | struct efx_nic *efx = tx_queue->efx; | ||
505 | efx_oword_t tx_desc_ptr; | ||
506 | |||
507 | /* Remove TX descriptor ring from card */ | ||
508 | EFX_ZERO_OWORD(tx_desc_ptr); | ||
509 | efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | ||
510 | tx_queue->queue); | ||
511 | |||
512 | /* Unpin TX descriptor ring */ | ||
513 | efx_fini_special_buffer(efx, &tx_queue->txd); | ||
514 | } | ||
515 | |||
516 | /* Free buffers backing TX queue */ | ||
517 | void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) | ||
518 | { | ||
519 | efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); | ||
520 | } | ||
521 | |||
522 | /************************************************************************** | ||
523 | * | ||
524 | * RX path | ||
525 | * | ||
526 | **************************************************************************/ | ||
527 | |||
528 | /* Returns a pointer to the specified descriptor in the RX descriptor queue */ | ||
529 | static inline efx_qword_t * | ||
530 | efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) | ||
531 | { | ||
532 | return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; | ||
533 | } | ||
534 | |||
535 | /* This creates an entry in the RX descriptor queue */ | ||
536 | static inline void | ||
537 | efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) | ||
538 | { | ||
539 | struct efx_rx_buffer *rx_buf; | ||
540 | efx_qword_t *rxd; | ||
541 | |||
542 | rxd = efx_rx_desc(rx_queue, index); | ||
543 | rx_buf = efx_rx_buffer(rx_queue, index); | ||
544 | EFX_POPULATE_QWORD_3(*rxd, | ||
545 | FSF_AZ_RX_KER_BUF_SIZE, | ||
546 | rx_buf->len - | ||
547 | rx_queue->efx->type->rx_buffer_padding, | ||
548 | FSF_AZ_RX_KER_BUF_REGION, 0, | ||
549 | FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); | ||
550 | } | ||
551 | |||
552 | /* This writes to the RX_DESC_WPTR register for the specified receive | ||
553 | * descriptor ring. | ||
554 | */ | ||
555 | void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) | ||
556 | { | ||
557 | struct efx_nic *efx = rx_queue->efx; | ||
558 | efx_dword_t reg; | ||
559 | unsigned write_ptr; | ||
560 | |||
561 | while (rx_queue->notified_count != rx_queue->added_count) { | ||
562 | efx_build_rx_desc( | ||
563 | rx_queue, | ||
564 | rx_queue->notified_count & rx_queue->ptr_mask); | ||
565 | ++rx_queue->notified_count; | ||
566 | } | ||
567 | |||
568 | wmb(); | ||
569 | write_ptr = rx_queue->added_count & rx_queue->ptr_mask; | ||
570 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); | ||
571 | efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, | ||
572 | efx_rx_queue_index(rx_queue)); | ||
573 | } | ||
574 | |||
575 | int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) | ||
576 | { | ||
577 | struct efx_nic *efx = rx_queue->efx; | ||
578 | unsigned entries; | ||
579 | |||
580 | entries = rx_queue->ptr_mask + 1; | ||
581 | return efx_alloc_special_buffer(efx, &rx_queue->rxd, | ||
582 | entries * sizeof(efx_qword_t)); | ||
583 | } | ||
584 | |||
585 | void efx_nic_init_rx(struct efx_rx_queue *rx_queue) | ||
586 | { | ||
587 | efx_oword_t rx_desc_ptr; | ||
588 | struct efx_nic *efx = rx_queue->efx; | ||
589 | bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; | ||
590 | bool iscsi_digest_en = is_b0; | ||
591 | bool jumbo_en; | ||
592 | |||
593 | /* For kernel-mode queues in Falcon A1, the JUMBO flag enables | ||
594 | * DMA to continue after a PCIe page boundary (and scattering | ||
595 | * is not possible). In Falcon B0 and Siena, it enables | ||
596 | * scatter. | ||
597 | */ | ||
598 | jumbo_en = !is_b0 || efx->rx_scatter; | ||
599 | |||
600 | netif_dbg(efx, hw, efx->net_dev, | ||
601 | "RX queue %d ring in special buffers %d-%d\n", | ||
602 | efx_rx_queue_index(rx_queue), rx_queue->rxd.index, | ||
603 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); | ||
604 | |||
605 | rx_queue->scatter_n = 0; | ||
606 | |||
607 | /* Pin RX descriptor ring */ | ||
608 | efx_init_special_buffer(efx, &rx_queue->rxd); | ||
609 | |||
610 | /* Push RX descriptor ring to card */ | ||
611 | EFX_POPULATE_OWORD_10(rx_desc_ptr, | ||
612 | FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, | ||
613 | FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, | ||
614 | FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, | ||
615 | FRF_AZ_RX_DESCQ_EVQ_ID, | ||
616 | efx_rx_queue_channel(rx_queue)->channel, | ||
617 | FRF_AZ_RX_DESCQ_OWNER_ID, 0, | ||
618 | FRF_AZ_RX_DESCQ_LABEL, | ||
619 | efx_rx_queue_index(rx_queue), | ||
620 | FRF_AZ_RX_DESCQ_SIZE, | ||
621 | __ffs(rx_queue->rxd.entries), | ||
622 | FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , | ||
623 | FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, | ||
624 | FRF_AZ_RX_DESCQ_EN, 1); | ||
625 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
626 | efx_rx_queue_index(rx_queue)); | ||
627 | } | ||
628 | |||
629 | static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) | ||
630 | { | ||
631 | struct efx_nic *efx = rx_queue->efx; | ||
632 | efx_oword_t rx_flush_descq; | ||
633 | |||
634 | EFX_POPULATE_OWORD_2(rx_flush_descq, | ||
635 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, | ||
636 | FRF_AZ_RX_FLUSH_DESCQ, | ||
637 | efx_rx_queue_index(rx_queue)); | ||
638 | efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); | ||
639 | } | ||
640 | |||
641 | void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) | ||
642 | { | ||
643 | efx_oword_t rx_desc_ptr; | ||
644 | struct efx_nic *efx = rx_queue->efx; | ||
645 | |||
646 | /* Remove RX descriptor ring from card */ | ||
647 | EFX_ZERO_OWORD(rx_desc_ptr); | ||
648 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
649 | efx_rx_queue_index(rx_queue)); | ||
650 | |||
651 | /* Unpin RX descriptor ring */ | ||
652 | efx_fini_special_buffer(efx, &rx_queue->rxd); | ||
653 | } | ||
654 | |||
655 | /* Free buffers backing RX queue */ | ||
656 | void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) | ||
657 | { | ||
658 | efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); | ||
659 | } | ||
660 | |||
661 | /************************************************************************** | ||
662 | * | ||
663 | * Flush handling | ||
664 | * | ||
665 | **************************************************************************/ | ||
666 | |||
667 | /* efx_nic_flush_queues() must be woken up when all flushes are completed, | ||
668 | * or more RX flushes can be kicked off. | ||
669 | */ | ||
670 | static bool efx_flush_wake(struct efx_nic *efx) | ||
671 | { | ||
672 | /* Ensure that all updates are visible to efx_nic_flush_queues() */ | ||
673 | smp_mb(); | ||
674 | |||
675 | return (atomic_read(&efx->drain_pending) == 0 || | ||
676 | (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT | ||
677 | && atomic_read(&efx->rxq_flush_pending) > 0)); | ||
678 | } | ||
679 | |||
680 | static bool efx_check_tx_flush_complete(struct efx_nic *efx) | ||
681 | { | ||
682 | bool i = true; | ||
683 | efx_oword_t txd_ptr_tbl; | ||
684 | struct efx_channel *channel; | ||
685 | struct efx_tx_queue *tx_queue; | ||
686 | |||
687 | efx_for_each_channel(channel, efx) { | ||
688 | efx_for_each_channel_tx_queue(tx_queue, channel) { | ||
689 | efx_reado_table(efx, &txd_ptr_tbl, | ||
690 | FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); | ||
691 | if (EFX_OWORD_FIELD(txd_ptr_tbl, | ||
692 | FRF_AZ_TX_DESCQ_FLUSH) || | ||
693 | EFX_OWORD_FIELD(txd_ptr_tbl, | ||
694 | FRF_AZ_TX_DESCQ_EN)) { | ||
695 | netif_dbg(efx, hw, efx->net_dev, | ||
696 | "flush did not complete on TXQ %d\n", | ||
697 | tx_queue->queue); | ||
698 | i = false; | ||
699 | } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, | ||
700 | 1, 0)) { | ||
701 | /* The flush is complete, but we didn't | ||
702 | * receive a flush completion event | ||
703 | */ | ||
704 | netif_dbg(efx, hw, efx->net_dev, | ||
705 | "flush complete on TXQ %d, so drain " | ||
706 | "the queue\n", tx_queue->queue); | ||
707 | /* Don't need to increment drain_pending as it | ||
708 | * has already been incremented for the queues | ||
709 | * which did not drain | ||
710 | */ | ||
711 | efx_magic_event(channel, | ||
712 | EFX_CHANNEL_MAGIC_TX_DRAIN( | ||
713 | tx_queue)); | ||
714 | } | ||
715 | } | ||
716 | } | ||
717 | |||
718 | return i; | ||
719 | } | ||
720 | |||
721 | /* Flush all the transmit queues, and continue flushing receive queues until | ||
722 |  * they're all flushed. Wait for the DRAIN events to be received so that there | ||
723 | * are no more RX and TX events left on any channel. */ | ||
724 | static int efx_farch_do_flush(struct efx_nic *efx) | ||
725 | { | ||
726 | unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ | ||
727 | struct efx_channel *channel; | ||
728 | struct efx_rx_queue *rx_queue; | ||
729 | struct efx_tx_queue *tx_queue; | ||
730 | int rc = 0; | ||
731 | |||
732 | efx_for_each_channel(channel, efx) { | ||
733 | efx_for_each_channel_tx_queue(tx_queue, channel) { | ||
734 | atomic_inc(&efx->drain_pending); | ||
735 | efx_flush_tx_queue(tx_queue); | ||
736 | } | ||
737 | efx_for_each_channel_rx_queue(rx_queue, channel) { | ||
738 | atomic_inc(&efx->drain_pending); | ||
739 | rx_queue->flush_pending = true; | ||
740 | atomic_inc(&efx->rxq_flush_pending); | ||
741 | } | ||
742 | } | ||
743 | |||
744 | while (timeout && atomic_read(&efx->drain_pending) > 0) { | ||
745 | /* If SRIOV is enabled, then offload receive queue flushing to | ||
746 | * the firmware (though we will still have to poll for | ||
747 | * completion). If that fails, fall back to the old scheme. | ||
748 | */ | ||
749 | if (efx_sriov_enabled(efx)) { | ||
750 | rc = efx_mcdi_flush_rxqs(efx); | ||
751 | if (!rc) | ||
752 | goto wait; | ||
753 | } | ||
754 | |||
755 | /* The hardware supports four concurrent rx flushes, each of | ||
756 | * which may need to be retried if there is an outstanding | ||
757 | * descriptor fetch | ||
758 | */ | ||
759 | efx_for_each_channel(channel, efx) { | ||
760 | efx_for_each_channel_rx_queue(rx_queue, channel) { | ||
761 | if (atomic_read(&efx->rxq_flush_outstanding) >= | ||
762 | EFX_RX_FLUSH_COUNT) | ||
763 | break; | ||
764 | |||
765 | if (rx_queue->flush_pending) { | ||
766 | rx_queue->flush_pending = false; | ||
767 | atomic_dec(&efx->rxq_flush_pending); | ||
768 | atomic_inc(&efx->rxq_flush_outstanding); | ||
769 | efx_flush_rx_queue(rx_queue); | ||
770 | } | ||
771 | } | ||
772 | } | ||
773 | |||
774 | wait: | ||
775 | timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx), | ||
776 | timeout); | ||
777 | } | ||
778 | |||
779 | if (atomic_read(&efx->drain_pending) && | ||
780 | !efx_check_tx_flush_complete(efx)) { | ||
781 | netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " | ||
782 | "(rx %d+%d)\n", atomic_read(&efx->drain_pending), | ||
783 | atomic_read(&efx->rxq_flush_outstanding), | ||
784 | atomic_read(&efx->rxq_flush_pending)); | ||
785 | rc = -ETIMEDOUT; | ||
786 | |||
787 | atomic_set(&efx->drain_pending, 0); | ||
788 | atomic_set(&efx->rxq_flush_pending, 0); | ||
789 | atomic_set(&efx->rxq_flush_outstanding, 0); | ||
790 | } | ||
791 | |||
792 | return rc; | ||
793 | } | ||
794 | |||
795 | int efx_farch_fini_dmaq(struct efx_nic *efx) | ||
796 | { | ||
797 | struct efx_channel *channel; | ||
798 | struct efx_tx_queue *tx_queue; | ||
799 | struct efx_rx_queue *rx_queue; | ||
800 | int rc = 0; | ||
801 | |||
802 | /* Do not attempt to write to the NIC during EEH recovery */ | ||
803 | if (efx->state != STATE_RECOVERY) { | ||
804 | /* Only perform flush if DMA is enabled */ | ||
805 | if (efx->pci_dev->is_busmaster) { | ||
806 | efx->type->prepare_flush(efx); | ||
807 | rc = efx_farch_do_flush(efx); | ||
808 | efx->type->finish_flush(efx); | ||
809 | } | ||
810 | |||
811 | efx_for_each_channel(channel, efx) { | ||
812 | efx_for_each_channel_rx_queue(rx_queue, channel) | ||
813 | efx_nic_fini_rx(rx_queue); | ||
814 | efx_for_each_channel_tx_queue(tx_queue, channel) | ||
815 | efx_nic_fini_tx(tx_queue); | ||
816 | } | ||
817 | } | ||
818 | |||
819 | return rc; | ||
820 | } | ||
821 | |||
822 | /************************************************************************** | ||
823 | * | ||
824 | * Event queue processing | ||
825 | * Event queues are processed by per-channel tasklets. | ||
826 | * | ||
827 | **************************************************************************/ | ||
828 | |||
829 | /* Update a channel's event queue's read pointer (RPTR) register | ||
830 | * | ||
831 | * This writes the EVQ_RPTR_REG register for the specified channel's | ||
832 | * event queue. | ||
833 | */ | ||
834 | void efx_nic_eventq_read_ack(struct efx_channel *channel) | ||
835 | { | ||
836 | efx_dword_t reg; | ||
837 | struct efx_nic *efx = channel->efx; | ||
838 | |||
839 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, | ||
840 | channel->eventq_read_ptr & channel->eventq_mask); | ||
841 | |||
842 | /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size | ||
843 | * of 4 bytes, but it is really 16 bytes just like later revisions. | ||
844 | */ | ||
845 | efx_writed(efx, ®, | ||
846 | efx->type->evq_rptr_tbl_base + | ||
847 | FR_BZ_EVQ_RPTR_STEP * channel->channel); | ||
848 | } | ||
849 | |||
850 | /* Use HW to insert a SW defined event */ | ||
851 | void efx_generate_event(struct efx_nic *efx, unsigned int evq, | ||
852 | efx_qword_t *event) | ||
853 | { | ||
854 | efx_oword_t drv_ev_reg; | ||
855 | |||
856 | BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || | ||
857 | FRF_AZ_DRV_EV_DATA_WIDTH != 64); | ||
858 | drv_ev_reg.u32[0] = event->u32[0]; | ||
859 | drv_ev_reg.u32[1] = event->u32[1]; | ||
860 | drv_ev_reg.u32[2] = 0; | ||
861 | drv_ev_reg.u32[3] = 0; | ||
862 | EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); | ||
863 | efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); | ||
864 | } | ||
865 | |||
866 | static void efx_magic_event(struct efx_channel *channel, u32 magic) | ||
867 | { | ||
868 | efx_qword_t event; | ||
869 | |||
870 | EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, | ||
871 | FSE_AZ_EV_CODE_DRV_GEN_EV, | ||
872 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); | ||
873 | efx_generate_event(channel->efx, channel->channel, &event); | ||
874 | } | ||
875 | |||
876 | /* Handle a transmit completion event | ||
877 | * | ||
878 | * The NIC batches TX completion events; the message we receive is of | ||
879 | * the form "complete all TX events up to this index". | ||
880 | */ | ||
881 | static int | ||
882 | efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | ||
883 | { | ||
884 | unsigned int tx_ev_desc_ptr; | ||
885 | unsigned int tx_ev_q_label; | ||
886 | struct efx_tx_queue *tx_queue; | ||
887 | struct efx_nic *efx = channel->efx; | ||
888 | int tx_packets = 0; | ||
889 | |||
890 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | ||
891 | return 0; | ||
892 | |||
893 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { | ||
894 | /* Transmit completion */ | ||
895 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); | ||
896 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); | ||
897 | tx_queue = efx_channel_get_tx_queue( | ||
898 | channel, tx_ev_q_label % EFX_TXQ_TYPES); | ||
899 | tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & | ||
900 | tx_queue->ptr_mask); | ||
901 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); | ||
902 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { | ||
903 | /* Rewrite the FIFO write pointer */ | ||
904 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); | ||
905 | tx_queue = efx_channel_get_tx_queue( | ||
906 | channel, tx_ev_q_label % EFX_TXQ_TYPES); | ||
907 | |||
908 | netif_tx_lock(efx->net_dev); | ||
909 | efx_notify_tx_desc(tx_queue); | ||
910 | netif_tx_unlock(efx->net_dev); | ||
911 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && | ||
912 | EFX_WORKAROUND_10727(efx)) { | ||
913 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | ||
914 | } else { | ||
915 | netif_err(efx, tx_err, efx->net_dev, | ||
916 | "channel %d unexpected TX event " | ||
917 | EFX_QWORD_FMT"\n", channel->channel, | ||
918 | EFX_QWORD_VAL(*event)); | ||
919 | } | ||
920 | |||
921 | return tx_packets; | ||
922 | } | ||
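For illustration, a minimal userspace sketch of the completion-count arithmetic used above; the ring size and counter values are invented, but they show how masking a free-running read count against a power-of-two ring handles wrap-around:

	#include <assert.h>

	/* ptr_mask is ring_size - 1 for a power-of-two descriptor ring */
	static unsigned int completed(unsigned int desc_ptr,
				      unsigned int read_count,
				      unsigned int ptr_mask)
	{
		return (desc_ptr - read_count) & ptr_mask;
	}

	int main(void)
	{
		assert(completed(1010, 1000, 1023) == 10);	/* no wrap */
		assert(completed(5, 1020, 1023) == 9);		/* wraps past the ring end */
		return 0;
	}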
923 | |||
924 | /* Detect errors included in the rx_evt_pkt_ok bit. */ | ||
925 | static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | ||
926 | const efx_qword_t *event) | ||
927 | { | ||
928 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | ||
929 | struct efx_nic *efx = rx_queue->efx; | ||
930 | bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; | ||
931 | bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; | ||
932 | bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; | ||
933 | bool rx_ev_other_err, rx_ev_pause_frm; | ||
934 | bool rx_ev_hdr_type, rx_ev_mcast_pkt; | ||
935 | unsigned rx_ev_pkt_type; | ||
936 | |||
937 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); | ||
938 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); | ||
939 | rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); | ||
940 | rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); | ||
941 | rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, | ||
942 | FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); | ||
943 | rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, | ||
944 | FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); | ||
945 | rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, | ||
946 | FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); | ||
947 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); | ||
948 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); | ||
949 | rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? | ||
950 | 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); | ||
951 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); | ||
952 | |||
953 | /* Every error apart from tobe_disc and pause_frm */ | ||
954 | rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | | ||
955 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | | ||
956 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); | ||
957 | |||
958 | /* Count errors that are not in MAC stats. Ignore expected | ||
959 | * checksum errors during self-test. */ | ||
960 | if (rx_ev_frm_trunc) | ||
961 | ++channel->n_rx_frm_trunc; | ||
962 | else if (rx_ev_tobe_disc) | ||
963 | ++channel->n_rx_tobe_disc; | ||
964 | else if (!efx->loopback_selftest) { | ||
965 | if (rx_ev_ip_hdr_chksum_err) | ||
966 | ++channel->n_rx_ip_hdr_chksum_err; | ||
967 | else if (rx_ev_tcp_udp_chksum_err) | ||
968 | ++channel->n_rx_tcp_udp_chksum_err; | ||
969 | } | ||
970 | |||
971 | /* TOBE_DISC is expected on unicast mismatches; don't print out an | ||
972 | * error message. FRM_TRUNC indicates RXDP dropped the packet due | ||
973 | * to a FIFO overflow. | ||
974 | */ | ||
975 | #ifdef DEBUG | ||
976 | if (rx_ev_other_err && net_ratelimit()) { | ||
977 | netif_dbg(efx, rx_err, efx->net_dev, | ||
978 | " RX queue %d unexpected RX event " | ||
979 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", | ||
980 | efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), | ||
981 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", | ||
982 | rx_ev_ip_hdr_chksum_err ? | ||
983 | " [IP_HDR_CHKSUM_ERR]" : "", | ||
984 | rx_ev_tcp_udp_chksum_err ? | ||
985 | " [TCP_UDP_CHKSUM_ERR]" : "", | ||
986 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", | ||
987 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", | ||
988 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", | ||
989 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", | ||
990 | rx_ev_pause_frm ? " [PAUSE]" : ""); | ||
991 | } | ||
992 | #endif | ||
993 | |||
994 | /* The frame must be discarded if any of these are true. */ | ||
995 | return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | | ||
996 | rx_ev_tobe_disc | rx_ev_pause_frm) ? | ||
997 | EFX_RX_PKT_DISCARD : 0; | ||
998 | } | ||
999 | |||
1000 | /* Handle receive events that are not in-order. Return true if this | ||
1001 | * can be handled as a partial packet discard, false if it's more | ||
1002 | * serious. | ||
1003 | */ | ||
1004 | static bool | ||
1005 | efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) | ||
1006 | { | ||
1007 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | ||
1008 | struct efx_nic *efx = rx_queue->efx; | ||
1009 | unsigned expected, dropped; | ||
1010 | |||
1011 | if (rx_queue->scatter_n && | ||
1012 | index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & | ||
1013 | rx_queue->ptr_mask)) { | ||
1014 | ++channel->n_rx_nodesc_trunc; | ||
1015 | return true; | ||
1016 | } | ||
1017 | |||
1018 | expected = rx_queue->removed_count & rx_queue->ptr_mask; | ||
1019 | dropped = (index - expected) & rx_queue->ptr_mask; | ||
1020 | netif_info(efx, rx_err, efx->net_dev, | ||
1021 | "dropped %d events (index=%d expected=%d)\n", | ||
1022 | dropped, index, expected); | ||
1023 | |||
1024 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? | ||
1025 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | ||
1026 | return false; | ||
1027 | } | ||
1028 | |||
1029 | /* Handle a packet received event | ||
1030 | * | ||
1031 | * The NIC gives a "discard" flag if it's a unicast packet with the | ||
1032 | * wrong destination address | ||
1033 | * Also "is multicast" and "matches multicast filter" flags can be used to | ||
1034 | * discard non-matching multicast packets. | ||
1035 | */ | ||
1036 | static void | ||
1037 | efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) | ||
1038 | { | ||
1039 | unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; | ||
1040 | unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; | ||
1041 | unsigned expected_ptr; | ||
1042 | bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; | ||
1043 | u16 flags; | ||
1044 | struct efx_rx_queue *rx_queue; | ||
1045 | struct efx_nic *efx = channel->efx; | ||
1046 | |||
1047 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | ||
1048 | return; | ||
1049 | |||
1050 | rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); | ||
1051 | rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); | ||
1052 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != | ||
1053 | channel->channel); | ||
1054 | |||
1055 | rx_queue = efx_channel_get_rx_queue(channel); | ||
1056 | |||
1057 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); | ||
1058 | expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & | ||
1059 | rx_queue->ptr_mask); | ||
1060 | |||
1061 | /* Check for partial drops and other errors */ | ||
1062 | if (unlikely(rx_ev_desc_ptr != expected_ptr) || | ||
1063 | unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { | ||
1064 | if (rx_ev_desc_ptr != expected_ptr && | ||
1065 | !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) | ||
1066 | return; | ||
1067 | |||
1068 | /* Discard all pending fragments */ | ||
1069 | if (rx_queue->scatter_n) { | ||
1070 | efx_rx_packet( | ||
1071 | rx_queue, | ||
1072 | rx_queue->removed_count & rx_queue->ptr_mask, | ||
1073 | rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); | ||
1074 | rx_queue->removed_count += rx_queue->scatter_n; | ||
1075 | rx_queue->scatter_n = 0; | ||
1076 | } | ||
1077 | |||
1078 | /* Return if there is no new fragment */ | ||
1079 | if (rx_ev_desc_ptr != expected_ptr) | ||
1080 | return; | ||
1081 | |||
1082 | /* Discard new fragment if not SOP */ | ||
1083 | if (!rx_ev_sop) { | ||
1084 | efx_rx_packet( | ||
1085 | rx_queue, | ||
1086 | rx_queue->removed_count & rx_queue->ptr_mask, | ||
1087 | 1, 0, EFX_RX_PKT_DISCARD); | ||
1088 | ++rx_queue->removed_count; | ||
1089 | return; | ||
1090 | } | ||
1091 | } | ||
1092 | |||
1093 | ++rx_queue->scatter_n; | ||
1094 | if (rx_ev_cont) | ||
1095 | return; | ||
1096 | |||
1097 | rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); | ||
1098 | rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); | ||
1099 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); | ||
1100 | |||
1101 | if (likely(rx_ev_pkt_ok)) { | ||
1102 | /* If packet is marked as OK then we can rely on the | ||
1103 | * hardware checksum and classification. | ||
1104 | */ | ||
1105 | flags = 0; | ||
1106 | switch (rx_ev_hdr_type) { | ||
1107 | case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: | ||
1108 | flags |= EFX_RX_PKT_TCP; | ||
1109 | /* fall through */ | ||
1110 | case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: | ||
1111 | flags |= EFX_RX_PKT_CSUMMED; | ||
1112 | /* fall through */ | ||
1113 | case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: | ||
1114 | case FSE_AZ_RX_EV_HDR_TYPE_OTHER: | ||
1115 | break; | ||
1116 | } | ||
1117 | } else { | ||
1118 | flags = efx_handle_rx_not_ok(rx_queue, event); | ||
1119 | } | ||
1120 | |||
1121 | /* Detect multicast packets that didn't match the filter */ | ||
1122 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); | ||
1123 | if (rx_ev_mcast_pkt) { | ||
1124 | unsigned int rx_ev_mcast_hash_match = | ||
1125 | EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); | ||
1126 | |||
1127 | if (unlikely(!rx_ev_mcast_hash_match)) { | ||
1128 | ++channel->n_rx_mcast_mismatch; | ||
1129 | flags |= EFX_RX_PKT_DISCARD; | ||
1130 | } | ||
1131 | } | ||
1132 | |||
1133 | channel->irq_mod_score += 2; | ||
1134 | |||
1135 | /* Handle received packet */ | ||
1136 | efx_rx_packet(rx_queue, | ||
1137 | rx_queue->removed_count & rx_queue->ptr_mask, | ||
1138 | rx_queue->scatter_n, rx_ev_byte_cnt, flags); | ||
1139 | rx_queue->removed_count += rx_queue->scatter_n; | ||
1140 | rx_queue->scatter_n = 0; | ||
1141 | } | ||
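As a worked trace of the scatter path above (the descriptor counts are invented): a packet split across three descriptors arrives as three events on the same queue. The first has SOP and JUMBO_CONT set, the second has only JUMBO_CONT, and the third has neither and carries the byte count. scatter_n steps 1, 2, 3 across the events, each one matching expected_ptr, and only the third falls through to efx_rx_packet(), which is handed the index removed_count & ptr_mask, three fragments and the byte count; removed_count then advances by three and scatter_n resets to zero.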
1142 | |||
1143 | /* If this flush done event corresponds to a &struct efx_tx_queue, then | ||
1144 | * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue | ||
1145 | * of all transmit completions. | ||
1146 | */ | ||
1147 | static void | ||
1148 | efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) | ||
1149 | { | ||
1150 | struct efx_tx_queue *tx_queue; | ||
1151 | int qid; | ||
1152 | |||
1153 | qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); | ||
1154 | if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { | ||
1155 | tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, | ||
1156 | qid % EFX_TXQ_TYPES); | ||
1157 | if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) { | ||
1158 | efx_magic_event(tx_queue->channel, | ||
1159 | EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); | ||
1160 | } | ||
1161 | } | ||
1162 | } | ||
1163 | |||
1164 | /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush | ||
1165 | * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add | ||
1166 | * the RX queue back to the mask of RX queues in need of flushing. | ||
1167 | */ | ||
1168 | static void | ||
1169 | efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) | ||
1170 | { | ||
1171 | struct efx_channel *channel; | ||
1172 | struct efx_rx_queue *rx_queue; | ||
1173 | int qid; | ||
1174 | bool failed; | ||
1175 | |||
1176 | qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); | ||
1177 | failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); | ||
1178 | if (qid >= efx->n_channels) | ||
1179 | return; | ||
1180 | channel = efx_get_channel(efx, qid); | ||
1181 | if (!efx_channel_has_rx_queue(channel)) | ||
1182 | return; | ||
1183 | rx_queue = efx_channel_get_rx_queue(channel); | ||
1184 | |||
1185 | if (failed) { | ||
1186 | netif_info(efx, hw, efx->net_dev, | ||
1187 | "RXQ %d flush retry\n", qid); | ||
1188 | rx_queue->flush_pending = true; | ||
1189 | atomic_inc(&efx->rxq_flush_pending); | ||
1190 | } else { | ||
1191 | efx_magic_event(efx_rx_queue_channel(rx_queue), | ||
1192 | EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); | ||
1193 | } | ||
1194 | atomic_dec(&efx->rxq_flush_outstanding); | ||
1195 | if (efx_flush_wake(efx)) | ||
1196 | wake_up(&efx->flush_wq); | ||
1197 | } | ||
1198 | |||
1199 | static void | ||
1200 | efx_handle_drain_event(struct efx_channel *channel) | ||
1201 | { | ||
1202 | struct efx_nic *efx = channel->efx; | ||
1203 | |||
1204 | WARN_ON(atomic_read(&efx->drain_pending) == 0); | ||
1205 | atomic_dec(&efx->drain_pending); | ||
1206 | if (efx_flush_wake(efx)) | ||
1207 | wake_up(&efx->flush_wq); | ||
1208 | } | ||
1209 | |||
1210 | static void | ||
1211 | efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) | ||
1212 | { | ||
1213 | struct efx_nic *efx = channel->efx; | ||
1214 | struct efx_rx_queue *rx_queue = | ||
1215 | efx_channel_has_rx_queue(channel) ? | ||
1216 | efx_channel_get_rx_queue(channel) : NULL; | ||
1217 | unsigned magic, code; | ||
1218 | |||
1219 | magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); | ||
1220 | code = _EFX_CHANNEL_MAGIC_CODE(magic); | ||
1221 | |||
1222 | if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { | ||
1223 | channel->event_test_cpu = raw_smp_processor_id(); | ||
1224 | } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { | ||
1225 | /* The queue must be empty, so we won't receive any rx | ||
1226 | * events, so efx_process_channel() won't refill the | ||
1227 | * queue. Refill it here */ | ||
1228 | efx_fast_push_rx_descriptors(rx_queue); | ||
1229 | } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { | ||
1230 | efx_handle_drain_event(channel); | ||
1231 | } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { | ||
1232 | efx_handle_drain_event(channel); | ||
1233 | } else { | ||
1234 | netif_dbg(efx, hw, efx->net_dev, "channel %d received " | ||
1235 | "generated event "EFX_QWORD_FMT"\n", | ||
1236 | channel->channel, EFX_QWORD_VAL(*event)); | ||
1237 | } | ||
1238 | } | ||
1239 | |||
1240 | static void | ||
1241 | efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | ||
1242 | { | ||
1243 | struct efx_nic *efx = channel->efx; | ||
1244 | unsigned int ev_sub_code; | ||
1245 | unsigned int ev_sub_data; | ||
1246 | |||
1247 | ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); | ||
1248 | ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); | ||
1249 | |||
1250 | switch (ev_sub_code) { | ||
1251 | case FSE_AZ_TX_DESCQ_FLS_DONE_EV: | ||
1252 | netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", | ||
1253 | channel->channel, ev_sub_data); | ||
1254 | efx_handle_tx_flush_done(efx, event); | ||
1255 | efx_sriov_tx_flush_done(efx, event); | ||
1256 | break; | ||
1257 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: | ||
1258 | netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", | ||
1259 | channel->channel, ev_sub_data); | ||
1260 | efx_handle_rx_flush_done(efx, event); | ||
1261 | efx_sriov_rx_flush_done(efx, event); | ||
1262 | break; | ||
1263 | case FSE_AZ_EVQ_INIT_DONE_EV: | ||
1264 | netif_dbg(efx, hw, efx->net_dev, | ||
1265 | "channel %d EVQ %d initialised\n", | ||
1266 | channel->channel, ev_sub_data); | ||
1267 | break; | ||
1268 | case FSE_AZ_SRM_UPD_DONE_EV: | ||
1269 | netif_vdbg(efx, hw, efx->net_dev, | ||
1270 | "channel %d SRAM update done\n", channel->channel); | ||
1271 | break; | ||
1272 | case FSE_AZ_WAKE_UP_EV: | ||
1273 | netif_vdbg(efx, hw, efx->net_dev, | ||
1274 | "channel %d RXQ %d wakeup event\n", | ||
1275 | channel->channel, ev_sub_data); | ||
1276 | break; | ||
1277 | case FSE_AZ_TIMER_EV: | ||
1278 | netif_vdbg(efx, hw, efx->net_dev, | ||
1279 | "channel %d RX queue %d timer expired\n", | ||
1280 | channel->channel, ev_sub_data); | ||
1281 | break; | ||
1282 | case FSE_AA_RX_RECOVER_EV: | ||
1283 | netif_err(efx, rx_err, efx->net_dev, | ||
1284 | "channel %d seen DRIVER RX_RESET event. " | ||
1285 | "Resetting.\n", channel->channel); | ||
1286 | atomic_inc(&efx->rx_reset); | ||
1287 | efx_schedule_reset(efx, | ||
1288 | EFX_WORKAROUND_6555(efx) ? | ||
1289 | RESET_TYPE_RX_RECOVERY : | ||
1290 | RESET_TYPE_DISABLE); | ||
1291 | break; | ||
1292 | case FSE_BZ_RX_DSC_ERROR_EV: | ||
1293 | if (ev_sub_data < EFX_VI_BASE) { | ||
1294 | netif_err(efx, rx_err, efx->net_dev, | ||
1295 | "RX DMA Q %d reports descriptor fetch error." | ||
1296 | " RX Q %d is disabled.\n", ev_sub_data, | ||
1297 | ev_sub_data); | ||
1298 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); | ||
1299 | } else | ||
1300 | efx_sriov_desc_fetch_err(efx, ev_sub_data); | ||
1301 | break; | ||
1302 | case FSE_BZ_TX_DSC_ERROR_EV: | ||
1303 | if (ev_sub_data < EFX_VI_BASE) { | ||
1304 | netif_err(efx, tx_err, efx->net_dev, | ||
1305 | "TX DMA Q %d reports descriptor fetch error." | ||
1306 | " TX Q %d is disabled.\n", ev_sub_data, | ||
1307 | ev_sub_data); | ||
1308 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | ||
1309 | } else | ||
1310 | efx_sriov_desc_fetch_err(efx, ev_sub_data); | ||
1311 | break; | ||
1312 | default: | ||
1313 | netif_vdbg(efx, hw, efx->net_dev, | ||
1314 | "channel %d unknown driver event code %d " | ||
1315 | "data %04x\n", channel->channel, ev_sub_code, | ||
1316 | ev_sub_data); | ||
1317 | break; | ||
1318 | } | ||
1319 | } | ||
1320 | |||
1321 | int efx_nic_process_eventq(struct efx_channel *channel, int budget) | ||
1322 | { | ||
1323 | struct efx_nic *efx = channel->efx; | ||
1324 | unsigned int read_ptr; | ||
1325 | efx_qword_t event, *p_event; | ||
1326 | int ev_code; | ||
1327 | int tx_packets = 0; | ||
1328 | int spent = 0; | ||
1329 | |||
1330 | read_ptr = channel->eventq_read_ptr; | ||
1331 | |||
1332 | for (;;) { | ||
1333 | p_event = efx_event(channel, read_ptr); | ||
1334 | event = *p_event; | ||
1335 | |||
1336 | if (!efx_event_present(&event)) | ||
1337 | /* End of events */ | ||
1338 | break; | ||
1339 | |||
1340 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, | ||
1341 | "channel %d event is "EFX_QWORD_FMT"\n", | ||
1342 | channel->channel, EFX_QWORD_VAL(event)); | ||
1343 | |||
1344 | /* Clear this event by marking it all ones */ | ||
1345 | EFX_SET_QWORD(*p_event); | ||
1346 | |||
1347 | ++read_ptr; | ||
1348 | |||
1349 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); | ||
1350 | |||
1351 | switch (ev_code) { | ||
1352 | case FSE_AZ_EV_CODE_RX_EV: | ||
1353 | efx_handle_rx_event(channel, &event); | ||
1354 | if (++spent == budget) | ||
1355 | goto out; | ||
1356 | break; | ||
1357 | case FSE_AZ_EV_CODE_TX_EV: | ||
1358 | tx_packets += efx_handle_tx_event(channel, &event); | ||
1359 | if (tx_packets > efx->txq_entries) { | ||
1360 | spent = budget; | ||
1361 | goto out; | ||
1362 | } | ||
1363 | break; | ||
1364 | case FSE_AZ_EV_CODE_DRV_GEN_EV: | ||
1365 | efx_handle_generated_event(channel, &event); | ||
1366 | break; | ||
1367 | case FSE_AZ_EV_CODE_DRIVER_EV: | ||
1368 | efx_handle_driver_event(channel, &event); | ||
1369 | break; | ||
1370 | case FSE_CZ_EV_CODE_USER_EV: | ||
1371 | efx_sriov_event(channel, &event); | ||
1372 | break; | ||
1373 | case FSE_CZ_EV_CODE_MCDI_EV: | ||
1374 | efx_mcdi_process_event(channel, &event); | ||
1375 | break; | ||
1376 | case FSE_AZ_EV_CODE_GLOBAL_EV: | ||
1377 | if (efx->type->handle_global_event && | ||
1378 | efx->type->handle_global_event(channel, &event)) | ||
1379 | break; | ||
1380 | /* else fall through */ | ||
1381 | default: | ||
1382 | netif_err(channel->efx, hw, channel->efx->net_dev, | ||
1383 | "channel %d unknown event type %d (data " | ||
1384 | EFX_QWORD_FMT ")\n", channel->channel, | ||
1385 | ev_code, EFX_QWORD_VAL(event)); | ||
1386 | } | ||
1387 | } | ||
1388 | |||
1389 | out: | ||
1390 | channel->eventq_read_ptr = read_ptr; | ||
1391 | return spent; | ||
1392 | } | ||
1393 | |||
1394 | /* Check whether an event is present in the eventq at the current | 54 | /* Check whether an event is present in the eventq at the current |
1395 | * read pointer. Only useful for self-test. | 55 | * read pointer. Only useful for self-test. |
1396 | */ | 56 | */ |
@@ -1399,326 +59,18 @@ bool efx_nic_event_present(struct efx_channel *channel) | |||
1399 | return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); | 59 | return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); |
1400 | } | 60 | } |
1401 | 61 | ||
1402 | /* Allocate buffer table entries for event queue */ | ||
1403 | int efx_nic_probe_eventq(struct efx_channel *channel) | ||
1404 | { | ||
1405 | struct efx_nic *efx = channel->efx; | ||
1406 | unsigned entries; | ||
1407 | |||
1408 | entries = channel->eventq_mask + 1; | ||
1409 | return efx_alloc_special_buffer(efx, &channel->eventq, | ||
1410 | entries * sizeof(efx_qword_t)); | ||
1411 | } | ||
1412 | |||
1413 | void efx_nic_init_eventq(struct efx_channel *channel) | ||
1414 | { | ||
1415 | efx_oword_t reg; | ||
1416 | struct efx_nic *efx = channel->efx; | ||
1417 | |||
1418 | netif_dbg(efx, hw, efx->net_dev, | ||
1419 | "channel %d event queue in special buffers %d-%d\n", | ||
1420 | channel->channel, channel->eventq.index, | ||
1421 | channel->eventq.index + channel->eventq.entries - 1); | ||
1422 | |||
1423 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { | ||
1424 | EFX_POPULATE_OWORD_3(reg, | ||
1425 | FRF_CZ_TIMER_Q_EN, 1, | ||
1426 | FRF_CZ_HOST_NOTIFY_MODE, 0, | ||
1427 | FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); | ||
1428 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); | ||
1429 | } | ||
1430 | |||
1431 | /* Pin event queue buffer */ | ||
1432 | efx_init_special_buffer(efx, &channel->eventq); | ||
1433 | |||
1434 | /* Fill event queue with all ones (i.e. empty events) */ | ||
1435 | memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); | ||
1436 | |||
1437 | /* Push event queue to card */ | ||
1438 | EFX_POPULATE_OWORD_3(reg, | ||
1439 | FRF_AZ_EVQ_EN, 1, | ||
1440 | FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), | ||
1441 | FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); | ||
1442 | efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, | ||
1443 | channel->channel); | ||
1444 | |||
1445 | efx->type->push_irq_moderation(channel); | ||
1446 | } | ||
1447 | |||
1448 | void efx_nic_fini_eventq(struct efx_channel *channel) | ||
1449 | { | ||
1450 | efx_oword_t reg; | ||
1451 | struct efx_nic *efx = channel->efx; | ||
1452 | |||
1453 | /* Remove event queue from card */ | ||
1454 | EFX_ZERO_OWORD(reg); | ||
1455 | efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, | ||
1456 | channel->channel); | ||
1457 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | ||
1458 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); | ||
1459 | |||
1460 | /* Unpin event queue */ | ||
1461 | efx_fini_special_buffer(efx, &channel->eventq); | ||
1462 | } | ||
1463 | |||
1464 | /* Free buffers backing event queue */ | ||
1465 | void efx_nic_remove_eventq(struct efx_channel *channel) | ||
1466 | { | ||
1467 | efx_free_special_buffer(channel->efx, &channel->eventq); | ||
1468 | } | ||
1469 | |||
1470 | |||
1471 | void efx_nic_event_test_start(struct efx_channel *channel) | 62 | void efx_nic_event_test_start(struct efx_channel *channel) |
1472 | { | 63 | { |
1473 | channel->event_test_cpu = -1; | 64 | channel->event_test_cpu = -1; |
1474 | smp_wmb(); | 65 | smp_wmb(); |
1475 | efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); | 66 | channel->efx->type->ev_test_generate(channel); |
1476 | } | ||
1477 | |||
1478 | void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) | ||
1479 | { | ||
1480 | efx_magic_event(efx_rx_queue_channel(rx_queue), | ||
1481 | EFX_CHANNEL_MAGIC_FILL(rx_queue)); | ||
1482 | } | ||
1483 | |||
1484 | /************************************************************************** | ||
1485 | * | ||
1486 | * Hardware interrupts | ||
1487 | * The hardware interrupt handler does very little work; all the event | ||
1488 | * queue processing is carried out by per-channel tasklets. | ||
1489 | * | ||
1490 | **************************************************************************/ | ||
1491 | |||
1492 | /* Enable/disable/generate interrupts */ | ||
1493 | static inline void efx_nic_interrupts(struct efx_nic *efx, | ||
1494 | bool enabled, bool force) | ||
1495 | { | ||
1496 | efx_oword_t int_en_reg_ker; | ||
1497 | |||
1498 | EFX_POPULATE_OWORD_3(int_en_reg_ker, | ||
1499 | FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, | ||
1500 | FRF_AZ_KER_INT_KER, force, | ||
1501 | FRF_AZ_DRV_INT_EN_KER, enabled); | ||
1502 | efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); | ||
1503 | } | 67 | } |
1504 | 68 | ||
1505 | void efx_nic_enable_interrupts(struct efx_nic *efx) | ||
1506 | { | ||
1507 | EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); | ||
1508 | wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ | ||
1509 | |||
1510 | efx_nic_interrupts(efx, true, false); | ||
1511 | } | ||
1512 | |||
1513 | void efx_nic_disable_interrupts(struct efx_nic *efx) | ||
1514 | { | ||
1515 | /* Disable interrupts */ | ||
1516 | efx_nic_interrupts(efx, false, false); | ||
1517 | } | ||
1518 | |||
1519 | /* Generate a test interrupt | ||
1520 | * Interrupt must already have been enabled, otherwise nasty things | ||
1521 | * may happen. | ||
1522 | */ | ||
1523 | void efx_nic_irq_test_start(struct efx_nic *efx) | 69 | void efx_nic_irq_test_start(struct efx_nic *efx) |
1524 | { | 70 | { |
1525 | efx->last_irq_cpu = -1; | 71 | efx->last_irq_cpu = -1; |
1526 | smp_wmb(); | 72 | smp_wmb(); |
1527 | efx_nic_interrupts(efx, true, true); | 73 | efx->type->irq_test_generate(efx); |
1528 | } | ||
1529 | |||
1530 | /* Process a fatal interrupt | ||
1531 | * Disable bus mastering ASAP and schedule a reset | ||
1532 | */ | ||
1533 | irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) | ||
1534 | { | ||
1535 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
1536 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1537 | efx_oword_t fatal_intr; | ||
1538 | int error, mem_perr; | ||
1539 | |||
1540 | efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); | ||
1541 | error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); | ||
1542 | |||
1543 | netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " | ||
1544 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), | ||
1545 | EFX_OWORD_VAL(fatal_intr), | ||
1546 | error ? "disabling bus mastering" : "no recognised error"); | ||
1547 | |||
1548 | /* If this is a memory parity error dump which blocks are offending */ | ||
1549 | mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || | ||
1550 | EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); | ||
1551 | if (mem_perr) { | ||
1552 | efx_oword_t reg; | ||
1553 | efx_reado(efx, ®, FR_AZ_MEM_STAT); | ||
1554 | netif_err(efx, hw, efx->net_dev, | ||
1555 | "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", | ||
1556 | EFX_OWORD_VAL(reg)); | ||
1557 | } | ||
1558 | |||
1559 | /* Disable both devices */ | ||
1560 | pci_clear_master(efx->pci_dev); | ||
1561 | if (efx_nic_is_dual_func(efx)) | ||
1562 | pci_clear_master(nic_data->pci_dev2); | ||
1563 | efx_nic_disable_interrupts(efx); | ||
1564 | |||
1565 | /* Count errors and reset or disable the NIC accordingly */ | ||
1566 | if (efx->int_error_count == 0 || | ||
1567 | time_after(jiffies, efx->int_error_expire)) { | ||
1568 | efx->int_error_count = 0; | ||
1569 | efx->int_error_expire = | ||
1570 | jiffies + EFX_INT_ERROR_EXPIRE * HZ; | ||
1571 | } | ||
1572 | if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { | ||
1573 | netif_err(efx, hw, efx->net_dev, | ||
1574 | "SYSTEM ERROR - reset scheduled\n"); | ||
1575 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); | ||
1576 | } else { | ||
1577 | netif_err(efx, hw, efx->net_dev, | ||
1578 | "SYSTEM ERROR - max number of errors seen." | ||
1579 | "NIC will be disabled\n"); | ||
1580 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | ||
1581 | } | ||
1582 | |||
1583 | return IRQ_HANDLED; | ||
1584 | } | ||
1585 | |||
1586 | /* Handle a legacy interrupt | ||
1587 | * Acknowledges the interrupt and schedules event queue processing. | ||
1588 | */ | ||
1589 | static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) | ||
1590 | { | ||
1591 | struct efx_nic *efx = dev_id; | ||
1592 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | ||
1593 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1594 | irqreturn_t result = IRQ_NONE; | ||
1595 | struct efx_channel *channel; | ||
1596 | efx_dword_t reg; | ||
1597 | u32 queues; | ||
1598 | int syserr; | ||
1599 | |||
1600 | /* Read the ISR which also ACKs the interrupts */ | ||
1601 | efx_readd(efx, ®, FR_BZ_INT_ISR0); | ||
1602 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); | ||
1603 | |||
1604 | /* Legacy interrupts are disabled too late by the EEH kernel | ||
1605 | * code. Disable them earlier. | ||
1606 | * If an EEH error occurred, the read will have returned all ones. | ||
1607 | */ | ||
1608 | if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && | ||
1609 | !efx->eeh_disabled_legacy_irq) { | ||
1610 | disable_irq_nosync(efx->legacy_irq); | ||
1611 | efx->eeh_disabled_legacy_irq = true; | ||
1612 | } | ||
1613 | |||
1614 | /* Handle non-event-queue sources */ | ||
1615 | if (queues & (1U << efx->irq_level) && soft_enabled) { | ||
1616 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | ||
1617 | if (unlikely(syserr)) | ||
1618 | return efx_nic_fatal_interrupt(efx); | ||
1619 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1620 | } | ||
1621 | |||
1622 | if (queues != 0) { | ||
1623 | if (EFX_WORKAROUND_15783(efx)) | ||
1624 | efx->irq_zero_count = 0; | ||
1625 | |||
1626 | /* Schedule processing of any interrupting queues */ | ||
1627 | if (likely(soft_enabled)) { | ||
1628 | efx_for_each_channel(channel, efx) { | ||
1629 | if (queues & 1) | ||
1630 | efx_schedule_channel_irq(channel); | ||
1631 | queues >>= 1; | ||
1632 | } | ||
1633 | } | ||
1634 | result = IRQ_HANDLED; | ||
1635 | |||
1636 | } else if (EFX_WORKAROUND_15783(efx)) { | ||
1637 | efx_qword_t *event; | ||
1638 | |||
1639 | /* We can't return IRQ_HANDLED more than once on seeing ISR=0 | ||
1640 | * because this might be a shared interrupt. */ | ||
1641 | if (efx->irq_zero_count++ == 0) | ||
1642 | result = IRQ_HANDLED; | ||
1643 | |||
1644 | /* Ensure we schedule or rearm all event queues */ | ||
1645 | if (likely(soft_enabled)) { | ||
1646 | efx_for_each_channel(channel, efx) { | ||
1647 | event = efx_event(channel, | ||
1648 | channel->eventq_read_ptr); | ||
1649 | if (efx_event_present(event)) | ||
1650 | efx_schedule_channel_irq(channel); | ||
1651 | else | ||
1652 | efx_nic_eventq_read_ack(channel); | ||
1653 | } | ||
1654 | } | ||
1655 | } | ||
1656 | |||
1657 | if (result == IRQ_HANDLED) | ||
1658 | netif_vdbg(efx, intr, efx->net_dev, | ||
1659 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | ||
1660 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | ||
1661 | |||
1662 | return result; | ||
1663 | } | ||
1664 | |||
1665 | /* Handle an MSI interrupt | ||
1666 | * | ||
1667 | * Handle an MSI hardware interrupt. This routine schedules event | ||
1668 | * queue processing. No interrupt acknowledgement cycle is necessary. | ||
1669 | * Also, we never need to check that the interrupt is for us, since | ||
1670 | * MSI interrupts cannot be shared. | ||
1671 | */ | ||
1672 | static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) | ||
1673 | { | ||
1674 | struct efx_msi_context *context = dev_id; | ||
1675 | struct efx_nic *efx = context->efx; | ||
1676 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1677 | int syserr; | ||
1678 | |||
1679 | netif_vdbg(efx, intr, efx->net_dev, | ||
1680 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | ||
1681 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | ||
1682 | |||
1683 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | ||
1684 | return IRQ_HANDLED; | ||
1685 | |||
1686 | /* Handle non-event-queue sources */ | ||
1687 | if (context->index == efx->irq_level) { | ||
1688 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | ||
1689 | if (unlikely(syserr)) | ||
1690 | return efx_nic_fatal_interrupt(efx); | ||
1691 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1692 | } | ||
1693 | |||
1694 | /* Schedule processing of the channel */ | ||
1695 | efx_schedule_channel_irq(efx->channel[context->index]); | ||
1696 | |||
1697 | return IRQ_HANDLED; | ||
1698 | } | ||
1699 | |||
1700 | |||
1701 | /* Setup RSS indirection table. | ||
1702 | * This maps from the hash value of the packet to RXQ | ||
1703 | */ | ||
1704 | void efx_nic_push_rx_indir_table(struct efx_nic *efx) | ||
1705 | { | ||
1706 | size_t i = 0; | ||
1707 | efx_dword_t dword; | ||
1708 | |||
1709 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | ||
1710 | return; | ||
1711 | |||
1712 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != | ||
1713 | FR_BZ_RX_INDIRECTION_TBL_ROWS); | ||
1714 | |||
1715 | for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { | ||
1716 | EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, | ||
1717 | efx->rx_indir_table[i]); | ||
1718 | efx_writed(efx, &dword, | ||
1719 | FR_BZ_RX_INDIRECTION_TBL + | ||
1720 | FR_BZ_RX_INDIRECTION_TBL_STEP * i); | ||
1721 | } | ||
1722 | } | 74 | } |
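The table contents pushed here are filled in elsewhere; a common default, sketched below with made-up sizes rather than the driver's real constants or helpers, simply spreads rows round-robin across the RX queues so the low bits of the packet hash pick a queue:

	#include <stdio.h>

	#define TBL_ROWS 128	/* assumed indirection table size, for illustration */

	int main(void)
	{
		unsigned int table[TBL_ROWS], n_rx_queues = 4, i;

		/* Round-robin default: the hash selects a row, the row names a queue */
		for (i = 0; i < TBL_ROWS; i++)
			table[i] = i % n_rx_queues;

		printf("hash row 0x2b -> RX queue %u\n", table[0x2b]);
		return 0;
	}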
1723 | 75 | ||
1724 | /* Hook interrupt handler(s) | 76 | /* Hook interrupt handler(s) |
@@ -1731,13 +83,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx) | |||
1731 | int rc; | 83 | int rc; |
1732 | 84 | ||
1733 | if (!EFX_INT_MODE_USE_MSI(efx)) { | 85 | if (!EFX_INT_MODE_USE_MSI(efx)) { |
1734 | irq_handler_t handler; | 86 | rc = request_irq(efx->legacy_irq, |
1735 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | 87 | efx->type->irq_handle_legacy, IRQF_SHARED, |
1736 | handler = efx_legacy_interrupt; | ||
1737 | else | ||
1738 | handler = falcon_legacy_interrupt_a1; | ||
1739 | |||
1740 | rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, | ||
1741 | efx->name, efx); | 88 | efx->name, efx); |
1742 | if (rc) { | 89 | if (rc) { |
1743 | netif_err(efx, drv, efx->net_dev, | 90 | netif_err(efx, drv, efx->net_dev, |
@@ -1762,7 +109,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx) | |||
1762 | /* Hook MSI or MSI-X interrupt */ | 109 | /* Hook MSI or MSI-X interrupt */ |
1763 | n_irqs = 0; | 110 | n_irqs = 0; |
1764 | efx_for_each_channel(channel, efx) { | 111 | efx_for_each_channel(channel, efx) { |
1765 | rc = request_irq(channel->irq, efx_msi_interrupt, | 112 | rc = request_irq(channel->irq, efx->type->irq_handle_msi, |
1766 | IRQF_PROBE_SHARED, /* Not shared */ | 113 | IRQF_PROBE_SHARED, /* Not shared */ |
1767 | efx->msi_context[channel->channel].name, | 114 | efx->msi_context[channel->channel].name, |
1768 | &efx->msi_context[channel->channel]); | 115 | &efx->msi_context[channel->channel]); |
@@ -1818,154 +165,6 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) | |||
1818 | free_irq(efx->legacy_irq, efx); | 165 | free_irq(efx->legacy_irq, efx); |
1819 | } | 166 | } |
1820 | 167 | ||
1821 | /* Looks at available SRAM resources and works out how many queues we | ||
1822 | * can support, and where things like descriptor caches should live. | ||
1823 | * | ||
1824 | * SRAM is split up as follows: | ||
1825 | * 0 buftbl entries for channels | ||
1826 | * efx->vf_buftbl_base buftbl entries for SR-IOV | ||
1827 | * efx->rx_dc_base RX descriptor caches | ||
1828 | * efx->tx_dc_base TX descriptor caches | ||
1829 | */ | ||
1830 | void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) | ||
1831 | { | ||
1832 | unsigned vi_count, buftbl_min; | ||
1833 | |||
1834 | /* Account for the buffer table entries backing the datapath channels | ||
1835 | * and the descriptor caches for those channels. | ||
1836 | */ | ||
1837 | buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + | ||
1838 | efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + | ||
1839 | efx->n_channels * EFX_MAX_EVQ_SIZE) | ||
1840 | * sizeof(efx_qword_t) / EFX_BUF_SIZE); | ||
1841 | vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); | ||
1842 | |||
1843 | #ifdef CONFIG_SFC_SRIOV | ||
1844 | if (efx_sriov_wanted(efx)) { | ||
1845 | unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; | ||
1846 | |||
1847 | efx->vf_buftbl_base = buftbl_min; | ||
1848 | |||
1849 | vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; | ||
1850 | vi_count = max(vi_count, EFX_VI_BASE); | ||
1851 | buftbl_free = (sram_lim_qw - buftbl_min - | ||
1852 | vi_count * vi_dc_entries); | ||
1853 | |||
1854 | entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) * | ||
1855 | efx_vf_size(efx)); | ||
1856 | vf_limit = min(buftbl_free / entries_per_vf, | ||
1857 | (1024U - EFX_VI_BASE) >> efx->vi_scale); | ||
1858 | |||
1859 | if (efx->vf_count > vf_limit) { | ||
1860 | netif_err(efx, probe, efx->net_dev, | ||
1861 | "Reducing VF count from from %d to %d\n", | ||
1862 | efx->vf_count, vf_limit); | ||
1863 | efx->vf_count = vf_limit; | ||
1864 | } | ||
1865 | vi_count += efx->vf_count * efx_vf_size(efx); | ||
1866 | } | ||
1867 | #endif | ||
1868 | |||
1869 | efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; | ||
1870 | efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; | ||
1871 | } | ||
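To make the buffer-table accounting concrete, here is a small standalone sketch of the buftbl_min calculation; the queue sizes and the 4KB-per-entry assumption are illustrative stand-ins for the driver's real constants:

	#include <stdio.h>

	#define MAX_DMAQ_SIZE	4096u	/* assumed max RX/TX descriptors per queue */
	#define MAX_EVQ_SIZE	16384u	/* assumed max events per channel */
	#define TXQ_TYPES	4u	/* assumed TX queues per channel */
	#define BUF_SIZE	4096u	/* one buffer-table entry maps 4KB */

	int main(void)
	{
		unsigned int n_channels = 4, n_rx = 4, n_tx = 4;
		unsigned int qwords = n_rx * MAX_DMAQ_SIZE +
				      n_tx * TXQ_TYPES * MAX_DMAQ_SIZE +
				      n_channels * MAX_EVQ_SIZE;

		/* Each descriptor or event is an 8-byte qword */
		printf("buftbl_min = %u entries\n", qwords * 8 / BUF_SIZE);
		return 0;
	}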
1872 | |||
1873 | u32 efx_nic_fpga_ver(struct efx_nic *efx) | ||
1874 | { | ||
1875 | efx_oword_t altera_build; | ||
1876 | efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); | ||
1877 | return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); | ||
1878 | } | ||
1879 | |||
1880 | void efx_nic_init_common(struct efx_nic *efx) | ||
1881 | { | ||
1882 | efx_oword_t temp; | ||
1883 | |||
1884 | /* Set positions of descriptor caches in SRAM. */ | ||
1885 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); | ||
1886 | efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); | ||
1887 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); | ||
1888 | efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); | ||
1889 | |||
1890 | /* Set TX descriptor cache size. */ | ||
1891 | BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); | ||
1892 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); | ||
1893 | efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); | ||
1894 | |||
1895 | /* Set RX descriptor cache size. Set low watermark to size-8, as | ||
1896 | * this allows most efficient prefetching. | ||
1897 | */ | ||
1898 | BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); | ||
1899 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); | ||
1900 | efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); | ||
1901 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); | ||
1902 | efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); | ||
1903 | |||
1904 | /* Program INT_KER address */ | ||
1905 | EFX_POPULATE_OWORD_2(temp, | ||
1906 | FRF_AZ_NORM_INT_VEC_DIS_KER, | ||
1907 | EFX_INT_MODE_USE_MSI(efx), | ||
1908 | FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); | ||
1909 | efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); | ||
1910 | |||
1911 | if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) | ||
1912 | /* Use an interrupt level unused by event queues */ | ||
1913 | efx->irq_level = 0x1f; | ||
1914 | else | ||
1915 | /* Use a valid MSI-X vector */ | ||
1916 | efx->irq_level = 0; | ||
1917 | |||
1918 | /* Enable all the genuinely fatal interrupts. (They are still | ||
1919 | * masked by the overall interrupt mask, controlled by | ||
1920 | * falcon_interrupts()). | ||
1921 | * | ||
1922 | * Note: All other fatal interrupts are enabled | ||
1923 | */ | ||
1924 | EFX_POPULATE_OWORD_3(temp, | ||
1925 | FRF_AZ_ILL_ADR_INT_KER_EN, 1, | ||
1926 | FRF_AZ_RBUF_OWN_INT_KER_EN, 1, | ||
1927 | FRF_AZ_TBUF_OWN_INT_KER_EN, 1); | ||
1928 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | ||
1929 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); | ||
1930 | EFX_INVERT_OWORD(temp); | ||
1931 | efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); | ||
1932 | |||
1933 | efx_nic_push_rx_indir_table(efx); | ||
1934 | |||
1935 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be | ||
1936 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. | ||
1937 | */ | ||
1938 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); | ||
1939 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); | ||
1940 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); | ||
1941 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); | ||
1942 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); | ||
1943 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); | ||
1944 | /* Enable SW_EV to inherit in char driver - assume harmless here */ | ||
1945 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); | ||
1946 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | ||
1947 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); | ||
1948 | /* Disable hardware watchdog which can misfire */ | ||
1949 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); | ||
1950 | /* Squash TX of packets of 16 bytes or less */ | ||
1951 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | ||
1952 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); | ||
1953 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); | ||
1954 | |||
1955 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | ||
1956 | EFX_POPULATE_OWORD_4(temp, | ||
1957 | /* Default values */ | ||
1958 | FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, | ||
1959 | FRF_BZ_TX_PACE_SB_AF, 0xb, | ||
1960 | FRF_BZ_TX_PACE_FB_BASE, 0, | ||
1961 | /* Allow large pace values in the | ||
1962 | * fast bin. */ | ||
1963 | FRF_BZ_TX_PACE_BIN_TH, | ||
1964 | FFE_BZ_TX_PACE_RESERVED); | ||
1965 | efx_writeo(efx, &temp, FR_BZ_TX_PACE); | ||
1966 | } | ||
1967 | } | ||
1968 | |||
1969 | /* Register dump */ | 168 | /* Register dump */ |
1970 | 169 | ||
1971 | #define REGISTER_REVISION_A 1 | 170 | #define REGISTER_REVISION_A 1 |
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 21f662cc39a4..25e25b635798 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h | |||
@@ -34,7 +34,7 @@ static inline int efx_nic_rev(struct efx_nic *efx) | |||
34 | return efx->type->revision; | 34 | return efx->type->revision; |
35 | } | 35 | } |
36 | 36 | ||
37 | extern u32 efx_nic_fpga_ver(struct efx_nic *efx); | 37 | extern u32 efx_farch_fpga_ver(struct efx_nic *efx); |
38 | 38 | ||
39 | /* NIC has two interlinked PCI functions for the same port. */ | 39 | /* NIC has two interlinked PCI functions for the same port. */ |
40 | static inline bool efx_nic_is_dual_func(struct efx_nic *efx) | 40 | static inline bool efx_nic_is_dual_func(struct efx_nic *efx) |
@@ -42,6 +42,65 @@ static inline bool efx_nic_is_dual_func(struct efx_nic *efx) | |||
42 | return efx_nic_rev(efx) < EFX_REV_FALCON_B0; | 42 | return efx_nic_rev(efx) < EFX_REV_FALCON_B0; |
43 | } | 43 | } |
44 | 44 | ||
45 | /* Read the current event from the event queue */ | ||
46 | static inline efx_qword_t *efx_event(struct efx_channel *channel, | ||
47 | unsigned int index) | ||
48 | { | ||
49 | return ((efx_qword_t *) (channel->eventq.buf.addr)) + | ||
50 | (index & channel->eventq_mask); | ||
51 | } | ||
52 | |||
53 | /* See if an event is present | ||
54 | * | ||
55 | * We check both the high and low dword of the event for all ones. We | ||
56 | * wrote all ones when we cleared the event, and no valid event can | ||
57 | * have all ones in either its high or low dwords. This approach is | ||
58 | * robust against reordering. | ||
59 | * | ||
60 | * Note that using a single 64-bit comparison is incorrect; even | ||
61 | * though the CPU read will be atomic, the DMA write may not be. | ||
62 | */ | ||
63 | static inline int efx_event_present(efx_qword_t *event) | ||
64 | { | ||
65 | return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | | ||
66 | EFX_DWORD_IS_ALL_ONES(event->dword[1])); | ||
67 | } | ||
68 | |||
69 | /* Returns a pointer to the specified transmit descriptor in the TX | ||
70 | * descriptor queue belonging to the specified channel. | ||
71 | */ | ||
72 | static inline efx_qword_t * | ||
73 | efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) | ||
74 | { | ||
75 | return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; | ||
76 | } | ||
77 | |||
78 | /* Decide whether to push a TX descriptor to the NIC vs merely writing | ||
79 | * the doorbell. This can reduce latency when we are adding a single | ||
80 | * descriptor to an empty queue, but is otherwise pointless. Further, | ||
81 | * Falcon and Siena have hardware bugs (SF bug 33851) that may be | ||
82 | * triggered if we don't check this. | ||
83 | */ | ||
84 | static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, | ||
85 | unsigned int write_count) | ||
86 | { | ||
87 | unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); | ||
88 | |||
89 | if (empty_read_count == 0) | ||
90 | return false; | ||
91 | |||
92 | tx_queue->empty_read_count = 0; | ||
93 | return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 | ||
94 | && tx_queue->write_count - write_count == 1; | ||
95 | } | ||
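A small standalone model of the decision above, assuming the valid flag is the counter's top bit (as the mask suggests) and using invented counter values:

	#include <assert.h>

	#define EMPTY_COUNT_VALID 0x80000000u	/* assumed: top bit marks a latched value */

	static int may_push(unsigned int empty_read_count,
			    unsigned int old_write_count,
			    unsigned int new_write_count)
	{
		if (empty_read_count == 0)
			return 0;	/* queue has not been seen empty */
		return ((empty_read_count ^ old_write_count) & ~EMPTY_COUNT_VALID) == 0 &&
		       new_write_count - old_write_count == 1;
	}

	int main(void)
	{
		/* Completion path saw the queue drain when read_count was 42 */
		unsigned int latched = 42 | EMPTY_COUNT_VALID;

		assert(may_push(latched, 42, 43));	/* one descriptor onto an empty queue: push */
		assert(!may_push(latched, 42, 45));	/* a burst of three: doorbell only */
		assert(!may_push(0, 42, 43));		/* never seen empty: doorbell only */
		return 0;
	}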
96 | |||
97 | /* Returns a pointer to the specified descriptor in the RX descriptor queue */ | ||
98 | static inline efx_qword_t * | ||
99 | efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) | ||
100 | { | ||
101 | return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; | ||
102 | } | ||
103 | |||
45 | enum { | 104 | enum { |
46 | PHY_TYPE_NONE = 0, | 105 | PHY_TYPE_NONE = 0, |
47 | PHY_TYPE_TXC43128 = 1, | 106 | PHY_TYPE_TXC43128 = 1, |
@@ -258,25 +317,93 @@ extern const struct efx_nic_type siena_a0_nic_type; | |||
258 | extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); | 317 | extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); |
259 | 318 | ||
260 | /* TX data path */ | 319 | /* TX data path */ |
261 | extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); | 320 | static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) |
262 | extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue); | 321 | { |
263 | extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue); | 322 | return tx_queue->efx->type->tx_probe(tx_queue); |
264 | extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue); | 323 | } |
324 | static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue) | ||
325 | { | ||
326 | tx_queue->efx->type->tx_init(tx_queue); | ||
327 | } | ||
328 | static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) | ||
329 | { | ||
330 | tx_queue->efx->type->tx_remove(tx_queue); | ||
331 | } | ||
332 | static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) | ||
333 | { | ||
334 | tx_queue->efx->type->tx_write(tx_queue); | ||
335 | } | ||
265 | 336 | ||
266 | /* RX data path */ | 337 | /* RX data path */ |
267 | extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue); | 338 | static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) |
268 | extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue); | 339 | { |
269 | extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue); | 340 | return rx_queue->efx->type->rx_probe(rx_queue); |
270 | extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue); | 341 | } |
271 | extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue); | 342 | static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue) |
343 | { | ||
344 | rx_queue->efx->type->rx_init(rx_queue); | ||
345 | } | ||
346 | static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) | ||
347 | { | ||
348 | rx_queue->efx->type->rx_remove(rx_queue); | ||
349 | } | ||
350 | static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) | ||
351 | { | ||
352 | rx_queue->efx->type->rx_write(rx_queue); | ||
353 | } | ||
354 | static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) | ||
355 | { | ||
356 | rx_queue->efx->type->rx_defer_refill(rx_queue); | ||
357 | } | ||
272 | 358 | ||
273 | /* Event data path */ | 359 | /* Event data path */ |
274 | extern int efx_nic_probe_eventq(struct efx_channel *channel); | 360 | static inline int efx_nic_probe_eventq(struct efx_channel *channel) |
275 | extern void efx_nic_init_eventq(struct efx_channel *channel); | 361 | { |
276 | extern void efx_nic_fini_eventq(struct efx_channel *channel); | 362 | return channel->efx->type->ev_probe(channel); |
277 | extern void efx_nic_remove_eventq(struct efx_channel *channel); | 363 | } |
278 | extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); | 364 | static inline void efx_nic_init_eventq(struct efx_channel *channel) |
279 | extern void efx_nic_eventq_read_ack(struct efx_channel *channel); | 365 | { |
366 | channel->efx->type->ev_init(channel); | ||
367 | } | ||
368 | static inline void efx_nic_fini_eventq(struct efx_channel *channel) | ||
369 | { | ||
370 | channel->efx->type->ev_fini(channel); | ||
371 | } | ||
372 | static inline void efx_nic_remove_eventq(struct efx_channel *channel) | ||
373 | { | ||
374 | channel->efx->type->ev_remove(channel); | ||
375 | } | ||
376 | static inline int | ||
377 | efx_nic_process_eventq(struct efx_channel *channel, int quota) | ||
378 | { | ||
379 | return channel->efx->type->ev_process(channel, quota); | ||
380 | } | ||
381 | static inline void efx_nic_eventq_read_ack(struct efx_channel *channel) | ||
382 | { | ||
383 | channel->efx->type->ev_read_ack(channel); | ||
384 | } | ||
385 | extern void efx_nic_event_test_start(struct efx_channel *channel); | ||
386 | |||
387 | /* Falcon/Siena queue operations */ | ||
388 | extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); | ||
389 | extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue); | ||
390 | extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue); | ||
391 | extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue); | ||
392 | extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue); | ||
393 | extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue); | ||
394 | extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue); | ||
395 | extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue); | ||
396 | extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue); | ||
397 | extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue); | ||
398 | extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); | ||
399 | extern int efx_farch_ev_probe(struct efx_channel *channel); | ||
400 | extern void efx_farch_ev_init(struct efx_channel *channel); | ||
401 | extern void efx_farch_ev_fini(struct efx_channel *channel); | ||
402 | extern void efx_farch_ev_remove(struct efx_channel *channel); | ||
403 | extern int efx_farch_ev_process(struct efx_channel *channel, int quota); | ||
404 | extern void efx_farch_ev_read_ack(struct efx_channel *channel); | ||
405 | extern void efx_farch_ev_test_generate(struct efx_channel *channel); | ||
406 | |||
280 | extern bool efx_nic_event_present(struct efx_channel *channel); | 407 | extern bool efx_nic_event_present(struct efx_channel *channel); |
281 | 408 | ||
282 | /* Some statistics are computed as A - B where A and B each increase | 409 | /* Some statistics are computed as A - B where A and B each increase |
@@ -297,15 +424,18 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff) | |||
297 | *stat = diff; | 424 | *stat = diff; |
298 | } | 425 | } |
299 | 426 | ||
300 | /* Interrupts and test events */ | 427 | /* Interrupts */ |
301 | extern int efx_nic_init_interrupt(struct efx_nic *efx); | 428 | extern int efx_nic_init_interrupt(struct efx_nic *efx); |
302 | extern void efx_nic_enable_interrupts(struct efx_nic *efx); | ||
303 | extern void efx_nic_event_test_start(struct efx_channel *channel); | ||
304 | extern void efx_nic_irq_test_start(struct efx_nic *efx); | 429 | extern void efx_nic_irq_test_start(struct efx_nic *efx); |
305 | extern void efx_nic_disable_interrupts(struct efx_nic *efx); | ||
306 | extern void efx_nic_fini_interrupt(struct efx_nic *efx); | 430 | extern void efx_nic_fini_interrupt(struct efx_nic *efx); |
307 | extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx); | 431 | |
308 | extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id); | 432 | /* Falcon/Siena interrupts */ |
433 | extern void efx_farch_irq_enable_master(struct efx_nic *efx); | ||
434 | extern void efx_farch_irq_test_generate(struct efx_nic *efx); | ||
435 | extern void efx_farch_irq_disable_master(struct efx_nic *efx); | ||
436 | extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); | ||
437 | extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); | ||
438 | extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); | ||
309 | 439 | ||
310 | static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) | 440 | static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) |
311 | { | 441 | { |
@@ -317,36 +447,40 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) | |||
317 | } | 447 | } |
318 | 448 | ||
319 | /* Global Resources */ | 449 | /* Global Resources */ |
320 | extern int efx_nic_flush_queues(struct efx_nic *efx); | 450 | extern int efx_nic_flush_queues(struct efx_nic *efx); |
321 | extern void siena_prepare_flush(struct efx_nic *efx); | 451 | extern void siena_prepare_flush(struct efx_nic *efx); |
452 | extern int efx_farch_fini_dmaq(struct efx_nic *efx); | ||
322 | extern void siena_finish_flush(struct efx_nic *efx); | 453 | extern void siena_finish_flush(struct efx_nic *efx); |
323 | extern void falcon_start_nic_stats(struct efx_nic *efx); | 454 | extern void falcon_start_nic_stats(struct efx_nic *efx); |
324 | extern void falcon_stop_nic_stats(struct efx_nic *efx); | 455 | extern void falcon_stop_nic_stats(struct efx_nic *efx); |
325 | extern int falcon_reset_xaui(struct efx_nic *efx); | 456 | extern int falcon_reset_xaui(struct efx_nic *efx); |
326 | extern void | 457 | extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); |
327 | efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); | 458 | extern void efx_farch_init_common(struct efx_nic *efx); |
328 | extern void efx_nic_init_common(struct efx_nic *efx); | 459 | static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx) |
329 | extern void efx_nic_push_rx_indir_table(struct efx_nic *efx); | 460 | { |
461 | efx->type->rx_push_indir_table(efx); | ||
462 | } | ||
463 | extern void efx_farch_rx_push_indir_table(struct efx_nic *efx); | ||
330 | 464 | ||
331 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, | 465 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, |
332 | unsigned int len, gfp_t gfp_flags); | 466 | unsigned int len, gfp_t gfp_flags); |
333 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); | 467 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); |
334 | 468 | ||
335 | /* Tests */ | 469 | /* Tests */ |
336 | struct efx_nic_register_test { | 470 | struct efx_farch_register_test { |
337 | unsigned address; | 471 | unsigned address; |
338 | efx_oword_t mask; | 472 | efx_oword_t mask; |
339 | }; | 473 | }; |
340 | extern int efx_nic_test_registers(struct efx_nic *efx, | 474 | extern int efx_farch_test_registers(struct efx_nic *efx, |
341 | const struct efx_nic_register_test *regs, | 475 | const struct efx_farch_register_test *regs, |
342 | size_t n_regs); | 476 | size_t n_regs); |
343 | 477 | ||
344 | extern size_t efx_nic_get_regs_len(struct efx_nic *efx); | 478 | extern size_t efx_nic_get_regs_len(struct efx_nic *efx); |
345 | extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); | 479 | extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); |
346 | 480 | ||
347 | #define EFX_MAX_FLUSH_TIME 5000 | 481 | #define EFX_MAX_FLUSH_TIME 5000 |
348 | 482 | ||
349 | extern void efx_generate_event(struct efx_nic *efx, unsigned int evq, | 483 | extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, |
350 | efx_qword_t *event); | 484 | efx_qword_t *event); |
351 | 485 | ||
352 | #endif /* EFX_NIC_H */ | 486 | #endif /* EFX_NIC_H */ |
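
The nic.h hunks above show the shape of the refactor: the former extern efx_nic_* entry points become one-line static inline wrappers that dispatch through method pointers on struct efx_nic_type, while the Falcon/Siena implementations are exported under efx_farch_* names for the per-NIC-type structures (such as siena_a0_nic_type below) to plug in. A minimal standalone sketch of the same dispatch pattern, using hypothetical names rather than the driver's actual definitions:

/* Sketch only: a cut-down model of method-pointer dispatch.
 * my_nic_type, my_channel and farch_ev_process are hypothetical names. */
struct my_channel;

struct my_nic_type {
	int (*ev_process)(struct my_channel *channel, int quota);
};

struct my_nic {
	const struct my_nic_type *type;
};

struct my_channel {
	struct my_nic *nic;
};

/* Generic wrapper: callers stay NIC-agnostic, as efx_nic_process_eventq() does. */
static inline int my_nic_process_eventq(struct my_channel *channel, int quota)
{
	return channel->nic->type->ev_process(channel, quota);
}

/* Architecture-specific implementation, analogous to efx_farch_ev_process(). */
static int farch_ev_process(struct my_channel *channel, int quota)
{
	/* ... walk the event queue, handling at most 'quota' events ... */
	(void)channel;
	(void)quota;
	return 0;
}

/* Per-NIC descriptor wires the implementation in, like siena_a0_nic_type below. */
static const struct my_nic_type my_siena_type = {
	.ev_process = farch_ev_process,
};

The indirection keeps generic callers such as efx.c unchanged while letting each NIC generation supply its own queue and interrupt operations.
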
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index b4c1d4310afe..6a833d531b86 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c | |||
@@ -63,7 +63,7 @@ void siena_finish_flush(struct efx_nic *efx) | |||
63 | efx_mcdi_set_mac(efx); | 63 | efx_mcdi_set_mac(efx); |
64 | } | 64 | } |
65 | 65 | ||
66 | static const struct efx_nic_register_test siena_register_tests[] = { | 66 | static const struct efx_farch_register_test siena_register_tests[] = { |
67 | { FR_AZ_ADR_REGION, | 67 | { FR_AZ_ADR_REGION, |
68 | EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, | 68 | EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, |
69 | { FR_CZ_USR_EV_CFG, | 69 | { FR_CZ_USR_EV_CFG, |
@@ -107,8 +107,8 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) | |||
107 | goto out; | 107 | goto out; |
108 | 108 | ||
109 | tests->registers = | 109 | tests->registers = |
110 | efx_nic_test_registers(efx, siena_register_tests, | 110 | efx_farch_test_registers(efx, siena_register_tests, |
111 | ARRAY_SIZE(siena_register_tests)) | 111 | ARRAY_SIZE(siena_register_tests)) |
112 | ? -1 : 1; | 112 | ? -1 : 1; |
113 | 113 | ||
114 | rc = efx_mcdi_reset(efx, reset_method); | 114 | rc = efx_mcdi_reset(efx, reset_method); |
@@ -184,7 +184,7 @@ static void siena_dimension_resources(struct efx_nic *efx) | |||
184 | * the buffer table and descriptor caches. In theory we can | 184 | * the buffer table and descriptor caches. In theory we can |
185 | * map both blocks to one port, but we don't. | 185 | * map both blocks to one port, but we don't. |
186 | */ | 186 | */ |
187 | efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); | 187 | efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); |
188 | } | 188 | } |
189 | 189 | ||
190 | static int siena_probe_nic(struct efx_nic *efx) | 190 | static int siena_probe_nic(struct efx_nic *efx) |
@@ -200,7 +200,7 @@ static int siena_probe_nic(struct efx_nic *efx) | |||
200 | return -ENOMEM; | 200 | return -ENOMEM; |
201 | efx->nic_data = nic_data; | 201 | efx->nic_data = nic_data; |
202 | 202 | ||
203 | if (efx_nic_fpga_ver(efx) != 0) { | 203 | if (efx_farch_fpga_ver(efx) != 0) { |
204 | netif_err(efx, probe, efx->net_dev, | 204 | netif_err(efx, probe, efx->net_dev, |
205 | "Siena FPGA not supported\n"); | 205 | "Siena FPGA not supported\n"); |
206 | rc = -ENODEV; | 206 | rc = -ENODEV; |
@@ -351,7 +351,7 @@ static int siena_init_nic(struct efx_nic *efx) | |||
351 | EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); | 351 | EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); |
352 | efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); | 352 | efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); |
353 | 353 | ||
354 | efx_nic_init_common(efx); | 354 | efx_farch_init_common(efx); |
355 | return 0; | 355 | return 0; |
356 | } | 356 | } |
357 | 357 | ||
@@ -705,6 +705,28 @@ const struct efx_nic_type siena_a0_nic_type = { | |||
705 | .mcdi_poll_response = siena_mcdi_poll_response, | 705 | .mcdi_poll_response = siena_mcdi_poll_response, |
706 | .mcdi_read_response = siena_mcdi_read_response, | 706 | .mcdi_read_response = siena_mcdi_read_response, |
707 | .mcdi_poll_reboot = siena_mcdi_poll_reboot, | 707 | .mcdi_poll_reboot = siena_mcdi_poll_reboot, |
708 | .irq_enable_master = efx_farch_irq_enable_master, | ||
709 | .irq_test_generate = efx_farch_irq_test_generate, | ||
710 | .irq_disable_non_ev = efx_farch_irq_disable_master, | ||
711 | .irq_handle_msi = efx_farch_msi_interrupt, | ||
712 | .irq_handle_legacy = efx_farch_legacy_interrupt, | ||
713 | .tx_probe = efx_farch_tx_probe, | ||
714 | .tx_init = efx_farch_tx_init, | ||
715 | .tx_remove = efx_farch_tx_remove, | ||
716 | .tx_write = efx_farch_tx_write, | ||
717 | .rx_push_indir_table = efx_farch_rx_push_indir_table, | ||
718 | .rx_probe = efx_farch_rx_probe, | ||
719 | .rx_init = efx_farch_rx_init, | ||
720 | .rx_remove = efx_farch_rx_remove, | ||
721 | .rx_write = efx_farch_rx_write, | ||
722 | .rx_defer_refill = efx_farch_rx_defer_refill, | ||
723 | .ev_probe = efx_farch_ev_probe, | ||
724 | .ev_init = efx_farch_ev_init, | ||
725 | .ev_fini = efx_farch_ev_fini, | ||
726 | .ev_remove = efx_farch_ev_remove, | ||
727 | .ev_process = efx_farch_ev_process, | ||
728 | .ev_read_ack = efx_farch_ev_read_ack, | ||
729 | .ev_test_generate = efx_farch_ev_test_generate, | ||
708 | 730 | ||
709 | .revision = EFX_REV_SIENA_A0, | 731 | .revision = EFX_REV_SIENA_A0, |
710 | .mem_map_size = (FR_CZ_MC_TREG_SMEM + | 732 | .mem_map_size = (FR_CZ_MC_TREG_SMEM + |
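
The register self-test exercised in siena_test_chip() above is table-driven: each struct efx_farch_register_test entry pairs a register address with a mask of the bits that may safely be toggled, and efx_farch_test_registers() (declared in nic.h above) walks the table. A rough, self-contained sketch of how such a masked write/read-back test is typically structured; the helper names here (read_reg, write_reg, reg_test) are hypothetical stand-ins, not sfc functions:

/* Sketch only: the general shape of a masked register read/write self-test. */
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

struct reg_test {
	unsigned addr;
	uint32_t mask;		/* bits that may safely be toggled */
};

extern uint32_t read_reg(unsigned addr);		/* hypothetical MMIO read */
extern void write_reg(unsigned addr, uint32_t value);	/* hypothetical MMIO write */

static bool test_registers(const struct reg_test *regs, size_t n_regs)
{
	size_t i;

	for (i = 0; i < n_regs; i++) {
		uint32_t orig = read_reg(regs[i].addr);
		uint32_t pattern = 0xA5A5A5A5 & regs[i].mask;
		bool ok;

		/* Write a pattern restricted to the writable bits, verify it
		 * reads back, then restore the original contents. */
		write_reg(regs[i].addr, (orig & ~regs[i].mask) | pattern);
		ok = (read_reg(regs[i].addr) & regs[i].mask) == pattern;
		write_reg(regs[i].addr, orig);
		if (!ok)
			return false;
	}
	return true;
}

siena_test_chip() then records the outcome in tests->registers as 1 or -1, as shown in the hunk above.
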
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 4d214bce8969..4b8eef962faa 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c | |||
@@ -464,8 +464,9 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) | |||
464 | VFDI_EV_SEQ, (vf->msg_seqno & 0xff), | 464 | VFDI_EV_SEQ, (vf->msg_seqno & 0xff), |
465 | VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS); | 465 | VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS); |
466 | ++vf->msg_seqno; | 466 | ++vf->msg_seqno; |
467 | efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx), | 467 | efx_farch_generate_event(efx, |
468 | &event); | 468 | EFX_VI_BASE + vf->index * efx_vf_size(efx), |
469 | &event); | ||
469 | } | 470 | } |
470 | 471 | ||
471 | static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset, | 472 | static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset, |
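
The siena_sriov.c hunk above is a caller of the renamed efx_farch_generate_event(): a VFDI status event is assembled field by field (VFDI_EV_SEQ, VFDI_EV_TYPE) and then injected into the VF's event queue. A cut-down sketch of that assemble-then-inject pattern; the field positions, widths and helper names below are hypothetical and do not reflect the real VFDI encoding:

/* Sketch only: packing fields into an event word and handing it to a
 * "generate event" hook, as __efx_sriov_push_vf_status() does above. */
#include <stdint.h>

struct qword { uint64_t u64; };

/* Pack 'val' into bits [lbn, lbn + width) of the event word. */
static void set_field(struct qword *ev, unsigned lbn, unsigned width, uint64_t val)
{
	uint64_t ones = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;
	uint64_t mask = ones << lbn;

	ev->u64 = (ev->u64 & ~mask) | ((val << lbn) & mask);
}

static void push_status_event(void (*generate)(unsigned evq, struct qword *ev),
			      unsigned evq, unsigned seqno)
{
	struct qword ev = { 0 };

	set_field(&ev, 0, 8, seqno & 0xff);	/* sequence number, cf. VFDI_EV_SEQ */
	set_field(&ev, 8, 8, 1);		/* event type, cf. VFDI_EV_TYPE_STATUS */
	generate(evq, &ev);			/* cf. efx_farch_generate_event() */
}
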