author | Ben Hutchings <bhutchings@solarflare.com> | 2013-08-21 14:51:04 -0400
---|---|---
committer | Ben Hutchings <bhutchings@solarflare.com> | 2013-08-21 15:19:05 -0400
commit | 86094f7f38ff711f3db8497fcb4d2e109100f497 (patch) |
tree | c9c8d9418b32d0a2abf6220a22328ceeb9130244 /drivers/net/ethernet/sfc/farch.c |
parent | e42c3d85af629697699c89aecba481527a1da898 (diff) |
sfc: Move and rename Falcon/Siena common NIC operations
Add efx_nic_type operations for the many efx_nic functions that need
to be implemented differently on EF10. For now, change most of the
existing efx_nic_*() functions into inline wrappers. As a later step,
we may be able to improve branch prediction for operations used on the
fast path by copying the pointers into each queue/channel structure.
Move the Falcon/Siena implementations to new file farch.c and rename
the functions and static data to use a prefix of 'efx_farch_'.
Move efx_may_push_tx_desc() to nic.h, as the EF10 TX code will also
use it.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
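
As a minimal sketch (an editor's illustration, not part of the commit), the dispatch pattern the message describes looks roughly like the following; the type and member names are simplified stand-ins rather than the real efx_nic_type layout:

/* Per-generation operations live in a method table; the old efx_nic_*()
 * entry points become thin inline wrappers that dispatch through it.
 */
struct example_nic;

struct example_nic_type {
        int (*ev_process)(struct example_nic *nic, int budget);
        void (*tx_write)(struct example_nic *nic, unsigned int queue);
};

struct example_nic {
        const struct example_nic_type *type;    /* farch or EF10 ops */
};

/* Old entry point kept as an inline wrapper around the method table */
static inline int example_nic_process_eventq(struct example_nic *nic,
                                             int budget)
{
        return nic->type->ev_process(nic, budget);
}

Copying individual pointers into each queue or channel structure, as suggested above, would shorten this pointer chase on the fast path.
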
Diffstat (limited to 'drivers/net/ethernet/sfc/farch.c')
-rw-r--r-- | drivers/net/ethernet/sfc/farch.c | 1781
1 file changed, 1781 insertions, 0 deletions
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
new file mode 100644
index 000000000000..c3d07c556569
--- /dev/null
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -0,0 +1,1781 @@
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2011 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/bitops.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include "net_driver.h" | ||
18 | #include "bitfield.h" | ||
19 | #include "efx.h" | ||
20 | #include "nic.h" | ||
21 | #include "farch_regs.h" | ||
22 | #include "io.h" | ||
23 | #include "workarounds.h" | ||
24 | |||
25 | /* Falcon-architecture (SFC4000 and SFC9000-family) support */ | ||
26 | |||
27 | /************************************************************************** | ||
28 | * | ||
29 | * Configurable values | ||
30 | * | ||
31 | ************************************************************************** | ||
32 | */ | ||
33 | |||
34 | /* This is set to 16 for a good reason. In summary, if larger than | ||
35 | * 16, the descriptor cache holds more than a default socket | ||
36 | * buffer's worth of packets (for UDP we can only have at most one | ||
37 | * socket buffer's worth outstanding). This combined with the fact | ||
38 | * that we only get 1 TX event per descriptor cache means the NIC | ||
39 | * goes idle. | ||
40 | */ | ||
41 | #define TX_DC_ENTRIES 16 | ||
42 | #define TX_DC_ENTRIES_ORDER 1 | ||
43 | |||
44 | #define RX_DC_ENTRIES 64 | ||
45 | #define RX_DC_ENTRIES_ORDER 3 | ||
46 | |||
47 | /* If EFX_MAX_INT_ERRORS internal errors occur within | ||
48 | * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and | ||
49 | * disable it. | ||
50 | */ | ||
51 | #define EFX_INT_ERROR_EXPIRE 3600 | ||
52 | #define EFX_MAX_INT_ERRORS 5 | ||
53 | |||
54 | /* Depth of RX flush request fifo */ | ||
55 | #define EFX_RX_FLUSH_COUNT 4 | ||
56 | |||
57 | /* Driver generated events */ | ||
58 | #define _EFX_CHANNEL_MAGIC_TEST 0x000101 | ||
59 | #define _EFX_CHANNEL_MAGIC_FILL 0x000102 | ||
60 | #define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103 | ||
61 | #define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104 | ||
62 | |||
63 | #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) | ||
64 | #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) | ||
65 | |||
66 | #define EFX_CHANNEL_MAGIC_TEST(_channel) \ | ||
67 | _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) | ||
68 | #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ | ||
69 | _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ | ||
70 | efx_rx_queue_index(_rx_queue)) | ||
71 | #define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ | ||
72 | _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \ | ||
73 | efx_rx_queue_index(_rx_queue)) | ||
74 | #define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ | ||
75 | _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ | ||
76 | (_tx_queue)->queue) | ||
77 | |||
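
A worked example of the encoding above (an editor's aside, not part of the diff): for channel 3, EFX_CHANNEL_MAGIC_TEST() evaluates to (0x000101 << 8) | 3 = 0x10103; the handler recovers the code with a right shift, while the low 8 bits carry the channel or queue index. The constants below are copied from the macros in this file:

#include <stdio.h>

#define MAGIC_TEST              0x000101
#define MAGIC(code, data)       ((code) << 8 | (data))
#define MAGIC_CODE(magic)       ((magic) >> 8)

int main(void)
{
        unsigned int channel = 3;
        unsigned int magic = MAGIC(MAGIC_TEST, channel);

        /* Prints: magic=0x10103 code=0x101 index=3 */
        printf("magic=%#x code=%#x index=%u\n",
               magic, MAGIC_CODE(magic), magic & 0xff);
        return 0;
}
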
78 | static void efx_farch_magic_event(struct efx_channel *channel, u32 magic); | ||
79 | |||
80 | /************************************************************************** | ||
81 | * | ||
82 | * Hardware access | ||
83 | * | ||
84 | **************************************************************************/ | ||
85 | |||
86 | static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, | ||
87 | unsigned int index) | ||
88 | { | ||
89 | efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, | ||
90 | value, index); | ||
91 | } | ||
92 | |||
93 | static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, | ||
94 | const efx_oword_t *mask) | ||
95 | { | ||
96 | return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || | ||
97 | ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); | ||
98 | } | ||
99 | |||
100 | int efx_farch_test_registers(struct efx_nic *efx, | ||
101 | const struct efx_farch_register_test *regs, | ||
102 | size_t n_regs) | ||
103 | { | ||
104 | unsigned address = 0, i, j; | ||
105 | efx_oword_t mask, imask, original, reg, buf; | ||
106 | |||
107 | for (i = 0; i < n_regs; ++i) { | ||
108 | address = regs[i].address; | ||
109 | mask = imask = regs[i].mask; | ||
110 | EFX_INVERT_OWORD(imask); | ||
111 | |||
112 | efx_reado(efx, &original, address); | ||
113 | |||
114 | /* bit sweep on and off */ | ||
115 | for (j = 0; j < 128; j++) { | ||
116 | if (!EFX_EXTRACT_OWORD32(mask, j, j)) | ||
117 | continue; | ||
118 | |||
119 | /* Test this testable bit can be set in isolation */ | ||
120 | EFX_AND_OWORD(reg, original, mask); | ||
121 | EFX_SET_OWORD32(reg, j, j, 1); | ||
122 | |||
123 | efx_writeo(efx, ®, address); | ||
124 | efx_reado(efx, &buf, address); | ||
125 | |||
126 | if (efx_masked_compare_oword(®, &buf, &mask)) | ||
127 | goto fail; | ||
128 | |||
129 | /* Test this testable bit can be cleared in isolation */ | ||
130 | EFX_OR_OWORD(reg, original, mask); | ||
131 | EFX_SET_OWORD32(reg, j, j, 0); | ||
132 | |||
133 | efx_writeo(efx, ®, address); | ||
134 | efx_reado(efx, &buf, address); | ||
135 | |||
136 | if (efx_masked_compare_oword(®, &buf, &mask)) | ||
137 | goto fail; | ||
138 | } | ||
139 | |||
140 | efx_writeo(efx, &original, address); | ||
141 | } | ||
142 | |||
143 | return 0; | ||
144 | |||
145 | fail: | ||
146 | netif_err(efx, hw, efx->net_dev, | ||
147 | "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT | ||
148 | " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), | ||
149 | EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); | ||
150 | return -EIO; | ||
151 | } | ||
152 | |||
153 | /************************************************************************** | ||
154 | * | ||
155 | * Special buffer handling | ||
156 | * Special buffers are used for event queues and the TX and RX | ||
157 | * descriptor rings. | ||
158 | * | ||
159 | *************************************************************************/ | ||
160 | |||
161 | /* | ||
162 | * Initialise a special buffer | ||
163 | * | ||
164 | * This will define a buffer (previously allocated via | ||
165 | * efx_alloc_special_buffer()) in the buffer table, allowing | ||
166 | * it to be used for event queues, descriptor rings etc. | ||
167 | */ | ||
168 | static void | ||
169 | efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
170 | { | ||
171 | efx_qword_t buf_desc; | ||
172 | unsigned int index; | ||
173 | dma_addr_t dma_addr; | ||
174 | int i; | ||
175 | |||
176 | EFX_BUG_ON_PARANOID(!buffer->buf.addr); | ||
177 | |||
178 | /* Write buffer descriptors to NIC */ | ||
179 | for (i = 0; i < buffer->entries; i++) { | ||
180 | index = buffer->index + i; | ||
181 | dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE); | ||
182 | netif_dbg(efx, probe, efx->net_dev, | ||
183 | "mapping special buffer %d at %llx\n", | ||
184 | index, (unsigned long long)dma_addr); | ||
185 | EFX_POPULATE_QWORD_3(buf_desc, | ||
186 | FRF_AZ_BUF_ADR_REGION, 0, | ||
187 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, | ||
188 | FRF_AZ_BUF_OWNER_ID_FBUF, 0); | ||
189 | efx_write_buf_tbl(efx, &buf_desc, index); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | /* Unmaps a buffer and clears the buffer table entries */ | ||
194 | static void | ||
195 | efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
196 | { | ||
197 | efx_oword_t buf_tbl_upd; | ||
198 | unsigned int start = buffer->index; | ||
199 | unsigned int end = (buffer->index + buffer->entries - 1); | ||
200 | |||
201 | if (!buffer->entries) | ||
202 | return; | ||
203 | |||
204 | netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", | ||
205 | buffer->index, buffer->index + buffer->entries - 1); | ||
206 | |||
207 | EFX_POPULATE_OWORD_4(buf_tbl_upd, | ||
208 | FRF_AZ_BUF_UPD_CMD, 0, | ||
209 | FRF_AZ_BUF_CLR_CMD, 1, | ||
210 | FRF_AZ_BUF_CLR_END_ID, end, | ||
211 | FRF_AZ_BUF_CLR_START_ID, start); | ||
212 | efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); | ||
213 | } | ||
214 | |||
215 | /* | ||
216 | * Allocate a new special buffer | ||
217 | * | ||
218 | * This allocates memory for a new buffer, clears it and allocates a | ||
219 | * new buffer ID range. It does not write into the buffer table. | ||
220 | * | ||
221 | * This call will allocate 4KB buffers, since 8KB buffers can't be | ||
222 | * used for event queues and descriptor rings. | ||
223 | */ | ||
224 | static int efx_alloc_special_buffer(struct efx_nic *efx, | ||
225 | struct efx_special_buffer *buffer, | ||
226 | unsigned int len) | ||
227 | { | ||
228 | len = ALIGN(len, EFX_BUF_SIZE); | ||
229 | |||
230 | if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) | ||
231 | return -ENOMEM; | ||
232 | buffer->entries = len / EFX_BUF_SIZE; | ||
233 | BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1)); | ||
234 | |||
235 | /* Select new buffer ID */ | ||
236 | buffer->index = efx->next_buffer_table; | ||
237 | efx->next_buffer_table += buffer->entries; | ||
238 | #ifdef CONFIG_SFC_SRIOV | ||
239 | BUG_ON(efx_sriov_enabled(efx) && | ||
240 | efx->vf_buftbl_base < efx->next_buffer_table); | ||
241 | #endif | ||
242 | |||
243 | netif_dbg(efx, probe, efx->net_dev, | ||
244 | "allocating special buffers %d-%d at %llx+%x " | ||
245 | "(virt %p phys %llx)\n", buffer->index, | ||
246 | buffer->index + buffer->entries - 1, | ||
247 | (u64)buffer->buf.dma_addr, len, | ||
248 | buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); | ||
249 | |||
250 | return 0; | ||
251 | } | ||
252 | |||
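
The arithmetic in efx_alloc_special_buffer() above reduces to rounding the requested length up to whole 4KB buffers and claiming that many consecutive buffer-table IDs. A standalone sketch of just that bookkeeping (an editor's illustration; BUF_SIZE and the struct are stand-ins matching the 4KB buffers the comment mentions):

#define BUF_SIZE        4096U

struct special_buf {
        unsigned int index;     /* first buffer-table ID claimed */
        unsigned int entries;   /* number of 4KB buffers */
};

static unsigned int next_buffer_table;

static void claim_buf_ids(struct special_buf *buf, unsigned int len)
{
        len = (len + BUF_SIZE - 1) & ~(BUF_SIZE - 1);   /* ALIGN(len, 4K) */
        buf->entries = len / BUF_SIZE;
        buf->index = next_buffer_table;
        next_buffer_table += buf->entries;
        /* e.g. a 1024-entry descriptor ring needs 1024 * 8 = 8192 bytes,
         * i.e. two buffer-table entries
         */
}
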
253 | static void | ||
254 | efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
255 | { | ||
256 | if (!buffer->buf.addr) | ||
257 | return; | ||
258 | |||
259 | netif_dbg(efx, hw, efx->net_dev, | ||
260 | "deallocating special buffers %d-%d at %llx+%x " | ||
261 | "(virt %p phys %llx)\n", buffer->index, | ||
262 | buffer->index + buffer->entries - 1, | ||
263 | (u64)buffer->buf.dma_addr, buffer->buf.len, | ||
264 | buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); | ||
265 | |||
266 | efx_nic_free_buffer(efx, &buffer->buf); | ||
267 | buffer->entries = 0; | ||
268 | } | ||
269 | |||
270 | /************************************************************************** | ||
271 | * | ||
272 | * TX path | ||
273 | * | ||
274 | **************************************************************************/ | ||
275 | |||
276 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ | ||
277 | static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue) | ||
278 | { | ||
279 | unsigned write_ptr; | ||
280 | efx_dword_t reg; | ||
281 | |||
282 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | ||
283 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); | ||
284 | efx_writed_page(tx_queue->efx, ®, | ||
285 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); | ||
286 | } | ||
287 | |||
288 | /* Write pointer and first descriptor for TX descriptor ring */ | ||
289 | static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue, | ||
290 | const efx_qword_t *txd) | ||
291 | { | ||
292 | unsigned write_ptr; | ||
293 | efx_oword_t reg; | ||
294 | |||
295 | BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); | ||
296 | BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); | ||
297 | |||
298 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | ||
299 | EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, | ||
300 | FRF_AZ_TX_DESC_WPTR, write_ptr); | ||
301 | reg.qword[0] = *txd; | ||
302 | efx_writeo_page(tx_queue->efx, ®, | ||
303 | FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); | ||
304 | } | ||
305 | |||
306 | |||
307 | /* For each entry inserted into the software descriptor ring, create a | ||
308 | * descriptor in the hardware TX descriptor ring (in host memory), and | ||
309 | * write a doorbell. | ||
310 | */ | ||
311 | void efx_farch_tx_write(struct efx_tx_queue *tx_queue) | ||
312 | { | ||
313 | |||
314 | struct efx_tx_buffer *buffer; | ||
315 | efx_qword_t *txd; | ||
316 | unsigned write_ptr; | ||
317 | unsigned old_write_count = tx_queue->write_count; | ||
318 | |||
319 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | ||
320 | |||
321 | do { | ||
322 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | ||
323 | buffer = &tx_queue->buffer[write_ptr]; | ||
324 | txd = efx_tx_desc(tx_queue, write_ptr); | ||
325 | ++tx_queue->write_count; | ||
326 | |||
327 | /* Create TX descriptor ring entry */ | ||
328 | BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); | ||
329 | EFX_POPULATE_QWORD_4(*txd, | ||
330 | FSF_AZ_TX_KER_CONT, | ||
331 | buffer->flags & EFX_TX_BUF_CONT, | ||
332 | FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, | ||
333 | FSF_AZ_TX_KER_BUF_REGION, 0, | ||
334 | FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); | ||
335 | } while (tx_queue->write_count != tx_queue->insert_count); | ||
336 | |||
337 | wmb(); /* Ensure descriptors are written before they are fetched */ | ||
338 | |||
339 | if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { | ||
340 | txd = efx_tx_desc(tx_queue, | ||
341 | old_write_count & tx_queue->ptr_mask); | ||
342 | efx_farch_push_tx_desc(tx_queue, txd); | ||
343 | ++tx_queue->pushes; | ||
344 | } else { | ||
345 | efx_farch_notify_tx_desc(tx_queue); | ||
346 | } | ||
347 | } | ||
348 | |||
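
The producer side above relies on the descriptor rings being a power of two in size, so the free-running write_count can simply be masked to find the next slot and to form the doorbell value. A minimal sketch of that pattern (an editor's illustration; RING_SIZE and both helper functions are stand-ins):

#define RING_SIZE       1024U                   /* power of two */
#define PTR_MASK        (RING_SIZE - 1)

static unsigned int write_count;                /* free-running producer counter */

extern void fill_descriptor(unsigned int slot);         /* stand-in */
extern void hw_write_doorbell(unsigned int wptr);       /* stand-in for the WPTR write */

static void ring_publish_one(void)
{
        fill_descriptor(write_count & PTR_MASK);
        ++write_count;                          /* wraps naturally */
        hw_write_doorbell(write_count & PTR_MASK);
}
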
349 | /* Allocate hardware resources for a TX queue */ | ||
350 | int efx_farch_tx_probe(struct efx_tx_queue *tx_queue) | ||
351 | { | ||
352 | struct efx_nic *efx = tx_queue->efx; | ||
353 | unsigned entries; | ||
354 | |||
355 | entries = tx_queue->ptr_mask + 1; | ||
356 | return efx_alloc_special_buffer(efx, &tx_queue->txd, | ||
357 | entries * sizeof(efx_qword_t)); | ||
358 | } | ||
359 | |||
360 | void efx_farch_tx_init(struct efx_tx_queue *tx_queue) | ||
361 | { | ||
362 | struct efx_nic *efx = tx_queue->efx; | ||
363 | efx_oword_t reg; | ||
364 | |||
365 | /* Pin TX descriptor ring */ | ||
366 | efx_init_special_buffer(efx, &tx_queue->txd); | ||
367 | |||
368 | /* Push TX descriptor ring to card */ | ||
369 | EFX_POPULATE_OWORD_10(reg, | ||
370 | FRF_AZ_TX_DESCQ_EN, 1, | ||
371 | FRF_AZ_TX_ISCSI_DDIG_EN, 0, | ||
372 | FRF_AZ_TX_ISCSI_HDIG_EN, 0, | ||
373 | FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, | ||
374 | FRF_AZ_TX_DESCQ_EVQ_ID, | ||
375 | tx_queue->channel->channel, | ||
376 | FRF_AZ_TX_DESCQ_OWNER_ID, 0, | ||
377 | FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, | ||
378 | FRF_AZ_TX_DESCQ_SIZE, | ||
379 | __ffs(tx_queue->txd.entries), | ||
380 | FRF_AZ_TX_DESCQ_TYPE, 0, | ||
381 | FRF_BZ_TX_NON_IP_DROP_DIS, 1); | ||
382 | |||
383 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | ||
384 | int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; | ||
385 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); | ||
386 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, | ||
387 | !csum); | ||
388 | } | ||
389 | |||
390 | efx_writeo_table(efx, ®, efx->type->txd_ptr_tbl_base, | ||
391 | tx_queue->queue); | ||
392 | |||
393 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { | ||
394 | /* Only 128 bits in this register */ | ||
395 | BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); | ||
396 | |||
397 | efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); | ||
398 | if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) | ||
399 | __clear_bit_le(tx_queue->queue, ®); | ||
400 | else | ||
401 | __set_bit_le(tx_queue->queue, ®); | ||
402 | efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); | ||
403 | } | ||
404 | |||
405 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | ||
406 | EFX_POPULATE_OWORD_1(reg, | ||
407 | FRF_BZ_TX_PACE, | ||
408 | (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? | ||
409 | FFE_BZ_TX_PACE_OFF : | ||
410 | FFE_BZ_TX_PACE_RESERVED); | ||
411 | efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, | ||
412 | tx_queue->queue); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue) | ||
417 | { | ||
418 | struct efx_nic *efx = tx_queue->efx; | ||
419 | efx_oword_t tx_flush_descq; | ||
420 | |||
421 | WARN_ON(atomic_read(&tx_queue->flush_outstanding)); | ||
422 | atomic_set(&tx_queue->flush_outstanding, 1); | ||
423 | |||
424 | EFX_POPULATE_OWORD_2(tx_flush_descq, | ||
425 | FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, | ||
426 | FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); | ||
427 | efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); | ||
428 | } | ||
429 | |||
430 | void efx_farch_tx_fini(struct efx_tx_queue *tx_queue) | ||
431 | { | ||
432 | struct efx_nic *efx = tx_queue->efx; | ||
433 | efx_oword_t tx_desc_ptr; | ||
434 | |||
435 | /* Remove TX descriptor ring from card */ | ||
436 | EFX_ZERO_OWORD(tx_desc_ptr); | ||
437 | efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | ||
438 | tx_queue->queue); | ||
439 | |||
440 | /* Unpin TX descriptor ring */ | ||
441 | efx_fini_special_buffer(efx, &tx_queue->txd); | ||
442 | } | ||
443 | |||
444 | /* Free buffers backing TX queue */ | ||
445 | void efx_farch_tx_remove(struct efx_tx_queue *tx_queue) | ||
446 | { | ||
447 | efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); | ||
448 | } | ||
449 | |||
450 | /************************************************************************** | ||
451 | * | ||
452 | * RX path | ||
453 | * | ||
454 | **************************************************************************/ | ||
455 | |||
456 | /* This creates an entry in the RX descriptor queue */ | ||
457 | static inline void | ||
458 | efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) | ||
459 | { | ||
460 | struct efx_rx_buffer *rx_buf; | ||
461 | efx_qword_t *rxd; | ||
462 | |||
463 | rxd = efx_rx_desc(rx_queue, index); | ||
464 | rx_buf = efx_rx_buffer(rx_queue, index); | ||
465 | EFX_POPULATE_QWORD_3(*rxd, | ||
466 | FSF_AZ_RX_KER_BUF_SIZE, | ||
467 | rx_buf->len - | ||
468 | rx_queue->efx->type->rx_buffer_padding, | ||
469 | FSF_AZ_RX_KER_BUF_REGION, 0, | ||
470 | FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); | ||
471 | } | ||
472 | |||
473 | /* This writes to the RX_DESC_WPTR register for the specified receive | ||
474 | * descriptor ring. | ||
475 | */ | ||
476 | void efx_farch_rx_write(struct efx_rx_queue *rx_queue) | ||
477 | { | ||
478 | struct efx_nic *efx = rx_queue->efx; | ||
479 | efx_dword_t reg; | ||
480 | unsigned write_ptr; | ||
481 | |||
482 | while (rx_queue->notified_count != rx_queue->added_count) { | ||
483 | efx_farch_build_rx_desc( | ||
484 | rx_queue, | ||
485 | rx_queue->notified_count & rx_queue->ptr_mask); | ||
486 | ++rx_queue->notified_count; | ||
487 | } | ||
488 | |||
489 | wmb(); | ||
490 | write_ptr = rx_queue->added_count & rx_queue->ptr_mask; | ||
491 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); | ||
492 | efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, | ||
493 | efx_rx_queue_index(rx_queue)); | ||
494 | } | ||
495 | |||
496 | int efx_farch_rx_probe(struct efx_rx_queue *rx_queue) | ||
497 | { | ||
498 | struct efx_nic *efx = rx_queue->efx; | ||
499 | unsigned entries; | ||
500 | |||
501 | entries = rx_queue->ptr_mask + 1; | ||
502 | return efx_alloc_special_buffer(efx, &rx_queue->rxd, | ||
503 | entries * sizeof(efx_qword_t)); | ||
504 | } | ||
505 | |||
506 | void efx_farch_rx_init(struct efx_rx_queue *rx_queue) | ||
507 | { | ||
508 | efx_oword_t rx_desc_ptr; | ||
509 | struct efx_nic *efx = rx_queue->efx; | ||
510 | bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; | ||
511 | bool iscsi_digest_en = is_b0; | ||
512 | bool jumbo_en; | ||
513 | |||
514 | /* For kernel-mode queues in Falcon A1, the JUMBO flag enables | ||
515 | * DMA to continue after a PCIe page boundary (and scattering | ||
516 | * is not possible). In Falcon B0 and Siena, it enables | ||
517 | * scatter. | ||
518 | */ | ||
519 | jumbo_en = !is_b0 || efx->rx_scatter; | ||
520 | |||
521 | netif_dbg(efx, hw, efx->net_dev, | ||
522 | "RX queue %d ring in special buffers %d-%d\n", | ||
523 | efx_rx_queue_index(rx_queue), rx_queue->rxd.index, | ||
524 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); | ||
525 | |||
526 | rx_queue->scatter_n = 0; | ||
527 | |||
528 | /* Pin RX descriptor ring */ | ||
529 | efx_init_special_buffer(efx, &rx_queue->rxd); | ||
530 | |||
531 | /* Push RX descriptor ring to card */ | ||
532 | EFX_POPULATE_OWORD_10(rx_desc_ptr, | ||
533 | FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, | ||
534 | FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, | ||
535 | FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, | ||
536 | FRF_AZ_RX_DESCQ_EVQ_ID, | ||
537 | efx_rx_queue_channel(rx_queue)->channel, | ||
538 | FRF_AZ_RX_DESCQ_OWNER_ID, 0, | ||
539 | FRF_AZ_RX_DESCQ_LABEL, | ||
540 | efx_rx_queue_index(rx_queue), | ||
541 | FRF_AZ_RX_DESCQ_SIZE, | ||
542 | __ffs(rx_queue->rxd.entries), | ||
543 | FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , | ||
544 | FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, | ||
545 | FRF_AZ_RX_DESCQ_EN, 1); | ||
546 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
547 | efx_rx_queue_index(rx_queue)); | ||
548 | } | ||
549 | |||
550 | static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue) | ||
551 | { | ||
552 | struct efx_nic *efx = rx_queue->efx; | ||
553 | efx_oword_t rx_flush_descq; | ||
554 | |||
555 | EFX_POPULATE_OWORD_2(rx_flush_descq, | ||
556 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, | ||
557 | FRF_AZ_RX_FLUSH_DESCQ, | ||
558 | efx_rx_queue_index(rx_queue)); | ||
559 | efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); | ||
560 | } | ||
561 | |||
562 | void efx_farch_rx_fini(struct efx_rx_queue *rx_queue) | ||
563 | { | ||
564 | efx_oword_t rx_desc_ptr; | ||
565 | struct efx_nic *efx = rx_queue->efx; | ||
566 | |||
567 | /* Remove RX descriptor ring from card */ | ||
568 | EFX_ZERO_OWORD(rx_desc_ptr); | ||
569 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
570 | efx_rx_queue_index(rx_queue)); | ||
571 | |||
572 | /* Unpin RX descriptor ring */ | ||
573 | efx_fini_special_buffer(efx, &rx_queue->rxd); | ||
574 | } | ||
575 | |||
576 | /* Free buffers backing RX queue */ | ||
577 | void efx_farch_rx_remove(struct efx_rx_queue *rx_queue) | ||
578 | { | ||
579 | efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); | ||
580 | } | ||
581 | |||
582 | /************************************************************************** | ||
583 | * | ||
584 | * Flush handling | ||
585 | * | ||
586 | **************************************************************************/ | ||
587 | |||
588 | /* efx_farch_flush_queues() must be woken up when all flushes are completed, | ||
589 | * or more RX flushes can be kicked off. | ||
590 | */ | ||
591 | static bool efx_farch_flush_wake(struct efx_nic *efx) | ||
592 | { | ||
593 | /* Ensure that all updates are visible to efx_farch_flush_queues() */ | ||
594 | smp_mb(); | ||
595 | |||
596 | return (atomic_read(&efx->drain_pending) == 0 || | ||
597 | (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT | ||
598 | && atomic_read(&efx->rxq_flush_pending) > 0)); | ||
599 | } | ||
600 | |||
601 | static bool efx_check_tx_flush_complete(struct efx_nic *efx) | ||
602 | { | ||
603 | bool i = true; | ||
604 | efx_oword_t txd_ptr_tbl; | ||
605 | struct efx_channel *channel; | ||
606 | struct efx_tx_queue *tx_queue; | ||
607 | |||
608 | efx_for_each_channel(channel, efx) { | ||
609 | efx_for_each_channel_tx_queue(tx_queue, channel) { | ||
610 | efx_reado_table(efx, &txd_ptr_tbl, | ||
611 | FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); | ||
612 | if (EFX_OWORD_FIELD(txd_ptr_tbl, | ||
613 | FRF_AZ_TX_DESCQ_FLUSH) || | ||
614 | EFX_OWORD_FIELD(txd_ptr_tbl, | ||
615 | FRF_AZ_TX_DESCQ_EN)) { | ||
616 | netif_dbg(efx, hw, efx->net_dev, | ||
617 | "flush did not complete on TXQ %d\n", | ||
618 | tx_queue->queue); | ||
619 | i = false; | ||
620 | } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, | ||
621 | 1, 0)) { | ||
622 | /* The flush is complete, but we didn't | ||
623 | * receive a flush completion event | ||
624 | */ | ||
625 | netif_dbg(efx, hw, efx->net_dev, | ||
626 | "flush complete on TXQ %d, so drain " | ||
627 | "the queue\n", tx_queue->queue); | ||
628 | /* Don't need to increment drain_pending as it | ||
629 | * has already been incremented for the queues | ||
630 | * which did not drain | ||
631 | */ | ||
632 | efx_farch_magic_event(channel, | ||
633 | EFX_CHANNEL_MAGIC_TX_DRAIN( | ||
634 | tx_queue)); | ||
635 | } | ||
636 | } | ||
637 | } | ||
638 | |||
639 | return i; | ||
640 | } | ||
641 | |||
642 | /* Flush all the transmit queues, and continue flushing receive queues until | ||
643 | * they're all flushed. Wait for the DRAIN events to be received so that there |
644 | * are no more RX and TX events left on any channel. */ | ||
645 | static int efx_farch_do_flush(struct efx_nic *efx) | ||
646 | { | ||
647 | unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ | ||
648 | struct efx_channel *channel; | ||
649 | struct efx_rx_queue *rx_queue; | ||
650 | struct efx_tx_queue *tx_queue; | ||
651 | int rc = 0; | ||
652 | |||
653 | efx_for_each_channel(channel, efx) { | ||
654 | efx_for_each_channel_tx_queue(tx_queue, channel) { | ||
655 | atomic_inc(&efx->drain_pending); | ||
656 | efx_farch_flush_tx_queue(tx_queue); | ||
657 | } | ||
658 | efx_for_each_channel_rx_queue(rx_queue, channel) { | ||
659 | atomic_inc(&efx->drain_pending); | ||
660 | rx_queue->flush_pending = true; | ||
661 | atomic_inc(&efx->rxq_flush_pending); | ||
662 | } | ||
663 | } | ||
664 | |||
665 | while (timeout && atomic_read(&efx->drain_pending) > 0) { | ||
666 | /* If SRIOV is enabled, then offload receive queue flushing to | ||
667 | * the firmware (though we will still have to poll for | ||
668 | * completion). If that fails, fall back to the old scheme. | ||
669 | */ | ||
670 | if (efx_sriov_enabled(efx)) { | ||
671 | rc = efx_mcdi_flush_rxqs(efx); | ||
672 | if (!rc) | ||
673 | goto wait; | ||
674 | } | ||
675 | |||
676 | /* The hardware supports four concurrent rx flushes, each of | ||
677 | * which may need to be retried if there is an outstanding | ||
678 | * descriptor fetch | ||
679 | */ | ||
680 | efx_for_each_channel(channel, efx) { | ||
681 | efx_for_each_channel_rx_queue(rx_queue, channel) { | ||
682 | if (atomic_read(&efx->rxq_flush_outstanding) >= | ||
683 | EFX_RX_FLUSH_COUNT) | ||
684 | break; | ||
685 | |||
686 | if (rx_queue->flush_pending) { | ||
687 | rx_queue->flush_pending = false; | ||
688 | atomic_dec(&efx->rxq_flush_pending); | ||
689 | atomic_inc(&efx->rxq_flush_outstanding); | ||
690 | efx_farch_flush_rx_queue(rx_queue); | ||
691 | } | ||
692 | } | ||
693 | } | ||
694 | |||
695 | wait: | ||
696 | timeout = wait_event_timeout(efx->flush_wq, | ||
697 | efx_farch_flush_wake(efx), | ||
698 | timeout); | ||
699 | } | ||
700 | |||
701 | if (atomic_read(&efx->drain_pending) && | ||
702 | !efx_check_tx_flush_complete(efx)) { | ||
703 | netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " | ||
704 | "(rx %d+%d)\n", atomic_read(&efx->drain_pending), | ||
705 | atomic_read(&efx->rxq_flush_outstanding), | ||
706 | atomic_read(&efx->rxq_flush_pending)); | ||
707 | rc = -ETIMEDOUT; | ||
708 | |||
709 | atomic_set(&efx->drain_pending, 0); | ||
710 | atomic_set(&efx->rxq_flush_pending, 0); | ||
711 | atomic_set(&efx->rxq_flush_outstanding, 0); | ||
712 | } | ||
713 | |||
714 | return rc; | ||
715 | } | ||
716 | |||
717 | int efx_farch_fini_dmaq(struct efx_nic *efx) | ||
718 | { | ||
719 | struct efx_channel *channel; | ||
720 | struct efx_tx_queue *tx_queue; | ||
721 | struct efx_rx_queue *rx_queue; | ||
722 | int rc = 0; | ||
723 | |||
724 | /* Do not attempt to write to the NIC during EEH recovery */ | ||
725 | if (efx->state != STATE_RECOVERY) { | ||
726 | /* Only perform flush if DMA is enabled */ | ||
727 | if (efx->pci_dev->is_busmaster) { | ||
728 | efx->type->prepare_flush(efx); | ||
729 | rc = efx_farch_do_flush(efx); | ||
730 | efx->type->finish_flush(efx); | ||
731 | } | ||
732 | |||
733 | efx_for_each_channel(channel, efx) { | ||
734 | efx_for_each_channel_rx_queue(rx_queue, channel) | ||
735 | efx_farch_rx_fini(rx_queue); | ||
736 | efx_for_each_channel_tx_queue(tx_queue, channel) | ||
737 | efx_farch_tx_fini(tx_queue); | ||
738 | } | ||
739 | } | ||
740 | |||
741 | return rc; | ||
742 | } | ||
743 | |||
744 | /************************************************************************** | ||
745 | * | ||
746 | * Event queue processing | ||
747 | * Event queues are processed by per-channel tasklets. | ||
748 | * | ||
749 | **************************************************************************/ | ||
750 | |||
751 | /* Update a channel's event queue's read pointer (RPTR) register | ||
752 | * | ||
753 | * This writes the EVQ_RPTR_REG register for the specified channel's | ||
754 | * event queue. | ||
755 | */ | ||
756 | void efx_farch_ev_read_ack(struct efx_channel *channel) | ||
757 | { | ||
758 | efx_dword_t reg; | ||
759 | struct efx_nic *efx = channel->efx; | ||
760 | |||
761 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, | ||
762 | channel->eventq_read_ptr & channel->eventq_mask); | ||
763 | |||
764 | /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size | ||
765 | * of 4 bytes, but it is really 16 bytes just like later revisions. | ||
766 | */ | ||
767 | efx_writed(efx, ®, | ||
768 | efx->type->evq_rptr_tbl_base + | ||
769 | FR_BZ_EVQ_RPTR_STEP * channel->channel); | ||
770 | } | ||
771 | |||
772 | /* Use HW to insert a SW defined event */ | ||
773 | void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, | ||
774 | efx_qword_t *event) | ||
775 | { | ||
776 | efx_oword_t drv_ev_reg; | ||
777 | |||
778 | BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || | ||
779 | FRF_AZ_DRV_EV_DATA_WIDTH != 64); | ||
780 | drv_ev_reg.u32[0] = event->u32[0]; | ||
781 | drv_ev_reg.u32[1] = event->u32[1]; | ||
782 | drv_ev_reg.u32[2] = 0; | ||
783 | drv_ev_reg.u32[3] = 0; | ||
784 | EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); | ||
785 | efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); | ||
786 | } | ||
787 | |||
788 | static void efx_farch_magic_event(struct efx_channel *channel, u32 magic) | ||
789 | { | ||
790 | efx_qword_t event; | ||
791 | |||
792 | EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, | ||
793 | FSE_AZ_EV_CODE_DRV_GEN_EV, | ||
794 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); | ||
795 | efx_farch_generate_event(channel->efx, channel->channel, &event); | ||
796 | } | ||
797 | |||
798 | /* Handle a transmit completion event | ||
799 | * | ||
800 | * The NIC batches TX completion events; the message we receive is of | ||
801 | * the form "complete all TX events up to this index". | ||
802 | */ | ||
803 | static int | ||
804 | efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | ||
805 | { | ||
806 | unsigned int tx_ev_desc_ptr; | ||
807 | unsigned int tx_ev_q_label; | ||
808 | struct efx_tx_queue *tx_queue; | ||
809 | struct efx_nic *efx = channel->efx; | ||
810 | int tx_packets = 0; | ||
811 | |||
812 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | ||
813 | return 0; | ||
814 | |||
815 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { | ||
816 | /* Transmit completion */ | ||
817 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); | ||
818 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); | ||
819 | tx_queue = efx_channel_get_tx_queue( | ||
820 | channel, tx_ev_q_label % EFX_TXQ_TYPES); | ||
821 | tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & | ||
822 | tx_queue->ptr_mask); | ||
823 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); | ||
824 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { | ||
825 | /* Rewrite the FIFO write pointer */ | ||
826 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); | ||
827 | tx_queue = efx_channel_get_tx_queue( | ||
828 | channel, tx_ev_q_label % EFX_TXQ_TYPES); | ||
829 | |||
830 | netif_tx_lock(efx->net_dev); | ||
831 | efx_farch_notify_tx_desc(tx_queue); | ||
832 | netif_tx_unlock(efx->net_dev); | ||
833 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && | ||
834 | EFX_WORKAROUND_10727(efx)) { | ||
835 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | ||
836 | } else { | ||
837 | netif_err(efx, tx_err, efx->net_dev, | ||
838 | "channel %d unexpected TX event " | ||
839 | EFX_QWORD_FMT"\n", channel->channel, | ||
840 | EFX_QWORD_VAL(*event)); | ||
841 | } | ||
842 | |||
843 | return tx_packets; | ||
844 | } | ||
845 | |||
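
The completion arithmetic above works on the same free-running counters: unsigned subtraction followed by the ring mask measures how far the completion index has advanced past read_count, even when the ring has wrapped in between. A small standalone example (an editor's illustration, assuming a 1024-entry ring):

#include <stdio.h>

#define PTR_MASK        1023U   /* ring of 1024 descriptors */

int main(void)
{
        unsigned int read_count = 1020; /* last position acknowledged */
        unsigned int ev_desc_ptr = 4;   /* completion index after a wrap */
        unsigned int advanced = (ev_desc_ptr - read_count) & PTR_MASK;

        printf("pointer advanced %u slots\n", advanced);        /* prints 8 */
        return 0;
}
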
846 | /* Detect errors included in the rx_evt_pkt_ok bit. */ | ||
847 | static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | ||
848 | const efx_qword_t *event) | ||
849 | { | ||
850 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | ||
851 | struct efx_nic *efx = rx_queue->efx; | ||
852 | bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; | ||
853 | bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; | ||
854 | bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; | ||
855 | bool rx_ev_other_err, rx_ev_pause_frm; | ||
856 | bool rx_ev_hdr_type, rx_ev_mcast_pkt; | ||
857 | unsigned rx_ev_pkt_type; | ||
858 | |||
859 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); | ||
860 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); | ||
861 | rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); | ||
862 | rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); | ||
863 | rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, | ||
864 | FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); | ||
865 | rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, | ||
866 | FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); | ||
867 | rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, | ||
868 | FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); | ||
869 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); | ||
870 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); | ||
871 | rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? | ||
872 | 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); | ||
873 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); | ||
874 | |||
875 | /* Every error apart from tobe_disc and pause_frm */ | ||
876 | rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | | ||
877 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | | ||
878 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); | ||
879 | |||
880 | /* Count errors that are not in MAC stats. Ignore expected | ||
881 | * checksum errors during self-test. */ | ||
882 | if (rx_ev_frm_trunc) | ||
883 | ++channel->n_rx_frm_trunc; | ||
884 | else if (rx_ev_tobe_disc) | ||
885 | ++channel->n_rx_tobe_disc; | ||
886 | else if (!efx->loopback_selftest) { | ||
887 | if (rx_ev_ip_hdr_chksum_err) | ||
888 | ++channel->n_rx_ip_hdr_chksum_err; | ||
889 | else if (rx_ev_tcp_udp_chksum_err) | ||
890 | ++channel->n_rx_tcp_udp_chksum_err; | ||
891 | } | ||
892 | |||
893 | /* TOBE_DISC is expected on unicast mismatches; don't print out an | ||
894 | * error message. FRM_TRUNC indicates RXDP dropped the packet due | ||
895 | * to a FIFO overflow. | ||
896 | */ | ||
897 | #ifdef DEBUG | ||
898 | if (rx_ev_other_err && net_ratelimit()) { | ||
899 | netif_dbg(efx, rx_err, efx->net_dev, | ||
900 | " RX queue %d unexpected RX event " | ||
901 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", | ||
902 | efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), | ||
903 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", | ||
904 | rx_ev_ip_hdr_chksum_err ? | ||
905 | " [IP_HDR_CHKSUM_ERR]" : "", | ||
906 | rx_ev_tcp_udp_chksum_err ? | ||
907 | " [TCP_UDP_CHKSUM_ERR]" : "", | ||
908 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", | ||
909 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", | ||
910 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", | ||
911 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", | ||
912 | rx_ev_pause_frm ? " [PAUSE]" : ""); | ||
913 | } | ||
914 | #endif | ||
915 | |||
916 | /* The frame must be discarded if any of these are true. */ | ||
917 | return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | | ||
918 | rx_ev_tobe_disc | rx_ev_pause_frm) ? | ||
919 | EFX_RX_PKT_DISCARD : 0; | ||
920 | } | ||
921 | |||
922 | /* Handle receive events that are not in-order. Return true if this | ||
923 | * can be handled as a partial packet discard, false if it's more | ||
924 | * serious. | ||
925 | */ | ||
926 | static bool | ||
927 | efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) | ||
928 | { | ||
929 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | ||
930 | struct efx_nic *efx = rx_queue->efx; | ||
931 | unsigned expected, dropped; | ||
932 | |||
933 | if (rx_queue->scatter_n && | ||
934 | index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & | ||
935 | rx_queue->ptr_mask)) { | ||
936 | ++channel->n_rx_nodesc_trunc; | ||
937 | return true; | ||
938 | } | ||
939 | |||
940 | expected = rx_queue->removed_count & rx_queue->ptr_mask; | ||
941 | dropped = (index - expected) & rx_queue->ptr_mask; | ||
942 | netif_info(efx, rx_err, efx->net_dev, | ||
943 | "dropped %d events (index=%d expected=%d)\n", | ||
944 | dropped, index, expected); | ||
945 | |||
946 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? | ||
947 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | ||
948 | return false; | ||
949 | } | ||
950 | |||
951 | /* Handle a packet received event | ||
952 | * | ||
953 | * The NIC gives a "discard" flag if it's a unicast packet with the | ||
954 | * wrong destination address | ||
955 | * Also "is multicast" and "matches multicast filter" flags can be used to | ||
956 | * discard non-matching multicast packets. | ||
957 | */ | ||
958 | static void | ||
959 | efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) | ||
960 | { | ||
961 | unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; | ||
962 | unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; | ||
963 | unsigned expected_ptr; | ||
964 | bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; | ||
965 | u16 flags; | ||
966 | struct efx_rx_queue *rx_queue; | ||
967 | struct efx_nic *efx = channel->efx; | ||
968 | |||
969 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | ||
970 | return; | ||
971 | |||
972 | rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); | ||
973 | rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); | ||
974 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != | ||
975 | channel->channel); | ||
976 | |||
977 | rx_queue = efx_channel_get_rx_queue(channel); | ||
978 | |||
979 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); | ||
980 | expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & | ||
981 | rx_queue->ptr_mask); | ||
982 | |||
983 | /* Check for partial drops and other errors */ | ||
984 | if (unlikely(rx_ev_desc_ptr != expected_ptr) || | ||
985 | unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { | ||
986 | if (rx_ev_desc_ptr != expected_ptr && | ||
987 | !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) | ||
988 | return; | ||
989 | |||
990 | /* Discard all pending fragments */ | ||
991 | if (rx_queue->scatter_n) { | ||
992 | efx_rx_packet( | ||
993 | rx_queue, | ||
994 | rx_queue->removed_count & rx_queue->ptr_mask, | ||
995 | rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); | ||
996 | rx_queue->removed_count += rx_queue->scatter_n; | ||
997 | rx_queue->scatter_n = 0; | ||
998 | } | ||
999 | |||
1000 | /* Return if there is no new fragment */ | ||
1001 | if (rx_ev_desc_ptr != expected_ptr) | ||
1002 | return; | ||
1003 | |||
1004 | /* Discard new fragment if not SOP */ | ||
1005 | if (!rx_ev_sop) { | ||
1006 | efx_rx_packet( | ||
1007 | rx_queue, | ||
1008 | rx_queue->removed_count & rx_queue->ptr_mask, | ||
1009 | 1, 0, EFX_RX_PKT_DISCARD); | ||
1010 | ++rx_queue->removed_count; | ||
1011 | return; | ||
1012 | } | ||
1013 | } | ||
1014 | |||
1015 | ++rx_queue->scatter_n; | ||
1016 | if (rx_ev_cont) | ||
1017 | return; | ||
1018 | |||
1019 | rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); | ||
1020 | rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); | ||
1021 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); | ||
1022 | |||
1023 | if (likely(rx_ev_pkt_ok)) { | ||
1024 | /* If packet is marked as OK then we can rely on the | ||
1025 | * hardware checksum and classification. | ||
1026 | */ | ||
1027 | flags = 0; | ||
1028 | switch (rx_ev_hdr_type) { | ||
1029 | case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: | ||
1030 | flags |= EFX_RX_PKT_TCP; | ||
1031 | /* fall through */ | ||
1032 | case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: | ||
1033 | flags |= EFX_RX_PKT_CSUMMED; | ||
1034 | /* fall through */ | ||
1035 | case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: | ||
1036 | case FSE_AZ_RX_EV_HDR_TYPE_OTHER: | ||
1037 | break; | ||
1038 | } | ||
1039 | } else { | ||
1040 | flags = efx_farch_handle_rx_not_ok(rx_queue, event); | ||
1041 | } | ||
1042 | |||
1043 | /* Detect multicast packets that didn't match the filter */ | ||
1044 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); | ||
1045 | if (rx_ev_mcast_pkt) { | ||
1046 | unsigned int rx_ev_mcast_hash_match = | ||
1047 | EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); | ||
1048 | |||
1049 | if (unlikely(!rx_ev_mcast_hash_match)) { | ||
1050 | ++channel->n_rx_mcast_mismatch; | ||
1051 | flags |= EFX_RX_PKT_DISCARD; | ||
1052 | } | ||
1053 | } | ||
1054 | |||
1055 | channel->irq_mod_score += 2; | ||
1056 | |||
1057 | /* Handle received packet */ | ||
1058 | efx_rx_packet(rx_queue, | ||
1059 | rx_queue->removed_count & rx_queue->ptr_mask, | ||
1060 | rx_queue->scatter_n, rx_ev_byte_cnt, flags); | ||
1061 | rx_queue->removed_count += rx_queue->scatter_n; | ||
1062 | rx_queue->scatter_n = 0; | ||
1063 | } | ||
1064 | |||
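
The scatter handling above follows a simple accumulate-then-deliver protocol: continuation events only bump scatter_n, and the whole run of descriptors is handed to efx_rx_packet() once the final, non-continuation event arrives (or is discarded as a unit on error). A minimal sketch of that control flow (an editor's illustration; both helper names are stand-ins):

static unsigned int scatter_n;          /* fragments seen so far */

extern void deliver_packet(unsigned int first_slot, unsigned int n_frags);

static void on_rx_event(unsigned int first_slot, int is_continuation)
{
        ++scatter_n;
        if (is_continuation)
                return;                 /* more fragments to come */

        deliver_packet(first_slot, scatter_n);
        scatter_n = 0;
}
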
1065 | /* If this flush done event corresponds to a &struct efx_tx_queue, then | ||
1066 | * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue | ||
1067 | * of all transmit completions. | ||
1068 | */ | ||
1069 | static void | ||
1070 | efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) | ||
1071 | { | ||
1072 | struct efx_tx_queue *tx_queue; | ||
1073 | int qid; | ||
1074 | |||
1075 | qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); | ||
1076 | if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { | ||
1077 | tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, | ||
1078 | qid % EFX_TXQ_TYPES); | ||
1079 | if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) { | ||
1080 | efx_farch_magic_event(tx_queue->channel, | ||
1081 | EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); | ||
1082 | } | ||
1083 | } | ||
1084 | } | ||
1085 | |||
1086 | /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush | ||
1087 | * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add |
1088 | * the RX queue back to the mask of RX queues in need of flushing. | ||
1089 | */ | ||
1090 | static void | ||
1091 | efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) | ||
1092 | { | ||
1093 | struct efx_channel *channel; | ||
1094 | struct efx_rx_queue *rx_queue; | ||
1095 | int qid; | ||
1096 | bool failed; | ||
1097 | |||
1098 | qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); | ||
1099 | failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); | ||
1100 | if (qid >= efx->n_channels) | ||
1101 | return; | ||
1102 | channel = efx_get_channel(efx, qid); | ||
1103 | if (!efx_channel_has_rx_queue(channel)) | ||
1104 | return; | ||
1105 | rx_queue = efx_channel_get_rx_queue(channel); | ||
1106 | |||
1107 | if (failed) { | ||
1108 | netif_info(efx, hw, efx->net_dev, | ||
1109 | "RXQ %d flush retry\n", qid); | ||
1110 | rx_queue->flush_pending = true; | ||
1111 | atomic_inc(&efx->rxq_flush_pending); | ||
1112 | } else { | ||
1113 | efx_farch_magic_event(efx_rx_queue_channel(rx_queue), | ||
1114 | EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); | ||
1115 | } | ||
1116 | atomic_dec(&efx->rxq_flush_outstanding); | ||
1117 | if (efx_farch_flush_wake(efx)) | ||
1118 | wake_up(&efx->flush_wq); | ||
1119 | } | ||
1120 | |||
1121 | static void | ||
1122 | efx_farch_handle_drain_event(struct efx_channel *channel) | ||
1123 | { | ||
1124 | struct efx_nic *efx = channel->efx; | ||
1125 | |||
1126 | WARN_ON(atomic_read(&efx->drain_pending) == 0); | ||
1127 | atomic_dec(&efx->drain_pending); | ||
1128 | if (efx_farch_flush_wake(efx)) | ||
1129 | wake_up(&efx->flush_wq); | ||
1130 | } | ||
1131 | |||
1132 | static void efx_farch_handle_generated_event(struct efx_channel *channel, | ||
1133 | efx_qword_t *event) | ||
1134 | { | ||
1135 | struct efx_nic *efx = channel->efx; | ||
1136 | struct efx_rx_queue *rx_queue = | ||
1137 | efx_channel_has_rx_queue(channel) ? | ||
1138 | efx_channel_get_rx_queue(channel) : NULL; | ||
1139 | unsigned magic, code; | ||
1140 | |||
1141 | magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); | ||
1142 | code = _EFX_CHANNEL_MAGIC_CODE(magic); | ||
1143 | |||
1144 | if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { | ||
1145 | channel->event_test_cpu = raw_smp_processor_id(); | ||
1146 | } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { | ||
1147 | /* The queue must be empty, so we won't receive any rx | ||
1148 | * events, so efx_process_channel() won't refill the | ||
1149 | * queue. Refill it here */ | ||
1150 | efx_fast_push_rx_descriptors(rx_queue); | ||
1151 | } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { | ||
1152 | efx_farch_handle_drain_event(channel); | ||
1153 | } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { | ||
1154 | efx_farch_handle_drain_event(channel); | ||
1155 | } else { | ||
1156 | netif_dbg(efx, hw, efx->net_dev, "channel %d received " | ||
1157 | "generated event "EFX_QWORD_FMT"\n", | ||
1158 | channel->channel, EFX_QWORD_VAL(*event)); | ||
1159 | } | ||
1160 | } | ||
1161 | |||
1162 | static void | ||
1163 | efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | ||
1164 | { | ||
1165 | struct efx_nic *efx = channel->efx; | ||
1166 | unsigned int ev_sub_code; | ||
1167 | unsigned int ev_sub_data; | ||
1168 | |||
1169 | ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); | ||
1170 | ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); | ||
1171 | |||
1172 | switch (ev_sub_code) { | ||
1173 | case FSE_AZ_TX_DESCQ_FLS_DONE_EV: | ||
1174 | netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", | ||
1175 | channel->channel, ev_sub_data); | ||
1176 | efx_farch_handle_tx_flush_done(efx, event); | ||
1177 | efx_sriov_tx_flush_done(efx, event); | ||
1178 | break; | ||
1179 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: | ||
1180 | netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", | ||
1181 | channel->channel, ev_sub_data); | ||
1182 | efx_farch_handle_rx_flush_done(efx, event); | ||
1183 | efx_sriov_rx_flush_done(efx, event); | ||
1184 | break; | ||
1185 | case FSE_AZ_EVQ_INIT_DONE_EV: | ||
1186 | netif_dbg(efx, hw, efx->net_dev, | ||
1187 | "channel %d EVQ %d initialised\n", | ||
1188 | channel->channel, ev_sub_data); | ||
1189 | break; | ||
1190 | case FSE_AZ_SRM_UPD_DONE_EV: | ||
1191 | netif_vdbg(efx, hw, efx->net_dev, | ||
1192 | "channel %d SRAM update done\n", channel->channel); | ||
1193 | break; | ||
1194 | case FSE_AZ_WAKE_UP_EV: | ||
1195 | netif_vdbg(efx, hw, efx->net_dev, | ||
1196 | "channel %d RXQ %d wakeup event\n", | ||
1197 | channel->channel, ev_sub_data); | ||
1198 | break; | ||
1199 | case FSE_AZ_TIMER_EV: | ||
1200 | netif_vdbg(efx, hw, efx->net_dev, | ||
1201 | "channel %d RX queue %d timer expired\n", | ||
1202 | channel->channel, ev_sub_data); | ||
1203 | break; | ||
1204 | case FSE_AA_RX_RECOVER_EV: | ||
1205 | netif_err(efx, rx_err, efx->net_dev, | ||
1206 | "channel %d seen DRIVER RX_RESET event. " | ||
1207 | "Resetting.\n", channel->channel); | ||
1208 | atomic_inc(&efx->rx_reset); | ||
1209 | efx_schedule_reset(efx, | ||
1210 | EFX_WORKAROUND_6555(efx) ? | ||
1211 | RESET_TYPE_RX_RECOVERY : | ||
1212 | RESET_TYPE_DISABLE); | ||
1213 | break; | ||
1214 | case FSE_BZ_RX_DSC_ERROR_EV: | ||
1215 | if (ev_sub_data < EFX_VI_BASE) { | ||
1216 | netif_err(efx, rx_err, efx->net_dev, | ||
1217 | "RX DMA Q %d reports descriptor fetch error." | ||
1218 | " RX Q %d is disabled.\n", ev_sub_data, | ||
1219 | ev_sub_data); | ||
1220 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); | ||
1221 | } else | ||
1222 | efx_sriov_desc_fetch_err(efx, ev_sub_data); | ||
1223 | break; | ||
1224 | case FSE_BZ_TX_DSC_ERROR_EV: | ||
1225 | if (ev_sub_data < EFX_VI_BASE) { | ||
1226 | netif_err(efx, tx_err, efx->net_dev, | ||
1227 | "TX DMA Q %d reports descriptor fetch error." | ||
1228 | " TX Q %d is disabled.\n", ev_sub_data, | ||
1229 | ev_sub_data); | ||
1230 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | ||
1231 | } else | ||
1232 | efx_sriov_desc_fetch_err(efx, ev_sub_data); | ||
1233 | break; | ||
1234 | default: | ||
1235 | netif_vdbg(efx, hw, efx->net_dev, | ||
1236 | "channel %d unknown driver event code %d " | ||
1237 | "data %04x\n", channel->channel, ev_sub_code, | ||
1238 | ev_sub_data); | ||
1239 | break; | ||
1240 | } | ||
1241 | } | ||
1242 | |||
1243 | int efx_farch_ev_process(struct efx_channel *channel, int budget) | ||
1244 | { | ||
1245 | struct efx_nic *efx = channel->efx; | ||
1246 | unsigned int read_ptr; | ||
1247 | efx_qword_t event, *p_event; | ||
1248 | int ev_code; | ||
1249 | int tx_packets = 0; | ||
1250 | int spent = 0; | ||
1251 | |||
1252 | read_ptr = channel->eventq_read_ptr; | ||
1253 | |||
1254 | for (;;) { | ||
1255 | p_event = efx_event(channel, read_ptr); | ||
1256 | event = *p_event; | ||
1257 | |||
1258 | if (!efx_event_present(&event)) | ||
1259 | /* End of events */ | ||
1260 | break; | ||
1261 | |||
1262 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, | ||
1263 | "channel %d event is "EFX_QWORD_FMT"\n", | ||
1264 | channel->channel, EFX_QWORD_VAL(event)); | ||
1265 | |||
1266 | /* Clear this event by marking it all ones */ | ||
1267 | EFX_SET_QWORD(*p_event); | ||
1268 | |||
1269 | ++read_ptr; | ||
1270 | |||
1271 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); | ||
1272 | |||
1273 | switch (ev_code) { | ||
1274 | case FSE_AZ_EV_CODE_RX_EV: | ||
1275 | efx_farch_handle_rx_event(channel, &event); | ||
1276 | if (++spent == budget) | ||
1277 | goto out; | ||
1278 | break; | ||
1279 | case FSE_AZ_EV_CODE_TX_EV: | ||
1280 | tx_packets += efx_farch_handle_tx_event(channel, | ||
1281 | &event); | ||
1282 | if (tx_packets > efx->txq_entries) { | ||
1283 | spent = budget; | ||
1284 | goto out; | ||
1285 | } | ||
1286 | break; | ||
1287 | case FSE_AZ_EV_CODE_DRV_GEN_EV: | ||
1288 | efx_farch_handle_generated_event(channel, &event); | ||
1289 | break; | ||
1290 | case FSE_AZ_EV_CODE_DRIVER_EV: | ||
1291 | efx_farch_handle_driver_event(channel, &event); | ||
1292 | break; | ||
1293 | case FSE_CZ_EV_CODE_USER_EV: | ||
1294 | efx_sriov_event(channel, &event); | ||
1295 | break; | ||
1296 | case FSE_CZ_EV_CODE_MCDI_EV: | ||
1297 | efx_mcdi_process_event(channel, &event); | ||
1298 | break; | ||
1299 | case FSE_AZ_EV_CODE_GLOBAL_EV: | ||
1300 | if (efx->type->handle_global_event && | ||
1301 | efx->type->handle_global_event(channel, &event)) | ||
1302 | break; | ||
1303 | /* else fall through */ | ||
1304 | default: | ||
1305 | netif_err(channel->efx, hw, channel->efx->net_dev, | ||
1306 | "channel %d unknown event type %d (data " | ||
1307 | EFX_QWORD_FMT ")\n", channel->channel, | ||
1308 | ev_code, EFX_QWORD_VAL(event)); | ||
1309 | } | ||
1310 | } | ||
1311 | |||
1312 | out: | ||
1313 | channel->eventq_read_ptr = read_ptr; | ||
1314 | return spent; | ||
1315 | } | ||
1316 | |||
1317 | /* Allocate buffer table entries for event queue */ | ||
1318 | int efx_farch_ev_probe(struct efx_channel *channel) | ||
1319 | { | ||
1320 | struct efx_nic *efx = channel->efx; | ||
1321 | unsigned entries; | ||
1322 | |||
1323 | entries = channel->eventq_mask + 1; | ||
1324 | return efx_alloc_special_buffer(efx, &channel->eventq, | ||
1325 | entries * sizeof(efx_qword_t)); | ||
1326 | } | ||
1327 | |||
1328 | void efx_farch_ev_init(struct efx_channel *channel) | ||
1329 | { | ||
1330 | efx_oword_t reg; | ||
1331 | struct efx_nic *efx = channel->efx; | ||
1332 | |||
1333 | netif_dbg(efx, hw, efx->net_dev, | ||
1334 | "channel %d event queue in special buffers %d-%d\n", | ||
1335 | channel->channel, channel->eventq.index, | ||
1336 | channel->eventq.index + channel->eventq.entries - 1); | ||
1337 | |||
1338 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { | ||
1339 | EFX_POPULATE_OWORD_3(reg, | ||
1340 | FRF_CZ_TIMER_Q_EN, 1, | ||
1341 | FRF_CZ_HOST_NOTIFY_MODE, 0, | ||
1342 | FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); | ||
1343 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); | ||
1344 | } | ||
1345 | |||
1346 | /* Pin event queue buffer */ | ||
1347 | efx_init_special_buffer(efx, &channel->eventq); | ||
1348 | |||
1349 | /* Fill event queue with all ones (i.e. empty events) */ | ||
1350 | memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); | ||
1351 | |||
1352 | /* Push event queue to card */ | ||
1353 | EFX_POPULATE_OWORD_3(reg, | ||
1354 | FRF_AZ_EVQ_EN, 1, | ||
1355 | FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), | ||
1356 | FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); | ||
1357 | efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, | ||
1358 | channel->channel); | ||
1359 | |||
1360 | efx->type->push_irq_moderation(channel); | ||
1361 | } | ||
1362 | |||
1363 | void efx_farch_ev_fini(struct efx_channel *channel) | ||
1364 | { | ||
1365 | efx_oword_t reg; | ||
1366 | struct efx_nic *efx = channel->efx; | ||
1367 | |||
1368 | /* Remove event queue from card */ | ||
1369 | EFX_ZERO_OWORD(reg); | ||
1370 | efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, | ||
1371 | channel->channel); | ||
1372 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | ||
1373 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); | ||
1374 | |||
1375 | /* Unpin event queue */ | ||
1376 | efx_fini_special_buffer(efx, &channel->eventq); | ||
1377 | } | ||
1378 | |||
1379 | /* Free buffers backing event queue */ | ||
1380 | void efx_farch_ev_remove(struct efx_channel *channel) | ||
1381 | { | ||
1382 | efx_free_special_buffer(channel->efx, &channel->eventq); | ||
1383 | } | ||
1384 | |||
1385 | |||
1386 | void efx_farch_ev_test_generate(struct efx_channel *channel) | ||
1387 | { | ||
1388 | efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); | ||
1389 | } | ||
1390 | |||
1391 | void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue) | ||
1392 | { | ||
1393 | efx_farch_magic_event(efx_rx_queue_channel(rx_queue), | ||
1394 | EFX_CHANNEL_MAGIC_FILL(rx_queue)); | ||
1395 | } | ||
1396 | |||
1397 | /************************************************************************** | ||
1398 | * | ||
1399 | * Hardware interrupts | ||
1400 | * The hardware interrupt handler does very little work; all the event | ||
1401 | * queue processing is carried out by per-channel tasklets. | ||
1402 | * | ||
1403 | **************************************************************************/ | ||
1404 | |||
1405 | /* Enable/disable/generate interrupts */ | ||
1406 | static inline void efx_farch_interrupts(struct efx_nic *efx, | ||
1407 | bool enabled, bool force) | ||
1408 | { | ||
1409 | efx_oword_t int_en_reg_ker; | ||
1410 | |||
1411 | EFX_POPULATE_OWORD_3(int_en_reg_ker, | ||
1412 | FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, | ||
1413 | FRF_AZ_KER_INT_KER, force, | ||
1414 | FRF_AZ_DRV_INT_EN_KER, enabled); | ||
1415 | efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); | ||
1416 | } | ||
1417 | |||
1418 | void efx_farch_irq_enable_master(struct efx_nic *efx) | ||
1419 | { | ||
1420 | EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); | ||
1421 | wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ | ||
1422 | |||
1423 | efx_farch_interrupts(efx, true, false); | ||
1424 | } | ||
1425 | |||
1426 | void efx_farch_irq_disable_master(struct efx_nic *efx) | ||
1427 | { | ||
1428 | /* Disable interrupts */ | ||
1429 | efx_farch_interrupts(efx, false, false); | ||
1430 | } | ||
1431 | |||
1432 | /* Generate a test interrupt | ||
1433 | * Interrupts must already have been enabled, otherwise nasty things | ||
1434 | * may happen. | ||
1435 | */ | ||
1436 | void efx_farch_irq_test_generate(struct efx_nic *efx) | ||
1437 | { | ||
1438 | efx_farch_interrupts(efx, true, true); | ||
1439 | } | ||
1440 | |||
1441 | /* Process a fatal interrupt | ||
1442 | * Disable bus mastering ASAP and schedule a reset | ||
1443 | */ | ||
1444 | irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) | ||
1445 | { | ||
1446 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
1447 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1448 | efx_oword_t fatal_intr; | ||
1449 | int error, mem_perr; | ||
1450 | |||
1451 | efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); | ||
1452 | error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); | ||
1453 | |||
1454 | netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " | ||
1455 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), | ||
1456 | EFX_OWORD_VAL(fatal_intr), | ||
1457 | error ? "disabling bus mastering" : "no recognised error"); | ||
1458 | |||
1459 | /* If this is a memory parity error, dump which blocks are offending */ | ||
1460 | mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || | ||
1461 | EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); | ||
1462 | if (mem_perr) { | ||
1463 | efx_oword_t reg; | ||
1464 | efx_reado(efx, ®, FR_AZ_MEM_STAT); | ||
1465 | netif_err(efx, hw, efx->net_dev, | ||
1466 | "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", | ||
1467 | EFX_OWORD_VAL(reg)); | ||
1468 | } | ||
1469 | |||
1470 | /* Disable both devices */ | ||
1471 | pci_clear_master(efx->pci_dev); | ||
1472 | if (efx_nic_is_dual_func(efx)) | ||
1473 | pci_clear_master(nic_data->pci_dev2); | ||
1474 | efx_farch_irq_disable_master(efx); | ||
1475 | |||
1476 | /* Count errors and reset or disable the NIC accordingly */ | ||
1477 | if (efx->int_error_count == 0 || | ||
1478 | time_after(jiffies, efx->int_error_expire)) { | ||
1479 | efx->int_error_count = 0; | ||
1480 | efx->int_error_expire = | ||
1481 | jiffies + EFX_INT_ERROR_EXPIRE * HZ; | ||
1482 | } | ||
1483 | if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { | ||
1484 | netif_err(efx, hw, efx->net_dev, | ||
1485 | "SYSTEM ERROR - reset scheduled\n"); | ||
1486 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); | ||
1487 | } else { | ||
1488 | netif_err(efx, hw, efx->net_dev, | ||
1489 | "SYSTEM ERROR - max number of errors seen." | ||
1490 | "NIC will be disabled\n"); | ||
1491 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | ||
1492 | } | ||
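/* The two tests above form a simple rate limit: a new error window opens on
 * the first fault, or once the previous window has expired, and only if
 * EFX_MAX_INT_ERRORS faults accumulate inside a single window is the
 * permanent RESET_TYPE_DISABLE chosen instead of an ordinary reset.
 */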
1493 | |||
1494 | return IRQ_HANDLED; | ||
1495 | } | ||
1496 | |||
1497 | /* Handle a legacy interrupt | ||
1498 | * Acknowledges the interrupt and schedules event queue processing. | ||
1499 | */ | ||
1500 | irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) | ||
1501 | { | ||
1502 | struct efx_nic *efx = dev_id; | ||
1503 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | ||
1504 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1505 | irqreturn_t result = IRQ_NONE; | ||
1506 | struct efx_channel *channel; | ||
1507 | efx_dword_t reg; | ||
1508 | u32 queues; | ||
1509 | int syserr; | ||
1510 | |||
1511 | /* Read the ISR which also ACKs the interrupts */ | ||
1512 | efx_readd(efx, ®, FR_BZ_INT_ISR0); | ||
1513 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); | ||
1514 | |||
1515 | /* Legacy interrupts are disabled too late by the EEH kernel | ||
1516 | * code. Disable them earlier. | ||
1517 | * If an EEH error occurred, the read will have returned all ones. | ||
1518 | */ | ||
1519 | if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && | ||
1520 | !efx->eeh_disabled_legacy_irq) { | ||
1521 | disable_irq_nosync(efx->legacy_irq); | ||
1522 | efx->eeh_disabled_legacy_irq = true; | ||
1523 | } | ||
1524 | |||
1525 | /* Handle non-event-queue sources */ | ||
1526 | if (queues & (1U << efx->irq_level) && soft_enabled) { | ||
1527 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | ||
1528 | if (unlikely(syserr)) | ||
1529 | return efx_farch_fatal_interrupt(efx); | ||
1530 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1531 | } | ||
1532 | |||
1533 | if (queues != 0) { | ||
1534 | if (EFX_WORKAROUND_15783(efx)) | ||
1535 | efx->irq_zero_count = 0; | ||
1536 | |||
1537 | /* Schedule processing of any interrupting queues */ | ||
1538 | if (likely(soft_enabled)) { | ||
1539 | efx_for_each_channel(channel, efx) { | ||
1540 | if (queues & 1) | ||
1541 | efx_schedule_channel_irq(channel); | ||
1542 | queues >>= 1; | ||
1543 | } | ||
1544 | } | ||
1545 | result = IRQ_HANDLED; | ||
1546 | |||
1547 | } else if (EFX_WORKAROUND_15783(efx)) { | ||
1548 | efx_qword_t *event; | ||
1549 | |||
1550 | /* We can't return IRQ_HANDLED more than once on seeing ISR=0 | ||
1551 | * because this might be a shared interrupt. */ | ||
1552 | if (efx->irq_zero_count++ == 0) | ||
1553 | result = IRQ_HANDLED; | ||
1554 | |||
1555 | /* Ensure we schedule or rearm all event queues */ | ||
1556 | if (likely(soft_enabled)) { | ||
1557 | efx_for_each_channel(channel, efx) { | ||
1558 | event = efx_event(channel, | ||
1559 | channel->eventq_read_ptr); | ||
1560 | if (efx_event_present(event)) | ||
1561 | efx_schedule_channel_irq(channel); | ||
1562 | else | ||
1563 | efx_farch_ev_read_ack(channel); | ||
1564 | } | ||
1565 | } | ||
1566 | } | ||
1567 | |||
1568 | if (result == IRQ_HANDLED) | ||
1569 | netif_vdbg(efx, intr, efx->net_dev, | ||
1570 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | ||
1571 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | ||
1572 | |||
1573 | return result; | ||
1574 | } | ||
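/* For context, a sketch of how a handler like this is installed (the real
 * registration lives in efx.c and is routed through the efx_nic_type method
 * table, so the exact flags and name string may differ):
 *
 *	rc = request_irq(efx->legacy_irq, efx_farch_legacy_interrupt,
 *			 IRQF_SHARED, "sfc", efx);
 *
 * The dev_id must be the struct efx_nic itself for the cast at the top of
 * the handler to work, and a shared-line flag is needed because legacy INTx
 * may be shared with other devices (hence the IRQ_HANDLED comment above).
 */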
1575 | |||
1576 | /* Handle an MSI interrupt | ||
1577 | * | ||
1578 | * Handle an MSI hardware interrupt. This routine schedules event | ||
1579 | * queue processing. No interrupt acknowledgement cycle is necessary. | ||
1580 | * Also, we never need to check that the interrupt is for us, since | ||
1581 | * MSI interrupts cannot be shared. | ||
1582 | */ | ||
1583 | irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) | ||
1584 | { | ||
1585 | struct efx_msi_context *context = dev_id; | ||
1586 | struct efx_nic *efx = context->efx; | ||
1587 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1588 | int syserr; | ||
1589 | |||
1590 | netif_vdbg(efx, intr, efx->net_dev, | ||
1591 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | ||
1592 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | ||
1593 | |||
1594 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | ||
1595 | return IRQ_HANDLED; | ||
1596 | |||
1597 | /* Handle non-event-queue sources */ | ||
1598 | if (context->index == efx->irq_level) { | ||
1599 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | ||
1600 | if (unlikely(syserr)) | ||
1601 | return efx_farch_fatal_interrupt(efx); | ||
1602 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1603 | } | ||
1604 | |||
1605 | /* Schedule processing of the channel */ | ||
1606 | efx_schedule_channel_irq(efx->channel[context->index]); | ||
1607 | |||
1608 | return IRQ_HANDLED; | ||
1609 | } | ||
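/* Unlike the legacy path, each MSI/MSI-X vector is registered by the setup
 * code with its own struct efx_msi_context as dev_id, so context->index
 * directly names the channel to schedule and no ISR read is needed to
 * demultiplex interrupt sources.
 */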
1610 | |||
1611 | |||
1612 | /* Set up the RSS indirection table. | ||
1613 | * This maps from the hash value of the packet to an RX queue. | ||
1614 | */ | ||
1615 | void efx_farch_rx_push_indir_table(struct efx_nic *efx) | ||
1616 | { | ||
1617 | size_t i = 0; | ||
1618 | efx_dword_t dword; | ||
1619 | |||
1620 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | ||
1621 | return; | ||
1622 | |||
1623 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != | ||
1624 | FR_BZ_RX_INDIRECTION_TBL_ROWS); | ||
1625 | |||
1626 | for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { | ||
1627 | EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, | ||
1628 | efx->rx_indir_table[i]); | ||
1629 | efx_writed(efx, &dword, | ||
1630 | FR_BZ_RX_INDIRECTION_TBL + | ||
1631 | FR_BZ_RX_INDIRECTION_TBL_STEP * i); | ||
1632 | } | ||
1633 | } | ||
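/* This function only pushes efx->rx_indir_table to the hardware; the table
 * contents are chosen elsewhere (driver defaults or ethtool -X).  A typical
 * default is a round-robin spread, roughly
 * rx_indir_table[i] = i % efx->n_rx_channels, so that hash values are
 * distributed evenly across the RX queues.
 */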
1634 | |||
1635 | /* Looks at available SRAM resources and works out how many queues we | ||
1636 | * can support, and where things like descriptor caches should live. | ||
1637 | * | ||
1638 | * SRAM is split up as follows: | ||
1639 | * 0 buftbl entries for channels | ||
1640 | * efx->vf_buftbl_base buftbl entries for SR-IOV | ||
1641 | * efx->rx_dc_base RX descriptor caches | ||
1642 | * efx->tx_dc_base TX descriptor caches | ||
1643 | */ | ||
1644 | void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) | ||
1645 | { | ||
1646 | unsigned vi_count, buftbl_min; | ||
1647 | |||
1648 | /* Account for the buffer table entries backing the datapath channels | ||
1649 | * and the descriptor caches for those channels. | ||
1650 | */ | ||
1651 | buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + | ||
1652 | efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + | ||
1653 | efx->n_channels * EFX_MAX_EVQ_SIZE) | ||
1654 | * sizeof(efx_qword_t) / EFX_BUF_SIZE); | ||
1655 | vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); | ||
1656 | |||
1657 | #ifdef CONFIG_SFC_SRIOV | ||
1658 | if (efx_sriov_wanted(efx)) { | ||
1659 | unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; | ||
1660 | |||
1661 | efx->vf_buftbl_base = buftbl_min; | ||
1662 | |||
1663 | vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; | ||
1664 | vi_count = max(vi_count, EFX_VI_BASE); | ||
1665 | buftbl_free = (sram_lim_qw - buftbl_min - | ||
1666 | vi_count * vi_dc_entries); | ||
1667 | |||
1668 | entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) * | ||
1669 | efx_vf_size(efx)); | ||
1670 | vf_limit = min(buftbl_free / entries_per_vf, | ||
1671 | (1024U - EFX_VI_BASE) >> efx->vi_scale); | ||
1672 | |||
1673 | if (efx->vf_count > vf_limit) { | ||
1674 | netif_err(efx, probe, efx->net_dev, | ||
1675 | "Reducing VF count from from %d to %d\n", | ||
1676 | efx->vf_count, vf_limit); | ||
1677 | efx->vf_count = vf_limit; | ||
1678 | } | ||
1679 | vi_count += efx->vf_count * efx_vf_size(efx); | ||
1680 | } | ||
1681 | #endif | ||
1682 | |||
1683 | efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; | ||
1684 | efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; | ||
1685 | } | ||
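/* Illustrative arithmetic for buftbl_min above: descriptor rings are backed
 * by the buffer table in EFX_BUF_SIZE-sized pages, so a queue of N 8-byte
 * descriptors needs N * sizeof(efx_qword_t) / EFX_BUF_SIZE entries; for
 * example, 8 entries per 4096-descriptor DMA queue if EFX_BUF_SIZE is 4096.
 * buftbl_min sums that figure over every RX, TX and event queue in the
 * datapath, and the descriptor caches are then packed down from sram_lim_qw,
 * TX immediately below the limit and RX immediately below TX.
 */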
1686 | |||
1687 | u32 efx_farch_fpga_ver(struct efx_nic *efx) | ||
1688 | { | ||
1689 | efx_oword_t altera_build; | ||
1690 | efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); | ||
1691 | return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); | ||
1692 | } | ||
1693 | |||
1694 | void efx_farch_init_common(struct efx_nic *efx) | ||
1695 | { | ||
1696 | efx_oword_t temp; | ||
1697 | |||
1698 | /* Set positions of descriptor caches in SRAM. */ | ||
1699 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); | ||
1700 | efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); | ||
1701 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); | ||
1702 | efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); | ||
1703 | |||
1704 | /* Set TX descriptor cache size. */ | ||
1705 | BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); | ||
1706 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); | ||
1707 | efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); | ||
1708 | |||
1709 | /* Set RX descriptor cache size. Set low watermark to size-8, as | ||
1710 | * this allows most efficient prefetching. | ||
1711 | */ | ||
1712 | BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); | ||
1713 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); | ||
1714 | efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); | ||
1715 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); | ||
1716 | efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); | ||
1717 | |||
1718 | /* Program INT_KER address */ | ||
1719 | EFX_POPULATE_OWORD_2(temp, | ||
1720 | FRF_AZ_NORM_INT_VEC_DIS_KER, | ||
1721 | EFX_INT_MODE_USE_MSI(efx), | ||
1722 | FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); | ||
1723 | efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); | ||
1724 | |||
1725 | if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) | ||
1726 | /* Use an interrupt level unused by event queues */ | ||
1727 | efx->irq_level = 0x1f; | ||
1728 | else | ||
1729 | /* Use a valid MSI-X vector */ | ||
1730 | efx->irq_level = 0; | ||
1731 | |||
1732 | /* Enable all the genuinely fatal interrupts. (They are still | ||
1733 | * masked by the overall interrupt mask, controlled by | ||
1734 | * efx_farch_interrupts()). | ||
1735 | * | ||
1736 | * Note: All other fatal interrupts are enabled | ||
1737 | */ | ||
1738 | EFX_POPULATE_OWORD_3(temp, | ||
1739 | FRF_AZ_ILL_ADR_INT_KER_EN, 1, | ||
1740 | FRF_AZ_RBUF_OWN_INT_KER_EN, 1, | ||
1741 | FRF_AZ_TBUF_OWN_INT_KER_EN, 1); | ||
1742 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | ||
1743 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); | ||
1744 | EFX_INVERT_OWORD(temp); | ||
1745 | efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); | ||
1746 | |||
1747 | efx_farch_rx_push_indir_table(efx); | ||
1748 | |||
1749 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be | ||
1750 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. | ||
1751 | */ | ||
1752 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); | ||
1753 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); | ||
1754 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); | ||
1755 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); | ||
1756 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); | ||
1757 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); | ||
1758 | /* Enable SW_EV to inherit in char driver - assume harmless here */ | ||
1759 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); | ||
1760 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | ||
1761 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); | ||
1762 | /* Disable hardware watchdog which can misfire */ | ||
1763 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); | ||
1764 | /* Squash TX of packets of 16 bytes or less */ | ||
1765 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | ||
1766 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); | ||
1767 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); | ||
1768 | |||
1769 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | ||
1770 | EFX_POPULATE_OWORD_4(temp, | ||
1771 | /* Default values */ | ||
1772 | FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, | ||
1773 | FRF_BZ_TX_PACE_SB_AF, 0xb, | ||
1774 | FRF_BZ_TX_PACE_FB_BASE, 0, | ||
1775 | /* Allow large pace values in the | ||
1776 | * fast bin. */ | ||
1777 | FRF_BZ_TX_PACE_BIN_TH, | ||
1778 | FFE_BZ_TX_PACE_RESERVED); | ||
1779 | efx_writeo(efx, &temp, FR_BZ_TX_PACE); | ||
1780 | } | ||
1781 | } | ||