diff options
Diffstat (limited to 'drivers/net/sfc/net_driver.h')
-rw-r--r-- | drivers/net/sfc/net_driver.h | 1060
1 files changed, 1060 insertions, 0 deletions
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
new file mode 100644
index 00000000000..b8e251a1ee4
--- /dev/null
+++ b/drivers/net/sfc/net_driver.h
@@ -0,0 +1,1060 @@
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2005-2011 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | /* Common definitions for all Efx net driver code */ | ||
12 | |||
13 | #ifndef EFX_NET_DRIVER_H | ||
14 | #define EFX_NET_DRIVER_H | ||
15 | |||
16 | #if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG) | ||
17 | #define DEBUG | ||
18 | #endif | ||
19 | |||
20 | #include <linux/netdevice.h> | ||
21 | #include <linux/etherdevice.h> | ||
22 | #include <linux/ethtool.h> | ||
23 | #include <linux/if_vlan.h> | ||
24 | #include <linux/timer.h> | ||
25 | #include <linux/mdio.h> | ||
26 | #include <linux/list.h> | ||
27 | #include <linux/pci.h> | ||
28 | #include <linux/device.h> | ||
29 | #include <linux/highmem.h> | ||
30 | #include <linux/workqueue.h> | ||
31 | #include <linux/vmalloc.h> | ||
32 | #include <linux/i2c.h> | ||
33 | |||
34 | #include "enum.h" | ||
35 | #include "bitfield.h" | ||
36 | |||
37 | /************************************************************************** | ||
38 | * | ||
39 | * Build definitions | ||
40 | * | ||
41 | **************************************************************************/ | ||
42 | |||
43 | #define EFX_DRIVER_VERSION "3.1" | ||
44 | |||
45 | #ifdef EFX_ENABLE_DEBUG | ||
46 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) | ||
47 | #define EFX_WARN_ON_PARANOID(x) WARN_ON(x) | ||
48 | #else | ||
49 | #define EFX_BUG_ON_PARANOID(x) do {} while (0) | ||
50 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) | ||
51 | #endif | ||
52 | |||
53 | /************************************************************************** | ||
54 | * | ||
55 | * Efx data structures | ||
56 | * | ||
57 | **************************************************************************/ | ||
58 | |||
59 | #define EFX_MAX_CHANNELS 32 | ||
60 | #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS | ||
61 | |||
62 | /* Checksum generation is a per-queue option in hardware, so each | ||
63 | * queue visible to the networking core is backed by two hardware TX | ||
64 | * queues. */ | ||
65 | #define EFX_MAX_TX_TC 2 | ||
66 | #define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS) | ||
67 | #define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */ | ||
68 | #define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */ | ||
69 | #define EFX_TXQ_TYPES 4 | ||
70 | #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) | ||
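/*
 * Editorial note, not part of the original file: with the constants above,
 * a hardware TX queue number encodes both its channel and its type.  For
 * example, assuming the driver numbers queues as
 * channel * EFX_TXQ_TYPES + type, the checksum-offload high-priority queue
 * of TX channel 3 is 3 * 4 + (EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI)
 * = 15, and masking the queue number with the type flags recovers the
 * queue's properties.
 */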
71 | |||
72 | /** | ||
73 | * struct efx_special_buffer - An Efx special buffer | ||
74 | * @addr: CPU base address of the buffer | ||
75 | * @dma_addr: DMA base address of the buffer | ||
76 | * @len: Buffer length, in bytes | ||
77 | * @index: Buffer index within controller's buffer table | ||
78 | * @entries: Number of buffer table entries | ||
79 | * | ||
80 | * Special buffers are used for the event queues and the TX and RX | ||
81 | * descriptor queues for each channel. They are *not* used for the | ||
82 | * actual transmit and receive buffers. | ||
83 | */ | ||
84 | struct efx_special_buffer { | ||
85 | void *addr; | ||
86 | dma_addr_t dma_addr; | ||
87 | unsigned int len; | ||
88 | int index; | ||
89 | int entries; | ||
90 | }; | ||
91 | |||
92 | enum efx_flush_state { | ||
93 | FLUSH_NONE, | ||
94 | FLUSH_PENDING, | ||
95 | FLUSH_FAILED, | ||
96 | FLUSH_DONE, | ||
97 | }; | ||
98 | |||
99 | /** | ||
100 | * struct efx_tx_buffer - An Efx TX buffer | ||
101 | * @skb: The associated socket buffer. | ||
102 | * Set only on the final fragment of a packet; %NULL for all other | ||
103 | * fragments. When this fragment completes, then we can free this | ||
104 | * skb. | ||
105 | * @tsoh: The associated TSO header structure, or %NULL if this | ||
106 | * buffer is not a TSO header. | ||
107 | * @dma_addr: DMA address of the fragment. | ||
108 | * @len: Length of this fragment. | ||
109 | * This field is zero when the queue slot is empty. | ||
110 | * @continuation: True if this fragment is not the end of a packet. | ||
111 | * @unmap_single: True if pci_unmap_single should be used. | ||
112 | * @unmap_len: Length of this fragment to unmap | ||
113 | */ | ||
114 | struct efx_tx_buffer { | ||
115 | const struct sk_buff *skb; | ||
116 | struct efx_tso_header *tsoh; | ||
117 | dma_addr_t dma_addr; | ||
118 | unsigned short len; | ||
119 | bool continuation; | ||
120 | bool unmap_single; | ||
121 | unsigned short unmap_len; | ||
122 | }; | ||
123 | |||
124 | /** | ||
125 | * struct efx_tx_queue - An Efx TX queue | ||
126 | * | ||
127 | * This is a ring buffer of TX fragments. | ||
128 | * Since the TX completion path always executes on the same | ||
129 | * CPU and the xmit path can operate on different CPUs, | ||
130 | * performance is increased by ensuring that the completion | ||
131 | * path and the xmit path operate on different cache lines. | ||
132 | * This is particularly important if the xmit path is always | ||
133 | * executing on one CPU which is different from the completion | ||
134 | * path. There is also a cache line for members which are | ||
135 | * read but not written on the fast path. | ||
136 | * | ||
137 | * @efx: The associated Efx NIC | ||
138 | * @queue: DMA queue number | ||
139 | * @channel: The associated channel | ||
140 | * @core_txq: The networking core TX queue structure | ||
141 | * @buffer: The software buffer ring | ||
142 | * @txd: The hardware descriptor ring | ||
143 | * @ptr_mask: The size of the ring minus 1. | ||
144 | * @initialised: Has hardware queue been initialised? | ||
145 | * @flushed: Used when handling queue flushing | ||
146 | * @read_count: Current read pointer. | ||
147 | * This is the number of buffers that have been removed from both rings. | ||
148 | * @old_write_count: The value of @write_count when last checked. | ||
149 | * This is here for performance reasons. The xmit path will | ||
150 | * only get the up-to-date value of @write_count if this | ||
151 | * variable indicates that the queue is empty. This is to | ||
152 | * avoid cache-line ping-pong between the xmit path and the | ||
153 | * completion path. | ||
154 | * @insert_count: Current insert pointer | ||
155 | * This is the number of buffers that have been added to the | ||
156 | * software ring. | ||
157 | * @write_count: Current write pointer | ||
158 | * This is the number of buffers that have been added to the | ||
159 | * hardware ring. | ||
160 | * @old_read_count: The value of read_count when last checked. | ||
161 | * This is here for performance reasons. The xmit path will | ||
162 | * only get the up-to-date value of read_count if this | ||
163 | * variable indicates that the queue is full. This is to | ||
164 | * avoid cache-line ping-pong between the xmit path and the | ||
165 | * completion path. | ||
166 | * @tso_headers_free: A list of TSO headers allocated for this TX queue | ||
167 | * that are not in use, and so available for new TSO sends. The list | ||
168 | * is protected by the TX queue lock. | ||
169 | * @tso_bursts: Number of times TSO xmit invoked by kernel | ||
170 | * @tso_long_headers: Number of packets with headers too long for standard | ||
171 | * blocks | ||
172 | * @tso_packets: Number of packets via the TSO xmit path | ||
173 | * @pushes: Number of times the TX push feature has been used | ||
174 | * @empty_read_count: If the completion path has seen the queue as empty | ||
175 | * and the transmission path has not yet checked this, the value of | ||
176 | * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. | ||
177 | */ | ||
178 | struct efx_tx_queue { | ||
179 | /* Members which don't change on the fast path */ | ||
180 | struct efx_nic *efx ____cacheline_aligned_in_smp; | ||
181 | unsigned queue; | ||
182 | struct efx_channel *channel; | ||
183 | struct netdev_queue *core_txq; | ||
184 | struct efx_tx_buffer *buffer; | ||
185 | struct efx_special_buffer txd; | ||
186 | unsigned int ptr_mask; | ||
187 | bool initialised; | ||
188 | enum efx_flush_state flushed; | ||
189 | |||
190 | /* Members used mainly on the completion path */ | ||
191 | unsigned int read_count ____cacheline_aligned_in_smp; | ||
192 | unsigned int old_write_count; | ||
193 | |||
194 | /* Members used only on the xmit path */ | ||
195 | unsigned int insert_count ____cacheline_aligned_in_smp; | ||
196 | unsigned int write_count; | ||
197 | unsigned int old_read_count; | ||
198 | struct efx_tso_header *tso_headers_free; | ||
199 | unsigned int tso_bursts; | ||
200 | unsigned int tso_long_headers; | ||
201 | unsigned int tso_packets; | ||
202 | unsigned int pushes; | ||
203 | |||
204 | /* Members shared between paths and sometimes updated */ | ||
205 | unsigned int empty_read_count ____cacheline_aligned_in_smp; | ||
206 | #define EFX_EMPTY_COUNT_VALID 0x80000000 | ||
207 | }; | ||
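/*
 * Editorial sketch, not part of the original file: how the counters above
 * are typically combined.  They increase monotonically; masking with
 * @ptr_mask yields a ring index and subtracting counters yields an
 * occupancy.  The helper names are illustrative only.
 */
static inline unsigned int example_tx_ring_index(const struct efx_tx_queue *tx_queue,
						 unsigned int count)
{
	/* The ring size is a power of two, so ptr_mask wraps the count */
	return count & tx_queue->ptr_mask;
}

static inline unsigned int example_tx_fill_level(const struct efx_tx_queue *tx_queue)
{
	/* Buffers added by the xmit path but not yet freed by completion */
	return tx_queue->insert_count - tx_queue->read_count;
}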
208 | |||
209 | /** | ||
210 | * struct efx_rx_buffer - An Efx RX data buffer | ||
211 | * @dma_addr: DMA base address of the buffer | ||
212 | * @skb: The associated socket buffer, if any. | ||
213 | * If both this and page are %NULL, the buffer slot is currently free. | ||
214 | * @page: The associated page buffer, if any. | ||
215 | * If both this and skb are %NULL, the buffer slot is currently free. | ||
216 | * @len: Buffer length, in bytes. | ||
217 | * @is_page: Indicates if @page is valid. If false, @skb is valid. | ||
218 | */ | ||
219 | struct efx_rx_buffer { | ||
220 | dma_addr_t dma_addr; | ||
221 | union { | ||
222 | struct sk_buff *skb; | ||
223 | struct page *page; | ||
224 | } u; | ||
225 | unsigned int len; | ||
226 | bool is_page; | ||
227 | }; | ||
228 | |||
229 | /** | ||
230 | * struct efx_rx_page_state - Page-based rx buffer state | ||
231 | * | ||
232 | * Inserted at the start of every page allocated for receive buffers. | ||
233 | * Used to facilitate sharing dma mappings between recycled rx buffers | ||
234 | * and those passed up to the kernel. | ||
235 | * | ||
236 | * @refcnt: Number of struct efx_rx_buffer's referencing this page. | ||
237 | * When refcnt falls to zero, the page is unmapped for dma | ||
238 | * @dma_addr: The dma address of this page. | ||
239 | */ | ||
240 | struct efx_rx_page_state { | ||
241 | unsigned refcnt; | ||
242 | dma_addr_t dma_addr; | ||
243 | |||
244 | unsigned int __pad[0] ____cacheline_aligned; | ||
245 | }; | ||
246 | |||
247 | /** | ||
248 | * struct efx_rx_queue - An Efx RX queue | ||
249 | * @efx: The associated Efx NIC | ||
250 | * @buffer: The software buffer ring | ||
251 | * @rxd: The hardware descriptor ring | ||
252 | * @ptr_mask: The size of the ring minus 1. | ||
253 | * @added_count: Number of buffers added to the receive queue. | ||
254 | * @notified_count: Number of buffers given to NIC (<= @added_count). | ||
255 | * @removed_count: Number of buffers removed from the receive queue. | ||
256 | * @max_fill: RX descriptor maximum fill level (<= ring size) | ||
257 | * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill | ||
258 | * (<= @max_fill) | ||
259 | * @fast_fill_limit: The level to which a fast fill will fill | ||
260 | * (@fast_fill_trigger <= @fast_fill_limit <= @max_fill) | ||
261 | * @min_fill: RX descriptor minimum non-zero fill level. | ||
262 | * This records the minimum fill level observed when a ring | ||
263 | * refill was triggered. | ||
264 | * @alloc_page_count: RX allocation strategy counter. | ||
265 | * @alloc_skb_count: RX allocation strategy counter. | ||
266 | * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). | ||
267 | * @flushed: Used when handling queue flushing | ||
268 | */ | ||
269 | struct efx_rx_queue { | ||
270 | struct efx_nic *efx; | ||
271 | struct efx_rx_buffer *buffer; | ||
272 | struct efx_special_buffer rxd; | ||
273 | unsigned int ptr_mask; | ||
274 | |||
275 | int added_count; | ||
276 | int notified_count; | ||
277 | int removed_count; | ||
278 | unsigned int max_fill; | ||
279 | unsigned int fast_fill_trigger; | ||
280 | unsigned int fast_fill_limit; | ||
281 | unsigned int min_fill; | ||
282 | unsigned int min_overfill; | ||
283 | unsigned int alloc_page_count; | ||
284 | unsigned int alloc_skb_count; | ||
285 | struct timer_list slow_fill; | ||
286 | unsigned int slow_fill_count; | ||
287 | |||
288 | enum efx_flush_state flushed; | ||
289 | }; | ||
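/*
 * Editorial sketch, not part of the original file: the RX ring fill level
 * is the difference between buffers added and removed, and a fast fill is
 * worthwhile once it drops below @fast_fill_trigger.  The helper name is
 * illustrative only.
 */
static inline bool example_rx_refill_needed(const struct efx_rx_queue *rx_queue)
{
	unsigned int fill_level = rx_queue->added_count - rx_queue->removed_count;

	return fill_level < rx_queue->fast_fill_trigger;
}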
290 | |||
291 | /** | ||
292 | * struct efx_buffer - An Efx general-purpose buffer | ||
293 | * @addr: host base address of the buffer | ||
294 | * @dma_addr: DMA base address of the buffer | ||
295 | * @len: Buffer length, in bytes | ||
296 | * | ||
297 | * The NIC uses these buffers for its interrupt status registers and | ||
298 | * MAC stats dumps. | ||
299 | */ | ||
300 | struct efx_buffer { | ||
301 | void *addr; | ||
302 | dma_addr_t dma_addr; | ||
303 | unsigned int len; | ||
304 | }; | ||
305 | |||
306 | |||
307 | enum efx_rx_alloc_method { | ||
308 | RX_ALLOC_METHOD_AUTO = 0, | ||
309 | RX_ALLOC_METHOD_SKB = 1, | ||
310 | RX_ALLOC_METHOD_PAGE = 2, | ||
311 | }; | ||
312 | |||
313 | /** | ||
314 | * struct efx_channel - An Efx channel | ||
315 | * | ||
316 | * A channel comprises an event queue, at least one TX queue, at least | ||
317 | * one RX queue, and an associated tasklet for processing the event | ||
318 | * queue. | ||
319 | * | ||
320 | * @efx: Associated Efx NIC | ||
321 | * @channel: Channel instance number | ||
322 | * @enabled: Channel enabled indicator | ||
323 | * @irq: IRQ number (MSI and MSI-X only) | ||
324 | * @irq_moderation: IRQ moderation value (in hardware ticks) | ||
325 | * @napi_dev: Net device used with NAPI | ||
326 | * @napi_str: NAPI control structure | ||
327 | * @work_pending: Is work pending via NAPI? | ||
328 | * @eventq: Event queue buffer | ||
329 | * @eventq_mask: Event queue pointer mask | ||
330 | * @eventq_read_ptr: Event queue read pointer | ||
331 | * @last_eventq_read_ptr: Last event queue read pointer value. | ||
332 | * @irq_count: Number of IRQs since last adaptive moderation decision | ||
333 | * @irq_mod_score: IRQ moderation score | ||
334 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors | ||
335 | * and diagnostic counters | ||
336 | * @rx_alloc_push_pages: RX allocation method currently in use for pushing | ||
337 | * descriptors | ||
338 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors | ||
339 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors | ||
340 | * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors | ||
341 | * @n_rx_mcast_mismatch: Count of unmatched multicast frames | ||
342 | * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors | ||
343 | * @n_rx_overlength: Count of RX_OVERLENGTH errors | ||
344 | * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun | ||
345 | * @rx_queue: RX queue for this channel | ||
346 | * @tx_queue: TX queues for this channel | ||
347 | */ | ||
348 | struct efx_channel { | ||
349 | struct efx_nic *efx; | ||
350 | int channel; | ||
351 | bool enabled; | ||
352 | int irq; | ||
353 | unsigned int irq_moderation; | ||
354 | struct net_device *napi_dev; | ||
355 | struct napi_struct napi_str; | ||
356 | bool work_pending; | ||
357 | struct efx_special_buffer eventq; | ||
358 | unsigned int eventq_mask; | ||
359 | unsigned int eventq_read_ptr; | ||
360 | unsigned int last_eventq_read_ptr; | ||
361 | |||
362 | unsigned int irq_count; | ||
363 | unsigned int irq_mod_score; | ||
364 | #ifdef CONFIG_RFS_ACCEL | ||
365 | unsigned int rfs_filters_added; | ||
366 | #endif | ||
367 | |||
368 | int rx_alloc_level; | ||
369 | int rx_alloc_push_pages; | ||
370 | |||
371 | unsigned n_rx_tobe_disc; | ||
372 | unsigned n_rx_ip_hdr_chksum_err; | ||
373 | unsigned n_rx_tcp_udp_chksum_err; | ||
374 | unsigned n_rx_mcast_mismatch; | ||
375 | unsigned n_rx_frm_trunc; | ||
376 | unsigned n_rx_overlength; | ||
377 | unsigned n_skbuff_leaks; | ||
378 | |||
379 | /* Used to pipeline received packets in order to optimise memory | ||
380 | * access with prefetches. | ||
381 | */ | ||
382 | struct efx_rx_buffer *rx_pkt; | ||
383 | bool rx_pkt_csummed; | ||
384 | |||
385 | struct efx_rx_queue rx_queue; | ||
386 | struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; | ||
387 | }; | ||
388 | |||
389 | enum efx_led_mode { | ||
390 | EFX_LED_OFF = 0, | ||
391 | EFX_LED_ON = 1, | ||
392 | EFX_LED_DEFAULT = 2 | ||
393 | }; | ||
394 | |||
395 | #define STRING_TABLE_LOOKUP(val, member) \ | ||
396 | ((val) < member ## _max) ? member ## _names[val] : "(invalid)" | ||
397 | |||
398 | extern const char *efx_loopback_mode_names[]; | ||
399 | extern const unsigned int efx_loopback_mode_max; | ||
400 | #define LOOPBACK_MODE(efx) \ | ||
401 | STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) | ||
402 | |||
403 | extern const char *efx_reset_type_names[]; | ||
404 | extern const unsigned int efx_reset_type_max; | ||
405 | #define RESET_TYPE(type) \ | ||
406 | STRING_TABLE_LOOKUP(type, efx_reset_type) | ||
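/*
 * Editorial note, not part of the original file: STRING_TABLE_LOOKUP()
 * pastes "member" onto the _names[] array and _max bound declared above,
 * so callers can print symbolic names safely, e.g.
 *
 *	netif_err(efx, drv, efx->net_dev, "resetting (%s)\n",
 *		  RESET_TYPE(method));
 *
 * Out-of-range values print "(invalid)" instead of indexing past the table.
 */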
407 | |||
408 | enum efx_int_mode { | ||
409 | /* Be careful if altering to correct macro below */ | ||
410 | EFX_INT_MODE_MSIX = 0, | ||
411 | EFX_INT_MODE_MSI = 1, | ||
412 | EFX_INT_MODE_LEGACY = 2, | ||
413 | EFX_INT_MODE_MAX /* Insert any new items before this */ | ||
414 | }; | ||
415 | #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) | ||
416 | |||
417 | enum nic_state { | ||
418 | STATE_INIT = 0, | ||
419 | STATE_RUNNING = 1, | ||
420 | STATE_FINI = 2, | ||
421 | STATE_DISABLED = 3, | ||
422 | STATE_MAX, | ||
423 | }; | ||
424 | |||
425 | /* | ||
426 | * Alignment of page-allocated RX buffers | ||
427 | * | ||
428 | * Controls the number of bytes inserted at the start of an RX buffer. | ||
429 | * This is the equivalent of NET_IP_ALIGN [which controls the alignment | ||
430 | * of the skb->head for hardware DMA]. | ||
431 | */ | ||
432 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
433 | #define EFX_PAGE_IP_ALIGN 0 | ||
434 | #else | ||
435 | #define EFX_PAGE_IP_ALIGN NET_IP_ALIGN | ||
436 | #endif | ||
437 | |||
438 | /* | ||
439 | * Alignment of the skb->head which wraps a page-allocated RX buffer | ||
440 | * | ||
441 | * The skb allocated to wrap an rx_buffer can have this alignment. Since | ||
442 | * the data is memcpy'd from the rx_buf, it does not need to be equal to | ||
443 | * EFX_PAGE_IP_ALIGN. | ||
444 | */ | ||
445 | #define EFX_PAGE_SKB_ALIGN 2 | ||
446 | |||
447 | /* Forward declaration */ | ||
448 | struct efx_nic; | ||
449 | |||
450 | /* Pseudo bit-mask flow control field */ | ||
451 | #define EFX_FC_RX FLOW_CTRL_RX | ||
452 | #define EFX_FC_TX FLOW_CTRL_TX | ||
453 | #define EFX_FC_AUTO 4 | ||
454 | |||
455 | /** | ||
456 | * struct efx_link_state - Current state of the link | ||
457 | * @up: Link is up | ||
458 | * @fd: Link is full-duplex | ||
459 | * @fc: Actual flow control flags | ||
460 | * @speed: Link speed (Mbps) | ||
461 | */ | ||
462 | struct efx_link_state { | ||
463 | bool up; | ||
464 | bool fd; | ||
465 | u8 fc; | ||
466 | unsigned int speed; | ||
467 | }; | ||
468 | |||
469 | static inline bool efx_link_state_equal(const struct efx_link_state *left, | ||
470 | const struct efx_link_state *right) | ||
471 | { | ||
472 | return left->up == right->up && left->fd == right->fd && | ||
473 | left->fc == right->fc && left->speed == right->speed; | ||
474 | } | ||
475 | |||
476 | /** | ||
477 | * struct efx_mac_operations - Efx MAC operations table | ||
478 | * @reconfigure: Reconfigure MAC. Serialised by the mac_lock | ||
479 | * @update_stats: Update statistics | ||
480 | * @check_fault: Check fault state. True if fault present. | ||
481 | */ | ||
482 | struct efx_mac_operations { | ||
483 | int (*reconfigure) (struct efx_nic *efx); | ||
484 | void (*update_stats) (struct efx_nic *efx); | ||
485 | bool (*check_fault)(struct efx_nic *efx); | ||
486 | }; | ||
487 | |||
488 | /** | ||
489 | * struct efx_phy_operations - Efx PHY operations table | ||
490 | * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds, | ||
491 | * efx->loopback_modes. | ||
492 | * @init: Initialise PHY | ||
493 | * @fini: Shut down PHY | ||
494 | * @reconfigure: Reconfigure PHY (e.g. for new link parameters) | ||
495 | * @poll: Update @link_state and report whether it changed. | ||
496 | * Serialised by the mac_lock. | ||
497 | * @get_settings: Get ethtool settings. Serialised by the mac_lock. | ||
498 | * @set_settings: Set ethtool settings. Serialised by the mac_lock. | ||
499 | * @set_npage_adv: Set abilities advertised in (Extended) Next Page | ||
500 | * (only needed where AN bit is set in mmds) | ||
501 | * @test_alive: Test that PHY is 'alive' (online) | ||
502 | * @test_name: Get the name of a PHY-specific test/result | ||
503 | * @run_tests: Run tests and record results as appropriate (offline). | ||
504 | * Flags are the ethtool tests flags. | ||
505 | */ | ||
506 | struct efx_phy_operations { | ||
507 | int (*probe) (struct efx_nic *efx); | ||
508 | int (*init) (struct efx_nic *efx); | ||
509 | void (*fini) (struct efx_nic *efx); | ||
510 | void (*remove) (struct efx_nic *efx); | ||
511 | int (*reconfigure) (struct efx_nic *efx); | ||
512 | bool (*poll) (struct efx_nic *efx); | ||
513 | void (*get_settings) (struct efx_nic *efx, | ||
514 | struct ethtool_cmd *ecmd); | ||
515 | int (*set_settings) (struct efx_nic *efx, | ||
516 | struct ethtool_cmd *ecmd); | ||
517 | void (*set_npage_adv) (struct efx_nic *efx, u32); | ||
518 | int (*test_alive) (struct efx_nic *efx); | ||
519 | const char *(*test_name) (struct efx_nic *efx, unsigned int index); | ||
520 | int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); | ||
521 | }; | ||
522 | |||
523 | /** | ||
524 | * enum efx_phy_mode - PHY operating mode flags | ||
525 | * @PHY_MODE_NORMAL: on and should pass traffic | ||
526 | * @PHY_MODE_TX_DISABLED: on with TX disabled | ||
527 | * @PHY_MODE_LOW_POWER: set to low power through MDIO | ||
528 | * @PHY_MODE_OFF: switched off through external control | ||
529 | * @PHY_MODE_SPECIAL: on but will not pass traffic | ||
530 | */ | ||
531 | enum efx_phy_mode { | ||
532 | PHY_MODE_NORMAL = 0, | ||
533 | PHY_MODE_TX_DISABLED = 1, | ||
534 | PHY_MODE_LOW_POWER = 2, | ||
535 | PHY_MODE_OFF = 4, | ||
536 | PHY_MODE_SPECIAL = 8, | ||
537 | }; | ||
538 | |||
539 | static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode) | ||
540 | { | ||
541 | return !!(mode & ~PHY_MODE_TX_DISABLED); | ||
542 | } | ||
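/*
 * Editorial note, not part of the original file: because of the mask above,
 * a PHY in PHY_MODE_NORMAL or PHY_MODE_TX_DISABLED still counts as enabled,
 * whereas any of PHY_MODE_LOW_POWER, PHY_MODE_OFF or PHY_MODE_SPECIAL
 * (alone or combined with other flags) makes efx_phy_mode_disabled()
 * return true.
 */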
543 | |||
544 | /* | ||
545 | * Efx extended statistics | ||
546 | * | ||
547 | * Not all statistics are provided by all supported MACs. The purpose | ||
548 | * of this structure is to contain the raw statistics provided by each | ||
549 | * MAC. | ||
550 | */ | ||
551 | struct efx_mac_stats { | ||
552 | u64 tx_bytes; | ||
553 | u64 tx_good_bytes; | ||
554 | u64 tx_bad_bytes; | ||
555 | unsigned long tx_packets; | ||
556 | unsigned long tx_bad; | ||
557 | unsigned long tx_pause; | ||
558 | unsigned long tx_control; | ||
559 | unsigned long tx_unicast; | ||
560 | unsigned long tx_multicast; | ||
561 | unsigned long tx_broadcast; | ||
562 | unsigned long tx_lt64; | ||
563 | unsigned long tx_64; | ||
564 | unsigned long tx_65_to_127; | ||
565 | unsigned long tx_128_to_255; | ||
566 | unsigned long tx_256_to_511; | ||
567 | unsigned long tx_512_to_1023; | ||
568 | unsigned long tx_1024_to_15xx; | ||
569 | unsigned long tx_15xx_to_jumbo; | ||
570 | unsigned long tx_gtjumbo; | ||
571 | unsigned long tx_collision; | ||
572 | unsigned long tx_single_collision; | ||
573 | unsigned long tx_multiple_collision; | ||
574 | unsigned long tx_excessive_collision; | ||
575 | unsigned long tx_deferred; | ||
576 | unsigned long tx_late_collision; | ||
577 | unsigned long tx_excessive_deferred; | ||
578 | unsigned long tx_non_tcpudp; | ||
579 | unsigned long tx_mac_src_error; | ||
580 | unsigned long tx_ip_src_error; | ||
581 | u64 rx_bytes; | ||
582 | u64 rx_good_bytes; | ||
583 | u64 rx_bad_bytes; | ||
584 | unsigned long rx_packets; | ||
585 | unsigned long rx_good; | ||
586 | unsigned long rx_bad; | ||
587 | unsigned long rx_pause; | ||
588 | unsigned long rx_control; | ||
589 | unsigned long rx_unicast; | ||
590 | unsigned long rx_multicast; | ||
591 | unsigned long rx_broadcast; | ||
592 | unsigned long rx_lt64; | ||
593 | unsigned long rx_64; | ||
594 | unsigned long rx_65_to_127; | ||
595 | unsigned long rx_128_to_255; | ||
596 | unsigned long rx_256_to_511; | ||
597 | unsigned long rx_512_to_1023; | ||
598 | unsigned long rx_1024_to_15xx; | ||
599 | unsigned long rx_15xx_to_jumbo; | ||
600 | unsigned long rx_gtjumbo; | ||
601 | unsigned long rx_bad_lt64; | ||
602 | unsigned long rx_bad_64_to_15xx; | ||
603 | unsigned long rx_bad_15xx_to_jumbo; | ||
604 | unsigned long rx_bad_gtjumbo; | ||
605 | unsigned long rx_overflow; | ||
606 | unsigned long rx_missed; | ||
607 | unsigned long rx_false_carrier; | ||
608 | unsigned long rx_symbol_error; | ||
609 | unsigned long rx_align_error; | ||
610 | unsigned long rx_length_error; | ||
611 | unsigned long rx_internal_error; | ||
612 | unsigned long rx_good_lt64; | ||
613 | }; | ||
614 | |||
615 | /* Number of bits used in a multicast filter hash address */ | ||
616 | #define EFX_MCAST_HASH_BITS 8 | ||
617 | |||
618 | /* Number of (single-bit) entries in a multicast filter hash */ | ||
619 | #define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS) | ||
620 | |||
621 | /* An Efx multicast filter hash */ | ||
622 | union efx_multicast_hash { | ||
623 | u8 byte[EFX_MCAST_HASH_ENTRIES / 8]; | ||
624 | efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; | ||
625 | }; | ||
626 | |||
627 | struct efx_filter_state; | ||
628 | |||
629 | /** | ||
630 | * struct efx_nic - an Efx NIC | ||
631 | * @name: Device name (net device name or bus id before net device registered) | ||
632 | * @pci_dev: The PCI device | ||
633 | * @type: Controller type attributes | ||
634 | * @legacy_irq: IRQ number | ||
635 | * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)? | ||
636 | * @workqueue: Workqueue for port reconfigures and the HW monitor. | ||
637 | * Work items do not hold and must not acquire RTNL. | ||
638 | * @workqueue_name: Name of workqueue | ||
639 | * @reset_work: Scheduled reset workitem | ||
640 | * @membase_phys: Memory BAR value as physical address | ||
641 | * @membase: Memory BAR value | ||
642 | * @interrupt_mode: Interrupt mode | ||
643 | * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues | ||
644 | * @irq_rx_moderation: IRQ moderation time for RX event queues | ||
645 | * @msg_enable: Log message enable flags | ||
646 | * @state: Device state flag. Serialised by the rtnl_lock. | ||
647 | * @reset_pending: Bitmask for pending resets | ||
648 | * @tx_queue: TX DMA queues | ||
649 | * @rx_queue: RX DMA queues | ||
650 | * @channel: Channels | ||
651 | * @channel_name: Names for channels and their IRQs | ||
652 | * @rxq_entries: Size of receive queues requested by user. | ||
653 | * @txq_entries: Size of transmit queues requested by user. | ||
654 | * @next_buffer_table: First available buffer table id | ||
655 | * @n_channels: Number of channels in use | ||
656 | * @n_rx_channels: Number of channels used for RX (= number of RX queues) | ||
657 | * @n_tx_channels: Number of channels used for TX | ||
658 | * @rx_buffer_len: RX buffer length | ||
659 | * @rx_buffer_order: Order (log2) of number of pages for each RX buffer | ||
660 | * @rx_hash_key: Toeplitz hash key for RSS | ||
661 | * @rx_indir_table: Indirection table for RSS | ||
662 | * @int_error_count: Number of internal errors seen recently | ||
663 | * @int_error_expire: Time at which error count will be expired | ||
664 | * @irq_status: Interrupt status buffer | ||
665 | * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 | ||
666 | * @fatal_irq_level: IRQ level (bit number) used for serious errors | ||
667 | * @mtd_list: List of MTDs attached to the NIC | ||
668 | * @nic_data: Hardware dependent state | ||
669 | * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, | ||
670 | * efx_monitor() and efx_reconfigure_port() | ||
671 | * @port_enabled: Port enabled indicator. | ||
672 | * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and | ||
673 | * efx_mac_work() with kernel interfaces. Safe to read under any | ||
674 | * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must | ||
675 | * be held to modify it. | ||
676 | * @port_initialized: Port initialized? | ||
677 | * @net_dev: Operating system network device. Consider holding the rtnl lock | ||
678 | * @stats_buffer: DMA buffer for statistics | ||
679 | * @mac_op: MAC interface | ||
680 | * @phy_type: PHY type | ||
681 | * @phy_op: PHY interface | ||
682 | * @phy_data: PHY private data (including PHY-specific stats) | ||
683 | * @mdio: PHY MDIO interface | ||
684 | * @mdio_bus: PHY MDIO bus ID (only used by Siena) | ||
685 | * @phy_mode: PHY operating mode. Serialised by @mac_lock. | ||
686 | * @link_advertising: Autonegotiation advertising flags | ||
687 | * @link_state: Current state of the link | ||
688 | * @n_link_state_changes: Number of times the link has changed state | ||
689 | * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. | ||
690 | * @multicast_hash: Multicast hash table | ||
691 | * @wanted_fc: Wanted flow control flags | ||
692 | * @mac_work: Work item for changing MAC promiscuity and multicast hash | ||
693 | * @loopback_mode: Loopback status | ||
694 | * @loopback_modes: Supported loopback mode bitmask | ||
695 | * @loopback_selftest: Offline self-test private state | ||
696 | * @monitor_work: Hardware monitor workitem | ||
697 | * @biu_lock: BIU (bus interface unit) lock | ||
698 | * @last_irq_cpu: Last CPU to handle interrupt. | ||
699 | * This register is written with the SMP processor ID whenever an | ||
700 | * interrupt is handled. It is used by efx_nic_test_interrupt() | ||
701 | * to verify that an interrupt has occurred. | ||
702 | * @n_rx_nodesc_drop_cnt: RX no descriptor drop count | ||
703 | * @mac_stats: MAC statistics. These include all statistics the MACs | ||
704 | * can provide. Generic code converts these into a standard | ||
705 | * &struct net_device_stats. | ||
706 | * @stats_lock: Statistics update lock. Serialises statistics fetches | ||
707 | * | ||
708 | * This is stored in the private area of the &struct net_device. | ||
709 | */ | ||
710 | struct efx_nic { | ||
711 | /* The following fields should be written very rarely */ | ||
712 | |||
713 | char name[IFNAMSIZ]; | ||
714 | struct pci_dev *pci_dev; | ||
715 | const struct efx_nic_type *type; | ||
716 | int legacy_irq; | ||
717 | bool legacy_irq_enabled; | ||
718 | struct workqueue_struct *workqueue; | ||
719 | char workqueue_name[16]; | ||
720 | struct work_struct reset_work; | ||
721 | resource_size_t membase_phys; | ||
722 | void __iomem *membase; | ||
723 | |||
724 | enum efx_int_mode interrupt_mode; | ||
725 | bool irq_rx_adaptive; | ||
726 | unsigned int irq_rx_moderation; | ||
727 | u32 msg_enable; | ||
728 | |||
729 | enum nic_state state; | ||
730 | unsigned long reset_pending; | ||
731 | |||
732 | struct efx_channel *channel[EFX_MAX_CHANNELS]; | ||
733 | char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6]; | ||
734 | |||
735 | unsigned rxq_entries; | ||
736 | unsigned txq_entries; | ||
737 | unsigned next_buffer_table; | ||
738 | unsigned n_channels; | ||
739 | unsigned n_rx_channels; | ||
740 | unsigned tx_channel_offset; | ||
741 | unsigned n_tx_channels; | ||
742 | unsigned int rx_buffer_len; | ||
743 | unsigned int rx_buffer_order; | ||
744 | u8 rx_hash_key[40]; | ||
745 | u32 rx_indir_table[128]; | ||
746 | |||
747 | unsigned int_error_count; | ||
748 | unsigned long int_error_expire; | ||
749 | |||
750 | struct efx_buffer irq_status; | ||
751 | unsigned irq_zero_count; | ||
752 | unsigned fatal_irq_level; | ||
753 | |||
754 | #ifdef CONFIG_SFC_MTD | ||
755 | struct list_head mtd_list; | ||
756 | #endif | ||
757 | |||
758 | void *nic_data; | ||
759 | |||
760 | struct mutex mac_lock; | ||
761 | struct work_struct mac_work; | ||
762 | bool port_enabled; | ||
763 | |||
764 | bool port_initialized; | ||
765 | struct net_device *net_dev; | ||
766 | |||
767 | struct efx_buffer stats_buffer; | ||
768 | |||
769 | const struct efx_mac_operations *mac_op; | ||
770 | |||
771 | unsigned int phy_type; | ||
772 | const struct efx_phy_operations *phy_op; | ||
773 | void *phy_data; | ||
774 | struct mdio_if_info mdio; | ||
775 | unsigned int mdio_bus; | ||
776 | enum efx_phy_mode phy_mode; | ||
777 | |||
778 | u32 link_advertising; | ||
779 | struct efx_link_state link_state; | ||
780 | unsigned int n_link_state_changes; | ||
781 | |||
782 | bool promiscuous; | ||
783 | union efx_multicast_hash multicast_hash; | ||
784 | u8 wanted_fc; | ||
785 | |||
786 | atomic_t rx_reset; | ||
787 | enum efx_loopback_mode loopback_mode; | ||
788 | u64 loopback_modes; | ||
789 | |||
790 | void *loopback_selftest; | ||
791 | |||
792 | struct efx_filter_state *filter_state; | ||
793 | |||
794 | /* The following fields may be written more often */ | ||
795 | |||
796 | struct delayed_work monitor_work ____cacheline_aligned_in_smp; | ||
797 | spinlock_t biu_lock; | ||
798 | volatile signed int last_irq_cpu; | ||
799 | unsigned n_rx_nodesc_drop_cnt; | ||
800 | struct efx_mac_stats mac_stats; | ||
801 | spinlock_t stats_lock; | ||
802 | }; | ||
803 | |||
804 | static inline int efx_dev_registered(struct efx_nic *efx) | ||
805 | { | ||
806 | return efx->net_dev->reg_state == NETREG_REGISTERED; | ||
807 | } | ||
808 | |||
809 | /* Net device name, for inclusion in log messages if it has been registered. | ||
810 | * Use efx->name not efx->net_dev->name so that races with (un)registration | ||
811 | * are harmless. | ||
812 | */ | ||
813 | static inline const char *efx_dev_name(struct efx_nic *efx) | ||
814 | { | ||
815 | return efx_dev_registered(efx) ? efx->name : ""; | ||
816 | } | ||
817 | |||
818 | static inline unsigned int efx_port_num(struct efx_nic *efx) | ||
819 | { | ||
820 | return efx->net_dev->dev_id; | ||
821 | } | ||
822 | |||
823 | /** | ||
824 | * struct efx_nic_type - Efx device type definition | ||
825 | * @probe: Probe the controller | ||
826 | * @remove: Free resources allocated by probe() | ||
827 | * @init: Initialise the controller | ||
828 | * @fini: Shut down the controller | ||
829 | * @monitor: Periodic function for polling link state and hardware monitor | ||
830 | * @map_reset_reason: Map ethtool reset reason to a reset method | ||
831 | * @map_reset_flags: Map ethtool reset flags to a reset method, if possible | ||
832 | * @reset: Reset the controller hardware and possibly the PHY. This will | ||
833 | * be called while the controller is uninitialised. | ||
834 | * @probe_port: Probe the MAC and PHY | ||
835 | * @remove_port: Free resources allocated by probe_port() | ||
836 | * @handle_global_event: Handle a "global" event (may be %NULL) | ||
837 | * @prepare_flush: Prepare the hardware for flushing the DMA queues | ||
838 | * @update_stats: Update statistics not provided by event handling | ||
839 | * @start_stats: Start the regular fetching of statistics | ||
840 | * @stop_stats: Stop the regular fetching of statistics | ||
841 | * @set_id_led: Set state of identifying LED or revert to automatic function | ||
842 | * @push_irq_moderation: Apply interrupt moderation value | ||
843 | * @push_multicast_hash: Apply multicast hash table | ||
844 | * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY | ||
845 | * @get_wol: Get WoL configuration from driver state | ||
846 | * @set_wol: Push WoL configuration to the NIC | ||
847 | * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) | ||
848 | * @test_registers: Test read/write functionality of control registers | ||
849 | * @test_nvram: Test validity of NVRAM contents | ||
850 | * @default_mac_ops: efx_mac_operations to set at startup | ||
851 | * @revision: Hardware architecture revision | ||
852 | * @mem_map_size: Memory BAR mapped size | ||
853 | * @txd_ptr_tbl_base: TX descriptor ring base address | ||
854 | * @rxd_ptr_tbl_base: RX descriptor ring base address | ||
855 | * @buf_tbl_base: Buffer table base address | ||
856 | * @evq_ptr_tbl_base: Event queue pointer table base address | ||
857 | * @evq_rptr_tbl_base: Event queue read-pointer table base address | ||
858 | * @max_dma_mask: Maximum possible DMA mask | ||
859 | * @rx_buffer_hash_size: Size of hash at start of RX buffer | ||
860 | * @rx_buffer_padding: Size of padding at end of RX buffer | ||
861 | * @max_interrupt_mode: Highest capability interrupt mode supported | ||
862 | * from &enum efx_int_mode. | ||
863 | * @phys_addr_channels: Number of channels with physically addressed | ||
864 | * descriptors | ||
865 | * @tx_dc_base: Base address in SRAM of TX queue descriptor caches | ||
866 | * @rx_dc_base: Base address in SRAM of RX queue descriptor caches | ||
867 | * @offload_features: net_device feature flags for protocol offload | ||
868 | * features implemented in hardware | ||
869 | */ | ||
870 | struct efx_nic_type { | ||
871 | int (*probe)(struct efx_nic *efx); | ||
872 | void (*remove)(struct efx_nic *efx); | ||
873 | int (*init)(struct efx_nic *efx); | ||
874 | void (*fini)(struct efx_nic *efx); | ||
875 | void (*monitor)(struct efx_nic *efx); | ||
876 | enum reset_type (*map_reset_reason)(enum reset_type reason); | ||
877 | int (*map_reset_flags)(u32 *flags); | ||
878 | int (*reset)(struct efx_nic *efx, enum reset_type method); | ||
879 | int (*probe_port)(struct efx_nic *efx); | ||
880 | void (*remove_port)(struct efx_nic *efx); | ||
881 | bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); | ||
882 | void (*prepare_flush)(struct efx_nic *efx); | ||
883 | void (*update_stats)(struct efx_nic *efx); | ||
884 | void (*start_stats)(struct efx_nic *efx); | ||
885 | void (*stop_stats)(struct efx_nic *efx); | ||
886 | void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); | ||
887 | void (*push_irq_moderation)(struct efx_channel *channel); | ||
888 | void (*push_multicast_hash)(struct efx_nic *efx); | ||
889 | int (*reconfigure_port)(struct efx_nic *efx); | ||
890 | void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); | ||
891 | int (*set_wol)(struct efx_nic *efx, u32 type); | ||
892 | void (*resume_wol)(struct efx_nic *efx); | ||
893 | int (*test_registers)(struct efx_nic *efx); | ||
894 | int (*test_nvram)(struct efx_nic *efx); | ||
895 | const struct efx_mac_operations *default_mac_ops; | ||
896 | |||
897 | int revision; | ||
898 | unsigned int mem_map_size; | ||
899 | unsigned int txd_ptr_tbl_base; | ||
900 | unsigned int rxd_ptr_tbl_base; | ||
901 | unsigned int buf_tbl_base; | ||
902 | unsigned int evq_ptr_tbl_base; | ||
903 | unsigned int evq_rptr_tbl_base; | ||
904 | u64 max_dma_mask; | ||
905 | unsigned int rx_buffer_hash_size; | ||
906 | unsigned int rx_buffer_padding; | ||
907 | unsigned int max_interrupt_mode; | ||
908 | unsigned int phys_addr_channels; | ||
909 | unsigned int tx_dc_base; | ||
910 | unsigned int rx_dc_base; | ||
911 | u32 offload_features; | ||
912 | }; | ||
913 | |||
914 | /************************************************************************** | ||
915 | * | ||
916 | * Prototypes and inline functions | ||
917 | * | ||
918 | *************************************************************************/ | ||
919 | |||
920 | static inline struct efx_channel * | ||
921 | efx_get_channel(struct efx_nic *efx, unsigned index) | ||
922 | { | ||
923 | EFX_BUG_ON_PARANOID(index >= efx->n_channels); | ||
924 | return efx->channel[index]; | ||
925 | } | ||
926 | |||
927 | /* Iterate over all used channels */ | ||
928 | #define efx_for_each_channel(_channel, _efx) \ | ||
929 | for (_channel = (_efx)->channel[0]; \ | ||
930 | _channel; \ | ||
931 | _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ | ||
932 | (_efx)->channel[_channel->channel + 1] : NULL) | ||
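/*
 * Editorial sketch, not part of the original file: typical use of the
 * iterator above.  The function name is illustrative only.
 */
static inline void example_log_channel_irqs(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		netif_dbg(efx, intr, efx->net_dev, "channel %d uses IRQ %d\n",
			  channel->channel, channel->irq);
}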
933 | |||
934 | static inline struct efx_tx_queue * | ||
935 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) | ||
936 | { | ||
937 | EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || | ||
938 | type >= EFX_TXQ_TYPES); | ||
939 | return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; | ||
940 | } | ||
941 | |||
942 | static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) | ||
943 | { | ||
944 | return channel->channel - channel->efx->tx_channel_offset < | ||
945 | channel->efx->n_tx_channels; | ||
946 | } | ||
947 | |||
948 | static inline struct efx_tx_queue * | ||
949 | efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) | ||
950 | { | ||
951 | EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) || | ||
952 | type >= EFX_TXQ_TYPES); | ||
953 | return &channel->tx_queue[type]; | ||
954 | } | ||
955 | |||
956 | static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue) | ||
957 | { | ||
958 | return !(tx_queue->efx->net_dev->num_tc < 2 && | ||
959 | tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI); | ||
960 | } | ||
961 | |||
962 | /* Iterate over all TX queues belonging to a channel */ | ||
963 | #define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ | ||
964 | if (!efx_channel_has_tx_queues(_channel)) \ | ||
965 | ; \ | ||
966 | else \ | ||
967 | for (_tx_queue = (_channel)->tx_queue; \ | ||
968 | _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \ | ||
969 | efx_tx_queue_used(_tx_queue); \ | ||
970 | _tx_queue++) | ||
971 | |||
972 | /* Iterate over all possible TX queues belonging to a channel */ | ||
973 | #define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \ | ||
974 | for (_tx_queue = (_channel)->tx_queue; \ | ||
975 | _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ | ||
976 | _tx_queue++) | ||
977 | |||
978 | static inline struct efx_rx_queue * | ||
979 | efx_get_rx_queue(struct efx_nic *efx, unsigned index) | ||
980 | { | ||
981 | EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels); | ||
982 | return &efx->channel[index]->rx_queue; | ||
983 | } | ||
984 | |||
985 | static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) | ||
986 | { | ||
987 | return channel->channel < channel->efx->n_rx_channels; | ||
988 | } | ||
989 | |||
990 | static inline struct efx_rx_queue * | ||
991 | efx_channel_get_rx_queue(struct efx_channel *channel) | ||
992 | { | ||
993 | EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel)); | ||
994 | return &channel->rx_queue; | ||
995 | } | ||
996 | |||
997 | /* Iterate over all RX queues belonging to a channel */ | ||
998 | #define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ | ||
999 | if (!efx_channel_has_rx_queue(_channel)) \ | ||
1000 | ; \ | ||
1001 | else \ | ||
1002 | for (_rx_queue = &(_channel)->rx_queue; \ | ||
1003 | _rx_queue; \ | ||
1004 | _rx_queue = NULL) | ||
1005 | |||
1006 | static inline struct efx_channel * | ||
1007 | efx_rx_queue_channel(struct efx_rx_queue *rx_queue) | ||
1008 | { | ||
1009 | return container_of(rx_queue, struct efx_channel, rx_queue); | ||
1010 | } | ||
1011 | |||
1012 | static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue) | ||
1013 | { | ||
1014 | return efx_rx_queue_channel(rx_queue)->channel; | ||
1015 | } | ||
1016 | |||
1017 | /* Returns a pointer to the specified receive buffer in the RX | ||
1018 | * descriptor queue. | ||
1019 | */ | ||
1020 | static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, | ||
1021 | unsigned int index) | ||
1022 | { | ||
1023 | return &rx_queue->buffer[index]; | ||
1024 | } | ||
1025 | |||
1026 | /* Set bit in a little-endian bitfield */ | ||
1027 | static inline void set_bit_le(unsigned nr, unsigned char *addr) | ||
1028 | { | ||
1029 | addr[nr / 8] |= (1 << (nr % 8)); | ||
1030 | } | ||
1031 | |||
1032 | /* Clear bit in a little-endian bitfield */ | ||
1033 | static inline void clear_bit_le(unsigned nr, unsigned char *addr) | ||
1034 | { | ||
1035 | addr[nr / 8] &= ~(1 << (nr % 8)); | ||
1036 | } | ||
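/*
 * Editorial sketch, not part of the original file: these helpers are how
 * &union efx_multicast_hash is filled in.  A bit index is derived from the
 * low EFX_MCAST_HASH_BITS bits of a CRC over the MAC address;
 * ether_crc_le() from <linux/crc32.h> is assumed here, and the function
 * name is illustrative only.
 */
static inline void example_mcast_hash_set(union efx_multicast_hash *mc_hash,
					  const u8 *mac_addr)
{
	u32 crc = ether_crc_le(ETH_ALEN, mac_addr);
	unsigned int bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);

	set_bit_le(bit, mc_hash->byte);
}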
1037 | |||
1038 | |||
1039 | /** | ||
1040 | * EFX_MAX_FRAME_LEN - calculate maximum frame length | ||
1041 | * | ||
1042 | * This calculates the maximum frame length that will be used for a | ||
1043 | * given MTU. The frame length will be equal to the MTU plus a | ||
1044 | * constant amount of header space and padding. This is the quantity | ||
1045 | * that the net driver will program into the MAC as the maximum frame | ||
1046 | * length. | ||
1047 | * | ||
1048 | * The 10G MAC requires 8-byte alignment on the frame | ||
1049 | * length, so we round up to the nearest 8. | ||
1050 | * | ||
1051 | * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an | ||
1052 | * XGMII cycle). If the frame length reaches the maximum value in the | ||
1053 | * same cycle, the XMAC can miss the IPG altogether. We work around | ||
1054 | * this by adding a further 16 bytes. | ||
1055 | */ | ||
1056 | #define EFX_MAX_FRAME_LEN(mtu) \ | ||
1057 | ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16) | ||
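/*
 * Editorial note, not part of the original file: worked example for the
 * standard 1500-byte MTU:
 *	1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (FCS) = 1522
 *	rounded up to a multiple of 8                  = 1528
 *	plus the 16-byte IPG workaround                = 1544
 * so EFX_MAX_FRAME_LEN(1500) == 1544.
 */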
1058 | |||
1059 | |||
1060 | #endif /* EFX_NET_DRIVER_H */ | ||