Diffstat (limited to 'drivers/net/sfc/net_driver.h')
-rw-r--r--  drivers/net/sfc/net_driver.h | 299
1 file changed, 185 insertions(+), 114 deletions(-)
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 64e7caa4bbb5..e8d5f03a89fe 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2005-2009 Solarflare Communications Inc. | 4 | * Copyright 2005-2011 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/device.h> | 29 | #include <linux/device.h> |
30 | #include <linux/highmem.h> | 30 | #include <linux/highmem.h> |
31 | #include <linux/workqueue.h> | 31 | #include <linux/workqueue.h> |
32 | #include <linux/vmalloc.h> | ||
32 | #include <linux/i2c.h> | 33 | #include <linux/i2c.h> |
33 | 34 | ||
34 | #include "enum.h" | 35 | #include "enum.h" |
@@ -40,7 +41,7 @@ | |||
40 | * | 41 | * |
41 | **************************************************************************/ | 42 | **************************************************************************/ |
42 | 43 | ||
43 | #define EFX_DRIVER_VERSION "3.0" | 44 | #define EFX_DRIVER_VERSION "3.1" |
44 | 45 | ||
45 | #ifdef EFX_ENABLE_DEBUG | 46 | #ifdef EFX_ENABLE_DEBUG |
46 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) | 47 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) |
@@ -62,10 +63,12 @@ | |||
62 | /* Checksum generation is a per-queue option in hardware, so each | 63 | /* Checksum generation is a per-queue option in hardware, so each |
63 | * queue visible to the networking core is backed by two hardware TX | 64 | * queue visible to the networking core is backed by two hardware TX |
64 | * queues. */ | 65 | * queues. */ |
65 | #define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS | 66 | #define EFX_MAX_TX_TC 2 |
66 | #define EFX_TXQ_TYPE_OFFLOAD 1 | 67 | #define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS) |
67 | #define EFX_TXQ_TYPES 2 | 68 | #define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */ |
68 | #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES) | 69 | #define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */ |
70 | #define EFX_TXQ_TYPES 4 | ||
71 | #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) | ||
69 | 72 | ||
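[Editor's note] The TX queue macros above change meaning in this hunk: each channel now owns EFX_TXQ_TYPES (4) hardware TX queues, and the per-channel queue type is a bitmask of the checksum-offload and high-priority flags. The sketch below models that numbering. It is illustrative only: the value of EFX_MAX_CHANNELS and the txq_number() helper are assumptions, not driver code.

/* Illustrative sketch of the new TX queue numbering; not driver code.
 * EFX_MAX_CHANNELS's value and the txq_number() helper are assumptions. */
#include <stdio.h>

#define EFX_MAX_CHANNELS     32                 /* assumed for the demo */
#define EFX_MAX_TX_TC        2
#define EFX_TXQ_TYPE_OFFLOAD 1                  /* flag: checksum offload */
#define EFX_TXQ_TYPE_HIGHPRI 2                  /* flag: high-priority TC */
#define EFX_TXQ_TYPES        4                  /* every flag combination */
#define EFX_MAX_TX_QUEUES    (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)

/* hypothetical helper: global queue number from channel index + type flags */
static unsigned int txq_number(unsigned int channel, unsigned int type)
{
        return channel * EFX_TXQ_TYPES + type;
}

int main(void)
{
        unsigned int type = EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI;

        /* checksummed, high-priority traffic on channel 3 */
        printf("queue %u of %u (type flags %u)\n",
               txq_number(3, type), EFX_MAX_TX_QUEUES, type);
        return 0;
}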
70 | /** | 73 | /** |
71 | * struct efx_special_buffer - An Efx special buffer | 74 | * struct efx_special_buffer - An Efx special buffer |
@@ -135,13 +138,20 @@ struct efx_tx_buffer { | |||
135 | * @efx: The associated Efx NIC | 138 | * @efx: The associated Efx NIC |
136 | * @queue: DMA queue number | 139 | * @queue: DMA queue number |
137 | * @channel: The associated channel | 140 | * @channel: The associated channel |
141 | * @core_txq: The networking core TX queue structure | ||
138 | * @buffer: The software buffer ring | 142 | * @buffer: The software buffer ring |
139 | * @txd: The hardware descriptor ring | 143 | * @txd: The hardware descriptor ring |
144 | * @ptr_mask: The size of the ring minus 1. | ||
145 | * @initialised: Has hardware queue been initialised? | ||
140 | * @flushed: Used when handling queue flushing | 146 | * @flushed: Used when handling queue flushing |
141 | * @read_count: Current read pointer. | 147 | * @read_count: Current read pointer. |
142 | * This is the number of buffers that have been removed from both rings. | 148 | * This is the number of buffers that have been removed from both rings. |
143 | * @stopped: Stopped count. | 149 | * @old_write_count: The value of @write_count when last checked. |
144 | * Set if this TX queue is currently stopping its port. | 150 | * This is here for performance reasons. The xmit path will |
151 | * only get the up-to-date value of @write_count if this | ||
152 | * variable indicates that the queue is empty. This is to | ||
153 | * avoid cache-line ping-pong between the xmit path and the | ||
154 | * completion path. | ||
145 | * @insert_count: Current insert pointer | 155 | * @insert_count: Current insert pointer |
146 | * This is the number of buffers that have been added to the | 156 | * This is the number of buffers that have been added to the |
147 | * software ring. | 157 | * software ring. |
@@ -161,20 +171,26 @@ struct efx_tx_buffer { | |||
161 | * @tso_long_headers: Number of packets with headers too long for standard | 171 | * @tso_long_headers: Number of packets with headers too long for standard |
162 | * blocks | 172 | * blocks |
163 | * @tso_packets: Number of packets via the TSO xmit path | 173 | * @tso_packets: Number of packets via the TSO xmit path |
174 | * @pushes: Number of times the TX push feature has been used | ||
175 | * @empty_read_count: If the completion path has seen the queue as empty | ||
176 | * and the transmission path has not yet checked this, the value of | ||
177 | * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. | ||
164 | */ | 178 | */ |
165 | struct efx_tx_queue { | 179 | struct efx_tx_queue { |
166 | /* Members which don't change on the fast path */ | 180 | /* Members which don't change on the fast path */ |
167 | struct efx_nic *efx ____cacheline_aligned_in_smp; | 181 | struct efx_nic *efx ____cacheline_aligned_in_smp; |
168 | unsigned queue; | 182 | unsigned queue; |
169 | struct efx_channel *channel; | 183 | struct efx_channel *channel; |
170 | struct efx_nic *nic; | 184 | struct netdev_queue *core_txq; |
171 | struct efx_tx_buffer *buffer; | 185 | struct efx_tx_buffer *buffer; |
172 | struct efx_special_buffer txd; | 186 | struct efx_special_buffer txd; |
187 | unsigned int ptr_mask; | ||
188 | bool initialised; | ||
173 | enum efx_flush_state flushed; | 189 | enum efx_flush_state flushed; |
174 | 190 | ||
175 | /* Members used mainly on the completion path */ | 191 | /* Members used mainly on the completion path */ |
176 | unsigned int read_count ____cacheline_aligned_in_smp; | 192 | unsigned int read_count ____cacheline_aligned_in_smp; |
177 | int stopped; | 193 | unsigned int old_write_count; |
178 | 194 | ||
179 | /* Members used only on the xmit path */ | 195 | /* Members used only on the xmit path */ |
180 | unsigned int insert_count ____cacheline_aligned_in_smp; | 196 | unsigned int insert_count ____cacheline_aligned_in_smp; |
@@ -184,6 +200,11 @@ struct efx_tx_queue { | |||
184 | unsigned int tso_bursts; | 200 | unsigned int tso_bursts; |
185 | unsigned int tso_long_headers; | 201 | unsigned int tso_long_headers; |
186 | unsigned int tso_packets; | 202 | unsigned int tso_packets; |
203 | unsigned int pushes; | ||
204 | |||
205 | /* Members shared between paths and sometimes updated */ | ||
206 | unsigned int empty_read_count ____cacheline_aligned_in_smp; | ||
207 | #define EFX_EMPTY_COUNT_VALID 0x80000000 | ||
187 | }; | 208 | }; |
188 | 209 | ||
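[Editor's note] The @old_write_count and @empty_read_count comments above describe the scheme that replaces the old per-queue @stopped counter: the completion path only re-reads the xmit path's write pointer once it has caught up with its cached copy, and publishes a one-shot "queue was empty" marker that the xmit path consumes. A minimal userspace model of that handshake follows; the struct and functions are simplified stand-ins, not the driver's tx.c code, and the memory barriers the real code needs are omitted in this single-threaded demo.

/* Simplified model of the old_write_count/empty_read_count handshake. */
#include <stdio.h>

#define EFX_EMPTY_COUNT_VALID 0x80000000u

struct txq_model {
        /* completion path */
        unsigned int read_count;
        unsigned int old_write_count;   /* cached copy of write_count */
        /* xmit path */
        unsigned int write_count;
        /* shared, written only when the queue drains */
        unsigned int empty_read_count;
};

/* completion path: reap descriptors, note whether the queue drained */
static void model_complete(struct txq_model *q, unsigned int reaped)
{
        q->read_count += reaped;
        /* only touch write_count once we catch up with the cached value,
         * keeping this path off the xmit path's cache line most of the time */
        if ((int)(q->read_count - q->old_write_count) >= 0) {
                q->old_write_count = q->write_count;
                if (q->read_count == q->old_write_count)
                        q->empty_read_count =
                                q->read_count | EFX_EMPTY_COUNT_VALID;
        }
}

/* xmit path: returns nonzero if the descriptor may be pushed directly,
 * i.e. the completion path marked the queue empty since our last check */
static int model_xmit(struct txq_model *q)
{
        unsigned int empty = q->empty_read_count;

        q->write_count++;
        if (!empty)
                return 0;
        q->empty_read_count = 0;
        return ((empty ^ (q->write_count - 1)) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

int main(void)
{
        /* assume a freshly initialised queue starts out marked empty */
        struct txq_model q = { .empty_read_count = EFX_EMPTY_COUNT_VALID };

        printf("first packet pushed: %d\n", model_xmit(&q));
        model_complete(&q, 1);
        printf("next packet pushed:  %d\n", model_xmit(&q));
        return 0;
}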
189 | /** | 210 | /** |
@@ -193,15 +214,17 @@ struct efx_tx_queue { | |||
193 | * If both this and page are %NULL, the buffer slot is currently free. | 214 | * If both this and page are %NULL, the buffer slot is currently free. |
194 | * @page: The associated page buffer, if any. | 215 | * @page: The associated page buffer, if any. |
195 | * If both this and skb are %NULL, the buffer slot is currently free. | 216 | * If both this and skb are %NULL, the buffer slot is currently free. |
196 | * @data: Pointer to ethernet header | ||
197 | * @len: Buffer length, in bytes. | 217 | * @len: Buffer length, in bytes. |
218 | * @is_page: Indicates if @page is valid. If false, @skb is valid. | ||
198 | */ | 219 | */ |
199 | struct efx_rx_buffer { | 220 | struct efx_rx_buffer { |
200 | dma_addr_t dma_addr; | 221 | dma_addr_t dma_addr; |
201 | struct sk_buff *skb; | 222 | union { |
202 | struct page *page; | 223 | struct sk_buff *skb; |
203 | char *data; | 224 | struct page *page; |
225 | } u; | ||
204 | unsigned int len; | 226 | unsigned int len; |
227 | bool is_page; | ||
205 | }; | 228 | }; |
206 | 229 | ||
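[Editor's note] With @data gone and @skb/@page folded into a union, @is_page becomes the only way to tell which member of struct efx_rx_buffer is live. A compilable toy version of the discriminated union is below; sk_buff and page are empty stand-ins for the kernel types.

/* Toy version of the new discriminated union; not driver code. */
#include <stdbool.h>
#include <stdio.h>

struct sk_buff { int placeholder; };
struct page    { int placeholder; };

struct rx_buffer_model {
        union {
                struct sk_buff *skb;
                struct page *page;
        } u;
        unsigned int len;
        bool is_page;           /* false => u.skb is valid, true => u.page */
};

int main(void)
{
        static struct page pg;
        struct rx_buffer_model buf = {
                .u.page = &pg, .len = 1536, .is_page = true,
        };

        /* consumers must test is_page before touching either union member */
        printf("%s-backed buffer, %u bytes\n",
               buf.is_page ? "page" : "skb", buf.len);
        return 0;
}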
207 | /** | 230 | /** |
@@ -225,10 +248,9 @@ struct efx_rx_page_state { | |||
225 | /** | 248 | /** |
226 | * struct efx_rx_queue - An Efx RX queue | 249 | * struct efx_rx_queue - An Efx RX queue |
227 | * @efx: The associated Efx NIC | 250 | * @efx: The associated Efx NIC |
228 | * @queue: DMA queue number | ||
229 | * @channel: The associated channel | ||
230 | * @buffer: The software buffer ring | 251 | * @buffer: The software buffer ring |
231 | * @rxd: The hardware descriptor ring | 252 | * @rxd: The hardware descriptor ring |
253 | * @ptr_mask: The size of the ring minus 1. | ||
232 | * @added_count: Number of buffers added to the receive queue. | 254 | * @added_count: Number of buffers added to the receive queue. |
233 | * @notified_count: Number of buffers given to NIC (<= @added_count). | 255 | * @notified_count: Number of buffers given to NIC (<= @added_count). |
234 | * @removed_count: Number of buffers removed from the receive queue. | 256 | * @removed_count: Number of buffers removed from the receive queue. |
@@ -240,9 +262,6 @@ struct efx_rx_page_state { | |||
240 | * @min_fill: RX descriptor minimum non-zero fill level. | 262 | * @min_fill: RX descriptor minimum non-zero fill level. |
241 | * This records the minimum fill level observed when a ring | 263 | * This records the minimum fill level observed when a ring |
242 | * refill was triggered. | 264 | * refill was triggered. |
243 | * @min_overfill: RX descriptor minimum overflow fill level. | ||
244 | * This records the minimum fill level at which RX queue | ||
245 | * overflow was observed. It should never be set. | ||
246 | * @alloc_page_count: RX allocation strategy counter. | 265 | * @alloc_page_count: RX allocation strategy counter. |
247 | * @alloc_skb_count: RX allocation strategy counter. | 266 | * @alloc_skb_count: RX allocation strategy counter. |
248 | * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). | 267 | * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). |
@@ -250,10 +269,9 @@ struct efx_rx_page_state { | |||
250 | */ | 269 | */ |
251 | struct efx_rx_queue { | 270 | struct efx_rx_queue { |
252 | struct efx_nic *efx; | 271 | struct efx_nic *efx; |
253 | int queue; | ||
254 | struct efx_channel *channel; | ||
255 | struct efx_rx_buffer *buffer; | 272 | struct efx_rx_buffer *buffer; |
256 | struct efx_special_buffer rxd; | 273 | struct efx_special_buffer rxd; |
274 | unsigned int ptr_mask; | ||
257 | 275 | ||
258 | int added_count; | 276 | int added_count; |
259 | int notified_count; | 277 | int notified_count; |
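[Editor's note] The new @ptr_mask fields (ring size minus one, here and in the TX and event queues) let the driver wrap its free-running counters with a single AND instead of a modulo, which is why the per-queue sizes must be powers of two. A standalone illustration, with an arbitrary ring size chosen for the demo:

/* Illustrative only: how a ptr_mask turns a free-running counter into a
 * ring slot. */
#include <stdio.h>

int main(void)
{
        unsigned int entries  = 512;            /* must be a power of two */
        unsigned int ptr_mask = entries - 1;    /* as stored in each queue */
        unsigned int added_count = 1030;        /* never wrapped explicitly */

        printf("buffer %u lives in ring slot %u\n",
               added_count, added_count & ptr_mask);
        return 0;
}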
@@ -302,18 +320,16 @@ enum efx_rx_alloc_method { | |||
302 | * | 320 | * |
303 | * @efx: Associated Efx NIC | 321 | * @efx: Associated Efx NIC |
304 | * @channel: Channel instance number | 322 | * @channel: Channel instance number |
305 | * @name: Name for channel and IRQ | ||
306 | * @enabled: Channel enabled indicator | 323 | * @enabled: Channel enabled indicator |
307 | * @irq: IRQ number (MSI and MSI-X only) | 324 | * @irq: IRQ number (MSI and MSI-X only) |
308 | * @irq_moderation: IRQ moderation value (in hardware ticks) | 325 | * @irq_moderation: IRQ moderation value (in hardware ticks) |
309 | * @napi_dev: Net device used with NAPI | 326 | * @napi_dev: Net device used with NAPI |
310 | * @napi_str: NAPI control structure | 327 | * @napi_str: NAPI control structure |
311 | * @reset_work: Scheduled reset work thread | ||
312 | * @work_pending: Is work pending via NAPI? | 328 | * @work_pending: Is work pending via NAPI? |
313 | * @eventq: Event queue buffer | 329 | * @eventq: Event queue buffer |
330 | * @eventq_mask: Event queue pointer mask | ||
314 | * @eventq_read_ptr: Event queue read pointer | 331 | * @eventq_read_ptr: Event queue read pointer |
315 | * @last_eventq_read_ptr: Last event queue read pointer value. | 332 | * @last_eventq_read_ptr: Last event queue read pointer value. |
316 | * @magic_count: Event queue test event count | ||
317 | * @irq_count: Number of IRQs since last adaptive moderation decision | 333 | * @irq_count: Number of IRQs since last adaptive moderation decision |
318 | * @irq_mod_score: IRQ moderation score | 334 | * @irq_mod_score: IRQ moderation score |
319 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors | 335 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors |
@@ -327,14 +343,12 @@ enum efx_rx_alloc_method { | |||
327 | * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors | 343 | * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors |
328 | * @n_rx_overlength: Count of RX_OVERLENGTH errors | 344 | * @n_rx_overlength: Count of RX_OVERLENGTH errors |
329 | * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun | 345 | * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun |
330 | * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX | 346 | * @rx_queue: RX queue for this channel |
331 | * @tx_stop_count: Core TX queue stop count | 347 | * @tx_queue: TX queues for this channel |
332 | * @tx_stop_lock: Core TX queue stop lock | ||
333 | */ | 348 | */ |
334 | struct efx_channel { | 349 | struct efx_channel { |
335 | struct efx_nic *efx; | 350 | struct efx_nic *efx; |
336 | int channel; | 351 | int channel; |
337 | char name[IFNAMSIZ + 6]; | ||
338 | bool enabled; | 352 | bool enabled; |
339 | int irq; | 353 | int irq; |
340 | unsigned int irq_moderation; | 354 | unsigned int irq_moderation; |
@@ -342,12 +356,15 @@ struct efx_channel { | |||
342 | struct napi_struct napi_str; | 356 | struct napi_struct napi_str; |
343 | bool work_pending; | 357 | bool work_pending; |
344 | struct efx_special_buffer eventq; | 358 | struct efx_special_buffer eventq; |
359 | unsigned int eventq_mask; | ||
345 | unsigned int eventq_read_ptr; | 360 | unsigned int eventq_read_ptr; |
346 | unsigned int last_eventq_read_ptr; | 361 | unsigned int last_eventq_read_ptr; |
347 | unsigned int magic_count; | ||
348 | 362 | ||
349 | unsigned int irq_count; | 363 | unsigned int irq_count; |
350 | unsigned int irq_mod_score; | 364 | unsigned int irq_mod_score; |
365 | #ifdef CONFIG_RFS_ACCEL | ||
366 | unsigned int rfs_filters_added; | ||
367 | #endif | ||
351 | 368 | ||
352 | int rx_alloc_level; | 369 | int rx_alloc_level; |
353 | int rx_alloc_push_pages; | 370 | int rx_alloc_push_pages; |
@@ -366,9 +383,8 @@ struct efx_channel { | |||
366 | struct efx_rx_buffer *rx_pkt; | 383 | struct efx_rx_buffer *rx_pkt; |
367 | bool rx_pkt_csummed; | 384 | bool rx_pkt_csummed; |
368 | 385 | ||
369 | struct efx_tx_queue *tx_queue; | 386 | struct efx_rx_queue rx_queue; |
370 | atomic_t tx_stop_count; | 387 | struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; |
371 | spinlock_t tx_stop_lock; | ||
372 | }; | 388 | }; |
373 | 389 | ||
374 | enum efx_led_mode { | 390 | enum efx_led_mode { |
@@ -385,11 +401,6 @@ extern const unsigned int efx_loopback_mode_max; | |||
385 | #define LOOPBACK_MODE(efx) \ | 401 | #define LOOPBACK_MODE(efx) \ |
386 | STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) | 402 | STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) |
387 | 403 | ||
388 | extern const char *efx_interrupt_mode_names[]; | ||
389 | extern const unsigned int efx_interrupt_mode_max; | ||
390 | #define INT_MODE(efx) \ | ||
391 | STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode) | ||
392 | |||
393 | extern const char *efx_reset_type_names[]; | 404 | extern const char *efx_reset_type_names[]; |
394 | extern const unsigned int efx_reset_type_max; | 405 | extern const unsigned int efx_reset_type_max; |
395 | #define RESET_TYPE(type) \ | 406 | #define RESET_TYPE(type) \ |
@@ -404,8 +415,6 @@ enum efx_int_mode { | |||
404 | }; | 415 | }; |
405 | #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) | 416 | #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) |
406 | 417 | ||
407 | #define EFX_IS10G(efx) ((efx)->link_state.speed == 10000) | ||
408 | |||
409 | enum nic_state { | 418 | enum nic_state { |
410 | STATE_INIT = 0, | 419 | STATE_INIT = 0, |
411 | STATE_RUNNING = 1, | 420 | STATE_RUNNING = 1, |
@@ -440,11 +449,9 @@ enum nic_state { | |||
440 | struct efx_nic; | 449 | struct efx_nic; |
441 | 450 | ||
442 | /* Pseudo bit-mask flow control field */ | 451 | /* Pseudo bit-mask flow control field */ |
443 | enum efx_fc_type { | 452 | #define EFX_FC_RX FLOW_CTRL_RX |
444 | EFX_FC_RX = FLOW_CTRL_RX, | 453 | #define EFX_FC_TX FLOW_CTRL_TX |
445 | EFX_FC_TX = FLOW_CTRL_TX, | 454 | #define EFX_FC_AUTO 4 |
446 | EFX_FC_AUTO = 4, | ||
447 | }; | ||
448 | 455 | ||
449 | /** | 456 | /** |
450 | * struct efx_link_state - Current state of the link | 457 | * struct efx_link_state - Current state of the link |
@@ -456,7 +463,7 @@ enum efx_fc_type { | |||
456 | struct efx_link_state { | 463 | struct efx_link_state { |
457 | bool up; | 464 | bool up; |
458 | bool fd; | 465 | bool fd; |
459 | enum efx_fc_type fc; | 466 | u8 fc; |
460 | unsigned int speed; | 467 | unsigned int speed; |
461 | }; | 468 | }; |
462 | 469 | ||
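[Editor's note] Dropping enum efx_fc_type in favour of plain defines (with @fc and, later in the patch, @wanted_fc becoming u8) makes explicit that flow control is a small bitmask: RX pause, TX pause and an autonegotiate bit that combine freely. A standalone illustration follows; the RX/TX values are stand-ins for the kernel's FLOW_CTRL_RX/FLOW_CTRL_TX, and only EFX_FC_AUTO's value (4) is taken from this header.

/* Stand-in flow control bitmask demo; not driver code. */
#include <stdio.h>

#define EFX_FC_RX   0x01
#define EFX_FC_TX   0x02
#define EFX_FC_AUTO 4

int main(void)
{
        unsigned char wanted_fc = EFX_FC_AUTO | EFX_FC_RX | EFX_FC_TX;

        printf("rx pause: %s, tx pause: %s, autoneg: %s\n",
               (wanted_fc & EFX_FC_RX)   ? "on" : "off",
               (wanted_fc & EFX_FC_TX)   ? "on" : "off",
               (wanted_fc & EFX_FC_AUTO) ? "on" : "off");
        return 0;
}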
@@ -618,20 +625,21 @@ union efx_multicast_hash { | |||
618 | efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; | 625 | efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; |
619 | }; | 626 | }; |
620 | 627 | ||
628 | struct efx_filter_state; | ||
629 | |||
621 | /** | 630 | /** |
622 | * struct efx_nic - an Efx NIC | 631 | * struct efx_nic - an Efx NIC |
623 | * @name: Device name (net device name or bus id before net device registered) | 632 | * @name: Device name (net device name or bus id before net device registered) |
624 | * @pci_dev: The PCI device | 633 | * @pci_dev: The PCI device |
625 | * @type: Controller type attributes | 634 | * @type: Controller type attributes |
626 | * @legacy_irq: IRQ number | 635 | * @legacy_irq: IRQ number |
636 | * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)? | ||
627 | * @workqueue: Workqueue for port reconfigures and the HW monitor. | 637 | * @workqueue: Workqueue for port reconfigures and the HW monitor. |
628 | * Work items do not hold and must not acquire RTNL. | 638 | * Work items do not hold and must not acquire RTNL. |
629 | * @workqueue_name: Name of workqueue | 639 | * @workqueue_name: Name of workqueue |
630 | * @reset_work: Scheduled reset workitem | 640 | * @reset_work: Scheduled reset workitem |
631 | * @monitor_work: Hardware monitor workitem | ||
632 | * @membase_phys: Memory BAR value as physical address | 641 | * @membase_phys: Memory BAR value as physical address |
633 | * @membase: Memory BAR value | 642 | * @membase: Memory BAR value |
634 | * @biu_lock: BIU (bus interface unit) lock | ||
635 | * @interrupt_mode: Interrupt mode | 643 | * @interrupt_mode: Interrupt mode |
636 | * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues | 644 | * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues |
637 | * @irq_rx_moderation: IRQ moderation time for RX event queues | 645 | * @irq_rx_moderation: IRQ moderation time for RX event queues |
@@ -641,56 +649,41 @@ union efx_multicast_hash { | |||
641 | * @tx_queue: TX DMA queues | 649 | * @tx_queue: TX DMA queues |
642 | * @rx_queue: RX DMA queues | 650 | * @rx_queue: RX DMA queues |
643 | * @channel: Channels | 651 | * @channel: Channels |
652 | * @channel_name: Names for channels and their IRQs | ||
653 | * @rxq_entries: Size of receive queues requested by user. | ||
654 | * @txq_entries: Size of transmit queues requested by user. | ||
644 | * @next_buffer_table: First available buffer table id | 655 | * @next_buffer_table: First available buffer table id |
645 | * @n_channels: Number of channels in use | 656 | * @n_channels: Number of channels in use |
646 | * @n_rx_channels: Number of channels used for RX (= number of RX queues) | 657 | * @n_rx_channels: Number of channels used for RX (= number of RX queues) |
647 | * @n_tx_channels: Number of channels used for TX | 658 | * @n_tx_channels: Number of channels used for TX |
648 | * @rx_buffer_len: RX buffer length | 659 | * @rx_buffer_len: RX buffer length |
649 | * @rx_buffer_order: Order (log2) of number of pages for each RX buffer | 660 | * @rx_buffer_order: Order (log2) of number of pages for each RX buffer |
661 | * @rx_hash_key: Toeplitz hash key for RSS | ||
650 | * @rx_indir_table: Indirection table for RSS | 662 | * @rx_indir_table: Indirection table for RSS |
651 | * @int_error_count: Number of internal errors seen recently | 663 | * @int_error_count: Number of internal errors seen recently |
652 | * @int_error_expire: Time at which error count will be expired | 664 | * @int_error_expire: Time at which error count will be expired |
653 | * @irq_status: Interrupt status buffer | 665 | * @irq_status: Interrupt status buffer |
654 | * @last_irq_cpu: Last CPU to handle interrupt. | ||
655 | * This register is written with the SMP processor ID whenever an | ||
656 | * interrupt is handled. It is used by efx_nic_test_interrupt() | ||
657 | * to verify that an interrupt has occurred. | ||
658 | * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 | 666 | * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 |
659 | * @fatal_irq_level: IRQ level (bit number) used for serious errors | 667 | * @fatal_irq_level: IRQ level (bit number) used for serious errors |
660 | * @spi_flash: SPI flash device | ||
661 | * This field will be %NULL if no flash device is present (or for Siena). | ||
662 | * @spi_eeprom: SPI EEPROM device | ||
663 | * This field will be %NULL if no EEPROM device is present (or for Siena). | ||
664 | * @spi_lock: SPI bus lock | ||
665 | * @mtd_list: List of MTDs attached to the NIC | 668 | * @mtd_list: List of MTDs attached to the NIC |
666 | * @n_rx_nodesc_drop_cnt: RX no descriptor drop count | 669 | * @nic_data: Hardware dependent state |
667 | * @nic_data: Hardware dependant state | ||
668 | * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, | 670 | * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, |
669 | * @port_inhibited, efx_monitor() and efx_reconfigure_port() | 671 | * efx_monitor() and efx_reconfigure_port() |
670 | * @port_enabled: Port enabled indicator. | 672 | * @port_enabled: Port enabled indicator. |
671 | * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and | 673 | * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and |
672 | * efx_mac_work() with kernel interfaces. Safe to read under any | 674 | * efx_mac_work() with kernel interfaces. Safe to read under any |
673 | * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must | 675 | * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must |
674 | * be held to modify it. | 676 | * be held to modify it. |
675 | * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock | ||
676 | * @port_initialized: Port initialized? | 677 | * @port_initialized: Port initialized? |
677 | * @net_dev: Operating system network device. Consider holding the rtnl lock | 678 | * @net_dev: Operating system network device. Consider holding the rtnl lock |
678 | * @rx_checksum_enabled: RX checksumming enabled | ||
679 | * @mac_stats: MAC statistics. These include all statistics the MACs | ||
680 | * can provide. Generic code converts these into a standard | ||
681 | * &struct net_device_stats. | ||
682 | * @stats_buffer: DMA buffer for statistics | 679 | * @stats_buffer: DMA buffer for statistics |
683 | * @stats_lock: Statistics update lock. Serialises statistics fetches | ||
684 | * @mac_op: MAC interface | 680 | * @mac_op: MAC interface |
685 | * @mac_address: Permanent MAC address | ||
686 | * @phy_type: PHY type | 681 | * @phy_type: PHY type |
687 | * @mdio_lock: MDIO lock | ||
688 | * @phy_op: PHY interface | 682 | * @phy_op: PHY interface |
689 | * @phy_data: PHY private data (including PHY-specific stats) | 683 | * @phy_data: PHY private data (including PHY-specific stats) |
690 | * @mdio: PHY MDIO interface | 684 | * @mdio: PHY MDIO interface |
691 | * @mdio_bus: PHY MDIO bus ID (only used by Siena) | 685 | * @mdio_bus: PHY MDIO bus ID (only used by Siena) |
692 | * @phy_mode: PHY operating mode. Serialised by @mac_lock. | 686 | * @phy_mode: PHY operating mode. Serialised by @mac_lock. |
693 | * @xmac_poll_required: XMAC link state needs polling | ||
694 | * @link_advertising: Autonegotiation advertising flags | 687 | * @link_advertising: Autonegotiation advertising flags |
695 | * @link_state: Current state of the link | 688 | * @link_state: Current state of the link |
696 | * @n_link_state_changes: Number of times the link has changed state | 689 | * @n_link_state_changes: Number of times the link has changed state |
@@ -701,21 +694,34 @@ union efx_multicast_hash { | |||
701 | * @loopback_mode: Loopback status | 694 | * @loopback_mode: Loopback status |
702 | * @loopback_modes: Supported loopback mode bitmask | 695 | * @loopback_modes: Supported loopback mode bitmask |
703 | * @loopback_selftest: Offline self-test private state | 696 | * @loopback_selftest: Offline self-test private state |
697 | * @monitor_work: Hardware monitor workitem | ||
698 | * @biu_lock: BIU (bus interface unit) lock | ||
699 | * @last_irq_cpu: Last CPU to handle interrupt. | ||
700 | * This register is written with the SMP processor ID whenever an | ||
701 | * interrupt is handled. It is used by efx_nic_test_interrupt() | ||
702 | * to verify that an interrupt has occurred. | ||
703 | * @n_rx_nodesc_drop_cnt: RX no descriptor drop count | ||
704 | * @mac_stats: MAC statistics. These include all statistics the MACs | ||
705 | * can provide. Generic code converts these into a standard | ||
706 | * &struct net_device_stats. | ||
707 | * @stats_lock: Statistics update lock. Serialises statistics fetches | ||
704 | * | 708 | * |
705 | * This is stored in the private area of the &struct net_device. | 709 | * This is stored in the private area of the &struct net_device. |
706 | */ | 710 | */ |
707 | struct efx_nic { | 711 | struct efx_nic { |
712 | /* The following fields should be written very rarely */ | ||
713 | |||
708 | char name[IFNAMSIZ]; | 714 | char name[IFNAMSIZ]; |
709 | struct pci_dev *pci_dev; | 715 | struct pci_dev *pci_dev; |
710 | const struct efx_nic_type *type; | 716 | const struct efx_nic_type *type; |
711 | int legacy_irq; | 717 | int legacy_irq; |
718 | bool legacy_irq_enabled; | ||
712 | struct workqueue_struct *workqueue; | 719 | struct workqueue_struct *workqueue; |
713 | char workqueue_name[16]; | 720 | char workqueue_name[16]; |
714 | struct work_struct reset_work; | 721 | struct work_struct reset_work; |
715 | struct delayed_work monitor_work; | ||
716 | resource_size_t membase_phys; | 722 | resource_size_t membase_phys; |
717 | void __iomem *membase; | 723 | void __iomem *membase; |
718 | spinlock_t biu_lock; | 724 | |
719 | enum efx_int_mode interrupt_mode; | 725 | enum efx_int_mode interrupt_mode; |
720 | bool irq_rx_adaptive; | 726 | bool irq_rx_adaptive; |
721 | unsigned int irq_rx_moderation; | 727 | unsigned int irq_rx_moderation; |
@@ -724,13 +730,15 @@ struct efx_nic { | |||
724 | enum nic_state state; | 730 | enum nic_state state; |
725 | enum reset_type reset_pending; | 731 | enum reset_type reset_pending; |
726 | 732 | ||
727 | struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES]; | 733 | struct efx_channel *channel[EFX_MAX_CHANNELS]; |
728 | struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; | 734 | char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6]; |
729 | struct efx_channel channel[EFX_MAX_CHANNELS]; | ||
730 | 735 | ||
736 | unsigned rxq_entries; | ||
737 | unsigned txq_entries; | ||
731 | unsigned next_buffer_table; | 738 | unsigned next_buffer_table; |
732 | unsigned n_channels; | 739 | unsigned n_channels; |
733 | unsigned n_rx_channels; | 740 | unsigned n_rx_channels; |
741 | unsigned tx_channel_offset; | ||
734 | unsigned n_tx_channels; | 742 | unsigned n_tx_channels; |
735 | unsigned int rx_buffer_len; | 743 | unsigned int rx_buffer_len; |
736 | unsigned int rx_buffer_order; | 744 | unsigned int rx_buffer_order; |
@@ -741,59 +749,57 @@ struct efx_nic { | |||
741 | unsigned long int_error_expire; | 749 | unsigned long int_error_expire; |
742 | 750 | ||
743 | struct efx_buffer irq_status; | 751 | struct efx_buffer irq_status; |
744 | volatile signed int last_irq_cpu; | ||
745 | unsigned irq_zero_count; | 752 | unsigned irq_zero_count; |
746 | unsigned fatal_irq_level; | 753 | unsigned fatal_irq_level; |
747 | 754 | ||
748 | struct efx_spi_device *spi_flash; | ||
749 | struct efx_spi_device *spi_eeprom; | ||
750 | struct mutex spi_lock; | ||
751 | #ifdef CONFIG_SFC_MTD | 755 | #ifdef CONFIG_SFC_MTD |
752 | struct list_head mtd_list; | 756 | struct list_head mtd_list; |
753 | #endif | 757 | #endif |
754 | 758 | ||
755 | unsigned n_rx_nodesc_drop_cnt; | ||
756 | |||
757 | void *nic_data; | 759 | void *nic_data; |
758 | 760 | ||
759 | struct mutex mac_lock; | 761 | struct mutex mac_lock; |
760 | struct work_struct mac_work; | 762 | struct work_struct mac_work; |
761 | bool port_enabled; | 763 | bool port_enabled; |
762 | bool port_inhibited; | ||
763 | 764 | ||
764 | bool port_initialized; | 765 | bool port_initialized; |
765 | struct net_device *net_dev; | 766 | struct net_device *net_dev; |
766 | bool rx_checksum_enabled; | ||
767 | 767 | ||
768 | struct efx_mac_stats mac_stats; | ||
769 | struct efx_buffer stats_buffer; | 768 | struct efx_buffer stats_buffer; |
770 | spinlock_t stats_lock; | ||
771 | 769 | ||
772 | struct efx_mac_operations *mac_op; | 770 | const struct efx_mac_operations *mac_op; |
773 | unsigned char mac_address[ETH_ALEN]; | ||
774 | 771 | ||
775 | unsigned int phy_type; | 772 | unsigned int phy_type; |
776 | struct mutex mdio_lock; | 773 | const struct efx_phy_operations *phy_op; |
777 | struct efx_phy_operations *phy_op; | ||
778 | void *phy_data; | 774 | void *phy_data; |
779 | struct mdio_if_info mdio; | 775 | struct mdio_if_info mdio; |
780 | unsigned int mdio_bus; | 776 | unsigned int mdio_bus; |
781 | enum efx_phy_mode phy_mode; | 777 | enum efx_phy_mode phy_mode; |
782 | 778 | ||
783 | bool xmac_poll_required; | ||
784 | u32 link_advertising; | 779 | u32 link_advertising; |
785 | struct efx_link_state link_state; | 780 | struct efx_link_state link_state; |
786 | unsigned int n_link_state_changes; | 781 | unsigned int n_link_state_changes; |
787 | 782 | ||
788 | bool promiscuous; | 783 | bool promiscuous; |
789 | union efx_multicast_hash multicast_hash; | 784 | union efx_multicast_hash multicast_hash; |
790 | enum efx_fc_type wanted_fc; | 785 | u8 wanted_fc; |
791 | 786 | ||
792 | atomic_t rx_reset; | 787 | atomic_t rx_reset; |
793 | enum efx_loopback_mode loopback_mode; | 788 | enum efx_loopback_mode loopback_mode; |
794 | u64 loopback_modes; | 789 | u64 loopback_modes; |
795 | 790 | ||
796 | void *loopback_selftest; | 791 | void *loopback_selftest; |
792 | |||
793 | struct efx_filter_state *filter_state; | ||
794 | |||
795 | /* The following fields may be written more often */ | ||
796 | |||
797 | struct delayed_work monitor_work ____cacheline_aligned_in_smp; | ||
798 | spinlock_t biu_lock; | ||
799 | volatile signed int last_irq_cpu; | ||
800 | unsigned n_rx_nodesc_drop_cnt; | ||
801 | struct efx_mac_stats mac_stats; | ||
802 | spinlock_t stats_lock; | ||
797 | }; | 803 | }; |
798 | 804 | ||
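[Editor's note] The struct reshuffle above groups the rarely-written efx_nic members first and moves the frequently-written ones (monitor work, biu_lock, IRQ and stats bookkeeping) behind ____cacheline_aligned_in_smp, so hot updates do not invalidate the cache line holding read-mostly configuration. A userspace sketch of the same idea, assuming 64-byte cache lines (the kernel macro uses the real L1 line size) and hypothetical field names:

/* Sketch only: "read-mostly first, hot fields on their own cache line". */
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHE_LINE 64   /* assumed L1 cache line size */

struct nic_model {
        /* written very rarely, read from many paths */
        int legacy_irq;
        unsigned int rx_buffer_len;

        /* written frequently; starts on a fresh cache line to avoid
         * false sharing with the configuration fields above */
        alignas(CACHE_LINE) unsigned long n_rx_nodesc_drop_cnt;
        volatile int last_irq_cpu;
};

int main(void)
{
        printf("hot fields begin at offset %zu\n",
               offsetof(struct nic_model, n_rx_nodesc_drop_cnt));
        return 0;
}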
799 | static inline int efx_dev_registered(struct efx_nic *efx) | 805 | static inline int efx_dev_registered(struct efx_nic *efx) |
@@ -826,6 +832,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) | |||
826 | * be called while the controller is uninitialised. | 832 | * be called while the controller is uninitialised. |
827 | * @probe_port: Probe the MAC and PHY | 833 | * @probe_port: Probe the MAC and PHY |
828 | * @remove_port: Free resources allocated by probe_port() | 834 | * @remove_port: Free resources allocated by probe_port() |
835 | * @handle_global_event: Handle a "global" event (may be %NULL) | ||
829 | * @prepare_flush: Prepare the hardware for flushing the DMA queues | 836 | * @prepare_flush: Prepare the hardware for flushing the DMA queues |
830 | * @update_stats: Update statistics not provided by event handling | 837 | * @update_stats: Update statistics not provided by event handling |
831 | * @start_stats: Start the regular fetching of statistics | 838 | * @start_stats: Start the regular fetching of statistics |
@@ -870,6 +877,7 @@ struct efx_nic_type { | |||
870 | int (*reset)(struct efx_nic *efx, enum reset_type method); | 877 | int (*reset)(struct efx_nic *efx, enum reset_type method); |
871 | int (*probe_port)(struct efx_nic *efx); | 878 | int (*probe_port)(struct efx_nic *efx); |
872 | void (*remove_port)(struct efx_nic *efx); | 879 | void (*remove_port)(struct efx_nic *efx); |
880 | bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); | ||
873 | void (*prepare_flush)(struct efx_nic *efx); | 881 | void (*prepare_flush)(struct efx_nic *efx); |
874 | void (*update_stats)(struct efx_nic *efx); | 882 | void (*update_stats)(struct efx_nic *efx); |
875 | void (*start_stats)(struct efx_nic *efx); | 883 | void (*start_stats)(struct efx_nic *efx); |
@@ -883,7 +891,7 @@ struct efx_nic_type { | |||
883 | void (*resume_wol)(struct efx_nic *efx); | 891 | void (*resume_wol)(struct efx_nic *efx); |
884 | int (*test_registers)(struct efx_nic *efx); | 892 | int (*test_registers)(struct efx_nic *efx); |
885 | int (*test_nvram)(struct efx_nic *efx); | 893 | int (*test_nvram)(struct efx_nic *efx); |
886 | struct efx_mac_operations *default_mac_ops; | 894 | const struct efx_mac_operations *default_mac_ops; |
887 | 895 | ||
888 | int revision; | 896 | int revision; |
889 | unsigned int mem_map_size; | 897 | unsigned int mem_map_size; |
@@ -899,7 +907,7 @@ struct efx_nic_type { | |||
899 | unsigned int phys_addr_channels; | 907 | unsigned int phys_addr_channels; |
900 | unsigned int tx_dc_base; | 908 | unsigned int tx_dc_base; |
901 | unsigned int rx_dc_base; | 909 | unsigned int rx_dc_base; |
902 | unsigned long offload_features; | 910 | u32 offload_features; |
903 | u32 reset_world_flags; | 911 | u32 reset_world_flags; |
904 | }; | 912 | }; |
905 | 913 | ||
@@ -909,39 +917,102 @@ struct efx_nic_type { | |||
909 | * | 917 | * |
910 | *************************************************************************/ | 918 | *************************************************************************/ |
911 | 919 | ||
920 | static inline struct efx_channel * | ||
921 | efx_get_channel(struct efx_nic *efx, unsigned index) | ||
922 | { | ||
923 | EFX_BUG_ON_PARANOID(index >= efx->n_channels); | ||
924 | return efx->channel[index]; | ||
925 | } | ||
926 | |||
912 | /* Iterate over all used channels */ | 927 | /* Iterate over all used channels */ |
913 | #define efx_for_each_channel(_channel, _efx) \ | 928 | #define efx_for_each_channel(_channel, _efx) \ |
914 | for (_channel = &((_efx)->channel[0]); \ | 929 | for (_channel = (_efx)->channel[0]; \ |
915 | _channel < &((_efx)->channel[(efx)->n_channels]); \ | 930 | _channel; \ |
916 | _channel++) | 931 | _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ |
917 | 932 | (_efx)->channel[_channel->channel + 1] : NULL) | |
918 | /* Iterate over all used TX queues */ | 933 | |
919 | #define efx_for_each_tx_queue(_tx_queue, _efx) \ | 934 | static inline struct efx_tx_queue * |
920 | for (_tx_queue = &((_efx)->tx_queue[0]); \ | 935 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) |
921 | _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES * \ | 936 | { |
922 | (_efx)->n_tx_channels]); \ | 937 | EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || |
923 | _tx_queue++) | 938 | type >= EFX_TXQ_TYPES); |
939 | return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; | ||
940 | } | ||
941 | |||
942 | static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) | ||
943 | { | ||
944 | return channel->channel - channel->efx->tx_channel_offset < | ||
945 | channel->efx->n_tx_channels; | ||
946 | } | ||
947 | |||
948 | static inline struct efx_tx_queue * | ||
949 | efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) | ||
950 | { | ||
951 | EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) || | ||
952 | type >= EFX_TXQ_TYPES); | ||
953 | return &channel->tx_queue[type]; | ||
954 | } | ||
955 | |||
956 | static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue) | ||
957 | { | ||
958 | return !(tx_queue->efx->net_dev->num_tc < 2 && | ||
959 | tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI); | ||
960 | } | ||
924 | 961 | ||
925 | /* Iterate over all TX queues belonging to a channel */ | 962 | /* Iterate over all TX queues belonging to a channel */ |
926 | #define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ | 963 | #define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ |
964 | if (!efx_channel_has_tx_queues(_channel)) \ | ||
965 | ; \ | ||
966 | else \ | ||
967 | for (_tx_queue = (_channel)->tx_queue; \ | ||
968 | _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \ | ||
969 | efx_tx_queue_used(_tx_queue); \ | ||
970 | _tx_queue++) | ||
971 | |||
972 | /* Iterate over all possible TX queues belonging to a channel */ | ||
973 | #define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \ | ||
927 | for (_tx_queue = (_channel)->tx_queue; \ | 974 | for (_tx_queue = (_channel)->tx_queue; \ |
928 | _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ | 975 | _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ |
929 | _tx_queue++) | 976 | _tx_queue++) |
930 | 977 | ||
931 | /* Iterate over all used RX queues */ | 978 | static inline struct efx_rx_queue * |
932 | #define efx_for_each_rx_queue(_rx_queue, _efx) \ | 979 | efx_get_rx_queue(struct efx_nic *efx, unsigned index) |
933 | for (_rx_queue = &((_efx)->rx_queue[0]); \ | 980 | { |
934 | _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]); \ | 981 | EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels); |
935 | _rx_queue++) | 982 | return &efx->channel[index]->rx_queue; |
983 | } | ||
984 | |||
985 | static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) | ||
986 | { | ||
987 | return channel->channel < channel->efx->n_rx_channels; | ||
988 | } | ||
989 | |||
990 | static inline struct efx_rx_queue * | ||
991 | efx_channel_get_rx_queue(struct efx_channel *channel) | ||
992 | { | ||
993 | EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel)); | ||
994 | return &channel->rx_queue; | ||
995 | } | ||
936 | 996 | ||
937 | /* Iterate over all RX queues belonging to a channel */ | 997 | /* Iterate over all RX queues belonging to a channel */ |
938 | #define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ | 998 | #define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ |
939 | for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \ | 999 | if (!efx_channel_has_rx_queue(_channel)) \ |
940 | _rx_queue; \ | 1000 | ; \ |
941 | _rx_queue = NULL) \ | 1001 | else \ |
942 | if (_rx_queue->channel != (_channel)) \ | 1002 | for (_rx_queue = &(_channel)->rx_queue; \ |
943 | continue; \ | 1003 | _rx_queue; \ |
944 | else | 1004 | _rx_queue = NULL) |
1005 | |||
1006 | static inline struct efx_channel * | ||
1007 | efx_rx_queue_channel(struct efx_rx_queue *rx_queue) | ||
1008 | { | ||
1009 | return container_of(rx_queue, struct efx_channel, rx_queue); | ||
1010 | } | ||
1011 | |||
1012 | static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue) | ||
1013 | { | ||
1014 | return efx_rx_queue_channel(rx_queue)->channel; | ||
1015 | } | ||
945 | 1016 | ||
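[Editor's note] Because every channel now embeds its RX queue (and an array of TX queues) directly, the helpers above can hop between the two with plain pointer arithmetic: efx_rx_queue_channel() is just container_of(), and efx_rx_queue_index() replaces the deleted @queue field. A self-contained illustration of that trick, with minimal stand-in types for struct efx_channel and struct efx_rx_queue:

/* container_of() open-coded the way the kernel defines it; model types only. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rx_queue_model { unsigned int ptr_mask; };

struct channel_model {
        int channel;                            /* instance number */
        struct rx_queue_model rx_queue;         /* embedded, as above */
};

/* same idea as efx_rx_queue_index(): recover the owning channel's number */
static int rx_queue_index(struct rx_queue_model *rx_queue)
{
        return container_of(rx_queue, struct channel_model, rx_queue)->channel;
}

int main(void)
{
        struct channel_model ch = {
                .channel = 5, .rx_queue = { .ptr_mask = 511 },
        };

        printf("RX queue belongs to channel %d\n",
               rx_queue_index(&ch.rx_queue));
        return 0;
}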
946 | /* Returns a pointer to the specified receive buffer in the RX | 1017 | /* Returns a pointer to the specified receive buffer in the RX |
947 | * descriptor queue. | 1018 | * descriptor queue. |
@@ -949,7 +1020,7 @@ struct efx_nic_type { | |||
949 | static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, | 1020 | static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, |
950 | unsigned int index) | 1021 | unsigned int index) |
951 | { | 1022 | { |
952 | return (&rx_queue->buffer[index]); | 1023 | return &rx_queue->buffer[index]; |
953 | } | 1024 | } |
954 | 1025 | ||
955 | /* Set bit in a little-endian bitfield */ | 1026 | /* Set bit in a little-endian bitfield */ |
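[Editor's note] One more usage note on the helpers added earlier in this patch: efx_tx_queue_used() treats a channel's high-priority queues as unused unless the net device exposes at least two traffic classes, which is what lets efx_for_each_channel_tx_queue() stop early. The check reduces to the arithmetic below; num_tc is a plain int standing in for net_dev->num_tc, and the channel/type numbering follows the sketch near the top of this section.

/* Standalone restatement of the efx_tx_queue_used() test; not driver code. */
#include <stdbool.h>
#include <stdio.h>

#define EFX_TXQ_TYPE_HIGHPRI 2          /* flag, as defined in this header */
#define EFX_TXQ_TYPES        4

static bool tx_queue_used(unsigned int queue, int num_tc)
{
        return !(num_tc < 2 && (queue & EFX_TXQ_TYPE_HIGHPRI));
}

int main(void)
{
        /* queue 7 = channel 1, type OFFLOAD|HIGHPRI under the new numbering */
        unsigned int queue = 1 * EFX_TXQ_TYPES + 3;

        printf("no traffic classes:  %s\n",
               tx_queue_used(queue, 0) ? "used" : "skipped");
        printf("two traffic classes: %s\n",
               tx_queue_used(queue, 2) ? "used" : "skipped");
        return 0;
}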