-rw-r--r--	drivers/net/sfc/efx.c	30
-rw-r--r--	drivers/net/sfc/ethtool.c	4
-rw-r--r--	drivers/net/sfc/falcon.c	507
-rw-r--r--	drivers/net/sfc/falcon.h	80
-rw-r--r--	drivers/net/sfc/rx.c	10
-rw-r--r--	drivers/net/sfc/selftest.c	4
-rw-r--r--	drivers/net/sfc/tx.c	12
7 files changed, 341 insertions, 306 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 4b7168fc546a..e5c33c66eda3 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -213,7 +213,7 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
 		     !channel->enabled))
 		return 0;
 
-	rx_packets = falcon_process_eventq(channel, rx_quota);
+	rx_packets = efx_nic_process_eventq(channel, rx_quota);
 	if (rx_packets == 0)
 		return 0;
 
@@ -245,7 +245,7 @@ static inline void efx_channel_processed(struct efx_channel *channel)
 	channel->work_pending = false;
 	smp_wmb();
 
-	falcon_eventq_read_ack(channel);
+	efx_nic_eventq_read_ack(channel);
 }
 
 /* NAPI poll handler
@@ -316,7 +316,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 	BUG_ON(!channel->enabled);
 
 	/* Disable interrupts and wait for ISRs to complete */
-	falcon_disable_interrupts(efx);
+	efx_nic_disable_interrupts(efx);
 	if (efx->legacy_irq)
 		synchronize_irq(efx->legacy_irq);
 	if (channel->irq)
@@ -333,7 +333,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 	efx_channel_processed(channel);
 
 	napi_enable(&channel->napi_str);
-	falcon_enable_interrupts(efx);
+	efx_nic_enable_interrupts(efx);
 }
 
 /* Create event queue
@@ -345,7 +345,7 @@ static int efx_probe_eventq(struct efx_channel *channel)
 {
 	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
 
-	return falcon_probe_eventq(channel);
+	return efx_nic_probe_eventq(channel);
 }
 
 /* Prepare channel's event queue */
@@ -355,21 +355,21 @@ static void efx_init_eventq(struct efx_channel *channel)
 
 	channel->eventq_read_ptr = 0;
 
-	falcon_init_eventq(channel);
+	efx_nic_init_eventq(channel);
 }
 
 static void efx_fini_eventq(struct efx_channel *channel)
 {
 	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
 
-	falcon_fini_eventq(channel);
+	efx_nic_fini_eventq(channel);
 }
 
 static void efx_remove_eventq(struct efx_channel *channel)
 {
 	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
 
-	falcon_remove_eventq(channel);
+	efx_nic_remove_eventq(channel);
 }
 
 /**************************************************************************
@@ -535,7 +535,7 @@ static void efx_fini_channels(struct efx_nic *efx)
 	EFX_ASSERT_RESET_SERIALISED(efx);
 	BUG_ON(efx->port_enabled);
 
-	rc = falcon_flush_queues(efx);
+	rc = efx_nic_flush_queues(efx);
 	if (rc)
 		EFX_ERR(efx, "failed to flush queues\n");
 	else
@@ -1172,7 +1172,7 @@ static void efx_start_all(struct efx_nic *efx)
 	efx_for_each_channel(channel, efx)
 		efx_start_channel(channel);
 
-	falcon_enable_interrupts(efx);
+	efx_nic_enable_interrupts(efx);
 
 	/* Start the hardware monitor if there is one. Otherwise (we're link
 	 * event driven), we have to poll the PHY because after an event queue
@@ -1226,7 +1226,7 @@ static void efx_stop_all(struct efx_nic *efx)
 	efx->type->stop_stats(efx);
 
 	/* Disable interrupts and wait for ISR to complete */
-	falcon_disable_interrupts(efx);
+	efx_nic_disable_interrupts(efx);
 	if (efx->legacy_irq)
 		synchronize_irq(efx->legacy_irq);
 	efx_for_each_channel(channel, efx) {
@@ -1286,8 +1286,8 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
 {
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
-	unsigned tx_ticks = irq_mod_ticks(tx_usecs, FALCON_IRQ_MOD_RESOLUTION);
-	unsigned rx_ticks = irq_mod_ticks(rx_usecs, FALCON_IRQ_MOD_RESOLUTION);
+	unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
+	unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
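For reference, a minimal standalone sketch of the microsecond-to-tick conversion whose resolution constant is renamed in the hunk above. The helper body mirrors the driver's irq_mod_ticks() as I read it; treat the 5 us per tick figure as an assumption here.

#include <stdio.h>

#define EFX_IRQ_MOD_RESOLUTION 5	/* assumed: 5 us per moderation tick */

/* Convert a requested IRQ moderation delay in microseconds to hardware
 * ticks, never rounding a non-zero request down to zero ticks. */
static unsigned irq_mod_ticks(int usecs, int resolution)
{
	if (usecs <= 0)
		return 0;		/* no moderation */
	if (usecs < resolution)
		return 1;		/* never round down to 0 */
	return usecs / resolution;
}

int main(void)
{
	/* e.g. "ethtool -C ... rx-usecs-irq 60" -> 12 ticks, reported back
	 * by ethtool as 12 * EFX_IRQ_MOD_RESOLUTION = 60 us */
	printf("%u\n", irq_mod_ticks(60, EFX_IRQ_MOD_RESOLUTION));
	return 0;
}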
@@ -2042,7 +2042,7 @@ static void efx_fini_struct(struct efx_nic *efx)
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
-	falcon_fini_interrupt(efx);
+	efx_nic_fini_interrupt(efx);
 	efx_fini_channels(efx);
 	efx_fini_port(efx);
 	efx->type->fini(efx);
@@ -2119,7 +2119,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 
 	efx_init_channels(efx);
 
-	rc = falcon_init_interrupt(efx);
+	rc = efx_nic_init_interrupt(efx);
 	if (rc)
 		goto fail5;
 
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 506397527633..e86cbca75ea8 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -611,8 +611,8 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
 	coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive;
 	coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation;
 
-	coalesce->tx_coalesce_usecs_irq *= FALCON_IRQ_MOD_RESOLUTION;
-	coalesce->rx_coalesce_usecs_irq *= FALCON_IRQ_MOD_RESOLUTION;
+	coalesce->tx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION;
+	coalesce->rx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION;
 
 	return 0;
 }
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 61cc9948b233..2e4c71114630 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -69,8 +69,8 @@ default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
  * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
  * This also has an effect on RX/TX arbitration
  */
-static int rx_xoff_thresh_bytes = -1;
-module_param(rx_xoff_thresh_bytes, int, 0644);
+int efx_nic_rx_xoff_thresh = -1;
+module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
 MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
 
 /* RX FIFO XON watermark
@@ -79,21 +79,21 @@ MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
  * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
  * This also has an effect on RX/TX arbitration
  */
-static int rx_xon_thresh_bytes = -1;
-module_param(rx_xon_thresh_bytes, int, 0644);
+int efx_nic_rx_xon_thresh = -1;
+module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
 MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 
-/* If FALCON_MAX_INT_ERRORS internal errors occur within
- * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
+/* If EFX_MAX_INT_ERRORS internal errors occur within
+ * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
  * disable it.
  */
-#define FALCON_INT_ERROR_EXPIRE 3600
-#define FALCON_MAX_INT_ERRORS 5
+#define EFX_INT_ERROR_EXPIRE 3600
+#define EFX_MAX_INT_ERRORS 5
 
 /* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
  */
-#define FALCON_FLUSH_INTERVAL 10
-#define FALCON_FLUSH_POLL_COUNT 100
+#define EFX_FLUSH_INTERVAL 10
+#define EFX_FLUSH_POLL_COUNT 100
 
 /**************************************************************************
  *
@@ -103,30 +103,27 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
  */
 
 /* Size and alignment of special buffers (4KB) */
-#define FALCON_BUF_SIZE 4096
+#define EFX_BUF_SIZE 4096
 
 /* Depth of RX flush request fifo */
-#define FALCON_RX_FLUSH_COUNT 4
-
-#define FALCON_IS_DUAL_FUNC(efx) \
-	(efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+#define EFX_RX_FLUSH_COUNT 4
 
 /**************************************************************************
  *
- * Falcon hardware access
+ * Solarstorm hardware access
  *
  **************************************************************************/
 
-static inline void falcon_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
-					unsigned int index)
+static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
+				     unsigned int index)
 {
 	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
 			value, index);
 }
 
 /* Read the current event from the event queue */
-static inline efx_qword_t *falcon_event(struct efx_channel *channel,
-					unsigned int index)
+static inline efx_qword_t *efx_event(struct efx_channel *channel,
+				     unsigned int index)
 {
 	return (((efx_qword_t *) (channel->eventq.addr)) + index);
 }
@@ -141,7 +138,7 @@ static inline efx_qword_t *falcon_event(struct efx_channel *channel,
  * Note that using a single 64-bit comparison is incorrect; even
  * though the CPU read will be atomic, the DMA write may not be.
  */
-static inline int falcon_event_present(efx_qword_t *event)
+static inline int efx_event_present(efx_qword_t *event)
 {
 	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
 		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
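A standalone sketch of the presence test above, using plain stdint types in place of efx_qword_t (the type and names here are illustrative, not the driver's). Queues are filled with 0xff, so an entry is "present" only once the NIC's DMA write has cleared bits in both 32-bit halves; testing the halves separately avoids assuming the 64-bit write lands atomically.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for efx_qword_t: 64 bits viewed as two dwords. */
typedef struct { uint32_t dword[2]; } qword_t;

/* Present only if neither half is still the all-ones fill pattern. */
static bool event_present(const volatile qword_t *ev)
{
	return !(ev->dword[0] == 0xffffffff ||
		 ev->dword[1] == 0xffffffff);
}

int main(void)
{
	qword_t empty = { { 0xffffffff, 0xffffffff } };
	qword_t torn  = { { 0x12345678, 0xffffffff } };	/* half-written */
	qword_t full  = { { 0x12345678, 0x9abcdef0 } };

	printf("%d %d %d\n", event_present(&empty),
	       event_present(&torn), event_present(&full));	/* 0 0 1 */
	return 0;
}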
@@ -205,22 +202,21 @@ static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
 
 /**************************************************************************
  *
- * Falcon special buffer handling
+ * Special buffer handling
  * Special buffers are used for event queues and the TX and RX
  * descriptor rings.
  *
  *************************************************************************/
 
 /*
- * Initialise a Falcon special buffer
+ * Initialise a special buffer
  *
  * This will define a buffer (previously allocated via
- * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
+ * efx_alloc_special_buffer()) in the buffer table, allowing
  * it to be used for event queues, descriptor rings etc.
  */
 static void
-falcon_init_special_buffer(struct efx_nic *efx,
-			   struct efx_special_buffer *buffer)
+efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
 {
 	efx_qword_t buf_desc;
 	int index;
@@ -239,14 +235,13 @@ falcon_init_special_buffer(struct efx_nic *efx,
 				     FRF_AZ_BUF_ADR_REGION, 0,
 				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
 				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
-		falcon_write_buf_tbl(efx, &buf_desc, index);
+		efx_write_buf_tbl(efx, &buf_desc, index);
 	}
 }
 
-/* Unmaps a buffer from Falcon and clears the buffer table entries */
+/* Unmaps a buffer and clears the buffer table entries */
 static void
-falcon_fini_special_buffer(struct efx_nic *efx,
-			   struct efx_special_buffer *buffer)
+efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
 {
 	efx_oword_t buf_tbl_upd;
 	unsigned int start = buffer->index;
@@ -267,27 +262,27 @@ falcon_fini_special_buffer(struct efx_nic *efx,
 }
 
 /*
- * Allocate a new Falcon special buffer
+ * Allocate a new special buffer
  *
  * This allocates memory for a new buffer, clears it and allocates a
- * new buffer ID range. It does not write into Falcon's buffer table.
+ * new buffer ID range. It does not write into the buffer table.
  *
- * This call will allocate 4KB buffers, since Falcon can't use 8KB
- * buffers for event queues and descriptor rings.
+ * This call will allocate 4KB buffers, since 8KB buffers can't be
+ * used for event queues and descriptor rings.
  */
-static int falcon_alloc_special_buffer(struct efx_nic *efx,
-				       struct efx_special_buffer *buffer,
-				       unsigned int len)
+static int efx_alloc_special_buffer(struct efx_nic *efx,
+				    struct efx_special_buffer *buffer,
+				    unsigned int len)
 {
-	len = ALIGN(len, FALCON_BUF_SIZE);
+	len = ALIGN(len, EFX_BUF_SIZE);
 
 	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
 					    &buffer->dma_addr);
 	if (!buffer->addr)
 		return -ENOMEM;
 	buffer->len = len;
-	buffer->entries = len / FALCON_BUF_SIZE;
-	BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));
+	buffer->entries = len / EFX_BUF_SIZE;
+	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
 
 	/* All zeros is a potentially valid event so memset to 0xff */
 	memset(buffer->addr, 0xff, len);
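A small worked example of the sizing arithmetic above. EFX_BUF_SIZE comes from the hunk; the ALIGN() expansion shown is the usual kernel round-up for power-of-two alignments, reproduced here as an assumption.

#include <stdio.h>

#define EFX_BUF_SIZE 4096u	/* buffer-table page size, per the hunk */
/* Round x up to a multiple of the power-of-two alignment a. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* A 512-entry TX ring of 8-byte descriptors fits exactly one 4KB
	 * buffer-table entry; anything larger consumes whole extra pages. */
	unsigned len = ALIGN(512 * 8, EFX_BUF_SIZE);
	printf("len=%u entries=%u\n", len, len / EFX_BUF_SIZE);
	return 0;
}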
@@ -305,8 +300,8 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx,
 	return 0;
 }
 
-static void falcon_free_special_buffer(struct efx_nic *efx,
-				       struct efx_special_buffer *buffer)
+static void
+efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
 {
 	if (!buffer->addr)
 		return;
@@ -325,13 +320,13 @@ static void falcon_free_special_buffer(struct efx_nic *efx,
 
 /**************************************************************************
  *
- * Falcon generic buffer handling
+ * Generic buffer handling
  * These buffers are used for interrupt status and MAC stats
  *
  **************************************************************************/
 
-static int falcon_alloc_buffer(struct efx_nic *efx,
-			       struct efx_buffer *buffer, unsigned int len)
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+			 unsigned int len)
 {
 	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
 					    &buffer->dma_addr);
@@ -342,7 +337,7 @@ static int falcon_alloc_buffer(struct efx_nic *efx,
 	return 0;
 }
 
-static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
 {
 	if (buffer->addr) {
 		pci_free_consistent(efx->pci_dev, buffer->len,
@@ -353,21 +348,21 @@ static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
 
 /**************************************************************************
  *
- * Falcon TX path
+ * TX path
  *
 **************************************************************************/
 
 /* Returns a pointer to the specified transmit descriptor in the TX
  * descriptor queue belonging to the specified channel.
  */
-static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
-					  unsigned int index)
+static inline efx_qword_t *
+efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
 }
 
 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
-static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
+static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
 {
 	unsigned write_ptr;
 	efx_dword_t reg;
@@ -383,7 +378,7 @@ static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
  * descriptor in the hardware TX descriptor ring (in host memory), and
  * write a doorbell.
  */
-void falcon_push_buffers(struct efx_tx_queue *tx_queue)
+void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
 {
 
 	struct efx_tx_buffer *buffer;
@@ -395,7 +390,7 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue)
 	do {
 		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[write_ptr];
-		txd = falcon_tx_desc(tx_queue, write_ptr);
+		txd = efx_tx_desc(tx_queue, write_ptr);
 		++tx_queue->write_count;
 
 		/* Create TX descriptor ring entry */
@@ -407,20 +402,20 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue)
 	} while (tx_queue->write_count != tx_queue->insert_count);
 
 	wmb(); /* Ensure descriptors are written before they are fetched */
-	falcon_notify_tx_desc(tx_queue);
+	efx_notify_tx_desc(tx_queue);
 }
 
 /* Allocate hardware resources for a TX queue */
-int falcon_probe_tx(struct efx_tx_queue *tx_queue)
+int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
 		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
-	return falcon_alloc_special_buffer(efx, &tx_queue->txd,
-					   EFX_TXQ_SIZE * sizeof(efx_qword_t));
+	return efx_alloc_special_buffer(efx, &tx_queue->txd,
+					EFX_TXQ_SIZE * sizeof(efx_qword_t));
 }
 
-void falcon_init_tx(struct efx_tx_queue *tx_queue)
+void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
 	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
@@ -428,7 +423,7 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
 	tx_queue->flushed = FLUSH_NONE;
 
 	/* Pin TX descriptor ring */
-	falcon_init_special_buffer(efx, &tx_queue->txd);
+	efx_init_special_buffer(efx, &tx_queue->txd);
 
 	/* Push TX descriptor ring to card */
 	EFX_POPULATE_OWORD_10(tx_desc_ptr,
@@ -470,7 +465,7 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
 	}
 }
 
-static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
+static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t tx_flush_descq;
@@ -484,7 +479,7 @@ static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
 	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
 }
 
-void falcon_fini_tx(struct efx_tx_queue *tx_queue)
+void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t tx_desc_ptr;
@@ -498,36 +493,36 @@ void falcon_fini_tx(struct efx_tx_queue *tx_queue)
 			 tx_queue->queue);
 
 	/* Unpin TX descriptor ring */
-	falcon_fini_special_buffer(efx, &tx_queue->txd);
+	efx_fini_special_buffer(efx, &tx_queue->txd);
 }
 
 /* Free buffers backing TX queue */
-void falcon_remove_tx(struct efx_tx_queue *tx_queue)
+void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
 {
-	falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
+	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
 }
 
 /**************************************************************************
  *
- * Falcon RX path
+ * RX path
  *
 **************************************************************************/
 
 /* Returns a pointer to the specified descriptor in the RX descriptor queue */
-static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
-					  unsigned int index)
+static inline efx_qword_t *
+efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
 {
 	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
 }
 
 /* This creates an entry in the RX descriptor queue */
-static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
-					unsigned index)
+static inline void
+efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
 {
 	struct efx_rx_buffer *rx_buf;
 	efx_qword_t *rxd;
 
-	rxd = falcon_rx_desc(rx_queue, index);
+	rxd = efx_rx_desc(rx_queue, index);
 	rx_buf = efx_rx_buffer(rx_queue, index);
 	EFX_POPULATE_QWORD_3(*rxd,
 			     FSF_AZ_RX_KER_BUF_SIZE,
@@ -540,15 +535,15 @@ static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
 /* This writes to the RX_DESC_WPTR register for the specified receive
  * descriptor ring.
  */
-void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
+void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
 {
 	efx_dword_t reg;
 	unsigned write_ptr;
 
 	while (rx_queue->notified_count != rx_queue->added_count) {
-		falcon_build_rx_desc(rx_queue,
-				     rx_queue->notified_count &
-				     EFX_RXQ_MASK);
+		efx_build_rx_desc(rx_queue,
+				  rx_queue->notified_count &
+				  EFX_RXQ_MASK);
 		++rx_queue->notified_count;
 	}
 
@@ -559,16 +554,16 @@ void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
 			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
 }
 
-int falcon_probe_rx(struct efx_rx_queue *rx_queue)
+int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
 		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
-	return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
-					   EFX_RXQ_SIZE * sizeof(efx_qword_t));
+	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
+					EFX_RXQ_SIZE * sizeof(efx_qword_t));
 }
 
-void falcon_init_rx(struct efx_rx_queue *rx_queue)
+void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
 {
 	efx_oword_t rx_desc_ptr;
 	struct efx_nic *efx = rx_queue->efx;
@@ -582,7 +577,7 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
 	rx_queue->flushed = FLUSH_NONE;
 
 	/* Pin RX descriptor ring */
-	falcon_init_special_buffer(efx, &rx_queue->rxd);
+	efx_init_special_buffer(efx, &rx_queue->rxd);
 
 	/* Push RX descriptor ring to card */
 	EFX_POPULATE_OWORD_10(rx_desc_ptr,
@@ -603,7 +598,7 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
 			 rx_queue->queue);
 }
 
-static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
+static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	efx_oword_t rx_flush_descq;
@@ -617,7 +612,7 @@ static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
 	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
 }
 
-void falcon_fini_rx(struct efx_rx_queue *rx_queue)
+void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
 {
 	efx_oword_t rx_desc_ptr;
 	struct efx_nic *efx = rx_queue->efx;
@@ -631,18 +626,18 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue)
 			 rx_queue->queue);
 
 	/* Unpin RX descriptor ring */
-	falcon_fini_special_buffer(efx, &rx_queue->rxd);
+	efx_fini_special_buffer(efx, &rx_queue->rxd);
 }
 
 /* Free buffers backing RX queue */
-void falcon_remove_rx(struct efx_rx_queue *rx_queue)
+void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
 {
-	falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
+	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
 }
 
 /**************************************************************************
  *
- * Falcon event queue processing
+ * Event queue processing
  * Event queues are processed by per-channel tasklets.
  *
 **************************************************************************/
@@ -656,7 +651,7 @@ void falcon_remove_rx(struct efx_rx_queue *rx_queue)
  * whereas channel->eventq_read_ptr contains the index of the "next to
  * read" event.
  */
-void falcon_eventq_read_ack(struct efx_channel *channel)
+void efx_nic_eventq_read_ack(struct efx_channel *channel)
 {
 	efx_dword_t reg;
 	struct efx_nic *efx = channel->efx;
@@ -667,7 +662,7 @@ void falcon_eventq_read_ack(struct efx_channel *channel)
 }
 
 /* Use HW to insert a SW defined event */
-void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
+void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
 {
 	efx_oword_t drv_ev_reg;
 
@@ -683,11 +678,11 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
 
 /* Handle a transmit completion event
  *
- * Falcon batches TX completion events; the message we receive is of
+ * The NIC batches TX completion events; the message we receive is of
  * the form "complete all TX events up to this index".
  */
-static void falcon_handle_tx_event(struct efx_channel *channel,
-				   efx_qword_t *event)
+static void
+efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 {
 	unsigned int tx_ev_desc_ptr;
 	unsigned int tx_ev_q_label;
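A hedged sketch of what "complete all TX events up to this index" means for the host: the event carries only a ring index, and masked subtraction recovers how many descriptors that retires, even across counter wrap. The ring size, field and helper names below are illustrative, not the driver's.

#include <stdio.h>

#define TXQ_SIZE 1024u		/* power of two, as the BUILD_BUG_ONs enforce */
#define TXQ_MASK (TXQ_SIZE - 1)

/* How many descriptors one batched completion retires, given the
 * host's count of already-freed descriptors and the event's index. */
static unsigned completed(unsigned read_count, unsigned tx_ev_desc_ptr)
{
	return (tx_ev_desc_ptr + 1 - read_count) & TXQ_MASK;
}

int main(void)
{
	/* Host has freed up to index 9; the event says index 41 is done,
	 * so this single event retires 32 descriptors at once. */
	printf("%u\n", completed(10, 41));
	return 0;
}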
@@ -710,7 +705,7 @@ static void falcon_handle_tx_event(struct efx_channel *channel,
 
 		if (efx_dev_registered(efx))
 			netif_tx_lock(efx->net_dev);
-		falcon_notify_tx_desc(tx_queue);
+		efx_notify_tx_desc(tx_queue);
 		if (efx_dev_registered(efx))
 			netif_tx_unlock(efx->net_dev);
 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
@@ -724,10 +719,10 @@ static void falcon_handle_tx_event(struct efx_channel *channel,
 }
 
 /* Detect errors included in the rx_evt_pkt_ok bit. */
-static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
-				    const efx_qword_t *event,
-				    bool *rx_ev_pkt_ok,
-				    bool *discard)
+static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
+				 const efx_qword_t *event,
+				 bool *rx_ev_pkt_ok,
+				 bool *discard)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
@@ -799,8 +794,8 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 }
 
 /* Handle receive events that are not in-order. */
-static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
-				       unsigned index)
+static void
+efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned expected, dropped;
@@ -816,13 +811,13 @@ static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
 
 /* Handle a packet received event
  *
- * Falcon silicon gives a "discard" flag if it's a unicast packet with the
+ * The NIC gives a "discard" flag if it's a unicast packet with the
  * wrong destination address
  * Also "is multicast" and "matches multicast filter" flags can be used to
  * discard non-matching multicast packets.
  */
-static void falcon_handle_rx_event(struct efx_channel *channel,
-				   const efx_qword_t *event)
+static void
+efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 {
 	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
 	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
@@ -845,19 +840,18 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
 	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
 	if (unlikely(rx_ev_desc_ptr != expected_ptr))
-		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
+		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
 
 	if (likely(rx_ev_pkt_ok)) {
-		/* If packet is marked as OK and packet type is TCP/IPv4 or
-		 * UDP/IPv4, then we can rely on the hardware checksum.
+		/* If packet is marked as OK and packet type is TCP/IP or
+		 * UDP/IP, then we can rely on the hardware checksum.
 		 */
 		checksummed =
 			likely(efx->rx_checksum_enabled) &&
-			(rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP ||
-			 rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP);
+			(rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
+			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
 	} else {
-		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
-					&discard);
+		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
 		checksummed = false;
 	}
 
@@ -881,8 +875,8 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
 }
 
 /* Global events are basically PHY events */
-static void falcon_handle_global_event(struct efx_channel *channel,
-				       efx_qword_t *event)
+static void
+efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
 {
 	struct efx_nic *efx = channel->efx;
 	bool handled = false;
@@ -918,8 +912,8 @@ static void falcon_handle_global_event(struct efx_channel *channel,
 			EFX_QWORD_VAL(*event));
 }
 
-static void falcon_handle_driver_event(struct efx_channel *channel,
-				       efx_qword_t *event)
+static void
+efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
 {
 	struct efx_nic *efx = channel->efx;
 	unsigned int ev_sub_code;
@@ -980,7 +974,7 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
 	}
 }
 
-int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
+int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
 {
 	unsigned int read_ptr;
 	efx_qword_t event, *p_event;
@@ -990,10 +984,10 @@ int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
 	read_ptr = channel->eventq_read_ptr;
 
 	do {
-		p_event = falcon_event(channel, read_ptr);
+		p_event = efx_event(channel, read_ptr);
 		event = *p_event;
 
-		if (!falcon_event_present(&event))
+		if (!efx_event_present(&event))
 			/* End of events */
 			break;
 
@@ -1007,11 +1001,11 @@ int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
 
 		switch (ev_code) {
 		case FSE_AZ_EV_CODE_RX_EV:
-			falcon_handle_rx_event(channel, &event);
+			efx_handle_rx_event(channel, &event);
 			++rx_packets;
 			break;
 		case FSE_AZ_EV_CODE_TX_EV:
-			falcon_handle_tx_event(channel, &event);
+			efx_handle_tx_event(channel, &event);
 			break;
 		case FSE_AZ_EV_CODE_DRV_GEN_EV:
 			channel->eventq_magic = EFX_QWORD_FIELD(
@@ -1021,10 +1015,10 @@ int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
 				EFX_QWORD_VAL(event));
 			break;
 		case FSE_AZ_EV_CODE_GLOBAL_EV:
-			falcon_handle_global_event(channel, &event);
+			efx_handle_global_event(channel, &event);
 			break;
 		case FSE_AZ_EV_CODE_DRIVER_EV:
-			falcon_handle_driver_event(channel, &event);
+			efx_handle_driver_event(channel, &event);
 			break;
 		default:
 			EFX_ERR(channel->efx, "channel %d unknown event type %d"
@@ -1066,16 +1060,16 @@ static void falcon_push_irq_moderation(struct efx_channel *channel)
 }
 
 /* Allocate buffer table entries for event queue */
-int falcon_probe_eventq(struct efx_channel *channel)
+int efx_nic_probe_eventq(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
 	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
 		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
-	return falcon_alloc_special_buffer(efx, &channel->eventq,
-					   EFX_EVQ_SIZE * sizeof(efx_qword_t));
+	return efx_alloc_special_buffer(efx, &channel->eventq,
+					EFX_EVQ_SIZE * sizeof(efx_qword_t));
 }
 
-void falcon_init_eventq(struct efx_channel *channel)
+void efx_nic_init_eventq(struct efx_channel *channel)
 {
 	efx_oword_t evq_ptr;
 	struct efx_nic *efx = channel->efx;
@@ -1085,7 +1079,7 @@ void falcon_init_eventq(struct efx_channel *channel)
 		channel->eventq.index + channel->eventq.entries - 1);
 
 	/* Pin event queue buffer */
-	falcon_init_special_buffer(efx, &channel->eventq);
+	efx_init_special_buffer(efx, &channel->eventq);
 
 	/* Fill event queue with all ones (i.e. empty events) */
 	memset(channel->eventq.addr, 0xff, channel->eventq.len);
@@ -1098,10 +1092,10 @@ void falcon_init_eventq(struct efx_channel *channel)
 	efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
 			 channel->channel);
 
-	falcon_push_irq_moderation(channel);
+	efx->type->push_irq_moderation(channel);
 }
 
-void falcon_fini_eventq(struct efx_channel *channel)
+void efx_nic_fini_eventq(struct efx_channel *channel)
 {
 	efx_oword_t eventq_ptr;
 	struct efx_nic *efx = channel->efx;
@@ -1112,13 +1106,13 @@ void falcon_fini_eventq(struct efx_channel *channel)
 			 channel->channel);
 
 	/* Unpin event queue */
-	falcon_fini_special_buffer(efx, &channel->eventq);
+	efx_fini_special_buffer(efx, &channel->eventq);
 }
 
 /* Free buffers backing event queue */
-void falcon_remove_eventq(struct efx_channel *channel)
+void efx_nic_remove_eventq(struct efx_channel *channel)
 {
-	falcon_free_special_buffer(channel->efx, &channel->eventq);
+	efx_free_special_buffer(channel->efx, &channel->eventq);
 }
 
 
@@ -1126,14 +1120,14 @@ void falcon_remove_eventq(struct efx_channel *channel)
  * process_eventq() should pick up the event and place the value of
  * "magic" into channel->eventq_magic;
  */
-void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
+void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic)
 {
 	efx_qword_t test_event;
 
 	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
 			     FSE_AZ_EV_CODE_DRV_GEN_EV,
 			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
-	falcon_generate_event(channel, &test_event);
+	efx_generate_event(channel, &test_event);
 }
 
 /**************************************************************************
@@ -1143,7 +1137,7 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
 **************************************************************************/
 
 
-static void falcon_poll_flush_events(struct efx_nic *efx)
+static void efx_poll_flush_events(struct efx_nic *efx)
 {
 	struct efx_channel *channel = &efx->channel[0];
 	struct efx_tx_queue *tx_queue;
@@ -1152,11 +1146,11 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
 	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;
 
 	do {
-		efx_qword_t *event = falcon_event(channel, read_ptr);
+		efx_qword_t *event = efx_event(channel, read_ptr);
 		int ev_code, ev_sub_code, ev_queue;
 		bool ev_failed;
 
-		if (!falcon_event_present(event))
+		if (!efx_event_present(event))
 			break;
 
 		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
@@ -1208,7 +1202,7 @@ static void falcon_prepare_flush(struct efx_nic *efx)
 /* Handle tx and rx flushes at the same time, since they run in
  * parallel in the hardware and there's no reason for us to
  * serialise them */
-int falcon_flush_queues(struct efx_nic *efx)
+int efx_nic_flush_queues(struct efx_nic *efx)
 {
 	struct efx_rx_queue *rx_queue;
 	struct efx_tx_queue *tx_queue;
@@ -1219,22 +1213,22 @@ int falcon_flush_queues(struct efx_nic *efx)
 
 	/* Flush all tx queues in parallel */
 	efx_for_each_tx_queue(tx_queue, efx)
-		falcon_flush_tx_queue(tx_queue);
+		efx_flush_tx_queue(tx_queue);
 
 	/* The hardware supports four concurrent rx flushes, each of which may
 	 * need to be retried if there is an outstanding descriptor fetch */
-	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
+	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
 		rx_pending = tx_pending = 0;
 		efx_for_each_rx_queue(rx_queue, efx) {
 			if (rx_queue->flushed == FLUSH_PENDING)
 				++rx_pending;
 		}
 		efx_for_each_rx_queue(rx_queue, efx) {
-			if (rx_pending == FALCON_RX_FLUSH_COUNT)
+			if (rx_pending == EFX_RX_FLUSH_COUNT)
 				break;
 			if (rx_queue->flushed == FLUSH_FAILED ||
 			    rx_queue->flushed == FLUSH_NONE) {
-				falcon_flush_rx_queue(rx_queue);
+				efx_flush_rx_queue(rx_queue);
 				++rx_pending;
 			}
 		}
@@ -1246,8 +1240,8 @@ int falcon_flush_queues(struct efx_nic *efx)
 		if (rx_pending == 0 && tx_pending == 0)
 			return 0;
 
-		msleep(FALCON_FLUSH_INTERVAL);
-		falcon_poll_flush_events(efx);
+		msleep(EFX_FLUSH_INTERVAL);
+		efx_poll_flush_events(efx);
 	}
 
 	/* Mark the queues as all flushed. We're going to return failure
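For orientation, a hedged userspace sketch of the bounded polling pattern above: retry up to EFX_FLUSH_POLL_COUNT times with EFX_FLUSH_INTERVAL ms sleeps between polls, for a worst-case wait of 100 * 10 ms = 1 s before the flush is declared failed. The stand-in state below simulates the hardware; names are illustrative, not the driver's.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define FLUSH_INTERVAL_MS 10	/* sleep between polls */
#define FLUSH_POLL_COUNT  100	/* max polls: worst case 100 * 10 ms = 1 s */

/* Stand-in for "did every queue finish flushing?"; a simple countdown
 * plays the role of the NIC completing flushes over time. */
static int pending = 25;
static bool all_queues_flushed(void) { return pending == 0; }
static void poll_flush_events(void) { if (pending > 0) --pending; }

int main(void)
{
	for (int i = 0; i < FLUSH_POLL_COUNT; ++i) {
		if (all_queues_flushed()) {
			printf("flushed after %d polls\n", i);
			return 0;
		}
		usleep(FLUSH_INTERVAL_MS * 1000);
		poll_flush_events();
	}
	puts("flush timed out; queues marked as failed");
	return 1;
}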
@@ -1273,15 +1267,15 @@ int falcon_flush_queues(struct efx_nic *efx)
 
 /**************************************************************************
  *
- * Falcon hardware interrupts
+ * Hardware interrupts
  * The hardware interrupt handler does very little work; all the event
  * queue processing is carried out by per-channel tasklets.
  *
 **************************************************************************/
 
-/* Enable/disable/generate Falcon interrupts */
-static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
-				     int force)
+/* Enable/disable/generate interrupts */
+static inline void efx_nic_interrupts(struct efx_nic *efx,
+				      bool enabled, bool force)
 {
 	efx_oword_t int_en_reg_ker;
 
@@ -1291,7 +1285,7 @@ static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
 	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
 }
 
-void falcon_enable_interrupts(struct efx_nic *efx)
+void efx_nic_enable_interrupts(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 
@@ -1299,7 +1293,7 @@ void falcon_enable_interrupts(struct efx_nic *efx)
 	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
 
 	/* Enable interrupts */
-	falcon_interrupts(efx, 1, 0);
+	efx_nic_interrupts(efx, true, false);
 
 	/* Force processing of all the channels to get the EVQ RPTRs up to
 	   date */
@@ -1307,19 +1301,19 @@ void falcon_enable_interrupts(struct efx_nic *efx)
 		efx_schedule_channel(channel);
 }
 
-void falcon_disable_interrupts(struct efx_nic *efx)
+void efx_nic_disable_interrupts(struct efx_nic *efx)
 {
 	/* Disable interrupts */
-	falcon_interrupts(efx, 0, 0);
+	efx_nic_interrupts(efx, false, false);
 }
 
-/* Generate a Falcon test interrupt
+/* Generate a test interrupt
  * Interrupt must already have been enabled, otherwise nasty things
  * may happen.
  */
-void falcon_generate_interrupt(struct efx_nic *efx)
+void efx_nic_generate_interrupt(struct efx_nic *efx)
 {
-	falcon_interrupts(efx, 1, 1);
+	efx_nic_interrupts(efx, true, true);
 }
 
 /* Acknowledge a legacy interrupt from Falcon
@@ -1332,7 +1326,7 @@ void falcon_generate_interrupt(struct efx_nic *efx)
  *
  * NB most hardware supports MSI interrupts
  */
-static inline void falcon_irq_ack_a1(struct efx_nic *efx)
+inline void falcon_irq_ack_a1(struct efx_nic *efx)
 {
 	efx_dword_t reg;
 
@@ -1344,7 +1338,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
 /* Process a fatal interrupt
  * Disable bus mastering ASAP and schedule a reset
  */
-static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
+irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
 {
 	struct falcon_nic_data *nic_data = efx->nic_data;
 	efx_oword_t *int_ker = efx->irq_status.addr;
@@ -1372,18 +1366,18 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
 
 	/* Disable both devices */
 	pci_clear_master(efx->pci_dev);
-	if (FALCON_IS_DUAL_FUNC(efx))
+	if (efx_nic_is_dual_func(efx))
 		pci_clear_master(nic_data->pci_dev2);
-	falcon_disable_interrupts(efx);
+	efx_nic_disable_interrupts(efx);
 
 	/* Count errors and reset or disable the NIC accordingly */
 	if (efx->int_error_count == 0 ||
 	    time_after(jiffies, efx->int_error_expire)) {
 		efx->int_error_count = 0;
 		efx->int_error_expire =
-			jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
+			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
 	}
-	if (++efx->int_error_count < FALCON_MAX_INT_ERRORS) {
+	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
 		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
 		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
 	} else {
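A userspace analogue of the rate-limit window in the hunk above, with time() standing in for jiffies: restart the window when it has lapsed, then decide between scheduling a reset and disabling the device once too many fatal errors land inside one window. Constants match the renamed defines; everything else is illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define INT_ERROR_EXPIRE 3600	/* window length in seconds */
#define MAX_INT_ERRORS   5	/* fatal errors tolerated per window */

struct err_state { unsigned count; time_t expire; };

/* Returns true to schedule a reset, false to disable the device. */
static bool fatal_error_allows_reset(struct err_state *s)
{
	time_t now = time(NULL);

	if (s->count == 0 || now > s->expire) {
		s->count = 0;
		s->expire = now + INT_ERROR_EXPIRE;
	}
	return ++s->count < MAX_INT_ERRORS;
}

int main(void)
{
	struct err_state s = { 0, 0 };

	for (int i = 0; i < 6; i++)	/* 5th error in window disables */
		printf("error %d -> %s\n", i + 1,
		       fatal_error_allows_reset(&s) ? "reset" : "disable");
	return 0;
}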
@@ -1395,10 +1389,10 @@ out:
 	return IRQ_HANDLED;
 }
 
-/* Handle a legacy interrupt from Falcon
+/* Handle a legacy interrupt
  * Acknowledges the interrupt and schedule event queue processing.
  */
-static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
+static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
 {
 	struct efx_nic *efx = dev_id;
 	efx_oword_t *int_ker = efx->irq_status.addr;
@@ -1415,13 +1409,13 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 	/* Check to see if we have a serious error condition */
 	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
 	if (unlikely(syserr))
-		return falcon_fatal_interrupt(efx);
+		return efx_nic_fatal_interrupt(efx);
 
 	/* Schedule processing of any interrupting queues */
 	efx_for_each_channel(channel, efx) {
 		if ((queues & 1) ||
-		    falcon_event_present(
-			    falcon_event(channel, channel->eventq_read_ptr))) {
+		    efx_event_present(
+			    efx_event(channel, channel->eventq_read_ptr))) {
 			efx_schedule_channel(channel);
 			result = IRQ_HANDLED;
 		}
@@ -1438,7 +1432,7 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 }
 
 
-static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
+irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 {
 	struct efx_nic *efx = dev_id;
 	efx_oword_t *int_ker = efx->irq_status.addr;
@@ -1461,7 +1455,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 	/* Check to see if we have a serious error condition */
 	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
 	if (unlikely(syserr))
-		return falcon_fatal_interrupt(efx);
+		return efx_nic_fatal_interrupt(efx);
 
 	/* Determine interrupting queues, clear interrupt status
 	 * register and acknowledge the device interrupt.
@@ -1484,14 +1478,14 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-/* Handle an MSI interrupt from Falcon
+/* Handle an MSI interrupt
  *
  * Handle an MSI hardware interrupt. This routine schedules event
  * queue processing. No interrupt acknowledgement cycle is necessary.
  * Also, we never need to check that the interrupt is for us, since
  * MSI interrupts cannot be shared.
  */
-static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
+static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
 {
 	struct efx_channel *channel = dev_id;
 	struct efx_nic *efx = channel->efx;
@@ -1505,7 +1499,7 @@ static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
1505 /* Check to see if we have a serious error condition */ 1499 /* Check to see if we have a serious error condition */
1506 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1500 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1507 if (unlikely(syserr)) 1501 if (unlikely(syserr))
1508 return falcon_fatal_interrupt(efx); 1502 return efx_nic_fatal_interrupt(efx);
1509 1503
1510 /* Schedule processing of the channel */ 1504 /* Schedule processing of the channel */
1511 efx_schedule_channel(channel); 1505 efx_schedule_channel(channel);
@@ -1517,7 +1511,7 @@ static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
1517/* Setup RSS indirection table. 1511/* Setup RSS indirection table.
1518 * This maps the hash value of the packet to an RX queue 1512 * This maps the hash value of the packet to an RX queue
1519 */ 1513 */
1520static void falcon_setup_rss_indir_table(struct efx_nic *efx) 1514static void efx_setup_rss_indir_table(struct efx_nic *efx)
1521{ 1515{
1522 int i = 0; 1516 int i = 0;
1523 unsigned long offset; 1517 unsigned long offset;
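
The body of efx_setup_rss_indir_table() is unchanged by this patch and elided from the hunk; the usual pattern walks the indirection table and spreads RX queues across it round-robin. A hedged sketch, assuming the B0 register layout (the FR_BZ_RX_INDIRECTION_TBL names come from the register headers, not this diff):

static void sketch_setup_rss_indir_table(struct efx_nic *efx)
{
	efx_dword_t dword;
	unsigned long offset;
	int i = 0;

	/* Map hash bucket i to RX queue (i mod n_rx_queues) */
	for (offset = FR_BZ_RX_INDIRECTION_TBL;
	     offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
	     offset += 0x10) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     i % efx->n_rx_queues);
		efx_writed(efx, &dword, offset);
		i++;
	}
}
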
@@ -1539,7 +1533,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
1539/* Hook interrupt handler(s) 1533/* Hook interrupt handler(s)
1540 * Try MSI and then legacy interrupts. 1534 * Try MSI and then legacy interrupts.
1541 */ 1535 */
1542int falcon_init_interrupt(struct efx_nic *efx) 1536int efx_nic_init_interrupt(struct efx_nic *efx)
1543{ 1537{
1544 struct efx_channel *channel; 1538 struct efx_channel *channel;
1545 int rc; 1539 int rc;
@@ -1547,7 +1541,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
1547 if (!EFX_INT_MODE_USE_MSI(efx)) { 1541 if (!EFX_INT_MODE_USE_MSI(efx)) {
1548 irq_handler_t handler; 1542 irq_handler_t handler;
1549 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1543 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1550 handler = falcon_legacy_interrupt_b0; 1544 handler = efx_legacy_interrupt;
1551 else 1545 else
1552 handler = falcon_legacy_interrupt_a1; 1546 handler = falcon_legacy_interrupt_a1;
1553 1547
@@ -1563,7 +1557,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
1563 1557
1564 /* Hook MSI or MSI-X interrupt */ 1558 /* Hook MSI or MSI-X interrupt */
1565 efx_for_each_channel(channel, efx) { 1559 efx_for_each_channel(channel, efx) {
1566 rc = request_irq(channel->irq, falcon_msi_interrupt, 1560 rc = request_irq(channel->irq, efx_msi_interrupt,
1567 IRQF_PROBE_SHARED, /* Not shared */ 1561 IRQF_PROBE_SHARED, /* Not shared */
1568 channel->name, channel); 1562 channel->name, channel);
1569 if (rc) { 1563 if (rc) {
@@ -1581,7 +1575,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
1581 return rc; 1575 return rc;
1582} 1576}
1583 1577
1584void falcon_fini_interrupt(struct efx_nic *efx) 1578void efx_nic_fini_interrupt(struct efx_nic *efx)
1585{ 1579{
1586 struct efx_channel *channel; 1580 struct efx_channel *channel;
1587 efx_oword_t reg; 1581 efx_oword_t reg;
@@ -2322,8 +2316,8 @@ static int falcon_probe_port(struct efx_nic *efx)
2322 efx->wanted_fc = EFX_FC_RX; 2316 efx->wanted_fc = EFX_FC_RX;
2323 2317
2324 /* Allocate buffer for stats */ 2318 /* Allocate buffer for stats */
2325 rc = falcon_alloc_buffer(efx, &efx->stats_buffer, 2319 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
2326 FALCON_MAC_STATS_SIZE); 2320 FALCON_MAC_STATS_SIZE);
2327 if (rc) 2321 if (rc)
2328 return rc; 2322 return rc;
2329 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n", 2323 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
@@ -2336,7 +2330,7 @@ static int falcon_probe_port(struct efx_nic *efx)
2336 2330
2337static void falcon_remove_port(struct efx_nic *efx) 2331static void falcon_remove_port(struct efx_nic *efx)
2338{ 2332{
2339 falcon_free_buffer(efx, &efx->stats_buffer); 2333 efx_nic_free_buffer(efx, &efx->stats_buffer);
2340} 2334}
2341 2335
2342/************************************************************************** 2336/**************************************************************************
@@ -2414,11 +2408,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
2414 return falcon_read_nvram(efx, NULL); 2408 return falcon_read_nvram(efx, NULL);
2415} 2409}
2416 2410
2417/* Registers tested in the falcon register test */ 2411static const struct efx_nic_register_test falcon_b0_register_tests[] = {
2418static struct {
2419 unsigned address;
2420 efx_oword_t mask;
2421} efx_test_registers[] = {
2422 { FR_AZ_ADR_REGION, 2412 { FR_AZ_ADR_REGION,
2423 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, 2413 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
2424 { FR_AZ_RX_CFG, 2414 { FR_AZ_RX_CFG,
@@ -2464,7 +2454,9 @@ static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
2464 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); 2454 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
2465} 2455}
2466 2456
2467static int falcon_b0_test_registers(struct efx_nic *efx) 2457int efx_nic_test_registers(struct efx_nic *efx,
2458 const struct efx_nic_register_test *regs,
2459 size_t n_regs)
2468{ 2460{
2469 unsigned address = 0, i, j; 2461 unsigned address = 0, i, j;
2470 efx_oword_t mask, imask, original, reg, buf; 2462 efx_oword_t mask, imask, original, reg, buf;
@@ -2472,9 +2464,9 @@ static int falcon_b0_test_registers(struct efx_nic *efx)
2472 /* Falcon should be in loopback to isolate the XMAC from the PHY */ 2464 /* Falcon should be in loopback to isolate the XMAC from the PHY */
2473 WARN_ON(!LOOPBACK_INTERNAL(efx)); 2465 WARN_ON(!LOOPBACK_INTERNAL(efx));
2474 2466
2475 for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) { 2467 for (i = 0; i < n_regs; ++i) {
2476 address = efx_test_registers[i].address; 2468 address = regs[i].address;
2477 mask = imask = efx_test_registers[i].mask; 2469 mask = imask = regs[i].mask;
2478 EFX_INVERT_OWORD(imask); 2470 EFX_INVERT_OWORD(imask);
2479 2471
2480 efx_reado(efx, &original, address); 2472 efx_reado(efx, &original, address);
@@ -2517,6 +2509,12 @@ fail:
2517 return -EIO; 2509 return -EIO;
2518} 2510}
2519 2511
2512static int falcon_b0_test_registers(struct efx_nic *efx)
2513{
2514 return efx_nic_test_registers(efx, falcon_b0_register_tests,
2515 ARRAY_SIZE(falcon_b0_register_tests));
2516}
2517
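
The point of hoisting the test loop into efx_nic_test_registers() is that each NIC revision now only supplies a table, as falcon_b0_test_registers() does above. A hypothetical second user would look like this (the table entry is a placeholder, not a real mask):

static const struct efx_nic_register_test example_register_tests[] = {
	{ FR_AZ_RX_CFG,		/* placeholder entry for illustration */
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
};

static int example_test_registers(struct efx_nic *efx)
{
	return efx_nic_test_registers(efx, example_register_tests,
				      ARRAY_SIZE(example_register_tests));
}
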
2520/************************************************************************** 2518/**************************************************************************
2521 * 2519 *
2522 * Device reset 2520 * Device reset
@@ -2542,7 +2540,7 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2542 "function prior to hardware reset\n"); 2540 "function prior to hardware reset\n");
2543 goto fail1; 2541 goto fail1;
2544 } 2542 }
2545 if (FALCON_IS_DUAL_FUNC(efx)) { 2543 if (efx_nic_is_dual_func(efx)) {
2546 rc = pci_save_state(nic_data->pci_dev2); 2544 rc = pci_save_state(nic_data->pci_dev2);
2547 if (rc) { 2545 if (rc) {
2548 EFX_ERR(efx, "failed to backup PCI state of " 2546 EFX_ERR(efx, "failed to backup PCI state of "
@@ -2577,7 +2575,7 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2577 2575
2578 /* Restore PCI configuration if needed */ 2576 /* Restore PCI configuration if needed */
2579 if (method == RESET_TYPE_WORLD) { 2577 if (method == RESET_TYPE_WORLD) {
2580 if (FALCON_IS_DUAL_FUNC(efx)) { 2578 if (efx_nic_is_dual_func(efx)) {
2581 rc = pci_restore_state(nic_data->pci_dev2); 2579 rc = pci_restore_state(nic_data->pci_dev2);
2582 if (rc) { 2580 if (rc) {
2583 EFX_ERR(efx, "failed to restore PCI config for " 2581 EFX_ERR(efx, "failed to restore PCI config for "
@@ -2800,16 +2798,22 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2800 return rc; 2798 return rc;
2801} 2799}
2802 2800
2801u32 efx_nic_fpga_ver(struct efx_nic *efx)
2802{
2803 efx_oword_t altera_build;
2804
2805 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
2806 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
2807}
2808
2803/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port 2809/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
2804 * count, port speed). Set workaround and feature flags accordingly. 2810 * count, port speed). Set workaround and feature flags accordingly.
2805 */ 2811 */
2806static int falcon_probe_nic_variant(struct efx_nic *efx) 2812static int falcon_probe_nic_variant(struct efx_nic *efx)
2807{ 2813{
2808 efx_oword_t altera_build;
2809 efx_oword_t nic_stat; 2814 efx_oword_t nic_stat;
2810 2815
2811 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); 2816 if (efx_nic_fpga_ver(efx) != 0) {
2812 if (EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER)) {
2813 EFX_ERR(efx, "Falcon FPGA not supported\n"); 2817 EFX_ERR(efx, "Falcon FPGA not supported\n");
2814 return -ENODEV; 2818 return -ENODEV;
2815 } 2819 }
@@ -2893,7 +2897,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
2893 goto fail1; 2897 goto fail1;
2894 2898
2895 /* Probe secondary function if expected */ 2899 /* Probe secondary function if expected */
2896 if (FALCON_IS_DUAL_FUNC(efx)) { 2900 if (efx_nic_is_dual_func(efx)) {
2897 struct pci_dev *dev = pci_dev_get(efx->pci_dev); 2901 struct pci_dev *dev = pci_dev_get(efx->pci_dev);
2898 2902
2899 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID, 2903 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
@@ -2919,7 +2923,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
2919 } 2923 }
2920 2924
2921 /* Allocate memory for INT_KER */ 2925 /* Allocate memory for INT_KER */
2922 rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); 2926 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
2923 if (rc) 2927 if (rc)
2924 goto fail4; 2928 goto fail4;
2925 BUG_ON(efx->irq_status.dma_addr & 0x0f); 2929 BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -2965,7 +2969,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
2965 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); 2969 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
2966 fail5: 2970 fail5:
2967 falcon_remove_spi_devices(efx); 2971 falcon_remove_spi_devices(efx);
2968 falcon_free_buffer(efx, &efx->irq_status); 2972 efx_nic_free_buffer(efx, &efx->irq_status);
2969 fail4: 2973 fail4:
2970 fail3: 2974 fail3:
2971 if (nic_data->pci_dev2) { 2975 if (nic_data->pci_dev2) {
@@ -2988,8 +2992,8 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
2988 const unsigned ctrl_xon_thr = 20; 2992 const unsigned ctrl_xon_thr = 20;
2989 const unsigned ctrl_xoff_thr = 25; 2993 const unsigned ctrl_xoff_thr = 25;
2990 /* RX data FIFO thresholds (256-byte units; size varies) */ 2994 /* RX data FIFO thresholds (256-byte units; size varies) */
2991 int data_xon_thr = rx_xon_thresh_bytes >> 8; 2995 int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
2992 int data_xoff_thr = rx_xoff_thresh_bytes >> 8; 2996 int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
2993 efx_oword_t reg; 2997 efx_oword_t reg;
2994 2998
2995 efx_reado(efx, &reg, FR_AZ_RX_CFG); 2999 efx_reado(efx, &reg, FR_AZ_RX_CFG);
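
Note the unit conversion here: efx_nic_rx_xon_thresh and efx_nic_rx_xoff_thresh are byte counts, while the RX data FIFO is programmed in 256-byte blocks, hence the >> 8. For a hypothetical XOFF threshold of 54272 bytes, data_xoff_thr = 54272 >> 8 = 212 blocks.
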
@@ -3027,33 +3031,9 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
3027 efx_writeo(efx, &reg, FR_AZ_RX_CFG); 3031 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
3028} 3032}
3029 3033
3030/* This call performs hardware-specific global initialisation, such as 3034void efx_nic_init_common(struct efx_nic *efx)
3031 * defining the descriptor cache sizes and number of RSS channels.
3032 * It does not set up any buffers, descriptor rings or event queues.
3033 */
3034static int falcon_init_nic(struct efx_nic *efx)
3035{ 3035{
3036 efx_oword_t temp; 3036 efx_oword_t temp;
3037 int rc;
3038
3039 /* Use on-chip SRAM */
3040 efx_reado(efx, &temp, FR_AB_NIC_STAT);
3041 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
3042 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
3043
3044 /* Set the source of the GMAC clock */
3045 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
3046 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
3047 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
3048 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
3049 }
3050
3051 /* Select the correct MAC */
3052 falcon_clock_mac(efx);
3053
3054 rc = falcon_reset_sram(efx);
3055 if (rc)
3056 return rc;
3057 3037
3058 /* Set positions of descriptor caches in SRAM. */ 3038 /* Set positions of descriptor caches in SRAM. */
3059 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, 3039 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
@@ -3084,15 +3064,6 @@ static int falcon_init_nic(struct efx_nic *efx)
3084 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); 3064 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
3085 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); 3065 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
3086 3066
3087 /* Clear the parity enables on the TX data fifos as
3088 * they produce false parity errors because of timing issues
3089 */
3090 if (EFX_WORKAROUND_5129(efx)) {
3091 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
3092 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
3093 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
3094 }
3095
3096 /* Enable all the genuinely fatal interrupts. (They are still 3067 /* Enable all the genuinely fatal interrupts. (They are still
3097 * masked by the overall interrupt mask, controlled by 3068 * masked by the overall interrupt mask, controlled by
3098 * falcon_interrupts()). 3069 * falcon_interrupts()).
@@ -3106,6 +3077,64 @@ static int falcon_init_nic(struct efx_nic *efx)
3106 EFX_INVERT_OWORD(temp); 3077 EFX_INVERT_OWORD(temp);
3107 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 3078 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
3108 3079
3080 efx_setup_rss_indir_table(efx);
3081
3082 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
3083 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
3084 */
3085 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
3086 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
3087 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
3088 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
3089 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
3090 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
3091 /* Enable SW_EV to inherit in char driver - assume harmless here */
3092 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
3093 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
3094 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
3095 /* Squash TX of packets of 16 bytes or less */
3096 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
3097 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
3098 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
3099}
3100
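
Every register update in efx_nic_init_common() follows the same read-modify-write idiom on the 128-bit owords; isolated as a sketch, using only names that appear in the hunk above:

static void sketch_disable_tx_push(struct efx_nic *efx)
{
	efx_oword_t temp;

	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);	  /* read current value */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);  /* modify one field */
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);	  /* write back */
}
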
3101/* This call performs hardware-specific global initialisation, such as
3102 * defining the descriptor cache sizes and number of RSS channels.
3103 * It does not set up any buffers, descriptor rings or event queues.
3104 */
3105static int falcon_init_nic(struct efx_nic *efx)
3106{
3107 efx_oword_t temp;
3108 int rc;
3109
3110 /* Use on-chip SRAM */
3111 efx_reado(efx, &temp, FR_AB_NIC_STAT);
3112 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
3113 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
3114
3115 /* Set the source of the GMAC clock */
3116 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
3117 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
3118 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
3119 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
3120 }
3121
3122 /* Select the correct MAC */
3123 falcon_clock_mac(efx);
3124
3125 rc = falcon_reset_sram(efx);
3126 if (rc)
3127 return rc;
3128
3129 /* Clear the parity enables on the TX data fifos as
3130 * they produce false parity errors because of timing issues
3131 */
3132 if (EFX_WORKAROUND_5129(efx)) {
3133 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
3134 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
3135 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
3136 }
3137
3109 if (EFX_WORKAROUND_7244(efx)) { 3138 if (EFX_WORKAROUND_7244(efx)) {
3110 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL); 3139 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
3111 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8); 3140 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
@@ -3115,8 +3144,6 @@ static int falcon_init_nic(struct efx_nic *efx)
3115 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL); 3144 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
3116 } 3145 }
3117 3146
3118 falcon_setup_rss_indir_table(efx);
3119
3120 /* XXX This is documented only for Falcon A0/A1 */ 3147 /* XXX This is documented only for Falcon A0/A1 */
3121 /* Setup RX. "Wait for descriptor" is broken and must 3148 /* Setup RX. "Wait for descriptor" is broken and must
3122 * be disabled. RXDP recovery shouldn't be needed, but is. 3149 * be disabled. RXDP recovery shouldn't be needed, but is.
@@ -3128,24 +3155,6 @@ static int falcon_init_nic(struct efx_nic *efx)
3128 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1); 3155 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
3129 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST); 3156 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
3130 3157
3131 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
3132 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
3133 */
3134 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
3135 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
3136 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
3137 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
3138 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
3139 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
3140 /* Enable SW_EV to inherit in char driver - assume harmless here */
3141 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
3142 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
3143 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
3144 /* Squash TX of packets of 16 bytes or less */
3145 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
3146 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
3147 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
3148
3149 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 3158 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
3150 * descriptors (which is bad). 3159 * descriptors (which is bad).
3151 */ 3160 */
@@ -3161,6 +3170,8 @@ static int falcon_init_nic(struct efx_nic *efx)
3161 efx_writeo(efx, &temp, FR_BZ_DP_CTRL); 3170 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
3162 } 3171 }
3163 3172
3173 efx_nic_init_common(efx);
3174
3164 return 0; 3175 return 0;
3165} 3176}
3166 3177
@@ -3178,7 +3189,7 @@ static void falcon_remove_nic(struct efx_nic *efx)
3178 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); 3189 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
3179 3190
3180 falcon_remove_spi_devices(efx); 3191 falcon_remove_spi_devices(efx);
3181 falcon_free_buffer(efx, &efx->irq_status); 3192 efx_nic_free_buffer(efx, &efx->irq_status);
3182 3193
3183 falcon_reset_hw(efx, RESET_TYPE_ALL); 3194 falcon_reset_hw(efx, RESET_TYPE_ALL);
3184 3195
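
After this split, falcon_init_nic() keeps only the Falcon-specific steps (SRAM, GMAC clock, workarounds) and ends by calling efx_nic_init_common() for the shared datapath setup. A hypothetical later NIC type would follow the same shape (example_setup_sram() is invented for illustration):

static int example_init_nic(struct efx_nic *efx)
{
	int rc;

	rc = example_setup_sram(efx);	/* hypothetical chip-specific step */
	if (rc)
		return rc;

	efx_nic_init_common(efx);	/* shared descriptor/IRQ/TX setup */
	return 0;
}
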
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 464c2747260f..875b58e94e8e 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -30,6 +30,14 @@ static inline int efx_nic_rev(struct efx_nic *efx)
30 return efx->type->revision; 30 return efx->type->revision;
31} 31}
32 32
33extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
34
35/* NIC has two interlinked PCI functions for the same port. */
36static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
37{
38 return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
39}
40
33/** 41/**
34 * struct falcon_board_type - board operations and type information 42 * struct falcon_board_type - board operations and type information
35 * @id: Board type id, as found in NVRAM 43 * @id: Board type id, as found in NVRAM
@@ -108,49 +116,65 @@ extern struct efx_nic_type falcon_b0_nic_type;
108extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info); 116extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
109 117
110/* TX data path */ 118/* TX data path */
111extern int falcon_probe_tx(struct efx_tx_queue *tx_queue); 119extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
112extern void falcon_init_tx(struct efx_tx_queue *tx_queue); 120extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue);
113extern void falcon_fini_tx(struct efx_tx_queue *tx_queue); 121extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue);
114extern void falcon_remove_tx(struct efx_tx_queue *tx_queue); 122extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue);
115extern void falcon_push_buffers(struct efx_tx_queue *tx_queue); 123extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue);
116 124
117/* RX data path */ 125/* RX data path */
118extern int falcon_probe_rx(struct efx_rx_queue *rx_queue); 126extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue);
119extern void falcon_init_rx(struct efx_rx_queue *rx_queue); 127extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
120extern void falcon_fini_rx(struct efx_rx_queue *rx_queue); 128extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
121extern void falcon_remove_rx(struct efx_rx_queue *rx_queue); 129extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
122extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue); 130extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
123 131
124/* Event data path */ 132/* Event data path */
125extern int falcon_probe_eventq(struct efx_channel *channel); 133extern int efx_nic_probe_eventq(struct efx_channel *channel);
126extern void falcon_init_eventq(struct efx_channel *channel); 134extern void efx_nic_init_eventq(struct efx_channel *channel);
127extern void falcon_fini_eventq(struct efx_channel *channel); 135extern void efx_nic_fini_eventq(struct efx_channel *channel);
128extern void falcon_remove_eventq(struct efx_channel *channel); 136extern void efx_nic_remove_eventq(struct efx_channel *channel);
129extern int falcon_process_eventq(struct efx_channel *channel, int rx_quota); 137extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
130extern void falcon_eventq_read_ack(struct efx_channel *channel); 138extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
131 139
132/* MAC/PHY */ 140/* MAC/PHY */
133extern void falcon_drain_tx_fifo(struct efx_nic *efx); 141extern void falcon_drain_tx_fifo(struct efx_nic *efx);
134extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); 142extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
143extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
135 144
136/* Interrupts and test events */ 145/* Interrupts and test events */
137extern int falcon_init_interrupt(struct efx_nic *efx); 146extern int efx_nic_init_interrupt(struct efx_nic *efx);
138extern void falcon_enable_interrupts(struct efx_nic *efx); 147extern void efx_nic_enable_interrupts(struct efx_nic *efx);
139extern void falcon_generate_test_event(struct efx_channel *channel, 148extern void efx_nic_generate_test_event(struct efx_channel *channel,
140 unsigned int magic); 149 unsigned int magic);
141extern void falcon_generate_interrupt(struct efx_nic *efx); 150extern void efx_nic_generate_interrupt(struct efx_nic *efx);
142extern void falcon_disable_interrupts(struct efx_nic *efx); 151extern void efx_nic_disable_interrupts(struct efx_nic *efx);
143extern void falcon_fini_interrupt(struct efx_nic *efx); 152extern void efx_nic_fini_interrupt(struct efx_nic *efx);
144 153extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
145#define FALCON_IRQ_MOD_RESOLUTION 5 154extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
155extern void falcon_irq_ack_a1(struct efx_nic *efx);
156
157#define EFX_IRQ_MOD_RESOLUTION 5
146 158
147/* Global Resources */ 159/* Global Resources */
148extern int falcon_flush_queues(struct efx_nic *efx); 160extern int efx_nic_flush_queues(struct efx_nic *efx);
149extern void falcon_start_nic_stats(struct efx_nic *efx); 161extern void falcon_start_nic_stats(struct efx_nic *efx);
150extern void falcon_stop_nic_stats(struct efx_nic *efx); 162extern void falcon_stop_nic_stats(struct efx_nic *efx);
151extern int falcon_reset_xaui(struct efx_nic *efx); 163extern int falcon_reset_xaui(struct efx_nic *efx);
164extern void efx_nic_init_common(struct efx_nic *efx);
165
166int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
167 unsigned int len);
168void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
152 169
153/* Tests */ 170/* Tests */
171struct efx_nic_register_test {
172 unsigned address;
173 efx_oword_t mask;
174};
175extern int efx_nic_test_registers(struct efx_nic *efx,
176 const struct efx_nic_register_test *regs,
177 size_t n_regs);
154 178
155/************************************************************************** 179/**************************************************************************
156 * 180 *
@@ -186,8 +210,8 @@ extern int falcon_reset_xaui(struct efx_nic *efx);
186#define MAC_DATA_LBN 0 210#define MAC_DATA_LBN 0
187#define MAC_DATA_WIDTH 32 211#define MAC_DATA_WIDTH 32
188 212
189extern void falcon_generate_event(struct efx_channel *channel, 213extern void efx_nic_generate_event(struct efx_channel *channel,
190 efx_qword_t *event); 214 efx_qword_t *event);
191 215
192extern void falcon_poll_xmac(struct efx_nic *efx); 216extern void falcon_poll_xmac(struct efx_nic *efx);
193 217
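
Usage sketch for the newly exported buffer helpers, mirroring the stats-buffer allocation in falcon_probe_port() above (the 1024-byte length is arbitrary):

static int example_alloc_scratch(struct efx_nic *efx,
				 struct efx_buffer *buffer)
{
	int rc;

	rc = efx_nic_alloc_buffer(efx, buffer, 1024);
	if (rc)
		return rc;
	/* buffer->addr is the CPU mapping, buffer->dma_addr the bus address */
	memset(buffer->addr, 0, buffer->len);
	efx_nic_free_buffer(efx, buffer);
	return 0;
}
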
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index accf055ff89d..8fffd3792947 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -341,7 +341,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
341 341
342 out: 342 out:
343 /* Send write pointer to card. */ 343 /* Send write pointer to card. */
344 falcon_notify_rx_desc(rx_queue); 344 efx_nic_notify_rx_desc(rx_queue);
345 345
346 /* If the fast fill is running from inside the refill tasklet, then 346 /* If the fast fill is running from inside the refill tasklet, then
347 * for SMP systems it may be running on a different CPU to 347 * for SMP systems it may be running on a different CPU to
@@ -640,7 +640,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
640 if (!rx_queue->buffer) 640 if (!rx_queue->buffer)
641 return -ENOMEM; 641 return -ENOMEM;
642 642
643 rc = falcon_probe_rx(rx_queue); 643 rc = efx_nic_probe_rx(rx_queue);
644 if (rc) { 644 if (rc) {
645 kfree(rx_queue->buffer); 645 kfree(rx_queue->buffer);
646 rx_queue->buffer = NULL; 646 rx_queue->buffer = NULL;
@@ -671,7 +671,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
671 rx_queue->fast_fill_limit = limit; 671 rx_queue->fast_fill_limit = limit;
672 672
673 /* Set up RX descriptor ring */ 673 /* Set up RX descriptor ring */
674 falcon_init_rx(rx_queue); 674 efx_nic_init_rx(rx_queue);
675} 675}
676 676
677void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) 677void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
@@ -681,7 +681,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
681 681
682 EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue); 682 EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
683 683
684 falcon_fini_rx(rx_queue); 684 efx_nic_fini_rx(rx_queue);
685 685
686 /* Release RX buffers NB start at index 0 not current HW ptr */ 686 /* Release RX buffers NB start at index 0 not current HW ptr */
687 if (rx_queue->buffer) { 687 if (rx_queue->buffer) {
@@ -706,7 +706,7 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
706{ 706{
707 EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue); 707 EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
708 708
709 falcon_remove_rx(rx_queue); 709 efx_nic_remove_rx(rx_queue);
710 710
711 kfree(rx_queue->buffer); 711 kfree(rx_queue->buffer);
712 rx_queue->buffer = NULL; 712 rx_queue->buffer = NULL;
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index fa56e2e8e9c5..9a240536debc 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -164,7 +164,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
164 goto success; 164 goto success;
165 } 165 }
166 166
167 falcon_generate_interrupt(efx); 167 efx_nic_generate_interrupt(efx);
168 168
169 /* Wait for arrival of test interrupt. */ 169 /* Wait for arrival of test interrupt. */
170 EFX_LOG(efx, "waiting for test interrupt\n"); 170 EFX_LOG(efx, "waiting for test interrupt\n");
@@ -202,7 +202,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
202 channel->eventq_magic = 0; 202 channel->eventq_magic = 0;
203 smp_wmb(); 203 smp_wmb();
204 204
205 falcon_generate_test_event(channel, magic); 205 efx_nic_generate_test_event(channel, magic);
206 206
207 /* Wait for arrival of interrupt */ 207 /* Wait for arrival of interrupt */
208 count = 0; 208 count = 0;
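
For context, efx_nic_generate_test_event() is paired in efx_test_eventq_irq() with a short polling loop; a simplified sketch of that wait (the real code also verifies that the interrupt itself arrived, and the timing constants here are assumptions):

static int sketch_wait_for_test_event(struct efx_channel *channel,
				      unsigned int magic)
{
	int count;

	for (count = 0; count < 2; count++) {
		msleep(10);			  /* let the event arrive */
		efx_process_channel_now(channel); /* drain the event queue */
		if (channel->eventq_magic == magic)
			return 0;		  /* event seen and decoded */
	}
	return -ETIMEDOUT;
}
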
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index c54fa30e6277..2531d0207b96 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -278,7 +278,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
278 buffer->continuation = false; 278 buffer->continuation = false;
279 279
280 /* Pass off to hardware */ 280 /* Pass off to hardware */
281 falcon_push_buffers(tx_queue); 281 efx_nic_push_buffers(tx_queue);
282 282
283 return NETDEV_TX_OK; 283 return NETDEV_TX_OK;
284 284
@@ -426,7 +426,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
426 tx_queue->buffer[i].continuation = true; 426 tx_queue->buffer[i].continuation = true;
427 427
428 /* Allocate hardware ring */ 428 /* Allocate hardware ring */
429 rc = falcon_probe_tx(tx_queue); 429 rc = efx_nic_probe_tx(tx_queue);
430 if (rc) 430 if (rc)
431 goto fail; 431 goto fail;
432 432
@@ -449,7 +449,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
449 BUG_ON(tx_queue->stopped); 449 BUG_ON(tx_queue->stopped);
450 450
451 /* Set up TX descriptor ring */ 451 /* Set up TX descriptor ring */
452 falcon_init_tx(tx_queue); 452 efx_nic_init_tx(tx_queue);
453} 453}
454 454
455void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) 455void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -475,7 +475,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
475 EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue); 475 EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
476 476
477 /* Flush TX queue, remove descriptor ring */ 477 /* Flush TX queue, remove descriptor ring */
478 falcon_fini_tx(tx_queue); 478 efx_nic_fini_tx(tx_queue);
479 479
480 efx_release_tx_buffers(tx_queue); 480 efx_release_tx_buffers(tx_queue);
481 481
@@ -492,7 +492,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
492void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 492void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
493{ 493{
494 EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue); 494 EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
495 falcon_remove_tx(tx_queue); 495 efx_nic_remove_tx(tx_queue);
496 496
497 kfree(tx_queue->buffer); 497 kfree(tx_queue->buffer);
498 tx_queue->buffer = NULL; 498 tx_queue->buffer = NULL;
@@ -1078,7 +1078,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1078 } 1078 }
1079 1079
1080 /* Pass off to hardware */ 1080 /* Pass off to hardware */
1081 falcon_push_buffers(tx_queue); 1081 efx_nic_push_buffers(tx_queue);
1082 1082
1083 tx_queue->tso_bursts++; 1083 tx_queue->tso_bursts++;
1084 return NETDEV_TX_OK; 1084 return NETDEV_TX_OK;
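
The rx.c and tx.c changes above are mechanical renames along one queue lifecycle; summarised for TX as a sketch (error paths elided, all names taken from the hunks):

static int example_tx_queue_lifecycle(struct efx_tx_queue *tx_queue)
{
	int rc;

	rc = efx_nic_probe_tx(tx_queue);   /* allocate the hardware ring */
	if (rc)
		return rc;
	efx_nic_init_tx(tx_queue);	   /* set up the descriptor ring */
	/* ... efx_nic_push_buffers() runs as packets are enqueued ... */
	efx_nic_fini_tx(tx_queue);	   /* flush queue, tear down ring */
	efx_nic_remove_tx(tx_queue);	   /* free the hardware ring */
	return 0;
}
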