author	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit	c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree	ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/sfc/nic.c
parent	ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent	6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c

Diffstat (limited to 'drivers/net/sfc/nic.c')
-rw-r--r--	drivers/net/sfc/nic.c	383
1 file changed, 214 insertions(+), 169 deletions(-)
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index f595d920c7c4..f2a2b947f860 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -41,26 +41,6 @@
 #define RX_DC_ENTRIES 64
 #define RX_DC_ENTRIES_ORDER 3
 
-/* RX FIFO XOFF watermark
- *
- * When the amount of the RX FIFO increases used increases past this
- * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xoff_thresh = -1;
-module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
-
-/* RX FIFO XON watermark
- *
- * When the amount of the RX FIFO used decreases below this
- * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xon_thresh = -1;
-module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
-
 /* If EFX_MAX_INT_ERRORS internal errors occur within
  * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
  * disable it.
@@ -104,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
 static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
 {
-	return (((efx_qword_t *) (channel->eventq.addr)) + index);
+	return ((efx_qword_t *) (channel->eventq.addr)) +
+		(index & channel->eventq_mask);
 }
 
 /* See if an event is present
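Note on the hunk above: the event ring is a power of two in size, so wrapping an index is a single AND with eventq_mask (size minus one). Masking inside efx_event() lets the read pointer itself run free and wrap naturally, which pairs with the "++read_ptr" change further down in this diff. A minimal userspace sketch of the pattern, with illustrative names that are not the driver's:

    /* Sketch: power-of-two ring indexed by a free-running counter. */
    #include <stdint.h>

    struct ring {
            uint64_t *entries;
            unsigned int mask;        /* size - 1; size is a power of two */
            unsigned int read_count;  /* free-running; wraps modulo 2^32 */
    };

    static uint64_t *ring_entry(struct ring *r, unsigned int index)
    {
            /* As in efx_event(): wrap at the point of access */
            return &r->entries[index & r->mask];
    }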
@@ -119,8 +100,8 @@ static inline efx_qword_t *efx_event(struct efx_channel *channel,
  */
 static inline int efx_event_present(efx_qword_t *event)
 {
-	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
-		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
+	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
 }
 
 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
@@ -263,8 +244,8 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
 {
	len = ALIGN(len, EFX_BUF_SIZE);
 
-	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
-					    &buffer->dma_addr);
+	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
@@ -301,8 +282,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));
 
-	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
-			    buffer->dma_addr);
+	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
+			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
 }
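The two hunks above convert the buffer allocation from the legacy PCI DMA wrappers to the generic DMA API. The calls are equivalent, except that the generic form takes the underlying struct device and an explicit GFP flag (the PCI wrapper implied GFP_ATOMIC, so GFP_KERNEL here also makes the may-sleep allocation context explicit). Shape of the conversion, as a sketch; alloc_ring_buf is an illustrative name, not a driver function:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static void *alloc_ring_buf(struct pci_dev *pdev, size_t len,
                                dma_addr_t *dma_addr)
    {
            /* was: pci_alloc_consistent(pdev, len, dma_addr) */
            return dma_alloc_coherent(&pdev->dev, len, dma_addr, GFP_KERNEL);
    }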
@@ -347,7 +328,7 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
 static inline efx_qword_t *
 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
 {
-	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
+	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
 }
 
 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
@@ -356,12 +337,41 @@ static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
	unsigned write_ptr;
	efx_dword_t reg;
 
-	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
+	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
 }
 
+/* Write pointer and first descriptor for TX descriptor ring */
+static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
+				    const efx_qword_t *txd)
+{
+	unsigned write_ptr;
+	efx_oword_t reg;
+
+	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+			     FRF_AZ_TX_DESC_WPTR, write_ptr);
+	reg.qword[0] = *txd;
+	efx_writeo_page(tx_queue->efx, &reg,
+			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
+
+static inline bool
+efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
+{
+	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+	if (empty_read_count == 0)
+		return false;
+
+	tx_queue->empty_read_count = 0;
+	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
 /* For each entry inserted into the software descriptor ring, create a
  * descriptor in the hardware TX descriptor ring (in host memory), and
@@ -373,11 +383,12 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
+	unsigned old_write_count = tx_queue->write_count;
 
	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
 
	do {
-		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
+		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;
@@ -391,23 +402,32 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
	} while (tx_queue->write_count != tx_queue->insert_count);
 
	wmb(); /* Ensure descriptors are written before they are fetched */
-	efx_notify_tx_desc(tx_queue);
+
+	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
+		txd = efx_tx_desc(tx_queue,
+				  old_write_count & tx_queue->ptr_mask);
+		efx_push_tx_desc(tx_queue, txd);
+		++tx_queue->pushes;
+	} else {
+		efx_notify_tx_desc(tx_queue);
+	}
 }
 
 /* Allocate hardware resources for a TX queue */
 int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 {
	struct efx_nic *efx = tx_queue->efx;
-	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
-		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
+	unsigned entries;
+
+	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
-					EFX_TXQ_SIZE * sizeof(efx_qword_t));
+					entries * sizeof(efx_qword_t));
 }
 
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
-	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;
+	efx_oword_t reg;
 
	tx_queue->flushed = FLUSH_NONE;
 
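How the new push path above works: efx_may_push_tx_desc() permits a push only when the queue was empty. empty_read_count holds a snapshot of the read count taken when the queue drained, with a validity flag (EFX_EMPTY_COUNT_VALID) folded into the top bit, and ACCESS_ONCE() forces a single read of the field, which the completion path updates concurrently. The XOR test succeeds only if no descriptors have been written since the snapshot. A userspace model of just that test, with EMPTY_COUNT_VALID standing in for the driver's constant:

    #include <stdbool.h>
    #include <stdint.h>

    #define EMPTY_COUNT_VALID (1u << 31)  /* stand-in for EFX_EMPTY_COUNT_VALID */

    static bool may_push(uint32_t *empty_read_count, uint32_t write_count)
    {
            uint32_t snapshot = *empty_read_count; /* ACCESS_ONCE() in the driver */

            if (snapshot == 0)      /* no valid "queue went empty" record */
                    return false;
            *empty_read_count = 0;  /* consume the record */
            /* True only if nothing was written since the queue drained */
            return ((snapshot ^ write_count) & ~EMPTY_COUNT_VALID) == 0;
    }

When the test passes, the first descriptor is written straight into the TX_DESC_UPD register together with the write pointer, saving the NIC a descriptor fetch from host memory; otherwise only the doorbell is rung, as before.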
@@ -415,7 +435,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
	efx_init_special_buffer(efx, &tx_queue->txd);
 
	/* Push TX descriptor ring to card */
-	EFX_POPULATE_OWORD_10(tx_desc_ptr,
+	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -431,17 +451,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
-				    !csum);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
+				    !csum);
	}
 
-	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);
 
	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
-		efx_oword_t reg;
-
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
 
@@ -452,6 +470,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
		set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_1(reg,
+				     FRF_BZ_TX_PACE,
+				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     FFE_BZ_TX_PACE_OFF :
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+				 tx_queue->queue);
+	}
 }
 
 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -501,7 +529,7 @@ void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
 static inline efx_qword_t *
 efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
 {
-	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
+	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
 }
 
 /* This creates an entry in the RX descriptor queue */
@@ -526,30 +554,32 @@ efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
  */
 void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
 {
+	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;
 
	while (rx_queue->notified_count != rx_queue->added_count) {
-		efx_build_rx_desc(rx_queue,
-				  rx_queue->notified_count &
-				  EFX_RXQ_MASK);
+		efx_build_rx_desc(
+			rx_queue,
+			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}
 
	wmb();
-	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
+	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
-	efx_writed_page(rx_queue->efx, &reg,
-			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
+	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
+			efx_rx_queue_index(rx_queue));
 }
 
 int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
 {
	struct efx_nic *efx = rx_queue->efx;
-	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
-		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
+	unsigned entries;
+
+	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
-					EFX_RXQ_SIZE * sizeof(efx_qword_t));
+					entries * sizeof(efx_qword_t));
 }
 
 void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
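The doorbell sequence in efx_nic_notify_rx_desc() is untouched by this patch: build descriptors in host memory, issue wmb(), then write the ring's write pointer to a page-mapped register so the NIC fetches the new descriptors. Only the mask source (rx_queue->ptr_mask) and the queue-index accessor change. A plain-C model of the ordering contract, with __sync_synchronize() standing in for wmb():

    #include <stdint.h>

    /* Descriptors must be globally visible before the doorbell write
     * that tells the device to fetch them. */
    static void notify(volatile uint32_t *doorbell,
                       unsigned int added_count, unsigned int mask)
    {
            __sync_synchronize();            /* wmb(): publish ring contents */
            *doorbell = added_count & mask;  /* then advance the write pointer */
    }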
@@ -561,7 +591,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
 
	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
-		  rx_queue->queue, rx_queue->rxd.index,
+		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);
 
	rx_queue->flushed = FLUSH_NONE;
@@ -575,9 +605,10 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
-			      rx_queue->channel->channel,
+			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
-			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
+			      FRF_AZ_RX_DESCQ_LABEL,
+			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
@@ -585,7 +616,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
-			 rx_queue->queue);
+			 efx_rx_queue_index(rx_queue));
 }
 
 static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
591 | static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) | 622 | static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) |
@@ -598,7 +629,8 @@ static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) | |||
598 | /* Post a flush command */ | 629 | /* Post a flush command */ |
599 | EFX_POPULATE_OWORD_2(rx_flush_descq, | 630 | EFX_POPULATE_OWORD_2(rx_flush_descq, |
600 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, | 631 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, |
601 | FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue); | 632 | FRF_AZ_RX_FLUSH_DESCQ, |
633 | efx_rx_queue_index(rx_queue)); | ||
602 | efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); | 634 | efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); |
603 | } | 635 | } |
604 | 636 | ||
@@ -613,7 +645,7 @@ void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) | |||
613 | /* Remove RX descriptor ring from card */ | 645 | /* Remove RX descriptor ring from card */ |
614 | EFX_ZERO_OWORD(rx_desc_ptr); | 646 | EFX_ZERO_OWORD(rx_desc_ptr); |
615 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | 647 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, |
616 | rx_queue->queue); | 648 | efx_rx_queue_index(rx_queue)); |
617 | 649 | ||
618 | /* Unpin RX descriptor ring */ | 650 | /* Unpin RX descriptor ring */ |
619 | efx_fini_special_buffer(efx, &rx_queue->rxd); | 651 | efx_fini_special_buffer(efx, &rx_queue->rxd); |
@@ -642,13 +674,14 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;
 
-	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+			     channel->eventq_read_ptr & channel->eventq_mask);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
 }
 
 /* Use HW to insert a SW defined event */
-void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
+static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
 {
	efx_oword_t drv_ev_reg;
 
@@ -680,15 +713,17 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-		tx_queue = &efx->tx_queue[tx_ev_q_label];
+		tx_queue = efx_channel_get_tx_queue(
+			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
-			      EFX_TXQ_MASK);
+			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-		tx_queue = &efx->tx_queue[tx_ev_q_label];
+		tx_queue = efx_channel_get_tx_queue(
+			channel, tx_ev_q_label % EFX_TXQ_TYPES);
 
		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
@@ -714,6 +749,7 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				 bool *rx_ev_pkt_ok,
				 bool *discard)
 {
+	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
@@ -746,14 +782,14 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
-		++rx_queue->channel->n_rx_frm_trunc;
+		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
-		++rx_queue->channel->n_rx_tobe_disc;
+		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
-			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
+			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
-			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
+			++channel->n_rx_tcp_udp_chksum_err;
	}
 
	/* The frame must be discarded if any of these are true. */
@@ -769,7 +805,7 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
-			  rx_queue->queue, EFX_QWORD_VAL(*event),
+			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
@@ -791,8 +827,8 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;
 
-	expected = rx_queue->removed_count & EFX_RXQ_MASK;
-	dropped = (index - expected) & EFX_RXQ_MASK;
+	expected = rx_queue->removed_count & rx_queue->ptr_mask;
+	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);
@@ -816,7 +852,6 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
-	struct efx_nic *efx = channel->efx;
 
	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
@@ -827,10 +862,10 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);
 
-	rx_queue = &efx->rx_queue[channel->channel];
+	rx_queue = efx_channel_get_rx_queue(channel);
 
	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
-	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
+	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
 
@@ -839,9 +874,8 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		checksummed =
-			likely(efx->rx_checksum_enabled) &&
-			(rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
-			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
+			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
+			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
	} else {
		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
		checksummed = false;
@@ -874,58 +908,18 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
 
	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
-		++channel->magic_count;
+		; /* ignore */
	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
-		efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
	else
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
 }
 
-/* Global events are basically PHY events */
-static void
-efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
-{
-	struct efx_nic *efx = channel->efx;
-	bool handled = false;
-
-	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
-	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
-	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
-		/* Ignored */
-		handled = true;
-	}
-
-	if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
-	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
-		efx->xmac_poll_required = true;
-		handled = true;
-	}
-
-	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
-	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
-	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
-		netif_err(efx, rx_err, efx->net_dev,
-			  "channel %d seen global RX_RESET event. Resetting.\n",
-			  channel->channel);
-
-		atomic_inc(&efx->rx_reset);
-		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
-				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
-		handled = true;
-	}
-
-	if (!handled)
-		netif_err(efx, hw, efx->net_dev,
-			  "channel %d unknown global event "
-			  EFX_QWORD_FMT "\n", channel->channel,
-			  EFX_QWORD_VAL(*event));
-}
-
 static void
 efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
 {
@@ -997,6 +991,7 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
 
 int efx_nic_process_eventq(struct efx_channel *channel, int budget)
 {
+	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
@@ -1020,8 +1015,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);
 
-		/* Increment read pointer */
-		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+		++read_ptr;
 
		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
 
@@ -1033,7 +1027,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
-			if (tx_packets >= EFX_TXQ_SIZE) {
+			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
@@ -1041,15 +1035,17 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
-		case FSE_AZ_EV_CODE_GLOBAL_EV:
-			efx_handle_global_event(channel, &event);
-			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
+		case FSE_AZ_EV_CODE_GLOBAL_EV:
+			if (efx->type->handle_global_event &&
+			    efx->type->handle_global_event(channel, &event))
+				break;
+			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
@@ -1063,15 +1059,23 @@ out:
	return spent;
 }
 
+/* Check whether an event is present in the eventq at the current
+ * read pointer.  Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
 
 /* Allocate buffer table entries for event queue */
 int efx_nic_probe_eventq(struct efx_channel *channel)
 {
	struct efx_nic *efx = channel->efx;
-	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
-		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
+	unsigned entries;
+
+	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
-					EFX_EVQ_SIZE * sizeof(efx_qword_t));
+					entries * sizeof(efx_qword_t));
 }
 
 void efx_nic_init_eventq(struct efx_channel *channel)
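The new efx_nic_event_present() builds on a convention visible earlier in this file: efx_nic_process_eventq() clears each consumed event by setting the qword to all ones (EFX_SET_QWORD), and no valid event has all ones in either dword, so "present" means "neither 32-bit half is all ones". Modeled in plain C (simplified; as in the driver, each half is checked separately so a partially landed DMA write does not count as present):

    #include <stdbool.h>
    #include <stdint.h>

    static bool event_present(const uint64_t *slot)
    {
            uint32_t lo = (uint32_t)*slot;
            uint32_t hi = (uint32_t)(*slot >> 32);

            /* Consumed slots were set to all ones; both halves must differ */
            return lo != UINT32_MAX && hi != UINT32_MAX;
    }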
@@ -1163,11 +1167,11 @@ void efx_nic_generate_fill_event(struct efx_channel *channel)
 
 static void efx_poll_flush_events(struct efx_nic *efx)
 {
-	struct efx_channel *channel = &efx->channel[0];
+	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
-	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;
+	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
 
	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1185,7 +1189,9 @@ static void efx_poll_flush_events(struct efx_nic *efx)
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
-				tx_queue = efx->tx_queue + ev_queue;
+				tx_queue = efx_get_tx_queue(
+					efx, ev_queue / EFX_TXQ_TYPES,
+					ev_queue % EFX_TXQ_TYPES);
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
@@ -1195,7 +1201,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
-				rx_queue = efx->rx_queue + ev_queue;
+				rx_queue = efx_get_rx_queue(efx, ev_queue);
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
@@ -1205,7 +1211,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);
 
-		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+		++read_ptr;
	} while (read_ptr != end_ptr);
 
	channel->eventq_read_ptr = read_ptr;
@@ -1216,6 +1222,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
  * serialise them */
 int efx_nic_flush_queues(struct efx_nic *efx)
 {
+	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;
@@ -1224,29 +1231,38 @@ int efx_nic_flush_queues(struct efx_nic *efx)
	efx->type->prepare_flush(efx);
 
	/* Flush all tx queues in parallel */
-	efx_for_each_tx_queue(tx_queue, efx)
-		efx_flush_tx_queue(tx_queue);
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised)
+				efx_flush_tx_queue(tx_queue);
+		}
+	}
 
	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
-		efx_for_each_rx_queue(rx_queue, efx) {
-			if (rx_queue->flushed == FLUSH_PENDING)
-				++rx_pending;
-		}
-		efx_for_each_rx_queue(rx_queue, efx) {
-			if (rx_pending == EFX_RX_FLUSH_COUNT)
-				break;
-			if (rx_queue->flushed == FLUSH_FAILED ||
-			    rx_queue->flushed == FLUSH_NONE) {
-				efx_flush_rx_queue(rx_queue);
-				++rx_pending;
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_channel_rx_queue(rx_queue, channel) {
+				if (rx_queue->flushed == FLUSH_PENDING)
+					++rx_pending;
			}
		}
-		efx_for_each_tx_queue(tx_queue, efx) {
-			if (tx_queue->flushed != FLUSH_DONE)
-				++tx_pending;
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_channel_rx_queue(rx_queue, channel) {
+				if (rx_pending == EFX_RX_FLUSH_COUNT)
+					break;
+				if (rx_queue->flushed == FLUSH_FAILED ||
+				    rx_queue->flushed == FLUSH_NONE) {
+					efx_flush_rx_queue(rx_queue);
+					++rx_pending;
+				}
+			}
+			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+				if (tx_queue->initialised &&
+				    tx_queue->flushed != FLUSH_DONE)
+					++tx_pending;
+			}
		}
 
		if (rx_pending == 0 && tx_pending == 0)
@@ -1258,19 +1274,22 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 
	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway */
-	efx_for_each_tx_queue(tx_queue, efx) {
-		if (tx_queue->flushed != FLUSH_DONE)
-			netif_err(efx, hw, efx->net_dev,
-				  "tx queue %d flush command timed out\n",
-				  tx_queue->queue);
-		tx_queue->flushed = FLUSH_DONE;
-	}
-	efx_for_each_rx_queue(rx_queue, efx) {
-		if (rx_queue->flushed != FLUSH_DONE)
-			netif_err(efx, hw, efx->net_dev,
-				  "rx queue %d flush command timed out\n",
-				  rx_queue->queue);
-		rx_queue->flushed = FLUSH_DONE;
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
+				netif_err(efx, hw, efx->net_dev,
+					  "tx queue %d flush command timed out\n",
+					  tx_queue->queue);
+			tx_queue->flushed = FLUSH_DONE;
+		}
+		efx_for_each_channel_rx_queue(rx_queue, channel) {
+			if (rx_queue->flushed != FLUSH_DONE)
+				netif_err(efx, hw, efx->net_dev,
+					  "rx queue %d flush command timed out\n",
+					  efx_rx_queue_index(rx_queue));
+			rx_queue->flushed = FLUSH_DONE;
+		}
	}
 
	return -ETIMEDOUT;
@@ -1397,6 +1416,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
	u32 queues;
	int syserr;
 
+	/* Could this be ours?  If interrupts are disabled then the
+	 * channel state may not be valid.
+	 */
+	if (!efx->legacy_irq_enabled)
+		return result;
+
	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);
@@ -1457,7 +1482,7 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
  */
 static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
 {
-	struct efx_channel *channel = dev_id;
+	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
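The extra level of indirection above pairs with the request_irq()/free_irq() changes in the next hunks: the IRQ cookie becomes &efx->channel[i], a slot in an array of channel pointers, rather than the channel itself, and the handler follows the slot with *(struct efx_channel **)dev_id. Presumably this lets the channel object behind the slot be replaced (for example when ring sizes change) without re-registering the interrupt. A compact, hypothetical model of the idea; these types are illustrative, not the driver's:

    struct channel { int irq; };

    struct nic {
            struct channel *channel[32];  /* slots stay put; the objects they
                                           * point to may be reallocated */
    };

    static void handler(void *dev_id)  /* dev_id == &nic->channel[i] */
    {
            struct channel *ch = *(struct channel **)dev_id;
            (void)ch;  /* ... service whichever channel the slot now holds ... */
    }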
@@ -1532,7 +1557,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
-				 channel->name, channel);
+				 efx->channel_name[channel->channel],
+				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
@@ -1544,7 +1570,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
 
 fail2:
	efx_for_each_channel(channel, efx)
-		free_irq(channel->irq, channel);
+		free_irq(channel->irq, &efx->channel[channel->channel]);
 fail1:
	return rc;
 }
@@ -1557,7 +1583,7 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
-			free_irq(channel->irq, channel);
+			free_irq(channel->irq, &efx->channel[channel->channel]);
	}
 
	/* ACK legacy interrupt */
@@ -1642,7 +1668,7 @@ void efx_nic_init_common(struct efx_nic *efx)
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
-	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
@@ -1654,6 +1680,19 @@ void efx_nic_init_common(struct efx_nic *efx)
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_4(temp,
+				     /* Default values */
+				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+				     FRF_BZ_TX_PACE_SB_AF, 0xb,
+				     FRF_BZ_TX_PACE_FB_BASE, 0,
+				     /* Allow large pace values in the
+				      * fast bin. */
+				     FRF_BZ_TX_PACE_BIN_TH,
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+	}
 }
 
 /* Register dump */
@@ -1827,8 +1866,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
-	/* The register buffer is allocated with slab, so we can't
-	 * reasonably read all of the buffer table (up to 8MB!).
+	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
@@ -1836,7 +1874,6 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
			      A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
			      B, Z, 8, 1024),
-	/* RX_FILTER_TBL{0,1} is huge and not used by this driver */
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -1846,6 +1883,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
+	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
 };
 
 size_t efx_nic_get_regs_len(struct efx_nic *efx)
@@ -1897,6 +1935,13 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
 
		size = min_t(size_t, table->step, 16);
 
+		if (table->offset >= efx->type->mem_map_size) {
+			/* No longer mapped; return dummy data */
+			memcpy(buf, "\xde\xc0\xad\xde", 4);
+			buf += table->rows * size;
+			continue;
+		}
+
		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */