diff options
author | Ben Hutchings <bhutchings@solarflare.com> | 2009-10-23 04:30:36 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-10-24 07:27:03 -0400 |
commit | 3e6c4538542ab2103ab7c01f4458bc2e21b672a1 (patch) | |
tree | 0ae49634fa3288704d6c5bf8e279909b52401734 /drivers/net/sfc/falcon.c | |
parent | 625b451455cebb7120492766c8425b6e808fc209 (diff) |
sfc: Update hardware definitions for Siena
Siena is still based on the Falcon hardware architecture and will
share many of these definitions, so replace falcon_hwdefs.h with
regs.h.
The new definitions have been generated according to a naming
convention which incorporates the type and revision information.
Update the code accordingly.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/sfc/falcon.c')
-rw-r--r-- | drivers/net/sfc/falcon.c | 848 |
1 file changed, 437 insertions, 411 deletions
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index c23e8e2b094a..b35e01031e23 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include "mac.h" | 22 | #include "mac.h" |
23 | #include "spi.h" | 23 | #include "spi.h" |
24 | #include "falcon.h" | 24 | #include "falcon.h" |
25 | #include "falcon_hwdefs.h" | 25 | #include "regs.h" |
26 | #include "falcon_io.h" | 26 | #include "falcon_io.h" |
27 | #include "mdio_10g.h" | 27 | #include "mdio_10g.h" |
28 | #include "phy.h" | 28 | #include "phy.h" |
@@ -109,17 +109,17 @@ module_param(rx_xon_thresh_bytes, int, 0644); | |||
109 | MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); | 109 | MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); |
110 | 110 | ||
111 | /* TX descriptor ring size - min 512 max 4k */ | 111 | /* TX descriptor ring size - min 512 max 4k */ |
112 | #define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K | 112 | #define FALCON_TXD_RING_ORDER FFE_AZ_TX_DESCQ_SIZE_1K |
113 | #define FALCON_TXD_RING_SIZE 1024 | 113 | #define FALCON_TXD_RING_SIZE 1024 |
114 | #define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1) | 114 | #define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1) |
115 | 115 | ||
116 | /* RX descriptor ring size - min 512 max 4k */ | 116 | /* RX descriptor ring size - min 512 max 4k */ |
117 | #define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K | 117 | #define FALCON_RXD_RING_ORDER FFE_AZ_RX_DESCQ_SIZE_1K |
118 | #define FALCON_RXD_RING_SIZE 1024 | 118 | #define FALCON_RXD_RING_SIZE 1024 |
119 | #define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1) | 119 | #define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1) |
120 | 120 | ||
121 | /* Event queue size - max 32k */ | 121 | /* Event queue size - max 32k */ |
122 | #define FALCON_EVQ_ORDER EVQ_SIZE_4K | 122 | #define FALCON_EVQ_ORDER FFE_AZ_EVQ_SIZE_4K |
123 | #define FALCON_EVQ_SIZE 4096 | 123 | #define FALCON_EVQ_SIZE 4096 |
124 | #define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1) | 124 | #define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1) |
125 | 125 | ||
@@ -199,9 +199,9 @@ static void falcon_setsda(void *data, int state) | |||
199 | struct efx_nic *efx = (struct efx_nic *)data; | 199 | struct efx_nic *efx = (struct efx_nic *)data; |
200 | efx_oword_t reg; | 200 | efx_oword_t reg; |
201 | 201 | ||
202 | falcon_read(efx, ®, GPIO_CTL_REG_KER); | 202 | falcon_read(efx, ®, FR_AB_GPIO_CTL); |
203 | EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state); | 203 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state); |
204 | falcon_write(efx, ®, GPIO_CTL_REG_KER); | 204 | falcon_write(efx, ®, FR_AB_GPIO_CTL); |
205 | } | 205 | } |
206 | 206 | ||
207 | static void falcon_setscl(void *data, int state) | 207 | static void falcon_setscl(void *data, int state) |
@@ -209,9 +209,9 @@ static void falcon_setscl(void *data, int state) | |||
209 | struct efx_nic *efx = (struct efx_nic *)data; | 209 | struct efx_nic *efx = (struct efx_nic *)data; |
210 | efx_oword_t reg; | 210 | efx_oword_t reg; |
211 | 211 | ||
212 | falcon_read(efx, ®, GPIO_CTL_REG_KER); | 212 | falcon_read(efx, ®, FR_AB_GPIO_CTL); |
213 | EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state); | 213 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state); |
214 | falcon_write(efx, ®, GPIO_CTL_REG_KER); | 214 | falcon_write(efx, ®, FR_AB_GPIO_CTL); |
215 | } | 215 | } |
216 | 216 | ||
217 | static int falcon_getsda(void *data) | 217 | static int falcon_getsda(void *data) |
@@ -219,8 +219,8 @@ static int falcon_getsda(void *data) | |||
219 | struct efx_nic *efx = (struct efx_nic *)data; | 219 | struct efx_nic *efx = (struct efx_nic *)data; |
220 | efx_oword_t reg; | 220 | efx_oword_t reg; |
221 | 221 | ||
222 | falcon_read(efx, ®, GPIO_CTL_REG_KER); | 222 | falcon_read(efx, ®, FR_AB_GPIO_CTL); |
223 | return EFX_OWORD_FIELD(reg, GPIO3_IN); | 223 | return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN); |
224 | } | 224 | } |
225 | 225 | ||
226 | static int falcon_getscl(void *data) | 226 | static int falcon_getscl(void *data) |
@@ -228,8 +228,8 @@ static int falcon_getscl(void *data) | |||
228 | struct efx_nic *efx = (struct efx_nic *)data; | 228 | struct efx_nic *efx = (struct efx_nic *)data; |
229 | efx_oword_t reg; | 229 | efx_oword_t reg; |
230 | 230 | ||
231 | falcon_read(efx, ®, GPIO_CTL_REG_KER); | 231 | falcon_read(efx, ®, FR_AB_GPIO_CTL); |
232 | return EFX_OWORD_FIELD(reg, GPIO0_IN); | 232 | return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN); |
233 | } | 233 | } |
234 | 234 | ||
235 | static struct i2c_algo_bit_data falcon_i2c_bit_operations = { | 235 | static struct i2c_algo_bit_data falcon_i2c_bit_operations = { |
@@ -274,11 +274,10 @@ falcon_init_special_buffer(struct efx_nic *efx, | |||
274 | dma_addr = buffer->dma_addr + (i * 4096); | 274 | dma_addr = buffer->dma_addr + (i * 4096); |
275 | EFX_LOG(efx, "mapping special buffer %d at %llx\n", | 275 | EFX_LOG(efx, "mapping special buffer %d at %llx\n", |
276 | index, (unsigned long long)dma_addr); | 276 | index, (unsigned long long)dma_addr); |
277 | EFX_POPULATE_QWORD_4(buf_desc, | 277 | EFX_POPULATE_QWORD_3(buf_desc, |
278 | IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K, | 278 | FRF_AZ_BUF_ADR_REGION, 0, |
279 | BUF_ADR_REGION, 0, | 279 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, |
280 | BUF_ADR_FBUF, (dma_addr >> 12), | 280 | FRF_AZ_BUF_OWNER_ID_FBUF, 0); |
281 | BUF_OWNER_ID_FBUF, 0); | ||
282 | falcon_write_sram(efx, &buf_desc, index); | 281 | falcon_write_sram(efx, &buf_desc, index); |
283 | } | 282 | } |
284 | } | 283 | } |
@@ -299,11 +298,11 @@ falcon_fini_special_buffer(struct efx_nic *efx, | |||
299 | buffer->index, buffer->index + buffer->entries - 1); | 298 | buffer->index, buffer->index + buffer->entries - 1); |
300 | 299 | ||
301 | EFX_POPULATE_OWORD_4(buf_tbl_upd, | 300 | EFX_POPULATE_OWORD_4(buf_tbl_upd, |
302 | BUF_UPD_CMD, 0, | 301 | FRF_AZ_BUF_UPD_CMD, 0, |
303 | BUF_CLR_CMD, 1, | 302 | FRF_AZ_BUF_CLR_CMD, 1, |
304 | BUF_CLR_END_ID, end, | 303 | FRF_AZ_BUF_CLR_END_ID, end, |
305 | BUF_CLR_START_ID, start); | 304 | FRF_AZ_BUF_CLR_START_ID, start); |
306 | falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER); | 305 | falcon_write(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); |
307 | } | 306 | } |
308 | 307 | ||
309 | /* | 308 | /* |
@@ -415,9 +414,9 @@ static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue) | |||
415 | efx_dword_t reg; | 414 | efx_dword_t reg; |
416 | 415 | ||
417 | write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK; | 416 | write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK; |
418 | EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr); | 417 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); |
419 | falcon_writel_page(tx_queue->efx, ®, | 418 | falcon_writel_page(tx_queue->efx, ®, |
420 | TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue); | 419 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); |
421 | } | 420 | } |
422 | 421 | ||
423 | 422 | ||
@@ -441,12 +440,11 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue) | |||
441 | ++tx_queue->write_count; | 440 | ++tx_queue->write_count; |
442 | 441 | ||
443 | /* Create TX descriptor ring entry */ | 442 | /* Create TX descriptor ring entry */ |
444 | EFX_POPULATE_QWORD_5(*txd, | 443 | EFX_POPULATE_QWORD_4(*txd, |
445 | TX_KER_PORT, 0, | 444 | FSF_AZ_TX_KER_CONT, buffer->continuation, |
446 | TX_KER_CONT, buffer->continuation, | 445 | FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, |
447 | TX_KER_BYTE_CNT, buffer->len, | 446 | FSF_AZ_TX_KER_BUF_REGION, 0, |
448 | TX_KER_BUF_REGION, 0, | 447 | FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); |
449 | TX_KER_BUF_ADR, buffer->dma_addr); | ||
450 | } while (tx_queue->write_count != tx_queue->insert_count); | 448 | } while (tx_queue->write_count != tx_queue->insert_count); |
451 | 449 | ||
452 | wmb(); /* Ensure descriptors are written before they are fetched */ | 450 | wmb(); /* Ensure descriptors are written before they are fetched */ |
@@ -474,21 +472,23 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue) | |||
474 | 472 | ||
475 | /* Push TX descriptor ring to card */ | 473 | /* Push TX descriptor ring to card */ |
476 | EFX_POPULATE_OWORD_10(tx_desc_ptr, | 474 | EFX_POPULATE_OWORD_10(tx_desc_ptr, |
477 | TX_DESCQ_EN, 1, | 475 | FRF_AZ_TX_DESCQ_EN, 1, |
478 | TX_ISCSI_DDIG_EN, 0, | 476 | FRF_AZ_TX_ISCSI_DDIG_EN, 0, |
479 | TX_ISCSI_HDIG_EN, 0, | 477 | FRF_AZ_TX_ISCSI_HDIG_EN, 0, |
480 | TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, | 478 | FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, |
481 | TX_DESCQ_EVQ_ID, tx_queue->channel->channel, | 479 | FRF_AZ_TX_DESCQ_EVQ_ID, |
482 | TX_DESCQ_OWNER_ID, 0, | 480 | tx_queue->channel->channel, |
483 | TX_DESCQ_LABEL, tx_queue->queue, | 481 | FRF_AZ_TX_DESCQ_OWNER_ID, 0, |
484 | TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER, | 482 | FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, |
485 | TX_DESCQ_TYPE, 0, | 483 | FRF_AZ_TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER, |
486 | TX_NON_IP_DROP_DIS_B0, 1); | 484 | FRF_AZ_TX_DESCQ_TYPE, 0, |
485 | FRF_BZ_TX_NON_IP_DROP_DIS, 1); | ||
487 | 486 | ||
488 | if (falcon_rev(efx) >= FALCON_REV_B0) { | 487 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
489 | int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM; | 488 | int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM; |
490 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum); | 489 | EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum); |
491 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum); | 490 | EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS, |
491 | !csum); | ||
492 | } | 492 | } |
493 | 493 | ||
494 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | 494 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, |
@@ -500,12 +500,12 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue) | |||
500 | /* Only 128 bits in this register */ | 500 | /* Only 128 bits in this register */ |
501 | BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128); | 501 | BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128); |
502 | 502 | ||
503 | falcon_read(efx, ®, TX_CHKSM_CFG_REG_KER_A1); | 503 | falcon_read(efx, ®, FR_AA_TX_CHKSM_CFG); |
504 | if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM) | 504 | if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM) |
505 | clear_bit_le(tx_queue->queue, (void *)®); | 505 | clear_bit_le(tx_queue->queue, (void *)®); |
506 | else | 506 | else |
507 | set_bit_le(tx_queue->queue, (void *)®); | 507 | set_bit_le(tx_queue->queue, (void *)®); |
508 | falcon_write(efx, ®, TX_CHKSM_CFG_REG_KER_A1); | 508 | falcon_write(efx, ®, FR_AA_TX_CHKSM_CFG); |
509 | } | 509 | } |
510 | } | 510 | } |
511 | 511 | ||
@@ -516,9 +516,9 @@ static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue) | |||
516 | 516 | ||
517 | /* Post a flush command */ | 517 | /* Post a flush command */ |
518 | EFX_POPULATE_OWORD_2(tx_flush_descq, | 518 | EFX_POPULATE_OWORD_2(tx_flush_descq, |
519 | TX_FLUSH_DESCQ_CMD, 1, | 519 | FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, |
520 | TX_FLUSH_DESCQ, tx_queue->queue); | 520 | FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); |
521 | falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER); | 521 | falcon_write(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); |
522 | } | 522 | } |
523 | 523 | ||
524 | void falcon_fini_tx(struct efx_tx_queue *tx_queue) | 524 | void falcon_fini_tx(struct efx_tx_queue *tx_queue) |
@@ -567,11 +567,11 @@ static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue, | |||
567 | rxd = falcon_rx_desc(rx_queue, index); | 567 | rxd = falcon_rx_desc(rx_queue, index); |
568 | rx_buf = efx_rx_buffer(rx_queue, index); | 568 | rx_buf = efx_rx_buffer(rx_queue, index); |
569 | EFX_POPULATE_QWORD_3(*rxd, | 569 | EFX_POPULATE_QWORD_3(*rxd, |
570 | RX_KER_BUF_SIZE, | 570 | FSF_AZ_RX_KER_BUF_SIZE, |
571 | rx_buf->len - | 571 | rx_buf->len - |
572 | rx_queue->efx->type->rx_buffer_padding, | 572 | rx_queue->efx->type->rx_buffer_padding, |
573 | RX_KER_BUF_REGION, 0, | 573 | FSF_AZ_RX_KER_BUF_REGION, 0, |
574 | RX_KER_BUF_ADR, rx_buf->dma_addr); | 574 | FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); |
575 | } | 575 | } |
576 | 576 | ||
577 | /* This writes to the RX_DESC_WPTR register for the specified receive | 577 | /* This writes to the RX_DESC_WPTR register for the specified receive |
@@ -591,9 +591,9 @@ void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue) | |||
591 | 591 | ||
592 | wmb(); | 592 | wmb(); |
593 | write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK; | 593 | write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK; |
594 | EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr); | 594 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); |
595 | falcon_writel_page(rx_queue->efx, ®, | 595 | falcon_writel_page(rx_queue->efx, ®, |
596 | RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue); | 596 | FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue); |
597 | } | 597 | } |
598 | 598 | ||
599 | int falcon_probe_rx(struct efx_rx_queue *rx_queue) | 599 | int falcon_probe_rx(struct efx_rx_queue *rx_queue) |
@@ -622,17 +622,18 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue) | |||
622 | 622 | ||
623 | /* Push RX descriptor ring to card */ | 623 | /* Push RX descriptor ring to card */ |
624 | EFX_POPULATE_OWORD_10(rx_desc_ptr, | 624 | EFX_POPULATE_OWORD_10(rx_desc_ptr, |
625 | RX_ISCSI_DDIG_EN, iscsi_digest_en, | 625 | FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, |
626 | RX_ISCSI_HDIG_EN, iscsi_digest_en, | 626 | FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, |
627 | RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, | 627 | FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, |
628 | RX_DESCQ_EVQ_ID, rx_queue->channel->channel, | 628 | FRF_AZ_RX_DESCQ_EVQ_ID, |
629 | RX_DESCQ_OWNER_ID, 0, | 629 | rx_queue->channel->channel, |
630 | RX_DESCQ_LABEL, rx_queue->queue, | 630 | FRF_AZ_RX_DESCQ_OWNER_ID, 0, |
631 | RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER, | 631 | FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue, |
632 | RX_DESCQ_TYPE, 0 /* kernel queue */ , | 632 | FRF_AZ_RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER, |
633 | FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , | ||
633 | /* For >=B0 this is scatter so disable */ | 634 | /* For >=B0 this is scatter so disable */ |
634 | RX_DESCQ_JUMBO, !is_b0, | 635 | FRF_AZ_RX_DESCQ_JUMBO, !is_b0, |
635 | RX_DESCQ_EN, 1); | 636 | FRF_AZ_RX_DESCQ_EN, 1); |
636 | falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | 637 | falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, |
637 | rx_queue->queue); | 638 | rx_queue->queue); |
638 | } | 639 | } |
@@ -644,9 +645,9 @@ static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) | |||
644 | 645 | ||
645 | /* Post a flush command */ | 646 | /* Post a flush command */ |
646 | EFX_POPULATE_OWORD_2(rx_flush_descq, | 647 | EFX_POPULATE_OWORD_2(rx_flush_descq, |
647 | RX_FLUSH_DESCQ_CMD, 1, | 648 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, |
648 | RX_FLUSH_DESCQ, rx_queue->queue); | 649 | FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue); |
649 | falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER); | 650 | falcon_write(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); |
650 | } | 651 | } |
651 | 652 | ||
652 | void falcon_fini_rx(struct efx_rx_queue *rx_queue) | 653 | void falcon_fini_rx(struct efx_rx_queue *rx_queue) |
@@ -693,7 +694,7 @@ void falcon_eventq_read_ack(struct efx_channel *channel) | |||
693 | efx_dword_t reg; | 694 | efx_dword_t reg; |
694 | struct efx_nic *efx = channel->efx; | 695 | struct efx_nic *efx = channel->efx; |
695 | 696 | ||
696 | EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr); | 697 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr); |
697 | falcon_writel_table(efx, ®, efx->type->evq_rptr_tbl_base, | 698 | falcon_writel_table(efx, ®, efx->type->evq_rptr_tbl_base, |
698 | channel->channel); | 699 | channel->channel); |
699 | } | 700 | } |
@@ -703,11 +704,14 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event) | |||
703 | { | 704 | { |
704 | efx_oword_t drv_ev_reg; | 705 | efx_oword_t drv_ev_reg; |
705 | 706 | ||
706 | EFX_POPULATE_OWORD_2(drv_ev_reg, | 707 | BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || |
707 | DRV_EV_QID, channel->channel, | 708 | FRF_AZ_DRV_EV_DATA_WIDTH != 64); |
708 | DRV_EV_DATA, | 709 | drv_ev_reg.u32[0] = event->u32[0]; |
709 | EFX_QWORD_FIELD64(*event, WHOLE_EVENT)); | 710 | drv_ev_reg.u32[1] = event->u32[1]; |
710 | falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER); | 711 | drv_ev_reg.u32[2] = 0; |
712 | drv_ev_reg.u32[3] = 0; | ||
713 | EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel); | ||
714 | falcon_write(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV); | ||
711 | } | 715 | } |
712 | 716 | ||
713 | /* Handle a transmit completion event | 717 | /* Handle a transmit completion event |
@@ -723,18 +727,18 @@ static void falcon_handle_tx_event(struct efx_channel *channel, | |||
723 | struct efx_tx_queue *tx_queue; | 727 | struct efx_tx_queue *tx_queue; |
724 | struct efx_nic *efx = channel->efx; | 728 | struct efx_nic *efx = channel->efx; |
725 | 729 | ||
726 | if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) { | 730 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { |
727 | /* Transmit completion */ | 731 | /* Transmit completion */ |
728 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR); | 732 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); |
729 | tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); | 733 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); |
730 | tx_queue = &efx->tx_queue[tx_ev_q_label]; | 734 | tx_queue = &efx->tx_queue[tx_ev_q_label]; |
731 | channel->irq_mod_score += | 735 | channel->irq_mod_score += |
732 | (tx_ev_desc_ptr - tx_queue->read_count) & | 736 | (tx_ev_desc_ptr - tx_queue->read_count) & |
733 | efx->type->txd_ring_mask; | 737 | efx->type->txd_ring_mask; |
734 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); | 738 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); |
735 | } else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) { | 739 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { |
736 | /* Rewrite the FIFO write pointer */ | 740 | /* Rewrite the FIFO write pointer */ |
737 | tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); | 741 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); |
738 | tx_queue = &efx->tx_queue[tx_ev_q_label]; | 742 | tx_queue = &efx->tx_queue[tx_ev_q_label]; |
739 | 743 | ||
740 | if (efx_dev_registered(efx)) | 744 | if (efx_dev_registered(efx)) |
@@ -742,7 +746,7 @@ static void falcon_handle_tx_event(struct efx_channel *channel, | |||
742 | falcon_notify_tx_desc(tx_queue); | 746 | falcon_notify_tx_desc(tx_queue); |
743 | if (efx_dev_registered(efx)) | 747 | if (efx_dev_registered(efx)) |
744 | netif_tx_unlock(efx->net_dev); | 748 | netif_tx_unlock(efx->net_dev); |
745 | } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && | 749 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && |
746 | EFX_WORKAROUND_10727(efx)) { | 750 | EFX_WORKAROUND_10727(efx)) { |
747 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | 751 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); |
748 | } else { | 752 | } else { |
@@ -766,22 +770,22 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | |||
766 | bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt; | 770 | bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt; |
767 | unsigned rx_ev_pkt_type; | 771 | unsigned rx_ev_pkt_type; |
768 | 772 | ||
769 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); | 773 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); |
770 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); | 774 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); |
771 | rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC); | 775 | rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); |
772 | rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE); | 776 | rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); |
773 | rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, | 777 | rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, |
774 | RX_EV_BUF_OWNER_ID_ERR); | 778 | FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); |
775 | rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR); | 779 | rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_IP_FRAG_ERR); |
776 | rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, | 780 | rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, |
777 | RX_EV_IP_HDR_CHKSUM_ERR); | 781 | FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); |
778 | rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, | 782 | rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, |
779 | RX_EV_TCP_UDP_CHKSUM_ERR); | 783 | FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); |
780 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); | 784 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); |
781 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); | 785 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); |
782 | rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ? | 786 | rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ? |
783 | 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); | 787 | 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); |
784 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); | 788 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); |
785 | 789 | ||
786 | /* Every error apart from tobe_disc and pause_frm */ | 790 | /* Every error apart from tobe_disc and pause_frm */ |
787 | rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | | 791 | rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | |
@@ -865,16 +869,17 @@ static void falcon_handle_rx_event(struct efx_channel *channel, | |||
865 | struct efx_nic *efx = channel->efx; | 869 | struct efx_nic *efx = channel->efx; |
866 | 870 | ||
867 | /* Basic packet information */ | 871 | /* Basic packet information */ |
868 | rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT); | 872 | rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); |
869 | rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK); | 873 | rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); |
870 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); | 874 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); |
871 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT)); | 875 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT)); |
872 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1); | 876 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1); |
873 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel); | 877 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != |
878 | channel->channel); | ||
874 | 879 | ||
875 | rx_queue = &efx->rx_queue[channel->channel]; | 880 | rx_queue = &efx->rx_queue[channel->channel]; |
876 | 881 | ||
877 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR); | 882 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); |
878 | expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK; | 883 | expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK; |
879 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) | 884 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) |
880 | falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); | 885 | falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); |
@@ -883,7 +888,9 @@ static void falcon_handle_rx_event(struct efx_channel *channel, | |||
883 | /* If packet is marked as OK and packet type is TCP/IPv4 or | 888 | /* If packet is marked as OK and packet type is TCP/IPv4 or |
884 | * UDP/IPv4, then we can rely on the hardware checksum. | 889 | * UDP/IPv4, then we can rely on the hardware checksum. |
885 | */ | 890 | */ |
886 | checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type); | 891 | checksummed = |
892 | rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP || | ||
893 | rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP; | ||
887 | } else { | 894 | } else { |
888 | falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, | 895 | falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, |
889 | &discard); | 896 | &discard); |
@@ -891,10 +898,10 @@ static void falcon_handle_rx_event(struct efx_channel *channel, | |||
891 | } | 898 | } |
892 | 899 | ||
893 | /* Detect multicast packets that didn't match the filter */ | 900 | /* Detect multicast packets that didn't match the filter */ |
894 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); | 901 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); |
895 | if (rx_ev_mcast_pkt) { | 902 | if (rx_ev_mcast_pkt) { |
896 | unsigned int rx_ev_mcast_hash_match = | 903 | unsigned int rx_ev_mcast_hash_match = |
897 | EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH); | 904 | EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); |
898 | 905 | ||
899 | if (unlikely(!rx_ev_mcast_hash_match)) | 906 | if (unlikely(!rx_ev_mcast_hash_match)) |
900 | discard = true; | 907 | discard = true; |
@@ -914,24 +921,23 @@ static void falcon_handle_global_event(struct efx_channel *channel, | |||
914 | struct efx_nic *efx = channel->efx; | 921 | struct efx_nic *efx = channel->efx; |
915 | bool handled = false; | 922 | bool handled = false; |
916 | 923 | ||
917 | if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) || | 924 | if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) || |
918 | EFX_QWORD_FIELD(*event, G_PHY1_INTR) || | 925 | EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) || |
919 | EFX_QWORD_FIELD(*event, XG_PHY_INTR) || | 926 | EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) { |
920 | EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) { | ||
921 | efx->phy_op->clear_interrupt(efx); | 927 | efx->phy_op->clear_interrupt(efx); |
922 | queue_work(efx->workqueue, &efx->phy_work); | 928 | queue_work(efx->workqueue, &efx->phy_work); |
923 | handled = true; | 929 | handled = true; |
924 | } | 930 | } |
925 | 931 | ||
926 | if ((falcon_rev(efx) >= FALCON_REV_B0) && | 932 | if ((falcon_rev(efx) >= FALCON_REV_B0) && |
927 | EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) { | 933 | EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) { |
928 | queue_work(efx->workqueue, &efx->mac_work); | 934 | queue_work(efx->workqueue, &efx->mac_work); |
929 | handled = true; | 935 | handled = true; |
930 | } | 936 | } |
931 | 937 | ||
932 | if (falcon_rev(efx) <= FALCON_REV_A1 ? | 938 | if (falcon_rev(efx) <= FALCON_REV_A1 ? |
933 | EFX_QWORD_FIELD(*event, RX_RECOVERY_A1) : | 939 | EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : |
934 | EFX_QWORD_FIELD(*event, RX_RECOVERY_B0)) { | 940 | EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { |
935 | EFX_ERR(efx, "channel %d seen global RX_RESET " | 941 | EFX_ERR(efx, "channel %d seen global RX_RESET " |
936 | "event. Resetting.\n", channel->channel); | 942 | "event. Resetting.\n", channel->channel); |
937 | 943 | ||
@@ -954,35 +960,35 @@ static void falcon_handle_driver_event(struct efx_channel *channel, | |||
954 | unsigned int ev_sub_code; | 960 | unsigned int ev_sub_code; |
955 | unsigned int ev_sub_data; | 961 | unsigned int ev_sub_data; |
956 | 962 | ||
957 | ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); | 963 | ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); |
958 | ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA); | 964 | ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); |
959 | 965 | ||
960 | switch (ev_sub_code) { | 966 | switch (ev_sub_code) { |
961 | case TX_DESCQ_FLS_DONE_EV_DECODE: | 967 | case FSE_AZ_TX_DESCQ_FLS_DONE_EV: |
962 | EFX_TRACE(efx, "channel %d TXQ %d flushed\n", | 968 | EFX_TRACE(efx, "channel %d TXQ %d flushed\n", |
963 | channel->channel, ev_sub_data); | 969 | channel->channel, ev_sub_data); |
964 | break; | 970 | break; |
965 | case RX_DESCQ_FLS_DONE_EV_DECODE: | 971 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: |
966 | EFX_TRACE(efx, "channel %d RXQ %d flushed\n", | 972 | EFX_TRACE(efx, "channel %d RXQ %d flushed\n", |
967 | channel->channel, ev_sub_data); | 973 | channel->channel, ev_sub_data); |
968 | break; | 974 | break; |
969 | case EVQ_INIT_DONE_EV_DECODE: | 975 | case FSE_AZ_EVQ_INIT_DONE_EV: |
970 | EFX_LOG(efx, "channel %d EVQ %d initialised\n", | 976 | EFX_LOG(efx, "channel %d EVQ %d initialised\n", |
971 | channel->channel, ev_sub_data); | 977 | channel->channel, ev_sub_data); |
972 | break; | 978 | break; |
973 | case SRM_UPD_DONE_EV_DECODE: | 979 | case FSE_AZ_SRM_UPD_DONE_EV: |
974 | EFX_TRACE(efx, "channel %d SRAM update done\n", | 980 | EFX_TRACE(efx, "channel %d SRAM update done\n", |
975 | channel->channel); | 981 | channel->channel); |
976 | break; | 982 | break; |
977 | case WAKE_UP_EV_DECODE: | 983 | case FSE_AZ_WAKE_UP_EV: |
978 | EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n", | 984 | EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n", |
979 | channel->channel, ev_sub_data); | 985 | channel->channel, ev_sub_data); |
980 | break; | 986 | break; |
981 | case TIMER_EV_DECODE: | 987 | case FSE_AZ_TIMER_EV: |
982 | EFX_TRACE(efx, "channel %d RX queue %d timer expired\n", | 988 | EFX_TRACE(efx, "channel %d RX queue %d timer expired\n", |
983 | channel->channel, ev_sub_data); | 989 | channel->channel, ev_sub_data); |
984 | break; | 990 | break; |
985 | case RX_RECOVERY_EV_DECODE: | 991 | case FSE_AA_RX_RECOVER_EV: |
986 | EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " | 992 | EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " |
987 | "Resetting.\n", channel->channel); | 993 | "Resetting.\n", channel->channel); |
988 | atomic_inc(&efx->rx_reset); | 994 | atomic_inc(&efx->rx_reset); |
@@ -991,12 +997,12 @@ static void falcon_handle_driver_event(struct efx_channel *channel, | |||
991 | RESET_TYPE_RX_RECOVERY : | 997 | RESET_TYPE_RX_RECOVERY : |
992 | RESET_TYPE_DISABLE); | 998 | RESET_TYPE_DISABLE); |
993 | break; | 999 | break; |
994 | case RX_DSC_ERROR_EV_DECODE: | 1000 | case FSE_BZ_RX_DSC_ERROR_EV: |
995 | EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error." | 1001 | EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error." |
996 | " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | 1002 | " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); |
997 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); | 1003 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); |
998 | break; | 1004 | break; |
999 | case TX_DSC_ERROR_EV_DECODE: | 1005 | case FSE_BZ_TX_DSC_ERROR_EV: |
1000 | EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error." | 1006 | EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error." |
1001 | " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | 1007 | " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); |
1002 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | 1008 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); |
@@ -1032,27 +1038,27 @@ int falcon_process_eventq(struct efx_channel *channel, int rx_quota) | |||
1032 | /* Clear this event by marking it all ones */ | 1038 | /* Clear this event by marking it all ones */ |
1033 | EFX_SET_QWORD(*p_event); | 1039 | EFX_SET_QWORD(*p_event); |
1034 | 1040 | ||
1035 | ev_code = EFX_QWORD_FIELD(event, EV_CODE); | 1041 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); |
1036 | 1042 | ||
1037 | switch (ev_code) { | 1043 | switch (ev_code) { |
1038 | case RX_IP_EV_DECODE: | 1044 | case FSE_AZ_EV_CODE_RX_EV: |
1039 | falcon_handle_rx_event(channel, &event); | 1045 | falcon_handle_rx_event(channel, &event); |
1040 | ++rx_packets; | 1046 | ++rx_packets; |
1041 | break; | 1047 | break; |
1042 | case TX_IP_EV_DECODE: | 1048 | case FSE_AZ_EV_CODE_TX_EV: |
1043 | falcon_handle_tx_event(channel, &event); | 1049 | falcon_handle_tx_event(channel, &event); |
1044 | break; | 1050 | break; |
1045 | case DRV_GEN_EV_DECODE: | 1051 | case FSE_AZ_EV_CODE_DRV_GEN_EV: |
1046 | channel->eventq_magic | 1052 | channel->eventq_magic = EFX_QWORD_FIELD( |
1047 | = EFX_QWORD_FIELD(event, EVQ_MAGIC); | 1053 | event, FSF_AZ_DRV_GEN_EV_MAGIC); |
1048 | EFX_LOG(channel->efx, "channel %d received generated " | 1054 | EFX_LOG(channel->efx, "channel %d received generated " |
1049 | "event "EFX_QWORD_FMT"\n", channel->channel, | 1055 | "event "EFX_QWORD_FMT"\n", channel->channel, |
1050 | EFX_QWORD_VAL(event)); | 1056 | EFX_QWORD_VAL(event)); |
1051 | break; | 1057 | break; |
1052 | case GLOBAL_EV_DECODE: | 1058 | case FSE_AZ_EV_CODE_GLOBAL_EV: |
1053 | falcon_handle_global_event(channel, &event); | 1059 | falcon_handle_global_event(channel, &event); |
1054 | break; | 1060 | break; |
1055 | case DRIVER_EV_DECODE: | 1061 | case FSE_AZ_EV_CODE_DRIVER_EV: |
1056 | falcon_handle_driver_event(channel, &event); | 1062 | falcon_handle_driver_event(channel, &event); |
1057 | break; | 1063 | break; |
1058 | default: | 1064 | default: |
@@ -1086,16 +1092,19 @@ void falcon_set_int_moderation(struct efx_channel *channel) | |||
1086 | if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION) | 1092 | if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION) |
1087 | channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION; | 1093 | channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION; |
1088 | EFX_POPULATE_DWORD_2(timer_cmd, | 1094 | EFX_POPULATE_DWORD_2(timer_cmd, |
1089 | TIMER_MODE, TIMER_MODE_INT_HLDOFF, | 1095 | FRF_AB_TC_TIMER_MODE, |
1090 | TIMER_VAL, | 1096 | FFE_BB_TIMER_MODE_INT_HLDOFF, |
1097 | FRF_AB_TC_TIMER_VAL, | ||
1091 | channel->irq_moderation / | 1098 | channel->irq_moderation / |
1092 | FALCON_IRQ_MOD_RESOLUTION - 1); | 1099 | FALCON_IRQ_MOD_RESOLUTION - 1); |
1093 | } else { | 1100 | } else { |
1094 | EFX_POPULATE_DWORD_2(timer_cmd, | 1101 | EFX_POPULATE_DWORD_2(timer_cmd, |
1095 | TIMER_MODE, TIMER_MODE_DIS, | 1102 | FRF_AB_TC_TIMER_MODE, |
1096 | TIMER_VAL, 0); | 1103 | FFE_BB_TIMER_MODE_DIS, |
1104 | FRF_AB_TC_TIMER_VAL, 0); | ||
1097 | } | 1105 | } |
1098 | falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER, | 1106 | BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0); |
1107 | falcon_writel_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, | ||
1099 | channel->channel); | 1108 | channel->channel); |
1100 | 1109 | ||
1101 | } | 1110 | } |
@@ -1127,9 +1136,9 @@ void falcon_init_eventq(struct efx_channel *channel) | |||
1127 | 1136 | ||
1128 | /* Push event queue to card */ | 1137 | /* Push event queue to card */ |
1129 | EFX_POPULATE_OWORD_3(evq_ptr, | 1138 | EFX_POPULATE_OWORD_3(evq_ptr, |
1130 | EVQ_EN, 1, | 1139 | FRF_AZ_EVQ_EN, 1, |
1131 | EVQ_SIZE, FALCON_EVQ_ORDER, | 1140 | FRF_AZ_EVQ_SIZE, FALCON_EVQ_ORDER, |
1132 | EVQ_BUF_BASE_ID, channel->eventq.index); | 1141 | FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); |
1133 | falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base, | 1142 | falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base, |
1134 | channel->channel); | 1143 | channel->channel); |
1135 | 1144 | ||
@@ -1165,9 +1174,9 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic) | |||
1165 | { | 1174 | { |
1166 | efx_qword_t test_event; | 1175 | efx_qword_t test_event; |
1167 | 1176 | ||
1168 | EFX_POPULATE_QWORD_2(test_event, | 1177 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, |
1169 | EV_CODE, DRV_GEN_EV_DECODE, | 1178 | FSE_AZ_EV_CODE_DRV_GEN_EV, |
1170 | EVQ_MAGIC, magic); | 1179 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); |
1171 | falcon_generate_event(channel, &test_event); | 1180 | falcon_generate_event(channel, &test_event); |
1172 | } | 1181 | } |
1173 | 1182 | ||
@@ -1175,11 +1184,12 @@ void falcon_sim_phy_event(struct efx_nic *efx) | |||
1175 | { | 1184 | { |
1176 | efx_qword_t phy_event; | 1185 | efx_qword_t phy_event; |
1177 | 1186 | ||
1178 | EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE); | 1187 | EFX_POPULATE_QWORD_1(phy_event, FSF_AZ_EV_CODE, |
1188 | FSE_AZ_EV_CODE_GLOBAL_EV); | ||
1179 | if (EFX_IS10G(efx)) | 1189 | if (EFX_IS10G(efx)) |
1180 | EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1); | 1190 | EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_XG_PHY0_INTR, 1); |
1181 | else | 1191 | else |
1182 | EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1); | 1192 | EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_G_PHY0_INTR, 1); |
1183 | 1193 | ||
1184 | falcon_generate_event(&efx->channel[0], &phy_event); | 1194 | falcon_generate_event(&efx->channel[0], &phy_event); |
1185 | } | 1195 | } |
@@ -1207,22 +1217,23 @@ static void falcon_poll_flush_events(struct efx_nic *efx) | |||
1207 | if (!falcon_event_present(event)) | 1217 | if (!falcon_event_present(event)) |
1208 | break; | 1218 | break; |
1209 | 1219 | ||
1210 | ev_code = EFX_QWORD_FIELD(*event, EV_CODE); | 1220 | ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE); |
1211 | ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); | 1221 | ev_sub_code = EFX_QWORD_FIELD(*event, |
1212 | if (ev_code == DRIVER_EV_DECODE && | 1222 | FSF_AZ_DRIVER_EV_SUBCODE); |
1213 | ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) { | 1223 | if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && |
1224 | ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) { | ||
1214 | ev_queue = EFX_QWORD_FIELD(*event, | 1225 | ev_queue = EFX_QWORD_FIELD(*event, |
1215 | DRIVER_EV_TX_DESCQ_ID); | 1226 | FSF_AZ_DRIVER_EV_SUBDATA); |
1216 | if (ev_queue < EFX_TX_QUEUE_COUNT) { | 1227 | if (ev_queue < EFX_TX_QUEUE_COUNT) { |
1217 | tx_queue = efx->tx_queue + ev_queue; | 1228 | tx_queue = efx->tx_queue + ev_queue; |
1218 | tx_queue->flushed = true; | 1229 | tx_queue->flushed = true; |
1219 | } | 1230 | } |
1220 | } else if (ev_code == DRIVER_EV_DECODE && | 1231 | } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && |
1221 | ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) { | 1232 | ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) { |
1222 | ev_queue = EFX_QWORD_FIELD(*event, | 1233 | ev_queue = EFX_QWORD_FIELD( |
1223 | DRIVER_EV_RX_DESCQ_ID); | 1234 | *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); |
1224 | ev_failed = EFX_QWORD_FIELD(*event, | 1235 | ev_failed = EFX_QWORD_FIELD( |
1225 | DRIVER_EV_RX_FLUSH_FAIL); | 1236 | *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); |
1226 | if (ev_queue < efx->n_rx_queues) { | 1237 | if (ev_queue < efx->n_rx_queues) { |
1227 | rx_queue = efx->rx_queue + ev_queue; | 1238 | rx_queue = efx->rx_queue + ev_queue; |
1228 | 1239 | ||
@@ -1312,9 +1323,9 @@ static inline void falcon_interrupts(struct efx_nic *efx, int enabled, | |||
1312 | efx_oword_t int_en_reg_ker; | 1323 | efx_oword_t int_en_reg_ker; |
1313 | 1324 | ||
1314 | EFX_POPULATE_OWORD_2(int_en_reg_ker, | 1325 | EFX_POPULATE_OWORD_2(int_en_reg_ker, |
1315 | KER_INT_KER, force, | 1326 | FRF_AZ_KER_INT_KER, force, |
1316 | DRV_INT_EN_KER, enabled); | 1327 | FRF_AZ_DRV_INT_EN_KER, enabled); |
1317 | falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER); | 1328 | falcon_write(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); |
1318 | } | 1329 | } |
1319 | 1330 | ||
1320 | void falcon_enable_interrupts(struct efx_nic *efx) | 1331 | void falcon_enable_interrupts(struct efx_nic *efx) |
@@ -1327,9 +1338,10 @@ void falcon_enable_interrupts(struct efx_nic *efx) | |||
1327 | 1338 | ||
1328 | /* Program address */ | 1339 | /* Program address */ |
1329 | EFX_POPULATE_OWORD_2(int_adr_reg_ker, | 1340 | EFX_POPULATE_OWORD_2(int_adr_reg_ker, |
1330 | NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx), | 1341 | FRF_AZ_NORM_INT_VEC_DIS_KER, |
1331 | INT_ADR_KER, efx->irq_status.dma_addr); | 1342 | EFX_INT_MODE_USE_MSI(efx), |
1332 | falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER); | 1343 | FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); |
1344 | falcon_write(efx, &int_adr_reg_ker, FR_AZ_INT_ADR_KER); | ||
1333 | 1345 | ||
1334 | /* Enable interrupts */ | 1346 | /* Enable interrupts */ |
1335 | falcon_interrupts(efx, 1, 0); | 1347 | falcon_interrupts(efx, 1, 0); |
@@ -1369,9 +1381,9 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx) | |||
1369 | { | 1381 | { |
1370 | efx_dword_t reg; | 1382 | efx_dword_t reg; |
1371 | 1383 | ||
1372 | EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e); | 1384 | EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e); |
1373 | falcon_writel(efx, ®, INT_ACK_REG_KER_A1); | 1385 | falcon_writel(efx, ®, FR_AA_INT_ACK_KER); |
1374 | falcon_readl(efx, ®, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1); | 1386 | falcon_readl(efx, ®, FR_AA_WORK_AROUND_BROKEN_PCI_READS); |
1375 | } | 1387 | } |
1376 | 1388 | ||
1377 | /* Process a fatal interrupt | 1389 | /* Process a fatal interrupt |
@@ -1384,8 +1396,8 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) | |||
1384 | efx_oword_t fatal_intr; | 1396 | efx_oword_t fatal_intr; |
1385 | int error, mem_perr; | 1397 | int error, mem_perr; |
1386 | 1398 | ||
1387 | falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER); | 1399 | falcon_read(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); |
1388 | error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR); | 1400 | error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); |
1389 | 1401 | ||
1390 | EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status " | 1402 | EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status " |
1391 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), | 1403 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), |
@@ -1395,10 +1407,10 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) | |||
1395 | goto out; | 1407 | goto out; |
1396 | 1408 | ||
1397 | /* If this is a memory parity error dump which blocks are offending */ | 1409 | /* If this is a memory parity error dump which blocks are offending */ |
1398 | mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER); | 1410 | mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER); |
1399 | if (mem_perr) { | 1411 | if (mem_perr) { |
1400 | efx_oword_t reg; | 1412 | efx_oword_t reg; |
1401 | falcon_read(efx, ®, MEM_STAT_REG_KER); | 1413 | falcon_read(efx, ®, FR_AZ_MEM_STAT); |
1402 | EFX_ERR(efx, "SYSTEM ERROR: memory parity error " | 1414 | EFX_ERR(efx, "SYSTEM ERROR: memory parity error " |
1403 | EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); | 1415 | EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); |
1404 | } | 1416 | } |
@@ -1442,11 +1454,11 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) | |||
1442 | int syserr; | 1454 | int syserr; |
1443 | 1455 | ||
1444 | /* Read the ISR which also ACKs the interrupts */ | 1456 | /* Read the ISR which also ACKs the interrupts */ |
1445 | falcon_readl(efx, ®, INT_ISR0_B0); | 1457 | falcon_readl(efx, ®, FR_BZ_INT_ISR0); |
1446 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); | 1458 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); |
1447 | 1459 | ||
1448 | /* Check to see if we have a serious error condition */ | 1460 | /* Check to see if we have a serious error condition */ |
1449 | syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); | 1461 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1450 | if (unlikely(syserr)) | 1462 | if (unlikely(syserr)) |
1451 | return falcon_fatal_interrupt(efx); | 1463 | return falcon_fatal_interrupt(efx); |
1452 | 1464 | ||
@@ -1492,7 +1504,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | |||
1492 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | 1504 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); |
1493 | 1505 | ||
1494 | /* Check to see if we have a serious error condition */ | 1506 | /* Check to see if we have a serious error condition */ |
1495 | syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); | 1507 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1496 | if (unlikely(syserr)) | 1508 | if (unlikely(syserr)) |
1497 | return falcon_fatal_interrupt(efx); | 1509 | return falcon_fatal_interrupt(efx); |
1498 | 1510 | ||
@@ -1559,10 +1571,10 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx) | |||
1559 | if (falcon_rev(efx) < FALCON_REV_B0) | 1571 | if (falcon_rev(efx) < FALCON_REV_B0) |
1560 | return; | 1572 | return; |
1561 | 1573 | ||
1562 | for (offset = RX_RSS_INDIR_TBL_B0; | 1574 | for (offset = FR_BZ_RX_INDIRECTION_TBL; |
1563 | offset < RX_RSS_INDIR_TBL_B0 + 0x800; | 1575 | offset < FR_BZ_RX_INDIRECTION_TBL + 0x800; |
1564 | offset += 0x10) { | 1576 | offset += 0x10) { |
1565 | EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0, | 1577 | EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, |
1566 | i % efx->n_rx_queues); | 1578 | i % efx->n_rx_queues); |
1567 | falcon_writel(efx, &dword, offset); | 1579 | falcon_writel(efx, &dword, offset); |
1568 | i++; | 1580 | i++; |
@@ -1627,7 +1639,7 @@ void falcon_fini_interrupt(struct efx_nic *efx) | |||
1627 | 1639 | ||
1628 | /* ACK legacy interrupt */ | 1640 | /* ACK legacy interrupt */ |
1629 | if (falcon_rev(efx) >= FALCON_REV_B0) | 1641 | if (falcon_rev(efx) >= FALCON_REV_B0) |
1630 | falcon_read(efx, ®, INT_ISR0_B0); | 1642 | falcon_read(efx, ®, FR_BZ_INT_ISR0); |
1631 | else | 1643 | else |
1632 | falcon_irq_ack_a1(efx); | 1644 | falcon_irq_ack_a1(efx); |
1633 | 1645 | ||
@@ -1648,8 +1660,8 @@ void falcon_fini_interrupt(struct efx_nic *efx) | |||
1648 | static int falcon_spi_poll(struct efx_nic *efx) | 1660 | static int falcon_spi_poll(struct efx_nic *efx) |
1649 | { | 1661 | { |
1650 | efx_oword_t reg; | 1662 | efx_oword_t reg; |
1651 | falcon_read(efx, ®, EE_SPI_HCMD_REG_KER); | 1663 | falcon_read(efx, ®, FR_AB_EE_SPI_HCMD); |
1652 | return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; | 1664 | return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; |
1653 | } | 1665 | } |
1654 | 1666 | ||
1655 | /* Wait for SPI command completion */ | 1667 | /* Wait for SPI command completion */ |
@@ -1701,27 +1713,27 @@ int falcon_spi_cmd(const struct efx_spi_device *spi, | |||
1701 | 1713 | ||
1702 | /* Program address register, if we have an address */ | 1714 | /* Program address register, if we have an address */ |
1703 | if (addressed) { | 1715 | if (addressed) { |
1704 | EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address); | 1716 | EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address); |
1705 | falcon_write(efx, ®, EE_SPI_HADR_REG_KER); | 1717 | falcon_write(efx, ®, FR_AB_EE_SPI_HADR); |
1706 | } | 1718 | } |
1707 | 1719 | ||
1708 | /* Program data register, if we have data */ | 1720 | /* Program data register, if we have data */ |
1709 | if (in != NULL) { | 1721 | if (in != NULL) { |
1710 | memcpy(®, in, len); | 1722 | memcpy(®, in, len); |
1711 | falcon_write(efx, ®, EE_SPI_HDATA_REG_KER); | 1723 | falcon_write(efx, ®, FR_AB_EE_SPI_HDATA); |
1712 | } | 1724 | } |
1713 | 1725 | ||
1714 | /* Issue read/write command */ | 1726 | /* Issue read/write command */ |
1715 | EFX_POPULATE_OWORD_7(reg, | 1727 | EFX_POPULATE_OWORD_7(reg, |
1716 | EE_SPI_HCMD_CMD_EN, 1, | 1728 | FRF_AB_EE_SPI_HCMD_CMD_EN, 1, |
1717 | EE_SPI_HCMD_SF_SEL, spi->device_id, | 1729 | FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id, |
1718 | EE_SPI_HCMD_DABCNT, len, | 1730 | FRF_AB_EE_SPI_HCMD_DABCNT, len, |
1719 | EE_SPI_HCMD_READ, reading, | 1731 | FRF_AB_EE_SPI_HCMD_READ, reading, |
1720 | EE_SPI_HCMD_DUBCNT, 0, | 1732 | FRF_AB_EE_SPI_HCMD_DUBCNT, 0, |
1721 | EE_SPI_HCMD_ADBCNT, | 1733 | FRF_AB_EE_SPI_HCMD_ADBCNT, |
1722 | (addressed ? spi->addr_len : 0), | 1734 | (addressed ? spi->addr_len : 0), |
1723 | EE_SPI_HCMD_ENC, command); | 1735 | FRF_AB_EE_SPI_HCMD_ENC, command); |
1724 | falcon_write(efx, ®, EE_SPI_HCMD_REG_KER); | 1736 | falcon_write(efx, ®, FR_AB_EE_SPI_HCMD); |
1725 | 1737 | ||
1726 | /* Wait for read/write to complete */ | 1738 | /* Wait for read/write to complete */ |
1727 | rc = falcon_spi_wait(efx); | 1739 | rc = falcon_spi_wait(efx); |
@@ -1730,7 +1742,7 @@ int falcon_spi_cmd(const struct efx_spi_device *spi, | |||
1730 | 1742 | ||
1731 | /* Read data */ | 1743 | /* Read data */ |
1732 | if (out != NULL) { | 1744 | if (out != NULL) { |
1733 | falcon_read(efx, ®, EE_SPI_HDATA_REG_KER); | 1745 | falcon_read(efx, ®, FR_AB_EE_SPI_HDATA); |
1734 | memcpy(out, ®, len); | 1746 | memcpy(out, ®, len); |
1735 | } | 1747 | } |
1736 | 1748 | ||
@@ -1871,21 +1883,22 @@ static int falcon_reset_macs(struct efx_nic *efx) | |||
1871 | * macs, so instead use the internal MAC resets | 1883 | * macs, so instead use the internal MAC resets |
1872 | */ | 1884 | */ |
1873 | if (!EFX_IS10G(efx)) { | 1885 | if (!EFX_IS10G(efx)) { |
1874 | EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1); | 1886 | EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1); |
1875 | falcon_write(efx, ®, GM_CFG1_REG); | 1887 | falcon_write(efx, ®, FR_AB_GM_CFG1); |
1876 | udelay(1000); | 1888 | udelay(1000); |
1877 | 1889 | ||
1878 | EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0); | 1890 | EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0); |
1879 | falcon_write(efx, ®, GM_CFG1_REG); | 1891 | falcon_write(efx, ®, FR_AB_GM_CFG1); |
1880 | udelay(1000); | 1892 | udelay(1000); |
1881 | return 0; | 1893 | return 0; |
1882 | } else { | 1894 | } else { |
1883 | EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1); | 1895 | EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1); |
1884 | falcon_write(efx, ®, XM_GLB_CFG_REG); | 1896 | falcon_write(efx, ®, FR_AB_XM_GLB_CFG); |
1885 | 1897 | ||
1886 | for (count = 0; count < 10000; count++) { | 1898 | for (count = 0; count < 10000; count++) { |
1887 | falcon_read(efx, ®, XM_GLB_CFG_REG); | 1899 | falcon_read(efx, ®, FR_AB_XM_GLB_CFG); |
1888 | if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0) | 1900 | if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) == |
1901 | 0) | ||
1889 | return 0; | 1902 | return 0; |
1890 | udelay(10); | 1903 | udelay(10); |
1891 | } | 1904 | } |
@@ -1899,22 +1912,22 @@ static int falcon_reset_macs(struct efx_nic *efx) | |||
1899 | * the drain sequence with the statistics fetch */ | 1912 | * the drain sequence with the statistics fetch */ |
1900 | efx_stats_disable(efx); | 1913 | efx_stats_disable(efx); |
1901 | 1914 | ||
1902 | falcon_read(efx, ®, MAC0_CTRL_REG_KER); | 1915 | falcon_read(efx, ®, FR_AB_MAC_CTRL); |
1903 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1); | 1916 | EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 1); |
1904 | falcon_write(efx, ®, MAC0_CTRL_REG_KER); | 1917 | falcon_write(efx, ®, FR_AB_MAC_CTRL); |
1905 | 1918 | ||
1906 | falcon_read(efx, ®, GLB_CTL_REG_KER); | 1919 | falcon_read(efx, ®, FR_AB_GLB_CTL); |
1907 | EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1); | 1920 | EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1); |
1908 | EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1); | 1921 | EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1); |
1909 | EFX_SET_OWORD_FIELD(reg, RST_EM, 1); | 1922 | EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1); |
1910 | falcon_write(efx, ®, GLB_CTL_REG_KER); | 1923 | falcon_write(efx, ®, FR_AB_GLB_CTL); |
1911 | 1924 | ||
1912 | count = 0; | 1925 | count = 0; |
1913 | while (1) { | 1926 | while (1) { |
1914 | falcon_read(efx, ®, GLB_CTL_REG_KER); | 1927 | falcon_read(efx, ®, FR_AB_GLB_CTL); |
1915 | if (!EFX_OWORD_FIELD(reg, RST_XGTX) && | 1928 | if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) && |
1916 | !EFX_OWORD_FIELD(reg, RST_XGRX) && | 1929 | !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) && |
1917 | !EFX_OWORD_FIELD(reg, RST_EM)) { | 1930 | !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) { |
1918 | EFX_LOG(efx, "Completed MAC reset after %d loops\n", | 1931 | EFX_LOG(efx, "Completed MAC reset after %d loops\n", |
1919 | count); | 1932 | count); |
1920 | break; | 1933 | break; |
@@ -1945,9 +1958,9 @@ void falcon_drain_tx_fifo(struct efx_nic *efx) | |||
1945 | (efx->loopback_mode != LOOPBACK_NONE)) | 1958 | (efx->loopback_mode != LOOPBACK_NONE)) |
1946 | return; | 1959 | return; |
1947 | 1960 | ||
1948 | falcon_read(efx, ®, MAC0_CTRL_REG_KER); | 1961 | falcon_read(efx, ®, FR_AB_MAC_CTRL); |
1949 | /* There is no point in draining more than once */ | 1962 | /* There is no point in draining more than once */ |
1950 | if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0)) | 1963 | if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN)) |
1951 | return; | 1964 | return; |
1952 | 1965 | ||
1953 | falcon_reset_macs(efx); | 1966 | falcon_reset_macs(efx); |
@@ -1961,9 +1974,9 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) | |||
1961 | return; | 1974 | return; |
1962 | 1975 | ||
1963 | /* Isolate the MAC -> RX */ | 1976 | /* Isolate the MAC -> RX */ |
1964 | falcon_read(efx, ®, RX_CFG_REG_KER); | 1977 | falcon_read(efx, ®, FR_AZ_RX_CFG); |
1965 | EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0); | 1978 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0); |
1966 | falcon_write(efx, ®, RX_CFG_REG_KER); | 1979 | falcon_write(efx, ®, FR_AZ_RX_CFG); |
1967 | 1980 | ||
1968 | if (!efx->link_up) | 1981 | if (!efx->link_up) |
1969 | falcon_drain_tx_fifo(efx); | 1982 | falcon_drain_tx_fifo(efx); |
@@ -1986,19 +1999,19 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
1986 | * indefinitely held and TX queue can be flushed at any point | 1999 | * indefinitely held and TX queue can be flushed at any point |
1987 | * while the link is down. */ | 2000 | * while the link is down. */ |
1988 | EFX_POPULATE_OWORD_5(reg, | 2001 | EFX_POPULATE_OWORD_5(reg, |
1989 | MAC_XOFF_VAL, 0xffff /* max pause time */, | 2002 | FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */, |
1990 | MAC_BCAD_ACPT, 1, | 2003 | FRF_AB_MAC_BCAD_ACPT, 1, |
1991 | MAC_UC_PROM, efx->promiscuous, | 2004 | FRF_AB_MAC_UC_PROM, efx->promiscuous, |
1992 | MAC_LINK_STATUS, 1, /* always set */ | 2005 | FRF_AB_MAC_LINK_STATUS, 1, /* always set */ |
1993 | MAC_SPEED, link_speed); | 2006 | FRF_AB_MAC_SPEED, link_speed); |
1994 | /* On B0, MAC backpressure can be disabled and packets get | 2007 | /* On B0, MAC backpressure can be disabled and packets get |
1995 | * discarded. */ | 2008 | * discarded. */ |
1996 | if (falcon_rev(efx) >= FALCON_REV_B0) { | 2009 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
1997 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, | 2010 | EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, |
1998 | !efx->link_up); | 2011 | !efx->link_up); |
1999 | } | 2012 | } |
2000 | 2013 | ||
2001 | falcon_write(efx, ®, MAC0_CTRL_REG_KER); | 2014 | falcon_write(efx, ®, FR_AB_MAC_CTRL); |
2002 | 2015 | ||
2003 | /* Restore the multicast hash registers. */ | 2016 | /* Restore the multicast hash registers. */ |
2004 | falcon_set_multicast_hash(efx); | 2017 | falcon_set_multicast_hash(efx); |
@@ -2007,13 +2020,13 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
2007 | * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL. | 2020 | * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL. |
2008 | * Action on receipt of pause frames is controller by XM_DIS_FCNTL */ | 2021 | * Action on receipt of pause frames is controller by XM_DIS_FCNTL */ |
2009 | tx_fc = !!(efx->link_fc & EFX_FC_TX); | 2022 | tx_fc = !!(efx->link_fc & EFX_FC_TX); |
2010 | falcon_read(efx, ®, RX_CFG_REG_KER); | 2023 | falcon_read(efx, ®, FR_AZ_RX_CFG); |
2011 | EFX_SET_OWORD_FIELD(reg, RX_XOFF_MAC_EN, tx_fc); | 2024 | EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, tx_fc); |
2012 | 2025 | ||
2013 | /* Unisolate the MAC -> RX */ | 2026 | /* Unisolate the MAC -> RX */ |
2014 | if (falcon_rev(efx) >= FALCON_REV_B0) | 2027 | if (falcon_rev(efx) >= FALCON_REV_B0) |
2015 | EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); | 2028 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); |
2016 | falcon_write(efx, ®, RX_CFG_REG_KER); | 2029 | falcon_write(efx, ®, FR_AZ_RX_CFG); |
2017 | } | 2030 | } |
2018 | 2031 | ||
2019 | int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) | 2032 | int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) |
@@ -2028,8 +2041,8 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) | |||
2028 | /* Statistics fetch will fail if the MAC is in TX drain */ | 2041 | /* Statistics fetch will fail if the MAC is in TX drain */ |
2029 | if (falcon_rev(efx) >= FALCON_REV_B0) { | 2042 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
2030 | efx_oword_t temp; | 2043 | efx_oword_t temp; |
2031 | falcon_read(efx, &temp, MAC0_CTRL_REG_KER); | 2044 | falcon_read(efx, &temp, FR_AB_MAC_CTRL); |
2032 | if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) | 2045 | if (EFX_OWORD_FIELD(temp, FRF_BB_TXFIFO_DRAIN_EN)) |
2033 | return 0; | 2046 | return 0; |
2034 | } | 2047 | } |
2035 | 2048 | ||
@@ -2039,10 +2052,10 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) | |||
2039 | 2052 | ||
2040 | /* Initiate DMA transfer of stats */ | 2053 | /* Initiate DMA transfer of stats */ |
2041 | EFX_POPULATE_OWORD_2(reg, | 2054 | EFX_POPULATE_OWORD_2(reg, |
2042 | MAC_STAT_DMA_CMD, 1, | 2055 | FRF_AB_MAC_STAT_DMA_CMD, 1, |
2043 | MAC_STAT_DMA_ADR, | 2056 | FRF_AB_MAC_STAT_DMA_ADR, |
2044 | efx->stats_buffer.dma_addr); | 2057 | efx->stats_buffer.dma_addr); |
2045 | falcon_write(efx, ®, MAC0_STAT_DMA_REG_KER); | 2058 | falcon_write(efx, ®, FR_AB_MAC_STAT_DMA); |
2046 | 2059 | ||
2047 | /* Wait for transfer to complete */ | 2060 | /* Wait for transfer to complete */ |
2048 | for (i = 0; i < 400; i++) { | 2061 | for (i = 0; i < 400; i++) { |
@@ -2072,10 +2085,10 @@ static int falcon_gmii_wait(struct efx_nic *efx) | |||
2072 | 2085 | ||
2073 | /* wait upto 50ms - taken max from datasheet */ | 2086 | /* wait upto 50ms - taken max from datasheet */ |
2074 | for (count = 0; count < 5000; count++) { | 2087 | for (count = 0; count < 5000; count++) { |
2075 | falcon_readl(efx, &md_stat, MD_STAT_REG_KER); | 2088 | falcon_readl(efx, &md_stat, FR_AB_MD_STAT); |
2076 | if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) { | 2089 | if (EFX_DWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { |
2077 | if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 || | 2090 | if (EFX_DWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 || |
2078 | EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) { | 2091 | EFX_DWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) { |
2079 | EFX_ERR(efx, "error from GMII access " | 2092 | EFX_ERR(efx, "error from GMII access " |
2080 | EFX_DWORD_FMT"\n", | 2093 | EFX_DWORD_FMT"\n", |
2081 | EFX_DWORD_VAL(md_stat)); | 2094 | EFX_DWORD_VAL(md_stat)); |
@@ -2108,29 +2121,30 @@ static int falcon_mdio_write(struct net_device *net_dev, | |||
2108 | goto out; | 2121 | goto out; |
2109 | 2122 | ||
2110 | /* Write the address/ID register */ | 2123 | /* Write the address/ID register */ |
2111 | EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); | 2124 | EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr); |
2112 | falcon_write(efx, ®, MD_PHY_ADR_REG_KER); | 2125 | falcon_write(efx, ®, FR_AB_MD_PHY_ADR); |
2113 | 2126 | ||
2114 | EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad); | 2127 | EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad, |
2115 | falcon_write(efx, ®, MD_ID_REG_KER); | 2128 | FRF_AB_MD_DEV_ADR, devad); |
2129 | falcon_write(efx, ®, FR_AB_MD_ID); | ||
2116 | 2130 | ||
2117 | /* Write data */ | 2131 | /* Write data */ |
2118 | EFX_POPULATE_OWORD_1(reg, MD_TXD, value); | 2132 | EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value); |
2119 | falcon_write(efx, ®, MD_TXD_REG_KER); | 2133 | falcon_write(efx, ®, FR_AB_MD_TXD); |
2120 | 2134 | ||
2121 | EFX_POPULATE_OWORD_2(reg, | 2135 | EFX_POPULATE_OWORD_2(reg, |
2122 | MD_WRC, 1, | 2136 | FRF_AB_MD_WRC, 1, |
2123 | MD_GC, 0); | 2137 | FRF_AB_MD_GC, 0); |
2124 | falcon_write(efx, ®, MD_CS_REG_KER); | 2138 | falcon_write(efx, ®, FR_AB_MD_CS); |
2125 | 2139 | ||
2126 | /* Wait for data to be written */ | 2140 | /* Wait for data to be written */ |
2127 | rc = falcon_gmii_wait(efx); | 2141 | rc = falcon_gmii_wait(efx); |
2128 | if (rc) { | 2142 | if (rc) { |
2129 | /* Abort the write operation */ | 2143 | /* Abort the write operation */ |
2130 | EFX_POPULATE_OWORD_2(reg, | 2144 | EFX_POPULATE_OWORD_2(reg, |
2131 | MD_WRC, 0, | 2145 | FRF_AB_MD_WRC, 0, |
2132 | MD_GC, 1); | 2146 | FRF_AB_MD_GC, 1); |
2133 | falcon_write(efx, ®, MD_CS_REG_KER); | 2147 | falcon_write(efx, ®, FR_AB_MD_CS); |
2134 | udelay(10); | 2148 | udelay(10); |
2135 | } | 2149 | } |
2136 | 2150 | ||
@@ -2154,29 +2168,30 @@ static int falcon_mdio_read(struct net_device *net_dev, | |||
2154 | if (rc) | 2168 | if (rc) |
2155 | goto out; | 2169 | goto out; |
2156 | 2170 | ||
2157 | EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); | 2171 | EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr); |
2158 | falcon_write(efx, ®, MD_PHY_ADR_REG_KER); | 2172 | falcon_write(efx, ®, FR_AB_MD_PHY_ADR); |
2159 | 2173 | ||
2160 | EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad); | 2174 | EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad, |
2161 | falcon_write(efx, ®, MD_ID_REG_KER); | 2175 | FRF_AB_MD_DEV_ADR, devad); |
2176 | falcon_write(efx, ®, FR_AB_MD_ID); | ||
2162 | 2177 | ||
2163 | /* Request data to be read */ | 2178 | /* Request data to be read */ |
2164 | EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0); | 2179 | EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0); |
2165 | falcon_write(efx, ®, MD_CS_REG_KER); | 2180 | falcon_write(efx, ®, FR_AB_MD_CS); |
2166 | 2181 | ||
2167 | /* Wait for data to become available */ | 2182 | /* Wait for data to become available */ |
2168 | rc = falcon_gmii_wait(efx); | 2183 | rc = falcon_gmii_wait(efx); |
2169 | if (rc == 0) { | 2184 | if (rc == 0) { |
2170 | falcon_read(efx, ®, MD_RXD_REG_KER); | 2185 | falcon_read(efx, ®, FR_AB_MD_RXD); |
2171 | rc = EFX_OWORD_FIELD(reg, MD_RXD); | 2186 | rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD); |
2172 | EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n", | 2187 | EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n", |
2173 | prtad, devad, addr, rc); | 2188 | prtad, devad, addr, rc); |
2174 | } else { | 2189 | } else { |
2175 | /* Abort the read operation */ | 2190 | /* Abort the read operation */ |
2176 | EFX_POPULATE_OWORD_2(reg, | 2191 | EFX_POPULATE_OWORD_2(reg, |
2177 | MD_RIC, 0, | 2192 | FRF_AB_MD_RIC, 0, |
2178 | MD_GC, 1); | 2193 | FRF_AB_MD_GC, 1); |
2179 | falcon_write(efx, ®, MD_CS_REG_KER); | 2194 | falcon_write(efx, ®, FR_AB_MD_CS); |
2180 | 2195 | ||
2181 | EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n", | 2196 | EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n", |
2182 | prtad, devad, addr, rc); | 2197 | prtad, devad, addr, rc); |
@@ -2243,16 +2258,17 @@ int falcon_switch_mac(struct efx_nic *efx) | |||
2243 | 2258 | ||
2244 | /* Always push the NIC_STAT_REG setting even if the mac hasn't | 2259 | /* Always push the NIC_STAT_REG setting even if the mac hasn't |
2245 | * changed, because this function is run post online reset */ | 2260 | * changed, because this function is run post online reset */ |
2246 | falcon_read(efx, &nic_stat, NIC_STAT_REG); | 2261 | falcon_read(efx, &nic_stat, FR_AB_NIC_STAT); |
2247 | strap_val = EFX_IS10G(efx) ? 5 : 3; | 2262 | strap_val = EFX_IS10G(efx) ? 5 : 3; |
2248 | if (falcon_rev(efx) >= FALCON_REV_B0) { | 2263 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
2249 | EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1); | 2264 | EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1); |
2250 | EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val); | 2265 | EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val); |
2251 | falcon_write(efx, &nic_stat, NIC_STAT_REG); | 2266 | falcon_write(efx, &nic_stat, FR_AB_NIC_STAT); |
2252 | } else { | 2267 | } else { |
2253 | /* Falcon A1 does not support 1G/10G speed switching | 2268 | /* Falcon A1 does not support 1G/10G speed switching |
2254 | * and must not be used with a PHY that does. */ | 2269 | * and must not be used with a PHY that does. */ |
2255 | BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val); | 2270 | BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) != |
2271 | strap_val); | ||
2256 | } | 2272 | } |
2257 | 2273 | ||
2258 | if (old_mac_op == efx->mac_op) | 2274 | if (old_mac_op == efx->mac_op) |
@@ -2325,8 +2341,8 @@ void falcon_set_multicast_hash(struct efx_nic *efx) | |||
2325 | */ | 2341 | */ |
2326 | set_bit_le(0xff, mc_hash->byte); | 2342 | set_bit_le(0xff, mc_hash->byte); |
2327 | 2343 | ||
2328 | falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER); | 2344 | falcon_write(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0); |
2329 | falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER); | 2345 | falcon_write(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1); |
2330 | } | 2346 | } |
2331 | 2347 | ||
2332 | 2348 | ||
@@ -2352,7 +2368,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) | |||
2352 | region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); | 2368 | region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); |
2353 | if (!region) | 2369 | if (!region) |
2354 | return -ENOMEM; | 2370 | return -ENOMEM; |
2355 | nvconfig = region + NVCONFIG_OFFSET; | 2371 | nvconfig = region + FALCON_NVCONFIG_OFFSET; |
2356 | 2372 | ||
2357 | mutex_lock(&efx->spi_lock); | 2373 | mutex_lock(&efx->spi_lock); |
2358 | rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region); | 2374 | rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region); |
@@ -2368,7 +2384,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) | |||
2368 | struct_ver = le16_to_cpu(nvconfig->board_struct_ver); | 2384 | struct_ver = le16_to_cpu(nvconfig->board_struct_ver); |
2369 | 2385 | ||
2370 | rc = -EINVAL; | 2386 | rc = -EINVAL; |
2371 | if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) { | 2387 | if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) { |
2372 | EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num); | 2388 | EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num); |
2373 | goto out; | 2389 | goto out; |
2374 | } | 2390 | } |
@@ -2404,41 +2420,41 @@ static struct { | |||
2404 | unsigned address; | 2420 | unsigned address; |
2405 | efx_oword_t mask; | 2421 | efx_oword_t mask; |
2406 | } efx_test_registers[] = { | 2422 | } efx_test_registers[] = { |
2407 | { ADR_REGION_REG_KER, | 2423 | { FR_AZ_ADR_REGION, |
2408 | EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, | 2424 | EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, |
2409 | { RX_CFG_REG_KER, | 2425 | { FR_AZ_RX_CFG, |
2410 | EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, | 2426 | EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, |
2411 | { TX_CFG_REG_KER, | 2427 | { FR_AZ_TX_CFG, |
2412 | EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) }, | 2428 | EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) }, |
2413 | { TX_CFG2_REG_KER, | 2429 | { FR_AZ_TX_RESERVED, |
2414 | EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, | 2430 | EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, |
2415 | { MAC0_CTRL_REG_KER, | 2431 | { FR_AB_MAC_CTRL, |
2416 | EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) }, | 2432 | EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) }, |
2417 | { SRM_TX_DC_CFG_REG_KER, | 2433 | { FR_AZ_SRM_TX_DC_CFG, |
2418 | EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, | 2434 | EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, |
2419 | { RX_DC_CFG_REG_KER, | 2435 | { FR_AZ_RX_DC_CFG, |
2420 | EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) }, | 2436 | EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) }, |
2421 | { RX_DC_PF_WM_REG_KER, | 2437 | { FR_AZ_RX_DC_PF_WM, |
2422 | EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, | 2438 | EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, |
2423 | { DP_CTRL_REG, | 2439 | { FR_BZ_DP_CTRL, |
2424 | EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, | 2440 | EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, |
2425 | { GM_CFG2_REG, | 2441 | { FR_AB_GM_CFG2, |
2426 | EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) }, | 2442 | EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) }, |
2427 | { GMF_CFG0_REG, | 2443 | { FR_AB_GMF_CFG0, |
2428 | EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) }, | 2444 | EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) }, |
2429 | { XM_GLB_CFG_REG, | 2445 | { FR_AB_XM_GLB_CFG, |
2430 | EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, | 2446 | EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, |
2431 | { XM_TX_CFG_REG, | 2447 | { FR_AB_XM_TX_CFG, |
2432 | EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) }, | 2448 | EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) }, |
2433 | { XM_RX_CFG_REG, | 2449 | { FR_AB_XM_RX_CFG, |
2434 | EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) }, | 2450 | EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) }, |
2435 | { XM_RX_PARAM_REG, | 2451 | { FR_AB_XM_RX_PARAM, |
2436 | EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) }, | 2452 | EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) }, |
2437 | { XM_FC_REG, | 2453 | { FR_AB_XM_FC, |
2438 | EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) }, | 2454 | EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) }, |
2439 | { XM_ADR_LO_REG, | 2455 | { FR_AB_XM_ADR_LO, |
2440 | EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) }, | 2456 | EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) }, |
2441 | { XX_SD_CTL_REG, | 2457 | { FR_AB_XX_SD_CTL, |
2442 | EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, | 2458 | EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, |
2443 | }; | 2459 | }; |
2444 | 2460 | ||
@@ -2538,22 +2554,24 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | |||
2538 | } | 2554 | } |
2539 | 2555 | ||
2540 | EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, | 2556 | EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, |
2541 | EXT_PHY_RST_DUR, 0x7, | 2557 | FRF_AB_EXT_PHY_RST_DUR, |
2542 | SWRST, 1); | 2558 | FFE_AB_EXT_PHY_RST_DUR_10240US, |
2559 | FRF_AB_SWRST, 1); | ||
2543 | } else { | 2560 | } else { |
2544 | int reset_phy = (method == RESET_TYPE_INVISIBLE ? | ||
2545 | EXCLUDE_FROM_RESET : 0); | ||
2546 | |||
2547 | EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, | 2561 | EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, |
2548 | EXT_PHY_RST_CTL, reset_phy, | 2562 | /* exclude PHY from "invisible" reset */ |
2549 | PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET, | 2563 | FRF_AB_EXT_PHY_RST_CTL, |
2550 | PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET, | 2564 | method == RESET_TYPE_INVISIBLE, |
2551 | PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET, | 2565 | /* exclude EEPROM/flash and PCIe */ |
2552 | EE_RST_CTL, EXCLUDE_FROM_RESET, | 2566 | FRF_AB_PCIE_CORE_RST_CTL, 1, |
2553 | EXT_PHY_RST_DUR, 0x7 /* 10ms */, | 2567 | FRF_AB_PCIE_NSTKY_RST_CTL, 1, |
2554 | SWRST, 1); | 2568 | FRF_AB_PCIE_SD_RST_CTL, 1, |
2555 | } | 2569 | FRF_AB_EE_RST_CTL, 1, |
2556 | falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); | 2570 | FRF_AB_EXT_PHY_RST_DUR, |
2571 | FFE_AB_EXT_PHY_RST_DUR_10240US, | ||
2572 | FRF_AB_SWRST, 1); | ||
2573 | } | ||
2574 | falcon_write(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); | ||
2557 | 2575 | ||
2558 | EFX_LOG(efx, "waiting for hardware reset\n"); | 2576 | EFX_LOG(efx, "waiting for hardware reset\n"); |
2559 | schedule_timeout_uninterruptible(HZ / 20); | 2577 | schedule_timeout_uninterruptible(HZ / 20); |
@@ -2578,8 +2596,8 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | |||
2578 | } | 2596 | } |
2579 | 2597 | ||
2580 | /* Assert that reset complete */ | 2598 | /* Assert that reset complete */ |
2581 | falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); | 2599 | falcon_read(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); |
2582 | if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) { | 2600 | if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) { |
2583 | rc = -ETIMEDOUT; | 2601 | rc = -ETIMEDOUT; |
2584 | EFX_ERR(efx, "timed out waiting for hardware reset\n"); | 2602 | EFX_ERR(efx, "timed out waiting for hardware reset\n"); |
2585 | goto fail5; | 2603 | goto fail5; |
@@ -2607,16 +2625,16 @@ static int falcon_reset_sram(struct efx_nic *efx) | |||
2607 | int count; | 2625 | int count; |
2608 | 2626 | ||
2609 | /* Set the SRAM wake/sleep GPIO appropriately. */ | 2627 | /* Set the SRAM wake/sleep GPIO appropriately. */ |
2610 | falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); | 2628 | falcon_read(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); |
2611 | EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1); | 2629 | EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1); |
2612 | EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1); | 2630 | EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1); |
2613 | falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); | 2631 | falcon_write(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); |
2614 | 2632 | ||
2615 | /* Initiate SRAM reset */ | 2633 | /* Initiate SRAM reset */ |
2616 | EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, | 2634 | EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, |
2617 | SRAM_OOB_BT_INIT_EN, 1, | 2635 | FRF_AZ_SRM_INIT_EN, 1, |
2618 | SRM_NUM_BANKS_AND_BANK_SIZE, 0); | 2636 | FRF_AZ_SRM_NB_SZ, 0); |
2619 | falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); | 2637 | falcon_write(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); |
2620 | 2638 | ||
2621 | /* Wait for SRAM reset to complete */ | 2639 | /* Wait for SRAM reset to complete */ |
2622 | count = 0; | 2640 | count = 0; |
@@ -2627,8 +2645,8 @@ static int falcon_reset_sram(struct efx_nic *efx) | |||
2627 | schedule_timeout_uninterruptible(HZ / 50); | 2645 | schedule_timeout_uninterruptible(HZ / 50); |
2628 | 2646 | ||
2629 | /* Check for reset complete */ | 2647 | /* Check for reset complete */ |
2630 | falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); | 2648 | falcon_read(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); |
2631 | if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) { | 2649 | if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) { |
2632 | EFX_LOG(efx, "SRAM reset complete\n"); | 2650 | EFX_LOG(efx, "SRAM reset complete\n"); |
2633 | 2651 | ||
2634 | return 0; | 2652 | return 0; |
@@ -2713,16 +2731,16 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) | |||
2713 | board_rev = le16_to_cpu(v2->board_revision); | 2731 | board_rev = le16_to_cpu(v2->board_revision); |
2714 | 2732 | ||
2715 | if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { | 2733 | if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { |
2716 | __le32 fl = v3->spi_device_type[EE_SPI_FLASH]; | 2734 | rc = falcon_spi_device_init( |
2717 | __le32 ee = v3->spi_device_type[EE_SPI_EEPROM]; | 2735 | efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH, |
2718 | rc = falcon_spi_device_init(efx, &efx->spi_flash, | 2736 | le32_to_cpu(v3->spi_device_type |
2719 | EE_SPI_FLASH, | 2737 | [FFE_AB_SPI_DEVICE_FLASH])); |
2720 | le32_to_cpu(fl)); | ||
2721 | if (rc) | 2738 | if (rc) |
2722 | goto fail2; | 2739 | goto fail2; |
2723 | rc = falcon_spi_device_init(efx, &efx->spi_eeprom, | 2740 | rc = falcon_spi_device_init( |
2724 | EE_SPI_EEPROM, | 2741 | efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM, |
2725 | le32_to_cpu(ee)); | 2742 | le32_to_cpu(v3->spi_device_type |
2743 | [FFE_AB_SPI_DEVICE_EEPROM])); | ||
2726 | if (rc) | 2744 | if (rc) |
2727 | goto fail2; | 2745 | goto fail2; |
2728 | } | 2746 | } |
@@ -2753,13 +2771,13 @@ static int falcon_probe_nic_variant(struct efx_nic *efx) | |||
2753 | efx_oword_t altera_build; | 2771 | efx_oword_t altera_build; |
2754 | efx_oword_t nic_stat; | 2772 | efx_oword_t nic_stat; |
2755 | 2773 | ||
2756 | falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER); | 2774 | falcon_read(efx, &altera_build, FR_AZ_ALTERA_BUILD); |
2757 | if (EFX_OWORD_FIELD(altera_build, VER_ALL)) { | 2775 | if (EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER)) { |
2758 | EFX_ERR(efx, "Falcon FPGA not supported\n"); | 2776 | EFX_ERR(efx, "Falcon FPGA not supported\n"); |
2759 | return -ENODEV; | 2777 | return -ENODEV; |
2760 | } | 2778 | } |
2761 | 2779 | ||
2762 | falcon_read(efx, &nic_stat, NIC_STAT_REG); | 2780 | falcon_read(efx, &nic_stat, FR_AB_NIC_STAT); |
2763 | 2781 | ||
2764 | switch (falcon_rev(efx)) { | 2782 | switch (falcon_rev(efx)) { |
2765 | case FALCON_REV_A0: | 2783 | case FALCON_REV_A0: |
@@ -2768,7 +2786,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx) | |||
2768 | return -ENODEV; | 2786 | return -ENODEV; |
2769 | 2787 | ||
2770 | case FALCON_REV_A1: | 2788 | case FALCON_REV_A1: |
2771 | if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) { | 2789 | if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) { |
2772 | EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); | 2790 | EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); |
2773 | return -ENODEV; | 2791 | return -ENODEV; |
2774 | } | 2792 | } |
@@ -2783,7 +2801,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx) | |||
2783 | } | 2801 | } |
2784 | 2802 | ||
2785 | /* Initial assumed speed */ | 2803 | /* Initial assumed speed */ |
2786 | efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000; | 2804 | efx->link_speed = EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) ? 10000 : 1000; |
2787 | 2805 | ||
2788 | return 0; | 2806 | return 0; |
2789 | } | 2807 | } |
@@ -2794,34 +2812,36 @@ static void falcon_probe_spi_devices(struct efx_nic *efx) | |||
2794 | efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; | 2812 | efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; |
2795 | int boot_dev; | 2813 | int boot_dev; |
2796 | 2814 | ||
2797 | falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER); | 2815 | falcon_read(efx, &gpio_ctl, FR_AB_GPIO_CTL); |
2798 | falcon_read(efx, &nic_stat, NIC_STAT_REG); | 2816 | falcon_read(efx, &nic_stat, FR_AB_NIC_STAT); |
2799 | falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); | 2817 | falcon_read(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); |
2800 | 2818 | ||
2801 | if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) { | 2819 | if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) { |
2802 | boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ? | 2820 | boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ? |
2803 | EE_SPI_FLASH : EE_SPI_EEPROM); | 2821 | FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM); |
2804 | EFX_LOG(efx, "Booted from %s\n", | 2822 | EFX_LOG(efx, "Booted from %s\n", |
2805 | boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM"); | 2823 | boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM"); |
2806 | } else { | 2824 | } else { |
2807 | /* Disable VPD and set clock dividers to safe | 2825 | /* Disable VPD and set clock dividers to safe |
2808 | * values for initial programming. */ | 2826 | * values for initial programming. */ |
2809 | boot_dev = -1; | 2827 | boot_dev = -1; |
2810 | EFX_LOG(efx, "Booted from internal ASIC settings;" | 2828 | EFX_LOG(efx, "Booted from internal ASIC settings;" |
2811 | " setting SPI config\n"); | 2829 | " setting SPI config\n"); |
2812 | EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0, | 2830 | EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0, |
2813 | /* 125 MHz / 7 ~= 20 MHz */ | 2831 | /* 125 MHz / 7 ~= 20 MHz */ |
2814 | EE_SF_CLOCK_DIV, 7, | 2832 | FRF_AB_EE_SF_CLOCK_DIV, 7, |
2815 | /* 125 MHz / 63 ~= 2 MHz */ | 2833 | /* 125 MHz / 63 ~= 2 MHz */ |
2816 | EE_EE_CLOCK_DIV, 63); | 2834 | FRF_AB_EE_EE_CLOCK_DIV, 63); |
2817 | falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); | 2835 | falcon_write(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); |
2818 | } | 2836 | } |
2819 | 2837 | ||
2820 | if (boot_dev == EE_SPI_FLASH) | 2838 | if (boot_dev == FFE_AB_SPI_DEVICE_FLASH) |
2821 | falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH, | 2839 | falcon_spi_device_init(efx, &efx->spi_flash, |
2840 | FFE_AB_SPI_DEVICE_FLASH, | ||
2822 | default_flash_type); | 2841 | default_flash_type); |
2823 | if (boot_dev == EE_SPI_EEPROM) | 2842 | if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM) |
2824 | falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM, | 2843 | falcon_spi_device_init(efx, &efx->spi_eeprom, |
2844 | FFE_AB_SPI_DEVICE_EEPROM, | ||
2825 | large_eeprom_type); | 2845 | large_eeprom_type); |
2826 | } | 2846 | } |
2827 | 2847 | ||
@@ -2926,34 +2946,36 @@ static void falcon_init_rx_cfg(struct efx_nic *efx) | |||
2926 | int data_xoff_thr = rx_xoff_thresh_bytes >> 8; | 2946 | int data_xoff_thr = rx_xoff_thresh_bytes >> 8; |
2927 | efx_oword_t reg; | 2947 | efx_oword_t reg; |
2928 | 2948 | ||
2929 | falcon_read(efx, ®, RX_CFG_REG_KER); | 2949 | falcon_read(efx, ®, FR_AZ_RX_CFG); |
2930 | if (falcon_rev(efx) <= FALCON_REV_A1) { | 2950 | if (falcon_rev(efx) <= FALCON_REV_A1) { |
2931 | /* Data FIFO size is 5.5K */ | 2951 | /* Data FIFO size is 5.5K */ |
2932 | if (data_xon_thr < 0) | 2952 | if (data_xon_thr < 0) |
2933 | data_xon_thr = 512 >> 8; | 2953 | data_xon_thr = 512 >> 8; |
2934 | if (data_xoff_thr < 0) | 2954 | if (data_xoff_thr < 0) |
2935 | data_xoff_thr = 2048 >> 8; | 2955 | data_xoff_thr = 2048 >> 8; |
2936 | EFX_SET_OWORD_FIELD(reg, RX_DESC_PUSH_EN_A1, 0); | 2956 | EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); |
2937 | EFX_SET_OWORD_FIELD(reg, RX_USR_BUF_SIZE_A1, huge_buf_size); | 2957 | EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, |
2938 | EFX_SET_OWORD_FIELD(reg, RX_XON_MAC_TH_A1, data_xon_thr); | 2958 | huge_buf_size); |
2939 | EFX_SET_OWORD_FIELD(reg, RX_XOFF_MAC_TH_A1, data_xoff_thr); | 2959 | EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr); |
2940 | EFX_SET_OWORD_FIELD(reg, RX_XON_TX_TH_A1, ctrl_xon_thr); | 2960 | EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr); |
2941 | EFX_SET_OWORD_FIELD(reg, RX_XOFF_TX_TH_A1, ctrl_xoff_thr); | 2961 | EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); |
2962 | EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr); | ||
2942 | } else { | 2963 | } else { |
2943 | /* Data FIFO size is 80K; register fields moved */ | 2964 | /* Data FIFO size is 80K; register fields moved */ |
2944 | if (data_xon_thr < 0) | 2965 | if (data_xon_thr < 0) |
2945 | data_xon_thr = 27648 >> 8; /* ~3*max MTU */ | 2966 | data_xon_thr = 27648 >> 8; /* ~3*max MTU */ |
2946 | if (data_xoff_thr < 0) | 2967 | if (data_xoff_thr < 0) |
2947 | data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */ | 2968 | data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */ |
2948 | EFX_SET_OWORD_FIELD(reg, RX_DESC_PUSH_EN_B0, 0); | 2969 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); |
2949 | EFX_SET_OWORD_FIELD(reg, RX_USR_BUF_SIZE_B0, huge_buf_size); | 2970 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, |
2950 | EFX_SET_OWORD_FIELD(reg, RX_XON_MAC_TH_B0, data_xon_thr); | 2971 | huge_buf_size); |
2951 | EFX_SET_OWORD_FIELD(reg, RX_XOFF_MAC_TH_B0, data_xoff_thr); | 2972 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr); |
2952 | EFX_SET_OWORD_FIELD(reg, RX_XON_TX_TH_B0, ctrl_xon_thr); | 2973 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr); |
2953 | EFX_SET_OWORD_FIELD(reg, RX_XOFF_TX_TH_B0, ctrl_xoff_thr); | 2974 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr); |
2954 | EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); | 2975 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr); |
2976 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); | ||
2955 | } | 2977 | } |
2956 | falcon_write(efx, ®, RX_CFG_REG_KER); | 2978 | falcon_write(efx, ®, FR_AZ_RX_CFG); |
2957 | } | 2979 | } |
2958 | 2980 | ||
2959 | /* This call performs hardware-specific global initialisation, such as | 2981 | /* This call performs hardware-specific global initialisation, such as |
@@ -2966,15 +2988,15 @@ int falcon_init_nic(struct efx_nic *efx) | |||
2966 | int rc; | 2988 | int rc; |
2967 | 2989 | ||
2968 | /* Use on-chip SRAM */ | 2990 | /* Use on-chip SRAM */ |
2969 | falcon_read(efx, &temp, NIC_STAT_REG); | 2991 | falcon_read(efx, &temp, FR_AB_NIC_STAT); |
2970 | EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1); | 2992 | EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1); |
2971 | falcon_write(efx, &temp, NIC_STAT_REG); | 2993 | falcon_write(efx, &temp, FR_AB_NIC_STAT); |
2972 | 2994 | ||
2973 | /* Set the source of the GMAC clock */ | 2995 | /* Set the source of the GMAC clock */ |
2974 | if (falcon_rev(efx) == FALCON_REV_B0) { | 2996 | if (falcon_rev(efx) == FALCON_REV_B0) { |
2975 | falcon_read(efx, &temp, GPIO_CTL_REG_KER); | 2997 | falcon_read(efx, &temp, FR_AB_GPIO_CTL); |
2976 | EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true); | 2998 | EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true); |
2977 | falcon_write(efx, &temp, GPIO_CTL_REG_KER); | 2999 | falcon_write(efx, &temp, FR_AB_GPIO_CTL); |
2978 | } | 3000 | } |
2979 | 3001 | ||
2980 | rc = falcon_reset_sram(efx); | 3002 | rc = falcon_reset_sram(efx); |
@@ -2982,32 +3004,32 @@ int falcon_init_nic(struct efx_nic *efx) | |||
2982 | return rc; | 3004 | return rc; |
2983 | 3005 | ||
2984 | /* Set positions of descriptor caches in SRAM. */ | 3006 | /* Set positions of descriptor caches in SRAM. */ |
2985 | EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8); | 3007 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8); |
2986 | falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER); | 3008 | falcon_write(efx, &temp, FR_AZ_SRM_TX_DC_CFG); |
2987 | EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8); | 3009 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8); |
2988 | falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER); | 3010 | falcon_write(efx, &temp, FR_AZ_SRM_RX_DC_CFG); |
2989 | 3011 | ||
2990 | /* Set TX descriptor cache size. */ | 3012 | /* Set TX descriptor cache size. */ |
2991 | BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER)); | 3013 | BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER)); |
2992 | EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER); | 3014 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); |
2993 | falcon_write(efx, &temp, TX_DC_CFG_REG_KER); | 3015 | falcon_write(efx, &temp, FR_AZ_TX_DC_CFG); |
2994 | 3016 | ||
2995 | /* Set RX descriptor cache size. Set low watermark to size-8, as | 3017 | /* Set RX descriptor cache size. Set low watermark to size-8, as |
2996 | * this allows most efficient prefetching. | 3018 | * this allows most efficient prefetching. |
2997 | */ | 3019 | */ |
2998 | BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER)); | 3020 | BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER)); |
2999 | EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER); | 3021 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); |
3000 | falcon_write(efx, &temp, RX_DC_CFG_REG_KER); | 3022 | falcon_write(efx, &temp, FR_AZ_RX_DC_CFG); |
3001 | EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8); | 3023 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); |
3002 | falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER); | 3024 | falcon_write(efx, &temp, FR_AZ_RX_DC_PF_WM); |
3003 | 3025 | ||
3004 | /* Clear the parity enables on the TX data fifos as | 3026 | /* Clear the parity enables on the TX data fifos as |
3005 | * they produce false parity errors because of timing issues | 3027 | * they produce false parity errors because of timing issues |
3006 | */ | 3028 | */ |
3007 | if (EFX_WORKAROUND_5129(efx)) { | 3029 | if (EFX_WORKAROUND_5129(efx)) { |
3008 | falcon_read(efx, &temp, SPARE_REG_KER); | 3030 | falcon_read(efx, &temp, FR_AZ_CSR_SPARE); |
3009 | EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0); | 3031 | EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0); |
3010 | falcon_write(efx, &temp, SPARE_REG_KER); | 3032 | falcon_write(efx, &temp, FR_AZ_CSR_SPARE); |
3011 | } | 3033 | } |
3012 | 3034 | ||
3013 | /* Enable all the genuinely fatal interrupts. (They are still | 3035 | /* Enable all the genuinely fatal interrupts. (They are still |
@@ -3017,64 +3039,65 @@ int falcon_init_nic(struct efx_nic *efx) | |||
3017 | * Note: All other fatal interrupts are enabled | 3039 | * Note: All other fatal interrupts are enabled |
3018 | */ | 3040 | */ |
3019 | EFX_POPULATE_OWORD_3(temp, | 3041 | EFX_POPULATE_OWORD_3(temp, |
3020 | ILL_ADR_INT_KER_EN, 1, | 3042 | FRF_AZ_ILL_ADR_INT_KER_EN, 1, |
3021 | RBUF_OWN_INT_KER_EN, 1, | 3043 | FRF_AZ_RBUF_OWN_INT_KER_EN, 1, |
3022 | TBUF_OWN_INT_KER_EN, 1); | 3044 | FRF_AZ_TBUF_OWN_INT_KER_EN, 1); |
3023 | EFX_INVERT_OWORD(temp); | 3045 | EFX_INVERT_OWORD(temp); |
3024 | falcon_write(efx, &temp, FATAL_INTR_REG_KER); | 3046 | falcon_write(efx, &temp, FR_AZ_FATAL_INTR_KER); |
3025 | 3047 | ||
3026 | if (EFX_WORKAROUND_7244(efx)) { | 3048 | if (EFX_WORKAROUND_7244(efx)) { |
3027 | falcon_read(efx, &temp, RX_FILTER_CTL_REG); | 3049 | falcon_read(efx, &temp, FR_BZ_RX_FILTER_CTL); |
3028 | EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8); | 3050 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8); |
3029 | EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8); | 3051 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8); |
3030 | EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8); | 3052 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8); |
3031 | EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8); | 3053 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8); |
3032 | falcon_write(efx, &temp, RX_FILTER_CTL_REG); | 3054 | falcon_write(efx, &temp, FR_BZ_RX_FILTER_CTL); |
3033 | } | 3055 | } |
3034 | 3056 | ||
3035 | falcon_setup_rss_indir_table(efx); | 3057 | falcon_setup_rss_indir_table(efx); |
3036 | 3058 | ||
3059 | /* XXX This is documented only for Falcon A0/A1 */ | ||
3037 | /* Setup RX. Wait for descriptor is broken and must | 3060 | /* Setup RX. Wait for descriptor is broken and must |
3038 | * be disabled. RXDP recovery shouldn't be needed, but is. | 3061 | * be disabled. RXDP recovery shouldn't be needed, but is. |
3039 | */ | 3062 | */ |
3040 | falcon_read(efx, &temp, RX_SELF_RST_REG_KER); | 3063 | falcon_read(efx, &temp, FR_AA_RX_SELF_RST); |
3041 | EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1); | 3064 | EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1); |
3042 | EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1); | 3065 | EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1); |
3043 | if (EFX_WORKAROUND_5583(efx)) | 3066 | if (EFX_WORKAROUND_5583(efx)) |
3044 | EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1); | 3067 | EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1); |
3045 | falcon_write(efx, &temp, RX_SELF_RST_REG_KER); | 3068 | falcon_write(efx, &temp, FR_AA_RX_SELF_RST); |
3046 | 3069 | ||
3047 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be | 3070 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be |
3048 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. | 3071 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. |
3049 | */ | 3072 | */ |
3050 | falcon_read(efx, &temp, TX_CFG2_REG_KER); | 3073 | falcon_read(efx, &temp, FR_AZ_TX_RESERVED); |
3051 | EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe); | 3074 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); |
3052 | EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1); | 3075 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); |
3053 | EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1); | 3076 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); |
3054 | EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0); | 3077 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0); |
3055 | EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1); | 3078 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); |
3056 | /* Enable SW_EV to inherit in char driver - assume harmless here */ | 3079 | /* Enable SW_EV to inherit in char driver - assume harmless here */ |
3057 | EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1); | 3080 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); |
3058 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | 3081 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ |
3059 | EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); | 3082 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); |
3060 | /* Squash TX of packets of 16 bytes or less */ | 3083 | /* Squash TX of packets of 16 bytes or less */ |
3061 | if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) | 3084 | if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) |
3062 | EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); | 3085 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); |
3063 | falcon_write(efx, &temp, TX_CFG2_REG_KER); | 3086 | falcon_write(efx, &temp, FR_AZ_TX_RESERVED); |
3064 | 3087 | ||
3065 | /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 | 3088 | /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 |
3066 | * descriptors (which is bad). | 3089 | * descriptors (which is bad). |
3067 | */ | 3090 | */ |
3068 | falcon_read(efx, &temp, TX_CFG_REG_KER); | 3091 | falcon_read(efx, &temp, FR_AZ_TX_CFG); |
3069 | EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0); | 3092 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); |
3070 | falcon_write(efx, &temp, TX_CFG_REG_KER); | 3093 | falcon_write(efx, &temp, FR_AZ_TX_CFG); |
3071 | 3094 | ||
3072 | falcon_init_rx_cfg(efx); | 3095 | falcon_init_rx_cfg(efx); |
3073 | 3096 | ||
3074 | /* Set destination of both TX and RX Flush events */ | 3097 | /* Set destination of both TX and RX Flush events */ |
3075 | if (falcon_rev(efx) >= FALCON_REV_B0) { | 3098 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
3076 | EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); | 3099 | EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); |
3077 | falcon_write(efx, &temp, DP_CTRL_REG); | 3100 | falcon_write(efx, &temp, FR_BZ_DP_CTRL); |
3078 | } | 3101 | } |
3079 | 3102 | ||
3080 | return 0; | 3103 | return 0; |
@@ -3110,8 +3133,9 @@ void falcon_update_nic_stats(struct efx_nic *efx) | |||
3110 | { | 3133 | { |
3111 | efx_oword_t cnt; | 3134 | efx_oword_t cnt; |
3112 | 3135 | ||
3113 | falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER); | 3136 | falcon_read(efx, &cnt, FR_AZ_RX_NODESC_DROP); |
3114 | efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT); | 3137 | efx->n_rx_nodesc_drop_cnt += |
3138 | EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT); | ||
3115 | } | 3139 | } |
3116 | 3140 | ||
3117 | /************************************************************************** | 3141 | /************************************************************************** |
@@ -3124,11 +3148,11 @@ void falcon_update_nic_stats(struct efx_nic *efx) | |||
3124 | struct efx_nic_type falcon_a_nic_type = { | 3148 | struct efx_nic_type falcon_a_nic_type = { |
3125 | .mem_bar = 2, | 3149 | .mem_bar = 2, |
3126 | .mem_map_size = 0x20000, | 3150 | .mem_map_size = 0x20000, |
3127 | .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1, | 3151 | .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, |
3128 | .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1, | 3152 | .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER, |
3129 | .buf_tbl_base = BUF_TBL_KER_A1, | 3153 | .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER, |
3130 | .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1, | 3154 | .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER, |
3131 | .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1, | 3155 | .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER, |
3132 | .txd_ring_mask = FALCON_TXD_RING_MASK, | 3156 | .txd_ring_mask = FALCON_TXD_RING_MASK, |
3133 | .rxd_ring_mask = FALCON_RXD_RING_MASK, | 3157 | .rxd_ring_mask = FALCON_RXD_RING_MASK, |
3134 | .evq_size = FALCON_EVQ_SIZE, | 3158 | .evq_size = FALCON_EVQ_SIZE, |
@@ -3145,12 +3169,14 @@ struct efx_nic_type falcon_b_nic_type = { | |||
3145 | /* Map everything up to and including the RSS indirection | 3169 | /* Map everything up to and including the RSS indirection |
3146 | * table. Don't map MSI-X table, MSI-X PBA since Linux | 3170 | * table. Don't map MSI-X table, MSI-X PBA since Linux |
3147 | * requires that they not be mapped. */ | 3171 | * requires that they not be mapped. */ |
3148 | .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800, | 3172 | .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL + |
3149 | .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0, | 3173 | FR_BZ_RX_INDIRECTION_TBL_STEP * |
3150 | .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0, | 3174 | FR_BZ_RX_INDIRECTION_TBL_ROWS), |
3151 | .buf_tbl_base = BUF_TBL_KER_B0, | 3175 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, |
3152 | .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0, | 3176 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, |
3153 | .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0, | 3177 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, |
3178 | .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, | ||
3179 | .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, | ||
3154 | .txd_ring_mask = FALCON_TXD_RING_MASK, | 3180 | .txd_ring_mask = FALCON_TXD_RING_MASK, |
3155 | .rxd_ring_mask = FALCON_RXD_RING_MASK, | 3181 | .rxd_ring_mask = FALCON_RXD_RING_MASK, |
3156 | .evq_size = FALCON_EVQ_SIZE, | 3182 | .evq_size = FALCON_EVQ_SIZE, |