author		Michael Chan <mchan@broadcom.com>	2012-12-06 05:33:08 -0500
committer	David S. Miller <davem@davemloft.net>	2012-12-07 12:44:01 -0500
commit		e503e0662447ce2bd7c0a73c90395c78ebee494c (patch)
tree		78cbe4e2ec2b3c2df26b69f4b82821ed81fbf3e3 /drivers/net/ethernet/broadcom/bnx2.c
parent		1d9c5a04d5208c6bc53364a513508ffcab1bc338 (diff)
bnx2: Rename register read and write macros
with BNX2_ prefix for namespace consistency. Currently, these macro names
conflict with similar macros in bnx2x.h, preventing the cnic driver from
including both bnx2.h and bnx2x.h. Including bnx2x.h in cnic.c will remove
many redundant definitions and simplify the interface.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
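For context, the rename is purely mechanical: only the macro names gain the
BNX2_ prefix, the MMIO access they wrap is unchanged. A minimal sketch of what
the prefixed accessors plausibly look like (an illustration modeled on the old
REG_RD/REG_WR wrappers, not copied from bnx2.h):

	/* Illustrative sketch only -- the real definitions live in bnx2.h.
	 * The rename adds the BNX2_ prefix; the readl()/writel() access to
	 * the memory-mapped register window (bp->regview) is unchanged. */
	#define BNX2_RD(bp, offset)		readl((bp)->regview + (offset))
	#define BNX2_WR(bp, offset, val)	writel((val), (bp)->regview + (offset))
	#define BNX2_WR16(bp, offset, val)	writew((val), (bp)->regview + (offset))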
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2.c')
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2.c	636
1 file changed, 318 insertions(+), 318 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e264e960a762..41fa6af2fd86 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -274,8 +274,8 @@ bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
 	u32 val;

 	spin_lock_bh(&bp->indirect_lock);
-	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
-	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
+	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
+	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
 	spin_unlock_bh(&bp->indirect_lock);
 	return val;
 }
@@ -284,8 +284,8 @@ static void
 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
 {
 	spin_lock_bh(&bp->indirect_lock);
-	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
-	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
+	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
+	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
 	spin_unlock_bh(&bp->indirect_lock);
 }

@@ -309,18 +309,18 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
 		int i;

-		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
-		REG_WR(bp, BNX2_CTX_CTX_CTRL,
+		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
+		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
 			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
 		for (i = 0; i < 5; i++) {
-			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
+			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
 			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
 				break;
 			udelay(5);
 		}
 	} else {
-		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
-		REG_WR(bp, BNX2_CTX_DATA, val);
+		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
+		BNX2_WR(bp, BNX2_CTX_DATA, val);
 	}
 	spin_unlock_bh(&bp->indirect_lock);
 }
@@ -494,11 +494,11 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 	int i, ret;

 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
-		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

-		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
-		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

 		udelay(40);
 	}
@@ -506,16 +506,16 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 	val1 = (bp->phy_addr << 21) | (reg << 16) |
 		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
 		BNX2_EMAC_MDIO_COMM_START_BUSY;
-	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
+	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

 	for (i = 0; i < 50; i++) {
 		udelay(10);

-		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
+		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
 			udelay(5);

-			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
+			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
 			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

 			break;
@@ -532,11 +532,11 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 	}

 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
-		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

-		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
-		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

 		udelay(40);
 	}
@@ -551,11 +551,11 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
 	int i, ret;

 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
-		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

-		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
-		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

 		udelay(40);
 	}
@@ -563,12 +563,12 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
 	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
 		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
 		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
-	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
+	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

 	for (i = 0; i < 50; i++) {
 		udelay(10);

-		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
+		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
 			udelay(5);
 			break;
@@ -581,11 +581,11 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
 		ret = 0;

 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
-		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

-		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
-		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

 		udelay(40);
 	}
@@ -601,10 +601,10 @@ bnx2_disable_int(struct bnx2 *bp)

 	for (i = 0; i < bp->irq_nvecs; i++) {
 		bnapi = &bp->bnx2_napi[i];
-		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 			BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
 	}
-	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
+	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
 }

 static void
@@ -616,16 +616,16 @@ bnx2_enable_int(struct bnx2 *bp)
 	for (i = 0; i < bp->irq_nvecs; i++) {
 		bnapi = &bp->bnx2_napi[i];

-		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
 			bnapi->last_status_idx);

-		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 			bnapi->last_status_idx);
 	}
-	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
+	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
 }

 static void
@@ -1294,14 +1294,14 @@ bnx2_set_mac_link(struct bnx2 *bp)
 {
 	u32 val;

-	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
+	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
 	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
 		(bp->duplex == DUPLEX_HALF)) {
-		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
+		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
 	}

 	/* Configure the EMAC mode register. */
-	val = REG_RD(bp, BNX2_EMAC_MODE);
+	val = BNX2_RD(bp, BNX2_EMAC_MODE);

 	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
 		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
@@ -1333,25 +1333,25 @@ bnx2_set_mac_link(struct bnx2 *bp)
 	/* Set the MAC to operate in the appropriate duplex mode. */
 	if (bp->duplex == DUPLEX_HALF)
 		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
-	REG_WR(bp, BNX2_EMAC_MODE, val);
+	BNX2_WR(bp, BNX2_EMAC_MODE, val);

 	/* Enable/disable rx PAUSE. */
 	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

 	if (bp->flow_ctrl & FLOW_CTRL_RX)
 		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
-	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
+	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

 	/* Enable/disable tx PAUSE. */
-	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
+	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
 	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

 	if (bp->flow_ctrl & FLOW_CTRL_TX)
 		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
-	REG_WR(bp, BNX2_EMAC_TX_MODE, val);
+	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

 	/* Acknowledge the interrupt. */
-	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
+	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

 	bnx2_init_all_rx_contexts(bp);
 }
@@ -1554,7 +1554,7 @@ bnx2_set_link(struct bnx2 *bp)
 			bnx2_5706s_force_link_dn(bp, 0);
 			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
 		}
-		val = REG_RD(bp, BNX2_EMAC_STATUS);
+		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
@@ -1942,8 +1942,8 @@ bnx2_send_heart_beat(struct bnx2 *bp)
 	spin_lock(&bp->indirect_lock);
 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
-	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
-	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
+	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
+	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
 	spin_unlock(&bp->indirect_lock);
 }

@@ -2269,7 +2269,7 @@ bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

 	if (CHIP_NUM(bp) == CHIP_NUM_5706)
-		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
+		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

 	if (bp->dev->mtu > 1500) {
 		u32 val;
@@ -2368,7 +2368,7 @@ __acquires(&bp->phy_lock)
 	bp->mii_adv = MII_ADVERTISE;
 	bp->mii_lpa = MII_LPA;

-	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
+	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
 		goto setup_phy;
@@ -2402,10 +2402,10 @@ bnx2_set_mac_loopback(struct bnx2 *bp)
 {
 	u32 mac_mode;

-	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
+	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
-	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
+	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
 	bp->link_up = 1;
 	return 0;
 }
@@ -2431,13 +2431,13 @@ bnx2_set_phy_loopback(struct bnx2 *bp)
 		msleep(100);
 	}

-	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
+	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
 		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
 		BNX2_EMAC_MODE_25G_MODE);

 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
-	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
+	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
 	bp->link_up = 1;
 	return 0;
 }
@@ -2539,9 +2539,9 @@ bnx2_init_5709_context(struct bnx2 *bp)

 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
 	val |= (BCM_PAGE_BITS - 8) << 16;
-	REG_WR(bp, BNX2_CTX_COMMAND, val);
+	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
 	for (i = 0; i < 10; i++) {
-		val = REG_RD(bp, BNX2_CTX_COMMAND);
+		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
 			break;
 		udelay(2);
@@ -2557,16 +2557,16 @@ bnx2_init_5709_context(struct bnx2 *bp)
 		else
 			return -ENOMEM;

-		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
-		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
 			(u64) bp->ctx_blk_mapping[i] >> 32);
-		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
+		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
 		for (j = 0; j < 10; j++) {

-			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
 				break;
 			udelay(5);
@@ -2612,8 +2612,8 @@ bnx2_init_context(struct bnx2 *bp)
 		vcid_addr += (i << PHY_CTX_SHIFT);
 		pcid_addr += (i << PHY_CTX_SHIFT);

-		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
-		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
+		BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
+		BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

 		/* Zero out the context. */
 		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
@@ -2633,7 +2633,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
 	if (good_mbuf == NULL)
 		return -ENOMEM;

-	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

 	good_mbuf_cnt = 0;
@@ -2678,12 +2678,12 @@ bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)

 	val = (mac_addr[0] << 8) | mac_addr[1];

-	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
+	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);

 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
 		(mac_addr[4] << 8) | mac_addr[5];

-	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
+	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
 }

 static inline int
@@ -2770,9 +2770,9 @@ bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
 	old_link_state = sblk->status_attn_bits_ack & event;
 	if (new_link_state != old_link_state) {
 		if (new_link_state)
-			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
+			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
 		else
-			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
+			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
 	} else
 		is_set = 0;

@@ -3255,11 +3255,11 @@ next_rx:
 	rxr->rx_prod = sw_prod;

 	if (pg_ring_used)
-		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
+		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

-	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
+	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

-	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
+	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

 	mmiowb();

@@ -3277,7 +3277,7 @@ bnx2_msi(int irq, void *dev_instance)
 	struct bnx2 *bp = bnapi->bp;

 	prefetch(bnapi->status_blk.msi);
-	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

@@ -3321,18 +3321,18 @@ bnx2_interrupt(int irq, void *dev_instance)
 	 * the status block write.
 	 */
 	if ((sblk->status_idx == bnapi->last_status_idx) &&
-	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
+	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
 		return IRQ_NONE;

-	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

 	/* Read back to deassert IRQ immediately to avoid too many
 	 * spurious interrupts.
 	 */
-	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
+	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

 	/* Return here if interrupt is shared and is disabled. */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
@@ -3388,14 +3388,14 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
 	u32 msi_ctrl;

 	if (bnx2_has_work(bnapi)) {
-		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
+		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
 			return;

 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
-			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
+			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
 			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
-			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
+			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
 		}
 	}
@@ -3434,9 +3434,9 @@ static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 		/* This is needed to take care of transient status
 		 * during link changes.
 		 */
-		REG_WR(bp, BNX2_HC_COMMAND,
+		BNX2_WR(bp, BNX2_HC_COMMAND,
 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
-		REG_RD(bp, BNX2_HC_COMMAND);
+		BNX2_RD(bp, BNX2_HC_COMMAND);
 	}
 }

@@ -3473,9 +3473,9 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
 		if (likely(!bnx2_has_fast_work(bnapi))) {

 			napi_complete(napi);
-			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 				bnapi->last_status_idx);
 			break;
 		}
 	}
@@ -3511,19 +3511,19 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 		if (likely(!bnx2_has_work(bnapi))) {
 			napi_complete(napi);
 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
-				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 					bnapi->last_status_idx);
 				break;
 			}
-			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
 				bnapi->last_status_idx);

-			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 				bnapi->last_status_idx);
 			break;
 		}
 	}
@@ -3561,8 +3561,8 @@ bnx2_set_rx_mode(struct net_device *dev)
 	}
 	else if (dev->flags & IFF_ALLMULTI) {
 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
-			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
 				0xffffffff);
 		}
 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
 	}
@@ -3584,8 +3584,8 @@ bnx2_set_rx_mode(struct net_device *dev)
 		}

 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
-			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
 				mc_filter[i]);
 		}

 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
@@ -3610,12 +3610,12 @@ bnx2_set_rx_mode(struct net_device *dev)

 	if (rx_mode != bp->rx_mode) {
 		bp->rx_mode = rx_mode;
-		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
+		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
 	}

-	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
-	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
-	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
+	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
+	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

 	spin_unlock_bh(&bp->phy_lock);
 }
@@ -3756,13 +3756,13 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
 	}

 	for (i = 0; i < rv2p_code_len; i += 8) {
-		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
+		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
 		rv2p_code++;
-		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
+		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
 		rv2p_code++;

 		val = (i / 8) | cmd;
-		REG_WR(bp, addr, val);
+		BNX2_WR(bp, addr, val);
 	}

 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
@@ -3772,22 +3772,22 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
 		loc = be32_to_cpu(fw_entry->fixup[i]);
 		if (loc && ((loc * 4) < rv2p_code_len)) {
 			code = be32_to_cpu(*(rv2p_code + loc - 1));
-			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
+			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
 			code = be32_to_cpu(*(rv2p_code + loc));
 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
-			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
+			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

 			val = (loc / 2) | cmd;
-			REG_WR(bp, addr, val);
+			BNX2_WR(bp, addr, val);
 		}
 	}

 	/* Reset the processor, un-stall is done later. */
 	if (rv2p_proc == RV2P_PROC1) {
-		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
+		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
 	}
 	else {
-		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
+		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
 	}

 	return 0;
@@ -3924,14 +3924,14 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
 		/* delay required during transition out of D3hot */
 		msleep(20);

-		val = REG_RD(bp, BNX2_EMAC_MODE);
+		val = BNX2_RD(bp, BNX2_EMAC_MODE);
 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
 		val &= ~BNX2_EMAC_MODE_MPKT;
-		REG_WR(bp, BNX2_EMAC_MODE, val);
+		BNX2_WR(bp, BNX2_EMAC_MODE, val);

-		val = REG_RD(bp, BNX2_RPM_CONFIG);
+		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
-		REG_WR(bp, BNX2_RPM_CONFIG, val);
+		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
 		break;
 	}
 	case PCI_D3hot: {
@@ -3963,7 +3963,7 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)

 			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

-			val = REG_RD(bp, BNX2_EMAC_MODE);
+			val = BNX2_RD(bp, BNX2_EMAC_MODE);

 			/* Enable port mode. */
 			val &= ~BNX2_EMAC_MODE_PORT;
@@ -3978,32 +3978,32 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
 				val |= BNX2_EMAC_MODE_25G_MODE;
 			}

-			REG_WR(bp, BNX2_EMAC_MODE, val);
+			BNX2_WR(bp, BNX2_EMAC_MODE, val);

 			/* receive all multicast */
 			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
-				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+				BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
 					0xffffffff);
 			}
-			REG_WR(bp, BNX2_EMAC_RX_MODE,
+			BNX2_WR(bp, BNX2_EMAC_RX_MODE,
 				BNX2_EMAC_RX_MODE_SORT_MODE);

 			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
 				BNX2_RPM_SORT_USER0_MC_EN;
-			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
-			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
-			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
+			BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+			BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
+			BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
 				BNX2_RPM_SORT_USER0_ENA);

 			/* Need to enable EMAC and RPM for WOL. */
-			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+			BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
 				BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
 				BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
 				BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

-			val = REG_RD(bp, BNX2_RPM_CONFIG);
+			val = BNX2_RD(bp, BNX2_RPM_CONFIG);
 			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
-			REG_WR(bp, BNX2_RPM_CONFIG, val);
+			BNX2_WR(bp, BNX2_RPM_CONFIG, val);

 			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
 		}
@@ -4050,9 +4050,9 @@ bnx2_acquire_nvram_lock(struct bnx2 *bp)
 	int j;

 	/* Request access to the flash interface. */
-	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
+	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
-		val = REG_RD(bp, BNX2_NVM_SW_ARB);
+		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
 			break;

@@ -4072,10 +4072,10 @@ bnx2_release_nvram_lock(struct bnx2 *bp)
 	u32 val;

 	/* Relinquish nvram interface. */
-	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
+	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
-		val = REG_RD(bp, BNX2_NVM_SW_ARB);
+		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
 			break;

@@ -4094,20 +4094,20 @@ bnx2_enable_nvram_write(struct bnx2 *bp)
 {
 	u32 val;

-	val = REG_RD(bp, BNX2_MISC_CFG);
-	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
+	val = BNX2_RD(bp, BNX2_MISC_CFG);
+	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

 	if (bp->flash_info->flags & BNX2_NV_WREN) {
 		int j;

-		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
-		REG_WR(bp, BNX2_NVM_COMMAND,
+		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+		BNX2_WR(bp, BNX2_NVM_COMMAND,
 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
 			udelay(5);

-			val = REG_RD(bp, BNX2_NVM_COMMAND);
+			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
 			if (val & BNX2_NVM_COMMAND_DONE)
 				break;
 		}
@@ -4123,8 +4123,8 @@ bnx2_disable_nvram_write(struct bnx2 *bp)
 {
 	u32 val;

-	val = REG_RD(bp, BNX2_MISC_CFG);
-	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
+	val = BNX2_RD(bp, BNX2_MISC_CFG);
+	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
 }


@@ -4133,10 +4133,10 @@ bnx2_enable_nvram_access(struct bnx2 *bp)
 {
 	u32 val;

-	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
+	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
 	/* Enable both bits, even on read. */
-	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
+	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
 }

 static void
@@ -4144,9 +4144,9 @@ bnx2_disable_nvram_access(struct bnx2 *bp)
 {
 	u32 val;

-	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
+	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
 	/* Disable both bits, even after read. */
-	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
+	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
 }
@@ -4166,13 +4166,13 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
 	      BNX2_NVM_COMMAND_DOIT;

 	/* Need to clear DONE bit separately. */
-	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

 	/* Address of the NVRAM to read from. */
-	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

 	/* Issue an erase command. */
-	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
+	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

 	/* Wait for completion. */
 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
@@ -4180,7 +4180,7 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)

 		udelay(5);

-		val = REG_RD(bp, BNX2_NVM_COMMAND);
+		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
 		if (val & BNX2_NVM_COMMAND_DONE)
 			break;
 	}
@@ -4208,13 +4208,13 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
 	}

 	/* Need to clear DONE bit separately. */
-	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

 	/* Address of the NVRAM to read from. */
-	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

 	/* Issue a read command. */
-	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
+	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

 	/* Wait for completion. */
 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
@@ -4222,9 +4222,9 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)

 		udelay(5);

-		val = REG_RD(bp, BNX2_NVM_COMMAND);
+		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
 		if (val & BNX2_NVM_COMMAND_DONE) {
-			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
+			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
 			memcpy(ret_val, &v, 4);
 			break;
 		}
@@ -4254,24 +4254,24 @@ bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
 	}

 	/* Need to clear DONE bit separately. */
-	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

 	memcpy(&val32, val, 4);

 	/* Write the data. */
-	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
+	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

 	/* Address of the NVRAM to write to. */
-	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

 	/* Issue the write command. */
-	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
+	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

 	/* Wait for completion. */
 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
 		udelay(5);

-		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
+		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
 			break;
 	}
 	if (j >= NVRAM_TIMEOUT_COUNT)
@@ -4293,7 +4293,7 @@ bnx2_init_nvram(struct bnx2 *bp)
 	}

 	/* Determine the selected interface. */
-	val = REG_RD(bp, BNX2_NVM_CFG1);
+	val = BNX2_RD(bp, BNX2_NVM_CFG1);

 	entry_count = ARRAY_SIZE(flash_table);

@@ -4332,10 +4332,10 @@ bnx2_init_nvram(struct bnx2 *bp)
 		bnx2_enable_nvram_access(bp);

 		/* Reconfigure the flash interface */
-		REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
-		REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
-		REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
-		REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
+		BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
+		BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
+		BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
+		BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

 		/* Disable access to flash interface */
 		bnx2_disable_nvram_access(bp);
@@ -4696,10 +4696,10 @@ bnx2_init_fw_cap(struct bnx2 *bp)
 static void
 bnx2_setup_msix_tbl(struct bnx2 *bp)
 {
-	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
+	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

-	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
-	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
+	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
+	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
 }

 static int
@@ -4713,22 +4713,22 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 	 * issuing a reset. */
 	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
 	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
-		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
+		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
-		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
+		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
 		udelay(5);
 	} else {  /* 5709 */
-		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
-		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
-		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);

 		for (i = 0; i < 100; i++) {
 			msleep(1);
-			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
+			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
 				break;
 		}
@@ -4744,17 +4744,17 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)

 	/* Do a dummy read to force the chip to complete all current transaction
 	 * before we issue a reset. */
-	val = REG_RD(bp, BNX2_MISC_ID);
+	val = BNX2_RD(bp, BNX2_MISC_ID);

 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
-		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
-		REG_RD(bp, BNX2_MISC_COMMAND);
+		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
+		BNX2_RD(bp, BNX2_MISC_COMMAND);
 		udelay(5);

 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

-		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

 	} else {
 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
@@ -4762,7 +4762,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

 		/* Chip reset. */
-		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

 		/* Reading back any register after chip reset will hang the
 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
@@ -4774,7 +4774,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)

 		/* Reset takes approximate 30 usec */
 		for (i = 0; i < 10; i++) {
-			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
+			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
 				break;
@@ -4789,7 +4789,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 	}

 	/* Make sure byte swapping is properly configured. */
-	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
+	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
 	if (val != 0x01020304) {
 		pr_err("Chip not in correct endian mode\n");
 		return -ENODEV;
@@ -4811,7 +4811,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
 		/* Adjust the voltage regular to two steps lower.  The default
 		 * of this register is 0x0000000e. */
-		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
+		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

 		/* Remove bad rbuf memory from the free pool. */
 		rc = bnx2_alloc_bad_rbuf(bp);
@@ -4820,7 +4820,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
 		bnx2_setup_msix_tbl(bp);
 		/* Prevent MSIX table reads and write from timing out */
-		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
 	}

@@ -4834,7 +4834,7 @@ bnx2_init_chip(struct bnx2 *bp)
 	int rc, i;

 	/* Make sure the interrupt is not active. */
-	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
@@ -4854,12 +4854,12 @@ bnx2_init_chip(struct bnx2 *bp)
 	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

-	REG_WR(bp, BNX2_DMA_CONFIG, val);
+	BNX2_WR(bp, BNX2_DMA_CONFIG, val);

 	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
-		val = REG_RD(bp, BNX2_TDMA_CONFIG);
+		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
-		REG_WR(bp, BNX2_TDMA_CONFIG, val);
+		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
 	}

 	if (bp->flags & BNX2_FLAG_PCIX) {
@@ -4871,10 +4871,10 @@ bnx2_init_chip(struct bnx2 *bp)
 				      val16 & ~PCI_X_CMD_ERO);
 	}

-	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

 	/* Initialize context mapping and zero out the quick contexts.  The
 	 * context block must have already been enabled. */
@@ -4892,7 +4892,7 @@ bnx2_init_chip(struct bnx2 *bp)

 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

-	val = REG_RD(bp, BNX2_MQ_CONFIG);
+	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
@@ -4901,20 +4901,20 @@ bnx2_init_chip(struct bnx2 *bp)
 			val |= BNX2_MQ_CONFIG_HALT_DIS;
 	}

-	REG_WR(bp, BNX2_MQ_CONFIG, val);
+	BNX2_WR(bp, BNX2_MQ_CONFIG, val);

 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
-	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
-	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
+	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
+	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);

 	val = (BCM_PAGE_BITS - 8) << 24;
-	REG_WR(bp, BNX2_RV2P_CONFIG, val);
+	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);

 	/* Configure page size. */
-	val = REG_RD(bp, BNX2_TBDR_CONFIG);
+	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
-	REG_WR(bp, BNX2_TBDR_CONFIG, val);
+	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);

 	val = bp->mac_addr[0] +
 	      (bp->mac_addr[1] << 8) +
@@ -4922,14 +4922,14 @@ bnx2_init_chip(struct bnx2 *bp)
 	      bp->mac_addr[3] +
 	      (bp->mac_addr[4] << 8) +
 	      (bp->mac_addr[5] << 16);
-	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
+	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

 	/* Program the MTU.  Also include 4 bytes for CRC32. */
 	mtu = bp->dev->mtu;
 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
 	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
-	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
+	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

 	if (mtu < 1500)
 		mtu = 1500;
@@ -4947,41 +4947,41 @@ bnx2_init_chip(struct bnx2 *bp)
 	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

 	/* Set up how to generate a link change interrupt. */
-	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
+	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

-	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
+	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
 		(u64) bp->status_blk_mapping & 0xffffffff);
-	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
+	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

-	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
+	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
 		(u64) bp->stats_blk_mapping & 0xffffffff);
-	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
+	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
 		(u64) bp->stats_blk_mapping >> 32);

-	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
+	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

-	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
+	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

-	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
+	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

-	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
+	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

-	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
+	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

-	REG_WR(bp, BNX2_HC_COM_TICKS,
+	BNX2_WR(bp, BNX2_HC_COM_TICKS,
 		(bp->com_ticks_int << 16) | bp->com_ticks);

-	REG_WR(bp, BNX2_HC_CMD_TICKS,
+	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);

 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
-		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
+		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
 	else
-		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
-	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
+		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
+	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

 	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
 		val = BNX2_HC_CONFIG_COLLECT_STATS;
@@ -4991,8 +4991,8 @@ bnx2_init_chip(struct bnx2 *bp)
 	}

 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
-		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
+		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
 			BNX2_HC_MSIX_BIT_VECTOR_VAL);

 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
 	}
@@ -5000,7 +5000,7 @@ bnx2_init_chip(struct bnx2 *bp)
 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

-	REG_WR(bp, BNX2_HC_CONFIG, val);
+	BNX2_WR(bp, BNX2_HC_CONFIG, val);

 	if (bp->rx_ticks < 25)
 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
@@ -5011,48 +5011,48 @@ bnx2_init_chip(struct bnx2 *bp)
5011 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) + 5011 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5012 BNX2_HC_SB_CONFIG_1; 5012 BNX2_HC_SB_CONFIG_1;
5013 5013
5014 REG_WR(bp, base, 5014 BNX2_WR(bp, base,
5015 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE | 5015 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5016 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE | 5016 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5017 BNX2_HC_SB_CONFIG_1_ONE_SHOT); 5017 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5018 5018
5019 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF, 5019 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5020 (bp->tx_quick_cons_trip_int << 16) | 5020 (bp->tx_quick_cons_trip_int << 16) |
5021 bp->tx_quick_cons_trip); 5021 bp->tx_quick_cons_trip);
5022 5022
5023 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF, 5023 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5024 (bp->tx_ticks_int << 16) | bp->tx_ticks); 5024 (bp->tx_ticks_int << 16) | bp->tx_ticks);
5025 5025
5026 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF, 5026 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5027 (bp->rx_quick_cons_trip_int << 16) | 5027 (bp->rx_quick_cons_trip_int << 16) |
5028 bp->rx_quick_cons_trip); 5028 bp->rx_quick_cons_trip);
5029 5029
5030 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF, 5030 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5031 (bp->rx_ticks_int << 16) | bp->rx_ticks); 5031 (bp->rx_ticks_int << 16) | bp->rx_ticks);
5032 } 5032 }
5033 5033
5034 /* Clear internal stats counters. */ 5034 /* Clear internal stats counters. */
5035 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW); 5035 BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5036 5036
5037 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); 5037 BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5038 5038
5039 /* Initialize the receive filter. */ 5039 /* Initialize the receive filter. */
5040 bnx2_set_rx_mode(bp->dev); 5040 bnx2_set_rx_mode(bp->dev);
5041 5041
5042 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 5042 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5043 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); 5043 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5044 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; 5044 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5045 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); 5045 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5046 } 5046 }
5047 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, 5047 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5048 1, 0); 5048 1, 0);
5049 5049
5050 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); 5050 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5051 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS); 5051 BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5052 5052
5053 udelay(20); 5053 udelay(20);
5054 5054
5055 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND); 5055 bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5056 5056
5057 return rc; 5057 return rc;
5058} 5058}
@@ -5188,8 +5188,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5188 bnx2_init_rx_context(bp, cid); 5188 bnx2_init_rx_context(bp, cid);
5189 5189
5190 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 5190 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5191 val = REG_RD(bp, BNX2_MQ_MAP_L2_5); 5191 val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5192 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM); 5192 BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5193 } 5193 }
5194 5194
5195 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0); 5195 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
@@ -5209,7 +5209,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5209 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val); 5209 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5210 5210
5211 if (CHIP_NUM(bp) == CHIP_NUM_5709) 5211 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5212 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT); 5212 BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5213 } 5213 }
5214 5214
5215 val = (u64) rxr->rx_desc_mapping[0] >> 32; 5215 val = (u64) rxr->rx_desc_mapping[0] >> 32;
@@ -5246,10 +5246,10 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5246 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ; 5246 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5247 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX; 5247 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5248 5248
5249 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); 5249 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5250 REG_WR16(bp, rxr->rx_bidx_addr, prod); 5250 BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5251 5251
5252 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); 5252 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5253} 5253}
5254 5254
5255static void 5255static void
@@ -5260,15 +5260,15 @@ bnx2_init_all_rings(struct bnx2 *bp)
5260 5260
5261 bnx2_clear_ring_states(bp); 5261 bnx2_clear_ring_states(bp);
5262 5262
5263 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0); 5263 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5264 for (i = 0; i < bp->num_tx_rings; i++) 5264 for (i = 0; i < bp->num_tx_rings; i++)
5265 bnx2_init_tx_ring(bp, i); 5265 bnx2_init_tx_ring(bp, i);
5266 5266
5267 if (bp->num_tx_rings > 1) 5267 if (bp->num_tx_rings > 1)
5268 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) | 5268 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5269 (TX_TSS_CID << 7)); 5269 (TX_TSS_CID << 7));
5270 5270
5271 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0); 5271 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5272 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0); 5272 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5273 5273
5274 for (i = 0; i < bp->num_rx_rings; i++) 5274 for (i = 0; i < bp->num_rx_rings; i++)
@@ -5282,8 +5282,8 @@ bnx2_init_all_rings(struct bnx2 *bp)
5282 5282
5283 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift; 5283 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5284 if ((i % 8) == 7) { 5284 if ((i % 8) == 7) {
5285 REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32); 5285 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5286 REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) | 5286 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5287 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK | 5287 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5288 BNX2_RLUP_RSS_COMMAND_WRITE | 5288 BNX2_RLUP_RSS_COMMAND_WRITE |
5289 BNX2_RLUP_RSS_COMMAND_HASH_MASK); 5289 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
@@ -5294,7 +5294,7 @@ bnx2_init_all_rings(struct bnx2 *bp)
5294 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI | 5294 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5295 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI; 5295 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5296 5296
5297 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val); 5297 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5298 5298
5299 } 5299 }
5300} 5300}
@@ -5784,10 +5784,10 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5784 return -EIO; 5784 return -EIO;
5785 } 5785 }
5786 5786
5787 REG_WR(bp, BNX2_HC_COMMAND, 5787 BNX2_WR(bp, BNX2_HC_COMMAND,
5788 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 5788 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5789 5789
5790 REG_RD(bp, BNX2_HC_COMMAND); 5790 BNX2_RD(bp, BNX2_HC_COMMAND);
5791 5791
5792 udelay(5); 5792 udelay(5);
5793 rx_start_idx = bnx2_get_hw_rx_cons(bnapi); 5793 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
@@ -5805,15 +5805,15 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5805 txr->tx_prod = NEXT_TX_BD(txr->tx_prod); 5805 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5806 txr->tx_prod_bseq += pkt_size; 5806 txr->tx_prod_bseq += pkt_size;
5807 5807
5808 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod); 5808 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5809 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); 5809 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5810 5810
5811 udelay(100); 5811 udelay(100);
5812 5812
5813 REG_WR(bp, BNX2_HC_COMMAND, 5813 BNX2_WR(bp, BNX2_HC_COMMAND,
5814 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 5814 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5815 5815
5816 REG_RD(bp, BNX2_HC_COMMAND); 5816 BNX2_RD(bp, BNX2_HC_COMMAND);
5817 5817
5818 udelay(5); 5818 udelay(5);
5819 5819
@@ -5962,14 +5962,14 @@ bnx2_test_intr(struct bnx2 *bp)
5962 if (!netif_running(bp->dev)) 5962 if (!netif_running(bp->dev))
5963 return -ENODEV; 5963 return -ENODEV;
5964 5964
5965 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; 5965 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5966 5966
5967 /* This register is not touched during run-time. */ 5967 /* This register is not touched during run-time. */
5968 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); 5968 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5969 REG_RD(bp, BNX2_HC_COMMAND); 5969 BNX2_RD(bp, BNX2_HC_COMMAND);
5970 5970
5971 for (i = 0; i < 10; i++) { 5971 for (i = 0; i < 10; i++) {
5972 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) != 5972 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5973 status_idx) { 5973 status_idx) {
5974 5974
5975 break; 5975 break;
@@ -6132,8 +6132,8 @@ bnx2_timer(unsigned long data)
6132 6132
6133 /* workaround occasional corrupted counters */ 6133 /* workaround occasional corrupted counters */
6134 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks) 6134 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6135 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | 6135 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6136 BNX2_HC_COMMAND_STATS_NOW); 6136 BNX2_HC_COMMAND_STATS_NOW);
6137 6137
6138 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { 6138 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6139 if (CHIP_NUM(bp) == CHIP_NUM_5706) 6139 if (CHIP_NUM(bp) == CHIP_NUM_5706)
@@ -6205,13 +6205,13 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6205 const int len = sizeof(bp->irq_tbl[0].name); 6205 const int len = sizeof(bp->irq_tbl[0].name);
6206 6206
6207 bnx2_setup_msix_tbl(bp); 6207 bnx2_setup_msix_tbl(bp);
6208 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1); 6208 BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6209 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE); 6209 BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6210 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE); 6210 BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6211 6211
6212 /* Need to flush the previous three writes to ensure MSI-X 6212 /* Need to flush the previous three writes to ensure MSI-X
6213 * is setup properly */ 6213 * is setup properly */
6214 REG_RD(bp, BNX2_PCI_MSIX_CONTROL); 6214 BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6215 6215
6216 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { 6216 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6217 msix_ent[i].entry = i; 6217 msix_ent[i].entry = i;
@@ -6464,22 +6464,22 @@ bnx2_dump_ftq(struct bnx2 *bp)
6464 netdev_err(dev, "<--- end FTQ dump --->\n"); 6464 netdev_err(dev, "<--- end FTQ dump --->\n");
6465 netdev_err(dev, "<--- start TBDC dump --->\n"); 6465 netdev_err(dev, "<--- start TBDC dump --->\n");
6466 netdev_err(dev, "TBDC free cnt: %ld\n", 6466 netdev_err(dev, "TBDC free cnt: %ld\n",
6467 REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT); 6467 BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6468 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n"); 6468 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6469 for (i = 0; i < 0x20; i++) { 6469 for (i = 0; i < 0x20; i++) {
6470 int j = 0; 6470 int j = 0;
6471 6471
6472 REG_WR(bp, BNX2_TBDC_BD_ADDR, i); 6472 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6473 REG_WR(bp, BNX2_TBDC_CAM_OPCODE, 6473 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6474 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ); 6474 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6475 REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB); 6475 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6476 while ((REG_RD(bp, BNX2_TBDC_COMMAND) & 6476 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6477 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100) 6477 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6478 j++; 6478 j++;
6479 6479
6480 cid = REG_RD(bp, BNX2_TBDC_CID); 6480 cid = BNX2_RD(bp, BNX2_TBDC_CID);
6481 bdidx = REG_RD(bp, BNX2_TBDC_BIDX); 6481 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6482 valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE); 6482 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6483 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n", 6483 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6484 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX, 6484 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6485 bdidx >> 24, (valid >> 8) & 0x0ff); 6485 bdidx >> 24, (valid >> 8) & 0x0ff);
@@ -6500,15 +6500,15 @@ bnx2_dump_state(struct bnx2 *bp)
6500 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2); 6500 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6501 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2); 6501 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6502 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n", 6502 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6503 REG_RD(bp, BNX2_EMAC_TX_STATUS), 6503 BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6504 REG_RD(bp, BNX2_EMAC_RX_STATUS)); 6504 BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6505 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n", 6505 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6506 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL)); 6506 BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6507 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n", 6507 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6508 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS)); 6508 BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6509 if (bp->flags & BNX2_FLAG_USING_MSIX) 6509 if (bp->flags & BNX2_FLAG_USING_MSIX)
6510 netdev_err(dev, "DEBUG: PBA[%08x]\n", 6510 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6511 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE)); 6511 BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6512} 6512}
6513 6513
6514static void 6514static void
@@ -6655,8 +6655,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6655 prod = NEXT_TX_BD(prod); 6655 prod = NEXT_TX_BD(prod);
6656 txr->tx_prod_bseq += skb->len; 6656 txr->tx_prod_bseq += skb->len;
6657 6657
6658 REG_WR16(bp, txr->tx_bidx_addr, prod); 6658 BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6659 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); 6659 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6660 6660
6661 mmiowb(); 6661 mmiowb();
6662 6662
@@ -7030,7 +7030,7 @@ bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7030 offset = reg_boundaries[0]; 7030 offset = reg_boundaries[0];
7031 p += offset; 7031 p += offset;
7032 while (offset < BNX2_REGDUMP_LEN) { 7032 while (offset < BNX2_REGDUMP_LEN) {
7033 *p++ = REG_RD(bp, offset); 7033 *p++ = BNX2_RD(bp, offset);
7034 offset += 4; 7034 offset += 4;
7035 if (offset == reg_boundaries[i + 1]) { 7035 if (offset == reg_boundaries[i + 1]) {
7036 offset = reg_boundaries[i + 2]; 7036 offset = reg_boundaries[i + 2];
@@ -7655,26 +7655,26 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7655 case ETHTOOL_ID_ACTIVE: 7655 case ETHTOOL_ID_ACTIVE:
7656 bnx2_set_power_state(bp, PCI_D0); 7656 bnx2_set_power_state(bp, PCI_D0);
7657 7657
7658 bp->leds_save = REG_RD(bp, BNX2_MISC_CFG); 7658 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7659 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); 7659 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7660 return 1; /* cycle on/off once per second */ 7660 return 1; /* cycle on/off once per second */
7661 7661
7662 case ETHTOOL_ID_ON: 7662 case ETHTOOL_ID_ON:
7663 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE | 7663 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7664 BNX2_EMAC_LED_1000MB_OVERRIDE | 7664 BNX2_EMAC_LED_1000MB_OVERRIDE |
7665 BNX2_EMAC_LED_100MB_OVERRIDE | 7665 BNX2_EMAC_LED_100MB_OVERRIDE |
7666 BNX2_EMAC_LED_10MB_OVERRIDE | 7666 BNX2_EMAC_LED_10MB_OVERRIDE |
7667 BNX2_EMAC_LED_TRAFFIC_OVERRIDE | 7667 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7668 BNX2_EMAC_LED_TRAFFIC); 7668 BNX2_EMAC_LED_TRAFFIC);
7669 break; 7669 break;
7670 7670
7671 case ETHTOOL_ID_OFF: 7671 case ETHTOOL_ID_OFF:
7672 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); 7672 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7673 break; 7673 break;
7674 7674
7675 case ETHTOOL_ID_INACTIVE: 7675 case ETHTOOL_ID_INACTIVE:
7676 REG_WR(bp, BNX2_EMAC_LED, 0); 7676 BNX2_WR(bp, BNX2_EMAC_LED, 0);
7677 REG_WR(bp, BNX2_MISC_CFG, bp->leds_save); 7677 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7678 7678
7679 if (!netif_running(dev)) 7679 if (!netif_running(dev))
7680 bnx2_set_power_state(bp, PCI_D3hot); 7680 bnx2_set_power_state(bp, PCI_D3hot);
@@ -7899,7 +7899,7 @@ poll_bnx2(struct net_device *dev)
7899static void 7899static void
7900bnx2_get_5709_media(struct bnx2 *bp) 7900bnx2_get_5709_media(struct bnx2 *bp)
7901{ 7901{
7902 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL); 7902 u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7903 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID; 7903 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7904 u32 strap; 7904 u32 strap;
7905 7905
@@ -7939,13 +7939,13 @@ bnx2_get_pci_speed(struct bnx2 *bp)
7939{ 7939{
7940 u32 reg; 7940 u32 reg;
7941 7941
7942 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS); 7942 reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7943 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) { 7943 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7944 u32 clkreg; 7944 u32 clkreg;
7945 7945
7946 bp->flags |= BNX2_FLAG_PCIX; 7946 bp->flags |= BNX2_FLAG_PCIX;
7947 7947
7948 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS); 7948 clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7949 7949
7950 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; 7950 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7951 switch (clkreg) { 7951 switch (clkreg) {
@@ -8131,11 +8131,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8131 * Rely on CPU to do target byte swapping on big endian systems 8131 * Rely on CPU to do target byte swapping on big endian systems
8132 * The chip's target access swapping will not swap all accesses 8132 * The chip's target access swapping will not swap all accesses
8133 */ 8133 */
8134 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, 8134 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8135 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 8135 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8136 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); 8136 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8137 8137
8138 bp->chip_id = REG_RD(bp, BNX2_MISC_ID); 8138 bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8139 8139
8140 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 8140 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8141 if (!pci_is_pcie(pdev)) { 8141 if (!pci_is_pcie(pdev)) {
@@ -8198,9 +8198,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8198 8198
8199 /* 5706A0 may falsely detect SERR and PERR. */ 8199 /* 5706A0 may falsely detect SERR and PERR. */
8200 if (CHIP_ID(bp) == CHIP_ID_5706_A0) { 8200 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8201 reg = REG_RD(bp, PCI_COMMAND); 8201 reg = BNX2_RD(bp, PCI_COMMAND);
8202 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); 8202 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8203 REG_WR(bp, PCI_COMMAND, reg); 8203 BNX2_WR(bp, PCI_COMMAND, reg);
8204 } 8204 }
8205 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) && 8205 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8206 !(bp->flags & BNX2_FLAG_PCIX)) { 8206 !(bp->flags & BNX2_FLAG_PCIX)) {
@@ -8358,7 +8358,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8358 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || 8358 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8359 (CHIP_ID(bp) == CHIP_ID_5708_B0) || 8359 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8360 (CHIP_ID(bp) == CHIP_ID_5708_B1) || 8360 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8361 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) { 8361 !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8362 bp->flags |= BNX2_FLAG_NO_WOL; 8362 bp->flags |= BNX2_FLAG_NO_WOL;
8363 bp->wol = 0; 8363 bp->wol = 0;
8364 } 8364 }