Diffstat (limited to 'drivers/net/bnx2.c')

 drivers/net/bnx2.c | 1339 +++++++++++++++++++++++++++++++---------------------
 1 file changed, 806 insertions(+), 533 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 367b6d462708..5ebde67d4297 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -47,6 +47,7 @@
 #include <linux/prefetch.h>
 #include <linux/cache.h>
 #include <linux/zlib.h>
+#include <linux/log2.h>
 
 #include "bnx2.h"
 #include "bnx2_fw.h"
@@ -56,8 +57,8 @@
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.7.5"
-#define DRV_MODULE_RELDATE	"April 29, 2008"
+#define DRV_MODULE_VERSION	"1.7.9"
+#define DRV_MODULE_RELDATE	"July 18, 2008"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -68,7 +69,7 @@ static char version[] __devinitdata =
 	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
@@ -87,6 +88,7 @@ typedef enum {
 	BCM5708S,
 	BCM5709,
 	BCM5709S,
+	BCM5716,
 } board_t;
 
 /* indexed by board_t, above */
@@ -102,9 +104,10 @@ static struct {
 	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
 	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
 	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
+	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
 	};
 
-static struct pci_device_id bnx2_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
 	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
@@ -123,6 +126,8 @@ static struct pci_device_id bnx2_pci_tbl[] = {
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
+	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
 	{ 0, }
 };
 
@@ -226,7 +231,7 @@ static struct flash_spec flash_5709 = {
 
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
-static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
+static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 {
 	u32 diff;
 
@@ -235,7 +240,7 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
 	/* The ring uses 256 indices for 255 entries, one of them
 	 * needs to be skipped.
 	 */
-	diff = bp->tx_prod - bnapi->tx_cons;
+	diff = txr->tx_prod - txr->tx_cons;
 	if (unlikely(diff >= TX_DESC_CNT)) {
 		diff &= 0xffff;
 		if (diff == TX_DESC_CNT)
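The ring arithmetic here is subtle enough to deserve a standalone illustration. The following is a compilable sketch, not driver code: prod and cons are free-running 16-bit counters, and because one BD per ring page is a chain pointer, TX_DESC_CNT (256) indices cover only 255 usable entries. The tail of the function lies outside the lines shown; the sketch fills it in along the same logic.

#include <stdint.h>

#define TX_DESC_CNT	256u
#define MAX_TX_DESC_CNT	(TX_DESC_CNT - 1)

/* Hypothetical helper mirroring bnx2_tx_avail()'s computation. */
static uint32_t tx_avail(uint32_t ring_size, uint16_t prod, uint16_t cons)
{
	uint32_t diff = prod - cons;	/* wraps "negative" to a huge value */

	if (diff >= TX_DESC_CNT) {
		diff &= 0xffff;		/* reduce to the 16-bit distance */
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;	/* ring completely full */
	}
	return ring_size - diff;
}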
@@ -289,7 +294,6 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
 	REG_WR(bp, BNX2_CTX_CTX_CTRL,
 	       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
 	for (i = 0; i < 5; i++) {
-		u32 val;
 		val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
 		if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
 			break;
@@ -488,7 +492,7 @@ bnx2_netif_start(struct bnx2 *bp)
 {
 	if (atomic_dec_and_test(&bp->intr_sem)) {
 		if (netif_running(bp->dev)) {
-			netif_wake_queue(bp->dev);
+			netif_tx_wake_all_queues(bp->dev);
 			bnx2_napi_enable(bp);
 			bnx2_enable_int(bp);
 		}
@@ -496,99 +500,162 @@ bnx2_netif_start(struct bnx2 *bp)
 }
 
 static void
-bnx2_free_mem(struct bnx2 *bp)
+bnx2_free_tx_mem(struct bnx2 *bp)
 {
 	int i;
 
-	for (i = 0; i < bp->ctx_pages; i++) {
-		if (bp->ctx_blk[i]) {
-			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
-					    bp->ctx_blk[i],
-					    bp->ctx_blk_mapping[i]);
-			bp->ctx_blk[i] = NULL;
+	for (i = 0; i < bp->num_tx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+
+		if (txr->tx_desc_ring) {
+			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
+					    txr->tx_desc_ring,
+					    txr->tx_desc_mapping);
+			txr->tx_desc_ring = NULL;
 		}
+		kfree(txr->tx_buf_ring);
+		txr->tx_buf_ring = NULL;
 	}
-	if (bp->status_blk) {
-		pci_free_consistent(bp->pdev, bp->status_stats_size,
-				    bp->status_blk, bp->status_blk_mapping);
-		bp->status_blk = NULL;
-		bp->stats_blk = NULL;
+}
+
+static void
+bnx2_free_rx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_rx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+		int j;
+
+		for (j = 0; j < bp->rx_max_ring; j++) {
+			if (rxr->rx_desc_ring[j])
+				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
+						    rxr->rx_desc_ring[j],
+						    rxr->rx_desc_mapping[j]);
+			rxr->rx_desc_ring[j] = NULL;
+		}
+		if (rxr->rx_buf_ring)
+			vfree(rxr->rx_buf_ring);
+		rxr->rx_buf_ring = NULL;
+
+		for (j = 0; j < bp->rx_max_pg_ring; j++) {
+			if (rxr->rx_pg_desc_ring[j])
+				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
+						    rxr->rx_pg_desc_ring[i],
+						    rxr->rx_pg_desc_mapping[i]);
+			rxr->rx_pg_desc_ring[i] = NULL;
+		}
+		if (rxr->rx_pg_ring)
+			vfree(rxr->rx_pg_ring);
+		rxr->rx_pg_ring = NULL;
 	}
-	if (bp->tx_desc_ring) {
-		pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
-			bp->tx_desc_ring, bp->tx_desc_mapping);
-		bp->tx_desc_ring = NULL;
-	}
-	kfree(bp->tx_buf_ring);
-	bp->tx_buf_ring = NULL;
-	for (i = 0; i < bp->rx_max_ring; i++) {
-		if (bp->rx_desc_ring[i])
-			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
-					    bp->rx_desc_ring[i],
-					    bp->rx_desc_mapping[i]);
-		bp->rx_desc_ring[i] = NULL;
-	}
-	vfree(bp->rx_buf_ring);
-	bp->rx_buf_ring = NULL;
-	for (i = 0; i < bp->rx_max_pg_ring; i++) {
-		if (bp->rx_pg_desc_ring[i])
-			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
-					    bp->rx_pg_desc_ring[i],
-					    bp->rx_pg_desc_mapping[i]);
-		bp->rx_pg_desc_ring[i] = NULL;
-	}
-	if (bp->rx_pg_ring)
-		vfree(bp->rx_pg_ring);
-	bp->rx_pg_ring = NULL;
 }
 
 static int
-bnx2_alloc_mem(struct bnx2 *bp)
+bnx2_alloc_tx_mem(struct bnx2 *bp)
 {
-	int i, status_blk_size;
+	int i;
 
-	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
-	if (bp->tx_buf_ring == NULL)
-		return -ENOMEM;
+	for (i = 0; i < bp->num_tx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 
-	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
-						&bp->tx_desc_mapping);
-	if (bp->tx_desc_ring == NULL)
-		goto alloc_mem_err;
+		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
+		if (txr->tx_buf_ring == NULL)
+			return -ENOMEM;
 
-	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
-	if (bp->rx_buf_ring == NULL)
-		goto alloc_mem_err;
+		txr->tx_desc_ring =
+			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
+					     &txr->tx_desc_mapping);
+		if (txr->tx_desc_ring == NULL)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static int
+bnx2_alloc_rx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_rx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+		int j;
 
-	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
+		rxr->rx_buf_ring =
+			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
+		if (rxr->rx_buf_ring == NULL)
+			return -ENOMEM;
 
-	for (i = 0; i < bp->rx_max_ring; i++) {
-		bp->rx_desc_ring[i] =
-			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
-					     &bp->rx_desc_mapping[i]);
-		if (bp->rx_desc_ring[i] == NULL)
-			goto alloc_mem_err;
+		memset(rxr->rx_buf_ring, 0,
+		       SW_RXBD_RING_SIZE * bp->rx_max_ring);
 
-	}
+		for (j = 0; j < bp->rx_max_ring; j++) {
+			rxr->rx_desc_ring[j] =
+				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
+						     &rxr->rx_desc_mapping[j]);
+			if (rxr->rx_desc_ring[j] == NULL)
+				return -ENOMEM;
 
-	if (bp->rx_pg_ring_size) {
-		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
-					 bp->rx_max_pg_ring);
-		if (bp->rx_pg_ring == NULL)
-			goto alloc_mem_err;
+		}
+
+		if (bp->rx_pg_ring_size) {
+			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
+						  bp->rx_max_pg_ring);
+			if (rxr->rx_pg_ring == NULL)
+				return -ENOMEM;
+
+			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
+			       bp->rx_max_pg_ring);
+		}
+
+		for (j = 0; j < bp->rx_max_pg_ring; j++) {
+			rxr->rx_pg_desc_ring[j] =
+				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
+						     &rxr->rx_pg_desc_mapping[j]);
+			if (rxr->rx_pg_desc_ring[j] == NULL)
+				return -ENOMEM;
 
-		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
-		       bp->rx_max_pg_ring);
+		}
 	}
+	return 0;
+}
 
-	for (i = 0; i < bp->rx_max_pg_ring; i++) {
-		bp->rx_pg_desc_ring[i] =
-			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
-					     &bp->rx_pg_desc_mapping[i]);
-		if (bp->rx_pg_desc_ring[i] == NULL)
-			goto alloc_mem_err;
+static void
+bnx2_free_mem(struct bnx2 *bp)
+{
+	int i;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+	bnx2_free_tx_mem(bp);
+	bnx2_free_rx_mem(bp);
 
+	for (i = 0; i < bp->ctx_pages; i++) {
+		if (bp->ctx_blk[i]) {
+			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
+					    bp->ctx_blk[i],
+					    bp->ctx_blk_mapping[i]);
+			bp->ctx_blk[i] = NULL;
+		}
 	}
+	if (bnapi->status_blk.msi) {
+		pci_free_consistent(bp->pdev, bp->status_stats_size,
+				    bnapi->status_blk.msi,
+				    bp->status_blk_mapping);
+		bnapi->status_blk.msi = NULL;
+		bp->stats_blk = NULL;
+	}
+}
+
+static int
+bnx2_alloc_mem(struct bnx2 *bp)
+{
+	int i, status_blk_size, err;
+	struct bnx2_napi *bnapi;
+	void *status_blk;
 
 	/* Combine status and statistics blocks into one allocation. */
 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
@@ -598,27 +665,37 @@ bnx2_alloc_mem(struct bnx2 *bp)
 	bp->status_stats_size = status_blk_size +
 				sizeof(struct statistics_block);
 
-	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
-					      &bp->status_blk_mapping);
-	if (bp->status_blk == NULL)
+	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
+					  &bp->status_blk_mapping);
+	if (status_blk == NULL)
 		goto alloc_mem_err;
 
-	memset(bp->status_blk, 0, bp->status_stats_size);
+	memset(status_blk, 0, bp->status_stats_size);
 
-	bp->bnx2_napi[0].status_blk = bp->status_blk;
+	bnapi = &bp->bnx2_napi[0];
+	bnapi->status_blk.msi = status_blk;
+	bnapi->hw_tx_cons_ptr =
+		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
+	bnapi->hw_rx_cons_ptr =
+		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
 	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
 		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
-			struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+			struct status_block_msix *sblk;
 
-			bnapi->status_blk_msix = (void *)
-				((unsigned long) bp->status_blk +
-				 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+			bnapi = &bp->bnx2_napi[i];
+
+			sblk = (void *) (status_blk +
+					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+			bnapi->status_blk.msix = sblk;
+			bnapi->hw_tx_cons_ptr =
+				&sblk->status_tx_quick_consumer_index;
+			bnapi->hw_rx_cons_ptr =
+				&sblk->status_rx_quick_consumer_index;
 			bnapi->int_num = i << 24;
 		}
 	}
 
-	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
-				  status_blk_size);
+	bp->stats_blk = status_blk + status_blk_size;
 
 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
 
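The point of the new status_blk union and the hw_*_cons_ptr members is to remove per-packet branching on the vector number: all status blocks live in one DMA allocation, carved at BNX2_SBLK_MSIX_ALIGN_SIZE offsets, and each NAPI instance caches direct pointers to its own consumer indices. A simplified, self-contained sketch of the pattern (types and field names are illustrative stand-ins, not the driver's):

#include <stdint.h>

struct sblk_msi  { volatile uint16_t tx_cons0, rx_cons0; };
struct sblk_msix { volatile uint16_t tx_cons, rx_cons; };

struct napi_ctx {
	union {
		struct sblk_msi  *msi;
		struct sblk_msix *msix;
	} status_blk;
	volatile uint16_t *hw_tx_cons_ptr;	/* points into status_blk */
	volatile uint16_t *hw_rx_cons_ptr;
};

static void napi_ctx_setup(struct napi_ctx *n, void *blk, int vec)
{
	if (vec == 0) {
		n->status_blk.msi = blk;
		n->hw_tx_cons_ptr = &n->status_blk.msi->tx_cons0;
		n->hw_rx_cons_ptr = &n->status_blk.msi->rx_cons0;
	} else {	/* blk already offset by vec * ALIGN_SIZE */
		n->status_blk.msix = blk;
		n->hw_tx_cons_ptr = &n->status_blk.msix->tx_cons;
		n->hw_rx_cons_ptr = &n->status_blk.msix->rx_cons;
	}
}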
@@ -634,6 +711,15 @@ bnx2_alloc_mem(struct bnx2 *bp)
 				goto alloc_mem_err;
 		}
 	}
+
+	err = bnx2_alloc_rx_mem(bp);
+	if (err)
+		goto alloc_mem_err;
+
+	err = bnx2_alloc_tx_mem(bp);
+	if (err)
+		goto alloc_mem_err;
+
 	return 0;
 
 alloc_mem_err:
@@ -993,9 +1079,9 @@ bnx2_copper_linkup(struct bnx2 *bp)
 }
 
 static void
-bnx2_init_rx_context0(struct bnx2 *bp)
+bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
 {
-	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
+	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
 
 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
 	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
@@ -1028,6 +1114,19 @@ bnx2_init_rx_context0(struct bnx2 *bp)
 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
 }
 
+static void
+bnx2_init_all_rx_contexts(struct bnx2 *bp)
+{
+	int i;
+	u32 cid;
+
+	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
+		if (i == 1)
+			cid = RX_RSS_CID;
+		bnx2_init_rx_context(bp, cid);
+	}
+}
+
 static int
 bnx2_set_mac_link(struct bnx2 *bp)
 {
@@ -1093,7 +1192,7 @@ bnx2_set_mac_link(struct bnx2 *bp)
 	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
 
 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
-		bnx2_init_rx_context0(bp);
+		bnx2_init_all_rx_contexts(bp);
 
 	return 0;
 }
@@ -1392,7 +1491,7 @@ bnx2_phy_get_pause_adv(struct bnx2 *bp)
 	return adv;
 }
 
-static int bnx2_fw_sync(struct bnx2 *, u32, int);
+static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
 
 static int
 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
@@ -1445,7 +1544,7 @@ bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
 	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
 
 	spin_unlock_bh(&bp->phy_lock);
-	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
+	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
 	spin_lock_bh(&bp->phy_lock);
 
 	return 0;
@@ -1875,7 +1974,7 @@ bnx2_setup_phy(struct bnx2 *bp, u8 port)
 }
 
 static int
-bnx2_init_5709s_phy(struct bnx2 *bp)
+bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
 {
 	u32 val;
 
@@ -1890,7 +1989,8 @@ bnx2_init_5709s_phy(struct bnx2 *bp)
 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
 
 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
-	bnx2_reset_phy(bp);
+	if (reset_phy)
+		bnx2_reset_phy(bp);
 
 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
 
@@ -1924,11 +2024,12 @@ bnx2_init_5709s_phy(struct bnx2 *bp)
 }
 
 static int
-bnx2_init_5708s_phy(struct bnx2 *bp)
+bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
 {
 	u32 val;
 
-	bnx2_reset_phy(bp);
+	if (reset_phy)
+		bnx2_reset_phy(bp);
 
 	bp->mii_up1 = BCM5708S_UP1;
 
@@ -1981,9 +2082,10 @@ bnx2_init_5708s_phy(struct bnx2 *bp)
 }
 
 static int
-bnx2_init_5706s_phy(struct bnx2 *bp)
+bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
 {
-	bnx2_reset_phy(bp);
+	if (reset_phy)
+		bnx2_reset_phy(bp);
 
 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
 
@@ -2018,11 +2120,12 @@ bnx2_init_5706s_phy(struct bnx2 *bp)
 }
 
 static int
-bnx2_init_copper_phy(struct bnx2 *bp)
+bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
 {
 	u32 val;
 
-	bnx2_reset_phy(bp);
+	if (reset_phy)
+		bnx2_reset_phy(bp);
 
 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
 		bnx2_write_phy(bp, 0x18, 0x0c00);
@@ -2070,7 +2173,7 @@ bnx2_init_copper_phy(struct bnx2 *bp)
 
 
 static int
-bnx2_init_phy(struct bnx2 *bp)
+bnx2_init_phy(struct bnx2 *bp, int reset_phy)
 {
 	u32 val;
 	int rc = 0;
@@ -2096,14 +2199,14 @@ bnx2_init_phy(struct bnx2 *bp)
 
 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
 		if (CHIP_NUM(bp) == CHIP_NUM_5706)
-			rc = bnx2_init_5706s_phy(bp);
+			rc = bnx2_init_5706s_phy(bp, reset_phy);
 		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
-			rc = bnx2_init_5708s_phy(bp);
+			rc = bnx2_init_5708s_phy(bp, reset_phy);
 		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
-			rc = bnx2_init_5709s_phy(bp);
+			rc = bnx2_init_5709s_phy(bp, reset_phy);
 	}
 	else {
-		rc = bnx2_init_copper_phy(bp);
+		rc = bnx2_init_copper_phy(bp, reset_phy);
 	}
 
 setup_phy:
@@ -2159,7 +2262,7 @@ bnx2_set_phy_loopback(struct bnx2 *bp)
 }
 
 static int
-bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
+bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
 {
 	int i;
 	u32 val;
@@ -2169,6 +2272,9 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
 
 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
 
+	if (!ack)
+		return 0;
+
 	/* wait for an acknowledgement. */
 	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
 		msleep(10);
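With the extra argument, bnx2_fw_sync() now supports fire-and-forget posts as well as the original post-and-wait handshake. A userspace model of the contract (the mailbox, masks, and loop count are stand-ins; the real driver writes BNX2_DRV_MB and polls the firmware mailbox for the echoed sequence bits):

#include <stdint.h>
#include <stdio.h>

#define ACK_TIMEOUT_LOOPS	100		/* ~FW_ACK_TIME_OUT_MS / 10 */
#define SEQ_MASK		0xffff0000u	/* stand-in sequence field */

static volatile uint32_t fw_mb;	/* firmware echoes the sequence here */

static int fw_sync(uint32_t msg_data, int ack, int silent)
{
	/* post msg_data to the driver mailbox (elided) */
	if (!ack)
		return 0;			/* fire-and-forget */

	for (int i = 0; i < ACK_TIMEOUT_LOOPS; i++) {
		/* msleep(10) in the real driver */
		if ((fw_mb & SEQ_MASK) == (msg_data & SEQ_MASK))
			return 0;		/* firmware acknowledged */
	}
	if (!silent)
		fprintf(stderr, "fw sync timeout\n");
	return -1;
}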
@@ -2345,28 +2451,27 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
 }
 
 static void
-bnx2_set_mac_addr(struct bnx2 *bp)
+bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
 {
 	u32 val;
-	u8 *mac_addr = bp->dev->dev_addr;
 
 	val = (mac_addr[0] << 8) | mac_addr[1];
 
-	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
+	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
 
 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
 		(mac_addr[4] << 8) | mac_addr[5];
 
-	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
+	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
 }
 
 static inline int
-bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
+bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 {
 	dma_addr_t mapping;
-	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
+	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
 	struct rx_bd *rxbd =
-		&bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
+		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
 	struct page *page = alloc_page(GFP_ATOMIC);
 
 	if (!page)
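The pos argument turns bnx2_set_mac_addr() into a general perfect-match filter writer: each filter slot is a pair of EMAC MATCH registers 8 bytes apart. The packing itself is easy to verify in isolation (compilable sketch, not driver code):

#include <stdint.h>

/* High word: bytes 0-1 of the MAC; low word: bytes 2-5. */
static uint32_t mac_match_hi(const uint8_t mac[6])
{
	return ((uint32_t)mac[0] << 8) | mac[1];
}

static uint32_t mac_match_lo(const uint8_t mac[6])
{
	return ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
	       ((uint32_t)mac[4] << 8) | mac[5];
}

/* Slot `pos` then lands at BNX2_EMAC_MAC_MATCH0 + pos * 8 (high word)
 * and BNX2_EMAC_MAC_MATCH1 + pos * 8 (low word). */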
@@ -2381,9 +2486,9 @@ bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
 }
 
 static void
-bnx2_free_rx_page(struct bnx2 *bp, u16 index)
+bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 {
-	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
+	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
 	struct page *page = rx_pg->page;
 
 	if (!page)
@@ -2397,12 +2502,12 @@ bnx2_free_rx_page(struct bnx2 *bp, u16 index)
 }
 
 static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 {
 	struct sk_buff *skb;
-	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
+	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
 	dma_addr_t mapping;
-	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
+	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
 	unsigned long align;
 
 	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
@@ -2422,7 +2527,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
 
-	bnapi->rx_prod_bseq += bp->rx_buf_use_size;
+	rxr->rx_prod_bseq += bp->rx_buf_use_size;
 
 	return 0;
 }
@@ -2430,7 +2535,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
 static int
 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
 {
-	struct status_block *sblk = bnapi->status_blk;
+	struct status_block *sblk = bnapi->status_blk.msi;
 	u32 new_link_state, old_link_state;
 	int is_set = 1;
 
@@ -2466,11 +2571,9 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
 {
 	u16 cons;
 
-	if (bnapi->int_num == 0)
-		cons = bnapi->status_blk->status_tx_quick_consumer_index0;
-	else
-		cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
-
+	/* Tell compiler that status block fields can change. */
+	barrier();
+	cons = *bnapi->hw_tx_cons_ptr;
 	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
 		cons++;
 	return cons;
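The barrier() is a compiler-only fence: the status block is updated by DMA, so without it the compiler could legally keep an earlier load of *hw_tx_cons_ptr cached in a register and never observe the hardware's update. Portable C gets the same effect with a volatile access, as in this sketch:

#include <stdint.h>

/* Force a fresh load from memory the hardware may have written. */
static inline uint16_t read_hw_cons(const uint16_t *p)
{
	return *(const volatile uint16_t *)p;
}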
@@ -2479,11 +2582,16 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
 static int
 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 {
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 	u16 hw_cons, sw_cons, sw_ring_cons;
-	int tx_pkt = 0;
+	int tx_pkt = 0, index;
+	struct netdev_queue *txq;
+
+	index = (bnapi - bp->bnx2_napi);
+	txq = netdev_get_tx_queue(bp->dev, index);
 
 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
-	sw_cons = bnapi->tx_cons;
+	sw_cons = txr->tx_cons;
 
 	while (sw_cons != hw_cons) {
 		struct sw_bd *tx_buf;
@@ -2492,7 +2600,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 		sw_ring_cons = TX_RING_IDX(sw_cons);
 
-		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
+		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
 		skb = tx_buf->skb;
 
 		/* partial BD completions possible with TSO packets */
@@ -2522,7 +2630,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 			pci_unmap_page(bp->pdev,
 				pci_unmap_addr(
-					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
+					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
 					mapping),
 				skb_shinfo(skb)->frags[i].size,
 				PCI_DMA_TODEVICE);
@@ -2538,44 +2646,46 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		hw_cons = bnx2_get_hw_tx_cons(bnapi);
 	}
 
-	bnapi->hw_tx_cons = hw_cons;
-	bnapi->tx_cons = sw_cons;
+	txr->hw_tx_cons = hw_cons;
+	txr->tx_cons = sw_cons;
+
 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
-	 * before checking for netif_queue_stopped().  Without the
+	 * before checking for netif_tx_queue_stopped().  Without the
 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
 	 * will miss it and cause the queue to be stopped forever.
 	 */
 	smp_mb();
 
-	if (unlikely(netif_queue_stopped(bp->dev)) &&
-	    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
-		netif_tx_lock(bp->dev);
-		if ((netif_queue_stopped(bp->dev)) &&
-		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
-			netif_wake_queue(bp->dev);
-		netif_tx_unlock(bp->dev);
+	if (unlikely(netif_tx_queue_stopped(txq)) &&
+	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+		__netif_tx_lock(txq, smp_processor_id());
+		if ((netif_tx_queue_stopped(txq)) &&
+		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
+			netif_tx_wake_queue(txq);
+		__netif_tx_unlock(txq);
 	}
+
 	return tx_pkt;
 }
 
 static void
-bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
+bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 			struct sk_buff *skb, int count)
 {
 	struct sw_pg *cons_rx_pg, *prod_rx_pg;
 	struct rx_bd *cons_bd, *prod_bd;
 	dma_addr_t mapping;
 	int i;
-	u16 hw_prod = bnapi->rx_pg_prod, prod;
-	u16 cons = bnapi->rx_pg_cons;
+	u16 hw_prod = rxr->rx_pg_prod, prod;
+	u16 cons = rxr->rx_pg_cons;
 
 	for (i = 0; i < count; i++) {
 		prod = RX_PG_RING_IDX(hw_prod);
 
-		prod_rx_pg = &bp->rx_pg_ring[prod];
-		cons_rx_pg = &bp->rx_pg_ring[cons];
-		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
-		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+		prod_rx_pg = &rxr->rx_pg_ring[prod];
+		cons_rx_pg = &rxr->rx_pg_ring[cons];
+		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 
 		if (i == 0 && skb) {
 			struct page *page;
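The smp_mb() in the queue-wake logic earlier in this hunk pairs with a matching barrier in bnx2_start_xmit(): the completion path publishes tx_cons before testing the stopped bit, while the transmit path sets the stopped bit before re-reading tx_cons, so at least one side always observes the other's update. A minimal C11 model of that handshake (illustrative only; the threshold and names are made up):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint prod, cons;
static atomic_bool stopped;

static unsigned avail(void) { return 255 - (prod - cons); }

void tx_complete(unsigned new_cons)		/* consumer side */
{
	atomic_store(&cons, new_cons);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_load(&stopped) && avail() > 16)
		atomic_store(&stopped, false);		/* wake the queue */
}

bool tx_may_send(void)				/* producer side */
{
	if (avail() > 16)
		return true;
	atomic_store(&stopped, true);			/* stop the queue */
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with above */
	if (avail() > 16) {				/* lost the race */
		atomic_store(&stopped, false);
		return true;
	}
	return false;
}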
@@ -2604,25 +2714,25 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
 		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
 		hw_prod = NEXT_RX_BD(hw_prod);
 	}
-	bnapi->rx_pg_prod = hw_prod;
-	bnapi->rx_pg_cons = cons;
+	rxr->rx_pg_prod = hw_prod;
+	rxr->rx_pg_cons = cons;
 }
 
 static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
-	u16 cons, u16 prod)
+bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+		  struct sk_buff *skb, u16 cons, u16 prod)
 {
 	struct sw_bd *cons_rx_buf, *prod_rx_buf;
 	struct rx_bd *cons_bd, *prod_bd;
 
-	cons_rx_buf = &bp->rx_buf_ring[cons];
-	prod_rx_buf = &bp->rx_buf_ring[prod];
+	cons_rx_buf = &rxr->rx_buf_ring[cons];
+	prod_rx_buf = &rxr->rx_buf_ring[prod];
 
 	pci_dma_sync_single_for_device(bp->pdev,
 		pci_unmap_addr(cons_rx_buf, mapping),
-		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
-	bnapi->rx_prod_bseq += bp->rx_buf_use_size;
+	rxr->rx_prod_bseq += bp->rx_buf_use_size;
 
 	prod_rx_buf->skb = skb;
 
@@ -2632,33 +2742,33 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 	pci_unmap_addr_set(prod_rx_buf, mapping,
 			pci_unmap_addr(cons_rx_buf, mapping));
 
-	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
-	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
 
 static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 	unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
 	u32 ring_idx)
 {
 	int err;
 	u16 prod = ring_idx & 0xffff;
 
-	err = bnx2_alloc_rx_skb(bp, bnapi, prod);
+	err = bnx2_alloc_rx_skb(bp, rxr, prod);
 	if (unlikely(err)) {
-		bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
+		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
 		if (hdr_len) {
 			unsigned int raw_len = len + 4;
 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
 
-			bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
+			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
 		}
 		return err;
 	}
 
-	skb_reserve(skb, bp->rx_offset);
+	skb_reserve(skb, BNX2_RX_OFFSET);
 	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
 		PCI_DMA_FROMDEVICE);
 
@@ -2668,8 +2778,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 	} else {
 		unsigned int i, frag_len, frag_size, pages;
 		struct sw_pg *rx_pg;
-		u16 pg_cons = bnapi->rx_pg_cons;
-		u16 pg_prod = bnapi->rx_pg_prod;
+		u16 pg_cons = rxr->rx_pg_cons;
+		u16 pg_prod = rxr->rx_pg_prod;
 
 		frag_size = len + 4 - hdr_len;
 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
@@ -2680,9 +2790,9 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 			if (unlikely(frag_len <= 4)) {
 				unsigned int tail = 4 - frag_len;
 
-				bnapi->rx_pg_cons = pg_cons;
-				bnapi->rx_pg_prod = pg_prod;
-				bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
+				rxr->rx_pg_cons = pg_cons;
+				rxr->rx_pg_prod = pg_prod;
+				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
 							pages - i);
 				skb->len -= tail;
 				if (i == 0) {
@@ -2696,7 +2806,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 				}
 				return 0;
 			}
-			rx_pg = &bp->rx_pg_ring[pg_cons];
+			rx_pg = &rxr->rx_pg_ring[pg_cons];
 
 			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
@@ -2707,11 +2817,12 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
 			rx_pg->page = NULL;
 
-			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
+			err = bnx2_alloc_rx_page(bp, rxr,
+						 RX_PG_RING_IDX(pg_prod));
 			if (unlikely(err)) {
-				bnapi->rx_pg_cons = pg_cons;
-				bnapi->rx_pg_prod = pg_prod;
-				bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
+				rxr->rx_pg_cons = pg_cons;
+				rxr->rx_pg_prod = pg_prod;
+				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
 							pages - i);
 				return err;
 			}
@@ -2724,8 +2835,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 			pg_prod = NEXT_RX_BD(pg_prod);
 			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
 		}
-		bnapi->rx_pg_prod = pg_prod;
-		bnapi->rx_pg_cons = pg_cons;
+		rxr->rx_pg_prod = pg_prod;
+		rxr->rx_pg_cons = pg_cons;
 	}
 	return 0;
 }
@@ -2733,8 +2844,11 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 static inline u16
 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
 {
-	u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
+	u16 cons;
 
+	/* Tell compiler that status block fields can change. */
+	barrier();
+	cons = *bnapi->hw_rx_cons_ptr;
 	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
 		cons++;
 	return cons;
@@ -2743,13 +2857,14 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
 static int
 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 {
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
 	struct l2_fhdr *rx_hdr;
 	int rx_pkt = 0, pg_ring_used = 0;
 
 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
-	sw_cons = bnapi->rx_cons;
-	sw_prod = bnapi->rx_prod;
+	sw_cons = rxr->rx_cons;
+	sw_prod = rxr->rx_prod;
 
 	/* Memory barrier necessary as speculative reads of the rx
 	 * buffer can be ahead of the index in the status block
@@ -2765,7 +2880,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		sw_ring_cons = RX_RING_IDX(sw_cons);
 		sw_ring_prod = RX_RING_IDX(sw_prod);
 
-		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
+		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
 		skb = rx_buf->skb;
 
 		rx_buf->skb = NULL;
@@ -2773,7 +2888,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		dma_addr = pci_unmap_addr(rx_buf, mapping);
 
 		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
-			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
+			PCI_DMA_FROMDEVICE);
 
 		rx_hdr = (struct l2_fhdr *) skb->data;
 		len = rx_hdr->l2_fhdr_pkt_len;
@@ -2785,7 +2901,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			      L2_FHDR_ERRORS_TOO_SHORT |
 			      L2_FHDR_ERRORS_GIANT_FRAME)) {
 
-			bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
+			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
 					  sw_ring_prod);
 			goto next_rx;
 		}
@@ -2805,22 +2921,23 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 			new_skb = netdev_alloc_skb(bp->dev, len + 2);
 			if (new_skb == NULL) {
-				bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
+				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
 						  sw_ring_prod);
 				goto next_rx;
 			}
 
 			/* aligned copy */
-			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
+			skb_copy_from_linear_data_offset(skb,
+							 BNX2_RX_OFFSET - 2,
 				      new_skb->data, len + 2);
 			skb_reserve(new_skb, 2);
 			skb_put(new_skb, len);
 
-			bnx2_reuse_rx_skb(bp, bnapi, skb,
+			bnx2_reuse_rx_skb(bp, rxr, skb,
 				sw_ring_cons, sw_ring_prod);
 
 			skb = new_skb;
-		} else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
+		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
 			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
 			goto next_rx;
 
@@ -2869,16 +2986,15 @@ next_rx:
 			rmb();
 		}
 	}
-	bnapi->rx_cons = sw_cons;
-	bnapi->rx_prod = sw_prod;
+	rxr->rx_cons = sw_cons;
+	rxr->rx_prod = sw_prod;
 
 	if (pg_ring_used)
-		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
-			 bnapi->rx_pg_prod);
+		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
 
-	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
+	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
 
-	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
+	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
 
 	mmiowb();
 
@@ -2892,11 +3008,11 @@ next_rx:
 static irqreturn_t
 bnx2_msi(int irq, void *dev_instance)
 {
-	struct net_device *dev = dev_instance;
-	struct bnx2 *bp = netdev_priv(dev);
-	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	struct bnx2_napi *bnapi = dev_instance;
+	struct bnx2 *bp = bnapi->bp;
+	struct net_device *dev = bp->dev;
 
-	prefetch(bnapi->status_blk);
+	prefetch(bnapi->status_blk.msi);
 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
@@ -2913,11 +3029,11 @@ bnx2_msi(int irq, void *dev_instance)
 static irqreturn_t
 bnx2_msi_1shot(int irq, void *dev_instance)
 {
-	struct net_device *dev = dev_instance;
-	struct bnx2 *bp = netdev_priv(dev);
-	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	struct bnx2_napi *bnapi = dev_instance;
+	struct bnx2 *bp = bnapi->bp;
+	struct net_device *dev = bp->dev;
 
-	prefetch(bnapi->status_blk);
+	prefetch(bnapi->status_blk.msi);
 
 	/* Return here if interrupt is disabled. */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
@@ -2931,10 +3047,10 @@ bnx2_msi_1shot(int irq, void *dev_instance)
 static irqreturn_t
 bnx2_interrupt(int irq, void *dev_instance)
 {
-	struct net_device *dev = dev_instance;
-	struct bnx2 *bp = netdev_priv(dev);
-	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
-	struct status_block *sblk = bnapi->status_blk;
+	struct bnx2_napi *bnapi = dev_instance;
+	struct bnx2 *bp = bnapi->bp;
+	struct net_device *dev = bp->dev;
+	struct status_block *sblk = bnapi->status_blk.msi;
 
 	/* When using INTx, it is possible for the interrupt to arrive
 	 * at the CPU before the status block posted prior to the
@@ -2968,21 +3084,16 @@ bnx2_interrupt(int irq, void *dev_instance)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t
-bnx2_tx_msix(int irq, void *dev_instance)
+static inline int
+bnx2_has_fast_work(struct bnx2_napi *bnapi)
 {
-	struct net_device *dev = dev_instance;
-	struct bnx2 *bp = netdev_priv(dev);
-	struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
-
-	prefetch(bnapi->status_blk_msix);
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 
-	/* Return here if interrupt is disabled. */
-	if (unlikely(atomic_read(&bp->intr_sem) != 0))
-		return IRQ_HANDLED;
-
-	netif_rx_schedule(dev, &bnapi->napi);
-	return IRQ_HANDLED;
+	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
+	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
+		return 1;
+	return 0;
 }
 
 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
@@ -2991,10 +3102,9 @@ bnx2_tx_msix(int irq, void *dev_instance)
 static inline int
 bnx2_has_work(struct bnx2_napi *bnapi)
 {
-	struct status_block *sblk = bnapi->status_blk;
+	struct status_block *sblk = bnapi->status_blk.msi;
 
-	if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
-	    (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
+	if (bnx2_has_fast_work(bnapi))
 		return 1;
 
 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
@@ -3004,33 +3114,9 @@ bnx2_has_work(struct bnx2_napi *bnapi)
 	return 0;
 }
 
-static int bnx2_tx_poll(struct napi_struct *napi, int budget)
-{
-	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
-	struct bnx2 *bp = bnapi->bp;
-	int work_done = 0;
-	struct status_block_msix *sblk = bnapi->status_blk_msix;
-
-	do {
-		work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
-		if (unlikely(work_done >= budget))
-			return work_done;
-
-		bnapi->last_status_idx = sblk->status_idx;
-		rmb();
-	} while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
-
-	netif_rx_complete(bp->dev, napi);
-	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
-	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
-	       bnapi->last_status_idx);
-	return work_done;
-}
-
-static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
-			  int work_done, int budget)
+static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
-	struct status_block *sblk = bnapi->status_blk;
+	struct status_block *sblk = bnapi->status_blk.msi;
 	u32 status_attn_bits = sblk->status_attn_bits;
 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
 
@@ -3046,24 +3132,60 @@ static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
 		REG_RD(bp, BNX2_HC_COMMAND);
 	}
+}
 
-	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
+static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
+			  int work_done, int budget)
+{
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
 		bnx2_tx_int(bp, bnapi, 0);
 
-	if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
+	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
 
 	return work_done;
 }
 
+static int bnx2_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+	struct bnx2 *bp = bnapi->bp;
+	int work_done = 0;
+	struct status_block_msix *sblk = bnapi->status_blk.msix;
+
+	while (1) {
+		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
+		if (unlikely(work_done >= budget))
+			break;
+
+		bnapi->last_status_idx = sblk->status_idx;
+		/* status idx must be read before checking for more work. */
+		rmb();
+		if (likely(!bnx2_has_fast_work(bnapi))) {
+
+			netif_rx_complete(bp->dev, napi);
+			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+			       bnapi->last_status_idx);
+			break;
+		}
+	}
+	return work_done;
+}
+
 static int bnx2_poll(struct napi_struct *napi, int budget)
 {
 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
 	struct bnx2 *bp = bnapi->bp;
 	int work_done = 0;
-	struct status_block *sblk = bnapi->status_blk;
+	struct status_block *sblk = bnapi->status_blk.msi;
 
 	while (1) {
+		bnx2_poll_link(bp, bnapi);
+
 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
 
 		if (unlikely(work_done >= budget))
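bnx2_poll_msix() is the per-vector variant of the usual NAPI loop shape: poll until the budget is spent (stay scheduled), or until re-checking after recording the status index shows no work, then complete NAPI and re-arm the vector. The skeleton, with stubs standing in for driver work (hypothetical names, compiles as-is):

static int more_work(void)                { return 0; }	/* stub */
static int do_work(int done, int budget)  { (void)budget; return done; }
static void record_status_idx(void)       { }	/* followed by rmb() */
static void complete_and_reenable(void)   { }	/* netif_rx_complete + INT_ACK */

static int poll_sketch(int budget)
{
	int work_done = 0;

	for (;;) {
		work_done = do_work(work_done, budget);
		if (work_done >= budget)
			break;		/* budget exhausted: stay in polling */
		record_status_idx();	/* must precede the re-check */
		if (!more_work()) {
			complete_and_reenable();
			break;
		}
	}
	return work_done;
}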
@@ -3106,6 +3228,7 @@ bnx2_set_rx_mode(struct net_device *dev)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 	u32 rx_mode, sort_mode;
+	struct dev_addr_list *uc_ptr;
 	int i;
 
 	spin_lock_bh(&bp->phy_lock);
@@ -3161,6 +3284,25 @@ bnx2_set_rx_mode(struct net_device *dev)
 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
 	}
 
+	uc_ptr = NULL;
+	if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
+		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
+		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+			     BNX2_RPM_SORT_USER0_PROM_VLAN;
+	} else if (!(dev->flags & IFF_PROMISC)) {
+		uc_ptr = dev->uc_list;
+
+		/* Add all entries into to the match filter list */
+		for (i = 0; i < dev->uc_count; i++) {
+			bnx2_set_mac_addr(bp, uc_ptr->da_addr,
+					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
+			sort_mode |= (1 <<
+				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
+			uc_ptr = uc_ptr->next;
+		}
+
+	}
+
 	if (rx_mode != bp->rx_mode) {
 		bp->rx_mode = rx_mode;
 		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
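Each accepted unicast address occupies one perfect-match slot, and the corresponding enable bit in the RPM sort register is simply bit (slot index); if the list overflows the slots, the device falls back to promiscuous mode. A compilable sketch of the bitmap construction (the offset constant is a stand-in; the real BNX2_START_UNICAST_ADDRESS_INDEX lives in bnx2.h):

#include <stdint.h>

#define START_UC_IDX	4u	/* stand-in: low slots are reserved */

static uint32_t uc_sort_bits(unsigned uc_count)
{
	uint32_t sort = 0;

	for (unsigned i = 0; i < uc_count; i++)
		sort |= 1u << (i + START_UC_IDX);
	return sort;		/* OR'd into BNX2_RPM_SORT_USER0 */
}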
@@ -3213,7 +3355,7 @@ load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
 }
 
 static int
-load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
+load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
 {
 	u32 offset;
 	u32 val;
@@ -3297,7 +3439,6 @@ load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
 static int
 bnx2_init_cpus(struct bnx2 *bp)
 {
-	struct cpu_reg cpu_reg;
 	struct fw_info *fw;
 	int rc, rv2p_len;
 	void *text, *rv2p;
@@ -3333,122 +3474,57 @@ bnx2_init_cpus(struct bnx2 *bp)
3333 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2); 3474 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3334 3475
3335 /* Initialize the RX Processor. */ 3476 /* Initialize the RX Processor. */
3336 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3337 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3338 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3339 cpu_reg.state = BNX2_RXP_CPU_STATE;
3340 cpu_reg.state_value_clear = 0xffffff;
3341 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3342 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3343 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3344 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3345 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3346 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3347 cpu_reg.mips_view_base = 0x8000000;
3348
3349 if (CHIP_NUM(bp) == CHIP_NUM_5709) 3477 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3350 fw = &bnx2_rxp_fw_09; 3478 fw = &bnx2_rxp_fw_09;
3351 else 3479 else
3352 fw = &bnx2_rxp_fw_06; 3480 fw = &bnx2_rxp_fw_06;
3353 3481
3354 fw->text = text; 3482 fw->text = text;
3355 rc = load_cpu_fw(bp, &cpu_reg, fw); 3483 rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3356 if (rc) 3484 if (rc)
3357 goto init_cpu_err; 3485 goto init_cpu_err;
3358 3486
3359 /* Initialize the TX Processor. */ 3487 /* Initialize the TX Processor. */
3360 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3361 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3362 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3363 cpu_reg.state = BNX2_TXP_CPU_STATE;
3364 cpu_reg.state_value_clear = 0xffffff;
3365 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3366 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3367 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3368 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3369 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3370 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3371 cpu_reg.mips_view_base = 0x8000000;
3372
3373 if (CHIP_NUM(bp) == CHIP_NUM_5709) 3488 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3374 fw = &bnx2_txp_fw_09; 3489 fw = &bnx2_txp_fw_09;
3375 else 3490 else
3376 fw = &bnx2_txp_fw_06; 3491 fw = &bnx2_txp_fw_06;
3377 3492
3378 fw->text = text; 3493 fw->text = text;
3379 rc = load_cpu_fw(bp, &cpu_reg, fw); 3494 rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3380 if (rc) 3495 if (rc)
3381 goto init_cpu_err; 3496 goto init_cpu_err;
3382 3497
3383 /* Initialize the TX Patch-up Processor. */ 3498 /* Initialize the TX Patch-up Processor. */
3384 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3385 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3386 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3387 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3388 cpu_reg.state_value_clear = 0xffffff;
3389 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3390 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3391 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3392 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3393 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3394 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3395 cpu_reg.mips_view_base = 0x8000000;
3396
3397 if (CHIP_NUM(bp) == CHIP_NUM_5709) 3499 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3398 fw = &bnx2_tpat_fw_09; 3500 fw = &bnx2_tpat_fw_09;
3399 else 3501 else
3400 fw = &bnx2_tpat_fw_06; 3502 fw = &bnx2_tpat_fw_06;
3401 3503
3402 fw->text = text; 3504 fw->text = text;
3403 rc = load_cpu_fw(bp, &cpu_reg, fw); 3505 rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3404 if (rc) 3506 if (rc)
3405 goto init_cpu_err; 3507 goto init_cpu_err;
3406 3508
3407 /* Initialize the Completion Processor. */ 3509 /* Initialize the Completion Processor. */
3408 cpu_reg.mode = BNX2_COM_CPU_MODE;
3409 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3410 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3411 cpu_reg.state = BNX2_COM_CPU_STATE;
3412 cpu_reg.state_value_clear = 0xffffff;
3413 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3414 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3415 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3416 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3417 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3418 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3419 cpu_reg.mips_view_base = 0x8000000;
3420
3421 if (CHIP_NUM(bp) == CHIP_NUM_5709) 3510 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3422 fw = &bnx2_com_fw_09; 3511 fw = &bnx2_com_fw_09;
3423 else 3512 else
3424 fw = &bnx2_com_fw_06; 3513 fw = &bnx2_com_fw_06;
3425 3514
3426 fw->text = text; 3515 fw->text = text;
3427 rc = load_cpu_fw(bp, &cpu_reg, fw); 3516 rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3428 if (rc) 3517 if (rc)
3429 goto init_cpu_err; 3518 goto init_cpu_err;
3430 3519
3431 /* Initialize the Command Processor. */ 3520 /* Initialize the Command Processor. */
3432 cpu_reg.mode = BNX2_CP_CPU_MODE;
3433 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3434 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3435 cpu_reg.state = BNX2_CP_CPU_STATE;
3436 cpu_reg.state_value_clear = 0xffffff;
3437 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3438 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3439 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3440 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3441 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3442 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3443 cpu_reg.mips_view_base = 0x8000000;
3444
3445 if (CHIP_NUM(bp) == CHIP_NUM_5709) 3521 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3446 fw = &bnx2_cp_fw_09; 3522 fw = &bnx2_cp_fw_09;
3447 else 3523 else
3448 fw = &bnx2_cp_fw_06; 3524 fw = &bnx2_cp_fw_06;
3449 3525
3450 fw->text = text; 3526 fw->text = text;
3451 rc = load_cpu_fw(bp, &cpu_reg, fw); 3527 rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3452 3528
3453init_cpu_err: 3529init_cpu_err:
3454 vfree(text); 3530 vfree(text);
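
[Editor's note] The left-hand column above shows why this hunk exists: the same twelve cpu_reg fields were reassigned by hand before every load_cpu_fw() call. The new code passes per-processor descriptors (cpu_reg_txp, cpu_reg_tpat, cpu_reg_com, cpu_reg_cp) instead. A sketch of one such descriptor, using only the field names visible in this hunk; the actual definitions live elsewhere in this patch and may differ in detail:

static const struct cpu_reg cpu_reg_txp = {
	.mode			= BNX2_TXP_CPU_MODE,
	.mode_value_halt	= BNX2_TXP_CPU_MODE_SOFT_HALT,
	.mode_value_sstep	= BNX2_TXP_CPU_MODE_STEP_ENA,
	.state			= BNX2_TXP_CPU_STATE,
	.state_value_clear	= 0xffffff,
	.gpr0			= BNX2_TXP_CPU_REG_FILE,
	.evmask			= BNX2_TXP_CPU_EVENT_MASK,
	.pc			= BNX2_TXP_CPU_PROGRAM_COUNTER,
	.inst			= BNX2_TXP_CPU_INSTRUCTION,
	.bp			= BNX2_TXP_CPU_HW_BREAKPOINT,
	.spad_base		= BNX2_TXP_SCRATCH,
	.mips_view_base		= 0x8000000,
};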
@@ -3511,7 +3587,7 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3511 bp->autoneg = autoneg; 3587 bp->autoneg = autoneg;
3512 bp->advertising = advertising; 3588 bp->advertising = advertising;
3513 3589
3514 bnx2_set_mac_addr(bp); 3590 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3515 3591
3516 val = REG_RD(bp, BNX2_EMAC_MODE); 3592 val = REG_RD(bp, BNX2_EMAC_MODE);
3517 3593
@@ -3562,7 +3638,8 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3562 } 3638 }
3563 3639
3564 if (!(bp->flags & BNX2_FLAG_NO_WOL)) 3640 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3565 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0); 3641 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3642 1, 0);
3566 3643
3567 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3644 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3568 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || 3645 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
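
[Editor's note] Throughout this patch bnx2_fw_sync() gains an argument: every call site now passes an explicit ack flag ahead of the existing silent flag, so callers can post a firmware message without waiting for an acknowledgement. The prototype below is inferred from the call sites in this diff, assuming the new argument order is (ack, silent); it is not quoted from the patch:

static int bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent);

	/* post and wait for the firmware ack, reporting a timeout: */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	/* post the VLAN-keep update without blocking on an ack: */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);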
@@ -4203,35 +4280,43 @@ nvram_write_end:
4203} 4280}
4204 4281
4205static void 4282static void
4206bnx2_init_remote_phy(struct bnx2 *bp) 4283bnx2_init_fw_cap(struct bnx2 *bp)
4207{ 4284{
4208 u32 val; 4285 u32 val, sig = 0;
4209 4286
4210 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP; 4287 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4211 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) 4288 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4212 return; 4289
4290 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4291 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4213 4292
4214 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB); 4293 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4215 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE) 4294 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4216 return; 4295 return;
4217 4296
4218 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) { 4297 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4298 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4299 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4300 }
4301
4302 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4303 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4304 u32 link;
4305
4219 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP; 4306 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4220 4307
4221 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS); 4308 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4222 if (val & BNX2_LINK_STATUS_SERDES_LINK) 4309 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4223 bp->phy_port = PORT_FIBRE; 4310 bp->phy_port = PORT_FIBRE;
4224 else 4311 else
4225 bp->phy_port = PORT_TP; 4312 bp->phy_port = PORT_TP;
4226 4313
4227 if (netif_running(bp->dev)) { 4314 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4228 u32 sig; 4315 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4229
4230 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4231 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4232 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4233 }
4234 } 4316 }
4317
4318 if (netif_running(bp->dev) && sig)
4319 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4235} 4320}
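
[Editor's note] Reassembled from the right-hand column for readability, the post-patch bnx2_init_fw_cap() reads approximately as follows. The renamed helper now negotiates both the VLAN-keep and the remote-PHY capability in one pass and acknowledges the accepted set with a single BNX2_DRV_ACK_CAP_MB write:

static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}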
4236 4321
4237static void 4322static void
@@ -4261,7 +4346,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4261 udelay(5); 4346 udelay(5);
4262 4347
4263 /* Wait for the firmware to tell us it is ok to issue a reset. */ 4348 /* Wait for the firmware to tell us it is ok to issue a reset. */
4264 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1); 4349 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4265 4350
4266 /* Deposit a driver reset signature so the firmware knows that 4351 /* Deposit a driver reset signature so the firmware knows that
4267 * this is a soft reset. */ 4352 * this is a soft reset. */
@@ -4322,13 +4407,13 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4322 } 4407 }
4323 4408
4324 /* Wait for the firmware to finish its initialization. */ 4409 /* Wait for the firmware to finish its initialization. */
4325 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0); 4410 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4326 if (rc) 4411 if (rc)
4327 return rc; 4412 return rc;
4328 4413
4329 spin_lock_bh(&bp->phy_lock); 4414 spin_lock_bh(&bp->phy_lock);
4330 old_port = bp->phy_port; 4415 old_port = bp->phy_port;
4331 bnx2_init_remote_phy(bp); 4416 bnx2_init_fw_cap(bp);
4332 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) && 4417 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4333 old_port != bp->phy_port) 4418 old_port != bp->phy_port)
4334 bnx2_set_default_remote_link(bp); 4419 bnx2_set_default_remote_link(bp);
@@ -4412,7 +4497,7 @@ bnx2_init_chip(struct bnx2 *bp)
4412 4497
4413 bnx2_init_nvram(bp); 4498 bnx2_init_nvram(bp);
4414 4499
4415 bnx2_set_mac_addr(bp); 4500 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4416 4501
4417 val = REG_RD(bp, BNX2_MQ_CONFIG); 4502 val = REG_RD(bp, BNX2_MQ_CONFIG);
4418 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4503 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
@@ -4498,15 +4583,25 @@ bnx2_init_chip(struct bnx2 *bp)
4498 BNX2_HC_CONFIG_COLLECT_STATS; 4583 BNX2_HC_CONFIG_COLLECT_STATS;
4499 } 4584 }
4500 4585
4501 if (bp->flags & BNX2_FLAG_USING_MSIX) { 4586 if (bp->irq_nvecs > 1) {
4502 u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4503 BNX2_HC_SB_CONFIG_1;
4504
4505 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR, 4587 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4506 BNX2_HC_MSIX_BIT_VECTOR_VAL); 4588 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4507 4589
4590 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4591 }
4592
4593 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4594 val |= BNX2_HC_CONFIG_ONE_SHOT;
4595
4596 REG_WR(bp, BNX2_HC_CONFIG, val);
4597
4598 for (i = 1; i < bp->irq_nvecs; i++) {
4599 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4600 BNX2_HC_SB_CONFIG_1;
4601
4508 REG_WR(bp, base, 4602 REG_WR(bp, base,
4509 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE | 4603 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4604 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4510 BNX2_HC_SB_CONFIG_1_ONE_SHOT); 4605 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4511 4606
4512 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF, 4607 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
@@ -4516,13 +4611,13 @@ bnx2_init_chip(struct bnx2 *bp)
4516 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF, 4611 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4517 (bp->tx_ticks_int << 16) | bp->tx_ticks); 4612 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4518 4613
4519 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B; 4614 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4520 } 4615 (bp->rx_quick_cons_trip_int << 16) |
4616 bp->rx_quick_cons_trip);
4521 4617
4522 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI) 4618 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4523 val |= BNX2_HC_CONFIG_ONE_SHOT; 4619 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4524 4620 }
4525 REG_WR(bp, BNX2_HC_CONFIG, val);
4526 4621
4527 /* Clear internal stats counters. */ 4622 /* Clear internal stats counters. */
4528 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW); 4623 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
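
[Editor's note] The rewritten block programs one status block per vector at a fixed register stride, instead of hard-wiring a single extra block for the old dedicated tx vector; BNX2_HC_CONFIG_SB_ADDR_INC_128B presumably tells the chip to step the host status block address by 128 bytes per vector to match. The address arithmetic, restated:

	/* vector 0 keeps the legacy HC registers; vector i (i >= 1) gets
	 * its own config/coalescing block at a fixed stride: */
	u32 base = BNX2_HC_SB_CONFIG_1 + (i - 1) * BNX2_HC_SB_CONFIG_SIZE;

	REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);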
@@ -4538,7 +4633,7 @@ bnx2_init_chip(struct bnx2 *bp)
4538 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); 4633 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4539 } 4634 }
4540 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, 4635 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4541 0); 4636 1, 0);
4542 4637
4543 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); 4638 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4544 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS); 4639 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
@@ -4554,23 +4649,27 @@ static void
4554bnx2_clear_ring_states(struct bnx2 *bp) 4649bnx2_clear_ring_states(struct bnx2 *bp)
4555{ 4650{
4556 struct bnx2_napi *bnapi; 4651 struct bnx2_napi *bnapi;
4652 struct bnx2_tx_ring_info *txr;
4653 struct bnx2_rx_ring_info *rxr;
4557 int i; 4654 int i;
4558 4655
4559 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { 4656 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4560 bnapi = &bp->bnx2_napi[i]; 4657 bnapi = &bp->bnx2_napi[i];
4658 txr = &bnapi->tx_ring;
4659 rxr = &bnapi->rx_ring;
4561 4660
4562 bnapi->tx_cons = 0; 4661 txr->tx_cons = 0;
4563 bnapi->hw_tx_cons = 0; 4662 txr->hw_tx_cons = 0;
4564 bnapi->rx_prod_bseq = 0; 4663 rxr->rx_prod_bseq = 0;
4565 bnapi->rx_prod = 0; 4664 rxr->rx_prod = 0;
4566 bnapi->rx_cons = 0; 4665 rxr->rx_cons = 0;
4567 bnapi->rx_pg_prod = 0; 4666 rxr->rx_pg_prod = 0;
4568 bnapi->rx_pg_cons = 0; 4667 rxr->rx_pg_cons = 0;
4569 } 4668 }
4570} 4669}
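
[Editor's note] Every producer/consumer index that used to live directly in struct bnx2_napi moves into embedded per-ring structs. A sketch of the bookkeeping implied by this hunk; the real definitions are in bnx2.h, and these field lists are reconstructed from their uses in this patch, not quoted:

struct bnx2_tx_ring_info {
	u32		tx_prod_bseq;
	u16		tx_prod, tx_cons, hw_tx_cons;
	u32		tx_bidx_addr;		/* producer-index mailbox */
	u32		tx_bseq_addr;		/* byte-sequence mailbox */
	struct tx_bd	*tx_desc_ring;
	struct sw_bd	*tx_buf_ring;
	dma_addr_t	tx_desc_mapping;
};

struct bnx2_rx_ring_info {
	u32		rx_prod_bseq;
	u16		rx_prod, rx_cons;
	u16		rx_pg_prod, rx_pg_cons;
	u32		rx_bidx_addr, rx_bseq_addr, rx_pg_bidx_addr;
	struct sw_bd	*rx_buf_ring;
	struct rx_bd	*rx_desc_ring[MAX_RX_RINGS];
	dma_addr_t	rx_desc_mapping[MAX_RX_RINGS];
	struct rx_bd	*rx_pg_desc_ring[MAX_RX_PG_RINGS];
	dma_addr_t	rx_pg_desc_mapping[MAX_RX_PG_RINGS];
};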
4571 4670
4572static void 4671static void
4573bnx2_init_tx_context(struct bnx2 *bp, u32 cid) 4672bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4574{ 4673{
4575 u32 val, offset0, offset1, offset2, offset3; 4674 u32 val, offset0, offset1, offset2, offset3;
4576 u32 cid_addr = GET_CID_ADDR(cid); 4675 u32 cid_addr = GET_CID_ADDR(cid);
@@ -4592,43 +4691,43 @@ bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4592 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4691 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4593 bnx2_ctx_wr(bp, cid_addr, offset1, val); 4692 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4594 4693
4595 val = (u64) bp->tx_desc_mapping >> 32; 4694 val = (u64) txr->tx_desc_mapping >> 32;
4596 bnx2_ctx_wr(bp, cid_addr, offset2, val); 4695 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4597 4696
4598 val = (u64) bp->tx_desc_mapping & 0xffffffff; 4697 val = (u64) txr->tx_desc_mapping & 0xffffffff;
4599 bnx2_ctx_wr(bp, cid_addr, offset3, val); 4698 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4600} 4699}
4601 4700
4602static void 4701static void
4603bnx2_init_tx_ring(struct bnx2 *bp) 4702bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4604{ 4703{
4605 struct tx_bd *txbd; 4704 struct tx_bd *txbd;
4606 u32 cid = TX_CID; 4705 u32 cid = TX_CID;
4607 struct bnx2_napi *bnapi; 4706 struct bnx2_napi *bnapi;
4707 struct bnx2_tx_ring_info *txr;
4608 4708
4609 bp->tx_vec = 0; 4709 bnapi = &bp->bnx2_napi[ring_num];
4610 if (bp->flags & BNX2_FLAG_USING_MSIX) { 4710 txr = &bnapi->tx_ring;
4611 cid = TX_TSS_CID; 4711
4612 bp->tx_vec = BNX2_TX_VEC; 4712 if (ring_num == 0)
4613 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM | 4713 cid = TX_CID;
4614 (TX_TSS_CID << 7)); 4714 else
4615 } 4715 cid = TX_TSS_CID + ring_num - 1;
4616 bnapi = &bp->bnx2_napi[bp->tx_vec];
4617 4716
4618 bp->tx_wake_thresh = bp->tx_ring_size / 2; 4717 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4619 4718
4620 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT]; 4719 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4621 4720
4622 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32; 4721 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4623 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff; 4722 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4624 4723
4625 bp->tx_prod = 0; 4724 txr->tx_prod = 0;
4626 bp->tx_prod_bseq = 0; 4725 txr->tx_prod_bseq = 0;
4627 4726
4628 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX; 4727 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4629 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ; 4728 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4630 4729
4631 bnx2_init_tx_context(bp, cid); 4730 bnx2_init_tx_context(bp, cid, txr);
4632} 4731}
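
[Editor's note] Ring-to-context-ID assignment is now uniform: ring 0 keeps the legacy CID, and each extra ring takes the next TSS (or, on the rx side below, RSS) CID, so the per-ring doorbell offsets fall straight out of MB_GET_CID_ADDR():

	u32 tx_cid = (ring_num == 0) ? TX_CID : TX_TSS_CID + ring_num - 1;
	u32 rx_cid = (ring_num == 0) ? RX_CID : RX_RSS_CID + ring_num - 1;

	txr->tx_bidx_addr = MB_GET_CID_ADDR(tx_cid) + BNX2_L2CTX_TX_HOST_BIDX;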
4633 4732
4634static void 4733static void
@@ -4656,17 +4755,25 @@ bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4656} 4755}
4657 4756
4658static void 4757static void
4659bnx2_init_rx_ring(struct bnx2 *bp) 4758bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
4660{ 4759{
4661 int i; 4760 int i;
4662 u16 prod, ring_prod; 4761 u16 prod, ring_prod;
4663 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID); 4762 u32 cid, rx_cid_addr, val;
4664 struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; 4763 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
4764 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4665 4765
4666 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping, 4766 if (ring_num == 0)
4767 cid = RX_CID;
4768 else
4769 cid = RX_RSS_CID + ring_num - 1;
4770
4771 rx_cid_addr = GET_CID_ADDR(cid);
4772
4773 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
4667 bp->rx_buf_use_size, bp->rx_max_ring); 4774 bp->rx_buf_use_size, bp->rx_max_ring);
4668 4775
4669 bnx2_init_rx_context0(bp); 4776 bnx2_init_rx_context(bp, cid);
4670 4777
4671 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 4778 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4672 val = REG_RD(bp, BNX2_MQ_MAP_L2_5); 4779 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
@@ -4675,54 +4782,101 @@ bnx2_init_rx_ring(struct bnx2 *bp)
4675 4782
4676 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0); 4783 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4677 if (bp->rx_pg_ring_size) { 4784 if (bp->rx_pg_ring_size) {
4678 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring, 4785 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
4679 bp->rx_pg_desc_mapping, 4786 rxr->rx_pg_desc_mapping,
4680 PAGE_SIZE, bp->rx_max_pg_ring); 4787 PAGE_SIZE, bp->rx_max_pg_ring);
4681 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE; 4788 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4682 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val); 4789 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4683 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY, 4790 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4684 BNX2_L2CTX_RBDC_JUMBO_KEY); 4791 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
4685 4792
4686 val = (u64) bp->rx_pg_desc_mapping[0] >> 32; 4793 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
4687 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val); 4794 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4688 4795
4689 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff; 4796 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
4690 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val); 4797 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4691 4798
4692 if (CHIP_NUM(bp) == CHIP_NUM_5709) 4799 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4693 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT); 4800 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4694 } 4801 }
4695 4802
4696 val = (u64) bp->rx_desc_mapping[0] >> 32; 4803 val = (u64) rxr->rx_desc_mapping[0] >> 32;
4697 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); 4804 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4698 4805
4699 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff; 4806 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
4700 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); 4807 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4701 4808
4702 ring_prod = prod = bnapi->rx_pg_prod; 4809 ring_prod = prod = rxr->rx_pg_prod;
4703 for (i = 0; i < bp->rx_pg_ring_size; i++) { 4810 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4704 if (bnx2_alloc_rx_page(bp, ring_prod) < 0) 4811 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
4705 break; 4812 break;
4706 prod = NEXT_RX_BD(prod); 4813 prod = NEXT_RX_BD(prod);
4707 ring_prod = RX_PG_RING_IDX(prod); 4814 ring_prod = RX_PG_RING_IDX(prod);
4708 } 4815 }
4709 bnapi->rx_pg_prod = prod; 4816 rxr->rx_pg_prod = prod;
4710 4817
4711 ring_prod = prod = bnapi->rx_prod; 4818 ring_prod = prod = rxr->rx_prod;
4712 for (i = 0; i < bp->rx_ring_size; i++) { 4819 for (i = 0; i < bp->rx_ring_size; i++) {
4713 if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) { 4820 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
4714 break; 4821 break;
4715 }
4716 prod = NEXT_RX_BD(prod); 4822 prod = NEXT_RX_BD(prod);
4717 ring_prod = RX_RING_IDX(prod); 4823 ring_prod = RX_RING_IDX(prod);
4718 } 4824 }
4719 bnapi->rx_prod = prod; 4825 rxr->rx_prod = prod;
4826
4827 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
4828 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
4829 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
4720 4830
4721 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, 4831 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
4722 bnapi->rx_pg_prod); 4832 REG_WR16(bp, rxr->rx_bidx_addr, prod);
4723 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4724 4833
4725 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq); 4834 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
4835}
4836
4837static void
4838bnx2_init_all_rings(struct bnx2 *bp)
4839{
4840 int i;
4841 u32 val;
4842
4843 bnx2_clear_ring_states(bp);
4844
4845 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4846 for (i = 0; i < bp->num_tx_rings; i++)
4847 bnx2_init_tx_ring(bp, i);
4848
4849 if (bp->num_tx_rings > 1)
4850 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4851 (TX_TSS_CID << 7));
4852
4853 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
4854 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
4855
4856 for (i = 0; i < bp->num_rx_rings; i++)
4857 bnx2_init_rx_ring(bp, i);
4858
4859 if (bp->num_rx_rings > 1) {
4860 u32 tbl_32;
4861 u8 *tbl = (u8 *) &tbl_32;
4862
4863 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
4864 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
4865
4866 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
4867 tbl[i % 4] = i % (bp->num_rx_rings - 1);
4868 if ((i % 4) == 3)
4869 bnx2_reg_wr_ind(bp,
4870 BNX2_RXP_SCRATCH_RSS_TBL + i,
4871 cpu_to_be32(tbl_32));
4872 }
4873
4874 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
4875 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
4876
4877 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
4878
4879 }
4726} 4880}
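
[Editor's note] The RSS indirection fill above packs four one-byte ring indices into each 32-bit scratch word before the indirect write. A worked pass, assuming num_rx_rings == 4 (one default ring plus three RSS rings), so entries cycle over i % 3:

	u32 tbl_32;
	u8 *tbl = (u8 *) &tbl_32;

	tbl[0] = 0; tbl[1] = 1; tbl[2] = 2; tbl[3] = 0;	/* entries 0..3 */
	/* i == 3 closes the word, triggering:
	 *   bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL + 3,
	 *                   cpu_to_be32(tbl_32));
	 * and likewise for every fourth entry after that. */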
4727 4881
4728static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size) 4882static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
@@ -4750,12 +4904,12 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4750 u32 rx_size, rx_space, jumbo_size; 4904 u32 rx_size, rx_space, jumbo_size;
4751 4905
4752 /* 8 for CRC and VLAN */ 4906 /* 8 for CRC and VLAN */
4753 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8; 4907 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4754 4908
4755 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD + 4909 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4756 sizeof(struct skb_shared_info); 4910 sizeof(struct skb_shared_info);
4757 4911
4758 bp->rx_copy_thresh = RX_COPY_THRESH; 4912 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4759 bp->rx_pg_ring_size = 0; 4913 bp->rx_pg_ring_size = 0;
4760 bp->rx_max_pg_ring = 0; 4914 bp->rx_max_pg_ring = 0;
4761 bp->rx_max_pg_ring_idx = 0; 4915 bp->rx_max_pg_ring_idx = 0;
@@ -4770,14 +4924,14 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4770 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size, 4924 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4771 MAX_RX_PG_RINGS); 4925 MAX_RX_PG_RINGS);
4772 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1; 4926 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4773 rx_size = RX_COPY_THRESH + bp->rx_offset; 4927 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4774 bp->rx_copy_thresh = 0; 4928 bp->rx_copy_thresh = 0;
4775 } 4929 }
4776 4930
4777 bp->rx_buf_use_size = rx_size; 4931 bp->rx_buf_use_size = rx_size;
4778 /* hw alignment */ 4932 /* hw alignment */
4779 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN; 4933 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4780 bp->rx_jumbo_thresh = rx_size - bp->rx_offset; 4934 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
4781 bp->rx_ring_size = size; 4935 bp->rx_ring_size = size;
4782 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS); 4936 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4783 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1; 4937 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
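
[Editor's note] Restating the sizing arithmetic above (BNX2_RX_OFFSET replaces the old bp->rx_offset, whose initializer, sizeof(struct l2_fhdr) + 2, is removed later in this patch):

	/* rx_size  = mtu + ETH_HLEN + BNX2_RX_OFFSET + 8    (8 = CRC + VLAN)
	 * rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
	 *            sizeof(struct skb_shared_info)
	 *
	 * When the page ring is in use, the skb buffer shrinks to just the
	 * copy-threshold head (BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET) and
	 * frames larger than rx_jumbo_thresh spill into the page ring. */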
@@ -4788,36 +4942,42 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
4788{ 4942{
4789 int i; 4943 int i;
4790 4944
4791 if (bp->tx_buf_ring == NULL) 4945 for (i = 0; i < bp->num_tx_rings; i++) {
4792 return; 4946 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4793 4947 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4794 for (i = 0; i < TX_DESC_CNT; ) { 4948 int j;
4795 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4796 struct sk_buff *skb = tx_buf->skb;
4797 int j, last;
4798 4949
4799 if (skb == NULL) { 4950 if (txr->tx_buf_ring == NULL)
4800 i++;
4801 continue; 4951 continue;
4802 }
4803 4952
4804 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), 4953 for (j = 0; j < TX_DESC_CNT; ) {
4954 struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4955 struct sk_buff *skb = tx_buf->skb;
4956 int k, last;
4957
4958 if (skb == NULL) {
4959 j++;
4960 continue;
4961 }
4962
4963 pci_unmap_single(bp->pdev,
4964 pci_unmap_addr(tx_buf, mapping),
4805 skb_headlen(skb), PCI_DMA_TODEVICE); 4965 skb_headlen(skb), PCI_DMA_TODEVICE);
4806 4966
4807 tx_buf->skb = NULL; 4967 tx_buf->skb = NULL;
4808 4968
4809 last = skb_shinfo(skb)->nr_frags; 4969 last = skb_shinfo(skb)->nr_frags;
4810 for (j = 0; j < last; j++) { 4970 for (k = 0; k < last; k++) {
4811 tx_buf = &bp->tx_buf_ring[i + j + 1]; 4971 tx_buf = &txr->tx_buf_ring[j + k + 1];
4812 pci_unmap_page(bp->pdev, 4972 pci_unmap_page(bp->pdev,
4813 pci_unmap_addr(tx_buf, mapping), 4973 pci_unmap_addr(tx_buf, mapping),
4814 skb_shinfo(skb)->frags[j].size, 4974 skb_shinfo(skb)->frags[j].size,
4815 PCI_DMA_TODEVICE); 4975 PCI_DMA_TODEVICE);
4976 }
4977 dev_kfree_skb(skb);
4978 j += k + 1;
4816 } 4979 }
4817 dev_kfree_skb(skb);
4818 i += j + 1;
4819 } 4980 }
4820
4821} 4981}
4822 4982
4823static void 4983static void
@@ -4825,25 +4985,33 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
4825{ 4985{
4826 int i; 4986 int i;
4827 4987
4828 if (bp->rx_buf_ring == NULL) 4988 for (i = 0; i < bp->num_rx_rings; i++) {
4829 return; 4989 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4990 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4991 int j;
4830 4992
4831 for (i = 0; i < bp->rx_max_ring_idx; i++) { 4993 if (rxr->rx_buf_ring == NULL)
4832 struct sw_bd *rx_buf = &bp->rx_buf_ring[i]; 4994 return;
4833 struct sk_buff *skb = rx_buf->skb;
4834 4995
4835 if (skb == NULL) 4996 for (j = 0; j < bp->rx_max_ring_idx; j++) {
4836 continue; 4997 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
4998 struct sk_buff *skb = rx_buf->skb;
4837 4999
4838 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping), 5000 if (skb == NULL)
4839 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); 5001 continue;
4840 5002
4841 rx_buf->skb = NULL; 5003 pci_unmap_single(bp->pdev,
5004 pci_unmap_addr(rx_buf, mapping),
5005 bp->rx_buf_use_size,
5006 PCI_DMA_FROMDEVICE);
4842 5007
4843 dev_kfree_skb(skb); 5008 rx_buf->skb = NULL;
5009
5010 dev_kfree_skb(skb);
5011 }
5012 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5013 bnx2_free_rx_page(bp, rxr, j);
4844 } 5014 }
4845 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4846 bnx2_free_rx_page(bp, i);
4847} 5015}
4848 5016
4849static void 5017static void
@@ -4866,14 +5034,12 @@ bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4866 if ((rc = bnx2_init_chip(bp)) != 0) 5034 if ((rc = bnx2_init_chip(bp)) != 0)
4867 return rc; 5035 return rc;
4868 5036
4869 bnx2_clear_ring_states(bp); 5037 bnx2_init_all_rings(bp);
4870 bnx2_init_tx_ring(bp);
4871 bnx2_init_rx_ring(bp);
4872 return 0; 5038 return 0;
4873} 5039}
4874 5040
4875static int 5041static int
4876bnx2_init_nic(struct bnx2 *bp) 5042bnx2_init_nic(struct bnx2 *bp, int reset_phy)
4877{ 5043{
4878 int rc; 5044 int rc;
4879 5045
@@ -4881,7 +5047,7 @@ bnx2_init_nic(struct bnx2 *bp)
4881 return rc; 5047 return rc;
4882 5048
4883 spin_lock_bh(&bp->phy_lock); 5049 spin_lock_bh(&bp->phy_lock);
4884 bnx2_init_phy(bp); 5050 bnx2_init_phy(bp, reset_phy);
4885 bnx2_set_link(bp); 5051 bnx2_set_link(bp);
4886 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) 5052 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
4887 bnx2_remote_phy_event(bp); 5053 bnx2_remote_phy_event(bp);
@@ -5141,11 +5307,13 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5141 struct l2_fhdr *rx_hdr; 5307 struct l2_fhdr *rx_hdr;
5142 int ret = -ENODEV; 5308 int ret = -ENODEV;
5143 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi; 5309 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5310 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5311 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5144 5312
5145 tx_napi = bnapi; 5313 tx_napi = bnapi;
5146 if (bp->flags & BNX2_FLAG_USING_MSIX)
5147 tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
5148 5314
5315 txr = &tx_napi->tx_ring;
5316 rxr = &bnapi->rx_ring;
5149 if (loopback_mode == BNX2_MAC_LOOPBACK) { 5317 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5150 bp->loopback = MAC_LOOPBACK; 5318 bp->loopback = MAC_LOOPBACK;
5151 bnx2_set_mac_loopback(bp); 5319 bnx2_set_mac_loopback(bp);
@@ -5183,7 +5351,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5183 5351
5184 num_pkts = 0; 5352 num_pkts = 0;
5185 5353
5186 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)]; 5354 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5187 5355
5188 txbd->tx_bd_haddr_hi = (u64) map >> 32; 5356 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5189 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff; 5357 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
@@ -5191,11 +5359,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5191 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END; 5359 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5192 5360
5193 num_pkts++; 5361 num_pkts++;
5194 bp->tx_prod = NEXT_TX_BD(bp->tx_prod); 5362 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5195 bp->tx_prod_bseq += pkt_size; 5363 txr->tx_prod_bseq += pkt_size;
5196 5364
5197 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod); 5365 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5198 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq); 5366 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5199 5367
5200 udelay(100); 5368 udelay(100);
5201 5369
@@ -5209,7 +5377,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5209 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE); 5377 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5210 dev_kfree_skb(skb); 5378 dev_kfree_skb(skb);
5211 5379
5212 if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod) 5380 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5213 goto loopback_test_done; 5381 goto loopback_test_done;
5214 5382
5215 rx_idx = bnx2_get_hw_rx_cons(bnapi); 5383 rx_idx = bnx2_get_hw_rx_cons(bnapi);
@@ -5217,11 +5385,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5217 goto loopback_test_done; 5385 goto loopback_test_done;
5218 } 5386 }
5219 5387
5220 rx_buf = &bp->rx_buf_ring[rx_start_idx]; 5388 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5221 rx_skb = rx_buf->skb; 5389 rx_skb = rx_buf->skb;
5222 5390
5223 rx_hdr = (struct l2_fhdr *) rx_skb->data; 5391 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5224 skb_reserve(rx_skb, bp->rx_offset); 5392 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5225 5393
5226 pci_dma_sync_single_for_cpu(bp->pdev, 5394 pci_dma_sync_single_for_cpu(bp->pdev,
5227 pci_unmap_addr(rx_buf, mapping), 5395 pci_unmap_addr(rx_buf, mapping),
@@ -5269,7 +5437,7 @@ bnx2_test_loopback(struct bnx2 *bp)
5269 5437
5270 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); 5438 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5271 spin_lock_bh(&bp->phy_lock); 5439 spin_lock_bh(&bp->phy_lock);
5272 bnx2_init_phy(bp); 5440 bnx2_init_phy(bp, 1);
5273 spin_unlock_bh(&bp->phy_lock); 5441 spin_unlock_bh(&bp->phy_lock);
5274 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK)) 5442 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5275 rc |= BNX2_MAC_LOOPBACK_FAILED; 5443 rc |= BNX2_MAC_LOOPBACK_FAILED;
@@ -5531,7 +5699,6 @@ bnx2_restart_timer:
5531static int 5699static int
5532bnx2_request_irq(struct bnx2 *bp) 5700bnx2_request_irq(struct bnx2 *bp)
5533{ 5701{
5534 struct net_device *dev = bp->dev;
5535 unsigned long flags; 5702 unsigned long flags;
5536 struct bnx2_irq *irq; 5703 struct bnx2_irq *irq;
5537 int rc = 0, i; 5704 int rc = 0, i;
@@ -5544,7 +5711,7 @@ bnx2_request_irq(struct bnx2 *bp)
5544 for (i = 0; i < bp->irq_nvecs; i++) { 5711 for (i = 0; i < bp->irq_nvecs; i++) {
5545 irq = &bp->irq_tbl[i]; 5712 irq = &bp->irq_tbl[i];
5546 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 5713 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5547 dev); 5714 &bp->bnx2_napi[i]);
5548 if (rc) 5715 if (rc)
5549 break; 5716 break;
5550 irq->requested = 1; 5717 irq->requested = 1;
@@ -5555,14 +5722,13 @@ bnx2_request_irq(struct bnx2 *bp)
5555static void 5722static void
5556bnx2_free_irq(struct bnx2 *bp) 5723bnx2_free_irq(struct bnx2 *bp)
5557{ 5724{
5558 struct net_device *dev = bp->dev;
5559 struct bnx2_irq *irq; 5725 struct bnx2_irq *irq;
5560 int i; 5726 int i;
5561 5727
5562 for (i = 0; i < bp->irq_nvecs; i++) { 5728 for (i = 0; i < bp->irq_nvecs; i++) {
5563 irq = &bp->irq_tbl[i]; 5729 irq = &bp->irq_tbl[i];
5564 if (irq->requested) 5730 if (irq->requested)
5565 free_irq(irq->vector, dev); 5731 free_irq(irq->vector, &bp->bnx2_napi[i]);
5566 irq->requested = 0; 5732 irq->requested = 0;
5567 } 5733 }
5568 if (bp->flags & BNX2_FLAG_USING_MSI) 5734 if (bp->flags & BNX2_FLAG_USING_MSI)
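
[Editor's note] request_irq()/free_irq() now use &bp->bnx2_napi[i] as the dev_id cookie, so each handler recovers its per-vector context without touching the net_device. A sketch of the one-shot MSI handler under that convention; this is assumed, not quoted from the patch, and uses the netif_rx_schedule() API of this kernel generation:

static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	/* Return here if interrupts are disabled during reset. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(bp->dev, &bnapi->napi);
	return IRQ_HANDLED;
}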
@@ -5574,7 +5740,7 @@ bnx2_free_irq(struct bnx2 *bp)
5574} 5740}
5575 5741
5576static void 5742static void
5577bnx2_enable_msix(struct bnx2 *bp) 5743bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5578{ 5744{
5579 int i, rc; 5745 int i, rc;
5580 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC]; 5746 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
@@ -5587,21 +5753,16 @@ bnx2_enable_msix(struct bnx2 *bp)
5587 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { 5753 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5588 msix_ent[i].entry = i; 5754 msix_ent[i].entry = i;
5589 msix_ent[i].vector = 0; 5755 msix_ent[i].vector = 0;
5756
5757 strcpy(bp->irq_tbl[i].name, bp->dev->name);
5758 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5590 } 5759 }
5591 5760
5592 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC); 5761 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5593 if (rc != 0) 5762 if (rc != 0)
5594 return; 5763 return;
5595 5764
5596 bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot; 5765 bp->irq_nvecs = msix_vecs;
5597 bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
5598
5599 strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
5600 strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
5601 strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
5602 strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
5603
5604 bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5605 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI; 5766 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5606 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) 5767 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5607 bp->irq_tbl[i].vector = msix_ent[i].vector; 5768 bp->irq_tbl[i].vector = msix_ent[i].vector;
@@ -5610,13 +5771,16 @@ bnx2_enable_msix(struct bnx2 *bp)
5610static void 5771static void
5611bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi) 5772bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5612{ 5773{
5774 int cpus = num_online_cpus();
5775 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
5776
5613 bp->irq_tbl[0].handler = bnx2_interrupt; 5777 bp->irq_tbl[0].handler = bnx2_interrupt;
5614 strcpy(bp->irq_tbl[0].name, bp->dev->name); 5778 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5615 bp->irq_nvecs = 1; 5779 bp->irq_nvecs = 1;
5616 bp->irq_tbl[0].vector = bp->pdev->irq; 5780 bp->irq_tbl[0].vector = bp->pdev->irq;
5617 5781
5618 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi) 5782 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5619 bnx2_enable_msix(bp); 5783 bnx2_enable_msix(bp, msix_vecs);
5620 5784
5621 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi && 5785 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5622 !(bp->flags & BNX2_FLAG_USING_MSIX)) { 5786 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
@@ -5631,6 +5795,11 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5631 bp->irq_tbl[0].vector = bp->pdev->irq; 5795 bp->irq_tbl[0].vector = bp->pdev->irq;
5632 } 5796 }
5633 } 5797 }
5798
5799 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
5800 bp->dev->real_num_tx_queues = bp->num_tx_rings;
5801
5802 bp->num_rx_rings = bp->irq_nvecs;
5634} 5803}
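
[Editor's note] A worked example of the new vector/ring accounting on a four-CPU machine, assuming RX_MAX_RINGS permits five vectors; rounddown_pow_of_two() is provided by <linux/log2.h>:

	int cpus = num_online_cpus();			/* say 4 */
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);	/* 5, if permitted */

	/* after a successful pci_enable_msix(), irq_nvecs == 5, so: */
	bp->num_rx_rings = bp->irq_nvecs;			/* 5 rx rings */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);	/* 4 tx rings */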
5635 5804
5636/* Called with rtnl_lock */ 5805/* Called with rtnl_lock */
@@ -5645,29 +5814,19 @@ bnx2_open(struct net_device *dev)
5645 bnx2_set_power_state(bp, PCI_D0); 5814 bnx2_set_power_state(bp, PCI_D0);
5646 bnx2_disable_int(bp); 5815 bnx2_disable_int(bp);
5647 5816
5817 bnx2_setup_int_mode(bp, disable_msi);
5818 bnx2_napi_enable(bp);
5648 rc = bnx2_alloc_mem(bp); 5819 rc = bnx2_alloc_mem(bp);
5649 if (rc) 5820 if (rc)
5650 return rc; 5821 goto open_err;
5651 5822
5652 bnx2_setup_int_mode(bp, disable_msi);
5653 bnx2_napi_enable(bp);
5654 rc = bnx2_request_irq(bp); 5823 rc = bnx2_request_irq(bp);
5824 if (rc)
5825 goto open_err;
5655 5826
5656 if (rc) { 5827 rc = bnx2_init_nic(bp, 1);
5657 bnx2_napi_disable(bp); 5828 if (rc)
5658 bnx2_free_mem(bp); 5829 goto open_err;
5659 return rc;
5660 }
5661
5662 rc = bnx2_init_nic(bp);
5663
5664 if (rc) {
5665 bnx2_napi_disable(bp);
5666 bnx2_free_irq(bp);
5667 bnx2_free_skbs(bp);
5668 bnx2_free_mem(bp);
5669 return rc;
5670 }
5671 5830
5672 mod_timer(&bp->timer, jiffies + bp->current_interval); 5831 mod_timer(&bp->timer, jiffies + bp->current_interval);
5673 5832
@@ -5691,17 +5850,14 @@ bnx2_open(struct net_device *dev)
5691 5850
5692 bnx2_setup_int_mode(bp, 1); 5851 bnx2_setup_int_mode(bp, 1);
5693 5852
5694 rc = bnx2_init_nic(bp); 5853 rc = bnx2_init_nic(bp, 0);
5695 5854
5696 if (!rc) 5855 if (!rc)
5697 rc = bnx2_request_irq(bp); 5856 rc = bnx2_request_irq(bp);
5698 5857
5699 if (rc) { 5858 if (rc) {
5700 bnx2_napi_disable(bp);
5701 bnx2_free_skbs(bp);
5702 bnx2_free_mem(bp);
5703 del_timer_sync(&bp->timer); 5859 del_timer_sync(&bp->timer);
5704 return rc; 5860 goto open_err;
5705 } 5861 }
5706 bnx2_enable_int(bp); 5862 bnx2_enable_int(bp);
5707 } 5863 }
@@ -5711,9 +5867,16 @@ bnx2_open(struct net_device *dev)
5711 else if (bp->flags & BNX2_FLAG_USING_MSIX) 5867 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5712 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name); 5868 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5713 5869
5714 netif_start_queue(dev); 5870 netif_tx_start_all_queues(dev);
5715 5871
5716 return 0; 5872 return 0;
5873
5874open_err:
5875 bnx2_napi_disable(bp);
5876 bnx2_free_skbs(bp);
5877 bnx2_free_irq(bp);
5878 bnx2_free_mem(bp);
5879 return rc;
5717} 5880}
5718 5881
5719static void 5882static void
@@ -5726,7 +5889,7 @@ bnx2_reset_task(struct work_struct *work)
5726 5889
5727 bnx2_netif_stop(bp); 5890 bnx2_netif_stop(bp);
5728 5891
5729 bnx2_init_nic(bp); 5892 bnx2_init_nic(bp, 1);
5730 5893
5731 atomic_set(&bp->intr_sem, 1); 5894 atomic_set(&bp->intr_sem, 1);
5732 bnx2_netif_start(bp); 5895 bnx2_netif_start(bp);
@@ -5752,6 +5915,8 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5752 5915
5753 bp->vlgrp = vlgrp; 5916 bp->vlgrp = vlgrp;
5754 bnx2_set_rx_mode(dev); 5917 bnx2_set_rx_mode(dev);
5918 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
5919 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
5755 5920
5756 bnx2_netif_start(bp); 5921 bnx2_netif_start(bp);
5757} 5922}
@@ -5771,18 +5936,26 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5771 u32 len, vlan_tag_flags, last_frag, mss; 5936 u32 len, vlan_tag_flags, last_frag, mss;
5772 u16 prod, ring_prod; 5937 u16 prod, ring_prod;
5773 int i; 5938 int i;
5774 struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec]; 5939 struct bnx2_napi *bnapi;
5940 struct bnx2_tx_ring_info *txr;
5941 struct netdev_queue *txq;
5775 5942
5776 if (unlikely(bnx2_tx_avail(bp, bnapi) < 5943 /* Determine which tx ring we will be placed on */
5944 i = skb_get_queue_mapping(skb);
5945 bnapi = &bp->bnx2_napi[i];
5946 txr = &bnapi->tx_ring;
5947 txq = netdev_get_tx_queue(dev, i);
5948
5949 if (unlikely(bnx2_tx_avail(bp, txr) <
5777 (skb_shinfo(skb)->nr_frags + 1))) { 5950 (skb_shinfo(skb)->nr_frags + 1))) {
5778 netif_stop_queue(dev); 5951 netif_tx_stop_queue(txq);
5779 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n", 5952 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5780 dev->name); 5953 dev->name);
5781 5954
5782 return NETDEV_TX_BUSY; 5955 return NETDEV_TX_BUSY;
5783 } 5956 }
5784 len = skb_headlen(skb); 5957 len = skb_headlen(skb);
5785 prod = bp->tx_prod; 5958 prod = txr->tx_prod;
5786 ring_prod = TX_RING_IDX(prod); 5959 ring_prod = TX_RING_IDX(prod);
5787 5960
5788 vlan_tag_flags = 0; 5961 vlan_tag_flags = 0;
@@ -5844,11 +6017,11 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5844 6017
5845 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); 6018 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5846 6019
5847 tx_buf = &bp->tx_buf_ring[ring_prod]; 6020 tx_buf = &txr->tx_buf_ring[ring_prod];
5848 tx_buf->skb = skb; 6021 tx_buf->skb = skb;
5849 pci_unmap_addr_set(tx_buf, mapping, mapping); 6022 pci_unmap_addr_set(tx_buf, mapping, mapping);
5850 6023
5851 txbd = &bp->tx_desc_ring[ring_prod]; 6024 txbd = &txr->tx_desc_ring[ring_prod];
5852 6025
5853 txbd->tx_bd_haddr_hi = (u64) mapping >> 32; 6026 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5854 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; 6027 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -5862,12 +6035,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5862 6035
5863 prod = NEXT_TX_BD(prod); 6036 prod = NEXT_TX_BD(prod);
5864 ring_prod = TX_RING_IDX(prod); 6037 ring_prod = TX_RING_IDX(prod);
5865 txbd = &bp->tx_desc_ring[ring_prod]; 6038 txbd = &txr->tx_desc_ring[ring_prod];
5866 6039
5867 len = frag->size; 6040 len = frag->size;
5868 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset, 6041 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5869 len, PCI_DMA_TODEVICE); 6042 len, PCI_DMA_TODEVICE);
5870 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod], 6043 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
5871 mapping, mapping); 6044 mapping, mapping);
5872 6045
5873 txbd->tx_bd_haddr_hi = (u64) mapping >> 32; 6046 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -5879,20 +6052,20 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5879 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END; 6052 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5880 6053
5881 prod = NEXT_TX_BD(prod); 6054 prod = NEXT_TX_BD(prod);
5882 bp->tx_prod_bseq += skb->len; 6055 txr->tx_prod_bseq += skb->len;
5883 6056
5884 REG_WR16(bp, bp->tx_bidx_addr, prod); 6057 REG_WR16(bp, txr->tx_bidx_addr, prod);
5885 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq); 6058 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5886 6059
5887 mmiowb(); 6060 mmiowb();
5888 6061
5889 bp->tx_prod = prod; 6062 txr->tx_prod = prod;
5890 dev->trans_start = jiffies; 6063 dev->trans_start = jiffies;
5891 6064
5892 if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) { 6065 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
5893 netif_stop_queue(dev); 6066 netif_tx_stop_queue(txq);
5894 if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh) 6067 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
5895 netif_wake_queue(dev); 6068 netif_tx_wake_queue(txq);
5896 } 6069 }
5897 6070
5898 return NETDEV_TX_OK; 6071 return NETDEV_TX_OK;
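
[Editor's note] The transmit path is now fully per-queue: the stack's queue mapping selects the ring, and flow control stops and wakes that ring's own netdev_queue rather than the whole device. Condensed from the hunk above:

	int i = skb_get_queue_mapping(skb);
	struct bnx2_tx_ring_info *txr = &bp->bnx2_napi[i].tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

	if (unlikely(bnx2_tx_avail(bp, txr) <
		     skb_shinfo(skb)->nr_frags + 1)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}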
@@ -6095,6 +6268,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6095 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)) 6268 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6096 goto err_out_unlock; 6269 goto err_out_unlock;
6097 6270
6271 /* If device is down, we can store the settings only if the user
6272 * is setting the currently active port.
6273 */
6274 if (!netif_running(dev) && cmd->port != bp->phy_port)
6275 goto err_out_unlock;
6276
6098 if (cmd->autoneg == AUTONEG_ENABLE) { 6277 if (cmd->autoneg == AUTONEG_ENABLE) {
6099 autoneg |= AUTONEG_SPEED; 6278 autoneg |= AUTONEG_SPEED;
6100 6279
@@ -6152,7 +6331,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6152 bp->req_line_speed = req_line_speed; 6331 bp->req_line_speed = req_line_speed;
6153 bp->req_duplex = req_duplex; 6332 bp->req_duplex = req_duplex;
6154 6333
6155 err = bnx2_setup_phy(bp, cmd->port); 6334 err = 0;
6335 /* If device is down, the new settings will be picked up when it is
6336 * brought up.
6337 */
6338 if (netif_running(dev))
6339 err = bnx2_setup_phy(bp, cmd->port);
6156 6340
6157err_out_unlock: 6341err_out_unlock:
6158 spin_unlock_bh(&bp->phy_lock); 6342 spin_unlock_bh(&bp->phy_lock);
@@ -6414,7 +6598,7 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6414 6598
6415 if (netif_running(bp->dev)) { 6599 if (netif_running(bp->dev)) {
6416 bnx2_netif_stop(bp); 6600 bnx2_netif_stop(bp);
6417 bnx2_init_nic(bp); 6601 bnx2_init_nic(bp, 0);
6418 bnx2_netif_start(bp); 6602 bnx2_netif_start(bp);
6419 } 6603 }
6420 6604
@@ -6457,7 +6641,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6457 rc = bnx2_alloc_mem(bp); 6641 rc = bnx2_alloc_mem(bp);
6458 if (rc) 6642 if (rc)
6459 return rc; 6643 return rc;
6460 bnx2_init_nic(bp); 6644 bnx2_init_nic(bp, 0);
6461 bnx2_netif_start(bp); 6645 bnx2_netif_start(bp);
6462 } 6646 }
6463 return 0; 6647 return 0;
@@ -6725,7 +6909,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6725 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); 6909 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6726 } 6910 }
6727 else { 6911 else {
6728 bnx2_init_nic(bp); 6912 bnx2_init_nic(bp, 1);
6729 bnx2_netif_start(bp); 6913 bnx2_netif_start(bp);
6730 } 6914 }
6731 6915
@@ -6951,7 +7135,7 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
6951 7135
6952 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 7136 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6953 if (netif_running(dev)) 7137 if (netif_running(dev))
6954 bnx2_set_mac_addr(bp); 7138 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
6955 7139
6956 return 0; 7140 return 0;
6957} 7141}
@@ -7108,6 +7292,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7108 } 7292 }
7109 7293
7110 pci_set_master(pdev); 7294 pci_set_master(pdev);
7295 pci_save_state(pdev);
7111 7296
7112 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 7297 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7113 if (bp->pm_cap == 0) { 7298 if (bp->pm_cap == 0) {
@@ -7125,7 +7310,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7125 INIT_WORK(&bp->reset_task, bnx2_reset_task); 7310 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7126 7311
7127 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); 7312 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7128 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1); 7313 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
7129 dev->mem_end = dev->mem_start + mem_len; 7314 dev->mem_end = dev->mem_start + mem_len;
7130 dev->irq = pdev->irq; 7315 dev->irq = pdev->irq;
7131 7316
@@ -7272,7 +7457,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7272 reg &= BNX2_CONDITION_MFW_RUN_MASK; 7457 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7273 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN && 7458 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7274 reg != BNX2_CONDITION_MFW_RUN_NONE) { 7459 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7275 int i;
7276 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR); 7460 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7277 7461
7278 bp->fw_version[j++] = ' '; 7462 bp->fw_version[j++] = ' ';
@@ -7294,8 +7478,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7294 bp->mac_addr[4] = (u8) (reg >> 8); 7478 bp->mac_addr[4] = (u8) (reg >> 8);
7295 bp->mac_addr[5] = (u8) reg; 7479 bp->mac_addr[5] = (u8) reg;
7296 7480
7297 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7298
7299 bp->tx_ring_size = MAX_TX_DESC_CNT; 7481 bp->tx_ring_size = MAX_TX_DESC_CNT;
7300 bnx2_set_rx_ring_size(bp, 255); 7482 bnx2_set_rx_ring_size(bp, 255);
7301 7483
@@ -7345,8 +7527,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7345 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) 7527 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7346 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE; 7528 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7347 } 7529 }
7348 bnx2_init_remote_phy(bp);
7349
7350 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || 7530 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7351 CHIP_NUM(bp) == CHIP_NUM_5708) 7531 CHIP_NUM(bp) == CHIP_NUM_5708)
7352 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX; 7532 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
@@ -7355,6 +7535,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7355 CHIP_REV(bp) == CHIP_REV_Bx)) 7535 CHIP_REV(bp) == CHIP_REV_Bx))
7356 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC; 7536 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7357 7537
7538 bnx2_init_fw_cap(bp);
7539
7358 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || 7540 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7359 (CHIP_ID(bp) == CHIP_ID_5708_B0) || 7541 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7360 (CHIP_ID(bp) == CHIP_ID_5708_B1)) { 7542 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
@@ -7451,15 +7633,19 @@ static void __devinit
7451bnx2_init_napi(struct bnx2 *bp) 7633bnx2_init_napi(struct bnx2 *bp)
7452{ 7634{
7453 int i; 7635 int i;
7454 struct bnx2_napi *bnapi;
7455 7636
7456 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { 7637 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7457 bnapi = &bp->bnx2_napi[i]; 7638 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7639 int (*poll)(struct napi_struct *, int);
7640
7641 if (i == 0)
7642 poll = bnx2_poll;
7643 else
7644 poll = bnx2_poll_msix;
7645
7646 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7458 bnapi->bp = bp; 7647 bnapi->bp = bp;
7459 } 7648 }
7460 netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
7461 netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
7462 64);
7463} 7649}
7464 7650
7465static int __devinit 7651static int __devinit
@@ -7476,7 +7662,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7476 printk(KERN_INFO "%s", version); 7662 printk(KERN_INFO "%s", version);
7477 7663
7478 /* dev zeroed in init_etherdev */ 7664 /* dev zeroed in init_etherdev */
7479 dev = alloc_etherdev(sizeof(*bp)); 7665 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
7480 7666
7481 if (!dev) 7667 if (!dev)
7482 return -ENOMEM; 7668 return -ENOMEM;
@@ -7491,7 +7677,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7491 dev->hard_start_xmit = bnx2_start_xmit; 7677 dev->hard_start_xmit = bnx2_start_xmit;
7492 dev->stop = bnx2_close; 7678 dev->stop = bnx2_close;
7493 dev->get_stats = bnx2_get_stats; 7679 dev->get_stats = bnx2_get_stats;
7494 dev->set_multicast_list = bnx2_set_rx_mode; 7680 dev->set_rx_mode = bnx2_set_rx_mode;
7495 dev->do_ioctl = bnx2_ioctl; 7681 dev->do_ioctl = bnx2_ioctl;
7496 dev->set_mac_address = bnx2_change_mac_addr; 7682 dev->set_mac_address = bnx2_change_mac_addr;
7497 dev->change_mtu = bnx2_change_mtu; 7683 dev->change_mtu = bnx2_change_mtu;
@@ -7612,11 +7798,97 @@ bnx2_resume(struct pci_dev *pdev)
7612 7798
7613 bnx2_set_power_state(bp, PCI_D0); 7799 bnx2_set_power_state(bp, PCI_D0);
7614 netif_device_attach(dev); 7800 netif_device_attach(dev);
7615 bnx2_init_nic(bp); 7801 bnx2_init_nic(bp, 1);
7616 bnx2_netif_start(bp); 7802 bnx2_netif_start(bp);
7617 return 0; 7803 return 0;
7618} 7804}
7619 7805
7806/**
7807 * bnx2_io_error_detected - called when PCI error is detected
7808 * @pdev: Pointer to PCI device
7809 * @state: The current pci connection state
7810 *
7811 * This function is called after a PCI bus error affecting
7812 * this device has been detected.
7813 */
7814static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7815 pci_channel_state_t state)
7816{
7817 struct net_device *dev = pci_get_drvdata(pdev);
7818 struct bnx2 *bp = netdev_priv(dev);
7819
7820 rtnl_lock();
7821 netif_device_detach(dev);
7822
7823 if (netif_running(dev)) {
7824 bnx2_netif_stop(bp);
7825 del_timer_sync(&bp->timer);
7826 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7827 }
7828
7829 pci_disable_device(pdev);
7830 rtnl_unlock();
7831
 7832 /* Request a slot reset. */
7833 return PCI_ERS_RESULT_NEED_RESET;
7834}
7835
7836/**
7837 * bnx2_io_slot_reset - called after the pci bus has been reset.
7838 * @pdev: Pointer to PCI device
7839 *
 7840 * Restart the card from scratch, as if from a cold boot.
7841 */
7842static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7843{
7844 struct net_device *dev = pci_get_drvdata(pdev);
7845 struct bnx2 *bp = netdev_priv(dev);
7846
7847 rtnl_lock();
7848 if (pci_enable_device(pdev)) {
7849 dev_err(&pdev->dev,
7850 "Cannot re-enable PCI device after reset.\n");
7851 rtnl_unlock();
7852 return PCI_ERS_RESULT_DISCONNECT;
7853 }
7854 pci_set_master(pdev);
7855 pci_restore_state(pdev);
7856
7857 if (netif_running(dev)) {
7858 bnx2_set_power_state(bp, PCI_D0);
7859 bnx2_init_nic(bp, 1);
7860 }
7861
7862 rtnl_unlock();
7863 return PCI_ERS_RESULT_RECOVERED;
7864}
7865
7866/**
7867 * bnx2_io_resume - called when traffic can start flowing again.
7868 * @pdev: Pointer to PCI device
7869 *
7870 * This callback is called when the error recovery driver tells us that
 7871 * it's OK to resume normal operation.
7872 */
7873static void bnx2_io_resume(struct pci_dev *pdev)
7874{
7875 struct net_device *dev = pci_get_drvdata(pdev);
7876 struct bnx2 *bp = netdev_priv(dev);
7877
7878 rtnl_lock();
7879 if (netif_running(dev))
7880 bnx2_netif_start(bp);
7881
7882 netif_device_attach(dev);
7883 rtnl_unlock();
7884}
7885
7886static struct pci_error_handlers bnx2_err_handler = {
7887 .error_detected = bnx2_io_error_detected,
7888 .slot_reset = bnx2_io_slot_reset,
7889 .resume = bnx2_io_resume,
7890};
7891
7620static struct pci_driver bnx2_pci_driver = { 7892static struct pci_driver bnx2_pci_driver = {
7621 .name = DRV_MODULE_NAME, 7893 .name = DRV_MODULE_NAME,
7622 .id_table = bnx2_pci_tbl, 7894 .id_table = bnx2_pci_tbl,
@@ -7624,6 +7896,7 @@ static struct pci_driver bnx2_pci_driver = {
7624 .remove = __devexit_p(bnx2_remove_one), 7896 .remove = __devexit_p(bnx2_remove_one),
7625 .suspend = bnx2_suspend, 7897 .suspend = bnx2_suspend,
7626 .resume = bnx2_resume, 7898 .resume = bnx2_resume,
7899 .err_handler = &bnx2_err_handler,
7627}; 7900};
7628 7901
7629static int __init bnx2_init(void) 7902static int __init bnx2_init(void)