author		Michael Chan <mchan@broadcom.com>	2008-06-19 19:38:19 -0400
committer	David S. Miller <davem@davemloft.net>	2008-06-19 19:38:19 -0400
commit		bb4f98abf590cf9899017f14f1a54984f02a0009 (patch)
tree		602da9b7cab22a16f2f1fd77f5db51a3de44b085 /drivers/net/bnx2.c
parent		35e9010b22503f42cbf88144ffe1feff90ea3835 (diff)
bnx2: Put rx ring variables in a separate struct.
In preparation for multi-ring support, rx ring variables are now put in a
separate bnx2_rx_ring_info struct.  With MSI-X, we can support multiple rx
rings.

The functions to allocate/free rx memory and to initialize rx rings are
now modified to handle multiple rings.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Benjamin Li <benli@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
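The new per-ring state is embedded in each bnx2_napi as bnapi->rx_ring, so
every MSI-X vector carries its own producer/consumer indices and mailbox
addresses.  The struct definition itself lands in bnx2.h, which is outside
this diffstat; the sketch below is reconstructed from the fields this diff
converts, so the field order and the MAX_RX_RINGS/MAX_RX_PG_RINGS array
bounds are assumptions, not the committed layout:

	/* Reconstructed sketch, not the bnx2.h definition. */
	struct bnx2_rx_ring_info {
		u32		rx_prod_bseq;	/* running byte count of posted rx buffers */
		u16		rx_prod;	/* sw producer index, rx bd ring */
		u16		rx_cons;	/* sw consumer index, rx bd ring */

		u32		rx_bidx_addr;	/* mailbox: BNX2_L2CTX_HOST_BDIDX for this cid */
		u32		rx_bseq_addr;	/* mailbox: BNX2_L2CTX_HOST_BSEQ for this cid */
		u32		rx_pg_bidx_addr; /* mailbox: BNX2_L2CTX_HOST_PG_BDIDX */

		u16		rx_pg_prod;	/* sw producer index, page ring */
		u16		rx_pg_cons;	/* sw consumer index, page ring */

		struct sw_bd	*rx_buf_ring;	/* vmalloc'ed shadow of the rx bd ring */
		struct rx_bd	*rx_desc_ring[MAX_RX_RINGS];
		struct sw_pg	*rx_pg_ring;	/* vmalloc'ed shadow of the page ring */
		struct rx_bd	*rx_pg_desc_ring[MAX_RX_PG_RINGS];

		dma_addr_t	rx_desc_mapping[MAX_RX_RINGS];
		dma_addr_t	rx_pg_desc_mapping[MAX_RX_PG_RINGS];
	};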
Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--	drivers/net/bnx2.c	390
1 file changed, 235 insertions(+), 155 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 61f2b4fc4275..4360528ded39 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -515,6 +515,40 @@ bnx2_free_tx_mem(struct bnx2 *bp)
 	}
 }
 
+static void
+bnx2_free_rx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_rx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+		int j;
+
+		for (j = 0; j < bp->rx_max_ring; j++) {
+			if (rxr->rx_desc_ring[j])
+				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
+						    rxr->rx_desc_ring[j],
+						    rxr->rx_desc_mapping[j]);
+			rxr->rx_desc_ring[j] = NULL;
+		}
+		if (rxr->rx_buf_ring)
+			vfree(rxr->rx_buf_ring);
+		rxr->rx_buf_ring = NULL;
+
+		for (j = 0; j < bp->rx_max_pg_ring; j++) {
+			if (rxr->rx_pg_desc_ring[j])
+				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
+						    rxr->rx_pg_desc_ring[j],
+						    rxr->rx_pg_desc_mapping[j]);
+			rxr->rx_pg_desc_ring[j] = NULL;
+		}
+		if (rxr->rx_pg_ring)
+			vfree(rxr->rx_pg_ring);
+		rxr->rx_pg_ring = NULL;
+	}
+}
+
 static int
 bnx2_alloc_tx_mem(struct bnx2 *bp)
 {
@@ -537,12 +571,62 @@ bnx2_alloc_tx_mem(struct bnx2 *bp)
 	return 0;
 }
 
+static int
+bnx2_alloc_rx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_rx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+		int j;
+
+		rxr->rx_buf_ring =
+			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
+		if (rxr->rx_buf_ring == NULL)
+			return -ENOMEM;
+
+		memset(rxr->rx_buf_ring, 0,
+		       SW_RXBD_RING_SIZE * bp->rx_max_ring);
+
+		for (j = 0; j < bp->rx_max_ring; j++) {
+			rxr->rx_desc_ring[j] =
+				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
+						     &rxr->rx_desc_mapping[j]);
+			if (rxr->rx_desc_ring[j] == NULL)
+				return -ENOMEM;
+
+		}
+
+		if (bp->rx_pg_ring_size) {
+			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
+						  bp->rx_max_pg_ring);
+			if (rxr->rx_pg_ring == NULL)
+				return -ENOMEM;
+
+			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
+			       bp->rx_max_pg_ring);
+		}
+
+		for (j = 0; j < bp->rx_max_pg_ring; j++) {
+			rxr->rx_pg_desc_ring[j] =
+				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
+						     &rxr->rx_pg_desc_mapping[j]);
+			if (rxr->rx_pg_desc_ring[j] == NULL)
+				return -ENOMEM;
+
+		}
+	}
+	return 0;
+}
+
 static void
 bnx2_free_mem(struct bnx2 *bp)
 {
 	int i;
 
 	bnx2_free_tx_mem(bp);
+	bnx2_free_rx_mem(bp);
 
 	for (i = 0; i < bp->ctx_pages; i++) {
 		if (bp->ctx_blk[i]) {
@@ -558,25 +642,6 @@ bnx2_free_mem(struct bnx2 *bp)
 		bp->status_blk = NULL;
 		bp->stats_blk = NULL;
 	}
-	for (i = 0; i < bp->rx_max_ring; i++) {
-		if (bp->rx_desc_ring[i])
-			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
-					    bp->rx_desc_ring[i],
-					    bp->rx_desc_mapping[i]);
-		bp->rx_desc_ring[i] = NULL;
-	}
-	vfree(bp->rx_buf_ring);
-	bp->rx_buf_ring = NULL;
-	for (i = 0; i < bp->rx_max_pg_ring; i++) {
-		if (bp->rx_pg_desc_ring[i])
-			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
-					    bp->rx_pg_desc_ring[i],
-					    bp->rx_pg_desc_mapping[i]);
-		bp->rx_pg_desc_ring[i] = NULL;
-	}
-	if (bp->rx_pg_ring)
-		vfree(bp->rx_pg_ring);
-	bp->rx_pg_ring = NULL;
 }
 
 static int
@@ -584,40 +649,6 @@ bnx2_alloc_mem(struct bnx2 *bp)
 {
 	int i, status_blk_size, err;
 
-	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
-	if (bp->rx_buf_ring == NULL)
-		goto alloc_mem_err;
-
-	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
-
-	for (i = 0; i < bp->rx_max_ring; i++) {
-		bp->rx_desc_ring[i] =
-			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
-					     &bp->rx_desc_mapping[i]);
-		if (bp->rx_desc_ring[i] == NULL)
-			goto alloc_mem_err;
-
-	}
-
-	if (bp->rx_pg_ring_size) {
-		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
-					 bp->rx_max_pg_ring);
-		if (bp->rx_pg_ring == NULL)
-			goto alloc_mem_err;
-
-		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
-		       bp->rx_max_pg_ring);
-	}
-
-	for (i = 0; i < bp->rx_max_pg_ring; i++) {
-		bp->rx_pg_desc_ring[i] =
-			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
-					     &bp->rx_pg_desc_mapping[i]);
-		if (bp->rx_pg_desc_ring[i] == NULL)
-			goto alloc_mem_err;
-
-	}
-
 	/* Combine status and statistics blocks into one allocation. */
 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
 	if (bp->flags & BNX2_FLAG_MSIX_CAP)
@@ -663,6 +694,10 @@ bnx2_alloc_mem(struct bnx2 *bp)
 		}
 	}
 
+	err = bnx2_alloc_rx_mem(bp);
+	if (err)
+		goto alloc_mem_err;
+
 	err = bnx2_alloc_tx_mem(bp);
 	if (err)
 		goto alloc_mem_err;
@@ -1026,9 +1061,9 @@ bnx2_copper_linkup(struct bnx2 *bp)
 }
 
 static void
-bnx2_init_rx_context0(struct bnx2 *bp)
+bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
 {
-	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
+	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
 
 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
 	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
@@ -1061,6 +1096,19 @@ bnx2_init_rx_context0(struct bnx2 *bp)
 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
 }
 
+static void
+bnx2_init_all_rx_contexts(struct bnx2 *bp)
+{
+	int i;
+	u32 cid;
+
+	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
+		if (i == 1)
+			cid = RX_RSS_CID;
+		bnx2_init_rx_context(bp, cid);
+	}
+}
+
 static int
 bnx2_set_mac_link(struct bnx2 *bp)
 {
@@ -1126,7 +1174,7 @@ bnx2_set_mac_link(struct bnx2 *bp)
 	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
 
 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
-		bnx2_init_rx_context0(bp);
+		bnx2_init_all_rx_contexts(bp);
 
 	return 0;
 }
@@ -2398,12 +2446,12 @@ bnx2_set_mac_addr(struct bnx2 *bp)
 }
 
 static inline int
-bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
+bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 {
 	dma_addr_t mapping;
-	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
+	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
 	struct rx_bd *rxbd =
-		&bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
+		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
 	struct page *page = alloc_page(GFP_ATOMIC);
 
 	if (!page)
@@ -2418,9 +2466,9 @@ bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
 }
 
 static void
-bnx2_free_rx_page(struct bnx2 *bp, u16 index)
+bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 {
-	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
+	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
 	struct page *page = rx_pg->page;
 
 	if (!page)
@@ -2434,12 +2482,12 @@ bnx2_free_rx_page(struct bnx2 *bp, u16 index)
 }
 
 static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 {
 	struct sk_buff *skb;
-	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
+	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
 	dma_addr_t mapping;
-	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
+	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
 	unsigned long align;
 
 	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
@@ -2459,7 +2507,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
 
-	bnapi->rx_prod_bseq += bp->rx_buf_use_size;
+	rxr->rx_prod_bseq += bp->rx_buf_use_size;
 
 	return 0;
 }
@@ -2597,23 +2645,23 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 }
 
 static void
-bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
+bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 			struct sk_buff *skb, int count)
 {
 	struct sw_pg *cons_rx_pg, *prod_rx_pg;
 	struct rx_bd *cons_bd, *prod_bd;
 	dma_addr_t mapping;
 	int i;
-	u16 hw_prod = bnapi->rx_pg_prod, prod;
-	u16 cons = bnapi->rx_pg_cons;
+	u16 hw_prod = rxr->rx_pg_prod, prod;
+	u16 cons = rxr->rx_pg_cons;
 
 	for (i = 0; i < count; i++) {
 		prod = RX_PG_RING_IDX(hw_prod);
 
-		prod_rx_pg = &bp->rx_pg_ring[prod];
-		cons_rx_pg = &bp->rx_pg_ring[cons];
-		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
-		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+		prod_rx_pg = &rxr->rx_pg_ring[prod];
+		cons_rx_pg = &rxr->rx_pg_ring[cons];
+		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 
 		if (i == 0 && skb) {
 			struct page *page;
@@ -2642,25 +2690,25 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
 		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
 		hw_prod = NEXT_RX_BD(hw_prod);
 	}
-	bnapi->rx_pg_prod = hw_prod;
-	bnapi->rx_pg_cons = cons;
+	rxr->rx_pg_prod = hw_prod;
+	rxr->rx_pg_cons = cons;
 }
 
 static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
-		  u16 cons, u16 prod)
+bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+		  struct sk_buff *skb, u16 cons, u16 prod)
 {
 	struct sw_bd *cons_rx_buf, *prod_rx_buf;
 	struct rx_bd *cons_bd, *prod_bd;
 
-	cons_rx_buf = &bp->rx_buf_ring[cons];
-	prod_rx_buf = &bp->rx_buf_ring[prod];
+	cons_rx_buf = &rxr->rx_buf_ring[cons];
+	prod_rx_buf = &rxr->rx_buf_ring[prod];
 
 	pci_dma_sync_single_for_device(bp->pdev,
 		pci_unmap_addr(cons_rx_buf, mapping),
 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
-	bnapi->rx_prod_bseq += bp->rx_buf_use_size;
+	rxr->rx_prod_bseq += bp->rx_buf_use_size;
 
 	prod_rx_buf->skb = skb;
 
@@ -2670,28 +2718,28 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 	pci_unmap_addr_set(prod_rx_buf, mapping,
 			   pci_unmap_addr(cons_rx_buf, mapping));
 
-	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
-	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
 
 static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
 	    u32 ring_idx)
 {
 	int err;
 	u16 prod = ring_idx & 0xffff;
 
-	err = bnx2_alloc_rx_skb(bp, bnapi, prod);
+	err = bnx2_alloc_rx_skb(bp, rxr, prod);
 	if (unlikely(err)) {
-		bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
+		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
 		if (hdr_len) {
 			unsigned int raw_len = len + 4;
 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
 
-			bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
+			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
 		}
 		return err;
 	}
@@ -2706,8 +2754,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 	} else {
 		unsigned int i, frag_len, frag_size, pages;
 		struct sw_pg *rx_pg;
-		u16 pg_cons = bnapi->rx_pg_cons;
-		u16 pg_prod = bnapi->rx_pg_prod;
+		u16 pg_cons = rxr->rx_pg_cons;
+		u16 pg_prod = rxr->rx_pg_prod;
 
 		frag_size = len + 4 - hdr_len;
 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
@@ -2718,9 +2766,9 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 			if (unlikely(frag_len <= 4)) {
 				unsigned int tail = 4 - frag_len;
 
-				bnapi->rx_pg_cons = pg_cons;
-				bnapi->rx_pg_prod = pg_prod;
-				bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
+				rxr->rx_pg_cons = pg_cons;
+				rxr->rx_pg_prod = pg_prod;
+				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
 							pages - i);
 				skb->len -= tail;
 				if (i == 0) {
@@ -2734,7 +2782,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 			}
 			return 0;
 		}
-		rx_pg = &bp->rx_pg_ring[pg_cons];
+		rx_pg = &rxr->rx_pg_ring[pg_cons];
 
 		pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
 			       PAGE_SIZE, PCI_DMA_FROMDEVICE);
@@ -2745,11 +2793,12 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 		skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
 		rx_pg->page = NULL;
 
-		err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
+		err = bnx2_alloc_rx_page(bp, rxr,
+					 RX_PG_RING_IDX(pg_prod));
 		if (unlikely(err)) {
-			bnapi->rx_pg_cons = pg_cons;
-			bnapi->rx_pg_prod = pg_prod;
-			bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
+			rxr->rx_pg_cons = pg_cons;
+			rxr->rx_pg_prod = pg_prod;
+			bnx2_reuse_rx_skb_pages(bp, rxr, skb,
 						pages - i);
 			return err;
 		}
@@ -2762,8 +2811,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 			pg_prod = NEXT_RX_BD(pg_prod);
 			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
 		}
-		bnapi->rx_pg_prod = pg_prod;
-		bnapi->rx_pg_cons = pg_cons;
+		rxr->rx_pg_prod = pg_prod;
+		rxr->rx_pg_cons = pg_cons;
 	}
 	return 0;
 }
@@ -2771,7 +2820,12 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 static inline u16
 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
 {
-	u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
+	u16 cons;
+
+	if (bnapi->int_num == 0)
+		cons = bnapi->status_blk->status_rx_quick_consumer_index0;
+	else
+		cons = bnapi->status_blk_msix->status_rx_quick_consumer_index;
 
 	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
 		cons++;
@@ -2781,13 +2835,14 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
 static int
 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 {
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
 	struct l2_fhdr *rx_hdr;
 	int rx_pkt = 0, pg_ring_used = 0;
 
 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
-	sw_cons = bnapi->rx_cons;
-	sw_prod = bnapi->rx_prod;
+	sw_cons = rxr->rx_cons;
+	sw_prod = rxr->rx_prod;
 
 	/* Memory barrier necessary as speculative reads of the rx
 	 * buffer can be ahead of the index in the status block
@@ -2803,7 +2858,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		sw_ring_cons = RX_RING_IDX(sw_cons);
 		sw_ring_prod = RX_RING_IDX(sw_prod);
 
-		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
+		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
 		skb = rx_buf->skb;
 
 		rx_buf->skb = NULL;
@@ -2824,7 +2879,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			      L2_FHDR_ERRORS_TOO_SHORT |
 			      L2_FHDR_ERRORS_GIANT_FRAME)) {
 
-			bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
+			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
 					  sw_ring_prod);
 			goto next_rx;
 		}
@@ -2844,7 +2899,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 			new_skb = netdev_alloc_skb(bp->dev, len + 2);
 			if (new_skb == NULL) {
-				bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
+				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
 						  sw_ring_prod);
 				goto next_rx;
 			}
@@ -2856,11 +2911,11 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			skb_reserve(new_skb, 2);
 			skb_put(new_skb, len);
 
-			bnx2_reuse_rx_skb(bp, bnapi, skb,
+			bnx2_reuse_rx_skb(bp, rxr, skb,
 					  sw_ring_cons, sw_ring_prod);
 
 			skb = new_skb;
-		} else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
+		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
 			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
 			goto next_rx;
 
@@ -2909,16 +2964,15 @@ next_rx:
 			rmb();
 		}
 	}
-	bnapi->rx_cons = sw_cons;
-	bnapi->rx_prod = sw_prod;
+	rxr->rx_cons = sw_cons;
+	rxr->rx_prod = sw_prod;
 
 	if (pg_ring_used)
-		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
-			 bnapi->rx_pg_prod);
+		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
 
-	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
+	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
 
-	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
+	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
 
 	mmiowb();
 
@@ -3032,9 +3086,10 @@ static inline int
 bnx2_has_work(struct bnx2_napi *bnapi)
 {
 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 	struct status_block *sblk = bnapi->status_blk;
 
-	if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
+	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
 		return 1;
 
@@ -3073,6 +3128,7 @@ static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
 			  int work_done, int budget)
 {
 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 	struct status_block *sblk = bnapi->status_blk;
 	u32 status_attn_bits = sblk->status_attn_bits;
 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
@@ -3093,7 +3149,7 @@ static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
 		bnx2_tx_int(bp, bnapi, 0);
 
-	if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
+	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
 
 	return work_done;
@@ -4532,19 +4588,21 @@ bnx2_clear_ring_states(struct bnx2 *bp)
 {
 	struct bnx2_napi *bnapi;
 	struct bnx2_tx_ring_info *txr;
+	struct bnx2_rx_ring_info *rxr;
 	int i;
 
 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
 		bnapi = &bp->bnx2_napi[i];
 		txr = &bnapi->tx_ring;
+		rxr = &bnapi->rx_ring;
 
 		txr->tx_cons = 0;
 		txr->hw_tx_cons = 0;
-		bnapi->rx_prod_bseq = 0;
-		bnapi->rx_prod = 0;
-		bnapi->rx_cons = 0;
-		bnapi->rx_pg_prod = 0;
-		bnapi->rx_pg_cons = 0;
+		rxr->rx_prod_bseq = 0;
+		rxr->rx_prod = 0;
+		rxr->rx_cons = 0;
+		rxr->rx_pg_prod = 0;
+		rxr->rx_pg_cons = 0;
 	}
 }
 
@@ -4635,17 +4693,25 @@ bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
 }
 
 static void
-bnx2_init_rx_ring(struct bnx2 *bp)
+bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 {
 	int i;
 	u16 prod, ring_prod;
-	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
-	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	u32 cid, rx_cid_addr, val;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+	if (ring_num == 0)
+		cid = RX_CID;
+	else
+		cid = RX_RSS_CID + ring_num - 1;
 
-	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
+	rx_cid_addr = GET_CID_ADDR(cid);
+
+	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
 			     bp->rx_buf_use_size, bp->rx_max_ring);
 
-	bnx2_init_rx_context0(bp);
+	bnx2_init_rx_context(bp, cid);
 
 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
 		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
@@ -4654,54 +4720,56 @@ bnx2_init_rx_ring(struct bnx2 *bp)
 
 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
 	if (bp->rx_pg_ring_size) {
-		bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
-				     bp->rx_pg_desc_mapping,
+		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
+				     rxr->rx_pg_desc_mapping,
 				     PAGE_SIZE, bp->rx_max_pg_ring);
 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
 			    BNX2_L2CTX_RBDC_JUMBO_KEY);
 
-		val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
+		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
 
-		val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
+		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
 
 		if (CHIP_NUM(bp) == CHIP_NUM_5709)
 			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
 	}
 
-	val = (u64) bp->rx_desc_mapping[0] >> 32;
+	val = (u64) rxr->rx_desc_mapping[0] >> 32;
 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
 
-	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
+	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
 
-	ring_prod = prod = bnapi->rx_pg_prod;
+	ring_prod = prod = rxr->rx_pg_prod;
 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
-		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
+		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
 			break;
 		prod = NEXT_RX_BD(prod);
 		ring_prod = RX_PG_RING_IDX(prod);
 	}
-	bnapi->rx_pg_prod = prod;
+	rxr->rx_pg_prod = prod;
 
-	ring_prod = prod = bnapi->rx_prod;
+	ring_prod = prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
+		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
 			break;
-		}
 		prod = NEXT_RX_BD(prod);
 		ring_prod = RX_RING_IDX(prod);
 	}
-	bnapi->rx_prod = prod;
+	rxr->rx_prod = prod;
+
+	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
+	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
+	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
 
-	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
-		 bnapi->rx_pg_prod);
-	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
+	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
+	REG_WR16(bp, rxr->rx_bidx_addr, prod);
 
-	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
+	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
 }
 
 static void
@@ -4719,7 +4787,8 @@ bnx2_init_all_rings(struct bnx2 *bp)
 		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
 		       (TX_TSS_CID << 7));
 
-	bnx2_init_rx_ring(bp);
+	for (i = 0; i < bp->num_rx_rings; i++)
+		bnx2_init_rx_ring(bp, i);
 }
 
 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
@@ -4828,25 +4897,33 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
 {
 	int i;
 
-	if (bp->rx_buf_ring == NULL)
-		return;
+	for (i = 0; i < bp->num_rx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+		int j;
 
-	for (i = 0; i < bp->rx_max_ring_idx; i++) {
-		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
-		struct sk_buff *skb = rx_buf->skb;
+		if (rxr->rx_buf_ring == NULL)
+			return;
 
-		if (skb == NULL)
-			continue;
+		for (j = 0; j < bp->rx_max_ring_idx; j++) {
+			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
+			struct sk_buff *skb = rx_buf->skb;
 
-		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
-				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+			if (skb == NULL)
+				continue;
 
-		rx_buf->skb = NULL;
+			pci_unmap_single(bp->pdev,
+					 pci_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_use_size,
+					 PCI_DMA_FROMDEVICE);
 
-		dev_kfree_skb(skb);
+			rx_buf->skb = NULL;
+
+			dev_kfree_skb(skb);
+		}
+		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
+			bnx2_free_rx_page(bp, rxr, j);
 	}
-	for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
-		bnx2_free_rx_page(bp, i);
 }
 
 static void
@@ -5143,10 +5220,12 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	int ret = -ENODEV;
 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 
 	tx_napi = bnapi;
 
 	txr = &tx_napi->tx_ring;
+	rxr = &bnapi->rx_ring;
 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
 		bp->loopback = MAC_LOOPBACK;
 		bnx2_set_mac_loopback(bp);
@@ -5218,7 +5297,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 		goto loopback_test_done;
 	}
 
-	rx_buf = &bp->rx_buf_ring[rx_start_idx];
+	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
 	rx_skb = rx_buf->skb;
 
 	rx_hdr = (struct l2_fhdr *) rx_skb->data;
@@ -5631,6 +5710,7 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
 		}
 	}
 	bp->num_tx_rings = 1;
+	bp->num_rx_rings = 1;
 }
 
 /* Called with rtnl_lock */