author		Michael Chan <mchan@broadcom.com>	2008-06-19 19:37:42 -0400
committer	David S. Miller <davem@davemloft.net>	2008-06-19 19:37:42 -0400
commit		35e9010b22503f42cbf88144ffe1feff90ea3835 (patch)
tree		ee2c0e5ae61ee6a7508cce38ed2a79ce52049d38 /drivers/net/bnx2.c
parent		4497b0763cb1afae463f5e144c28b5d806e28b60 (diff)
bnx2: Put tx ring variables in a separate struct.
In preparation for multi-ring support, tx ring variables are now put in a
separate bnx2_tx_ring_info struct.  Multi tx ring will not be enabled until
it is fully supported by the stack.  Only 1 tx ring will be used at the
moment.  The functions to allocate/free tx memory and to initialize tx rings
are now modified to handle multiple rings.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Benjamin Li <benli@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
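For context: this page shows only the bnx2.c half of the patch; struct
bnx2_tx_ring_info itself is declared in drivers/net/bnx2.h, which is outside
this diffstat.  Pieced together from the fields the hunks below reference, the
per-ring state presumably looks roughly like this sketch (field order and
exact widths here are assumptions, not the verbatim header change):

	struct bnx2_tx_ring_info {
		u32		tx_prod_bseq;	/* running byte count, written to the bseq doorbell */
		u16		tx_prod;	/* producer index, advanced by bnx2_start_xmit() */
		u32		tx_bidx_addr;	/* mailbox offset of the producer-index doorbell */
		u32		tx_bseq_addr;	/* mailbox offset of the byte-sequence doorbell */

		struct tx_bd	*tx_desc_ring;	/* hardware descriptor ring (DMA-coherent) */
		struct sw_bd	*tx_buf_ring;	/* driver-side skb/DMA bookkeeping (kzalloc'd) */

		u16		tx_cons;	/* consumer index, advanced by bnx2_tx_int() */
		u16		hw_tx_cons;	/* last consumer index read from the status block */

		dma_addr_t	tx_desc_mapping; /* DMA address of tx_desc_ring */
	};

Each bnx2_napi instance embeds one of these as bnapi->tx_ring, which is why
the tx fields move out of struct bnx2 and struct bnx2_napi in the hunks below.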
Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--	drivers/net/bnx2.c	294
1 file changed, 181 insertions(+), 113 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 2c52d2c7c495..61f2b4fc4275 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -226,7 +226,7 @@ static struct flash_spec flash_5709 = {
 
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
-static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
+static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 {
 	u32 diff;
 
@@ -235,7 +235,7 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
 	/* The ring uses 256 indices for 255 entries, one of them
 	 * needs to be skipped.
 	 */
-	diff = bp->tx_prod - bnapi->tx_cons;
+	diff = txr->tx_prod - txr->tx_cons;
 	if (unlikely(diff >= TX_DESC_CNT)) {
 		diff &= 0xffff;
 		if (diff == TX_DESC_CNT)
@@ -496,10 +496,54 @@ bnx2_netif_start(struct bnx2 *bp)
 }
 
 static void
+bnx2_free_tx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_tx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+
+		if (txr->tx_desc_ring) {
+			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
+					    txr->tx_desc_ring,
+					    txr->tx_desc_mapping);
+			txr->tx_desc_ring = NULL;
+		}
+		kfree(txr->tx_buf_ring);
+		txr->tx_buf_ring = NULL;
+	}
+}
+
+static int
+bnx2_alloc_tx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_tx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+
+		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
+		if (txr->tx_buf_ring == NULL)
+			return -ENOMEM;
+
+		txr->tx_desc_ring =
+			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
+					     &txr->tx_desc_mapping);
+		if (txr->tx_desc_ring == NULL)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static void
 bnx2_free_mem(struct bnx2 *bp)
 {
 	int i;
 
+	bnx2_free_tx_mem(bp);
+
 	for (i = 0; i < bp->ctx_pages; i++) {
 		if (bp->ctx_blk[i]) {
 			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
@@ -514,13 +558,6 @@ bnx2_free_mem(struct bnx2 *bp)
 		bp->status_blk = NULL;
 		bp->stats_blk = NULL;
 	}
-	if (bp->tx_desc_ring) {
-		pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
-				    bp->tx_desc_ring, bp->tx_desc_mapping);
-		bp->tx_desc_ring = NULL;
-	}
-	kfree(bp->tx_buf_ring);
-	bp->tx_buf_ring = NULL;
 	for (i = 0; i < bp->rx_max_ring; i++) {
 		if (bp->rx_desc_ring[i])
 			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
@@ -545,16 +582,7 @@ bnx2_free_mem(struct bnx2 *bp)
 static int
 bnx2_alloc_mem(struct bnx2 *bp)
 {
-	int i, status_blk_size;
-
-	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
-	if (bp->tx_buf_ring == NULL)
-		return -ENOMEM;
-
-	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
-						&bp->tx_desc_mapping);
-	if (bp->tx_desc_ring == NULL)
-		goto alloc_mem_err;
+	int i, status_blk_size, err;
 
 	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
 	if (bp->rx_buf_ring == NULL)
@@ -634,6 +662,11 @@ bnx2_alloc_mem(struct bnx2 *bp)
 			goto alloc_mem_err;
 		}
 	}
+
+	err = bnx2_alloc_tx_mem(bp);
+	if (err)
+		goto alloc_mem_err;
+
 	return 0;
 
 alloc_mem_err:
@@ -2483,11 +2516,12 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
 static int
 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 {
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 	u16 hw_cons, sw_cons, sw_ring_cons;
 	int tx_pkt = 0;
 
 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
-	sw_cons = bnapi->tx_cons;
+	sw_cons = txr->tx_cons;
 
 	while (sw_cons != hw_cons) {
 		struct sw_bd *tx_buf;
@@ -2496,7 +2530,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 		sw_ring_cons = TX_RING_IDX(sw_cons);
 
-		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
+		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
 		skb = tx_buf->skb;
 
 		/* partial BD completions possible with TSO packets */
@@ -2526,7 +2560,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2526 2560
2527 pci_unmap_page(bp->pdev, 2561 pci_unmap_page(bp->pdev,
2528 pci_unmap_addr( 2562 pci_unmap_addr(
2529 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)], 2563 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2530 mapping), 2564 mapping),
2531 skb_shinfo(skb)->frags[i].size, 2565 skb_shinfo(skb)->frags[i].size,
2532 PCI_DMA_TODEVICE); 2566 PCI_DMA_TODEVICE);
@@ -2542,8 +2576,8 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		hw_cons = bnx2_get_hw_tx_cons(bnapi);
 	}
 
-	bnapi->hw_tx_cons = hw_cons;
-	bnapi->tx_cons = sw_cons;
+	txr->hw_tx_cons = hw_cons;
+	txr->tx_cons = sw_cons;
 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
 	 * before checking for netif_queue_stopped().  Without the
 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
@@ -2552,10 +2586,10 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 	smp_mb();
 
 	if (unlikely(netif_queue_stopped(bp->dev)) &&
-	    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
+	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
 		netif_tx_lock(bp->dev);
 		if ((netif_queue_stopped(bp->dev)) &&
-		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
+		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
 			netif_wake_queue(bp->dev);
 		netif_tx_unlock(bp->dev);
 	}
@@ -2997,10 +3031,11 @@ bnx2_tx_msix(int irq, void *dev_instance)
 static inline int
 bnx2_has_work(struct bnx2_napi *bnapi)
 {
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 	struct status_block *sblk = bnapi->status_blk;
 
 	if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
-	    (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
+	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
 		return 1;
 
 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
@@ -3014,6 +3049,7 @@ static int bnx2_tx_poll(struct napi_struct *napi, int budget)
 {
 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
 	struct bnx2 *bp = bnapi->bp;
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 	int work_done = 0;
 	struct status_block_msix *sblk = bnapi->status_blk_msix;
 
@@ -3024,7 +3060,7 @@ static int bnx2_tx_poll(struct napi_struct *napi, int budget)
 
 		bnapi->last_status_idx = sblk->status_idx;
 		rmb();
-	} while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
+	} while (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons);
 
 	netif_rx_complete(bp->dev, napi);
 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
@@ -3036,6 +3072,7 @@ static int bnx2_tx_poll(struct napi_struct *napi, int budget)
 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
 			  int work_done, int budget)
 {
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 	struct status_block *sblk = bnapi->status_blk;
 	u32 status_attn_bits = sblk->status_attn_bits;
 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
@@ -3053,7 +3090,7 @@ static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
 		REG_RD(bp, BNX2_HC_COMMAND);
 	}
 
-	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
+	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
 		bnx2_tx_int(bp, bnapi, 0);
 
 	if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
@@ -4494,13 +4531,15 @@ static void
 bnx2_clear_ring_states(struct bnx2 *bp)
 {
 	struct bnx2_napi *bnapi;
+	struct bnx2_tx_ring_info *txr;
 	int i;
 
 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
 		bnapi = &bp->bnx2_napi[i];
+		txr = &bnapi->tx_ring;
 
-		bnapi->tx_cons = 0;
-		bnapi->hw_tx_cons = 0;
+		txr->tx_cons = 0;
+		txr->hw_tx_cons = 0;
 		bnapi->rx_prod_bseq = 0;
 		bnapi->rx_prod = 0;
 		bnapi->rx_cons = 0;
@@ -4510,7 +4549,7 @@ bnx2_clear_ring_states(struct bnx2 *bp)
 }
 
 static void
-bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
+bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
 {
 	u32 val, offset0, offset1, offset2, offset3;
 	u32 cid_addr = GET_CID_ADDR(cid);
@@ -4532,43 +4571,43 @@ bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
 
-	val = (u64) bp->tx_desc_mapping >> 32;
+	val = (u64) txr->tx_desc_mapping >> 32;
 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
 
-	val = (u64) bp->tx_desc_mapping & 0xffffffff;
+	val = (u64) txr->tx_desc_mapping & 0xffffffff;
 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
 }
 
 static void
-bnx2_init_tx_ring(struct bnx2 *bp)
+bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
 {
 	struct tx_bd *txbd;
 	u32 cid = TX_CID;
 	struct bnx2_napi *bnapi;
+	struct bnx2_tx_ring_info *txr;
 
-	bp->tx_vec = 0;
-	if (bp->flags & BNX2_FLAG_USING_MSIX) {
-		cid = TX_TSS_CID;
-		bp->tx_vec = BNX2_TX_VEC;
-		REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
-		       (TX_TSS_CID << 7));
-	}
-	bnapi = &bp->bnx2_napi[bp->tx_vec];
+	bnapi = &bp->bnx2_napi[ring_num];
+	txr = &bnapi->tx_ring;
+
+	if (ring_num == 0)
+		cid = TX_CID;
+	else
+		cid = TX_TSS_CID + ring_num - 1;
 
 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
 
-	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
+	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
 
-	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
-	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
+	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
+	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
 
-	bp->tx_prod = 0;
-	bp->tx_prod_bseq = 0;
+	txr->tx_prod = 0;
+	txr->tx_prod_bseq = 0;
 
-	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
-	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
+	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
+	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
 
-	bnx2_init_tx_context(bp, cid);
+	bnx2_init_tx_context(bp, cid, txr);
 }
 
 static void
@@ -4665,6 +4704,24 @@ bnx2_init_rx_ring(struct bnx2 *bp)
 	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
 }
 
+static void
+bnx2_init_all_rings(struct bnx2 *bp)
+{
+	int i;
+
+	bnx2_clear_ring_states(bp);
+
+	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
+	for (i = 0; i < bp->num_tx_rings; i++)
+		bnx2_init_tx_ring(bp, i);
+
+	if (bp->num_tx_rings > 1)
+		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
+		       (TX_TSS_CID << 7));
+
+	bnx2_init_rx_ring(bp);
+}
+
 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
 {
 	u32 max, num_rings = 1;
@@ -4728,36 +4785,42 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 {
 	int i;
 
-	if (bp->tx_buf_ring == NULL)
-		return;
-
-	for (i = 0; i < TX_DESC_CNT; ) {
-		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
-		struct sk_buff *skb = tx_buf->skb;
-		int j, last;
+	for (i = 0; i < bp->num_tx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+		int j;
 
-		if (skb == NULL) {
-			i++;
+		if (txr->tx_buf_ring == NULL)
 			continue;
-		}
 
-		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+		for (j = 0; j < TX_DESC_CNT; ) {
+			struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
+			struct sk_buff *skb = tx_buf->skb;
+			int k, last;
+
+			if (skb == NULL) {
+				j++;
+				continue;
+			}
+
+			pci_unmap_single(bp->pdev,
+				pci_unmap_addr(tx_buf, mapping),
 			skb_headlen(skb), PCI_DMA_TODEVICE);
 
 		tx_buf->skb = NULL;
 
 		last = skb_shinfo(skb)->nr_frags;
-		for (j = 0; j < last; j++) {
-			tx_buf = &bp->tx_buf_ring[i + j + 1];
+		for (k = 0; k < last; k++) {
+			tx_buf = &txr->tx_buf_ring[j + k + 1];
 			pci_unmap_page(bp->pdev,
 				pci_unmap_addr(tx_buf, mapping),
 				skb_shinfo(skb)->frags[j].size,
 				PCI_DMA_TODEVICE);
+		}
+		dev_kfree_skb(skb);
+		j += k + 1;
 		}
-		dev_kfree_skb(skb);
-		i += j + 1;
 	}
-
 }
 
 static void
@@ -4806,9 +4869,7 @@ bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
 	if ((rc = bnx2_init_chip(bp)) != 0)
 		return rc;
 
-	bnx2_clear_ring_states(bp);
-	bnx2_init_tx_ring(bp);
-	bnx2_init_rx_ring(bp);
+	bnx2_init_all_rings(bp);
 	return 0;
 }
 
@@ -5081,11 +5142,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	struct l2_fhdr *rx_hdr;
 	int ret = -ENODEV;
 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 
 	tx_napi = bnapi;
-	if (bp->flags & BNX2_FLAG_USING_MSIX)
-		tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
 
+	txr = &tx_napi->tx_ring;
 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
 		bp->loopback = MAC_LOOPBACK;
 		bnx2_set_mac_loopback(bp);
@@ -5123,7 +5184,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 
 	num_pkts = 0;
 
-	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
+	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
 
 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
@@ -5131,11 +5192,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
 
 	num_pkts++;
-	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
-	bp->tx_prod_bseq += pkt_size;
+	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
+	txr->tx_prod_bseq += pkt_size;
 
-	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
-	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
+	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
+	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
 
 	udelay(100);
 
@@ -5149,7 +5210,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
 	dev_kfree_skb(skb);
 
-	if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
+	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
 		goto loopback_test_done;
 
 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
@@ -5527,20 +5588,18 @@ bnx2_enable_msix(struct bnx2 *bp)
 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
 		msix_ent[i].entry = i;
 		msix_ent[i].vector = 0;
+
+		strcpy(bp->irq_tbl[i].name, bp->dev->name);
+		if (i == 0)
+			bp->irq_tbl[i].handler = bnx2_msi_1shot;
+		else
+			bp->irq_tbl[i].handler = bnx2_tx_msix;
 	}
 
 	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
 	if (rc != 0)
 		return;
 
-	bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
-	bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
-
-	strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
-	strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
-	strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
-	strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
-
 	bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
@@ -5571,6 +5630,7 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
 			bp->irq_tbl[0].vector = bp->pdev->irq;
 		}
 	}
+	bp->num_tx_rings = 1;
 }
 
 /* Called with rtnl_lock */
@@ -5585,12 +5645,15 @@ bnx2_open(struct net_device *dev)
 	bnx2_set_power_state(bp, PCI_D0);
 	bnx2_disable_int(bp);
 
+	bnx2_setup_int_mode(bp, disable_msi);
+	bnx2_napi_enable(bp);
 	rc = bnx2_alloc_mem(bp);
-	if (rc)
+	if (rc) {
+		bnx2_napi_disable(bp);
+		bnx2_free_mem(bp);
 		return rc;
+	}
 
-	bnx2_setup_int_mode(bp, disable_msi);
-	bnx2_napi_enable(bp);
 	rc = bnx2_request_irq(bp);
 
 	if (rc) {
@@ -5711,9 +5774,10 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 len, vlan_tag_flags, last_frag, mss;
 	u16 prod, ring_prod;
 	int i;
-	struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 
-	if (unlikely(bnx2_tx_avail(bp, bnapi) <
+	if (unlikely(bnx2_tx_avail(bp, txr) <
 	    (skb_shinfo(skb)->nr_frags + 1))) {
 		netif_stop_queue(dev);
 		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
@@ -5722,7 +5786,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 	len = skb_headlen(skb);
-	prod = bp->tx_prod;
+	prod = txr->tx_prod;
 	ring_prod = TX_RING_IDX(prod);
 
 	vlan_tag_flags = 0;
@@ -5784,11 +5848,11 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
 
-	tx_buf = &bp->tx_buf_ring[ring_prod];
+	tx_buf = &txr->tx_buf_ring[ring_prod];
 	tx_buf->skb = skb;
 	pci_unmap_addr_set(tx_buf, mapping, mapping);
 
-	txbd = &bp->tx_desc_ring[ring_prod];
+	txbd = &txr->tx_desc_ring[ring_prod];
 
 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -5802,12 +5866,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		prod = NEXT_TX_BD(prod);
 		ring_prod = TX_RING_IDX(prod);
-		txbd = &bp->tx_desc_ring[ring_prod];
+		txbd = &txr->tx_desc_ring[ring_prod];
 
 		len = frag->size;
 		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
 			len, PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
+		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
 			mapping, mapping);
 
 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -5819,19 +5883,19 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
 
 	prod = NEXT_TX_BD(prod);
-	bp->tx_prod_bseq += skb->len;
+	txr->tx_prod_bseq += skb->len;
 
-	REG_WR16(bp, bp->tx_bidx_addr, prod);
-	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
+	REG_WR16(bp, txr->tx_bidx_addr, prod);
+	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
 
 	mmiowb();
 
-	bp->tx_prod = prod;
+	txr->tx_prod = prod;
 	dev->trans_start = jiffies;
 
-	if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
+	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
 		netif_stop_queue(dev);
-		if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
+		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
 			netif_wake_queue(dev);
 	}
 
@@ -7390,15 +7454,19 @@ static void __devinit
 bnx2_init_napi(struct bnx2 *bp)
 {
 	int i;
-	struct bnx2_napi *bnapi;
 
 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
-		bnapi = &bp->bnx2_napi[i];
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		int (*poll)(struct napi_struct *, int);
+
+		if (i == 0)
+			poll = bnx2_poll;
+		else
+			poll = bnx2_tx_poll;
+
+		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
 		bnapi->bp = bp;
 	}
-	netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
-	netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
-		       64);
 }
 
 static int __devinit