Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--	drivers/net/bnx2.c	105
1 file changed, 56 insertions(+), 49 deletions(-)
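This patch moves the rx ring bookkeeping (producer/consumer indexes, the byte-sequence counter, and the page-ring indexes) from struct bnx2 into struct bnx2_napi, so the NAPI poll context owns the state it updates. The bnx2.h side of the change is not part of this diff; below is a minimal sketch of the fields the hunks imply, with field order and exact types partly assumed:

	/* Sketch only: members inferred from the .c hunks below.  Field
	 * order, exact types, and any other members are assumptions,
	 * not the real bnx2.h declaration.
	 */
	struct bnx2_napi {
		struct bnx2	*bp;		/* back pointer (used in bnx2_has_work) */
		u32		rx_prod_bseq;	/* running byte count of posted rx buffers */
		u16		rx_prod;	/* sw producer, rx buffer ring */
		u16		rx_cons;	/* sw consumer, rx buffer ring */
		u16		rx_pg_prod;	/* sw producer, rx page ring */
		u16		rx_pg_cons;	/* sw consumer, rx page ring */
		u16		hw_tx_cons;	/* cached hw tx consumer (pre-existing) */
	};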
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 0300a759728c..ecfaad102f70 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2276,7 +2276,7 @@ bnx2_free_rx_page(struct bnx2 *bp, u16 index)
 }
 
 static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
 {
 	struct sk_buff *skb;
 	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
@@ -2301,7 +2301,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
 
-	bp->rx_prod_bseq += bp->rx_buf_use_size;
+	bnapi->rx_prod_bseq += bp->rx_buf_use_size;
 
 	return 0;
 }
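bnx2_alloc_rx_skb() now advances bnapi->rx_prod_bseq, the cumulative byte count of buffers posted to the chip, which is later written to the BNX2_L2CTX_HOST_BSEQ mailbox. A condensed sketch of the post-then-publish pattern this enables; post_rx_buffers is a hypothetical helper, the loop body mirrors the bnx2_init_rx_ring() hunk at the end of this diff:

	/* Hypothetical helper; the calls and registers are the ones used
	 * in this diff, everything else is an assumption.
	 */
	static void post_rx_buffers(struct bnx2 *bp, struct bnx2_napi *bnapi, int n)
	{
		while (n--) {
			/* fills one rx BD and advances bnapi->rx_prod_bseq */
			if (bnx2_alloc_rx_skb(bp, bnapi,
					      RX_RING_IDX(bnapi->rx_prod)) < 0)
				break;
			bnapi->rx_prod = NEXT_RX_BD(bnapi->rx_prod);
		}
		/* publish the new producer index and byte sequence to the chip */
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, bnapi->rx_prod);
		REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
	}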
@@ -2432,14 +2432,15 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
 }
 
 static void
-bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
+bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
+			struct sk_buff *skb, int count)
 {
 	struct sw_pg *cons_rx_pg, *prod_rx_pg;
 	struct rx_bd *cons_bd, *prod_bd;
 	dma_addr_t mapping;
 	int i;
-	u16 hw_prod = bp->rx_pg_prod, prod;
-	u16 cons = bp->rx_pg_cons;
+	u16 hw_prod = bnapi->rx_pg_prod, prod;
+	u16 cons = bnapi->rx_pg_cons;
 
 	for (i = 0; i < count; i++) {
 		prod = RX_PG_RING_IDX(hw_prod);
@@ -2476,12 +2477,12 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
 		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
 		hw_prod = NEXT_RX_BD(hw_prod);
 	}
-	bp->rx_pg_prod = hw_prod;
-	bp->rx_pg_cons = cons;
+	bnapi->rx_pg_prod = hw_prod;
+	bnapi->rx_pg_cons = cons;
 }
 
 static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
+bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 		  u16 cons, u16 prod)
 {
 	struct sw_bd *cons_rx_buf, *prod_rx_buf;
@@ -2494,7 +2495,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
 		pci_unmap_addr(cons_rx_buf, mapping),
 		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
-	bp->rx_prod_bseq += bp->rx_buf_use_size;
+	bnapi->rx_prod_bseq += bp->rx_buf_use_size;
 
 	prod_rx_buf->skb = skb;
 
@@ -2511,20 +2512,21 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
 }
 
 static int
-bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
-	    unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
+	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
+	    u32 ring_idx)
 {
 	int err;
 	u16 prod = ring_idx & 0xffff;
 
-	err = bnx2_alloc_rx_skb(bp, prod);
+	err = bnx2_alloc_rx_skb(bp, bnapi, prod);
 	if (unlikely(err)) {
-		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
+		bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
 		if (hdr_len) {
 			unsigned int raw_len = len + 4;
 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
 
-			bnx2_reuse_rx_skb_pages(bp, NULL, pages);
+			bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
 		}
 		return err;
 	}
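bnx2_rx_skb() receives both ring positions packed into its u32 ring_idx argument; the caller in bnx2_rx_int() builds it as (sw_ring_cons << 16) | sw_ring_prod. The packing and unpacking, exactly as used in the hunks above and below:

	u32 ring_idx = (sw_ring_cons << 16) | sw_ring_prod;	/* built by the caller */
	u16 prod = ring_idx & 0xffff;		/* low 16 bits: producer index */
	u16 cons = (u16) (ring_idx >> 16);	/* high 16 bits: consumer index */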
@@ -2539,8 +2541,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
 	} else {
 		unsigned int i, frag_len, frag_size, pages;
 		struct sw_pg *rx_pg;
-		u16 pg_cons = bp->rx_pg_cons;
-		u16 pg_prod = bp->rx_pg_prod;
+		u16 pg_cons = bnapi->rx_pg_cons;
+		u16 pg_prod = bnapi->rx_pg_prod;
 
 		frag_size = len + 4 - hdr_len;
 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
@@ -2551,9 +2553,10 @@ bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
 			if (unlikely(frag_len <= 4)) {
 				unsigned int tail = 4 - frag_len;
 
-				bp->rx_pg_cons = pg_cons;
-				bp->rx_pg_prod = pg_prod;
-				bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
+				bnapi->rx_pg_cons = pg_cons;
+				bnapi->rx_pg_prod = pg_prod;
+				bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
+							pages - i);
 				skb->len -= tail;
 				if (i == 0) {
 					skb->tail -= tail;
@@ -2579,9 +2582,10 @@ bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
 
 			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
 			if (unlikely(err)) {
-				bp->rx_pg_cons = pg_cons;
-				bp->rx_pg_prod = pg_prod;
-				bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
+				bnapi->rx_pg_cons = pg_cons;
+				bnapi->rx_pg_prod = pg_prod;
+				bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
+							pages - i);
 				return err;
 			}
 
@@ -2593,8 +2597,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
 			pg_prod = NEXT_RX_BD(pg_prod);
 			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
 		}
-		bp->rx_pg_prod = pg_prod;
-		bp->rx_pg_cons = pg_cons;
+		bnapi->rx_pg_prod = pg_prod;
+		bnapi->rx_pg_cons = pg_cons;
 	}
 	return 0;
 }
@@ -2617,8 +2621,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 	int rx_pkt = 0, pg_ring_used = 0;
 
 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
-	sw_cons = bp->rx_cons;
-	sw_prod = bp->rx_prod;
+	sw_cons = bnapi->rx_cons;
+	sw_prod = bnapi->rx_prod;
 
 	/* Memory barrier necessary as speculative reads of the rx
 	 * buffer can be ahead of the index in the status block
@@ -2654,7 +2658,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			    L2_FHDR_ERRORS_TOO_SHORT |
 			    L2_FHDR_ERRORS_GIANT_FRAME)) {
 
-			bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
+			bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
+					  sw_ring_prod);
 			goto next_rx;
 		}
 		hdr_len = 0;
@@ -2673,7 +2678,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 			new_skb = netdev_alloc_skb(bp->dev, len + 2);
 			if (new_skb == NULL) {
-				bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
+				bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
 						  sw_ring_prod);
 				goto next_rx;
 			}
@@ -2684,12 +2689,12 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			skb_reserve(new_skb, 2);
 			skb_put(new_skb, len);
 
-			bnx2_reuse_rx_skb(bp, skb,
+			bnx2_reuse_rx_skb(bp, bnapi, skb,
 					  sw_ring_cons, sw_ring_prod);
 
 			skb = new_skb;
-		} else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
-			(sw_ring_cons << 16) | sw_ring_prod)))
+		} else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
+			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
 			goto next_rx;
 
 		skb->protocol = eth_type_trans(skb, bp->dev);
@@ -2737,16 +2742,16 @@ next_rx:
 			rmb();
 		}
 	}
-	bp->rx_cons = sw_cons;
-	bp->rx_prod = sw_prod;
+	bnapi->rx_cons = sw_cons;
+	bnapi->rx_prod = sw_prod;
 
 	if (pg_ring_used)
 		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
-			 bp->rx_pg_prod);
+			 bnapi->rx_pg_prod);
 
 	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
 
-	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
+	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
 
 	mmiowb();
 
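At the end of bnx2_rx_int() the (now per-NAPI) software state is stored first, then the hardware mailboxes are updated; the page-ring producer is only written when the page ring was actually used. Condensed from the hunk above, with explanatory comments added:

	bnapi->rx_cons = sw_cons;	/* where sw polling stopped */
	bnapi->rx_prod = sw_prod;	/* how far buffers were reposted */

	if (pg_ring_used)		/* page ring touched only for split frames */
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bnapi->rx_pg_prod);
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
	mmiowb();	/* keep the mailbox writes ordered on architectures that need it */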
@@ -2845,7 +2850,7 @@ bnx2_has_work(struct bnx2_napi *bnapi)
 	struct bnx2 *bp = bnapi->bp;
 	struct status_block *sblk = bp->status_blk;
 
-	if ((bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons) ||
+	if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
 	    (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
 		return 1;
 
@@ -2879,7 +2884,7 @@ static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
 	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
 		bnx2_tx_int(bp, bnapi);
 
-	if (bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons)
+	if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
 
 	return work_done;
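With these two hunks, bnx2_has_work() and bnx2_poll_work() compare the hardware consumer against the same per-NAPI rx_cons, so work detection and work draining can no longer disagree with a stale copy in struct bnx2. A sketch of the assumed caller relationship; the bnx2_poll() wrapper itself is not part of this diff, so this loop is hypothetical and shown only to place the two functions in context:

	/* Assumed shape of the NAPI poll loop around the two hunks above. */
	while (work_done < budget) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (!bnx2_has_work(bnapi))
			break;		/* nothing left: ready to re-enable interrupts */
	}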
@@ -4432,12 +4437,13 @@ bnx2_init_rx_ring(struct bnx2 *bp)
 	int i;
 	u16 prod, ring_prod;
 	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
+	struct bnx2_napi *bnapi = &bp->bnx2_napi;
 
-	bp->rx_prod = 0;
-	bp->rx_cons = 0;
-	bp->rx_prod_bseq = 0;
-	bp->rx_pg_prod = 0;
-	bp->rx_pg_cons = 0;
+	bnapi->rx_prod = 0;
+	bnapi->rx_cons = 0;
+	bnapi->rx_prod_bseq = 0;
+	bnapi->rx_pg_prod = 0;
+	bnapi->rx_pg_cons = 0;
 
 	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
 			     bp->rx_buf_use_size, bp->rx_max_ring);
@@ -4473,29 +4479,30 @@ bnx2_init_rx_ring(struct bnx2 *bp)
 	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
 	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
 
-	ring_prod = prod = bp->rx_pg_prod;
+	ring_prod = prod = bnapi->rx_pg_prod;
 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
 		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
 			break;
 		prod = NEXT_RX_BD(prod);
 		ring_prod = RX_PG_RING_IDX(prod);
 	}
-	bp->rx_pg_prod = prod;
+	bnapi->rx_pg_prod = prod;
 
-	ring_prod = prod = bp->rx_prod;
+	ring_prod = prod = bnapi->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
+		if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
 			break;
 		}
 		prod = NEXT_RX_BD(prod);
 		ring_prod = RX_RING_IDX(prod);
 	}
-	bp->rx_prod = prod;
+	bnapi->rx_prod = prod;
 
-	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
+	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
+		 bnapi->rx_pg_prod);
 	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
 
-	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
+	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
 }
 
 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)