author     Michael Chan <mchan@broadcom.com>        2007-12-12 14:19:35 -0500
committer  David S. Miller <davem@davemloft.net>    2008-01-28 17:57:31 -0500
commit     1db82f2aec0766edd4a4f8d86283e91559350de7 (patch)
tree       7f356cec78c596ca2c6143c5c2fdbfeed13fe802 /drivers/net/bnx2.c
parent     47bf4246a357d36762c9e7c282d7307152eb92e1 (diff)
[BNX2]: Add fast path code to handle RX pages.
Add function to reuse a page in case of allocation or other errors.
Add code to construct the completed SKB with the additional data in
the pages.
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--   drivers/net/bnx2.c   144
1 file changed, 138 insertions(+), 6 deletions(-)
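
The change in one picture: large frames now keep only the first hdr_len bytes in the skb's linear buffer, and the rest arrives in whole pages that bnx2_rx_skb() attaches as paged fragments. Below is a minimal userspace model of that bookkeeping, with a hypothetical model_skb standing in for struct sk_buff (the driver itself uses skb_put() and skb_fill_page_desc()); the frag_len <= 4 corner case is left to the note after the bnx2_rx_skb hunk.

#include <assert.h>

#define PAGE_SIZE 4096u                         /* assumed: 4 KiB pages */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

struct model_skb {                              /* stand-in for struct sk_buff */
        unsigned int len;                       /* linear bytes + fragment bytes */
        unsigned int data_len;                  /* fragment bytes only */
        unsigned int nr_frags;
        unsigned int frag_len[16];
};

int main(void)
{
        struct model_skb skb = { 0 };
        unsigned int len = 6000, hdr_len = 1024; /* len excludes the 4 CRC bytes */
        unsigned int frag_size = len + 4 - hdr_len; /* page bytes incl. CRC */
        unsigned int pages = PAGE_ALIGN(frag_size) / PAGE_SIZE;
        unsigned int i;

        skb.len = hdr_len;                      /* models skb_put(skb, hdr_len) */
        for (i = 0; i < pages; i++) {
                unsigned int frag_len = frag_size < PAGE_SIZE ? frag_size : PAGE_SIZE;

                if (i == pages - 1)
                        frag_len -= 4;          /* drop the CRC from the last page */
                skb.frag_len[skb.nr_frags++] = frag_len; /* models skb_fill_page_desc() */
                skb.len += frag_len;
                skb.data_len += frag_len;
                frag_size -= frag_len;
        }
        assert(skb.len == len && skb.data_len == len - hdr_len);
        return 0;
}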
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 38e8e31cabf3..6c0fc8a99c67 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2411,6 +2411,55 @@ bnx2_tx_int(struct bnx2 *bp)
         }
 }
 
+static void
+bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
+{
+        struct sw_pg *cons_rx_pg, *prod_rx_pg;
+        struct rx_bd *cons_bd, *prod_bd;
+        dma_addr_t mapping;
+        int i;
+        u16 hw_prod = bp->rx_pg_prod, prod;
+        u16 cons = bp->rx_pg_cons;
+
+        for (i = 0; i < count; i++) {
+                prod = RX_PG_RING_IDX(hw_prod);
+
+                prod_rx_pg = &bp->rx_pg_ring[prod];
+                cons_rx_pg = &bp->rx_pg_ring[cons];
+                cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+                prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+                if (i == 0 && skb) {
+                        struct page *page;
+                        struct skb_shared_info *shinfo;
+
+                        shinfo = skb_shinfo(skb);
+                        shinfo->nr_frags--;
+                        page = shinfo->frags[shinfo->nr_frags].page;
+                        shinfo->frags[shinfo->nr_frags].page = NULL;
+                        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
+                                               PCI_DMA_FROMDEVICE);
+                        cons_rx_pg->page = page;
+                        pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
+                        dev_kfree_skb(skb);
+                }
+                if (prod != cons) {
+                        prod_rx_pg->page = cons_rx_pg->page;
+                        cons_rx_pg->page = NULL;
+                        pci_unmap_addr_set(prod_rx_pg, mapping,
+                                pci_unmap_addr(cons_rx_pg, mapping));
+
+                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
+                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
+
+                }
+                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
+                hw_prod = NEXT_RX_BD(hw_prod);
+        }
+        bp->rx_pg_prod = hw_prod;
+        bp->rx_pg_cons = cons;
+}
+
 static inline void
 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
                   u16 cons, u16 prod)
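
A note on the function added above: when a refill allocation fails mid-frame, the pages the chip already consumed cannot simply be dropped, so each one is handed back to the ring by moving it from its consumer slot to the slot the producer will next give to the hardware; when a partially built skb is passed in, its last fragment's page is first stripped off and re-mapped for reuse. A toy userspace model of the cons-to-prod walk, using a plain modulo ring in place of the driver's RX_PG_RING_IDX()/NEXT_RX_BD() index macros and ignoring DMA mappings:

#include <assert.h>
#include <stddef.h>

#define RING_SIZE 8        /* hypothetical; the real size comes from the bp page ring */

static void *ring[RING_SIZE];

static void reuse_pages(unsigned int *cons, unsigned int *prod, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (*prod != *cons) {
                        /* hand the still-usable page at cons back to the
                         * slot the producer will refill next */
                        ring[*prod] = ring[*cons];
                        ring[*cons] = NULL;
                }
                *cons = (*cons + 1) % RING_SIZE;
                *prod = (*prod + 1) % RING_SIZE;
        }
}

int main(void)
{
        static int page_a, page_b;
        unsigned int cons = 0, prod = 2;   /* prod ran ahead after an earlier failure */

        ring[0] = &page_a;
        ring[1] = &page_b;
        reuse_pages(&cons, &prod, 2);
        assert(ring[2] == &page_a && ring[3] == &page_b);
        assert(cons == 2 && prod == 4);
        return 0;
}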
@@ -2443,7 +2492,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
 
 static int
 bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
-            dma_addr_t dma_addr, u32 ring_idx)
+            unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
 {
         int err;
         u16 prod = ring_idx & 0xffff;
@@ -2451,6 +2500,12 @@ bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
         err = bnx2_alloc_rx_skb(bp, prod);
         if (unlikely(err)) {
                 bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
+                if (hdr_len) {
+                        unsigned int raw_len = len + 4;
+                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
+
+                        bnx2_reuse_rx_skb_pages(bp, NULL, pages);
+                }
                 return err;
         }
 
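
The error path above must release exactly the page-ring entries the hardware consumed for this frame. Since bnx2_rx_int() subtracts the 4 CRC bytes from len before calling down, raw_len = len + 4 restores the on-wire length, so the page count here matches the frag_size = len + 4 - hdr_len computation on the success path. A runnable check of that arithmetic, assuming a 4 KiB page:

#include <assert.h>

#define PAGE_SHIFT 12                       /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1u << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned int len = 8996;            /* 9000-byte frame, CRC already subtracted */
        unsigned int hdr_len = 1024;        /* split point chosen in bnx2_rx_int() */
        unsigned int raw_len = len + 4;
        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

        assert(raw_len == 9000);
        assert(pages == 2);                 /* 7976 page bytes round up to two pages */
        return 0;
}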
@@ -2458,7 +2513,69 @@ bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
         pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
                          PCI_DMA_FROMDEVICE);
 
-        skb_put(skb, len);
+        if (hdr_len == 0) {
+                skb_put(skb, len);
+                return 0;
+        } else {
+                unsigned int i, frag_len, frag_size, pages;
+                struct sw_pg *rx_pg;
+                u16 pg_cons = bp->rx_pg_cons;
+                u16 pg_prod = bp->rx_pg_prod;
+
+                frag_size = len + 4 - hdr_len;
+                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
+                skb_put(skb, hdr_len);
+
+                for (i = 0; i < pages; i++) {
+                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
+                        if (unlikely(frag_len <= 4)) {
+                                unsigned int tail = 4 - frag_len;
+
+                                bp->rx_pg_cons = pg_cons;
+                                bp->rx_pg_prod = pg_prod;
+                                bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
+                                skb->len -= tail;
+                                if (i == 0) {
+                                        skb->tail -= tail;
+                                } else {
+                                        skb_frag_t *frag =
+                                                &skb_shinfo(skb)->frags[i - 1];
+                                        frag->size -= tail;
+                                        skb->data_len -= tail;
+                                        skb->truesize -= tail;
+                                }
+                                return 0;
+                        }
+                        rx_pg = &bp->rx_pg_ring[pg_cons];
+
+                        pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
+                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+                        if (i == pages - 1)
+                                frag_len -= 4;
+
+                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
+                        rx_pg->page = NULL;
+
+                        err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
+                        if (unlikely(err)) {
+                                bp->rx_pg_cons = pg_cons;
+                                bp->rx_pg_prod = pg_prod;
+                                bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
+                                return err;
+                        }
+
+                        frag_size -= frag_len;
+                        skb->data_len += frag_len;
+                        skb->truesize += frag_len;
+                        skb->len += frag_len;
+
+                        pg_prod = NEXT_RX_BD(pg_prod);
+                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
+                }
+                bp->rx_pg_prod = pg_prod;
+                bp->rx_pg_cons = pg_cons;
+        }
         return 0;
 }
 
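
One subtlety in the loop above deserves a worked example: len already has the 4-byte Ethernet CRC subtracted, but the hardware still wrote those 4 bytes after the payload, so frag_size is computed as len + 4 - hdr_len and the CRC can spill onto an extra page. When the remaining frag_len is 4 bytes or less, that last page holds nothing but CRC: it is recycled untouched, and the CRC bytes already counted into the previous fragment (or into the linear tail when no page was used yet) are trimmed back out. Runnable arithmetic for one such frame, assuming PAGE_SIZE == 4096:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE  4096u
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned int hdr_len = 256, len = 4350;     /* payload bytes, CRC already stripped */
        unsigned int frag_size = len + 4 - hdr_len; /* 4098: 2 CRC bytes land on page 2 */
        unsigned int pages = PAGE_ALIGN(frag_size) / PAGE_SIZE;

        assert(pages == 2);
        /* Page 0 carries 4096 bytes (its last 2 are CRC), leaving
         * frag_size = 2 <= 4 for page 1: only CRC remains there, so the
         * driver recycles that page and trims tail = 4 - 2 = 2 bytes off
         * the previous fragment and off skb->len. */
        frag_size -= PAGE_SIZE;
        assert(frag_size == 2);
        unsigned int tail = 4 - frag_size;
        printf("pages=%u tail=%u final skb len=%u\n",
               pages, tail, hdr_len + PAGE_SIZE - tail);
        return 0;
}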
@@ -2477,7 +2594,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 {
         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
         struct l2_fhdr *rx_hdr;
-        int rx_pkt = 0;
+        int rx_pkt = 0, pg_ring_used = 0;
 
         hw_cons = bnx2_get_hw_rx_cons(bp);
         sw_cons = bp->rx_cons;
@@ -2488,7 +2605,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
          */
         rmb();
         while (sw_cons != hw_cons) {
-                unsigned int len;
+                unsigned int len, hdr_len;
                 u32 status;
                 struct sw_bd *rx_buf;
                 struct sk_buff *skb;
@@ -2508,7 +2625,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
                         bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
                 rx_hdr = (struct l2_fhdr *) skb->data;
-                len = rx_hdr->l2_fhdr_pkt_len - 4;
+                len = rx_hdr->l2_fhdr_pkt_len;
 
                 if ((status = rx_hdr->l2_fhdr_status) &
                     (L2_FHDR_ERRORS_BAD_CRC |
@@ -2520,6 +2637,16 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
                         bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
                         goto next_rx;
                 }
+                hdr_len = 0;
+                if (status & L2_FHDR_STATUS_SPLIT) {
+                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
+                        pg_ring_used = 1;
+                } else if (len > bp->rx_jumbo_thresh) {
+                        hdr_len = bp->rx_jumbo_thresh;
+                        pg_ring_used = 1;
+                }
+
+                len -= 4;
 
                 if (len <= bp->rx_copy_thresh) {
                         struct sk_buff *new_skb;
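
The ten added lines above choose the split point hdr_len for the frame: if the chip set L2_FHDR_STATUS_SPLIT it already separated the protocol headers and reports their length in the reused l2_fhdr_ip_xsum field; otherwise any frame longer than rx_jumbo_thresh is split by software at the size of the linear buffer. A compact model of that decision, with a hypothetical STATUS_SPLIT flag value standing in for the real bit definition:

#include <assert.h>

#define STATUS_SPLIT 0x100u        /* hypothetical stand-in for L2_FHDR_STATUS_SPLIT */

/* Returns the number of bytes kept in the linear skb buffer; 0 means the
 * whole frame fits there and the page ring is not involved. */
static unsigned int rx_hdr_len(unsigned int status, unsigned int chip_hdr_len,
                               unsigned int pkt_len, unsigned int jumbo_thresh)
{
        if (status & STATUS_SPLIT)
                return chip_hdr_len;   /* hardware split the headers off itself */
        if (pkt_len > jumbo_thresh)
                return jumbo_thresh;   /* software split at the buffer size */
        return 0;
}

int main(void)
{
        assert(rx_hdr_len(STATUS_SPLIT, 128, 9000, 1500) == 128);
        assert(rx_hdr_len(0, 0, 9000, 1500) == 1500);
        assert(rx_hdr_len(0, 0, 1000, 1500) == 0);
        return 0;
}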
@@ -2541,7 +2668,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
                                           sw_ring_cons, sw_ring_prod);
 
                         skb = new_skb;
-                } else if (unlikely(bnx2_rx_skb(bp, skb, len, dma_addr,
+                } else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
                            (sw_ring_cons << 16) | sw_ring_prod)))
                         goto next_rx;
 
@@ -2593,6 +2720,10 @@ next_rx:
         bp->rx_cons = sw_cons;
         bp->rx_prod = sw_prod;
 
+        if (pg_ring_used)
+                REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
+                         bp->rx_pg_prod);
+
         REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
 
         REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
@@ -4375,6 +4506,7 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
         bp->rx_buf_use_size = rx_size;
         /* hw alignment */
         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+        bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
         bp->rx_ring_size = size;
         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;