author    Michael Chan <mchan@broadcom.com>      2007-12-12 14:17:01 -0500
committer David S. Miller <davem@davemloft.net>  2008-01-28 17:57:28 -0500
commit    85833c6269016d009ada17b04ac288e2ab9c37ea
tree      d102f33d3cb4c07506cc3429f124dcef063e5930
parent    e343d55c0a624c5bb88cd6821a17586474f20271
[BNX2]: Restructure RX fast path handling.
Add a new function to handle new SKB allocation and to prepare the
completed SKB.  This makes it easier to add support for non-linear SKB.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/bnx2.c  46
1 file changed, 30 insertions(+), 16 deletions(-)
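
One convention worth calling out before the diff: the new bnx2_rx_skb()
helper takes both software ring indices packed into a single u32, the
consumer index in the upper 16 bits and the producer index in the lower
16 bits, which is what the call site (sw_ring_cons << 16) | sw_ring_prod
in the last hunk builds. A minimal userspace sketch of that pack/unpack
round trip (pack_ring_idx() is an illustrative name, not a driver
function):

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper; the driver open-codes this expression. */
static uint32_t pack_ring_idx(uint16_t cons, uint16_t prod)
{
	return ((uint32_t) cons << 16) | prod;	/* (sw_ring_cons << 16) | sw_ring_prod */
}

int main(void)
{
	uint16_t sw_ring_cons = 0x1234, sw_ring_prod = 0x5678;
	uint32_t ring_idx = pack_ring_idx(sw_ring_cons, sw_ring_prod);

	/* bnx2_rx_skb() recovers the two halves the same way: */
	uint16_t prod = ring_idx & 0xffff;		/* u16 prod = ring_idx & 0xffff; */
	uint16_t cons = (uint16_t) (ring_idx >> 16);	/* (u16) (ring_idx >> 16) */

	printf("cons=%#x prod=%#x\n", cons, prod);	/* prints cons=0x1234 prod=0x5678 */
	return (cons == sw_ring_cons && prod == sw_ring_prod) ? 0 : 1;
}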
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index dfe50c286d95..14119fb5964d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2379,6 +2379,27 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
 
+static int
+bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
+	    dma_addr_t dma_addr, u32 ring_idx)
+{
+	int err;
+	u16 prod = ring_idx & 0xffff;
+
+	err = bnx2_alloc_rx_skb(bp, prod);
+	if (unlikely(err)) {
+		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
+		return err;
+	}
+
+	skb_reserve(skb, bp->rx_offset);
+	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
+			 PCI_DMA_FROMDEVICE);
+
+	skb_put(skb, len);
+	return 0;
+}
+
 static inline u16
 bnx2_get_hw_rx_cons(struct bnx2 *bp)
 {
@@ -2434,7 +2455,8 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 				       L2_FHDR_ERRORS_TOO_SHORT |
 				       L2_FHDR_ERRORS_GIANT_FRAME)) {
 
-			goto reuse_rx;
+			bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
+			goto next_rx;
 		}
 
 		/* Since we don't have a jumbo ring, copy small packets
@@ -2444,8 +2466,11 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 			struct sk_buff *new_skb;
 
 			new_skb = netdev_alloc_skb(bp->dev, len + 2);
-			if (new_skb == NULL)
-				goto reuse_rx;
+			if (new_skb == NULL) {
+				bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
+						  sw_ring_prod);
+				goto next_rx;
+			}
 
 			/* aligned copy */
 			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
@@ -2457,20 +2482,9 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 					  sw_ring_cons, sw_ring_prod);
 
 			skb = new_skb;
-		}
-		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
-			pci_unmap_single(bp->pdev, dma_addr,
-				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
-
-			skb_reserve(skb, bp->rx_offset);
-			skb_put(skb, len);
-		}
-		else {
-reuse_rx:
-			bnx2_reuse_rx_skb(bp, skb,
-					  sw_ring_cons, sw_ring_prod);
+		} else if (unlikely(bnx2_rx_skb(bp, skb, len, dma_addr,
+			   (sw_ring_cons << 16) | sw_ring_prod)))
 			goto next_rx;
-		}
 
 		skb->protocol = eth_type_trans(skb, bp->dev);
 
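
Both failure checks the patch adds are wrapped in unlikely(), the
kernel's branch-prediction hint from include/linux/compiler.h, which
expands to GCC's __builtin_expect so the error path is laid out off the
hot path. A freestanding sketch of the same pattern (a userspace
approximation, not the kernel header; alloc_may_fail() is a made-up
stand-in for bnx2_alloc_rx_skb()):

#include <stdio.h>

/* Userspace approximation of the kernel's hint macros. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int alloc_may_fail(int i)
{
	return i == 2 ? -1 : 0;		/* fail once, like an allocation error */
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		int err = alloc_may_fail(i);

		if (unlikely(err)) {	/* rare path: recycle the buffer and move on */
			fprintf(stderr, "alloc failed at %d, reusing buffer\n", i);
			continue;
		}
		printf("fast path, packet %d delivered\n", i);
	}
	return 0;
}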