author		Michael Chan <mchan@broadcom.com>	2007-12-12 14:19:57 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 17:57:32 -0500
commit		84eaa1877137def7fe01340f2abbad510aa890f5 (patch)
tree		ed964fded82e861d4566dbaa16594625f9bce77c /drivers/net/bnx2.c
parent		1db82f2aec0766edd4a4f8d86283e91559350de7 (diff)
[BNX2]: Enable S/G for jumbo RX.
If the MTU requires more than 1 page for the SKB, enable the page ring
and calculate the size of the page ring. This will guarantee order-0
allocation regardless of the MTU size.
Fix up the loopback test packet size so that we don't have to deal
with the pages during the loopback test.
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
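
A rough userspace sketch (not driver code) of the page-ring sizing
described above, assuming 4K pages; the ring size of 255 and the
40-byte header allowance are illustrative stand-ins for the driver's
values:

	/*
	 * Sketch of the sizing math: one page-ring entry per page of
	 * jumbo payload, per RX ring entry.  The "- 40" mirrors the
	 * driver's allowance for data kept in the header buffer and is
	 * treated here as an assumption.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1u << PAGE_SHIFT)
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned int size = 255;	/* hypothetical RX ring size */
		unsigned int mtu;

		for (mtu = 1500; mtu <= 9000; mtu += 2500) {
			unsigned int pages = PAGE_ALIGN(mtu - 40) >> PAGE_SHIFT;

			printf("mtu %u -> %u page(s) per packet, pg ring %u\n",
			       mtu, pages, size * pages);
		}
		return 0;
	}

At a standard 1500-byte MTU this yields one page per packet and the
small-buffer path still applies; at a 9000-byte jumbo MTU it yields
three order-0 pages per packet, so no high-order allocation is ever
needed.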
Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--	drivers/net/bnx2.c	21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 6c0fc8a99c67..ae081c8dd45f 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -4493,15 +4493,32 @@ static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
 static void
 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
 {
-	u32 rx_size;
+	u32 rx_size, rx_space, jumbo_size;
 
 	/* 8 for CRC and VLAN */
 	rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
 
+	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
+		sizeof(struct skb_shared_info);
+
 	bp->rx_copy_thresh = RX_COPY_THRESH;
 	bp->rx_pg_ring_size = 0;
 	bp->rx_max_pg_ring = 0;
 	bp->rx_max_pg_ring_idx = 0;
+	if (rx_space > PAGE_SIZE) {
+		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+
+		jumbo_size = size * pages;
+		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
+			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
+
+		bp->rx_pg_ring_size = jumbo_size;
+		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
+							MAX_RX_PG_RINGS);
+		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
+		rx_size = RX_COPY_THRESH + bp->rx_offset;
+		bp->rx_copy_thresh = 0;
+	}
 
 	bp->rx_buf_use_size = rx_size;
 	/* hw alignment */
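
For reference, a sketch of when the rx_space test above trips. All of
the constants below (NET_SKB_PAD, the RX alignment, the rx_offset, and
the skb_shared_info size) vary by architecture and kernel version and
are illustrative stand-ins, so treat the output as an approximation:

	/*
	 * Sketch (not driver code) of the order-0 test: a packet stays
	 * in a single kmalloc'd SKB only if the aligned data plus the
	 * shared info fits in one page; otherwise the page ring is
	 * enabled.  All constants are illustrative stand-ins.
	 */
	#include <stdio.h>

	#define PAGE_SIZE	4096u
	#define ETH_HLEN	14u
	#define RX_OFFSET	2u	/* stand-in for bp->rx_offset */
	#define BNX2_RX_ALIGN	16u	/* stand-in */
	#define NET_SKB_PAD	16u	/* stand-in; arch-dependent */
	#define SHINFO		320u	/* stand-in for sizeof(struct skb_shared_info) */
	#define SKB_DATA_ALIGN(x) (((x) + 63u) & ~63u)	/* 64-byte cache lines */

	int main(void)
	{
		unsigned int mtu;

		for (mtu = 1500; mtu <= 9000; mtu += 7500) {
			unsigned int rx_size = mtu + ETH_HLEN + RX_OFFSET + 8;
			unsigned int rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN)
						+ NET_SKB_PAD + SHINFO;

			printf("mtu %u: rx_space %u -> %s\n", mtu, rx_space,
			       rx_space > PAGE_SIZE ? "page ring" : "single buffer");
		}
		return 0;
	}

Note that once the page ring is in use, rx_size shrinks to just the
copy threshold plus the offset (the header buffer), and rx_copy_thresh
is zeroed so small-packet copying is disabled on this path.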
@@ -4881,7 +4898,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	else
 		return -EINVAL;
 
-	pkt_size = 1514;
+	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
 	skb = netdev_alloc_skb(bp->dev, pkt_size);
 	if (!skb)
 		return -ENOMEM;
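
A quick illustration (not driver code) of the new pkt_size clamp: the
test frame tracks the MTU but stays below the jumbo threshold, so the
loopback test never crosses into the page ring. The threshold value
used here is hypothetical:

	#include <stdio.h>

	#define ETH_HLEN 14u
	#define min(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned int rx_jumbo_thresh = 1536;	/* hypothetical */
		unsigned int mtu;

		for (mtu = 1500; mtu <= 9000; mtu += 7500)
			printf("mtu %u -> loopback pkt_size %u\n", mtu,
			       min(mtu + ETH_HLEN, rx_jumbo_thresh - 4));
		return 0;
	}

At the default 1500-byte MTU this reproduces the old fixed 1514-byte
frame; at a jumbo MTU the frame is clamped just under the threshold
instead of spilling onto the pages.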