aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorMichael Chan <mchan@broadcom.com>2006-03-20 20:49:20 -0500
committerDavid S. Miller <davem@davemloft.net>2006-03-20 20:49:20 -0500
commit13daffa2f2ba65674e7816a0e95e7b93246cb686 (patch)
tree3170a19dfba530398d4f12e54a21400254fa98b1 /drivers
parent236b6394bb49ea58465c6f935a286d2342576f8d (diff)
[BNX2]: Support larger rx ring sizes (part 1)
Increase maximum receive ring size from 255 to 1020 by supporting up to 4 linked pages of receive descriptors. To accommodate the higher memory usage, each physical descriptor page is allocated separately and the software ring that keeps track of the SKBs and the DMA addresses is allocated using vmalloc. Some of the receive-related fields in the bp structure are re-organized a bit for better locality of reference. The max. was reduced to 1020 from 4080 after discussion with David Miller. This patch contains the ring init code changes only. The next patch contains the rx data path code changes. Signed-off-by: Michael Chan <mchan@broadcom.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/bnx2.c117
-rw-r--r--drivers/net/bnx2.h36
2 files changed, 107 insertions, 46 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 0d592f7c3a99..03c47cf04e90 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -360,6 +360,8 @@ bnx2_netif_start(struct bnx2 *bp)
360static void 360static void
361bnx2_free_mem(struct bnx2 *bp) 361bnx2_free_mem(struct bnx2 *bp)
362{ 362{
363 int i;
364
363 if (bp->stats_blk) { 365 if (bp->stats_blk) {
364 pci_free_consistent(bp->pdev, sizeof(struct statistics_block), 366 pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
365 bp->stats_blk, bp->stats_blk_mapping); 367 bp->stats_blk, bp->stats_blk_mapping);
@@ -378,19 +380,23 @@ bnx2_free_mem(struct bnx2 *bp)
378 } 380 }
379 kfree(bp->tx_buf_ring); 381 kfree(bp->tx_buf_ring);
380 bp->tx_buf_ring = NULL; 382 bp->tx_buf_ring = NULL;
381 if (bp->rx_desc_ring) { 383 for (i = 0; i < bp->rx_max_ring; i++) {
382 pci_free_consistent(bp->pdev, 384 if (bp->rx_desc_ring[i])
383 sizeof(struct rx_bd) * RX_DESC_CNT, 385 pci_free_consistent(bp->pdev,
384 bp->rx_desc_ring, bp->rx_desc_mapping); 386 sizeof(struct rx_bd) * RX_DESC_CNT,
385 bp->rx_desc_ring = NULL; 387 bp->rx_desc_ring[i],
386 } 388 bp->rx_desc_mapping[i]);
387 kfree(bp->rx_buf_ring); 389 bp->rx_desc_ring[i] = NULL;
390 }
391 vfree(bp->rx_buf_ring);
388 bp->rx_buf_ring = NULL; 392 bp->rx_buf_ring = NULL;
389} 393}
390 394
391static int 395static int
392bnx2_alloc_mem(struct bnx2 *bp) 396bnx2_alloc_mem(struct bnx2 *bp)
393{ 397{
398 int i;
399
394 bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT, 400 bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
395 GFP_KERNEL); 401 GFP_KERNEL);
396 if (bp->tx_buf_ring == NULL) 402 if (bp->tx_buf_ring == NULL)
@@ -404,18 +410,23 @@ bnx2_alloc_mem(struct bnx2 *bp)
404 if (bp->tx_desc_ring == NULL) 410 if (bp->tx_desc_ring == NULL)
405 goto alloc_mem_err; 411 goto alloc_mem_err;
406 412
407 bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT, 413 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
408 GFP_KERNEL); 414 bp->rx_max_ring);
409 if (bp->rx_buf_ring == NULL) 415 if (bp->rx_buf_ring == NULL)
410 goto alloc_mem_err; 416 goto alloc_mem_err;
411 417
412 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT); 418 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
413 bp->rx_desc_ring = pci_alloc_consistent(bp->pdev, 419 bp->rx_max_ring);
414 sizeof(struct rx_bd) * 420
415 RX_DESC_CNT, 421 for (i = 0; i < bp->rx_max_ring; i++) {
416 &bp->rx_desc_mapping); 422 bp->rx_desc_ring[i] =
417 if (bp->rx_desc_ring == NULL) 423 pci_alloc_consistent(bp->pdev,
418 goto alloc_mem_err; 424 sizeof(struct rx_bd) * RX_DESC_CNT,
425 &bp->rx_desc_mapping[i]);
426 if (bp->rx_desc_ring[i] == NULL)
427 goto alloc_mem_err;
428
429 }
419 430
420 bp->status_blk = pci_alloc_consistent(bp->pdev, 431 bp->status_blk = pci_alloc_consistent(bp->pdev,
421 sizeof(struct status_block), 432 sizeof(struct status_block),
@@ -1520,7 +1531,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1520 struct sk_buff *skb; 1531 struct sk_buff *skb;
1521 struct sw_bd *rx_buf = &bp->rx_buf_ring[index]; 1532 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1522 dma_addr_t mapping; 1533 dma_addr_t mapping;
1523 struct rx_bd *rxbd = &bp->rx_desc_ring[index]; 1534 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1524 unsigned long align; 1535 unsigned long align;
1525 1536
1526 skb = dev_alloc_skb(bp->rx_buf_size); 1537 skb = dev_alloc_skb(bp->rx_buf_size);
@@ -3349,24 +3360,32 @@ bnx2_init_rx_ring(struct bnx2 *bp)
3349 bp->hw_rx_cons = 0; 3360 bp->hw_rx_cons = 0;
3350 bp->rx_prod_bseq = 0; 3361 bp->rx_prod_bseq = 0;
3351 3362
3352 rxbd = &bp->rx_desc_ring[0]; 3363 for (i = 0; i < bp->rx_max_ring; i++) {
3353 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { 3364 int j;
3354 rxbd->rx_bd_len = bp->rx_buf_use_size;
3355 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3356 }
3357 3365
3358 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping >> 32; 3366 rxbd = &bp->rx_desc_ring[i][0];
3359 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping & 0xffffffff; 3367 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3368 rxbd->rx_bd_len = bp->rx_buf_use_size;
3369 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3370 }
3371 if (i == (bp->rx_max_ring - 1))
3372 j = 0;
3373 else
3374 j = i + 1;
3375 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3376 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3377 0xffffffff;
3378 }
3360 3379
3361 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE; 3380 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3362 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2; 3381 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3363 val |= 0x02 << 8; 3382 val |= 0x02 << 8;
3364 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val); 3383 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3365 3384
3366 val = (u64) bp->rx_desc_mapping >> 32; 3385 val = (u64) bp->rx_desc_mapping[0] >> 32;
3367 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val); 3386 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3368 3387
3369 val = (u64) bp->rx_desc_mapping & 0xffffffff; 3388 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3370 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val); 3389 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3371 3390
3372 for (i = 0; i < bp->rx_ring_size; i++) { 3391 for (i = 0; i < bp->rx_ring_size; i++) {
@@ -3384,6 +3403,29 @@ bnx2_init_rx_ring(struct bnx2 *bp)
3384} 3403}
3385 3404
3386static void 3405static void
3406bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3407{
3408 u32 num_rings, max;
3409
3410 bp->rx_ring_size = size;
3411 num_rings = 1;
3412 while (size > MAX_RX_DESC_CNT) {
3413 size -= MAX_RX_DESC_CNT;
3414 num_rings++;
3415 }
3416 /* round to next power of 2 */
3417 max = MAX_RX_RINGS;
3418 while ((max & num_rings) == 0)
3419 max >>= 1;
3420
3421 if (num_rings != max)
3422 max <<= 1;
3423
3424 bp->rx_max_ring = max;
3425 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3426}
3427
3428static void
3387bnx2_free_tx_skbs(struct bnx2 *bp) 3429bnx2_free_tx_skbs(struct bnx2 *bp)
3388{ 3430{
3389 int i; 3431 int i;
@@ -3428,7 +3470,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
3428 if (bp->rx_buf_ring == NULL) 3470 if (bp->rx_buf_ring == NULL)
3429 return; 3471 return;
3430 3472
3431 for (i = 0; i < RX_DESC_CNT; i++) { 3473 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3432 struct sw_bd *rx_buf = &bp->rx_buf_ring[i]; 3474 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3433 struct sk_buff *skb = rx_buf->skb; 3475 struct sk_buff *skb = rx_buf->skb;
3434 3476
@@ -4792,7 +4834,7 @@ bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4792{ 4834{
4793 struct bnx2 *bp = netdev_priv(dev); 4835 struct bnx2 *bp = netdev_priv(dev);
4794 4836
4795 ering->rx_max_pending = MAX_RX_DESC_CNT; 4837 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
4796 ering->rx_mini_max_pending = 0; 4838 ering->rx_mini_max_pending = 0;
4797 ering->rx_jumbo_max_pending = 0; 4839 ering->rx_jumbo_max_pending = 0;
4798 4840
@@ -4809,17 +4851,28 @@ bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4809{ 4851{
4810 struct bnx2 *bp = netdev_priv(dev); 4852 struct bnx2 *bp = netdev_priv(dev);
4811 4853
4812 if ((ering->rx_pending > MAX_RX_DESC_CNT) || 4854 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
4813 (ering->tx_pending > MAX_TX_DESC_CNT) || 4855 (ering->tx_pending > MAX_TX_DESC_CNT) ||
4814 (ering->tx_pending <= MAX_SKB_FRAGS)) { 4856 (ering->tx_pending <= MAX_SKB_FRAGS)) {
4815 4857
4816 return -EINVAL; 4858 return -EINVAL;
4817 } 4859 }
4818 bp->rx_ring_size = ering->rx_pending; 4860 if (netif_running(bp->dev)) {
4861 bnx2_netif_stop(bp);
4862 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
4863 bnx2_free_skbs(bp);
4864 bnx2_free_mem(bp);
4865 }
4866
4867 bnx2_set_rx_ring_size(bp, ering->rx_pending);
4819 bp->tx_ring_size = ering->tx_pending; 4868 bp->tx_ring_size = ering->tx_pending;
4820 4869
4821 if (netif_running(bp->dev)) { 4870 if (netif_running(bp->dev)) {
4822 bnx2_netif_stop(bp); 4871 int rc;
4872
4873 rc = bnx2_alloc_mem(bp);
4874 if (rc)
4875 return rc;
4823 bnx2_init_nic(bp); 4876 bnx2_init_nic(bp);
4824 bnx2_netif_start(bp); 4877 bnx2_netif_start(bp);
4825 } 4878 }
@@ -5493,7 +5546,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5493 bp->mac_addr[5] = (u8) reg; 5546 bp->mac_addr[5] = (u8) reg;
5494 5547
5495 bp->tx_ring_size = MAX_TX_DESC_CNT; 5548 bp->tx_ring_size = MAX_TX_DESC_CNT;
5496 bp->rx_ring_size = 100; 5549 bnx2_set_rx_ring_size(bp, 100);
5497 5550
5498 bp->rx_csum = 1; 5551 bp->rx_csum = 1;
5499 5552
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 9f691cbd666b..beb2e8bcc659 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3792,8 +3792,10 @@ struct l2_fhdr {
3792#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd)) 3792#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd))
3793#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) 3793#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
3794 3794
3795#define MAX_RX_RINGS 4
3795#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd)) 3796#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd))
3796#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1) 3797#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1)
3798#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS)
3797 3799
3798#define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \ 3800#define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \
3799 (MAX_TX_DESC_CNT - 1)) ? \ 3801 (MAX_TX_DESC_CNT - 1)) ? \
@@ -3805,8 +3807,10 @@ struct l2_fhdr {
3805 (MAX_RX_DESC_CNT - 1)) ? \ 3807 (MAX_RX_DESC_CNT - 1)) ? \
3806 (x) + 2 : (x) + 1 3808 (x) + 2 : (x) + 1
3807 3809
3808#define RX_RING_IDX(x) ((x) & MAX_RX_DESC_CNT) 3810#define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx)
3809 3811
3812#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> 8)
3813#define RX_IDX(x) ((x) & MAX_RX_DESC_CNT)
3810 3814
3811/* Context size. */ 3815/* Context size. */
3812#define CTX_SHIFT 7 3816#define CTX_SHIFT 7
@@ -3903,6 +3907,15 @@ struct bnx2 {
3903 struct status_block *status_blk; 3907 struct status_block *status_blk;
3904 u32 last_status_idx; 3908 u32 last_status_idx;
3905 3909
3910 u32 flags;
3911#define PCIX_FLAG 1
3912#define PCI_32BIT_FLAG 2
3913#define ONE_TDMA_FLAG 4 /* no longer used */
3914#define NO_WOL_FLAG 8
3915#define USING_DAC_FLAG 0x10
3916#define USING_MSI_FLAG 0x20
3917#define ASF_ENABLE_FLAG 0x40
3918
3906 struct tx_bd *tx_desc_ring; 3919 struct tx_bd *tx_desc_ring;
3907 struct sw_bd *tx_buf_ring; 3920 struct sw_bd *tx_buf_ring;
3908 u32 tx_prod_bseq; 3921 u32 tx_prod_bseq;
@@ -3920,19 +3933,22 @@ struct bnx2 {
3920 u32 rx_offset; 3933 u32 rx_offset;
3921 u32 rx_buf_use_size; /* useable size */ 3934 u32 rx_buf_use_size; /* useable size */
3922 u32 rx_buf_size; /* with alignment */ 3935 u32 rx_buf_size; /* with alignment */
3923 struct rx_bd *rx_desc_ring; 3936 u32 rx_max_ring_idx;
3924 struct sw_bd *rx_buf_ring; 3937
3925 u32 rx_prod_bseq; 3938 u32 rx_prod_bseq;
3926 u16 rx_prod; 3939 u16 rx_prod;
3927 u16 rx_cons; 3940 u16 rx_cons;
3928 3941
3929 u32 rx_csum; 3942 u32 rx_csum;
3930 3943
3944 struct sw_bd *rx_buf_ring;
3945 struct rx_bd *rx_desc_ring[MAX_RX_RINGS];
3946
3931 /* Only used to synchronize netif_stop_queue/wake_queue when tx */ 3947 /* Only used to synchronize netif_stop_queue/wake_queue when tx */
3932 /* ring is full */ 3948 /* ring is full */
3933 spinlock_t tx_lock; 3949 spinlock_t tx_lock;
3934 3950
3935 /* End of fileds used in the performance code paths. */ 3951 /* End of fields used in the performance code paths. */
3936 3952
3937 char *name; 3953 char *name;
3938 3954
@@ -3945,15 +3961,6 @@ struct bnx2 {
3945 /* Used to synchronize phy accesses. */ 3961 /* Used to synchronize phy accesses. */
3946 spinlock_t phy_lock; 3962 spinlock_t phy_lock;
3947 3963
3948 u32 flags;
3949#define PCIX_FLAG 1
3950#define PCI_32BIT_FLAG 2
3951#define ONE_TDMA_FLAG 4 /* no longer used */
3952#define NO_WOL_FLAG 8
3953#define USING_DAC_FLAG 0x10
3954#define USING_MSI_FLAG 0x20
3955#define ASF_ENABLE_FLAG 0x40
3956
3957 u32 phy_flags; 3964 u32 phy_flags;
3958#define PHY_SERDES_FLAG 1 3965#define PHY_SERDES_FLAG 1
3959#define PHY_CRC_FIX_FLAG 2 3966#define PHY_CRC_FIX_FLAG 2
@@ -4004,8 +4011,9 @@ struct bnx2 {
4004 dma_addr_t tx_desc_mapping; 4011 dma_addr_t tx_desc_mapping;
4005 4012
4006 4013
4014 int rx_max_ring;
4007 int rx_ring_size; 4015 int rx_ring_size;
4008 dma_addr_t rx_desc_mapping; 4016 dma_addr_t rx_desc_mapping[MAX_RX_RINGS];
4009 4017
4010 u16 tx_quick_cons_trip; 4018 u16 tx_quick_cons_trip;
4011 u16 tx_quick_cons_trip_int; 4019 u16 tx_quick_cons_trip_int;