author		Eilon Greenstein <eilong@broadcom.com>	2008-08-13 18:51:07 -0400
committer	David S. Miller <davem@davemloft.net>	2008-08-13 19:02:34 -0400
commit		326262307bad2391a6393bb1968ed9a9a16fc617 (patch)
tree		932131d9dd6ac1ae66c4fcd4e0b3942e49216ccd /drivers/net/bnx2x_main.c
parent		3fcaf2e566b9cf8ccd16bcda3440717236de163d (diff)
bnx2x: Memory allocation
Memory allocation
- The CQE ring was allocated to the max size even for a chip that does
not support it. Fixed to allocate according to the chip type to save
memory
- The rx_page_ring was not freed on driver unload
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bnx2x_main.c')
 drivers/net/bnx2x_main.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
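As context for the hunks below, here is a minimal userspace sketch of the sizing pattern behind the first bullet of the commit message: the TPA aggregation pool is sized for the chip actually present (E1 vs. E1H) instead of always being sized for the E1H maximum. This is an illustration only, assuming made-up constant values and stand-in names (chip_is_e1, struct tpa_entry, init_tpa_pool); only the ETH_MAX_AGGREGATION_QUEUES_E1/_E1H macro names mirror the driver.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative values; the real constants live in the bnx2x headers. */
#define ETH_MAX_AGGREGATION_QUEUES_E1   32
#define ETH_MAX_AGGREGATION_QUEUES_E1H  64

struct tpa_entry {
        void *skb;              /* placeholder for the per-entry buffer */
};

/* Allocate the TPA pool sized for the chip actually present. */
static struct tpa_entry *init_tpa_pool(int chip_is_e1, int *pool_size)
{
        int max_agg_queues = chip_is_e1 ? ETH_MAX_AGGREGATION_QUEUES_E1 :
                                          ETH_MAX_AGGREGATION_QUEUES_E1H;

        *pool_size = max_agg_queues;
        return calloc(max_agg_queues, sizeof(struct tpa_entry));
}

int main(void)
{
        int size;
        struct tpa_entry *pool = init_tpa_pool(1 /* pretend E1 */, &size);

        if (!pool)
                return 1;
        printf("allocated %d aggregation entries\n", size);
        free(pool);
        return 0;
}

The same ternary appears in the first hunk of the patch, where the new max_agg_queues variable then replaces the hard-coded ETH_MAX_AGGREGATION_QUEUES_E1H uses.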
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index c8b61788abb8..b8cdce21c5d9 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -4252,7 +4252,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 static void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
         int func = BP_FUNC(bp);
-        u16 ring_prod, cqe_ring_prod = 0;
+        int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+                                              ETH_MAX_AGGREGATION_QUEUES_E1H;
+        u16 ring_prod, cqe_ring_prod;
         int i, j;
 
         bp->rx_buf_use_size = bp->dev->mtu;
@@ -4266,9 +4268,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
                                          bp->dev->mtu + ETH_OVREHEAD);
 
         for_each_queue(bp, j) {
-                for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
-                        struct bnx2x_fastpath *fp = &bp->fp[j];
+                struct bnx2x_fastpath *fp = &bp->fp[j];
 
+                for (i = 0; i < max_agg_queues; i++) {
                         fp->tpa_pool[i].skb =
                            netdev_alloc_skb(bp->dev, bp->rx_buf_size);
                         if (!fp->tpa_pool[i].skb) {
@@ -4348,8 +4350,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
                                 /* Cleanup already allocated elements */
                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
-                                bnx2x_free_tpa_pool(bp, fp,
-                                                    ETH_MAX_AGGREGATION_QUEUES_E1H);
+                                bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
                                 fp->disable_tpa = 1;
                                 ring_prod = 0;
                                 break;
@@ -5772,6 +5773,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
                                NUM_RCQ_BD);
 
                 /* SGE ring */
+                BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
                                bnx2x_fp(bp, i, rx_sge_mapping),
                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
@@ -5949,7 +5951,8 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
                         dev_kfree_skb(skb);
                 }
                 if (!fp->disable_tpa)
-                        bnx2x_free_tpa_pool(bp, fp,
-                                            ETH_MAX_AGGREGATION_QUEUES_E1H);
+                        bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
+                                            ETH_MAX_AGGREGATION_QUEUES_E1 :
+                                            ETH_MAX_AGGREGATION_QUEUES_E1H);
         }
 }
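For the second bullet of the commit message, the bnx2x_free_mem hunk above adds a BNX2X_FREE for rx_page_ring so the host-side page bookkeeping array is released along with the DMA rings on unload. Below is a minimal userspace sketch of that alloc/free pairing; the structures and helpers (struct rx_rings, alloc_sge_rings, free_sge_rings, NUM_RX_SGE) are simplified stand-ins, not the driver's real BNX2X_ALLOC/BNX2X_FREE machinery.

#include <stdlib.h>

#define NUM_RX_SGE 512          /* illustrative ring size */

struct sw_rx_page {
        void *page;             /* placeholder for the mapped page */
};

struct rx_rings {
        struct sw_rx_page *rx_page_ring;  /* host-side bookkeeping array */
        void *rx_sge_ring;                /* stand-in for the DMA ring */
};

static int alloc_sge_rings(struct rx_rings *r)
{
        r->rx_page_ring = calloc(NUM_RX_SGE, sizeof(*r->rx_page_ring));
        r->rx_sge_ring = calloc(NUM_RX_SGE, 16);
        if (!r->rx_page_ring || !r->rx_sge_ring)
                return -1;
        return 0;
}

static void free_sge_rings(struct rx_rings *r)
{
        /* Both arrays must go; before the fix only the DMA ring was freed. */
        free(r->rx_page_ring);
        free(r->rx_sge_ring);
}

int main(void)
{
        struct rx_rings r = { 0 };

        if (alloc_sge_rings(&r) != 0) {
                free_sge_rings(&r);     /* free(NULL) is a no-op */
                return 1;
        }
        /* ... rings would be used here ... */
        free_sge_rings(&r);             /* the unload path releases both */
        return 0;
}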