path: root/drivers/net
author		Michal Schmidt <mschmidt@redhat.com>	2013-09-05 16:13:09 -0400
committer	David S. Miller <davem@davemloft.net>	2013-09-11 15:43:54 -0400
commit		996dedbafe640aee40dc846ad634dd352b6bcd44 (patch)
tree		bd7b091ebbf1ac628a2640c9cb4f4459bf2f5783 /drivers/net
parent		c19d65c95c6d472d69829fea7d473228493d5245 (diff)
bnx2x: avoid atomic allocations during initialization
During initialization bnx2x allocates significant amounts of memory (for rx data, rx SGEs, TPA pool) using atomic allocations. I received a report where bnx2x failed to allocate SGEs and it had to fall back to TPA-less operation.

Let's use GFP_KERNEL allocations during initialization, which runs in process context.

Add gfp_t parameters to functions that are used both in initialization and in the receive path. Use an unlikely branch in bnx2x_frag_alloc() to avoid atomic allocation by netdev_alloc_frag(). The branch is taken several thousand times during initialization, but then never again.

Note that fp->rx_frag_size is never greater than PAGE_SIZE, so __get_free_page() can be used here.

Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
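[Editor's note] For context on the __GFP_WAIT test this patch adds: in kernels of this era GFP_KERNEL includes __GFP_WAIT (the allocation may sleep) while GFP_ATOMIC does not, so that single bit cleanly separates process-context initialization calls from receive-path calls. A minimal compile-time sketch of that property; the helper name is hypothetical, only the gfp.h/bug.h symbols are real:

	#include <linux/gfp.h>
	#include <linux/bug.h>

	/* Compile-time check of the property the patch relies on:
	 * GFP_KERNEL may sleep (__GFP_WAIT set), GFP_ATOMIC may not.
	 */
	static inline void gfp_wait_property_check(void)
	{
		BUILD_BUG_ON(!(GFP_KERNEL & __GFP_WAIT));
		BUILD_BUG_ON(GFP_ATOMIC & __GFP_WAIT);
	}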
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c	38
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 2361bf236ce3..90045c920d09 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -490,10 +490,10 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
 	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
 }
 
-static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
-			      struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			      u16 index, gfp_t gfp_mask)
 {
-	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
 	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
 	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
 	dma_addr_t mapping;
@@ -572,7 +572,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		/* If we fail to allocate a substitute page, we simply stop
 		   where we are and drop the whole packet */
-		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
+		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
 		if (unlikely(err)) {
 			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 			return err;
@@ -616,12 +616,17 @@ static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
 		kfree(data);
 }
 
-static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
+static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
 {
-	if (fp->rx_frag_size)
+	if (fp->rx_frag_size) {
+		/* GFP_KERNEL allocations are used only during initialization */
+		if (unlikely(gfp_mask & __GFP_WAIT))
+			return (void *)__get_free_page(gfp_mask);
+
 		return netdev_alloc_frag(fp->rx_frag_size);
+	}
 
-	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
 }
 
 #ifdef CONFIG_INET
@@ -701,7 +706,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		goto drop;
 
 	/* Try to allocate the new data */
-	new_data = bnx2x_frag_alloc(fp);
+	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
@@ -752,15 +757,15 @@ drop:
 	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
 }
 
-static int bnx2x_alloc_rx_data(struct bnx2x *bp,
-			       struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			       u16 index, gfp_t gfp_mask)
 {
 	u8 *data;
 	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 	dma_addr_t mapping;
 
-	data = bnx2x_frag_alloc(fp);
+	data = bnx2x_frag_alloc(fp, gfp_mask);
 	if (unlikely(data == NULL))
 		return -ENOMEM;
 
@@ -953,7 +958,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				memcpy(skb->data, data + pad, len);
 				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
 			} else {
-				if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
+				if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
+							       GFP_ATOMIC) == 0)) {
 					dma_unmap_single(&bp->pdev->dev,
 							 dma_unmap_addr(rx_buf, mapping),
 							 fp->rx_buf_size,
@@ -1313,7 +1319,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 				struct sw_rx_bd *first_buf =
 					&tpa_info->first_buf;
 
-				first_buf->data = bnx2x_frag_alloc(fp);
+				first_buf->data =
+					bnx2x_frag_alloc(fp, GFP_KERNEL);
 				if (!first_buf->data) {
 					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
 						  j);
@@ -1335,7 +1342,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 			for (i = 0, ring_prod = 0;
 			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
 
-				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
+						       GFP_KERNEL) < 0) {
 					BNX2X_ERR("was only able to allocate %d rx sges\n",
 						  i);
 					BNX2X_ERR("disabling TPA for queue[%d]\n",
@@ -4221,7 +4229,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
 	 *    fp->eth_q_stats.rx_skb_alloc_failed = 0
 	 */
 	for (i = 0; i < rx_ring_size; i++) {
-		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
+		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
 			failure_cnt++;
 			continue;
 		}
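[Editor's note] For readability, here is bnx2x_frag_alloc() as it reads with the patch applied, reconstructed from the @@ -616,12 +616,17 @@ hunk above. Because fp->rx_frag_size never exceeds PAGE_SIZE, the single-page allocation in the GFP_KERNEL branch is always large enough:

	static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
	{
		if (fp->rx_frag_size) {
			/* GFP_KERNEL allocations are used only during initialization */
			if (unlikely(gfp_mask & __GFP_WAIT))
				return (void *)__get_free_page(gfp_mask);

			/* receive path: atomic per-CPU page-fragment cache */
			return netdev_alloc_frag(fp->rx_frag_size);
		}

		/* fragment allocation disabled: fall back to kmalloc with the
		 * caller-supplied mask instead of hardcoded GFP_ATOMIC */
		return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
	}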