author	Eric Dumazet <edumazet@google.com>	2012-12-10 07:16:06 -0500
committer	David S. Miller <davem@davemloft.net>	2012-12-11 12:49:52 -0500
commit	d46d132cc0212ef08c22b9179dfa5fe21d07d253 (patch)
tree	1122710042b34889ddfec2d2e5d0c6a9a16553a7 /drivers/net
parent	d825da2ede50160e567e666ff43c89a403bf0193 (diff)
bnx2x: use netdev_alloc_frag()
Using netdev_alloc_frag() instead of kmalloc() permits better GRO or TCP
coalescing behavior, as skb_gro_receive() doesn't have to fall back to
frag_list overhead.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Dmitry Kravkov <dmitry@broadcom.com>
Cc: Eilon Greenstein <eilong@broadcom.com>
Acked-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
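
In essence, the patch keys both allocation and free off a single per-ring
field. A minimal sketch of that strategy follows; "struct rx_ring" is a
hypothetical stand-in for struct bnx2x_fastpath, and the real helpers,
bnx2x_frag_alloc() and bnx2x_frag_free(), appear in the diff below:

	#include <linux/types.h>
	#include <linux/netdevice.h>	/* netdev_alloc_frag() */
	#include <linux/slab.h>		/* kmalloc(), kfree() */
	#include <linux/mm.h>		/* put_page(), virt_to_head_page() */

	struct rx_ring {		/* hypothetical stand-in */
		u32 rx_buf_size;
		u32 rx_frag_size;	/* 0 if kmalloc'd, else rx_buf_size + NET_SKB_PAD */
	};

	static void *rx_buf_alloc(const struct rx_ring *r)
	{
		/* A page fragment lets skb_gro_receive() merge the skb head
		 * as a page frag instead of chaining skbs on frag_list. */
		if (r->rx_frag_size)
			return netdev_alloc_frag(r->rx_frag_size);
		/* Fall back to kmalloc() when the buffer wouldn't fit in a page. */
		return kmalloc(r->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
	}

	static void rx_buf_free(const struct rx_ring *r, void *data)
	{
		if (r->rx_frag_size)
			put_page(virt_to_head_page(data));	/* drop the page ref */
		else
			kfree(data);
	}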
Diffstat (limited to 'drivers/net')
 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h     |  2 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 43 ++++++++++++++++--------
 2 files changed, 32 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 9a3b81e75c01..e8d4db10c8f3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -489,7 +489,7 @@ struct bnx2x_fastpath {
 	u32			ustorm_rx_prods_offset;
 
 	u32			rx_buf_size;
-
+	u32			rx_frag_size; /* 0 if kmalloced(), or rx_buf_size + NET_SKB_PAD */
 	dma_addr_t		status_blk_mapping;
 
 	enum bnx2x_tpa_mode_t	mode;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 67baddd13a6e..a2998bea5d4b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -552,6 +552,23 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	return 0;
 }
 
+static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
+{
+	if (fp->rx_frag_size)
+		put_page(virt_to_head_page(data));
+	else
+		kfree(data);
+}
+
+static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
+{
+	if (fp->rx_frag_size)
+		return netdev_alloc_frag(fp->rx_frag_size);
+
+	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+}
+
+
 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			   struct bnx2x_agg_info *tpa_info,
 			   u16 pages,
@@ -574,15 +591,14 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		goto drop;
 
 	/* Try to allocate the new data */
-	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
-
+	new_data = bnx2x_frag_alloc(fp);
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 			 fp->rx_buf_size, DMA_FROM_DEVICE);
 	if (likely(new_data))
-		skb = build_skb(data, 0);
+		skb = build_skb(data, fp->rx_frag_size);
 
 	if (likely(skb)) {
 #ifdef BNX2X_STOP_ON_ERROR
@@ -619,7 +635,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		return;
 	}
-	kfree(new_data);
+	bnx2x_frag_free(fp, new_data);
 drop:
 	/* drop the packet and keep the buffer in the bin */
 	DP(NETIF_MSG_RX_STATUS,
@@ -635,7 +651,7 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 	dma_addr_t mapping;
 
-	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+	data = bnx2x_frag_alloc(fp);
 	if (unlikely(data == NULL))
 		return -ENOMEM;
 
@@ -643,7 +659,7 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
 			 fp->rx_buf_size,
 			 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		kfree(data);
+		bnx2x_frag_free(fp, data);
 		BNX2X_ERR("Can't map rx data\n");
 		return -ENOMEM;
 	}
@@ -845,9 +861,9 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 					dma_unmap_addr(rx_buf, mapping),
 					fp->rx_buf_size,
 					DMA_FROM_DEVICE);
-			skb = build_skb(data, 0);
+			skb = build_skb(data, fp->rx_frag_size);
 			if (unlikely(!skb)) {
-				kfree(data);
+				bnx2x_frag_free(fp, data);
 				bnx2x_fp_qstats(bp, fp)->
 						rx_skb_alloc_failed++;
 				goto next_rx;
@@ -1145,7 +1161,7 @@ static void bnx2x_free_tpa_pool(struct bnx2x *bp,
 		dma_unmap_single(&bp->pdev->dev,
 				 dma_unmap_addr(first_buf, mapping),
 				 fp->rx_buf_size, DMA_FROM_DEVICE);
-		kfree(data);
+		bnx2x_frag_free(fp, data);
 		first_buf->data = NULL;
 	}
 }
@@ -1190,8 +1206,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 			struct sw_rx_bd *first_buf =
 				&tpa_info->first_buf;
 
-			first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
-						  GFP_ATOMIC);
+			first_buf->data = bnx2x_frag_alloc(fp);
 			if (!first_buf->data) {
 				BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
 					  j);
@@ -1323,7 +1338,7 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
 				 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 		rx_buf->data = NULL;
-		kfree(data);
+		bnx2x_frag_free(fp, data);
 	}
 }
 
@@ -1782,6 +1797,10 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 			  mtu +
 			  BNX2X_FW_RX_ALIGN_END;
 		/* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
+		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
+			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
+		else
+			fp->rx_frag_size = 0;
 	}
 }
 
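A note on the build_skb() changes above: the second argument is 0 when the
skb head was kmalloc'd, or the fragment size when the head was carved out of
a page, so the skb can record head_frag and a truesize that matches the
fragment. A hedged usage sketch, reusing the hypothetical rx_ring helpers
from the sketch above (not part of the patch):

	#include <linux/skbuff.h>	/* build_skb() */

	static struct sk_buff *rx_build_skb(const struct rx_ring *r, void *data)
	{
		/* frag_size == 0 tells build_skb() the head was kmalloc'd;
		 * otherwise the head is a page fragment of that size. */
		struct sk_buff *skb = build_skb(data, r->rx_frag_size);

		if (unlikely(!skb))
			rx_buf_free(r, data);	/* matching free on failure */
		return skb;
	}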