author		Eric Dumazet <edumazet@google.com>	2014-10-05 05:35:17 -0400
committer	David S. Miller <davem@davemloft.net>	2014-10-06 01:04:15 -0400
commit		b9d8839a44092cb4268ef2813c34d5dbf3363603
tree		93e3bb2052e4edb6d674193d244918f87b84336b
parent		f905c79e12791d09bbfd9ba7b672fbeb19c1e7c2
net/mlx4_en: Use local var in tx flow for skb_shinfo(skb)
Access skb_shinfo(skb) once in the tx flow.

Also, rename the @i variable to @i_frag to avoid confusion, as the
"goto tx_drop_unmap;" relied on this @i variable.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_tx.c	58
1 file changed, 34 insertions(+), 24 deletions(-)
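For context, the whole patch is mechanical: fetch the skb_shinfo(skb) pointer into a local variable once and reuse that local, instead of recomputing the accessor at every use. Below is a minimal, self-contained userspace sketch of the same pattern; the fake_skb / fake_shared_info types and the count_segments() helper are illustrative stand-ins, not the driver's or the kernel's API.

/* Sketch only: hoist a repeatedly-used accessor result into a local,
 * mirroring what this patch does with skb_shinfo(skb) in mlx4_en_xmit().
 * All names here are hypothetical. */
#include <stdio.h>

struct fake_shared_info {
	unsigned int nr_frags;
	unsigned int gso_size;
};

struct fake_skb {
	unsigned char *end;	/* shared info lives at the end of the data area */
};

static inline struct fake_shared_info *fake_skb_shinfo(const struct fake_skb *skb)
{
	/* Recomputed on every call; the patch avoids calling this repeatedly. */
	return (struct fake_shared_info *)skb->end;
}

static unsigned int count_segments(const struct fake_skb *skb, unsigned int len)
{
	/* Fetch once, reuse the local - same shape as the LSO segment count
	 * computation in the diff below. */
	const struct fake_shared_info *shinfo = fake_skb_shinfo(skb);

	return len / shinfo->gso_size + !!(len % shinfo->gso_size) + shinfo->nr_frags;
}

int main(void)
{
	struct fake_shared_info si = { .nr_frags = 2, .gso_size = 1448 };
	struct fake_skb skb = { .end = (unsigned char *)&si };

	printf("%u\n", count_segments(&skb, 4000));	/* 3 segments + 2 frags = 5 */
	return 0;
}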
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 99875c8d70c1..aa05b09a0d0e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -532,13 +532,14 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
 }
 
 static bool is_inline(int inline_thold, const struct sk_buff *skb,
+		      const struct skb_shared_info *shinfo,
 		      void **pfrag)
 {
 	void *ptr;
 
 	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
-		if (skb_shinfo(skb)->nr_frags == 1) {
-			ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]);
+		if (shinfo->nr_frags == 1) {
+			ptr = skb_frag_address_safe(&shinfo->frags[0]);
 			if (unlikely(!ptr))
 				return 0;
 
@@ -546,7 +547,7 @@ static bool is_inline(int inline_thold, const struct sk_buff *skb,
 			*pfrag = ptr;
 
 			return 1;
-		} else if (unlikely(skb_shinfo(skb)->nr_frags))
+		} else if (unlikely(shinfo->nr_frags))
 			return 0;
 		else
 			return 1;
@@ -567,18 +568,19 @@ static int inline_size(const struct sk_buff *skb)
 }
 
 static int get_real_size(const struct sk_buff *skb,
+			 const struct skb_shared_info *shinfo,
 			 struct net_device *dev,
 			 int *lso_header_size)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	int real_size;
 
-	if (skb_is_gso(skb)) {
+	if (shinfo->gso_size) {
 		if (skb->encapsulation)
 			*lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
 		else
 			*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
-		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
+		real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
 			ALIGN(*lso_header_size + 4, DS_SIZE);
 		if (unlikely(*lso_header_size != skb_headlen(skb))) {
 			/* We add a segment for the skb linear buffer only if
@@ -593,8 +595,8 @@ static int get_real_size(const struct sk_buff *skb,
 		}
 	} else {
 		*lso_header_size = 0;
-		if (!is_inline(priv->prof->inline_thold, skb, NULL))
-			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
+		if (!is_inline(priv->prof->inline_thold, skb, shinfo, NULL))
+			real_size = CTRL_SIZE + (shinfo->nr_frags + 1) * DS_SIZE;
 		else
 			real_size = inline_size(skb);
 	}
@@ -604,6 +606,7 @@ static int get_real_size(const struct sk_buff *skb,
 
 static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
 			     const struct sk_buff *skb,
+			     const struct skb_shared_info *shinfo,
 			     int real_size, u16 *vlan_tag,
 			     int tx_ind, void *fragptr)
 {
@@ -619,9 +622,9 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
 			       MIN_PKT_LEN - skb->len);
 		}
 		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
-		if (skb_shinfo(skb)->nr_frags)
+		if (shinfo->nr_frags)
 			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
-			       skb_frag_size(&skb_shinfo(skb)->frags[0]));
+			       skb_frag_size(&shinfo->frags[0]));
 
 	} else {
 		inl->byte_count = cpu_to_be32(1 << 31 | spc);
@@ -639,9 +642,10 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
 		inl = (void *) (inl + 1) + spc;
 		skb_copy_from_linear_data_offset(skb, spc, inl + 1,
 						 skb_headlen(skb) - spc);
-		if (skb_shinfo(skb)->nr_frags)
+		if (shinfo->nr_frags)
 			memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
-			       fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0]));
+			       fragptr,
+			       skb_frag_size(&shinfo->frags[0]));
 	}
 
 	wmb();
@@ -673,6 +677,7 @@ static void mlx4_bf_copy(void __iomem *dst, const void *src,
 
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct device *ddev = priv->ddev;
 	struct mlx4_en_tx_ring *ring;
@@ -686,7 +691,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 index, bf_index;
 	__be32 op_own;
 	u16 vlan_tag = 0;
-	int i;
+	int i_frag;
 	int lso_header_size;
 	void *fragptr;
 	bool bounce = false;
@@ -702,7 +707,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* fetch ring->cons far ahead before needing it to avoid stall */
 	ring_cons = ACCESS_ONCE(ring->cons);
 
-	real_size = get_real_size(skb, dev, &lso_header_size);
+	real_size = get_real_size(skb, shinfo, dev, &lso_header_size);
 	if (unlikely(!real_size))
 		goto tx_drop;
 
@@ -776,21 +781,22 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_info->data_offset = (void *)data - (void *)tx_desc;
 
 	tx_info->linear = (lso_header_size < skb_headlen(skb) &&
-			   !is_inline(ring->inline_thold, skb, NULL)) ? 1 : 0;
+			   !is_inline(ring->inline_thold, skb, shinfo, NULL)) ? 1 : 0;
 
-	tx_info->nr_maps = skb_shinfo(skb)->nr_frags + tx_info->linear;
+	tx_info->nr_maps = shinfo->nr_frags + tx_info->linear;
 	data += tx_info->nr_maps - 1;
 
-	if (is_inline(ring->inline_thold, skb, &fragptr)) {
+	if (is_inline(ring->inline_thold, skb, shinfo, &fragptr)) {
 		tx_info->inl = 1;
 	} else {
 		dma_addr_t dma = 0;
 		u32 byte_count = 0;
 
 		/* Map fragments if any */
-		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+		for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
 			const struct skb_frag_struct *frag;
-			frag = &skb_shinfo(skb)->frags[i];
+
+			frag = &shinfo->frags[i_frag];
 			byte_count = skb_frag_size(frag);
 			dma = skb_frag_dma_map(ddev, frag,
 					       0, byte_count,
@@ -858,6 +864,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Handle LSO (TSO) packets */
 	if (lso_header_size) {
+		int i;
+
 		/* Mark opcode as LSO */
 		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
 			((ring->prod & ring->size) ?
@@ -865,15 +873,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		/* Fill in the LSO prefix */
 		tx_desc->lso.mss_hdr_size = cpu_to_be32(
-			skb_shinfo(skb)->gso_size << 16 | lso_header_size);
+			shinfo->gso_size << 16 | lso_header_size);
 
 		/* Copy headers;
 		 * note that we already verified that it is linear */
 		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
 
 		ring->tso_packets++;
-		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
-			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
+
+		i = ((skb->len - lso_header_size) / shinfo->gso_size) +
+			!!((skb->len - lso_header_size) % shinfo->gso_size);
 		tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
 		ring->packets += i;
 	} else {
@@ -889,7 +898,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
 	if (tx_info->inl) {
-		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
+		build_inline_wqe(tx_desc, skb, shinfo, real_size, &vlan_tag,
+				 tx_ind, fragptr);
 		tx_info->inl = 1;
 	}
 
@@ -958,8 +968,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 tx_drop_unmap:
 	en_err(priv, "DMA mapping error\n");
 
-	for (i++; i < skb_shinfo(skb)->nr_frags; i++) {
-		data++;
+	while (++i_frag < shinfo->nr_frags) {
+		++data;
 		dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr),
 			       be32_to_cpu(data->byte_count),
 			       PCI_DMA_TODEVICE);