author     Peng Li <lipeng321@huawei.com>         2017-12-21 23:21:48 -0500
committer  David S. Miller <davem@davemloft.net>  2017-12-27 10:41:21 -0500
commit     9699cffe97fee6eb957a23b58d814a2e62dd43e9 (patch)
tree       7b0ecbe1148a1613b69dd49ec5ad178ed4a5c996
parent     052ece6dc19c610a48c1cedeee1b2f1478838e99 (diff)
net: hns3: add handling vlan tag offload in bd

This patch handles passing the VLAN tag information between the sk_buff
and the rx/tx buffer descriptors (BD): on tx, the tag carried by the skb
is written into the tx BD; on rx, the tag reported in the rx BD is handed
back to the stack through the skb.

Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: Jian Shen <shenjian15@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 83
 1 file changed, 78 insertions(+), 5 deletions(-)
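
Before the diff itself, here is a minimal userspace model of the tx-side
tag-selection policy that the new hns3_fill_desc_vtags() helper applies:
the skb priority is folded into the top three bits of the TCI (shift 13),
a frame whose protocol is already 802.1Q gets the tag written into the
outer VLAN field of the descriptor, and a single-tagged frame uses the
inner field. This is only a sketch: the shift value and the inner/outer
split come from the patch, while model_skb, model_desc and fill_vtags are
hypothetical stand-ins, not driver or kernel API.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define TX_VLAN_PRIO_SHIFT 13          /* PCP lives in TCI bits 15:13 */
#define ETH_P_8021Q_MODEL  0x8100      /* 802.1Q ethertype */

struct model_skb {
	uint16_t protocol;     /* ethertype seen by the driver */
	bool     hw_tag_valid; /* stack requested HW tag insertion */
	uint16_t hw_tag;       /* tag from skb_vlan_tag_get() in the model */
	uint8_t  priority;     /* skb->priority, only 0..7 used here */
};

struct model_desc {
	uint16_t inner_vtag;   /* stand-in for desc->tx.vlan_tag */
	uint16_t out_vtag;     /* stand-in for desc->tx.outer_vlan_tag */
};

/* Mirror of the policy: outer tag for the double-tagged case, inner tag
 * otherwise; priority is folded into the top three TCI bits.
 */
static void fill_vtags(const struct model_skb *skb, struct model_desc *desc)
{
	if (!skb->hw_tag_valid)
		return;

	uint16_t tci = skb->hw_tag |
		       (uint16_t)((skb->priority & 0x7) << TX_VLAN_PRIO_SHIFT);

	if (skb->protocol == ETH_P_8021Q_MODEL)
		desc->out_vtag = tci;    /* frame already carries a tag */
	else
		desc->inner_vtag = tci;  /* single-tag case */
}

int main(void)
{
	struct model_skb skb = {
		.protocol = ETH_P_8021Q_MODEL,  /* already 802.1Q tagged */
		.hw_tag_valid = true,
		.hw_tag = 100,
		.priority = 5,
	};
	struct model_desc desc = { 0 };

	fill_vtags(&skb, &desc);
	printf("inner 0x%04x outer 0x%04x\n",
	       (unsigned)desc.inner_vtag, (unsigned)desc.out_vtag);
	return 0;
}
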
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 301b3296856e..320ae8892a68 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -723,6 +723,58 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
 	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
 }
 
+static int hns3_fill_desc_vtags(struct sk_buff *skb,
+				struct hns3_enet_ring *tx_ring,
+				u32 *inner_vlan_flag,
+				u32 *out_vlan_flag,
+				u16 *inner_vtag,
+				u16 *out_vtag)
+{
+#define HNS3_TX_VLAN_PRIO_SHIFT 13
+
+	if (skb->protocol == htons(ETH_P_8021Q) &&
+	    !(tx_ring->tqp->handle->kinfo.netdev->features &
+	    NETIF_F_HW_VLAN_CTAG_TX)) {
+		/* When HW VLAN acceleration is turned off, and the stack
+		 * sets the protocol to 802.1q, the driver just need to
+		 * set the protocol to the encapsulated ethertype.
+		 */
+		skb->protocol = vlan_get_protocol(skb);
+		return 0;
+	}
+
+	if (skb_vlan_tag_present(skb)) {
+		u16 vlan_tag;
+
+		vlan_tag = skb_vlan_tag_get(skb);
+		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
+
+		/* Based on hw strategy, use out_vtag in two layer tag case,
+		 * and use inner_vtag in one tag case.
+		 */
+		if (skb->protocol == htons(ETH_P_8021Q)) {
+			hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+			*out_vtag = vlan_tag;
+		} else {
+			hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+			*inner_vtag = vlan_tag;
+		}
+	} else if (skb->protocol == htons(ETH_P_8021Q)) {
+		struct vlan_ethhdr *vhdr;
+		int rc;
+
+		rc = skb_cow_head(skb, 0);
+		if (rc < 0)
+			return rc;
+		vhdr = (struct vlan_ethhdr *)skb->data;
+		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
+				<< HNS3_TX_VLAN_PRIO_SHIFT);
+	}
+
+	skb->protocol = vlan_get_protocol(skb);
+	return 0;
+}
+
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 			  int size, dma_addr_t dma, int frag_end,
 			  enum hns_desc_type type)
@@ -733,6 +785,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	u16 bdtp_fe_sc_vld_ra_ri = 0;
 	u32 type_cs_vlan_tso = 0;
 	struct sk_buff *skb;
+	u16 inner_vtag = 0;
+	u16 out_vtag = 0;
 	u32 paylen = 0;
 	u16 mss = 0;
 	__be16 protocol;
@@ -756,15 +810,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		skb = (struct sk_buff *)priv;
 		paylen = skb->len;
 
+		ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
+					   &ol_type_vlan_len_msec,
+					   &inner_vtag, &out_vtag);
+		if (unlikely(ret))
+			return ret;
+
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			skb_reset_mac_len(skb);
 			protocol = skb->protocol;
 
-			/* vlan packet*/
-			if (protocol == htons(ETH_P_8021Q)) {
-				protocol = vlan_get_protocol(skb);
-				skb->protocol = protocol;
-			}
 			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
 			if (ret)
 				return ret;
@@ -790,6 +845,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 			cpu_to_le32(type_cs_vlan_tso);
 		desc->tx.paylen = cpu_to_le32(paylen);
 		desc->tx.mss = cpu_to_le16(mss);
+		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
 	}
 
 	/* move ring pointer to next.*/
@@ -2101,6 +2158,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
 	prefetchw(skb->data);
 
+	/* Based on hw strategy, the tag offloaded will be stored at
+	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
+	 * in one layer tag case.
+	 */
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+		u16 vlan_tag;
+
+		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+		if (!(vlan_tag & VLAN_VID_MASK))
+			vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+		if (vlan_tag & VLAN_VID_MASK)
+			__vlan_hwaccel_put_tag(skb,
+					       htons(ETH_P_8021Q),
+					       vlan_tag);
+	}
+
 	bnum = 1;
 	if (length <= HNS3_RX_HEAD_SIZE) {
 		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
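
The rx hunk above reduces to a small fallback rule: prefer the outer tag
reported by the hardware (ot_vlan_tag), fall back to vlan_tag, and only
hand a tag to the stack when its VID bits are non-zero. Below is a
standalone sketch of just that rule, assuming pick_rx_tag as a
hypothetical helper name and VID_MASK as a stand-in for VLAN_VID_MASK;
it is an illustration, not driver code.

#include <stdint.h>
#include <stdio.h>

#define VID_MASK 0x0fff   /* low 12 bits of the TCI carry the VLAN ID */

/* Return the tag to report to the stack, or 0 when neither descriptor
 * field carries a VLAN ID.  Prefer ot_vlan_tag (double-tagged case) and
 * fall back to vlan_tag (single-tag case), as the rx hunk above does.
 */
static uint16_t pick_rx_tag(uint16_t ot_vlan_tag, uint16_t vlan_tag)
{
	uint16_t tag = ot_vlan_tag;

	if (!(tag & VID_MASK))
		tag = vlan_tag;

	return (tag & VID_MASK) ? tag : 0;
}

int main(void)
{
	/* Single-tagged frame: outer field empty, inner field holds VID 100 */
	printf("0x%04x\n", (unsigned)pick_rx_tag(0x0000, 0x0064));
	/* Untagged frame: nothing to report */
	printf("0x%04x\n", (unsigned)pick_rx_tag(0x0000, 0x0000));
	return 0;
}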