author		Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>	2018-03-20 10:58:15 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2018-03-26 14:54:49 -0400
commit		d76a60ba7afb89523c88cf2ed3a044ce4180289e (patch)
tree		9cdcbbce6de9f28400f7610d565cc86450a3f80f /drivers/net/ethernet/intel/ice/ice_txrx.c
parent		2b245cb29421abbad508e93cdfedf81adc12edf1 (diff)
ice: Add support for VLANs and offloads
This patch adds support for VLANs. When a VLAN is created a switch filter
is added to direct the VLAN traffic to the corresponding VSI. When a VLAN
is deleted, the filter is deleted as well.

This patch also adds support for the following hardware offloads:
    1) VLAN tag insertion/stripping
    2) Receive Side Scaling (RSS)
    3) Tx checksum and TCP segmentation
    4) Rx checksum

Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
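For reference, here is a minimal user-space sketch of the tx_flags VLAN round-trip introduced below: ice_tx_prepare_vlan_flags() packs the tag into first->tx_flags and ice_tx_map() later unpacks it into the descriptor's L2TAG1 field. The ICE_TX_FLAGS_* values are assumptions made purely for illustration; the real definitions live in ice_txrx.h, which this diff does not touch.

/* Sketch only: the flag values below are assumptions for illustration;
 * the driver's real ICE_TX_FLAGS_* definitions live in ice_txrx.h.
 */
#include <stdint.h>
#include <stdio.h>

#define ICE_TX_FLAGS_HW_VLAN	(1u << 1)			/* assumed bit */
#define ICE_TX_FLAGS_VLAN_S	16				/* assumed shift */
#define ICE_TX_FLAGS_VLAN_M	(0xffffu << ICE_TX_FLAGS_VLAN_S)

int main(void)
{
	uint32_t tx_flags = 0;
	uint16_t vlan_tag = 100;	/* what skb_vlan_tag_get() would return */
	uint64_t td_tag = 0;

	/* ice_tx_prepare_vlan_flags(): stash the tag in the upper bits */
	tx_flags |= (uint32_t)vlan_tag << ICE_TX_FLAGS_VLAN_S;
	tx_flags |= ICE_TX_FLAGS_HW_VLAN;

	/* ice_tx_map(): recover the tag for the descriptor's L2TAG1 field */
	if (tx_flags & ICE_TX_FLAGS_HW_VLAN)
		td_tag = (tx_flags & ICE_TX_FLAGS_VLAN_M) >> ICE_TX_FLAGS_VLAN_S;

	printf("td_tag = %llu\n", (unsigned long long)td_tag);
	return 0;
}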
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_txrx.c	405
1 file changed, 402 insertions(+), 3 deletions(-)
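For the TSO path added below, ice_tso() packs the context descriptor's quad word 1 from the descriptor type, the TSO opcode, the total TSO length, and the MSS. The following standalone sketch shows that packing for a roughly 64 kB GSO skb with plain TCP/IPv4 headers; every constant and shift value is assumed purely for illustration, not taken from the driver's actual descriptor definitions.

/* Sketch of the cd_qw1 packing done in ice_tso(); all constants below are
 * assumed for illustration, not the driver's actual definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define ICE_TX_DESC_DTYPE_CTX		0x1ULL	/* assumed */
#define ICE_TX_CTX_DESC_TSO		0x1ULL	/* assumed */
#define ICE_TXD_CTX_QW1_CMD_S		4	/* assumed */
#define ICE_TXD_CTX_QW1_TSO_LEN_S	30	/* assumed */
#define ICE_TXD_CTX_QW1_MSS_S		50	/* assumed */

int main(void)
{
	uint32_t header_len = 54;		/* 14 eth + 20 IPv4 + 20 TCP */
	uint32_t skb_len = 45 * 1448 + header_len;
	uint64_t cd_tso_len = skb_len - header_len;
	uint64_t cd_mss = 1448;
	uint64_t cd_qw1 = 0;

	cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
		  (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
		  (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
		  (cd_mss << ICE_TXD_CTX_QW1_MSS_S);

	printf("cd_qw1 = 0x%016llx\n", (unsigned long long)cd_qw1);
	return 0;
}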
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 1ccf8e69b85a..6481e3d86374 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -797,6 +797,134 @@ static bool ice_is_non_eop(struct ice_ring *rx_ring,
 }
 
 /**
+ * ice_ptype_to_htype - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ */
+static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
+{
+	return PKT_HASH_TYPE_NONE;
+}
+
+/**
+ * ice_rx_hash - set the hash value in the skb
+ * @rx_ring: descriptor ring
+ * @rx_desc: specific descriptor
+ * @skb: pointer to current skb
+ * @rx_ptype: the ptype value from the descriptor
+ */
+static void
+ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+	    struct sk_buff *skb, u8 rx_ptype)
+{
+	struct ice_32b_rx_flex_desc_nic *nic_mdid;
+	u32 hash;
+
+	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
+		return;
+
+	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+	hash = le32_to_cpu(nic_mdid->rss_hash);
+	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+}
+
+/**
+ * ice_rx_csum - Indicate in skb if checksum is good
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ * @ptype: the packet type decoded by hardware
+ *
+ * skb->protocol must be set before this function is called
+ */
+static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
+			union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+{
+	struct ice_rx_ptype_decoded decoded;
+	u32 rx_error, rx_status;
+	bool ipv4, ipv6;
+
+	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
+	rx_error = rx_status;
+
+	decoded = ice_decode_rx_desc_ptype(ptype);
+
+	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
+	skb->ip_summed = CHECKSUM_NONE;
+	skb_checksum_none_assert(skb);
+
+	/* check if Rx checksum is enabled */
+	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* check if HW has decoded the packet and checksum */
+	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
+		return;
+
+	if (!(decoded.known && decoded.outer_ip))
+		return;
+
+	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
+	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+
+	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+		goto checksum_fail;
+	else if (ipv6 && (rx_status &
+		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
+		goto checksum_fail;
+
+	/* check for L4 errors and handle packets that were not able to be
+	 * checksummed due to arrival speed
+	 */
+	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+		goto checksum_fail;
+
+	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
+	switch (decoded.inner_prot) {
+	case ICE_RX_PTYPE_INNER_PROT_TCP:
+	case ICE_RX_PTYPE_INNER_PROT_UDP:
+	case ICE_RX_PTYPE_INNER_PROT_SCTP:
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	default:
+		break;
+	}
+	return;
+
+checksum_fail:
+	vsi->back->hw_csum_rx_error++;
+}
+
+/**
+ * ice_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @ptype: the packet type decoded by hardware
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
+ */
+static void ice_process_skb_fields(struct ice_ring *rx_ring,
+				   union ice_32b_rx_flex_desc *rx_desc,
+				   struct sk_buff *skb, u8 ptype)
+{
+	ice_rx_hash(rx_ring, rx_desc, skb, ptype);
+
+	/* modifies the skb - consumes the enet header */
+	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+	ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
+}
+
+/**
  * ice_receive_skb - Send a completed packet up the stack
  * @rx_ring: rx ring in play
  * @skb: packet to send up
@@ -839,6 +967,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
 	struct sk_buff *skb;
 	u16 stat_err_bits;
 	u16 vlan_tag = 0;
+	u8 rx_ptype;
 
 	/* return some buffers to hardware, one at a time is too slow */
 	if (cleaned_count >= ICE_RX_BUF_WRITE) {
@@ -882,6 +1011,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
 			continue;
 		}
 
+		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
+			ICE_RX_FLEX_DESC_PTYPE_M;
+
 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
 		if (ice_test_staterr(rx_desc, stat_err_bits))
 			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
@@ -897,6 +1029,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
 
+		/* populate checksum, VLAN, and protocol */
+		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+
 		/* send completed skb up the stack */
 		ice_receive_skb(rx_ring, skb, vlan_tag);
 
@@ -1026,14 +1161,17 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
  * ice_tx_map - Build the Tx descriptor
  * @tx_ring: ring to send buffer on
  * @first: first buffer info buffer to use
+ * @off: pointer to struct that holds offload parameters
  *
  * This function loops over the skb data pointed to by *first
  * and gets a physical address for each memory location and programs
  * it and the length into the transmit descriptor.
  */
-static void ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first)
+static void
+ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
+	   struct ice_tx_offload_params *off)
 {
-	u64 td_offset = 0, td_tag = 0, td_cmd = 0;
+	u64 td_offset, td_tag, td_cmd;
 	u16 i = tx_ring->next_to_use;
 	struct skb_frag_struct *frag;
 	unsigned int data_len, size;
@@ -1042,6 +1180,9 @@ static void ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first)
 	struct sk_buff *skb;
 	dma_addr_t dma;
 
+	td_tag = off->td_l2tag1;
+	td_cmd = off->td_cmd;
+	td_offset = off->td_offset;
 	skb = first->skb;
 
 	data_len = skb->data_len;
@@ -1049,6 +1190,12 @@ static void ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first)
 
 	tx_desc = ICE_TX_DESC(tx_ring, i);
 
+	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
+		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
+		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
+			  ICE_TX_FLAGS_VLAN_S;
+	}
+
 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
 
 	tx_buf = first;
@@ -1170,6 +1317,223 @@ dma_error:
 }
 
 /**
+ * ice_tx_csum - Enable Tx checksum offloads
+ * @first: pointer to the first descriptor
+ * @off: pointer to struct that holds offload parameters
+ *
+ * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
+ */
+static
+int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
+{
+	u32 l4_len = 0, l3_len = 0, l2_len = 0;
+	struct sk_buff *skb = first->skb;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		unsigned char *hdr;
+	} l4;
+	__be16 frag_off, protocol;
+	unsigned char *exthdr;
+	u32 offset, cmd = 0;
+	u8 l4_proto = 0;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	/* compute outer L2 header size */
+	l2_len = ip.hdr - skb->data;
+	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
+
+	if (skb->encapsulation)
+		return -1;
+
+	/* Enable IP checksum offloads */
+	protocol = vlan_get_protocol(skb);
+	if (protocol == htons(ETH_P_IP)) {
+		l4_proto = ip.v4->protocol;
+		/* the stack computes the IP header already, the only time we
+		 * need the hardware to recompute it is in the case of TSO.
+		 */
+		if (first->tx_flags & ICE_TX_FLAGS_TSO)
+			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		else
+			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+
+	} else if (protocol == htons(ETH_P_IPV6)) {
+		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
+					 &frag_off);
+	} else {
+		return -1;
+	}
+
+	/* compute inner L3 header size */
+	l3_len = l4.hdr - ip.hdr;
+	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
+
+	/* Enable L4 checksum offloads */
+	switch (l4_proto) {
+	case IPPROTO_TCP:
+		/* enable checksum offloads */
+		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+		l4_len = l4.tcp->doff;
+		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case IPPROTO_UDP:
+		/* enable UDP checksum offload */
+		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+		l4_len = (sizeof(struct udphdr) >> 2);
+		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case IPPROTO_SCTP:
+	default:
+		if (first->tx_flags & ICE_TX_FLAGS_TSO)
+			return -1;
+		skb_checksum_help(skb);
+		return 0;
+	}
+
+	off->td_cmd |= cmd;
+	off->td_offset |= offset;
+	return 1;
+}
+
+/**
+ * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @tx_ring: ring to send buffer on
+ * @first: pointer to struct ice_tx_buf
+ *
+ * Checks the skb and sets up the generic transmit flags related to VLAN
+ * tagging for the HW (e.g. VLAN, DCB).
+ *
+ * Returns an error code to indicate the frame should be dropped on error,
+ * otherwise returns 0 to indicate the flags have been set properly.
+ */
+static int
+ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
+{
+	struct sk_buff *skb = first->skb;
+	__be16 protocol = skb->protocol;
+
+	if (protocol == htons(ETH_P_8021Q) &&
+	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+		/* when HW VLAN acceleration is turned off by the user the
+		 * stack sets the protocol to 8021q so that the driver
+		 * can take any steps required to support the SW only
+		 * VLAN handling. In our case the driver doesn't need
+		 * to take any further steps so just set the protocol
+		 * to the encapsulated ethertype.
+		 */
+		skb->protocol = vlan_get_protocol(skb);
+		goto out;
+	}
+
+	/* if we have a HW VLAN tag being added, default to the HW one */
+	if (skb_vlan_tag_present(skb)) {
+		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
+		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+	} else if (protocol == htons(ETH_P_8021Q)) {
+		struct vlan_hdr *vhdr, _vhdr;
+
+		/* for SW VLAN, check the next protocol and store the tag */
+		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
+							     sizeof(_vhdr),
+							     &_vhdr);
+		if (!vhdr)
+			return -EINVAL;
+
+		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
+				   ICE_TX_FLAGS_VLAN_S;
+		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
+	}
+
+out:
+	return 0;
+}
+
+/**
+ * ice_tso - computes mss and TSO length to prepare for TSO
+ * @first: pointer to struct ice_tx_buf
+ * @off: pointer to struct that holds offload parameters
+ *
+ * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
+ */
+static
+int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
+{
+	struct sk_buff *skb = first->skb;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		unsigned char *hdr;
+	} l4;
+	u64 cd_mss, cd_tso_len;
+	u32 paylen, l4_start;
+	int err;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		ip.v4->tot_len = 0;
+		ip.v4->check = 0;
+	} else {
+		ip.v6->payload_len = 0;
+	}
+
+	/* determine offset of transport header */
+	l4_start = l4.hdr - skb->data;
+
+	/* remove payload length from checksum */
+	paylen = skb->len - l4_start;
+	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+
+	/* compute length of segmentation header */
+	off->header_len = (l4.tcp->doff * 4) + l4_start;
+
+	/* update gso_segs and bytecount */
+	first->gso_segs = skb_shinfo(skb)->gso_segs;
+	first->bytecount = (first->gso_segs - 1) * off->header_len;
+
+	cd_tso_len = skb->len - off->header_len;
+	cd_mss = skb_shinfo(skb)->gso_size;
+
+	/* record cdesc_qw1 with TSO parameters */
+	off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
+		       (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
+		       (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
+		       (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
+	first->tx_flags |= ICE_TX_FLAGS_TSO;
+	return 1;
+}
+
+/**
  * ice_txd_use_count - estimate the number of descriptors needed for Tx
  * @size: transmit request size in bytes
  *
@@ -1322,8 +1686,10 @@ static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
 static netdev_tx_t
 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
 {
+	struct ice_tx_offload_params offload = { 0 };
 	struct ice_tx_buf *first;
 	unsigned int count;
+	int tso, csum;
 
 	count = ice_xmit_desc_count(skb);
 	if (ice_chk_linearize(skb, count)) {
@@ -1344,13 +1710,46 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
 		return NETDEV_TX_BUSY;
 	}
 
+	offload.tx_ring = tx_ring;
+
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_buf[tx_ring->next_to_use];
 	first->skb = skb;
 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
 	first->gso_segs = 1;
+	first->tx_flags = 0;
+
+	/* prepare the VLAN tagging flags for Tx */
+	if (ice_tx_prepare_vlan_flags(tx_ring, first))
+		goto out_drop;
+
+	/* set up TSO offload */
+	tso = ice_tso(first, &offload);
+	if (tso < 0)
+		goto out_drop;
+
+	/* always set up Tx checksum offload */
+	csum = ice_tx_csum(first, &offload);
+	if (csum < 0)
+		goto out_drop;
+
+	if (tso || offload.cd_tunnel_params) {
+		struct ice_tx_ctx_desc *cdesc;
+		int i = tx_ring->next_to_use;
+
+		/* grab the next descriptor */
+		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
+		i++;
+		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+		/* setup context descriptor */
+		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
+		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
+		cdesc->rsvd = cpu_to_le16(0);
+		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
+	}
 
-	ice_tx_map(tx_ring, first);
+	ice_tx_map(tx_ring, first, &offload);
 	return NETDEV_TX_OK;
 
 out_drop:
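One detail worth calling out in ice_tx_csum() above: the Tx descriptor's length fields use different units, so the code divides the MAC header length by 2 (2-byte words), divides the IP header length by 4 (4-byte words), and takes the TCP data offset as-is (already in 4-byte words). Below is a minimal sketch of that td_offset packing for a plain IPv4/TCP frame; the shift values are assumptions for illustration only, not the driver's actual definitions.

/* Sketch of the td_offset packing in ice_tx_csum(); shift values are
 * assumptions for illustration, not the driver's actual definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define ICE_TX_DESC_LEN_MACLEN_S	0	/* assumed */
#define ICE_TX_DESC_LEN_IPLEN_S		7	/* assumed */
#define ICE_TX_DESC_LEN_L4_LEN_S	14	/* assumed */

int main(void)
{
	uint32_t l2_len = 14;	/* Ethernet header, no VLAN tag */
	uint32_t l3_len = 20;	/* IPv4 header, no options */
	uint32_t l4_len = 5;	/* TCP doff: 20-byte header in 4-byte words */
	uint32_t offset;

	offset	= (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;	/* MACLEN = 7 */
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;	/* IPLEN = 5 */
	offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;		/* L4LEN = 5 */

	printf("td_offset = 0x%x\n", offset);
	return 0;
}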