Diffstat (limited to 'drivers/net/ethernet/intel/i40evf/i40e_txrx.c')
 drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 143 ++++++++++++++++++---------
 1 file changed, 107 insertions(+), 36 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 29004382f462..708891571dae 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
 }
 
 /**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+	return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
  *
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
 **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-	u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-		  ? ring->next_to_use
-		  : ring->next_to_use + ring->count);
-	return ntu - ring->next_to_clean;
+	u32 head, tail;
+
+	head = i40e_get_head(ring);
+	tail = readl(ring->tail);
+
+	if (head != tail)
+		return (head < tail) ?
+			tail - head : (tail + ring->count - head);
+
+	return 0;
 }
 
 /**
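
The rewritten i40e_get_tx_pending() above derives the count of in-flight descriptors from the hardware's write-back head and the tail register rather than the driver's software indices, handling the case where tail has wrapped past the end of the ring. A minimal, self-contained sketch of that wrap-aware arithmetic (ring size and head/tail values below are made-up test inputs, not driver state):

#include <assert.h>

/* Wrap-aware count of descriptors queued (tail) but not yet
 * completed (head) on a ring of 'count' entries; mirrors the
 * arithmetic in the patched i40e_get_tx_pending(). */
static unsigned int pending(unsigned int head, unsigned int tail,
			    unsigned int count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
	assert(pending(10, 14, 512) == 4);	/* no wrap: 14 - 10 */
	assert(pending(500, 4, 512) == 16);	/* tail wrapped: 4 + 512 - 500 */
	assert(pending(7, 7, 512) == 0);	/* ring idle */
	return 0;
}
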
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+	u32 tx_done = tx_ring->stats.packets;
+	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
 	u32 tx_pending = i40e_get_tx_pending(tx_ring);
 	bool ret = false;
 
@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 	 * run the check_tx_hang logic with a transmit completion
 	 * pending but without time to complete it yet.
 	 */
-	if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-	    (tx_pending >= I40E_MIN_DESC_PENDING)) {
+	if ((tx_done_old == tx_done) && tx_pending) {
 		/* make sure it is true for two checks in a row */
 		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
 				       &tx_ring->state);
-	} else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
-		   !(tx_pending < I40E_MIN_DESC_PENDING) ||
-		   !(tx_pending > 0)) {
+	} else if (tx_done_old == tx_done &&
+		   (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
 		/* update completed stats and disarm the hang check */
-		tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+		tx_ring->tx_stats.tx_done_old = tx_done;
 		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
 	}
 
 	return ret;
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-	return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3
 
 /**
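
The hang check above uses a two-strikes pattern: the first pass with completions pending but no forward progress only arms __I40E_HANG_CHECK_ARMED, and a hang is reported only if the bit was already set on the next pass; any progress disarms it. A rough userspace illustration of the same pattern, with C11 atomic_flag standing in for the kernel's test_and_set_bit()/clear_bit():

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag hang_armed = ATOMIC_FLAG_INIT;

/* Returns true only when no progress was seen on two consecutive
 * checks while work was still pending. */
static bool check_hang(bool made_progress, bool work_pending)
{
	if (!made_progress && work_pending)
		/* test-and-set returns the previous value, so this is
		 * false on the first strike and true on the second */
		return atomic_flag_test_and_set(&hang_armed);

	atomic_flag_clear(&hang_armed);	/* progress seen: disarm */
	return false;
}
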
@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	if (err < 0)
 		return err;
 
-	if (protocol == htons(ETH_P_IP)) {
-		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+	if (iph->version == 4) {
 		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
 		iph->tot_len = 0;
 		iph->check = 0;
 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
 						 0, IPPROTO_TCP, 0);
-	} else if (skb_is_gso_v6(skb)) {
-
-		ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
-			: ipv6_hdr(skb);
+	} else if (ipv6h->version == 6) {
 		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
 		ipv6h->payload_len = 0;
 		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
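
For TSO, i40e_tso() zeroes the IP length field and seeds tcph->check with the pseudo-header checksum computed over the addresses and protocol with a zero length, so the hardware only has to add each segment's length when it splits the packet. A userspace sketch of the IPv4 seed that ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0) produces (assumption: addresses passed as network-order 32-bit words; this is an illustration, not the kernel helper):

#include <stdint.h>

static uint16_t tso_pseudo_seed(uint32_t saddr, uint32_t daddr)
{
	uint32_t sum = 0;

	/* 16-bit one's-complement sum of the pseudo header:
	 * saddr, daddr, protocol (6 = TCP), length left as 0 */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6;

	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);

	/* the driver stores ~csum_tcpudp_magic(), i.e. the fold
	 * itself, un-inverted, for hardware to continue from */
	return (uint16_t)sum;
}
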
@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
 		}
 	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
-		if (tx_flags & I40E_TX_FLAGS_TSO) {
-			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+		*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+		if (tx_flags & I40E_TX_FLAGS_TSO)
 			ip_hdr(skb)->check = 0;
-		} else {
-			*cd_tunneling |=
-				I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-		}
 	}
1285 1286
1286 /* Now set the ctx descriptor fields */ 1287 /* Now set the ctx descriptor fields */
@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 			((skb_inner_network_offset(skb) -
 			  skb_transport_offset(skb)) >> 1) <<
 			I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+		if (this_ip_hdr->version == 6) {
+			tx_flags &= ~I40E_TX_FLAGS_IPV4;
+			tx_flags |= I40E_TX_FLAGS_IPV6;
+		}
+
 
 	} else {
 		network_hdr_len = skb_network_header_len(skb);
@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
+ /**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+			       const u8 hdr_len)
+{
+	struct skb_frag_struct *frag;
+	bool linearize = false;
+	unsigned int size = 0;
+	u16 num_frags;
+	u16 gso_segs;
+
+	num_frags = skb_shinfo(skb)->nr_frags;
+	gso_segs = skb_shinfo(skb)->gso_segs;
+
+	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+		u16 j = 1;
+
+		if (num_frags < (I40E_MAX_BUFFER_TXD))
+			goto linearize_chk_done;
+		/* try the simple math, if we have too many frags per segment */
+		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+		    I40E_MAX_BUFFER_TXD) {
+			linearize = true;
+			goto linearize_chk_done;
+		}
+		frag = &skb_shinfo(skb)->frags[0];
+		size = hdr_len;
+		/* we might still have more fragments per segment */
+		do {
+			size += skb_frag_size(frag);
+			frag++; j++;
+			if (j == I40E_MAX_BUFFER_TXD) {
+				if (size < skb_shinfo(skb)->gso_size) {
+					linearize = true;
+					break;
+				}
+				j = 1;
+				size -= skb_shinfo(skb)->gso_size;
+				if (size)
+					j++;
+				size += hdr_len;
+			}
+			num_frags--;
+		} while (num_frags);
+	} else {
+		if (num_frags >= I40E_MAX_BUFFER_TXD)
+			linearize = true;
+	}
+
+linearize_chk_done:
+	return linearize;
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring: ring to send buffer on
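
In i40e_chk_linearize() above, the "simple math" pre-check estimates the average number of buffers per GSO segment, counting roughly one extra buffer per segment for the header, and bails out early when even an even spread would exceed the hardware's 8-buffer chain limit. A worked example of that pre-check (the 8-buffer limit comes from the function's own comment; inputs are made up):

#include <assert.h>

#define MAX_BUFFER_TXD	8			/* HW scatter-gather limit */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int too_many_frags_per_seg(unsigned int num_frags,
				  unsigned int gso_segs)
{
	return DIV_ROUND_UP(num_frags + gso_segs, gso_segs) > MAX_BUFFER_TXD;
}

int main(void)
{
	assert(too_many_frags_per_seg(17, 2));	/* ceil(19/2) = 10 > 8: linearize */
	assert(!too_many_frags_per_seg(12, 2));	/* ceil(14/2) = 7 <= 8: walk frags */
	return 0;
}

When the pre-check passes, the function still walks the fragment list, because an uneven fragment distribution can overflow a single segment even when the average fits.
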
@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	else if (tso)
 		tx_flags |= I40E_TX_FLAGS_TSO;
 
+	if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+		if (skb_linearize(skb))
+			goto out_drop;
+
 	skb_tx_timestamp(skb);
 
 	/* always enable CRC insertion offload */
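
Note on the fallback: skb_linearize() copies a paged skb into one contiguous buffer and returns 0 on success or -ENOMEM under memory pressure, so a nonzero return drops the frame via out_drop instead of handing the hardware a descriptor chain it cannot complete.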