aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/intel/i40evf
diff options
context:
space:
mode:
authorAnjali Singhai <anjali.singhai@intel.com>2015-02-21 01:42:35 -0500
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2015-02-26 07:53:58 -0500
commit71da61976ec18fb57b3ba9a1dd846b747cc7c884 (patch)
tree9de12ed2f5e134a02805d09f26c11b9295bd4438 /drivers/net/ethernet/intel/i40evf
parentb67a03357cab0ccb91d641fead6f167c697a24cb (diff)
i40e: Fix TSO with more than 8 frags per segment issue
The hardware has some limitations that the driver needs to adhere to, which we found in extended testing: 1) no more than 8 descriptors per packet on the wire, and 2) no header can span more than 3 descriptors. If one of these events occurs, the hardware will generate an internal error and freeze the Tx queue. This patch linearizes the skb to avoid these situations. Change-ID: I37dab7d3966e14895a9663ec4d0aaa8eb0d9e115 Signed-off-by: Anjali Singhai Jain <anjali.singhai@intel.com> Tested-by: Jim Young <james.m.young@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/i40evf')
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c65
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h1
2 files changed, 66 insertions, 0 deletions
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 29004382f462..1320a433b8ed 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1380,6 +1380,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1380 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); 1380 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
1381} 1381}
1382 1382
/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @tx_flags: collected send information
 * @hdr_len: size of the packet header
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 *
 * Returns true when the caller must software-linearize the skb before
 * handing it to the hardware, false when it can be sent as-is.  The HW
 * limit being guarded is I40E_MAX_BUFFER_TXD (8) data descriptors per
 * on-wire packet; for TSO/FSO each emitted segment must individually
 * stay under that limit.
 **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
			       const u8 hdr_len)
{
	struct skb_frag_struct *frag;
	bool linearize = false;
	unsigned int size = 0;
	u16 num_frags;
	u16 gso_segs;

	num_frags = skb_shinfo(skb)->nr_frags;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
		/* j counts descriptors consumed by the segment currently
		 * being accumulated; it starts at 1 to account for the
		 * descriptor carrying the headers.
		 */
		u16 j = 1;

		/* fewer frags than the HW limit can never overflow it */
		if (num_frags < (I40E_MAX_BUFFER_TXD))
			goto linearize_chk_done;
		/* try the simple math, if we have too many frags per segment */
		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
		    I40E_MAX_BUFFER_TXD) {
			linearize = true;
			goto linearize_chk_done;
		}
		/* The average was fine; walk the frags and simulate how the
		 * HW will slice them into gso_size segments, checking each
		 * segment's descriptor count individually.
		 */
		frag = &skb_shinfo(skb)->frags[0];
		size = hdr_len;
		/* we might still have more fragments per segment */
		do {
			size += skb_frag_size(frag);
			frag++; j++;
			if (j == I40E_MAX_BUFFER_TXD) {
				/* hit the descriptor budget: if we have not
				 * yet accumulated a full segment's worth of
				 * payload, this segment would need a 9th
				 * descriptor -> must linearize.
				 */
				if (size < skb_shinfo(skb)->gso_size) {
					linearize = true;
					break;
				}
				/* start accounting for the next segment;
				 * leftover bytes beyond gso_size spill into
				 * it and cost one extra descriptor.
				 */
				j = 1;
				size -= skb_shinfo(skb)->gso_size;
				if (size)
					j++;
				size += hdr_len;
			}
			num_frags--;
		} while (num_frags);
	} else {
		/* non-TSO: the whole skb is one wire packet, so the frag
		 * count (plus the head) must simply fit the HW limit.
		 */
		if (num_frags >= I40E_MAX_BUFFER_TXD)
			linearize = true;
	}

linearize_chk_done:
	return linearize;
}
1443
1383/** 1444/**
1384 * i40e_tx_map - Build the Tx descriptor 1445 * i40e_tx_map - Build the Tx descriptor
1385 * @tx_ring: ring to send buffer on 1446 * @tx_ring: ring to send buffer on
@@ -1654,6 +1715,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
1654 else if (tso) 1715 else if (tso)
1655 tx_flags |= I40E_TX_FLAGS_TSO; 1716 tx_flags |= I40E_TX_FLAGS_TSO;
1656 1717
1718 if (i40e_chk_linearize(skb, tx_flags, hdr_len))
1719 if (skb_linearize(skb))
1720 goto out_drop;
1721
1657 skb_tx_timestamp(skb); 1722 skb_tx_timestamp(skb);
1658 1723
1659 /* always enable CRC insertion offload */ 1724 /* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 4e15903b2b6d..c950a038237c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
112 112
113#define i40e_rx_desc i40e_32byte_rx_desc 113#define i40e_rx_desc i40e_32byte_rx_desc
114 114
115#define I40E_MAX_BUFFER_TXD 8
115#define I40E_MIN_TX_LEN 17 116#define I40E_MIN_TX_LEN 17
116#define I40E_MAX_DATA_PER_TXD 8192 117#define I40E_MAX_DATA_PER_TXD 8192
117 118