aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHamad Kadmany <qca_hkadmany@qca.qualcomm.com>2015-10-25 09:59:22 -0400
committerKalle Valo <kvalo@qca.qualcomm.com>2015-10-29 07:06:45 -0400
commite3d2ed9434331dedb9eeece586d38aec9e29f60b (patch)
treeea70b960cb6554f71a84d6d66e99de4b9d69e48a
parentb03fbab0c4d502510400b7ee5fb3d5bab6d859bf (diff)
wil6210: Fix TSO overflow handling
When a Tx ring full condition is encountered with TSO, a "DMA error" message was wrongly printed. In addition, in case of a full Tx ring, return the proper error code so that NETDEV_TX_BUSY is returned to the network stack, in order not to drop the packets and to retry their transmission when the ring is emptied. Signed-off-by: Hamad Kadmany <qca_hkadmany@qca.qualcomm.com> Signed-off-by: Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com> Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c30
1 file changed, 15 insertions, 15 deletions
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 0f8b6877497e..3bc9bc0efbac 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1242,6 +1242,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
1242 int tcp_hdr_len; 1242 int tcp_hdr_len;
1243 int skb_net_hdr_len; 1243 int skb_net_hdr_len;
1244 int gso_type; 1244 int gso_type;
1245 int rc = -EINVAL;
1245 1246
1246 wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n", 1247 wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
1247 __func__, skb->len, vring_index); 1248 __func__, skb->len, vring_index);
@@ -1333,8 +1334,9 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
1333 len, rem_data, descs_used); 1334 len, rem_data, descs_used);
1334 1335
1335 if (descs_used == avail) { 1336 if (descs_used == avail) {
1336 wil_err(wil, "TSO: ring overflow\n"); 1337 wil_err_ratelimited(wil, "TSO: ring overflow\n");
1337 goto dma_error; 1338 rc = -ENOMEM;
1339 goto mem_error;
1338 } 1340 }
1339 1341
1340 lenmss = min_t(int, rem_data, len); 1342 lenmss = min_t(int, rem_data, len);
@@ -1356,8 +1358,10 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
1356 headlen -= lenmss; 1358 headlen -= lenmss;
1357 } 1359 }
1358 1360
1359 if (unlikely(dma_mapping_error(dev, pa))) 1361 if (unlikely(dma_mapping_error(dev, pa))) {
1360 goto dma_error; 1362 wil_err(wil, "TSO: DMA map page error\n");
1363 goto mem_error;
1364 }
1361 1365
1362 _desc = &vring->va[i].tx; 1366 _desc = &vring->va[i].tx;
1363 1367
@@ -1456,8 +1460,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
1456 } 1460 }
1457 1461
1458 /* advance swhead */ 1462 /* advance swhead */
1459 wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
1460 wil_vring_advance_head(vring, descs_used); 1463 wil_vring_advance_head(vring, descs_used);
1464 wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
1461 1465
1462 /* make sure all writes to descriptors (shared memory) are done before 1466 /* make sure all writes to descriptors (shared memory) are done before
1463 * committing them to HW 1467 * committing them to HW
@@ -1467,8 +1471,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
1467 wil_w(wil, vring->hwtail, vring->swhead); 1471 wil_w(wil, vring->hwtail, vring->swhead);
1468 return 0; 1472 return 0;
1469 1473
1470dma_error: 1474mem_error:
1471 wil_err(wil, "TSO: DMA map page error\n");
1472 while (descs_used > 0) { 1475 while (descs_used > 0) {
1473 struct wil_ctx *ctx; 1476 struct wil_ctx *ctx;
1474 1477
@@ -1479,14 +1482,11 @@ dma_error:
1479 _desc->dma.status = TX_DMA_STATUS_DU; 1482 _desc->dma.status = TX_DMA_STATUS_DU;
1480 ctx = &vring->ctx[i]; 1483 ctx = &vring->ctx[i];
1481 wil_txdesc_unmap(dev, d, ctx); 1484 wil_txdesc_unmap(dev, d, ctx);
1482 if (ctx->skb)
1483 dev_kfree_skb_any(ctx->skb);
1484 memset(ctx, 0, sizeof(*ctx)); 1485 memset(ctx, 0, sizeof(*ctx));
1485 descs_used--; 1486 descs_used--;
1486 } 1487 }
1487
1488err_exit: 1488err_exit:
1489 return -EINVAL; 1489 return rc;
1490} 1490}
1491 1491
1492static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, 1492static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
@@ -1562,8 +1562,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
1562 _d = &vring->va[i].tx; 1562 _d = &vring->va[i].tx;
1563 pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), 1563 pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
1564 DMA_TO_DEVICE); 1564 DMA_TO_DEVICE);
1565 if (unlikely(dma_mapping_error(dev, pa))) 1565 if (unlikely(dma_mapping_error(dev, pa))) {
1566 wil_err(wil, "Tx[%2d] failed to map fragment\n",
1567 vring_index);
1566 goto dma_error; 1568 goto dma_error;
1569 }
1567 vring->ctx[i].mapped_as = wil_mapped_as_page; 1570 vring->ctx[i].mapped_as = wil_mapped_as_page;
1568 wil_tx_desc_map(d, pa, len, vring_index); 1571 wil_tx_desc_map(d, pa, len, vring_index);
1569 /* no need to check return code - 1572 /* no need to check return code -
@@ -1623,9 +1626,6 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
1623 _d->dma.status = TX_DMA_STATUS_DU; 1626 _d->dma.status = TX_DMA_STATUS_DU;
1624 wil_txdesc_unmap(dev, d, ctx); 1627 wil_txdesc_unmap(dev, d, ctx);
1625 1628
1626 if (ctx->skb)
1627 dev_kfree_skb_any(ctx->skb);
1628
1629 memset(ctx, 0, sizeof(*ctx)); 1629 memset(ctx, 0, sizeof(*ctx));
1630 } 1630 }
1631 1631