author		Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>	2017-04-17 08:55:22 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-05-03 11:36:36 -0400
commit		e344e97fb359ca02c1892d093daae7c2060e965e (patch)
tree		5907a38ebb3ee7c5a0accfdb581940950d05d31b
parent		b4580d6f10a3b277669fd1d4cf7cac49ad9c8b77 (diff)
sh_eth: unmap DMA buffers when freeing rings
[ Upstream commit 1debdc8f9ebd07daf140e417b3841596911e0066 ]
The DMA API debugging (when enabled) causes:
WARNING: CPU: 0 PID: 1445 at lib/dma-debug.c:519 add_dma_entry+0xe0/0x12c
DMA-API: exceeded 7 overlapping mappings of cacheline 0x01b2974d
to be printed after repeated initialization of the Ether device, e.g.
suspend/resume or 'ifconfig' up/down. This is because DMA buffers mapped
using dma_map_single() in sh_eth_ring_format() and sh_eth_start_xmit() are
never unmapped. Resolve this problem by unmapping the buffers when freeing
the descriptor rings; doing that correctly requires adding an extra
parameter to sh_eth_txfree(), which we rename to sh_eth_tx_free() while
we're at it.
Based on the commit a47b70ea86bd ("ravb: unmap descriptors when freeing
rings").
Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	drivers/net/ethernet/renesas/sh_eth.c	122
1 file changed, 67 insertions, 55 deletions
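For context on the DMA-API contract the warning above enforces: every buffer handed to
dma_map_single() must eventually be released with dma_unmap_single() on the same device,
with the same size and direction; otherwise dma-debug keeps counting stale mappings of the
same cachelines until it prints the "overlapping mappings" warning. The sketch below is not
part of the patch; it only illustrates that map/unmap pairing with hypothetical
rx_fill_one()/rx_release_one() helpers and a made-up struct rx_slot, loosely mirroring what
the driver does with its Rx ring.

/* Illustration only -- hypothetical helpers, not taken from sh_eth.c. */
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct rx_slot {
	struct sk_buff *skb;	/* buffer handed to the NIC */
	dma_addr_t addr;	/* handle returned by dma_map_single() */
	size_t len;		/* mapped length, needed again at unmap time */
};

/* Map one receive buffer; on success the device may DMA into it. */
static int rx_fill_one(struct net_device *ndev, struct rx_slot *slot, size_t len)
{
	slot->skb = netdev_alloc_skb(ndev, len);
	if (!slot->skb)
		return -ENOMEM;

	slot->addr = dma_map_single(&ndev->dev, slot->skb->data, len,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(&ndev->dev, slot->addr)) {
		dev_kfree_skb(slot->skb);
		slot->skb = NULL;
		return -ENOMEM;
	}
	slot->len = len;
	return 0;
}

/* The matching teardown: same device, same size, same direction. */
static void rx_release_one(struct net_device *ndev, struct rx_slot *slot)
{
	if (!slot->skb)
		return;

	dma_unmap_single(&ndev->dev, slot->addr, slot->len, DMA_FROM_DEVICE);
	dev_kfree_skb(slot->skb);
	slot->skb = NULL;
}

Tearing a ring down without the rx_release_one() step is exactly the class of leak this
patch fixes for both the Rx and Tx rings.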
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 1a92de705199..a2d218b28c0e 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1059,12 +1059,70 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = sh_get_mdio,
 };
 
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	struct sh_eth_txdesc *txdesc;
+	int free_num = 0;
+	int entry;
+	bool sent;
+
+	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+		entry = mdp->dirty_tx % mdp->num_tx_ring;
+		txdesc = &mdp->tx_ring[entry];
+		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+		if (sent_only && !sent)
+			break;
+		/* TACT bit must be checked before all the following reads */
+		dma_rmb();
+		netif_info(mdp, tx_done, ndev,
+			   "tx entry %d status 0x%08x\n",
+			   entry, le32_to_cpu(txdesc->status));
+		/* Free the original skb. */
+		if (mdp->tx_skbuff[entry]) {
+			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+					 le32_to_cpu(txdesc->len) >> 16,
+					 DMA_TO_DEVICE);
+			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+			mdp->tx_skbuff[entry] = NULL;
+			free_num++;
+		}
+		txdesc->status = cpu_to_le32(TD_TFP);
+		if (entry >= mdp->num_tx_ring - 1)
+			txdesc->status |= cpu_to_le32(TD_TDLE);
+
+		if (sent) {
+			ndev->stats.tx_packets++;
+			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+		}
+	}
+	return free_num;
+}
+
 /* free skb and descriptor buffer */
 static void sh_eth_ring_free(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	int ringsize, i;
 
+	if (mdp->rx_ring) {
+		for (i = 0; i < mdp->num_rx_ring; i++) {
+			if (mdp->rx_skbuff[i]) {
+				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
+
+				dma_unmap_single(&ndev->dev,
+						 le32_to_cpu(rxdesc->addr),
+						 ALIGN(mdp->rx_buf_sz, 32),
+						 DMA_FROM_DEVICE);
+			}
+		}
+		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+				  mdp->rx_desc_dma);
+		mdp->rx_ring = NULL;
+	}
+
 	/* Free Rx skb ringbuffer */
 	if (mdp->rx_skbuff) {
 		for (i = 0; i < mdp->num_rx_ring; i++)
@@ -1073,27 +1131,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
 	kfree(mdp->rx_skbuff);
 	mdp->rx_skbuff = NULL;
 
-	/* Free Tx skb ringbuffer */
-	if (mdp->tx_skbuff) {
-		for (i = 0; i < mdp->num_tx_ring; i++)
-			dev_kfree_skb(mdp->tx_skbuff[i]);
-	}
-	kfree(mdp->tx_skbuff);
-	mdp->tx_skbuff = NULL;
-
-	if (mdp->rx_ring) {
-		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
-				  mdp->rx_desc_dma);
-		mdp->rx_ring = NULL;
-	}
-
 	if (mdp->tx_ring) {
+		sh_eth_tx_free(ndev, false);
+
 		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
 		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
 				  mdp->tx_desc_dma);
 		mdp->tx_ring = NULL;
 	}
+
+	/* Free Tx skb ringbuffer */
+	kfree(mdp->tx_skbuff);
+	mdp->tx_skbuff = NULL;
 }
 
 /* format skb and descriptor buffer */
@@ -1341,43 +1390,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
 	update_mac_address(ndev);
 }
 
-/* free Tx skb function */
-static int sh_eth_txfree(struct net_device *ndev)
-{
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-	struct sh_eth_txdesc *txdesc;
-	int free_num = 0;
-	int entry;
-
-	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
-		entry = mdp->dirty_tx % mdp->num_tx_ring;
-		txdesc = &mdp->tx_ring[entry];
-		if (txdesc->status & cpu_to_le32(TD_TACT))
-			break;
-		/* TACT bit must be checked before all the following reads */
-		dma_rmb();
-		netif_info(mdp, tx_done, ndev,
-			   "tx entry %d status 0x%08x\n",
-			   entry, le32_to_cpu(txdesc->status));
-		/* Free the original skb. */
-		if (mdp->tx_skbuff[entry]) {
-			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
-					 le32_to_cpu(txdesc->len) >> 16,
-					 DMA_TO_DEVICE);
-			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
-			mdp->tx_skbuff[entry] = NULL;
-			free_num++;
-		}
-		txdesc->status = cpu_to_le32(TD_TFP);
-		if (entry >= mdp->num_tx_ring - 1)
-			txdesc->status |= cpu_to_le32(TD_TDLE);
-
-		ndev->stats.tx_packets++;
-		ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
-	}
-	return free_num;
-}
-
 /* Packet receive function */
 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 {
@@ -1620,7 +1632,7 @@ ignore_link:
 			  intr_status, mdp->cur_tx, mdp->dirty_tx,
 			  (u32)ndev->state, edtrr);
 		/* dirty buffer free */
-		sh_eth_txfree(ndev);
+		sh_eth_tx_free(ndev, true);
 
 		/* SH7712 BUG */
 		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
@@ -1679,7 +1691,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 		/* Clear Tx interrupts */
 		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
 
-		sh_eth_txfree(ndev);
+		sh_eth_tx_free(ndev, true);
 		netif_wake_queue(ndev);
 	}
 
@@ -2307,7 +2319,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	spin_lock_irqsave(&mdp->lock, flags);
 	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
-		if (!sh_eth_txfree(ndev)) {
+		if (!sh_eth_tx_free(ndev, true)) {
 			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
 			netif_stop_queue(ndev);
 			spin_unlock_irqrestore(&mdp->lock, flags);
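A note on the new sent_only parameter, as visible in the hunks above: the interrupt, error
and xmit paths pass true, so only descriptors the hardware has completed (TD_TACT cleared)
are reclaimed, preserving the old sh_eth_txfree() behaviour, while sh_eth_ring_free() passes
false so that descriptors still owned by the hardware are also unmapped and their skbs freed
before the ring memory itself is released; the Tx statistics are only bumped for descriptors
that were actually sent.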