author    Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>  2017-01-26 08:29:27 -0500
committer David S. Miller <davem@davemloft.net>               2017-01-26 18:41:05 -0500
commit    a47b70ea86bdeb3091341f5ae3ef580f1a1ad822
tree      486911b0dd44dff74cd19814e24617a4f1c0ae0b
parent    086cb6a41264b5af33928b82e09ae7f0f8bbc291
ravb: unmap descriptors when freeing rings
"swiotlb buffer is full" errors occur after repeated initialisation of a
device - f.e. suspend/resume or ip link set up/down. This is because memory
mapped using dma_map_single() in ravb_ring_format() and ravb_start_xmit()
is not released. Resolve this problem by unmapping descriptors when
freeing rings.
Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
Signed-off-by: Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>
[simon: reworked]
Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
Acked-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
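For context, the streaming-DMA rule this patch restores: every buffer
successfully mapped with dma_map_single() must eventually be unmapped with
dma_unmap_single() using the same device, handle, size and direction. On
systems that route such mappings through swiotlb, each forgotten unmap leaks
a bounce-buffer slot until the pool is exhausted. A minimal kernel-style
sketch of the pairing follows; example_map()/example_teardown(), buf and len
are illustrative names, not taken from the driver:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Map a CPU buffer for device reads; the caller must pair this
	 * with example_teardown() before the buffer is freed or reused.
	 */
	static int example_map(struct device *dev, void *buf, size_t len,
			       dma_addr_t *handle)
	{
		*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *handle))
			return -ENOMEM;
		return 0;
	}

	/* Release the mapping. Skipping this on teardown paths
	 * (suspend/resume, ip link set down) is exactly the class of
	 * leak the patch fixes.
	 */
	static void example_teardown(struct device *dev, dma_addr_t handle,
				     size_t len)
	{
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	}

The patch applies this pairing on the teardown path itself: ravb_ring_free()
now unmaps every still-mapped RX descriptor and calls the relocated
ravb_tx_free(ndev, q, false) to unmap and free all outstanding TX entries,
transmitted or not, before returning the rings to the DMA pool.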
 drivers/net/ethernet/renesas/ravb_main.c | 112
 1 file changed, 64 insertions(+), 48 deletions(-)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 89ac1e3f6175..301f48755093 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = ravb_get_mdio_data,
 };
 
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &priv->stats[q];
+	struct ravb_tx_desc *desc;
+	int free_num = 0;
+	int entry;
+	u32 size;
+
+	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+		bool txed;
+
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
+		desc = &priv->tx_ring[q][entry];
+		txed = desc->die_dt == DT_FEMPTY;
+		if (free_txed_only && !txed)
+			break;
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+		/* Free the original skb. */
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+					 size, DMA_TO_DEVICE);
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				if (txed)
+					stats->tx_packets++;
+			}
+			free_num++;
+		}
+		if (txed)
+			stats->tx_bytes += size;
+		desc->die_dt = DT_EEMPTY;
+	}
+	return free_num;
+}
+
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
@@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	kfree(priv->rx_skb[q]);
 	priv->rx_skb[q] = NULL;
 
-	/* Free TX skb ringbuffer */
-	if (priv->tx_skb[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			dev_kfree_skb(priv->tx_skb[q][i]);
-	}
-	kfree(priv->tx_skb[q]);
-	priv->tx_skb[q] = NULL;
-
 	/* Free aligned TX buffers */
 	kfree(priv->tx_align[q]);
 	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++) {
+			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+			if (!dma_mapping_error(ndev->dev.parent,
+					       le32_to_cpu(desc->dptr)))
+				dma_unmap_single(ndev->dev.parent,
+						 le32_to_cpu(desc->dptr),
+						 PKT_BUF_SZ,
+						 DMA_FROM_DEVICE);
+		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	}
 
 	if (priv->tx_ring[q]) {
+		ravb_tx_free(ndev, q, false);
+
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
+
+	/* Free TX skb ringbuffer.
+	 * SKBs are freed by ravb_tx_free() call above.
+	 */
+	kfree(priv->tx_skb[q]);
+	priv->tx_skb[q] = NULL;
 }
 
 /* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev)
 	return 0;
 }
 
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &priv->stats[q];
-	struct ravb_tx_desc *desc;
-	int free_num = 0;
-	int entry;
-	u32 size;
-
-	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
-		desc = &priv->tx_ring[q][entry];
-		if (desc->die_dt != DT_FEMPTY)
-			break;
-		/* Descriptor type must be checked before all other reads */
-		dma_rmb();
-		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
-		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 size, DMA_TO_DEVICE);
-			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
-				dev_kfree_skb_any(priv->tx_skb[q][entry]);
-				priv->tx_skb[q][entry] = NULL;
-				stats->tx_packets++;
-			}
-			free_num++;
-		}
-		stats->tx_bytes += size;
-		desc->die_dt = DT_EEMPTY;
-	}
-	return free_num;
-}
-
 static void ravb_get_tx_tstamp(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 		spin_lock_irqsave(&priv->lock, flags);
 		/* Clear TX interrupt */
 		ravb_write(ndev, ~mask, TIS);
-		ravb_tx_free(ndev, q);
+		ravb_tx_free(ndev, q, true);
 		netif_wake_subqueue(ndev, q);
 		mmiowb();
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -1567,7 +1582,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	priv->cur_tx[q] += NUM_TX_DESC;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit: