commit    2d0a074517da34a6386bdd9a22bc006c8fa21044 (patch)
tree      e0353bf8c5cdf8edf41312ea4bc0a7623726dfae /drivers/dma
parent    c4b4b732b2e99e6e302d90d57f2a4f5c9516d9a3 (diff)
author    Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 2012-11-22 12:19:09 -0500
committer Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 2012-11-22 12:19:09 -0500
dma: mv_xor: use request_irq() instead of devm_request_irq()
Even though the use of devm_*() functions is generally recommended
over their classic variants, the combination of devm_request_irq()
and irq_of_parse_and_map() doesn't work nicely.
We have the following scenario:
irq_of_parse_and_map(...)
devm_request_irq(...)
For some reason, driver initialization fails at a later
point. Since irq_of_parse_and_map() is not device-managed, we do a:
irq_dispose_mapping(...)
Unfortunately, this doesn't work: free_irq() must be called before
irq_dispose_mapping(), but with the devm mechanism the automatic
free_irq() happens only after we get out of the ->probe() function.
So we revert to using request_irq() with traditional error handling,
so that in case of error, free_irq() is called before
irq_dispose_mapping().
Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
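
For context, here is a minimal sketch of the probe-time ordering this
patch enforces. All names (example_probe(), example_handler(),
example_init_hw(), the error labels) are illustrative, not code from
drivers/dma/mv_xor.c:

    #include <linux/interrupt.h>
    #include <linux/irqdomain.h>        /* irq_dispose_mapping() */
    #include <linux/of_irq.h>           /* irq_of_parse_and_map() */
    #include <linux/platform_device.h>

    static irqreturn_t example_handler(int irq, void *data)
    {
            /* acknowledge the (hypothetical) device here */
            return IRQ_HANDLED;
    }

    static int example_init_hw(struct platform_device *pdev)
    {
            return 0;       /* hypothetical later init step that may fail */
    }

    static int example_probe(struct platform_device *pdev)
    {
            unsigned int irq;
            int ret;

            /* the mapping created here is NOT device-managed */
            irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
            if (!irq)
                    return -ENODEV;

            /*
             * Plain request_irq(), so that on a later probe failure we
             * control when free_irq() runs. devm_request_irq() would
             * defer the automatic free_irq() until after ->probe()
             * returns, i.e. after the irq_dispose_mapping() below.
             */
            ret = request_irq(irq, example_handler, 0,
                              dev_name(&pdev->dev), pdev);
            if (ret)
                    goto err_dispose;

            ret = example_init_hw(pdev);
            if (ret)
                    goto err_free_irq;

            return 0;

    err_free_irq:
            free_irq(irq, pdev);        /* must come first... */
    err_dispose:
            irq_dispose_mapping(irq);   /* ...then drop the mapping */
            return ret;
    }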
Diffstat (limited to 'drivers/dma')
drivers/dma/mv_xor.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 2c69b89eac4f..0d4c24e529f7 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1162,9 +1162,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	/* clear errors before enabling interrupts */
 	mv_xor_device_clear_err_status(mv_chan);
 
-	ret = devm_request_irq(&pdev->dev, mv_chan->irq,
-			       mv_xor_interrupt_handler,
-			       0, dev_name(&pdev->dev), mv_chan);
+	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
+			  0, dev_name(&pdev->dev), mv_chan);
 	if (ret)
 		goto err_free_dma;
 
@@ -1185,14 +1184,14 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 		ret = mv_xor_memcpy_self_test(mv_chan);
 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
 		if (ret)
-			goto err_free_dma;
+			goto err_free_irq;
 	}
 
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		ret = mv_xor_xor_self_test(mv_chan);
 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
 		if (ret)
-			goto err_free_dma;
+			goto err_free_irq;
 	}
 
 	dev_info(&pdev->dev, "Marvell XOR: "
@@ -1205,6 +1204,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	dma_async_device_register(dma_dev);
 	return mv_chan;
 
+err_free_irq:
+	free_irq(mv_chan->irq, mv_chan);
 err_free_dma:
 	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
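
Note the design choice: the new err_free_irq label sits directly above
err_free_dma, so the unwind falls through and releases resources in
reverse order of acquisition. Below is a compilable userspace sketch of
that goto-ladder idiom; the stub functions stand in for the driver's
real acquisitions and every name is illustrative:

    #include <stdio.h>

    /* Stand-ins mirroring the order in the patch: the DMA descriptor
     * pool is set up before the IRQ is requested, so the unwind
     * releases the IRQ first, then the pool. */
    static int alloc_pool(void) { puts("alloc pool");  return 0; }
    static void free_pool(void) { puts("free pool"); }
    static int grab_irq(void)   { puts("request_irq"); return 0; }
    static void drop_irq(void)  { puts("free_irq"); }
    static int self_test(void)  { puts("self test");   return -1; /* force a failure */ }

    static int channel_add(void)
    {
            int ret;

            ret = alloc_pool();
            if (ret)
                    return ret;

            ret = grab_irq();
            if (ret)
                    goto err_free_pool;

            /* From here on, a failure must release the IRQ *and* the
             * pool, which is why the patch retargets these gotos from
             * err_free_dma to err_free_irq. */
            ret = self_test();
            if (ret)
                    goto err_free_irq;

            return 0;

    err_free_irq:
            drop_irq();     /* falls through: the pool is freed next */
    err_free_pool:
            free_pool();
            return ret;
    }

    int main(void)
    {
            return channel_add() ? 1 : 0;
    }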