author    Linus Torvalds <torvalds@linux-foundation.org>  2011-02-15 15:07:35 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-02-15 15:07:35 -0500
commit    b45bbf07722bd9491c35681c6698cab93a778904 (patch)
tree      d1c25524535496251ba7ac76f6d4aa01a562b233 /drivers
parent    f60c153d503e798b354333772e7c00f1e8733e71 (diff)
parent    4abed0af1e9bc911f28bb525eece522d94d047f2 (diff)
Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (21 commits)
  dmaengine: add slave-dma maintainer
  dma: ipu_idmac: do not lose valid received data in the irq handler
  dmaengine: imx-sdma: fix up param for the last BD in sdma_prep_slave_sg()
  dmaengine: imx-sdma: correct sdmac->status in sdma_handle_channel_loop()
  dmaengine: imx-sdma: return sdmac->status in sdma_tx_status()
  dmaengine: imx-sdma: set sdmac->status to DMA_ERROR in err_out of sdma_prep_slave_sg()
  dmaengine: imx-sdma: remove IMX_DMA_SG_LOOP handling in sdma_prep_slave_sg()
  dmaengine i.MX dma: initialize dma capabilities outside channel loop
  dmaengine i.MX DMA: do not initialize chan_id field
  dmaengine i.MX dma: check sg entries for valid addresses and lengths
  dmaengine i.MX dma: set maximum segment size for our device
  dmaengine i.MX SDMA: reserve channel 0 by not registering it
  dmaengine i.MX SDMA: initialize dma capabilities outside channel loop
  dmaengine i.MX SDMA: do not initialize chan_id field
  dmaengine i.MX sdma: check sg entries for valid addresses and lengths
  dmaengine i.MX sdma: set maximum segment size for our device
  DMA: PL08x: fix channel pausing to timeout rather than lockup
  DMA: PL08x: fix infinite wait when terminating transfers
  dmaengine: imx-sdma: fix inconsistent naming in sdma_assign_cookie()
  dmaengine: imx-sdma: propagate error in sdma_probe() instead of returning 0
  ...
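The headline PL08x fix in this series ("DMA: PL08x: fix channel pausing to
timeout rather than lockup") replaces an unbounded busy-wait on the
channel-busy flag with a bounded poll; the real change is in the
amba-pl08x.c hunks below. As a rough standalone illustration of the same
bounded-poll pattern, here is a minimal user-space C sketch; channel_busy()
and the 1000 x 1us budget are invented stand-ins, not the driver's API:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the hardware busy test; the real driver reads a DMAC
 * status register via pl08x_phy_channel_busy(). */
static bool channel_busy(void)
{
        static int polls_left = 5;      /* pretend the FIFO drains after 5 polls */
        return polls_left-- > 0;
}

/* Poll with a fixed budget instead of spinning forever, then report
 * the timeout rather than locking up the caller. */
static int wait_channel_idle(void)
{
        int timeout;

        for (timeout = 1000; timeout; timeout--) {
                if (!channel_busy())
                        return 0;
                usleep(1);
        }
        fprintf(stderr, "timeout waiting for channel pause\n");
        return -1;
}

int main(void)
{
        return wait_channel_idle() ? 1 : 0;
}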
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/amba-pl08x.c     53
-rw-r--r--  drivers/dma/imx-dma.c        26
-rw-r--r--  drivers/dma/imx-sdma.c       88
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c  50
4 files changed, 102 insertions(+), 115 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 297f48b0cba..07bca4970e5 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -79,6 +79,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <linux/dmapool.h>
 #include <linux/dmaengine.h>
 #include <linux/amba/bus.h>
@@ -235,16 +236,19 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
 }
 
 /*
- * Overall DMAC remains enabled always.
+ * Pause the channel by setting the HALT bit.
  *
- * Disabling individual channels could lose data.
+ * For M->P transfers, pause the DMAC first and then stop the peripheral -
+ * the FIFO can only drain if the peripheral is still requesting data.
+ * (note: this can still timeout if the DMAC FIFO never drains of data.)
  *
- * Disable the peripheral DMA after disabling the DMAC in order to allow
- * the DMAC FIFO to drain, and hence allow the channel to show inactive
+ * For P->M transfers, disable the peripheral first to stop it filling
+ * the DMAC FIFO, and then pause the DMAC.
  */
 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 {
         u32 val;
+        int timeout;
 
         /* Set the HALT bit and wait for the FIFO to drain */
         val = readl(ch->base + PL080_CH_CONFIG);
@@ -252,8 +256,13 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
         writel(val, ch->base + PL080_CH_CONFIG);
 
         /* Wait for channel inactive */
-        while (pl08x_phy_channel_busy(ch))
-                cpu_relax();
+        for (timeout = 1000; timeout; timeout--) {
+                if (!pl08x_phy_channel_busy(ch))
+                        break;
+                udelay(1);
+        }
+        if (pl08x_phy_channel_busy(ch))
+                pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
 }
 
 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -267,19 +276,24 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
 }
 
 
-/* Stops the channel */
-static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
+/*
+ * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
+ * clears any pending interrupt status. This should not be used for
+ * an on-going transfer, but as a method of shutting down a channel
+ * (eg, when it's no longer used) or terminating a transfer.
+ */
+static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
+        struct pl08x_phy_chan *ch)
 {
-        u32 val;
+        u32 val = readl(ch->base + PL080_CH_CONFIG);
 
-        pl08x_pause_phy_chan(ch);
+        val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
+                 PL080_CONFIG_TC_IRQ_MASK);
 
-        /* Disable channel */
-        val = readl(ch->base + PL080_CH_CONFIG);
-        val &= ~PL080_CONFIG_ENABLE;
-        val &= ~PL080_CONFIG_ERR_IRQ_MASK;
-        val &= ~PL080_CONFIG_TC_IRQ_MASK;
         writel(val, ch->base + PL080_CH_CONFIG);
+
+        writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
+        writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
 }
 
 static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -404,13 +418,12 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 {
         unsigned long flags;
 
+        spin_lock_irqsave(&ch->lock, flags);
+
         /* Stop the channel and clear its interrupts */
-        pl08x_stop_phy_chan(ch);
-        writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
-        writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
+        pl08x_terminate_phy_chan(pl08x, ch);
 
         /* Mark it as free */
-        spin_lock_irqsave(&ch->lock, flags);
         ch->serving = NULL;
         spin_unlock_irqrestore(&ch->lock, flags);
 }
@@ -1449,7 +1462,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                 plchan->state = PL08X_CHAN_IDLE;
 
                 if (plchan->phychan) {
-                        pl08x_stop_phy_chan(plchan->phychan);
+                        pl08x_terminate_phy_chan(pl08x, plchan->phychan);
 
                         /*
                          * Mark physical channel as free and free any slave
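A note on the terminate path above: it folds the three separate bit-clears
into a single read-modify-write of the channel config register, then acks
any latched error/terminal-count status so a stale interrupt cannot fire
after shutdown. The following standalone C sketch shows that shape with
plain variables standing in for the memory-mapped registers; the register
names and bit positions are illustrative, not the actual PL080 layout:

#include <stdint.h>
#include <stdio.h>

/* Plain variables standing in for memory-mapped PL080-style registers. */
static uint32_t ch_config = 0xC1;       /* channel enabled, both IRQs unmasked */
static uint32_t err_clear, tc_clear;

#define CFG_ENABLE      (1u << 0)       /* illustrative bit positions */
#define CFG_ERR_IRQ     (1u << 6)
#define CFG_TC_IRQ      (1u << 7)

static void terminate_chan(unsigned int id)
{
        /* One read-modify-write: disable the channel and mask both
         * interrupt sources, rather than three separate clears. */
        ch_config &= ~(CFG_ENABLE | CFG_ERR_IRQ | CFG_TC_IRQ);

        /* Ack any latched error / terminal-count status for this
         * channel so nothing stale fires after shutdown. */
        err_clear = 1u << id;
        tc_clear = 1u << id;
}

int main(void)
{
        terminate_chan(3);
        printf("config=%#x err_clear=%#x tc_clear=%#x\n",
               ch_config, err_clear, tc_clear);
        return 0;
}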
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index e53d438142b..e18eaabe92b 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -49,6 +49,7 @@ struct imxdma_channel {
 
 struct imxdma_engine {
         struct device                   *dev;
+        struct device_dma_parameters    dma_parms;
         struct dma_device               dma_device;
         struct imxdma_channel           channel[MAX_DMA_CHANNELS];
 };
@@ -242,6 +243,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
         else
                 dmamode = DMA_MODE_WRITE;
 
+        switch (imxdmac->word_size) {
+        case DMA_SLAVE_BUSWIDTH_4_BYTES:
+                if (sgl->length & 3 || sgl->dma_address & 3)
+                        return NULL;
+                break;
+        case DMA_SLAVE_BUSWIDTH_2_BYTES:
+                if (sgl->length & 1 || sgl->dma_address & 1)
+                        return NULL;
+                break;
+        case DMA_SLAVE_BUSWIDTH_1_BYTE:
+                break;
+        default:
+                return NULL;
+        }
+
         ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
                 dma_length, imxdmac->per_address, dmamode);
         if (ret)
@@ -329,6 +345,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
 
         INIT_LIST_HEAD(&imxdma->dma_device.channels);
 
+        dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
+        dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
+
         /* Initialize channel parameters */
         for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                 struct imxdma_channel *imxdmac = &imxdma->channel[i];
@@ -346,11 +365,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
                 imxdmac->imxdma = imxdma;
                 spin_lock_init(&imxdmac->lock);
 
-                dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
-                dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
-
                 imxdmac->chan.device = &imxdma->dma_device;
-                imxdmac->chan.chan_id = i;
                 imxdmac->channel = i;
 
                 /* Add the channel to the DMAC list */
@@ -370,6 +385,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
 
         platform_set_drvdata(pdev, imxdma);
 
+        imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
+        dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
+
         ret = dma_async_device_register(&imxdma->dma_device);
         if (ret) {
                 dev_err(&pdev->dev, "unable to register\n");
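The imxdma_prep_slave_sg() hunk above rejects scatter-gather entries whose
length or bus address is not aligned to the configured slave bus width; the
imx-sdma diff below applies the same rule per buffer descriptor. A
self-contained C sketch of the alignment rule, using simplified stand-ins
for the dmaengine types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the dmaengine bus width and scatterlist types. */
enum buswidth { BUSWIDTH_1_BYTE = 1, BUSWIDTH_2_BYTES = 2, BUSWIDTH_4_BYTES = 4 };

struct sg_entry {
        uint32_t dma_address;
        uint32_t length;
};

/* Mirror of the drivers' check: an entry is only usable when both its
 * byte count and its bus address are multiples of the bus width. */
static bool sg_entry_valid(const struct sg_entry *sg, enum buswidth width)
{
        switch (width) {
        case BUSWIDTH_4_BYTES:
                return !(sg->length & 3) && !(sg->dma_address & 3);
        case BUSWIDTH_2_BYTES:
                return !(sg->length & 1) && !(sg->dma_address & 1);
        case BUSWIDTH_1_BYTE:
                return true;
        default:
                return false;   /* unknown width: refuse, as the drivers do */
        }
}

int main(void)
{
        struct sg_entry ok  = { .dma_address = 0x1000, .length = 64 };
        struct sg_entry bad = { .dma_address = 0x1002, .length = 64 };

        printf("%d %d\n", sg_entry_valid(&ok, BUSWIDTH_4_BYTES),
               sg_entry_valid(&bad, BUSWIDTH_4_BYTES));  /* prints: 1 0 */
        return 0;
}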
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d5a5d4d9c19..b6d1455fa93 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -230,7 +230,7 @@ struct sdma_engine;
  * struct sdma_channel - housekeeping for a SDMA channel
  *
  * @sdma                pointer to the SDMA engine for this channel
- * @channel             the channel number, matches dmaengine chan_id
+ * @channel             the channel number, matches dmaengine chan_id + 1
  * @direction           transfer type. Needed for setting SDMA script
  * @peripheral_type     Peripheral type. Needed for setting SDMA script
  * @event_id0           aka dma request line
@@ -301,6 +301,7 @@ struct sdma_firmware_header {
 
 struct sdma_engine {
         struct device                   *dev;
+        struct device_dma_parameters    dma_parms;
         struct sdma_channel             channel[MAX_DMA_CHANNELS];
         struct sdma_channel_control     *channel_control;
         void __iomem                    *regs;
@@ -449,7 +450,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
                 if (bd->mode.status & BD_RROR)
                         sdmac->status = DMA_ERROR;
                 else
-                        sdmac->status = DMA_SUCCESS;
+                        sdmac->status = DMA_IN_PROGRESS;
 
                 bd->mode.status |= BD_DONE;
                 sdmac->buf_tail++;
@@ -770,15 +771,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
         __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
 }
 
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
 {
-        dma_cookie_t cookie = sdma->chan.cookie;
+        dma_cookie_t cookie = sdmac->chan.cookie;
 
         if (++cookie < 0)
                 cookie = 1;
 
-        sdma->chan.cookie = cookie;
-        sdma->desc.cookie = cookie;
+        sdmac->chan.cookie = cookie;
+        sdmac->desc.cookie = cookie;
 
         return cookie;
 }
@@ -798,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 
         cookie = sdma_assign_cookie(sdmac);
 
-        sdma_enable_channel(sdma, tx->chan->chan_id);
+        sdma_enable_channel(sdma, sdmac->channel);
 
         spin_unlock_irq(&sdmac->lock);
 
@@ -811,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
         struct imx_dma_data *data = chan->private;
         int prio, ret;
 
-        /* No need to execute this for internal channel 0 */
-        if (chan->chan_id == 0)
-                return 0;
-
         if (!data)
                 return -EINVAL;
 
@@ -879,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
         struct sdma_channel *sdmac = to_sdma_chan(chan);
         struct sdma_engine *sdma = sdmac->sdma;
         int ret, i, count;
-        int channel = chan->chan_id;
+        int channel = sdmac->channel;
         struct scatterlist *sg;
 
         if (sdmac->status == DMA_IN_PROGRESS)
@@ -924,22 +921,33 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
                         ret = -EINVAL;
                         goto err_out;
                 }
-                if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+                switch (sdmac->word_size) {
+                case DMA_SLAVE_BUSWIDTH_4_BYTES:
                         bd->mode.command = 0;
-                else
-                        bd->mode.command = sdmac->word_size;
+                        if (count & 3 || sg->dma_address & 3)
+                                return NULL;
+                        break;
+                case DMA_SLAVE_BUSWIDTH_2_BYTES:
+                        bd->mode.command = 2;
+                        if (count & 1 || sg->dma_address & 1)
+                                return NULL;
+                        break;
+                case DMA_SLAVE_BUSWIDTH_1_BYTE:
+                        bd->mode.command = 1;
+                        break;
+                default:
+                        return NULL;
+                }
 
                 param = BD_DONE | BD_EXTD | BD_CONT;
 
-                if (sdmac->flags & IMX_DMA_SG_LOOP) {
+                if (i + 1 == sg_len) {
                         param |= BD_INTR;
-                        if (i + 1 == sg_len)
-                                param |= BD_WRAP;
+                        param |= BD_LAST;
+                        param &= ~BD_CONT;
                 }
 
-                if (i + 1 == sg_len)
-                        param |= BD_INTR;
-
                 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
                         i, count, sg->dma_address,
                         param & BD_WRAP ? "wrap" : "",
@@ -953,6 +961,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
         return &sdmac->desc;
 err_out:
+        sdmac->status = DMA_ERROR;
         return NULL;
 }
 
@@ -963,7 +972,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
         struct sdma_channel *sdmac = to_sdma_chan(chan);
         struct sdma_engine *sdma = sdmac->sdma;
         int num_periods = buf_len / period_len;
-        int channel = chan->chan_id;
+        int channel = sdmac->channel;
         int ret, i = 0, buf = 0;
 
         dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
@@ -1066,14 +1075,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 {
         struct sdma_channel *sdmac = to_sdma_chan(chan);
         dma_cookie_t last_used;
-        enum dma_status ret;
 
         last_used = chan->cookie;
 
-        ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
         dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
 
-        return ret;
+        return sdmac->status;
 }
 
 static void sdma_issue_pending(struct dma_chan *chan)
@@ -1135,7 +1142,7 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
         /* download the RAM image for SDMA */
         sdma_load_script(sdma, ram_code,
                         header->ram_code_size,
-                        sdma->script_addrs->ram_code_start_addr);
+                        addr->ram_code_start_addr);
         clk_disable(sdma->clk);
 
         sdma_add_scripts(sdma, addr);
@@ -1237,7 +1244,6 @@ static int __init sdma_probe(struct platform_device *pdev)
         struct resource *iores;
         struct sdma_platform_data *pdata = pdev->dev.platform_data;
         int i;
-        dma_cap_mask_t mask;
         struct sdma_engine *sdma;
 
         sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
@@ -1280,6 +1286,9 @@ static int __init sdma_probe(struct platform_device *pdev)
 
         sdma->version = pdata->sdma_version;
 
+        dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+        dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
         INIT_LIST_HEAD(&sdma->dma_device.channels);
         /* Initialize channel parameters */
         for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -1288,15 +1297,17 @@ static int __init sdma_probe(struct platform_device *pdev)
                 sdmac->sdma = sdma;
                 spin_lock_init(&sdmac->lock);
 
-                dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
-                dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
-
                 sdmac->chan.device = &sdma->dma_device;
-                sdmac->chan.chan_id = i;
                 sdmac->channel = i;
 
-                /* Add the channel to the DMAC list */
-                list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+                /*
+                 * Add the channel to the DMAC list. Do not add channel 0 though
+                 * because we need it internally in the SDMA driver. This also means
+                 * that channel 0 in dmaengine counting matches sdma channel 1.
+                 */
+                if (i)
+                        list_add_tail(&sdmac->chan.device_node,
+                                        &sdma->dma_device.channels);
         }
 
         ret = sdma_init(sdma);
@@ -1317,6 +1328,8 @@ static int __init sdma_probe(struct platform_device *pdev)
         sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
         sdma->dma_device.device_control = sdma_control;
         sdma->dma_device.device_issue_pending = sdma_issue_pending;
+        sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+        dma_set_max_seg_size(sdma->dma_device.dev, 65535);
 
         ret = dma_async_device_register(&sdma->dma_device);
         if (ret) {
@@ -1324,13 +1337,6 @@ static int __init sdma_probe(struct platform_device *pdev)
                 goto err_init;
         }
 
-        /* request channel 0. This is an internal control channel
-         * to the SDMA engine and not available to clients.
-         */
-        dma_cap_zero(mask);
-        dma_cap_set(DMA_SLAVE, mask);
-        dma_request_channel(mask, NULL, NULL);
-
         dev_info(sdma->dev, "initialized\n");
 
         return 0;
@@ -1348,7 +1354,7 @@ err_clk:
 err_request_region:
 err_irq:
         kfree(sdma);
-        return 0;
+        return ret;
 }
 
 static int __exit sdma_remove(struct platform_device *pdev)
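Among the smaller fixes above, sdma_assign_cookie() merely renames its
misleading sdma parameter to sdmac; the cookie logic itself is the usual
dmaengine scheme, a per-channel counter that restarts at 1 whenever the
increment goes negative (i.e. wraps past INT32_MAX). A standalone sketch of
that wrap rule (assign_cookie() and the bare int32_t counter are
simplifications for the example):

#include <stdint.h>
#include <stdio.h>

typedef int32_t dma_cookie_t;

/* Same wrap rule as the driver's sdma_assign_cookie(): valid cookies
 * are positive, so a counter that increments to a negative value
 * (after wrapping past INT32_MAX) restarts at 1. */
static dma_cookie_t assign_cookie(dma_cookie_t *chan_cookie)
{
        dma_cookie_t cookie = *chan_cookie;

        if (++cookie < 0)
                cookie = 1;

        *chan_cookie = cookie;
        return cookie;
}

int main(void)
{
        dma_cookie_t chan = 41;

        printf("%d\n", assign_cookie(&chan));   /* 42 */

        chan = -7;      /* simulate a counter that has already wrapped */
        printf("%d\n", assign_cookie(&chan));   /* restarts at 1 */
        return 0;
}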
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index cb26ee9773d..c1a125e7d1d 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1145,29 +1145,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
         reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
         idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
 
-        /*
-         * Problem (observed with channel DMAIC_7): after enabling the channel
-         * and initialising buffers, there comes an interrupt with current still
-         * pointing at buffer 0, whereas it should use buffer 0 first and only
-         * generate an interrupt when it is done, then current should already
-         * point to buffer 1. This spurious interrupt also comes on channel
-         * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the
-         * first interrupt, there comes the second with current correctly
-         * pointing to buffer 1 this time. But sometimes this second interrupt
-         * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
-         * the channel seems to prevent the channel from hanging, but it doesn't
-         * prevent the spurious interrupt. This might also be unsafe. Think
-         * about the IDMAC controller trying to switch to a buffer, when we
-         * clear the ready bit, and re-enable it a moment later.
-         */
-        reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
-        idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
-        idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
-
-        reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
-        idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
-        idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
-
         spin_unlock_irqrestore(&ipu->lock, flags);
 
         return 0;
@@ -1246,33 +1223,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
         /* Other interrupts do not interfere with this channel */
         spin_lock(&ichan->lock);
-        if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
-                     ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
-                     !list_is_last(ichan->queue.next, &ichan->queue))) {
-                int i = 100;
-
-                /* This doesn't help. See comment in ipu_disable_channel() */
-                while (--i) {
-                        curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
-                        if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
-                                break;
-                        cpu_relax();
-                }
-
-                if (!i) {
-                        spin_unlock(&ichan->lock);
-                        dev_dbg(dev,
-                                "IRQ on active buffer on channel %x, active "
-                                "%d, ready %x, %x, current %x!\n", chan_id,
-                                ichan->active_buffer, ready0, ready1, curbuf);
-                        return IRQ_NONE;
-                } else
-                        dev_dbg(dev,
-                                "Buffer deactivated on channel %x, active "
-                                "%d, ready %x, %x, current %x, rest %d!\n", chan_id,
-                                ichan->active_buffer, ready0, ready1, curbuf, i);
-        }
-
         if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
                      (!ichan->active_buffer && (ready0 >> chan_id) & 1)
             )) {