path: root/drivers/dma/ipu/ipu_idmac.c
author    Guennadi Liakhovetski <g.liakhovetski@gmx.de>  2009-03-25 12:13:24 -0400
committer Dan Williams <dan.j.williams@intel.com>        2009-03-25 12:13:24 -0400
commit    8d47bae004f062630f69f7f83d098424252e232d (patch)
tree      68379790587eef10926d029ffce74b763175b10c /drivers/dma/ipu/ipu_idmac.c
parent    234f2df56f5b05756c444edc9879145deddf69f4 (diff)
dma: i.MX31 IPU DMA robustness improvements
Add DMA error handling to the ISR, move common code fragments to functions,
fix scatter-gather element queuing in the ISR, and survive channel freeing
and re-allocation in quick succession.

Signed-off-by: Guennadi Liakhovetski <lg@denx.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
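For context, a minimal sketch of how a dmaengine client of this era would
drive the submit path reworked below. submit_frame() and frame_done() are
hypothetical names invented for illustration; only the dmaengine calls and
the driver entry points they reach (idmac_prep_slave_sg(),
idmac_tx_submit()) are real:

	/* Sketch: hand one scatterlist to an IDMAC channel. */
	static void frame_done(void *arg)	/* hypothetical completion callback */
	{
		complete(arg);
	}

	static int submit_frame(struct dma_chan *chan, struct scatterlist *sgl,
				unsigned int sg_len, struct completion *done)
	{
		struct dma_async_tx_descriptor *txd;
		dma_cookie_t cookie;

		/* ends up in idmac_prep_slave_sg() */
		txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
							 DMA_FROM_DEVICE, 0);
		if (!txd)
			return -EIO;

		txd->callback = frame_done;
		txd->callback_param = done;

		/* ends up in idmac_tx_submit() below */
		cookie = txd->tx_submit(txd);
		return cookie < 0 ? cookie : 0;
	}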
Diffstat (limited to 'drivers/dma/ipu/ipu_idmac.c')
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 265
1 file changed, 181 insertions(+), 84 deletions(-)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index b759ae9315ba..0a525c450f24 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -28,6 +28,9 @@
 #define FS_VF_IN_VALID	0x00000002
 #define FS_ENC_IN_VALID	0x00000001
 
+static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
+			       bool wait_for_stop);
+
 /*
  * There can be only one, we could allocate it dynamically, but then we'd have
  * to add an extra parameter to some functions, and use something as ugly as
@@ -762,9 +765,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
  * function will fail if the buffer is set to ready.
  */
 /* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(enum ipu_channel channel,
+static int ipu_update_channel_buffer(struct idmac_channel *ichan,
 				     int buffer_n, dma_addr_t phyaddr)
 {
+	enum ipu_channel channel = ichan->dma_chan.chan_id;
 	uint32_t reg;
 	unsigned long flags;
 
@@ -773,8 +777,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
 	if (buffer_n == 0) {
 		reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
 		if (reg & (1UL << channel)) {
-			spin_unlock_irqrestore(&ipu_data.lock, flags);
-			return -EACCES;
+			ipu_ic_disable_task(&ipu_data, channel);
+			ichan->status = IPU_CHANNEL_READY;
 		}
 
 		/* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
@@ -784,8 +788,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
 	} else {
 		reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
 		if (reg & (1UL << channel)) {
-			spin_unlock_irqrestore(&ipu_data.lock, flags);
-			return -EACCES;
+			ipu_ic_disable_task(&ipu_data, channel);
+			ichan->status = IPU_CHANNEL_READY;
 		}
 
 		/* Check if double-buffering is already enabled */
@@ -807,6 +811,39 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
 }
 
 /* Called under spin_lock_irqsave(&ichan->lock) */
+static int ipu_submit_buffer(struct idmac_channel *ichan,
+	struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
+{
+	unsigned int chan_id = ichan->dma_chan.chan_id;
+	struct device *dev = &ichan->dma_chan.dev->device;
+	int ret;
+
+	if (async_tx_test_ack(&desc->txd))
+		return -EINTR;
+
+	/*
+	 * On first invocation this shouldn't be necessary, the call to
+	 * ipu_init_channel_buffer() above will set addresses for us, so we
+	 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
+	 * doing it again shouldn't hurt either.
+	 */
+	ret = ipu_update_channel_buffer(ichan, buf_idx,
+					sg_dma_address(sg));
+
+	if (ret < 0) {
+		dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
+			sg, chan_id, buf_idx);
+		return ret;
+	}
+
+	ipu_select_buffer(chan_id, buf_idx);
+	dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
+		sg, chan_id, buf_idx);
+
+	return 0;
+}
+
+/* Called under spin_lock_irqsave(&ichan->lock) */
 static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
 				      struct idmac_tx_desc *desc)
 {
@@ -817,20 +854,10 @@ static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
 		if (!ichan->sg[i]) {
 			ichan->sg[i] = sg;
 
-			/*
-			 * On first invocation this shouldn't be necessary, the
-			 * call to ipu_init_channel_buffer() above will set
-			 * addresses for us, so we could make it conditional
-			 * on status >= IPU_CHANNEL_ENABLED, but doing it again
-			 * shouldn't hurt either.
-			 */
-			ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i,
-							sg_dma_address(sg));
+			ret = ipu_submit_buffer(ichan, desc, sg, i);
 			if (ret < 0)
 				return ret;
 
-			ipu_select_buffer(ichan->dma_chan.chan_id, i);
-
 			sg = sg_next(sg);
 		}
 	}
@@ -844,19 +871,22 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct idmac_channel *ichan = to_idmac_chan(tx->chan);
 	struct idmac *idmac = to_idmac(tx->chan->device);
 	struct ipu *ipu = to_ipu(idmac);
+	struct device *dev = &ichan->dma_chan.dev->device;
 	dma_cookie_t cookie;
 	unsigned long flags;
+	int ret;
 
 	/* Sanity check */
 	if (!list_empty(&desc->list)) {
 		/* The descriptor doesn't belong to client */
-		dev_err(&ichan->dma_chan.dev->device,
-			"Descriptor %p not prepared!\n", tx);
+		dev_err(dev, "Descriptor %p not prepared!\n", tx);
 		return -EBUSY;
 	}
 
 	mutex_lock(&ichan->chan_mutex);
 
+	async_tx_clear_ack(tx);
+
 	if (ichan->status < IPU_CHANNEL_READY) {
 		struct idmac_video_param *video = &ichan->params.video;
 		/*
@@ -880,16 +910,7 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
 		goto out;
 	}
 
-	/* ipu->lock can be taken under ichan->lock, but not v.v. */
-	spin_lock_irqsave(&ichan->lock, flags);
-
-	/* submit_buffers() atomically verifies and fills empty sg slots */
-	cookie = ipu_submit_channel_buffers(ichan, desc);
-
-	spin_unlock_irqrestore(&ichan->lock, flags);
-
-	if (cookie < 0)
-		goto out;
+	dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
 
 	cookie = ichan->dma_chan.cookie;
 
@@ -899,24 +920,40 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	/* from dmaengine.h: "last cookie value returned to client" */
 	ichan->dma_chan.cookie = cookie;
 	tx->cookie = cookie;
+
+	/* ipu->lock can be taken under ichan->lock, but not v.v. */
 	spin_lock_irqsave(&ichan->lock, flags);
+
 	list_add_tail(&desc->list, &ichan->queue);
+	/* submit_buffers() atomically verifies and fills empty sg slots */
+	ret = ipu_submit_channel_buffers(ichan, desc);
+
 	spin_unlock_irqrestore(&ichan->lock, flags);
 
+	if (ret < 0) {
+		cookie = ret;
+		goto dequeue;
+	}
+
 	if (ichan->status < IPU_CHANNEL_ENABLED) {
-		int ret = ipu_enable_channel(idmac, ichan);
+		ret = ipu_enable_channel(idmac, ichan);
 		if (ret < 0) {
 			cookie = ret;
-			spin_lock_irqsave(&ichan->lock, flags);
-			list_del_init(&desc->list);
-			spin_unlock_irqrestore(&ichan->lock, flags);
-			tx->cookie = cookie;
-			ichan->dma_chan.cookie = cookie;
+			goto dequeue;
 		}
 	}
 
 	dump_idmac_reg(ipu);
 
+dequeue:
+	if (cookie < 0) {
+		spin_lock_irqsave(&ichan->lock, flags);
+		list_del_init(&desc->list);
+		spin_unlock_irqrestore(&ichan->lock, flags);
+		tx->cookie = cookie;
+		ichan->dma_chan.cookie = cookie;
+	}
+
 out:
 	mutex_unlock(&ichan->chan_mutex);
 
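The reworked submit path relies on a fixed lock order (ipu_data.lock may be
taken under ichan->lock, never the reverse), so queueing the descriptor and
programming the hardware buffers now happen in a single ichan->lock section
with one unwind point. Schematically (a sketch of the flow above, not the
full function):

	spin_lock_irqsave(&ichan->lock, flags);
	list_add_tail(&desc->list, &ichan->queue);
	ret = ipu_submit_channel_buffers(ichan, desc);	/* may take ipu_data.lock */
	spin_unlock_irqrestore(&ichan->lock, flags);

	if (ret < 0) {
		/* single unwind point: take the descriptor back off the queue */
		spin_lock_irqsave(&ichan->lock, flags);
		list_del_init(&desc->list);
		spin_unlock_irqrestore(&ichan->lock, flags);
	}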
@@ -1163,6 +1200,24 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
 	return 0;
 }
 
+static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
+	struct idmac_tx_desc **desc, struct scatterlist *sg)
+{
+	struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;
+
+	if (sgnew)
+		/* next sg-element in this list */
+		return sgnew;
+
+	if ((*desc)->list.next == &ichan->queue)
+		/* No more descriptors on the queue */
+		return NULL;
+
+	/* Fetch next descriptor */
+	*desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
+	return (*desc)->sg;
+}
+
 /*
  * We have several possibilities here:
  * current BUF	next BUF
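idmac_sg_next() centralizes the "next element in this scatterlist, else
first element of the next queued descriptor" step that the interrupt handler
used to open-code. The ISR below calls it twice; condensed from the hunks
that follow (names as in the handler):

	/* what should already sit in the inactive buffer */
	sgnew = idmac_sg_next(ichan, &descnew, *sg);
	if (sgnext != sgnew)
		dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);

	/* ...and the element after that, to go into the buffer just freed */
	sgnew = idmac_sg_next(ichan, &descnew, sgnew);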
@@ -1178,23 +1233,46 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
 static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 {
 	struct idmac_channel *ichan = dev_id;
+	struct device *dev = &ichan->dma_chan.dev->device;
 	unsigned int chan_id = ichan->dma_chan.chan_id;
 	struct scatterlist **sg, *sgnext, *sgnew = NULL;
 	/* Next transfer descriptor */
-	struct idmac_tx_desc *desc = NULL, *descnew;
+	struct idmac_tx_desc *desc, *descnew;
 	dma_async_tx_callback callback;
 	void *callback_param;
 	bool done = false;
-	u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY),
-	    ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY),
-	    curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
+	u32 ready0, ready1, curbuf, err;
+	unsigned long flags;
 
 	/* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
 
-	pr_debug("IDMAC irq %d\n", irq);
+	dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
+
+	spin_lock_irqsave(&ipu_data.lock, flags);
+
+	ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
+	ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
+	curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
+	err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
+
+	if (err & (1 << chan_id)) {
+		idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
+		spin_unlock_irqrestore(&ipu_data.lock, flags);
+		/*
+		 * Doing this
+		 * ichan->sg[0] = ichan->sg[1] = NULL;
+		 * you can force channel re-enable on the next tx_submit(), but
+		 * this is dirty - think about descriptors with multiple
+		 * sg elements.
+		 */
+		dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
+			 chan_id, ready0, ready1, curbuf);
+		return IRQ_HANDLED;
+	}
+	spin_unlock_irqrestore(&ipu_data.lock, flags);
+
 	/* Other interrupts do not interfere with this channel */
 	spin_lock(&ichan->lock);
-
 	if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
 		     ((curbuf >> chan_id) & 1) == ichan->active_buffer)) {
 		int i = 100;
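The new error path checks the NFB4EOF ("new frame before end of frame")
status for this channel and acks it by writing the same bit back:
IPU_INT_STAT_4 behaves as a write-one-to-clear register, so only this
channel's error bit is dropped. Condensed from the hunk above (accessors as
in the driver):

	err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
	if (err & (1 << chan_id))
		/* W1C: writing the bit clears only this channel's error */
		idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);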
@@ -1209,19 +1287,23 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 		if (!i) {
 			spin_unlock(&ichan->lock);
-			dev_dbg(ichan->dma_chan.device->dev,
+			dev_dbg(dev,
 				"IRQ on active buffer on channel %x, active "
 				"%d, ready %x, %x, current %x!\n", chan_id,
 				ichan->active_buffer, ready0, ready1, curbuf);
 			return IRQ_NONE;
-		}
+		} else
+			dev_dbg(dev,
+				"Buffer deactivated on channel %x, active "
+				"%d, ready %x, %x, current %x, rest %d!\n", chan_id,
+				ichan->active_buffer, ready0, ready1, curbuf, i);
 	}
 
 	if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
 		     (!ichan->active_buffer && (ready0 >> chan_id) & 1)
 	    )) {
 		spin_unlock(&ichan->lock);
-		dev_dbg(ichan->dma_chan.device->dev,
+		dev_dbg(dev,
 			"IRQ with active buffer still ready on channel %x, "
 			"active %d, ready %x, %x!\n", chan_id,
 			ichan->active_buffer, ready0, ready1);
@@ -1229,8 +1311,9 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 	}
 
 	if (unlikely(list_empty(&ichan->queue))) {
+		ichan->sg[ichan->active_buffer] = NULL;
 		spin_unlock(&ichan->lock);
-		dev_err(ichan->dma_chan.device->dev,
+		dev_err(dev,
 			"IRQ without queued buffers on channel %x, active %d, "
 			"ready %x, %x!\n", chan_id,
 			ichan->active_buffer, ready0, ready1);
@@ -1245,40 +1328,44 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 	sg = &ichan->sg[ichan->active_buffer];
 	sgnext = ichan->sg[!ichan->active_buffer];
 
+	if (!*sg) {
+		spin_unlock(&ichan->lock);
+		return IRQ_HANDLED;
+	}
+
+	desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
+	descnew = desc;
+
+	dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n",
+		irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf);
+
+	/* Find the descriptor of sgnext */
+	sgnew = idmac_sg_next(ichan, &descnew, *sg);
+	if (sgnext != sgnew)
+		dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
+
 	/*
 	 * if sgnext == NULL sg must be the last element in a scatterlist and
 	 * queue must be empty
 	 */
 	if (unlikely(!sgnext)) {
-		if (unlikely(sg_next(*sg))) {
-			dev_err(ichan->dma_chan.device->dev,
-				"Broken buffer-update locking on channel %x!\n",
-				chan_id);
-			/* We'll let the user catch up */
+		if (!WARN_ON(sg_next(*sg)))
+			dev_dbg(dev, "Underrun on channel %x\n", chan_id);
+		ichan->sg[!ichan->active_buffer] = sgnew;
+
+		if (unlikely(sgnew)) {
+			ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
 		} else {
-			/* Underrun */
+			spin_lock_irqsave(&ipu_data.lock, flags);
 			ipu_ic_disable_task(&ipu_data, chan_id);
-			dev_dbg(ichan->dma_chan.device->dev,
-				"Underrun on channel %x\n", chan_id);
+			spin_unlock_irqrestore(&ipu_data.lock, flags);
 			ichan->status = IPU_CHANNEL_READY;
 			/* Continue to check for complete descriptor */
 		}
 	}
 
-	desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
-
-	/* First calculate and submit the next sg element */
-	if (likely(sgnext))
-		sgnew = sg_next(sgnext);
-
-	if (unlikely(!sgnew)) {
-		/* Start a new scatterlist, if any queued */
-		if (likely(desc->list.next != &ichan->queue)) {
-			descnew = list_entry(desc->list.next,
-					struct idmac_tx_desc, list);
-			sgnew = &descnew->sg[0];
-		}
-	}
+	/* Calculate and submit the next sg element */
+	sgnew = idmac_sg_next(ichan, &descnew, sgnew);
 
 	if (unlikely(!sg_next(*sg)) || !sgnext) {
 		/*
@@ -1291,17 +1378,13 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 	*sg = sgnew;
 
-	if (likely(sgnew)) {
-		int ret;
-
-		ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer,
-						sg_dma_address(*sg));
-		if (ret < 0)
-			dev_err(ichan->dma_chan.device->dev,
-				"Failed to update buffer on channel %x buffer %d!\n",
-				chan_id, ichan->active_buffer);
-		else
-			ipu_select_buffer(chan_id, ichan->active_buffer);
+	if (likely(sgnew) &&
+	    ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
+		callback = desc->txd.callback;
+		callback_param = desc->txd.callback_param;
+		spin_unlock(&ichan->lock);
+		callback(callback_param);
+		spin_lock(&ichan->lock);
 	}
 
 	/* Flip the active buffer - even if update above failed */
@@ -1329,13 +1412,20 @@ static void ipu_gc_tasklet(unsigned long arg)
 		struct idmac_channel *ichan = ipu->channel + i;
 		struct idmac_tx_desc *desc;
 		unsigned long flags;
-		int j;
+		struct scatterlist *sg;
+		int j, k;
 
 		for (j = 0; j < ichan->n_tx_desc; j++) {
 			desc = ichan->desc + j;
 			spin_lock_irqsave(&ichan->lock, flags);
 			if (async_tx_test_ack(&desc->txd)) {
 				list_move(&desc->list, &ichan->free_list);
+				for_each_sg(desc->sg, sg, desc->sg_len, k) {
+					if (ichan->sg[0] == sg)
+						ichan->sg[0] = NULL;
+					else if (ichan->sg[1] == sg)
+						ichan->sg[1] = NULL;
+				}
 				async_tx_clear_ack(&desc->txd);
 			}
 			spin_unlock_irqrestore(&ichan->lock, flags);
@@ -1471,15 +1561,22 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan)
 		goto eimap;
 
 	ichan->eof_irq = ret;
-	ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
-			  ichan->eof_name, ichan);
-	if (ret < 0)
-		goto erirq;
+
+	/*
+	 * Important to first disable the channel, because maybe someone
+	 * used it before us, e.g., the bootloader
+	 */
+	ipu_disable_channel(idmac, ichan, true);
 
 	ret = ipu_init_channel(idmac, ichan);
 	if (ret < 0)
 		goto eichan;
 
+	ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
+			  ichan->eof_name, ichan);
+	if (ret < 0)
+		goto erirq;
+
 	ichan->status = IPU_CHANNEL_INITIALIZED;
 
 	dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n",
@@ -1487,9 +1584,9 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan)
 
 	return ret;
 
-eichan:
-	free_irq(ichan->eof_irq, ichan);
 erirq:
+	ipu_uninit_channel(idmac, ichan);
+eichan:
 	ipu_irq_unmap(ichan->dma_chan.chan_id);
 eimap:
 	return ret;
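With request_irq() moved after ipu_init_channel(), the error ladder is
relabelled so each label undoes exactly what succeeded before the jump.
Laid out in bring-up order (a sketch; ipu_irq_map() happens earlier in the
function):

	ipu_disable_channel(idmac, ichan, true);	/* nothing to undo */

	ret = ipu_init_channel(idmac, ichan);
	if (ret < 0)
		goto eichan;		/* undo: irq mapping only */

	ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
			  ichan->eof_name, ichan);
	if (ret < 0)
		goto erirq;		/* undo: channel init, then irq mapping */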