Diffstat (limited to 'drivers/dma/ipu/ipu_idmac.c')
 drivers/dma/ipu/ipu_idmac.c | 371
 1 file changed, 254 insertions(+), 117 deletions(-)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index da781d107895..e202a6ce5573 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -28,6 +28,9 @@
 #define FS_VF_IN_VALID	0x00000002
 #define FS_ENC_IN_VALID	0x00000001
 
+static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
+			       bool wait_for_stop);
+
 /*
  * There can be only one, we could allocate it dynamically, but then we'd have
  * to add an extra parameter to some functions, and use something as ugly as
@@ -107,7 +110,7 @@ static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
 	}
 }
 
-/* Enable / disable direct write to memory by the Camera Sensor Interface */
+/* Enable direct write to memory by the Camera Sensor Interface */
 static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
 {
 	uint32_t ic_conf, mask;
@@ -126,6 +129,7 @@ static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
 	idmac_write_icreg(ipu, ic_conf, IC_CONF);
 }
 
+/* Called under spin_lock_irqsave(&ipu_data.lock) */
 static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
 {
 	uint32_t ic_conf, mask;
@@ -422,7 +426,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
 		break;
 	default:
 		dev_err(ipu_data.dev,
-			"mxc ipu: unimplemented pixel format %d\n", pixel_fmt);
+			"mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
 		break;
 	}
 
@@ -433,20 +437,20 @@ static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
 					uint16_t burst_pixels)
 {
 	params->pp.npb = burst_pixels - 1;
-};
+}
 
 static void ipu_ch_param_set_buffer(union chan_param_mem *params,
 				    dma_addr_t buf0, dma_addr_t buf1)
 {
 	params->pp.eba0 = buf0;
 	params->pp.eba1 = buf1;
-};
+}
 
 static void ipu_ch_param_set_rotation(union chan_param_mem *params,
 				      enum ipu_rotate_mode rotate)
 {
 	params->pp.bam = rotate;
-};
+}
 
 static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
 				uint32_t num_words)
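Annotation: the hunks above and below drop stray semicolons after function bodies. For reference, a minimal sketch (hypothetical helper) of what the extra token amounts to — a `};` after a function definition leaves an empty top-level declaration, which GCC accepts silently but flags under -pedantic:

static inline int add_one(int x)
{
        return x + 1;
}       /* correct: no ';' after a function body */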
@@ -571,7 +575,7 @@ static uint32_t dma_param_addr(uint32_t dma_ch)
 {
 	/* Channel Parameter Memory */
 	return 0x10000 | (dma_ch << 4);
-};
+}
 
 static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
 				     bool prio)
@@ -611,7 +615,8 @@ static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
 
 /**
  * ipu_enable_channel() - enable an IPU channel.
- * @channel:	channel ID.
+ * @idmac:	IPU DMAC context.
+ * @ichan:	IDMAC channel.
  * @return:	0 on success or negative error code on failure.
  */
 static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
@@ -649,7 +654,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
 
 /**
  * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
- * @channel:	channel ID.
+ * @ichan:	IDMAC channel.
  * @pixel_fmt:	pixel format of buffer. Pixel format is a FOURCC ASCII code.
  * @width:	width of buffer in pixels.
  * @height:	height of buffer in pixels.
@@ -687,7 +692,7 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
 	}
 
 	/* IC channel's stride must be a multiple of 8 pixels */
-	if ((channel <= 13) && (stride % 8)) {
+	if ((channel <= IDMAC_IC_13) && (stride % 8)) {
 		dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
 		return -EINVAL;
 	}
@@ -752,7 +757,7 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
 
 /**
  * ipu_update_channel_buffer() - update physical address of a channel buffer.
- * @channel:	channel ID.
+ * @ichan:	IDMAC channel.
  * @buffer_n:	buffer number to update.
  *		0 or 1 are the only valid values.
  * @phyaddr:	buffer physical address.
@@ -760,9 +765,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
  *		function will fail if the buffer is set to ready.
  */
 /* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(enum ipu_channel channel,
+static int ipu_update_channel_buffer(struct idmac_channel *ichan,
 				     int buffer_n, dma_addr_t phyaddr)
 {
+	enum ipu_channel channel = ichan->dma_chan.chan_id;
 	uint32_t reg;
 	unsigned long flags;
 
@@ -771,8 +777,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
 	if (buffer_n == 0) {
 		reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
 		if (reg & (1UL << channel)) {
-			spin_unlock_irqrestore(&ipu_data.lock, flags);
-			return -EACCES;
+			ipu_ic_disable_task(&ipu_data, channel);
+			ichan->status = IPU_CHANNEL_READY;
 		}
 
 		/* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
@@ -782,8 +788,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
 	} else {
 		reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
 		if (reg & (1UL << channel)) {
-			spin_unlock_irqrestore(&ipu_data.lock, flags);
-			return -EACCES;
+			ipu_ic_disable_task(&ipu_data, channel);
+			ichan->status = IPU_CHANNEL_READY;
 		}
 
 		/* Check if double-buffering is already enabled */
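Annotation: both hunks above change what happens when a BUFx_RDY bit is still set — instead of bailing out with -EACCES, the IC task is stopped and the channel is marked ready for re-initialisation. A minimal sketch of the ready-bit test being made here, under a hypothetical helper name (the register names are the driver's own):

static bool ipu_buffer_busy(unsigned int channel, int buffer_n)
{
        u32 reg = idmac_read_ipureg(&ipu_data,
                        buffer_n ? IPU_CHA_BUF1_RDY : IPU_CHA_BUF0_RDY);

        /* The IDMAC clears the bit when it is done with the buffer;
         * software finding it still set means the buffer is in flight. */
        return reg & (1UL << channel);
}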
@@ -805,6 +811,39 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
 }
 
 /* Called under spin_lock_irqsave(&ichan->lock) */
+static int ipu_submit_buffer(struct idmac_channel *ichan,
+	struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
+{
+	unsigned int chan_id = ichan->dma_chan.chan_id;
+	struct device *dev = &ichan->dma_chan.dev->device;
+	int ret;
+
+	if (async_tx_test_ack(&desc->txd))
+		return -EINTR;
+
+	/*
+	 * On first invocation this shouldn't be necessary, the call to
+	 * ipu_init_channel_buffer() above will set addresses for us, so we
+	 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
+	 * doing it again shouldn't hurt either.
+	 */
+	ret = ipu_update_channel_buffer(ichan, buf_idx,
+					sg_dma_address(sg));
+
+	if (ret < 0) {
+		dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
+			sg, chan_id, buf_idx);
+		return ret;
+	}
+
+	ipu_select_buffer(chan_id, buf_idx);
+	dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
+		sg, chan_id, buf_idx);
+
+	return 0;
+}
+
+/* Called under spin_lock_irqsave(&ichan->lock) */
 static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
 				      struct idmac_tx_desc *desc)
 {
@@ -815,20 +854,10 @@ static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
 		if (!ichan->sg[i]) {
 			ichan->sg[i] = sg;
 
-			/*
-			 * On first invocation this shouldn't be necessary, the
-			 * call to ipu_init_channel_buffer() above will set
-			 * addresses for us, so we could make it conditional
-			 * on status >= IPU_CHANNEL_ENABLED, but doing it again
-			 * shouldn't hurt either.
-			 */
-			ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i,
-							sg_dma_address(sg));
+			ret = ipu_submit_buffer(ichan, desc, sg, i);
 			if (ret < 0)
 				return ret;
 
-			ipu_select_buffer(ichan->dma_chan.chan_id, i);
-
 			sg = sg_next(sg);
 		}
 	}
@@ -842,19 +871,22 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct idmac_channel *ichan = to_idmac_chan(tx->chan);
 	struct idmac *idmac = to_idmac(tx->chan->device);
 	struct ipu *ipu = to_ipu(idmac);
+	struct device *dev = &ichan->dma_chan.dev->device;
 	dma_cookie_t cookie;
 	unsigned long flags;
+	int ret;
 
 	/* Sanity check */
 	if (!list_empty(&desc->list)) {
 		/* The descriptor doesn't belong to client */
-		dev_err(&ichan->dma_chan.dev->device,
-			"Descriptor %p not prepared!\n", tx);
+		dev_err(dev, "Descriptor %p not prepared!\n", tx);
 		return -EBUSY;
 	}
 
 	mutex_lock(&ichan->chan_mutex);
 
+	async_tx_clear_ack(tx);
+
 	if (ichan->status < IPU_CHANNEL_READY) {
 		struct idmac_video_param *video = &ichan->params.video;
 		/*
@@ -878,16 +910,7 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
 		goto out;
 	}
 
-	/* ipu->lock can be taken under ichan->lock, but not v.v. */
-	spin_lock_irqsave(&ichan->lock, flags);
-
-	/* submit_buffers() atomically verifies and fills empty sg slots */
-	cookie = ipu_submit_channel_buffers(ichan, desc);
-
-	spin_unlock_irqrestore(&ichan->lock, flags);
-
-	if (cookie < 0)
-		goto out;
+	dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
 
 	cookie = ichan->dma_chan.cookie;
 
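Annotation: the cookie bookkeeping that follows this hunk (unchanged by the patch) is the hand-rolled pattern dmaengine drivers of this era used; a sketch of it with a hypothetical helper:

static dma_cookie_t next_cookie(struct dma_chan *chan)
{
        dma_cookie_t cookie = chan->cookie;

        /* Negative cookies are error codes, so wrap back to 1. */
        if (++cookie < 0)
                cookie = 1;
        chan->cookie = cookie;
        return cookie;
}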
@@ -897,24 +920,40 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	/* from dmaengine.h: "last cookie value returned to client" */
 	ichan->dma_chan.cookie = cookie;
 	tx->cookie = cookie;
+
+	/* ipu->lock can be taken under ichan->lock, but not v.v. */
 	spin_lock_irqsave(&ichan->lock, flags);
+
 	list_add_tail(&desc->list, &ichan->queue);
+	/* submit_buffers() atomically verifies and fills empty sg slots */
+	ret = ipu_submit_channel_buffers(ichan, desc);
+
 	spin_unlock_irqrestore(&ichan->lock, flags);
 
+	if (ret < 0) {
+		cookie = ret;
+		goto dequeue;
+	}
+
 	if (ichan->status < IPU_CHANNEL_ENABLED) {
-		int ret = ipu_enable_channel(idmac, ichan);
+		ret = ipu_enable_channel(idmac, ichan);
 		if (ret < 0) {
 			cookie = ret;
-			spin_lock_irqsave(&ichan->lock, flags);
-			list_del_init(&desc->list);
-			spin_unlock_irqrestore(&ichan->lock, flags);
-			tx->cookie = cookie;
-			ichan->dma_chan.cookie = cookie;
+			goto dequeue;
 		}
 	}
 
 	dump_idmac_reg(ipu);
 
+dequeue:
+	if (cookie < 0) {
+		spin_lock_irqsave(&ichan->lock, flags);
+		list_del_init(&desc->list);
+		spin_unlock_irqrestore(&ichan->lock, flags);
+		tx->cookie = cookie;
+		ichan->dma_chan.cookie = cookie;
+	}
+
 out:
 	mutex_unlock(&ichan->chan_mutex);
 
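Annotation: the relocated comment states the driver's lock-order rule — ipu_data.lock may be taken while holding ichan->lock, never the other way around. A minimal sketch of the rule with stand-in locks:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(chan_lock);      /* stands in for ichan->lock */
static DEFINE_SPINLOCK(ipu_lock);       /* stands in for ipu_data.lock */

static void touch_channel_and_ipu(void)
{
        unsigned long flags;

        /* Always the same nesting direction; an inverted order on any
         * other code path could deadlock against this one. */
        spin_lock_irqsave(&chan_lock, flags);
        spin_lock(&ipu_lock);
        /* ... per-channel state and shared IPU registers ... */
        spin_unlock(&ipu_lock);
        spin_unlock_irqrestore(&chan_lock, flags);
}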
@@ -944,8 +983,6 @@ static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
 		memset(txd, 0, sizeof(*txd));
 		dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
 		txd->tx_submit		= idmac_tx_submit;
-		txd->chan		= &ichan->dma_chan;
-		INIT_LIST_HEAD(&txd->tx_list);
 
 		list_add(&desc->list, &ichan->free_list);
 
@@ -1161,6 +1198,24 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
 	return 0;
 }
 
+static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
+	struct idmac_tx_desc **desc, struct scatterlist *sg)
+{
+	struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;
+
+	if (sgnew)
+		/* next sg-element in this list */
+		return sgnew;
+
+	if ((*desc)->list.next == &ichan->queue)
+		/* No more descriptors on the queue */
+		return NULL;
+
+	/* Fetch next descriptor */
+	*desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
+	return (*desc)->sg;
+}
+
 /*
  * We have several possibilities here:
  *		current BUF		next BUF
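Annotation: idmac_sg_next() folds the two-step lookup the interrupt handler used to do by hand into one helper — advance within the current scatterlist, else fall through to the first element of the next queued descriptor. An illustrative (not in-tree) walk over everything queued on a channel, assuming ichan->lock is held:

static void walk_queued_sgs(struct idmac_channel *ichan,
                            struct idmac_tx_desc *desc)
{
        struct idmac_tx_desc *d = desc;
        struct scatterlist *s = desc->sg;

        while (s) {
                /* ... program s into the IDMAC, wait for its EOF ... */
                s = idmac_sg_next(ichan, &d, s);        /* may advance d */
        }
}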
@@ -1176,23 +1231,46 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
1176static irqreturn_t idmac_interrupt(int irq, void *dev_id) 1231static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1177{ 1232{
1178 struct idmac_channel *ichan = dev_id; 1233 struct idmac_channel *ichan = dev_id;
1234 struct device *dev = &ichan->dma_chan.dev->device;
1179 unsigned int chan_id = ichan->dma_chan.chan_id; 1235 unsigned int chan_id = ichan->dma_chan.chan_id;
1180 struct scatterlist **sg, *sgnext, *sgnew = NULL; 1236 struct scatterlist **sg, *sgnext, *sgnew = NULL;
1181 /* Next transfer descriptor */ 1237 /* Next transfer descriptor */
1182 struct idmac_tx_desc *desc = NULL, *descnew; 1238 struct idmac_tx_desc *desc, *descnew;
1183 dma_async_tx_callback callback; 1239 dma_async_tx_callback callback;
1184 void *callback_param; 1240 void *callback_param;
1185 bool done = false; 1241 bool done = false;
1186 u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY), 1242 u32 ready0, ready1, curbuf, err;
1187 ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY), 1243 unsigned long flags;
1188 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1189 1244
1190 /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */ 1245 /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
1191 1246
1192 pr_debug("IDMAC irq %d\n", irq); 1247 dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
1248
1249 spin_lock_irqsave(&ipu_data.lock, flags);
1250
1251 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
1252 ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
1253 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1254 err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
1255
1256 if (err & (1 << chan_id)) {
1257 idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
1258 spin_unlock_irqrestore(&ipu_data.lock, flags);
1259 /*
1260 * Doing this
1261 * ichan->sg[0] = ichan->sg[1] = NULL;
1262 * you can force channel re-enable on the next tx_submit(), but
1263 * this is dirty - think about descriptors with multiple
1264 * sg elements.
1265 */
1266 dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
1267 chan_id, ready0, ready1, curbuf);
1268 return IRQ_HANDLED;
1269 }
1270 spin_unlock_irqrestore(&ipu_data.lock, flags);
1271
1193 /* Other interrupts do not interfere with this channel */ 1272 /* Other interrupts do not interfere with this channel */
1194 spin_lock(&ichan->lock); 1273 spin_lock(&ichan->lock);
1195
1196 if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && 1274 if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
1197 ((curbuf >> chan_id) & 1) == ichan->active_buffer)) { 1275 ((curbuf >> chan_id) & 1) == ichan->active_buffer)) {
1198 int i = 100; 1276 int i = 100;
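Annotation: the new error path assumes IPU_INT_STAT_4 follows the write-one-to-clear convention common to interrupt status registers — writing a 1 acknowledges exactly that channel's NFB4EOF bit and leaves the others alone. That acknowledge step in isolation, as a sketch with a hypothetical helper:

/* Returns true if an NFB4EOF error was pending and has been acked. */
static bool ack_nfb4eof(unsigned int chan_id)
{
        u32 stat = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);

        if (!(stat & (1 << chan_id)))
                return false;
        /* write-one-to-clear: only this channel's bit is cleared */
        idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
        return true;
}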
@@ -1207,19 +1285,23 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 		if (!i) {
 			spin_unlock(&ichan->lock);
-			dev_dbg(ichan->dma_chan.device->dev,
+			dev_dbg(dev,
 				"IRQ on active buffer on channel %x, active "
 				"%d, ready %x, %x, current %x!\n", chan_id,
 				ichan->active_buffer, ready0, ready1, curbuf);
 			return IRQ_NONE;
-		}
+		} else
+			dev_dbg(dev,
+				"Buffer deactivated on channel %x, active "
+				"%d, ready %x, %x, current %x, rest %d!\n", chan_id,
+				ichan->active_buffer, ready0, ready1, curbuf, i);
 	}
 
 	if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
 		     (!ichan->active_buffer && (ready0 >> chan_id) & 1)
 		     )) {
 		spin_unlock(&ichan->lock);
-		dev_dbg(ichan->dma_chan.device->dev,
+		dev_dbg(dev,
 			"IRQ with active buffer still ready on channel %x, "
 			"active %d, ready %x, %x!\n", chan_id,
 			ichan->active_buffer, ready0, ready1);
@@ -1227,8 +1309,9 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 	}
 
 	if (unlikely(list_empty(&ichan->queue))) {
+		ichan->sg[ichan->active_buffer] = NULL;
 		spin_unlock(&ichan->lock);
-		dev_err(ichan->dma_chan.device->dev,
+		dev_err(dev,
 			"IRQ without queued buffers on channel %x, active %d, "
 			"ready %x, %x!\n", chan_id,
 			ichan->active_buffer, ready0, ready1);
@@ -1243,40 +1326,44 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 	sg = &ichan->sg[ichan->active_buffer];
 	sgnext = ichan->sg[!ichan->active_buffer];
 
+	if (!*sg) {
+		spin_unlock(&ichan->lock);
+		return IRQ_HANDLED;
+	}
+
+	desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
+	descnew = desc;
+
+	dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n",
+		irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf);
+
+	/* Find the descriptor of sgnext */
+	sgnew = idmac_sg_next(ichan, &descnew, *sg);
+	if (sgnext != sgnew)
+		dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
+
 	/*
 	 * if sgnext == NULL sg must be the last element in a scatterlist and
 	 * queue must be empty
 	 */
 	if (unlikely(!sgnext)) {
-		if (unlikely(sg_next(*sg))) {
-			dev_err(ichan->dma_chan.device->dev,
-				"Broken buffer-update locking on channel %x!\n",
-				chan_id);
-			/* We'll let the user catch up */
+		if (!WARN_ON(sg_next(*sg)))
+			dev_dbg(dev, "Underrun on channel %x\n", chan_id);
+		ichan->sg[!ichan->active_buffer] = sgnew;
+
+		if (unlikely(sgnew)) {
+			ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
 		} else {
-			/* Underrun */
+			spin_lock_irqsave(&ipu_data.lock, flags);
 			ipu_ic_disable_task(&ipu_data, chan_id);
-			dev_dbg(ichan->dma_chan.device->dev,
-				"Underrun on channel %x\n", chan_id);
+			spin_unlock_irqrestore(&ipu_data.lock, flags);
 			ichan->status = IPU_CHANNEL_READY;
 			/* Continue to check for complete descriptor */
 		}
 	}
 
-	desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
-
-	/* First calculate and submit the next sg element */
-	if (likely(sgnext))
-		sgnew = sg_next(sgnext);
-
-	if (unlikely(!sgnew)) {
-		/* Start a new scatterlist, if any queued */
-		if (likely(desc->list.next != &ichan->queue)) {
-			descnew = list_entry(desc->list.next,
-					struct idmac_tx_desc, list);
-			sgnew = &descnew->sg[0];
-		}
-	}
+	/* Calculate and submit the next sg element */
+	sgnew = idmac_sg_next(ichan, &descnew, sgnew);
 
 	if (unlikely(!sg_next(*sg)) || !sgnext) {
 		/*
@@ -1289,17 +1376,13 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 	*sg = sgnew;
 
-	if (likely(sgnew)) {
-		int ret;
-
-		ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer,
-						sg_dma_address(*sg));
-		if (ret < 0)
-			dev_err(ichan->dma_chan.device->dev,
-				"Failed to update buffer on channel %x buffer %d!\n",
-				chan_id, ichan->active_buffer);
-		else
-			ipu_select_buffer(chan_id, ichan->active_buffer);
+	if (likely(sgnew) &&
+	    ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
+		callback = desc->txd.callback;
+		callback_param = desc->txd.callback_param;
+		spin_unlock(&ichan->lock);
+		callback(callback_param);
+		spin_lock(&ichan->lock);
 	}
 
 	/* Flip the active buffer - even if update above failed */
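Annotation: two idioms in the reworked handler are worth spelling out. WARN_ON(cond) evaluates to cond, so the underrun path above both tests the condition and dumps a backtrace when it unexpectedly holds; and the failure branch drops ichan->lock around the client callback, since a callback may re-enter the driver and take the lock itself. A sketch of the WARN_ON pattern, with a hypothetical consumer:

#include <linux/bug.h>

void consume(void *p);

static void use_if_sane(void *p)
{
        /* Backtrace on the unexpected path, normal flow otherwise. */
        if (!WARN_ON(p == NULL))
                consume(p);
}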
@@ -1327,13 +1410,20 @@ static void ipu_gc_tasklet(unsigned long arg)
 		struct idmac_channel *ichan = ipu->channel + i;
 		struct idmac_tx_desc *desc;
 		unsigned long flags;
-		int j;
+		struct scatterlist *sg;
+		int j, k;
 
 		for (j = 0; j < ichan->n_tx_desc; j++) {
 			desc = ichan->desc + j;
 			spin_lock_irqsave(&ichan->lock, flags);
 			if (async_tx_test_ack(&desc->txd)) {
 				list_move(&desc->list, &ichan->free_list);
+				for_each_sg(desc->sg, sg, desc->sg_len, k) {
+					if (ichan->sg[0] == sg)
+						ichan->sg[0] = NULL;
+					else if (ichan->sg[1] == sg)
+						ichan->sg[1] = NULL;
+				}
 				async_tx_clear_ack(&desc->txd);
 			}
 			spin_unlock_irqrestore(&ichan->lock, flags);
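Annotation: the garbage collector now also forgets any in-flight sg pointers belonging to a descriptor being freed, so the interrupt handler cannot chase a stale element. The same for_each_sg() loop as a self-contained sketch with stand-in names:

#include <linux/scatterlist.h>

static void forget_freed_sgs(struct scatterlist *table, int nents,
                             struct scatterlist *active[2])
{
        struct scatterlist *sg;
        int k;

        for_each_sg(table, sg, nents, k) {
                if (active[0] == sg)
                        active[0] = NULL;
                else if (active[1] == sg)
                        active[1] = NULL;
        }
}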
@@ -1341,13 +1431,7 @@ static void ipu_gc_tasklet(unsigned long arg)
 	}
 }
 
-/*
- * At the time .device_alloc_chan_resources() method is called, we cannot know,
- * whether the client will accept the channel. Thus we must only check, if we
- * can satisfy client's request but the only real criterion to verify, whether
- * the client has accepted our offer is the client_count. That's why we have to
- * perform the rest of our allocation tasks on the first call to this function.
- */
+/* Allocate and initialise a transfer descriptor. */
 static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
 		struct scatterlist *sgl, unsigned int sg_len,
 		enum dma_data_direction direction, unsigned long tx_flags)
@@ -1358,8 +1442,8 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
 	unsigned long flags;
 
 	/* We only can handle these three channels so far */
-	if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 &&
-	    ichan->dma_chan.chan_id != IDMAC_IC_7)
+	if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 &&
+	    chan->chan_id != IDMAC_IC_7)
 		return NULL;
 
 	if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
@@ -1400,7 +1484,7 @@ static void idmac_issue_pending(struct dma_chan *chan)
 
 	/* This is not always needed, but doesn't hurt either */
 	spin_lock_irqsave(&ipu->lock, flags);
-	ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer);
+	ipu_select_buffer(chan->chan_id, ichan->active_buffer);
 	spin_unlock_irqrestore(&ipu->lock, flags);
 
 	/*
@@ -1432,8 +1516,7 @@ static void __idmac_terminate_all(struct dma_chan *chan)
 		struct idmac_tx_desc *desc = ichan->desc + i;
 		if (list_empty(&desc->list))
 			/* Descriptor was prepared, but not submitted */
-			list_add(&desc->list,
-				 &ichan->free_list);
+			list_add(&desc->list, &ichan->free_list);
 
 		async_tx_clear_ack(&desc->txd);
 	}
@@ -1458,6 +1541,28 @@ static void idmac_terminate_all(struct dma_chan *chan)
 	mutex_unlock(&ichan->chan_mutex);
 }
 
+#ifdef DEBUG
+static irqreturn_t ic_sof_irq(int irq, void *dev_id)
+{
+	struct idmac_channel *ichan = dev_id;
+	printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
+	       irq, ichan->dma_chan.chan_id);
+	disable_irq(irq);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ic_eof_irq(int irq, void *dev_id)
+{
+	struct idmac_channel *ichan = dev_id;
+	printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
+	       irq, ichan->dma_chan.chan_id);
+	disable_irq(irq);
+	return IRQ_HANDLED;
+}
+
+static int ic_sof = -EINVAL, ic_eof = -EINVAL;
+#endif
+
 static int idmac_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
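Annotation: the DEBUG-only handlers above call disable_irq() on their own interrupt line so each SOF/EOF fires once. Note that plain disable_irq() waits for running handlers to finish; when a handler disables its own line, disable_irq_nosync() is the variant that cannot end up waiting on the caller itself. That one-shot pattern as a sketch:

#include <linux/interrupt.h>

static irqreturn_t one_shot_irq(int irq, void *dev_id)
{
        /* Mark the line disabled without synchronizing against
         * running handlers - we are the running handler. */
        disable_irq_nosync(irq);
        return IRQ_HANDLED;
}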
@@ -1471,31 +1576,49 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan)
 	chan->cookie		= 1;
 	ichan->completed	= -ENXIO;
 
-	ret = ipu_irq_map(ichan->dma_chan.chan_id);
+	ret = ipu_irq_map(chan->chan_id);
 	if (ret < 0)
 		goto eimap;
 
 	ichan->eof_irq = ret;
+
+	/*
+	 * Important to first disable the channel, because maybe someone
+	 * used it before us, e.g., the bootloader
+	 */
+	ipu_disable_channel(idmac, ichan, true);
+
+	ret = ipu_init_channel(idmac, ichan);
+	if (ret < 0)
+		goto eichan;
+
 	ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
 			  ichan->eof_name, ichan);
 	if (ret < 0)
 		goto erirq;
 
-	ret = ipu_init_channel(idmac, ichan);
-	if (ret < 0)
-		goto eichan;
+#ifdef DEBUG
+	if (chan->chan_id == IDMAC_IC_7) {
+		ic_sof = ipu_irq_map(69);
+		if (ic_sof > 0)
+			request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
+		ic_eof = ipu_irq_map(70);
+		if (ic_eof > 0)
+			request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
+	}
+#endif
 
 	ichan->status = IPU_CHANNEL_INITIALIZED;
 
-	dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n",
-		ichan->dma_chan.chan_id, ichan->eof_irq);
+	dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
+		chan->chan_id, ichan->eof_irq);
 
 	return ret;
 
-eichan:
-	free_irq(ichan->eof_irq, ichan);
 erirq:
-	ipu_irq_unmap(ichan->dma_chan.chan_id);
+	ipu_uninit_channel(idmac, ichan);
+eichan:
+	ipu_irq_unmap(chan->chan_id);
 eimap:
 	return ret;
 }
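Annotation: the reshuffled labels restore the kernel's usual unwind idiom — each label undoes only the steps that had succeeded, in reverse order, so a failing request_irq() now tears down the channel init instead of freeing an IRQ that was never requested. The shape of the idiom as a plain C sketch with hypothetical resources:

int grab_a(void); int grab_b(void); int grab_c(void);
void drop_a(void); void drop_b(void);

static int setup(void)
{
        int ret;

        ret = grab_a();
        if (ret < 0)
                goto err_a;
        ret = grab_b();         /* depends on a */
        if (ret < 0)
                goto err_b;
        ret = grab_c();         /* depends on b */
        if (ret < 0)
                goto err_c;
        return 0;

err_c:
        drop_b();               /* undo grab_b() only */
err_b:
        drop_a();               /* undo grab_a() only */
err_a:
        return ret;
}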
@@ -1510,8 +1633,22 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
 	__idmac_terminate_all(chan);
 
 	if (ichan->status > IPU_CHANNEL_FREE) {
+#ifdef DEBUG
+		if (chan->chan_id == IDMAC_IC_7) {
+			if (ic_sof > 0) {
+				free_irq(ic_sof, ichan);
+				ipu_irq_unmap(69);
+				ic_sof = -EINVAL;
+			}
+			if (ic_eof > 0) {
+				free_irq(ic_eof, ichan);
+				ipu_irq_unmap(70);
+				ic_eof = -EINVAL;
+			}
+		}
+#endif
 		free_irq(ichan->eof_irq, ichan);
-		ipu_irq_unmap(ichan->dma_chan.chan_id);
+		ipu_irq_unmap(chan->chan_id);
 	}
 
 	ichan->status = IPU_CHANNEL_FREE;
@@ -1573,7 +1710,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
 		dma_chan->device	= &idmac->dma;
 		dma_chan->cookie	= 1;
 		dma_chan->chan_id	= i;
-		list_add_tail(&ichan->dma_chan.device_node, &dma->channels);
+		list_add_tail(&dma_chan->device_node, &dma->channels);
 	}
 
 	idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
@@ -1581,7 +1718,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
 	return dma_async_device_register(&idmac->dma);
 }
 
-static void ipu_idmac_exit(struct ipu *ipu)
+static void __exit ipu_idmac_exit(struct ipu *ipu)
 {
 	int i;
 	struct idmac *idmac = &ipu->idmac;
@@ -1600,7 +1737,7 @@ static void ipu_idmac_exit(struct ipu *ipu)
  * IPU common probe / remove
  */
 
-static int ipu_probe(struct platform_device *pdev)
+static int __init ipu_probe(struct platform_device *pdev)
 {
 	struct ipu_platform_data *pdata = pdev->dev.platform_data;
 	struct resource *mem_ipu, *mem_ic;
@@ -1700,7 +1837,7 @@ err_noirq:
 	return ret;
 }
 
-static int ipu_remove(struct platform_device *pdev)
+static int __exit ipu_remove(struct platform_device *pdev)
 {
 	struct ipu *ipu = platform_get_drvdata(pdev);
 
@@ -1725,7 +1862,7 @@ static struct platform_driver ipu_platform_driver = {
 		.name	= "ipu-core",
 		.owner	= THIS_MODULE,
 	},
-	.remove		= ipu_remove,
+	.remove		= __exit_p(ipu_remove),
 };
 
 static int __init ipu_init(void)
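Annotation: on the section annotations — __init/__exit let the kernel discard probe/remove code after boot or when the driver cannot be unloaded, and __exit_p() compiles the .remove pointer to NULL in the built-in case so the driver never references discarded text. A minimal sketch of the pattern for a hypothetical platform driver:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init demo_probe(struct platform_device *pdev)
{
        return 0;
}

static int __exit demo_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver demo_driver = {
        .driver = {
                .name = "demo",
        },
        /* NULL when built in: the __exit section may be discarded */
        .remove = __exit_p(demo_remove),
};

static int __init demo_init(void)
{
        /* probe is __init, so register via the _probe variant */
        return platform_driver_probe(&demo_driver, demo_probe);
}

static void __exit demo_exit(void)
{
        platform_driver_unregister(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");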