author		Dan Williams <dan.j.williams@intel.com>	2011-02-14 05:40:46 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2011-02-14 05:40:46 -0500
commit		e19d1d4988f8020c25bf1758f9a898e1374cef35 (patch)
tree		5bd1ba3f13178becefad85b41a8bfe8c4652cee2 /drivers/dma/imx-sdma.c
parent		a646bd7f0824d3e0f02ff8d7410704f965de01bc (diff)
parent		60f1df5dc6ecd07befc332ae30109fb86505634c (diff)
Merge branch 'imx' into dmaengine-fixes
Diffstat (limited to 'drivers/dma/imx-sdma.c')
-rw-r--r--	drivers/dma/imx-sdma.c	76
1 file changed, 41 insertions(+), 35 deletions(-)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index e89fd1033df9..b6d1455fa936 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -230,7 +230,7 @@ struct sdma_engine;
  * struct sdma_channel - housekeeping for a SDMA channel
  *
  * @sdma		pointer to the SDMA engine for this channel
- * @channel		the channel number, matches dmaengine chan_id
+ * @channel		the channel number, matches dmaengine chan_id + 1
  * @direction		transfer type. Needed for setting SDMA script
  * @peripheral_type	Peripheral type. Needed for setting SDMA script
  * @event_id0		aka dma request line
@@ -301,6 +301,7 @@ struct sdma_firmware_header {
 
 struct sdma_engine {
 	struct device			*dev;
+	struct device_dma_parameters	dma_parms;
 	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 	struct sdma_channel_control	*channel_control;
 	void __iomem			*regs;
@@ -449,7 +450,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
 		if (bd->mode.status & BD_RROR)
 			sdmac->status = DMA_ERROR;
 		else
-			sdmac->status = DMA_SUCCESS;
+			sdmac->status = DMA_IN_PROGRESS;
 
 		bd->mode.status |= BD_DONE;
 		sdmac->buf_tail++;
@@ -798,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	cookie = sdma_assign_cookie(sdmac);
 
-	sdma_enable_channel(sdma, tx->chan->chan_id);
+	sdma_enable_channel(sdma, sdmac->channel);
 
 	spin_unlock_irq(&sdmac->lock);
 
@@ -811,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
 	struct imx_dma_data *data = chan->private;
 	int prio, ret;
 
-	/* No need to execute this for internal channel 0 */
-	if (chan->chan_id == 0)
-		return 0;
-
 	if (!data)
 		return -EINVAL;
 
@@ -879,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int ret, i, count;
-	int channel = chan->chan_id;
+	int channel = sdmac->channel;
 	struct scatterlist *sg;
 
 	if (sdmac->status == DMA_IN_PROGRESS)
@@ -924,22 +921,33 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 			ret = -EINVAL;
 			goto err_out;
 		}
-		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+		switch (sdmac->word_size) {
+		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 			bd->mode.command = 0;
-		else
-			bd->mode.command = sdmac->word_size;
+			if (count & 3 || sg->dma_address & 3)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_2_BYTES:
+			bd->mode.command = 2;
+			if (count & 1 || sg->dma_address & 1)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_1_BYTE:
+			bd->mode.command = 1;
+			break;
+		default:
+			return NULL;
+		}
 
 		param = BD_DONE | BD_EXTD | BD_CONT;
 
-		if (sdmac->flags & IMX_DMA_SG_LOOP) {
+		if (i + 1 == sg_len) {
 			param |= BD_INTR;
-			if (i + 1 == sg_len)
-				param |= BD_WRAP;
+			param |= BD_LAST;
+			param &= ~BD_CONT;
 		}
 
-		if (i + 1 == sg_len)
-			param |= BD_INTR;
-
 		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
 			i, count, sg->dma_address,
 			param & BD_WRAP ? "wrap" : "",
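Note: the switch above makes the buffer-descriptor width encoding explicit (as the cases show, mode.command takes 0 for 32-bit, 2 for 16-bit and 1 for 8-bit transfers) and rejects buffers that are not naturally aligned for the chosen width. The `count & 3` / `dma_address & 3` tests are the usual power-of-two alignment idiom; a minimal stand-alone sketch of that idiom, not driver code:

	/* addr is N-byte aligned (N a power of two) exactly when its
	 * low log2(N) bits are all zero. */
	static bool is_aligned(dma_addr_t addr, unsigned int n)
	{
		return (addr & (n - 1)) == 0;
	}

For n == 4 this reduces to the `& 3` test used above.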
@@ -953,6 +961,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
 	return &sdmac->desc;
 err_out:
+	sdmac->status = DMA_ERROR;
 	return NULL;
 }
 
@@ -963,7 +972,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int num_periods = buf_len / period_len;
-	int channel = chan->chan_id;
+	int channel = sdmac->channel;
 	int ret, i = 0, buf = 0;
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
@@ -1066,14 +1075,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	dma_cookie_t last_used;
-	enum dma_status ret;
 
 	last_used = chan->cookie;
 
-	ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
 	dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
 
-	return ret;
+	return sdmac->status;
 }
 
 static void sdma_issue_pending(struct dma_chan *chan)
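Note: with sdma_tx_status() now returning the channel's own status field instead of comparing cookies, a client polling a looping channel keeps seeing DMA_IN_PROGRESS even as individual periods complete. A minimal polling sketch using the generic dmaengine helper; `chan` and `cookie` are assumed to come from an earlier dma_request_channel()/tx_submit() and the names are illustrative:

	dma_cookie_t last, used;
	enum dma_status status;

	do {
		status = dma_async_is_tx_complete(chan, cookie, &last, &used);
		cpu_relax();	/* busy-wait only for illustration */
	} while (status == DMA_IN_PROGRESS);

	if (status == DMA_ERROR)
		pr_err("SDMA transfer failed\n");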
@@ -1237,7 +1244,6 @@ static int __init sdma_probe(struct platform_device *pdev)
 	struct resource *iores;
 	struct sdma_platform_data *pdata = pdev->dev.platform_data;
 	int i;
-	dma_cap_mask_t mask;
 	struct sdma_engine *sdma;
 
 	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
@@ -1280,6 +1286,9 @@ static int __init sdma_probe(struct platform_device *pdev)
 
 	sdma->version = pdata->sdma_version;
 
+	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
 	INIT_LIST_HEAD(&sdma->dma_device.channels);
 	/* Initialize channel parameters */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -1288,15 +1297,17 @@ static int __init sdma_probe(struct platform_device *pdev)
 		sdmac->sdma = sdma;
 		spin_lock_init(&sdmac->lock);
 
-		dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
-		dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
-
 		sdmac->chan.device = &sdma->dma_device;
-		sdmac->chan.chan_id = i;
 		sdmac->channel = i;
 
-		/* Add the channel to the DMAC list */
-		list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+		/*
+		 * Add the channel to the DMAC list. Do not add channel 0 though
+		 * because we need it internally in the SDMA driver. This also means
+		 * that channel 0 in dmaengine counting matches sdma channel 1.
+		 */
+		if (i)
+			list_add_tail(&sdmac->chan.device_node,
+					&sdma->dma_device.channels);
 	}
 
 	ret = sdma_init(sdma);
@@ -1317,6 +1328,8 @@ static int __init sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
 	sdma->dma_device.device_control = sdma_control;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
+	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
 
 	ret = dma_async_device_register(&sdma->dma_device);
 	if (ret) {
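Note: dma_set_max_seg_size() stores the limit through dev->dma_parms and fails if that pointer is NULL, which is why the hunk installs sdma->dma_parms first. Once registered, a client can query the 65535-byte cap before sizing scatterlist entries; a small sketch, with an illustrative function name:

	/* Returns how many scatterlist entries a buffer of `len` bytes
	 * needs if no entry may exceed the engine's segment limit. */
	static unsigned int sdma_example_nents(struct dma_chan *chan, size_t len)
	{
		unsigned int max_seg = dma_get_max_seg_size(chan->device->dev);

		return DIV_ROUND_UP(len, max_seg);
	}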
@@ -1324,13 +1337,6 @@ static int __init sdma_probe(struct platform_device *pdev)
 		goto err_init;
 	}
 
-	/* request channel 0. This is an internal control channel
-	 * to the SDMA engine and not available to clients.
-	 */
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	dma_request_channel(mask, NULL, NULL);
-
 	dev_info(sdma->dev, "initialized\n");
 
 	return 0;
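Note: after this merge the driver keeps SDMA channel 0 entirely to itself (it is never put on the dmaengine channel list), so the probe-time dma_request_channel() self-grab above becomes unnecessary and is dropped. A client now acquires a slave channel the standard dmaengine way, passing its imx_dma_data through chan->private as sdma_alloc_chan_resources() expects. A minimal sketch; the imx_dma_data field names follow the mach-imx platform headers of this era, and the SSI peripheral type and request line are only assumed examples:

	#include <linux/dmaengine.h>
	#include <mach/dma.h>	/* struct imx_dma_data; header path assumed for this era */

	static bool sdma_chan_filter(struct dma_chan *chan, void *param)
	{
		/* Hand the peripheral description to the driver; it is read
		 * back from chan->private in sdma_alloc_chan_resources(). */
		chan->private = param;
		return true;
	}

	static struct dma_chan *example_request_chan(void)
	{
		static struct imx_dma_data data = {
			.dma_request	 = 1,			/* board-specific event line */
			.peripheral_type = IMX_DMATYPE_SSI,	/* assumed example */
			.priority	 = 1,
		};
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, sdma_chan_filter, &data);
	}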