author     Sascha Hauer <s.hauer@pengutronix.de>  2011-01-31 06:42:48 -0500
committer  Sascha Hauer <s.hauer@pengutronix.de>  2011-01-31 06:42:48 -0500
commit     2335d338a20a6f5ac70ae4338733c8ff5933f978 (patch)
tree       32d4d5270a761c72854258c9c3d1180de34ebb4b
parent     f8a356ff96a9070156f863e4f7716e2a0eb8c995 (diff)
parent     23889c6352ab4a842a30221bb412ff49954b2fb3 (diff)
Merge branch 'dmaengine-sdma' into dmaengine
-rw-r--r--  drivers/dma/imx-sdma.c | 60
1 file changed, 35 insertions, 25 deletions
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d5a5d4d9c19b..1eb3f0077403 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -230,7 +230,7 @@ struct sdma_engine;
  * struct sdma_channel - housekeeping for a SDMA channel
  *
  * @sdma pointer to the SDMA engine for this channel
- * @channel the channel number, matches dmaengine chan_id
+ * @channel the channel number, matches dmaengine chan_id + 1
  * @direction transfer type. Needed for setting SDMA script
  * @peripheral_type Peripheral type. Needed for setting SDMA script
  * @event_id0 aka dma request line
@@ -301,6 +301,7 @@ struct sdma_firmware_header {
 
 struct sdma_engine {
 	struct device *dev;
+	struct device_dma_parameters dma_parms;
 	struct sdma_channel channel[MAX_DMA_CHANNELS];
 	struct sdma_channel_control *channel_control;
 	void __iomem *regs;
@@ -798,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	cookie = sdma_assign_cookie(sdmac);
 
-	sdma_enable_channel(sdma, tx->chan->chan_id);
+	sdma_enable_channel(sdma, sdmac->channel);
 
 	spin_unlock_irq(&sdmac->lock);
 
@@ -811,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
 	struct imx_dma_data *data = chan->private;
 	int prio, ret;
 
-	/* No need to execute this for internal channel 0 */
-	if (chan->chan_id == 0)
-		return 0;
-
 	if (!data)
 		return -EINVAL;
 
@@ -879,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int ret, i, count;
-	int channel = chan->chan_id;
+	int channel = sdmac->channel;
 	struct scatterlist *sg;
 
 	if (sdmac->status == DMA_IN_PROGRESS)
@@ -924,10 +921,24 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 			ret = -EINVAL;
 			goto err_out;
 		}
-		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+		switch (sdmac->word_size) {
+		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 			bd->mode.command = 0;
-		else
-			bd->mode.command = sdmac->word_size;
+			if (count & 3 || sg->dma_address & 3)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_2_BYTES:
+			bd->mode.command = 2;
+			if (count & 1 || sg->dma_address & 1)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_1_BYTE:
+			bd->mode.command = 1;
+			break;
+		default:
+			return NULL;
+		}
 
 		param = BD_DONE | BD_EXTD | BD_CONT;
 
@@ -963,7 +974,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int num_periods = buf_len / period_len;
-	int channel = chan->chan_id;
+	int channel = sdmac->channel;
 	int ret, i = 0, buf = 0;
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
@@ -1237,7 +1248,6 @@ static int __init sdma_probe(struct platform_device *pdev)
 	struct resource *iores;
 	struct sdma_platform_data *pdata = pdev->dev.platform_data;
 	int i;
-	dma_cap_mask_t mask;
 	struct sdma_engine *sdma;
 
 	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
@@ -1280,6 +1290,9 @@ static int __init sdma_probe(struct platform_device *pdev)
 
 	sdma->version = pdata->sdma_version;
 
+	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
 	INIT_LIST_HEAD(&sdma->dma_device.channels);
 	/* Initialize channel parameters */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -1288,15 +1301,17 @@ static int __init sdma_probe(struct platform_device *pdev)
 		sdmac->sdma = sdma;
 		spin_lock_init(&sdmac->lock);
 
-		dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
-		dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
-
 		sdmac->chan.device = &sdma->dma_device;
-		sdmac->chan.chan_id = i;
 		sdmac->channel = i;
 
-		/* Add the channel to the DMAC list */
-		list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+		/*
+		 * Add the channel to the DMAC list. Do not add channel 0 though
+		 * because we need it internally in the SDMA driver. This also means
+		 * that channel 0 in dmaengine counting matches sdma channel 1.
+		 */
+		if (i)
+			list_add_tail(&sdmac->chan.device_node,
+					&sdma->dma_device.channels);
 	}
 
 	ret = sdma_init(sdma);
@@ -1317,6 +1332,8 @@ static int __init sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
 	sdma->dma_device.device_control = sdma_control;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
+	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
 
 	ret = dma_async_device_register(&sdma->dma_device);
 	if (ret) {
@@ -1324,13 +1341,6 @@ static int __init sdma_probe(struct platform_device *pdev)
 		goto err_init;
 	}
 
-	/* request channel 0. This is an internal control channel
-	 * to the SDMA engine and not available to clients.
-	 */
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	dma_request_channel(mask, NULL, NULL);
-
 	dev_info(sdma->dev, "initialized\n");
 
 	return 0;
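
With channel 0 kept off the dmaengine channel list, clients reach the remaining SDMA channels only through the generic dmaengine API. The sketch below is illustrative and not part of this commit: the helper names sdma_chan_filter() and request_sdma_chan() are hypothetical, and the struct imx_dma_data fields and mach/dma.h location are assumptions based on the platform headers of this kernel generation. It shows how chan->private gets populated so that sdma_alloc_chan_resources() does not fail with -EINVAL.

/*
 * Illustrative sketch only, not part of this commit. Field names of
 * struct imx_dma_data (dma_request, peripheral_type, priority) and the
 * mach/dma.h header location are assumptions.
 */
#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool sdma_chan_filter(struct dma_chan *chan, void *param)
{
	/*
	 * A real client would first check that chan belongs to the SDMA
	 * device. Handing over the platform data is what matters here:
	 * sdma_alloc_chan_resources() reads it back via chan->private
	 * and returns -EINVAL if it is NULL.
	 */
	chan->private = param;
	return true;
}

static struct dma_chan *request_sdma_chan(struct imx_dma_data *data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * Channel 0 is no longer on the dmaengine list, so any channel
	 * returned here is usable; per the updated kernel-doc above,
	 * its sdmac->channel equals chan_id + 1.
	 */
	return dma_request_channel(mask, sdma_chan_filter, data);
}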