diff options
author:    Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>  2016-11-13 23:20:56 -0500
committer: Mark Brown <broonie@kernel.org>  2016-11-22 12:25:52 -0500
commit:    4821d914fe747a91453021675a74069776f0b819 (patch)
tree:      3778cad9a71165748e99c2a7c998a928475ba0da
parent:    edce5c496c6af3e5ca6e1bb18f7cf4f6ef6226fa (diff)
ASoC: rsnd: use dma_sync_single_for_xxx() for IOMMU
The IOMMU requires the DMA mapping functions in order to be used. One solution
would be to use a DMA-mapped device on snd_pcm_lib_preallocate_pages_for_all()
with SNDRV_DMA_TYPE_DEV, but the timing of pcm_new and of the DMA mapping do
not match. Thus, this patch uses SNDRV_DMA_TYPE_CONTINUOUS for pcm_new,
and uses dma_sync_single_for_xxx() for each transfer instead.
Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
-rw-r--r-- | sound/soc/sh/rcar/core.c | 4 | ||||
-rw-r--r-- | sound/soc/sh/rcar/dma.c | 84 |
2 files changed, 82 insertions, 6 deletions
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 9ffa29941ceb..912dc62ff9c7 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c | |||
@@ -1126,8 +1126,8 @@ static int rsnd_pcm_new(struct snd_soc_pcm_runtime *rtd) | |||
1126 | 1126 | ||
1127 | return snd_pcm_lib_preallocate_pages_for_all( | 1127 | return snd_pcm_lib_preallocate_pages_for_all( |
1128 | rtd->pcm, | 1128 | rtd->pcm, |
1129 | SNDRV_DMA_TYPE_DEV, | 1129 | SNDRV_DMA_TYPE_CONTINUOUS, |
1130 | rtd->card->snd_card->dev, | 1130 | snd_dma_continuous_data(GFP_KERNEL), |
1131 | PREALLOC_BUFFER, PREALLOC_BUFFER_MAX); | 1131 | PREALLOC_BUFFER, PREALLOC_BUFFER_MAX); |
1132 | } | 1132 | } |
1133 | 1133 | ||
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c index 3c663a5cfe8b..1f405c833867 100644 --- a/sound/soc/sh/rcar/dma.c +++ b/sound/soc/sh/rcar/dma.c | |||
@@ -25,6 +25,10 @@ | |||
25 | 25 | ||
26 | struct rsnd_dmaen { | 26 | struct rsnd_dmaen { |
27 | struct dma_chan *chan; | 27 | struct dma_chan *chan; |
28 | dma_addr_t dma_buf; | ||
29 | unsigned int dma_len; | ||
30 | unsigned int dma_period; | ||
31 | unsigned int dma_cnt; | ||
28 | }; | 32 | }; |
29 | 33 | ||
30 | struct rsnd_dmapp { | 34 | struct rsnd_dmapp { |
@@ -58,10 +62,38 @@ struct rsnd_dma_ctrl { | |||
58 | /* | 62 | /* |
59 | * Audio DMAC | 63 | * Audio DMAC |
60 | */ | 64 | */ |
65 | #define rsnd_dmaen_sync(dmaen, io, i) __rsnd_dmaen_sync(dmaen, io, i, 1) | ||
66 | #define rsnd_dmaen_unsync(dmaen, io, i) __rsnd_dmaen_sync(dmaen, io, i, 0) | ||
67 | static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen, struct rsnd_dai_stream *io, | ||
68 | int i, int sync) | ||
69 | { | ||
70 | struct device *dev = dmaen->chan->device->dev; | ||
71 | enum dma_data_direction dir; | ||
72 | int is_play = rsnd_io_is_play(io); | ||
73 | dma_addr_t buf; | ||
74 | int len, max; | ||
75 | size_t period; | ||
76 | |||
77 | len = dmaen->dma_len; | ||
78 | period = dmaen->dma_period; | ||
79 | max = len / period; | ||
80 | i = i % max; | ||
81 | buf = dmaen->dma_buf + (period * i); | ||
82 | |||
83 | dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE; | ||
84 | |||
85 | if (sync) | ||
86 | dma_sync_single_for_device(dev, buf, period, dir); | ||
87 | else | ||
88 | dma_sync_single_for_cpu(dev, buf, period, dir); | ||
89 | } | ||
90 | |||
61 | static void __rsnd_dmaen_complete(struct rsnd_mod *mod, | 91 | static void __rsnd_dmaen_complete(struct rsnd_mod *mod, |
62 | struct rsnd_dai_stream *io) | 92 | struct rsnd_dai_stream *io) |
63 | { | 93 | { |
64 | struct rsnd_priv *priv = rsnd_mod_to_priv(mod); | 94 | struct rsnd_priv *priv = rsnd_mod_to_priv(mod); |
95 | struct rsnd_dma *dma = rsnd_mod_to_dma(mod); | ||
96 | struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma); | ||
65 | bool elapsed = false; | 97 | bool elapsed = false; |
66 | unsigned long flags; | 98 | unsigned long flags; |
67 | 99 | ||
@@ -78,9 +110,22 @@ static void __rsnd_dmaen_complete(struct rsnd_mod *mod, | |||
78 | */ | 110 | */ |
79 | spin_lock_irqsave(&priv->lock, flags); | 111 | spin_lock_irqsave(&priv->lock, flags); |
80 | 112 | ||
81 | if (rsnd_io_is_working(io)) | 113 | if (rsnd_io_is_working(io)) { |
114 | rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt); | ||
115 | |||
116 | /* | ||
117 | * Next period is already started. | ||
118 | * Let's sync Next Next period | ||
119 | * see | ||
120 | * rsnd_dmaen_start() | ||
121 | */ | ||
122 | rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2); | ||
123 | |||
82 | elapsed = rsnd_dai_pointer_update(io, io->byte_per_period); | 124 | elapsed = rsnd_dai_pointer_update(io, io->byte_per_period); |
83 | 125 | ||
126 | dmaen->dma_cnt++; | ||
127 | } | ||
128 | |||
84 | spin_unlock_irqrestore(&priv->lock, flags); | 129 | spin_unlock_irqrestore(&priv->lock, flags); |
85 | 130 | ||
86 | if (elapsed) | 131 | if (elapsed) |
@@ -116,7 +161,12 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod, | |||
116 | struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma); | 161 | struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma); |
117 | 162 | ||
118 | if (dmaen->chan) { | 163 | if (dmaen->chan) { |
164 | int is_play = rsnd_io_is_play(io); | ||
165 | |||
119 | dmaengine_terminate_all(dmaen->chan); | 166 | dmaengine_terminate_all(dmaen->chan); |
167 | dma_unmap_single(dmaen->chan->device->dev, | ||
168 | dmaen->dma_buf, dmaen->dma_len, | ||
169 | is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | ||
120 | } | 170 | } |
121 | 171 | ||
122 | return 0; | 172 | return 0; |
@@ -184,7 +234,11 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod, | |||
184 | struct device *dev = rsnd_priv_to_dev(priv); | 234 | struct device *dev = rsnd_priv_to_dev(priv); |
185 | struct dma_async_tx_descriptor *desc; | 235 | struct dma_async_tx_descriptor *desc; |
186 | struct dma_slave_config cfg = {}; | 236 | struct dma_slave_config cfg = {}; |
237 | dma_addr_t buf; | ||
238 | size_t len; | ||
239 | size_t period; | ||
187 | int is_play = rsnd_io_is_play(io); | 240 | int is_play = rsnd_io_is_play(io); |
241 | int i; | ||
188 | int ret; | 242 | int ret; |
189 | 243 | ||
190 | cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; | 244 | cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; |
@@ -201,10 +255,19 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod, | |||
201 | if (ret < 0) | 255 | if (ret < 0) |
202 | return ret; | 256 | return ret; |
203 | 257 | ||
258 | len = snd_pcm_lib_buffer_bytes(substream); | ||
259 | period = snd_pcm_lib_period_bytes(substream); | ||
260 | buf = dma_map_single(dmaen->chan->device->dev, | ||
261 | substream->runtime->dma_area, | ||
262 | len, | ||
263 | is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | ||
264 | if (dma_mapping_error(dmaen->chan->device->dev, buf)) { | ||
265 | dev_err(dev, "dma map failed\n"); | ||
266 | return -EIO; | ||
267 | } | ||
268 | |||
204 | desc = dmaengine_prep_dma_cyclic(dmaen->chan, | 269 | desc = dmaengine_prep_dma_cyclic(dmaen->chan, |
205 | substream->runtime->dma_addr, | 270 | buf, len, period, |
206 | snd_pcm_lib_buffer_bytes(substream), | ||
207 | snd_pcm_lib_period_bytes(substream), | ||
208 | is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, | 271 | is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, |
209 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 272 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
210 | 273 | ||
@@ -216,6 +279,19 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod, | |||
216 | desc->callback = rsnd_dmaen_complete; | 279 | desc->callback = rsnd_dmaen_complete; |
217 | desc->callback_param = rsnd_mod_get(dma); | 280 | desc->callback_param = rsnd_mod_get(dma); |
218 | 281 | ||
282 | dmaen->dma_buf = buf; | ||
283 | dmaen->dma_len = len; | ||
284 | dmaen->dma_period = period; | ||
285 | dmaen->dma_cnt = 0; | ||
286 | |||
287 | /* | ||
288 | * synchronize this and next period | ||
289 | * see | ||
290 | * __rsnd_dmaen_complete() | ||
291 | */ | ||
292 | for (i = 0; i < 2; i++) | ||
293 | rsnd_dmaen_sync(dmaen, io, i); | ||
294 | |||
219 | if (dmaengine_submit(desc) < 0) { | 295 | if (dmaengine_submit(desc) < 0) { |
220 | dev_err(dev, "dmaengine_submit() fail\n"); | 296 | dev_err(dev, "dmaengine_submit() fail\n"); |
221 | return -EIO; | 297 | return -EIO; |