author     Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>   2016-11-13 23:20:40 -0500
committer  Mark Brown <broonie@kernel.org>                        2016-11-22 12:25:52 -0500
commit     edce5c496c6af3e5ca6e1bb18f7cf4f6ef6226fa (patch)
tree       31f4a729c1d96d0942a799ca006ee767b52f6250
parent     3e58690b8dbddefb4422295b57a6f214e8aa03fd (diff)
ASoC: rsnd: Request/Release DMA channel each time
The Renesas sound driver currently requests its DMA channel at .probe time and releases it at .remove time, while the DMA itself is only used between .start and .stop. However, Audio DMAC power-on is handled when the channel is requested (= .probe) and power-off when it is released (= .remove), so the Audio DMAC stays powered on for the whole time the driver is loaded.

The best place to solve this would be the DMAEngine side, but the current DMAEngine API design cannot handle the atomic/non-atomic context issue for power on/off. The next best choice is for the sound driver to request and release the DMA channel each time it is used. This patch does that.

Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
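As an aside, the channel lifecycle this patch introduces can be sketched in isolation as below. This is a minimal standalone illustration of the idea only, not driver code: request_channel()/release_channel() stand in for dma_request_channel()/dma_release_channel(), and locking, real error codes and the actual transfer setup are omitted.

/*
 * Minimal sketch of the per-use channel lifecycle (illustration only):
 * the channel is acquired in the non-atomic nolock_start step and
 * released in nolock_stop, so nothing sleeps while a spinlock is held.
 */
#include <stdio.h>
#include <stdlib.h>

struct chan { int id; };

struct dmaen {
        struct chan *chan;      /* valid only between nolock_start/nolock_stop */
};

/* stand-ins for dma_request_channel()/dma_release_channel() */
static struct chan *request_channel(void)
{
        return calloc(1, sizeof(struct chan));
}

static void release_channel(struct chan *c)
{
        free(c);
}

static int nolock_start(struct dmaen *d)        /* may sleep: no spinlock held */
{
        if (d->chan)
                return -1;              /* already holds a channel */

        d->chan = request_channel();

        return d->chan ? 0 : -1;        /* on failure the driver falls back to PIO */
}

static void nolock_stop(struct dmaen *d)        /* may sleep: no spinlock held */
{
        if (d->chan)
                release_channel(d->chan);
        d->chan = NULL;
}

int main(void)
{
        struct dmaen d = { NULL };

        /* each playback/capture run now requests and releases the channel */
        if (nolock_start(&d) == 0) {
                printf("channel held for this stream run only\n");
                /* .start/.stop would configure and run the transfer here */
                nolock_stop(&d);
        }

        return 0;
}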
-rw-r--r--   sound/soc/sh/rcar/dma.c | 184
1 file changed, 107 insertions(+), 77 deletions(-)
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 2f0327714625..3c663a5cfe8b 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -34,6 +34,8 @@ struct rsnd_dmapp {
 
 struct rsnd_dma {
         struct rsnd_mod mod;
+        struct rsnd_mod *mod_from;
+        struct rsnd_mod *mod_to;
         dma_addr_t src_addr;
         dma_addr_t dst_addr;
         union {
@@ -92,6 +94,20 @@ static void rsnd_dmaen_complete(void *data)
         rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
 }
 
+static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
+                                                   struct rsnd_mod *mod_from,
+                                                   struct rsnd_mod *mod_to)
+{
+        if ((!mod_from && !mod_to) ||
+            (mod_from && mod_to))
+                return NULL;
+
+        if (mod_from)
+                return rsnd_mod_dma_req(io, mod_from);
+        else
+                return rsnd_mod_dma_req(io, mod_to);
+}
+
 static int rsnd_dmaen_stop(struct rsnd_mod *mod,
                            struct rsnd_dai_stream *io,
                            struct rsnd_priv *priv)
@@ -99,7 +115,61 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod,
         struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
         struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
 
-        dmaengine_terminate_all(dmaen->chan);
+        if (dmaen->chan) {
+                dmaengine_terminate_all(dmaen->chan);
+        }
+
+        return 0;
+}
+
+static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
+                                  struct rsnd_dai_stream *io,
+                                  struct rsnd_priv *priv)
+{
+        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
+        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
+
+        /*
+         * DMAEngine release uses mutex lock.
+         * Thus, it shouldn't be called under spinlock.
+         * Let's call it under nolock_start
+         */
+        if (dmaen->chan)
+                dma_release_channel(dmaen->chan);
+
+        dmaen->chan = NULL;
+
+        return 0;
+}
+
+static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
+                                   struct rsnd_dai_stream *io,
+                                   struct rsnd_priv *priv)
+{
+        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
+        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
+        struct device *dev = rsnd_priv_to_dev(priv);
+
+        if (dmaen->chan) {
+                dev_err(dev, "it already has dma channel\n");
+                return -EIO;
+        }
+
+        /*
+         * DMAEngine request uses mutex lock.
+         * Thus, it shouldn't be called under spinlock.
+         * Let's call it under nolock_start
+         */
+        dmaen->chan = rsnd_dmaen_request_channel(io,
+                                                 dma->mod_from,
+                                                 dma->mod_to);
+        if (IS_ERR_OR_NULL(dmaen->chan)) {
+                int ret = PTR_ERR(dmaen->chan);
+
+                dmaen->chan = NULL;
+                dev_err(dev, "can't get dma channel\n");
+                return ret;
+        }
 
         return 0;
 }
@@ -113,7 +183,23 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
         struct snd_pcm_substream *substream = io->substream;
         struct device *dev = rsnd_priv_to_dev(priv);
         struct dma_async_tx_descriptor *desc;
+        struct dma_slave_config cfg = {};
         int is_play = rsnd_io_is_play(io);
+        int ret;
+
+        cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+        cfg.src_addr = dma->src_addr;
+        cfg.dst_addr = dma->dst_addr;
+        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+        dev_dbg(dev, "%s[%d] %pad -> %pad\n",
+                rsnd_mod_name(mod), rsnd_mod_id(mod),
+                &cfg.src_addr, &cfg.dst_addr);
+
+        ret = dmaengine_slave_config(dmaen->chan, &cfg);
+        if (ret < 0)
+                return ret;
 
         desc = dmaengine_prep_dma_cyclic(dmaen->chan,
                                          substream->runtime->dma_addr,
@@ -159,97 +245,39 @@ struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
         return chan;
 }
 
-static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
-                                                   struct rsnd_mod *mod_from,
-                                                   struct rsnd_mod *mod_to)
-{
-        if ((!mod_from && !mod_to) ||
-            (mod_from && mod_to))
-                return NULL;
-
-        if (mod_from)
-                return rsnd_mod_dma_req(io, mod_from);
-        else
-                return rsnd_mod_dma_req(io, mod_to);
-}
-
-static int rsnd_dmaen_remove(struct rsnd_mod *mod,
-                             struct rsnd_dai_stream *io,
-                             struct rsnd_priv *priv)
-{
-        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
-        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
-
-        if (dmaen->chan)
-                dma_release_channel(dmaen->chan);
-
-        dmaen->chan = NULL;
-
-        return 0;
-}
-
 static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
                              struct rsnd_dma *dma,
                              struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
 {
-        struct rsnd_mod *mod = rsnd_mod_get(dma);
-        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
         struct rsnd_priv *priv = rsnd_io_to_priv(io);
         struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
-        struct device *dev = rsnd_priv_to_dev(priv);
-        struct dma_slave_config cfg = {};
-        int is_play = rsnd_io_is_play(io);
-        int ret;
-
-        if (dmaen->chan) {
-                dev_err(dev, "it already has dma channel\n");
-                return -EIO;
-        }
-
-        dmaen->chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
-
-        if (IS_ERR_OR_NULL(dmaen->chan)) {
-                dmaen->chan = NULL;
-                dev_err(dev, "can't get dma channel\n");
-                goto rsnd_dma_channel_err;
+        struct dma_chan *chan;
+
+        /* try to get DMAEngine channel */
+        chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
+        if (IS_ERR_OR_NULL(chan)) {
+                /*
+                 * DMA failed. try to PIO mode
+                 * see
+                 *      rsnd_ssi_fallback()
+                 *      rsnd_rdai_continuance_probe()
+                 */
+                return -EAGAIN;
         }
 
-        cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
-        cfg.src_addr = dma->src_addr;
-        cfg.dst_addr = dma->dst_addr;
-        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-
-        dev_dbg(dev, "%s[%d] %pad -> %pad\n",
-                rsnd_mod_name(mod), rsnd_mod_id(mod),
-                &cfg.src_addr, &cfg.dst_addr);
-
-        ret = dmaengine_slave_config(dmaen->chan, &cfg);
-        if (ret < 0)
-                goto rsnd_dma_attach_err;
+        dma_release_channel(chan);
 
         dmac->dmaen_num++;
 
         return 0;
-
-rsnd_dma_attach_err:
-        rsnd_dmaen_remove(mod, io, priv);
-rsnd_dma_channel_err:
-
-        /*
-         * DMA failed. try to PIO mode
-         * see
-         *      rsnd_ssi_fallback()
-         *      rsnd_rdai_continuance_probe()
-         */
-        return -EAGAIN;
 }
 
 static struct rsnd_mod_ops rsnd_dmaen_ops = {
         .name = "audmac",
+        .nolock_start = rsnd_dmaen_nolock_start,
+        .nolock_stop = rsnd_dmaen_nolock_stop,
         .start = rsnd_dmaen_start,
         .stop = rsnd_dmaen_stop,
-        .remove = rsnd_dmaen_remove,
 };
 
 /*
@@ -671,9 +699,6 @@ int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
 
         *dma_mod = rsnd_mod_get(dma);
 
-        dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
-        dma->dst_addr = rsnd_dma_addr(io, mod_to, is_play, 0);
-
         ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
                             rsnd_mod_get_status, type, dma_id);
         if (ret < 0)
@@ -687,6 +712,11 @@ int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
                 ret = attach(io, dma, mod_from, mod_to);
                 if (ret < 0)
                         return ret;
+
+                dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
+                dma->dst_addr = rsnd_dma_addr(io, mod_to, is_play, 0);
+                dma->mod_from = mod_from;
+                dma->mod_to = mod_to;
         }
 
         ret = rsnd_dai_connect(*dma_mod, io, type);