author		Apelete Seketeli <apelete@seketeli.net>	2014-07-21 00:37:45 -0400
committer	Ulf Hansson <ulf.hansson@linaro.org>	2014-09-09 07:58:59 -0400
commit		bb2f45927f8e0d1fc0633f65cc1f17a40c80bf24 (patch)
tree		d36d7d78861e7890dd1a18f59f314c9ec719b726
parent		7ca27a6f80a4042666a28977ff8ee3aa527c6cd4 (diff)
mmc: jz4740: prepare next dma transfer in parallel with current transfer
Make use of the MMC asynchronous request capability to prepare the next DMA transfer request in parallel with the current transfer. This is done by adding pre-request and post-request callbacks that are used by the MMC framework during an active data transfer. It should help reduce the impact of DMA preparation overhead on the SD card performance.

Signed-off-by: Apelete Seketeli <apelete@seketeli.net>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
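For illustration, the sketch below shows the callback flow this patch plugs into. It is a minimal, simplified sketch only, not the actual MMC core code: sketch_issue_two_requests() and the commented wait step are hypothetical placeholders; only the .request/.pre_req/.post_req hooks and the cookie behaviour come from the patch itself.

/*
 * Illustrative only: how a caller can overlap DMA preparation of the
 * next request with an active transfer via the host's hooks.
 */
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>

static void sketch_issue_two_requests(struct mmc_host *host,
				      struct mmc_request *cur,
				      struct mmc_request *next)
{
	/* Start the current transfer (placeholder for the real submit path). */
	host->ops->request(host, cur);

	/*
	 * While 'cur' is still moving data, let the host driver map the
	 * scatterlist of 'next' (jz4740_mmc_pre_request() records the
	 * sg_len and a cookie in host->next_data).
	 */
	if (host->ops->pre_req)
		host->ops->pre_req(host, next, false);

	/* ... wait for 'cur' to complete (placeholder) ... */

	/* Release the DMA mapping of the finished request. */
	if (host->ops->post_req)
		host->ops->post_req(host, cur, 0);

	/*
	 * 'next' now starts with its mapping already prepared:
	 * jz4740_mmc_prepare_dma_data() recognizes the cookie and
	 * skips the dma_map_sg() call.
	 */
	host->ops->request(host, next);
}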
-rw-r--r--	drivers/mmc/host/jz4740_mmc.c	138
1 file changed, 116 insertions(+), 22 deletions(-)
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 049b13353917..14738cddcadb 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -114,6 +114,11 @@ enum jz4740_mmc_state {
 	JZ4740_MMC_STATE_DONE,
 };
 
+struct jz4740_mmc_host_next {
+	int sg_len;
+	s32 cookie;
+};
+
 struct jz4740_mmc_host {
 	struct mmc_host *mmc;
 	struct platform_device *pdev;
@@ -143,6 +148,7 @@ struct jz4740_mmc_host {
 	/* DMA support */
 	struct dma_chan *dma_rx;
 	struct dma_chan *dma_tx;
+	struct jz4740_mmc_host_next next_data;
 	bool use_dma;
 	int sg_len;
 
@@ -184,6 +190,9 @@ static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
 		goto free_master_write;
 	}
 
+	/* Initialize DMA pre request cookie */
+	host->next_data.cookie = 1;
+
 	return 0;
 
 free_master_write:
@@ -196,23 +205,72 @@ static inline int jz4740_mmc_get_dma_dir(struct mmc_data *data)
 	return (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 }
 
+static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
+							struct mmc_data *data)
+{
+	return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
+}
+
 static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
 				 struct mmc_data *data)
 {
-	struct dma_chan *chan;
+	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 	enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
 
-	if (dir == DMA_TO_DEVICE)
-		chan = host->dma_tx;
-	else
-		chan = host->dma_rx;
-
 	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
 }
 
+/* Prepares DMA data for current/next transfer, returns non-zero on failure */
+static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
+					struct mmc_data *data,
+					struct jz4740_mmc_host_next *next,
+					struct dma_chan *chan)
+{
+	struct jz4740_mmc_host_next *next_data = &host->next_data;
+	enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
+	int sg_len;
+
+	if (!next && data->host_cookie &&
+	    data->host_cookie != host->next_data.cookie) {
+		dev_warn(mmc_dev(host->mmc),
+			 "[%s] invalid cookie: data->host_cookie %d host->next_data.cookie %d\n",
+			 __func__,
+			 data->host_cookie,
+			 host->next_data.cookie);
+		data->host_cookie = 0;
+	}
+
+	/* Check if next job is already prepared */
+	if (next || data->host_cookie != host->next_data.cookie) {
+		sg_len = dma_map_sg(chan->device->dev,
+				    data->sg,
+				    data->sg_len,
+				    dir);
+
+	} else {
+		sg_len = next_data->sg_len;
+		next_data->sg_len = 0;
+	}
+
+	if (sg_len <= 0) {
+		dev_err(mmc_dev(host->mmc),
+			"Failed to map scatterlist for DMA operation\n");
+		return -EINVAL;
+	}
+
+	if (next) {
+		next->sg_len = sg_len;
+		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+	} else
+		host->sg_len = sg_len;
+
+	return 0;
+}
+
 static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 					 struct mmc_data *data)
 {
+	int ret;
 	struct dma_chan *chan;
 	struct dma_async_tx_descriptor *desc;
 	struct dma_slave_config conf = {
@@ -221,9 +279,8 @@ static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 	};
-	enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
 
-	if (dir == DMA_TO_DEVICE) {
+	if (data->flags & MMC_DATA_WRITE) {
 		conf.direction = DMA_MEM_TO_DEV;
 		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
 		conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
@@ -235,16 +292,9 @@ static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 		chan = host->dma_rx;
 	}
 
-	host->sg_len = dma_map_sg(chan->device->dev,
-				  data->sg,
-				  data->sg_len,
-				  dir);
-
-	if (host->sg_len == 0) {
-		dev_err(mmc_dev(host->mmc),
-			"Failed to map scatterlist for DMA operation\n");
-		return -EINVAL;
-	}
+	ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan);
+	if (ret)
+		return ret;
 
 	dmaengine_slave_config(chan, &conf);
 	desc = dmaengine_prep_slave_sg(chan,
@@ -269,6 +319,43 @@ dma_unmap:
 	return -ENOMEM;
 }
 
+static void jz4740_mmc_pre_request(struct mmc_host *mmc,
+				   struct mmc_request *mrq,
+				   bool is_first_req)
+{
+	struct jz4740_mmc_host *host = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+	struct jz4740_mmc_host_next *next_data = &host->next_data;
+
+	BUG_ON(data->host_cookie);
+
+	if (host->use_dma) {
+		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
+
+		if (jz4740_mmc_prepare_dma_data(host, data, next_data, chan))
+			data->host_cookie = 0;
+	}
+}
+
+static void jz4740_mmc_post_request(struct mmc_host *mmc,
+				    struct mmc_request *mrq,
+				    int err)
+{
+	struct jz4740_mmc_host *host = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+
+	if (host->use_dma && data->host_cookie) {
+		jz4740_mmc_dma_unmap(host, data);
+		data->host_cookie = 0;
+	}
+
+	if (err) {
+		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
+
+		dmaengine_terminate_all(chan);
+	}
+}
+
 /*----------------------------------------------------------------------------*/
 
 static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
@@ -627,14 +714,19 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
 
 	case JZ4740_MMC_STATE_TRANSFER_DATA:
 		if (host->use_dma) {
-			/* Use DMA if enabled, data transfer direction was
-			 * defined before in jz_mmc_prepare_data_transfer().
+			/* Use DMA if enabled.
+			 * Data transfer direction is defined later by
+			 * relying on data flags in
+			 * jz4740_mmc_prepare_dma_data() and
+			 * jz4740_mmc_start_dma_transfer().
 			 */
 			timeout = jz4740_mmc_start_dma_transfer(host, data);
 			data->bytes_xfered = data->blocks * data->blksz;
 		} else if (data->flags & MMC_DATA_READ)
-			/* If DMA is not enabled, rely on data flags
-			 * to establish data transfer direction.
+			/* Use PIO if DMA is not enabled.
+			 * Data transfer direction was defined before
+			 * by relying on data flags in
+			 * jz_mmc_prepare_data_transfer().
 			 */
 			timeout = jz4740_mmc_read_data(host, data);
 		else
@@ -809,6 +901,8 @@ static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 
 static const struct mmc_host_ops jz4740_mmc_ops = {
 	.request	= jz4740_mmc_request,
+	.pre_req	= jz4740_mmc_pre_request,
+	.post_req	= jz4740_mmc_post_request,
 	.set_ios	= jz4740_mmc_set_ios,
 	.get_ro		= mmc_gpio_get_ro,
 	.get_cd		= mmc_gpio_get_cd,