path: root/drivers/mmc
author      Per Forlin <per.forlin@linaro.org>    2011-07-01 12:55:24 -0400
committer   Chris Ball <cjb@laptop.org>           2011-07-20 17:21:11 -0400
commit      58c7ccbf9109abcc6b7ed2f76c21ebee244d31a8 (patch)
tree        967a7f70d04bbe02e1999f48c66234b3b37dfc91 /drivers/mmc
parent      9782aff8df7e32e7745cc484f34ec471d9251651 (diff)
mmc: mmci: implement pre_req() and post_req()
pre_req() runs dma_map_sg() and prepares the DMA descriptor for the next
mmc data transfer. post_req() runs dma_unmap_sg(). If pre_req() is not
called before mmci_request(), mmci_request() prepares the cache and DMA
just as it did before. Use of pre_req() and post_req() is optional for
mmci.

Signed-off-by: Per Forlin <per.forlin@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
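For illustration, below is a minimal stand-alone sketch (not the mmc core's actual code) of the calling pattern these optional hooks enable: while transfer N is still running on the controller, pre_req() maps and prepares the descriptor for transfer N+1, and post_req() unmaps transfer N once it has completed. The stub types, issue_pipelined() and wait_for_transfer() are placeholders invented for this example.

/*
 * Sketch of the pre_req()/post_req() pipelining pattern.
 * All names and types here are simplified placeholders, not the real API.
 */
#include <stdbool.h>
#include <stddef.h>

struct mmc_data_stub { int host_cookie; };
struct mmc_request_stub { struct mmc_data_stub *data; };

struct host_ops_stub {
	/* dma_map_sg() + build the DMA descriptor for a future transfer */
	void (*pre_req)(struct mmc_request_stub *mrq, bool is_first_req);
	/* start the transfer (mmci_request() in the driver) */
	void (*request)(struct mmc_request_stub *mrq);
	/* dma_unmap_sg() and drop the cookie once the transfer is done */
	void (*post_req)(struct mmc_request_stub *mrq, int err);
};

/* Placeholder: block until the hardware reports the transfer finished. */
static void wait_for_transfer(struct mmc_request_stub *mrq) { (void)mrq; }

/* Issue n requests, preparing request i+1 while request i is in flight. */
static void issue_pipelined(struct host_ops_stub *ops,
			    struct mmc_request_stub **mrq, size_t n)
{
	struct mmc_request_stub *prev = NULL;

	for (size_t i = 0; i < n; i++) {
		ops->pre_req(mrq[i], prev == NULL); /* prepare next job early */

		if (prev)
			wait_for_transfer(prev);    /* previous job finishes */

		ops->request(mrq[i]);               /* start the new job */

		if (prev)
			ops->post_req(prev, 0);     /* unmap the finished job */

		prev = mrq[i];
	}

	if (prev) {
		wait_for_transfer(prev);
		ops->post_req(prev, 0);
	}
}

If the caller never issues pre_req(), request() still maps the buffers and prepares the DMA itself, which is the fallback behaviour described above.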
Diffstat (limited to 'drivers/mmc')
-rw-r--r--   drivers/mmc/host/mmci.c   147
-rw-r--r--   drivers/mmc/host/mmci.h     8
2 files changed, 142 insertions(+), 13 deletions(-)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index fe140724a02e..fef7140eb1d0 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -226,6 +226,9 @@ static void __devinit mmci_dma_setup(struct mmci_host *host)
                 return;
         }
 
+        /* initialize pre request cookie */
+        host->next_data.cookie = 1;
+
         /* Try to acquire a generic DMA engine slave channel */
         dma_cap_zero(mask);
         dma_cap_set(DMA_SLAVE, mask);
@@ -335,7 +338,8 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
                 dir = DMA_FROM_DEVICE;
         }
 
-        dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+        if (!data->host_cookie)
+                dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
 
         /*
          * Use of DMA with scatter-gather is impossible.
@@ -353,7 +357,8 @@ static void mmci_dma_data_error(struct mmci_host *host)
         dmaengine_terminate_all(host->dma_current);
 }
 
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+                              struct mmci_host_next *next)
 {
         struct variant_data *variant = host->variant;
         struct dma_slave_config conf = {
@@ -364,13 +369,20 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
                 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
                 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
         };
-        struct mmc_data *data = host->data;
         struct dma_chan *chan;
         struct dma_device *device;
         struct dma_async_tx_descriptor *desc;
         int nr_sg;
 
-        host->dma_current = NULL;
+        /* Check if next job is already prepared */
+        if (data->host_cookie && !next &&
+            host->dma_current && host->dma_desc_current)
+                return 0;
+
+        if (!next) {
+                host->dma_current = NULL;
+                host->dma_desc_current = NULL;
+        }
 
         if (data->flags & MMC_DATA_READ) {
                 conf.direction = DMA_FROM_DEVICE;
@@ -385,7 +397,7 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
                 return -EINVAL;
 
         /* If less than or equal to the fifo size, don't bother with DMA */
-        if (host->size <= variant->fifosize)
+        if (data->blksz * data->blocks <= variant->fifosize)
                 return -EINVAL;
 
         device = chan->device;
@@ -399,14 +411,38 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
         if (!desc)
                 goto unmap_exit;
 
-        /* Okay, go for it. */
-        host->dma_current = chan;
+        if (next) {
+                next->dma_chan = chan;
+                next->dma_desc = desc;
+        } else {
+                host->dma_current = chan;
+                host->dma_desc_current = desc;
+        }
+
+        return 0;
 
+ unmap_exit:
+        if (!next)
+                dmaengine_terminate_all(chan);
+        dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+        return -ENOMEM;
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+        int ret;
+        struct mmc_data *data = host->data;
+
+        ret = mmci_dma_prep_data(host, host->data, NULL);
+        if (ret)
+                return ret;
+
+        /* Okay, go for it. */
         dev_vdbg(mmc_dev(host->mmc),
                  "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
                  data->sg_len, data->blksz, data->blocks, data->flags);
-        dmaengine_submit(desc);
-        dma_async_issue_pending(chan);
+        dmaengine_submit(host->dma_desc_current);
+        dma_async_issue_pending(host->dma_current);
 
         datactrl |= MCI_DPSM_DMAENABLE;
 
@@ -421,14 +457,90 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
         writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
                host->base + MMCIMASK0);
         return 0;
+}
 
-unmap_exit:
-        dmaengine_terminate_all(chan);
-        dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
-        return -ENOMEM;
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+        struct mmci_host_next *next = &host->next_data;
+
+        if (data->host_cookie && data->host_cookie != next->cookie) {
+                printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
+                       " host->next_data.cookie %d\n",
+                       __func__, data->host_cookie, host->next_data.cookie);
+                data->host_cookie = 0;
+        }
+
+        if (!data->host_cookie)
+                return;
+
+        host->dma_desc_current = next->dma_desc;
+        host->dma_current = next->dma_chan;
+
+        next->dma_desc = NULL;
+        next->dma_chan = NULL;
 }
+
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
+                             bool is_first_req)
+{
+        struct mmci_host *host = mmc_priv(mmc);
+        struct mmc_data *data = mrq->data;
+        struct mmci_host_next *nd = &host->next_data;
+
+        if (!data)
+                return;
+
+        if (data->host_cookie) {
+                data->host_cookie = 0;
+                return;
+        }
+
+        /* if config for dma */
+        if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
+            ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
+                if (mmci_dma_prep_data(host, data, nd))
+                        data->host_cookie = 0;
+                else
+                        data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
+        }
+}
+
+static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
+                              int err)
+{
+        struct mmci_host *host = mmc_priv(mmc);
+        struct mmc_data *data = mrq->data;
+        struct dma_chan *chan;
+        enum dma_data_direction dir;
+
+        if (!data)
+                return;
+
+        if (data->flags & MMC_DATA_READ) {
+                dir = DMA_FROM_DEVICE;
+                chan = host->dma_rx_channel;
+        } else {
+                dir = DMA_TO_DEVICE;
+                chan = host->dma_tx_channel;
+        }
+
+
+        /* if config for dma */
+        if (chan) {
+                if (err)
+                        dmaengine_terminate_all(chan);
+                if (err || data->host_cookie)
+                        dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+                                     data->sg_len, dir);
+                mrq->data->host_cookie = 0;
+        }
+}
+
 #else
 /* Blank functions if the DMA engine is not available */
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+}
 static inline void mmci_dma_setup(struct mmci_host *host)
 {
 }
@@ -449,6 +561,10 @@ static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datac
 {
         return -ENOSYS;
 }
+
+#define mmci_pre_request NULL
+#define mmci_post_request NULL
+
 #endif
 
 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
@@ -872,6 +988,9 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
         host->mrq = mrq;
 
+        if (mrq->data)
+                mmci_get_next_data(host, mrq->data);
+
         if (mrq->data && mrq->data->flags & MMC_DATA_READ)
                 mmci_start_data(host, mrq->data);
 
@@ -986,6 +1105,8 @@ static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
 
 static const struct mmc_host_ops mmci_ops = {
         .request = mmci_request,
+        .pre_req = mmci_pre_request,
+        .post_req = mmci_post_request,
         .set_ios = mmci_set_ios,
         .get_ro = mmci_get_ro,
         .get_cd = mmci_get_cd,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 2164e8c6476c..79e4143ab9df 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -166,6 +166,12 @@ struct clk;
 struct variant_data;
 struct dma_chan;
 
+struct mmci_host_next {
+        struct dma_async_tx_descriptor *dma_desc;
+        struct dma_chan *dma_chan;
+        s32 cookie;
+};
+
 struct mmci_host {
         phys_addr_t phybase;
         void __iomem *base;
@@ -203,6 +209,8 @@ struct mmci_host {
         struct dma_chan *dma_current;
         struct dma_chan *dma_rx_channel;
         struct dma_chan *dma_tx_channel;
+        struct dma_async_tx_descriptor *dma_desc_current;
+        struct mmci_host_next next_data;
 
 #define dma_inprogress(host) ((host)->dma_current)
 #else