author		Matt Porter <mporter@ti.com>	2012-08-22 21:09:35 -0400
committer	Chris Ball <cjb@laptop.org>	2012-09-19 04:29:44 -0400
commit		5413da811fbb11bc0482c92cbb8415073591d462 (patch)
tree		2d9af011b0a6b44f1d0a4ae58c2f4c9549eec3af
parent		d8e2ac330f65bcf47e8894fe5331a7e8ee019c06 (diff)
mmc: davinci_mmc: convert to DMA engine API
Removes use of the DaVinci EDMA private DMA API and replaces it with use
of the DMA engine API.

Signed-off-by: Matt Porter <mporter@ti.com>
Tested-by: Koen Kooi <koen@dominion.thruhere.net>
Signed-off-by: Chris Ball <cjb@laptop.org>
-rw-r--r--	drivers/mmc/host/davinci_mmc.c | 271
1 file changed, 82 insertions(+), 189 deletions(-)
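For context, the DMA engine slave API this patch adopts follows a request/configure/prepare/submit pattern. The sketch below is an illustration of that flow only, using the same calls the driver now makes (dmaengine_slave_config, dmaengine_prep_slave_sg, dmaengine_submit, dma_async_issue_pending); the function name my_dev_start_tx and the fifo_addr/burst parameters are hypothetical and not taken from davinci_mmc.

/*
 * Illustrative only: a minimal slave-DMA transmit path using the generic
 * dmaengine API. Names (my_dev_start_tx, fifo_addr) are hypothetical.
 */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int my_dev_start_tx(struct dma_chan *chan, dma_addr_t fifo_addr,
			   struct scatterlist *sg, unsigned int sg_len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,	/* peripheral FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,		/* words per DMA burst */
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* Describe the device side of the transfer to the DMA driver. */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* Build one descriptor covering the (already DMA-mapped) sg list. */
	desc = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	/* Queue the descriptor and start the channel. */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}

The channel itself would be obtained once at probe time with dma_request_channel() and a filter function (edma_filter_fn in this patch), which is what davinci_acquire_dma_channels() below does for each direction.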
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 7cf6c624bf73..c5e1eebcd588 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -30,11 +30,12 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/edma.h>
 #include <linux/mmc/mmc.h>
 
 #include <mach/mmc.h>
-#include <mach/edma.h>
 
 /*
  * Register Definitions
@@ -200,21 +201,13 @@ struct mmc_davinci_host {
 	u32 bytes_left;
 
 	u32 rxdma, txdma;
+	struct dma_chan *dma_tx;
+	struct dma_chan *dma_rx;
 	bool use_dma;
 	bool do_dma;
 	bool sdio_int;
 	bool active_request;
 
-	/* Scatterlist DMA uses one or more parameter RAM entries:
-	 * the main one (associated with rxdma or txdma) plus zero or
-	 * more links.  The entries for a given transfer differ only
-	 * by memory buffer (address, length) and link field.
-	 */
-	struct edmacc_param	tx_template;
-	struct edmacc_param	rx_template;
-	unsigned		n_link;
-	u32			links[MAX_NR_SG - 1];
-
 	/* For PIO we walk scatterlists one segment at a time. */
 	unsigned int sg_len;
 	struct scatterlist *sg;
@@ -410,153 +403,74 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,
 
 static void davinci_abort_dma(struct mmc_davinci_host *host)
 {
-	int sync_dev;
+	struct dma_chan *sync_dev;
 
 	if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
-		sync_dev = host->rxdma;
+		sync_dev = host->dma_rx;
 	else
-		sync_dev = host->txdma;
-
-	edma_stop(sync_dev);
-	edma_clean_channel(sync_dev);
-}
-
-static void
-mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data);
-
-static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data)
-{
-	if (DMA_COMPLETE != ch_status) {
-		struct mmc_davinci_host *host = data;
-
-		/* Currently means:  DMA Event Missed, or "null" transfer
-		 * request was seen.  In the future, TC errors (like bad
-		 * addresses) might be presented too.
-		 */
-		dev_warn(mmc_dev(host->mmc), "DMA %s error\n",
-			(host->data->flags & MMC_DATA_WRITE)
-				? "write" : "read");
-		host->data->error = -EIO;
-		mmc_davinci_xfer_done(host, host->data);
-	}
-}
-
-/* Set up tx or rx template, to be modified and updated later */
-static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host,
-		bool tx, struct edmacc_param *template)
-{
-	unsigned	sync_dev;
-	const u16	acnt = 4;
-	const u16	bcnt = rw_threshold >> 2;
-	const u16	ccnt = 0;
-	u32		src_port = 0;
-	u32		dst_port = 0;
-	s16		src_bidx, dst_bidx;
-	s16		src_cidx, dst_cidx;
-
-	/*
-	 * A-B Sync transfer:  each DMA request is for one "frame" of
-	 * rw_threshold bytes, broken into "acnt"-size chunks repeated
-	 * "bcnt" times.  Each segment needs "ccnt" such frames; since
-	 * we tell the block layer our mmc->max_seg_size limit, we can
-	 * trust (later) that it's within bounds.
-	 *
-	 * The FIFOs are read/written in 4-byte chunks (acnt == 4) and
-	 * EDMA will optimize memory operations to use larger bursts.
-	 */
-	if (tx) {
-		sync_dev = host->txdma;
-
-		/* src_prt, ccnt, and link to be set up later */
-		src_bidx = acnt;
-		src_cidx = acnt * bcnt;
-
-		dst_port = host->mem_res->start + DAVINCI_MMCDXR;
-		dst_bidx = 0;
-		dst_cidx = 0;
-	} else {
-		sync_dev = host->rxdma;
-
-		src_port = host->mem_res->start + DAVINCI_MMCDRR;
-		src_bidx = 0;
-		src_cidx = 0;
-
-		/* dst_prt, ccnt, and link to be set up later */
-		dst_bidx = acnt;
-		dst_cidx = acnt * bcnt;
-	}
-
-	/*
-	 * We can't use FIFO mode for the FIFOs because MMC FIFO addresses
-	 * are not 256-bit (32-byte) aligned.  So we use INCR, and the W8BIT
-	 * parameter is ignored.
-	 */
-	edma_set_src(sync_dev, src_port, INCR, W8BIT);
-	edma_set_dest(sync_dev, dst_port, INCR, W8BIT);
+		sync_dev = host->dma_tx;
 
-	edma_set_src_index(sync_dev, src_bidx, src_cidx);
-	edma_set_dest_index(sync_dev, dst_bidx, dst_cidx);
-
-	edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC);
-
-	edma_read_slot(sync_dev, template);
-
-	/* don't bother with irqs or chaining */
-	template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12;
+	dmaengine_terminate_all(sync_dev);
 }
 
-static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
+static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
 		struct mmc_data *data)
 {
-	struct edmacc_param	*template;
-	int channel, slot;
-	unsigned		link;
-	struct scatterlist	*sg;
-	unsigned		sg_len;
-	unsigned		bytes_left = host->bytes_left;
-	const unsigned		shift = ffs(rw_threshold) - 1;
+	struct dma_chan *chan;
+	struct dma_async_tx_descriptor *desc;
+	int ret = 0;
 
 	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
-		template = &host->tx_template;
-		channel = host->txdma;
+		struct dma_slave_config dma_tx_conf = {
+			.direction = DMA_MEM_TO_DEV,
+			.dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
+			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+			.dst_maxburst =
+				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
+		};
+		chan = host->dma_tx;
+		dmaengine_slave_config(host->dma_tx, &dma_tx_conf);
+
+		desc = dmaengine_prep_slave_sg(host->dma_tx,
+				data->sg,
+				host->sg_len,
+				DMA_MEM_TO_DEV,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!desc) {
+			dev_dbg(mmc_dev(host->mmc),
+				"failed to allocate DMA TX descriptor");
+			ret = -1;
+			goto out;
+		}
 	} else {
-		template = &host->rx_template;
-		channel = host->rxdma;
-	}
-
-	/* We know sg_len and ccnt will never be out of range because
-	 * we told the mmc layer which in turn tells the block layer
-	 * to ensure that it only hands us one scatterlist segment
-	 * per EDMA PARAM entry.  Update the PARAM
-	 * entries needed for each segment of this scatterlist.
-	 */
-	for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len;
-			sg_len-- != 0 && bytes_left;
-			sg = sg_next(sg), slot = host->links[link++]) {
-		u32 buf = sg_dma_address(sg);
-		unsigned count = sg_dma_len(sg);
-
-		template->link_bcntrld = sg_len
-				? (EDMA_CHAN_SLOT(host->links[link]) << 5)
-				: 0xffff;
-
-		if (count > bytes_left)
-			count = bytes_left;
-		bytes_left -= count;
-
-		if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
-			template->src = buf;
-		else
-			template->dst = buf;
-		template->ccnt = count >> shift;
-
-		edma_write_slot(slot, template);
+		struct dma_slave_config dma_rx_conf = {
+			.direction = DMA_DEV_TO_MEM,
+			.src_addr = host->mem_res->start + DAVINCI_MMCDRR,
+			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+			.src_maxburst =
+				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
+		};
+		chan = host->dma_rx;
+		dmaengine_slave_config(host->dma_rx, &dma_rx_conf);
+
+		desc = dmaengine_prep_slave_sg(host->dma_rx,
+				data->sg,
+				host->sg_len,
+				DMA_DEV_TO_MEM,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!desc) {
+			dev_dbg(mmc_dev(host->mmc),
+				"failed to allocate DMA RX descriptor");
+			ret = -1;
+			goto out;
+		}
 	}
 
-	if (host->version == MMC_CTLR_VERSION_2)
-		edma_clear_event(channel);
+	dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
 
-	edma_start(channel);
+out:
+	return ret;
 }
 
 static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
@@ -564,6 +478,7 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
 {
 	int i;
 	int mask = rw_threshold - 1;
+	int ret = 0;
 
 	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
 				((data->flags & MMC_DATA_WRITE)
@@ -583,70 +498,48 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
 	}
 
 	host->do_dma = 1;
-	mmc_davinci_send_dma_request(host, data);
+	ret = mmc_davinci_send_dma_request(host, data);
 
-	return 0;
+	return ret;
 }
 
 static void __init_or_module
 davinci_release_dma_channels(struct mmc_davinci_host *host)
 {
-	unsigned i;
-
 	if (!host->use_dma)
 		return;
 
-	for (i = 0; i < host->n_link; i++)
-		edma_free_slot(host->links[i]);
-
-	edma_free_channel(host->txdma);
-	edma_free_channel(host->rxdma);
+	dma_release_channel(host->dma_tx);
+	dma_release_channel(host->dma_rx);
 }
 
 static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
 {
-	u32 link_size;
-	int r, i;
+	int r;
+	dma_cap_mask_t mask;
 
-	/* Acquire master DMA write channel */
-	r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host,
-			EVENTQ_DEFAULT);
-	if (r < 0) {
-		dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
-				"tx", r);
-		return r;
-	}
-	mmc_davinci_dma_setup(host, true, &host->tx_template);
-
-	/* Acquire master DMA read channel */
-	r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host,
-			EVENTQ_DEFAULT);
-	if (r < 0) {
-		dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
-				"rx", r);
-		goto free_master_write;
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	host->dma_tx =
+		dma_request_channel(mask, edma_filter_fn, &host->txdma);
+	if (!host->dma_tx) {
+		dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
+		return -ENODEV;
 	}
-	mmc_davinci_dma_setup(host, false, &host->rx_template);
 
-	/* Allocate parameter RAM slots, which will later be bound to a
-	 * channel as needed to handle a scatterlist.
-	 */
-	link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
-	for (i = 0; i < link_size; i++) {
-		r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
-		if (r < 0) {
-			dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
-				r);
-			break;
-		}
-		host->links[i] = r;
+	host->dma_rx =
+		dma_request_channel(mask, edma_filter_fn, &host->rxdma);
+	if (!host->dma_rx) {
+		dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
+		r = -ENODEV;
+		goto free_master_write;
 	}
-	host->n_link = i;
 
 	return 0;
 
 free_master_write:
-	edma_free_channel(host->txdma);
+	dma_release_channel(host->dma_tx);
 
 	return r;
 }
@@ -1359,7 +1252,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
 	 * Each hw_seg uses one EDMA parameter RAM slot, always one
 	 * channel and then usually some linked slots.
 	 */
-	mmc->max_segs		= 1 + host->n_link;
+	mmc->max_segs		= MAX_NR_SG;
 
 	/* EDMA limit per hw segment (one or two MBytes) */
 	mmc->max_seg_size	= MAX_CCNT * rw_threshold;