author		Juha Yrjola <juha.yrjola@solidboot.com>	2008-11-14 08:22:00 -0500
committer	Pierre Ossman <drzeus@drzeus.cx>	2009-03-24 16:30:05 -0400
commit		0ccd76d4c236a0cf71efe51848f15c3f5d951da7 (patch)
tree		856a7855ab20a51da3fbb389f70401824379ae64 /drivers/mmc
parent		4a694dc915c9a223044ce21fc0d99e63facd1d64 (diff)
omap_hsmmc: Implement scatter-gather emulation
Instead of using a bounce buffer, use scatter-gather emulation (as in the OMAP1/2 MMC driver). This removes the need for one extra memory copy and improves performance.

Signed-off-by: Juha Yrjola <juha.yrjola@solidboot.com>
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
Diffstat (limited to 'drivers/mmc')
-rw-r--r--	drivers/mmc/host/omap_hsmmc.c | 172
1 file changed, 95 insertions(+), 77 deletions(-)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 1f84bd4a3b27..483e591f7bbb 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -100,9 +100,6 @@
 #define OMAP_MMC1_DEVID		0
 #define OMAP_MMC2_DEVID		1
 
-#define OMAP_MMC_DATADIR_NONE	0
-#define OMAP_MMC_DATADIR_READ	1
-#define OMAP_MMC_DATADIR_WRITE	2
 #define MMC_TIMEOUT_MS		20
 #define OMAP_MMC_MASTER_CLOCK	96000000
 #define DRIVER_NAME		"mmci-omap-hs"
@@ -138,16 +135,14 @@ struct mmc_omap_host {
 	resource_size_t		mapbase;
 	unsigned int		id;
 	unsigned int		dma_len;
-	unsigned int		dma_dir;
+	unsigned int		dma_sg_idx;
 	unsigned char		bus_mode;
-	unsigned char		datadir;
 	u32			*buffer;
 	u32			bytesleft;
 	int			suspended;
 	int			irq;
 	int			carddetect;
 	int			use_dma, dma_ch;
-	int			initstr;
 	int			slot_id;
 	int			dbclk_enabled;
 	int			response_busy;
@@ -281,6 +276,15 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd,
 	OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
 }
 
+static int
+mmc_omap_get_dma_dir(struct mmc_omap_host *host, struct mmc_data *data)
+{
+	if (data->flags & MMC_DATA_WRITE)
+		return DMA_TO_DEVICE;
+	else
+		return DMA_FROM_DEVICE;
+}
+
 /*
  * Notify the transfer complete to MMC core
  */
@@ -300,9 +304,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
 
 	if (host->use_dma && host->dma_ch != -1)
 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
-			host->dma_dir);
-
-	host->datadir = OMAP_MMC_DATADIR_NONE;
+			mmc_omap_get_dma_dir(host, data));
 
 	if (!data->error)
 		data->bytes_xfered += data->blocks * (data->blksz);
@@ -352,13 +354,12 @@ static void mmc_dma_cleanup(struct mmc_omap_host *host, int errno)
 
 	if (host->use_dma && host->dma_ch != -1) {
 		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
-			host->dma_dir);
+			mmc_omap_get_dma_dir(host, host->data));
 		omap_free_dma(host->dma_ch);
 		host->dma_ch = -1;
 		up(&host->sem);
 	}
 	host->data = NULL;
-	host->datadir = OMAP_MMC_DATADIR_NONE;
 }
 
 /*
@@ -592,6 +593,55 @@ static irqreturn_t omap_mmc_cd_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static int mmc_omap_get_dma_sync_dev(struct mmc_omap_host *host,
+				     struct mmc_data *data)
+{
+	int sync_dev;
+
+	if (data->flags & MMC_DATA_WRITE) {
+		if (host->id == OMAP_MMC1_DEVID)
+			sync_dev = OMAP24XX_DMA_MMC1_TX;
+		else
+			sync_dev = OMAP24XX_DMA_MMC2_TX;
+	} else {
+		if (host->id == OMAP_MMC1_DEVID)
+			sync_dev = OMAP24XX_DMA_MMC1_RX;
+		else
+			sync_dev = OMAP24XX_DMA_MMC2_RX;
+	}
+	return sync_dev;
+}
+
+static void mmc_omap_config_dma_params(struct mmc_omap_host *host,
+				       struct mmc_data *data,
+				       struct scatterlist *sgl)
+{
+	int blksz, nblk, dma_ch;
+
+	dma_ch = host->dma_ch;
+	if (data->flags & MMC_DATA_WRITE) {
+		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
+			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
+		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+			sg_dma_address(sgl), 0, 0);
+	} else {
+		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
+			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
+		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+			sg_dma_address(sgl), 0, 0);
+	}
+
+	blksz = host->data->blksz;
+	nblk = sg_dma_len(sgl) / blksz;
+
+	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
+			blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
+			mmc_omap_get_dma_sync_dev(host, data),
+			!(data->flags & MMC_DATA_WRITE));
+
+	omap_start_dma(dma_ch);
+}
+
 /*
  * DMA call back function
  */
@@ -605,6 +655,14 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
 	if (host->dma_ch < 0)
 		return;
 
+	host->dma_sg_idx++;
+	if (host->dma_sg_idx < host->dma_len) {
+		/* Fire up the next transfer. */
+		mmc_omap_config_dma_params(host, host->data,
+					host->data->sg + host->dma_sg_idx);
+		return;
+	}
+
 	omap_free_dma(host->dma_ch);
 	host->dma_ch = -1;
 	/*
@@ -615,38 +673,28 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
 }
 
 /*
- * Configure dma src and destination parameters
- */
-static int mmc_omap_config_dma_param(int sync_dir, struct mmc_omap_host *host,
-		struct mmc_data *data)
-{
-	if (sync_dir == 0) {
-		omap_set_dma_dest_params(host->dma_ch, 0,
-			OMAP_DMA_AMODE_CONSTANT,
-			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
-		omap_set_dma_src_params(host->dma_ch, 0,
-			OMAP_DMA_AMODE_POST_INC,
-			sg_dma_address(&data->sg[0]), 0, 0);
-	} else {
-		omap_set_dma_src_params(host->dma_ch, 0,
-			OMAP_DMA_AMODE_CONSTANT,
-			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
-		omap_set_dma_dest_params(host->dma_ch, 0,
-			OMAP_DMA_AMODE_POST_INC,
-			sg_dma_address(&data->sg[0]), 0, 0);
-	}
-	return 0;
-}
-/*
  * Routine to configure and start DMA for the MMC card
  */
 static int
 mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
 {
-	int sync_dev, sync_dir = 0;
-	int dma_ch = 0, ret = 0, err = 1;
+	int dma_ch = 0, ret = 0, err = 1, i;
 	struct mmc_data *data = req->data;
 
+	/* Sanity check: all the SG entries must be aligned by block size. */
+	for (i = 0; i < host->dma_len; i++) {
+		struct scatterlist *sgl;
+
+		sgl = data->sg + i;
+		if (sgl->length % data->blksz)
+			return -EINVAL;
+	}
+	if ((data->blksz % 4) != 0)
+		/* REVISIT: The MMC buffer increments only when MSB is written.
+		 * Return error for blksz which is non multiple of four.
+		 */
+		return -EINVAL;
+
 	/*
 	 * If for some reason the DMA transfer is still active,
 	 * we wait for timeout period and free the dma
@@ -665,49 +713,22 @@ mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
 		return err;
 	}
 
-	if (!(data->flags & MMC_DATA_WRITE)) {
-		host->dma_dir = DMA_FROM_DEVICE;
-		if (host->id == OMAP_MMC1_DEVID)
-			sync_dev = OMAP24XX_DMA_MMC1_RX;
-		else
-			sync_dev = OMAP24XX_DMA_MMC2_RX;
-	} else {
-		host->dma_dir = DMA_TO_DEVICE;
-		if (host->id == OMAP_MMC1_DEVID)
-			sync_dev = OMAP24XX_DMA_MMC1_TX;
-		else
-			sync_dev = OMAP24XX_DMA_MMC2_TX;
-	}
-
-	ret = omap_request_dma(sync_dev, "MMC/SD", mmc_omap_dma_cb,
-			host, &dma_ch);
+	ret = omap_request_dma(mmc_omap_get_dma_sync_dev(host, data), "MMC/SD",
+			mmc_omap_dma_cb, host, &dma_ch);
 	if (ret != 0) {
-		dev_dbg(mmc_dev(host->mmc),
+		dev_err(mmc_dev(host->mmc),
 			"%s: omap_request_dma() failed with %d\n",
 			mmc_hostname(host->mmc), ret);
 		return ret;
 	}
 
 	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
-			data->sg_len, host->dma_dir);
+			data->sg_len, mmc_omap_get_dma_dir(host, data));
 	host->dma_ch = dma_ch;
+	host->dma_sg_idx = 0;
 
-	if (!(data->flags & MMC_DATA_WRITE))
-		mmc_omap_config_dma_param(1, host, data);
-	else
-		mmc_omap_config_dma_param(0, host, data);
-
-	if ((data->blksz % 4) == 0)
-		omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
-			(data->blksz / 4), data->blocks, OMAP_DMA_SYNC_FRAME,
-			sync_dev, sync_dir);
-	else
-		/* REVISIT: The MMC buffer increments only when MSB is written.
-		 * Return error for blksz which is non multiple of four.
-		 */
-		return -EINVAL;
+	mmc_omap_config_dma_params(host, data, data->sg);
 
-	omap_start_dma(dma_ch);
 	return 0;
 }
 
@@ -757,7 +778,6 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
 	host->data = req->data;
 
 	if (req->data == NULL) {
-		host->datadir = OMAP_MMC_DATADIR_NONE;
 		OMAP_HSMMC_WRITE(host->base, BLK, 0);
 		return 0;
 	}
@@ -766,9 +786,6 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
 			| (req->data->blocks << 16));
 	set_data_timeout(host, req);
 
-	host->datadir = (req->data->flags & MMC_DATA_WRITE) ?
-			OMAP_MMC_DATADIR_WRITE : OMAP_MMC_DATADIR_READ;
-
 	if (host->use_dma) {
 		ret = mmc_omap_start_dma_transfer(host, req);
 		if (ret != 0) {
@@ -1027,10 +1044,11 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
 	else
 		host->dbclk_enabled = 1;
 
-#ifdef CONFIG_MMC_BLOCK_BOUNCE
-	mmc->max_phys_segs = 1;
-	mmc->max_hw_segs = 1;
-#endif
+	/* Since we do only SG emulation, we can have as many segs
+	 * as we want. */
+	mmc->max_phys_segs = 1024;
+	mmc->max_hw_segs = 1024;
+
 	mmc->max_blk_size = 512;	/* Block Length at max can be 1024 */
 	mmc->max_blk_count = 0xFFFF;	/* No. of Blocks is 16 bits */
 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;