author    Linus Torvalds <torvalds@linux-foundation.org>  2011-03-19 01:32:40 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-03-19 01:32:40 -0400
commit    a8c91da549f625d0600d5bd7e1831066b55edf0d (patch)
tree      494738b095d7c96286c7b0d0d586c7b8fa594f5a /drivers/mmc
parent    26b95cac5fddb2916e2cef76495073f9c37a7b54 (diff)
parent    c07946a3350244d7c3d9bc1032325e04dd11575b (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (53 commits)
  mmc: dw_mmc: support mmc power control with regulator
  mmc: dw_mmc: fix suspend/resume operation
  mmc: dw_mmc: add quirks for unreliable card detect, and capabilities
  mmc: tmio: fix address in kunmap_atomic() calls
  mmc: core: reset card voltage after power off
  mmc: core: export function mmc_do_release_host()
  mmc: sdio: remember new card RCA when redetecting card
  mmc: dw_mmc: Remove set-but-unused variable.
  mmc: sdhci-esdhc-imx: add card detect on custom GPIO for mx25/35
  mmc: sdhci-esdhc: broken card detection is not a default quirk
  mmc: sdhci-esdhc-imx: add write protect on custom GPIO on mx25/35
  mmc: msm_sdcc: remove needless cache flush after dma_unmap_sg()
  mmc: sh_mmcif: support aggressive clock gating
  mmc: check if mmc cards < 2GB do sector addressing
  mmc: core: comment on why sdio_reset is done at init time
  mmc: dw_mmc: support DDR mode
  mmc: via-sdmmc: Remove set-but-unused variable.
  mmc: cb710: Return err value in cb710_wait_while_busy()
  mmc: sdhci-pci: Remove set-but-unused variable.
  mmc: mxs-mmc: add mmc host driver for i.MX23/28
  ...
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/card/Kconfig            |   3
-rw-r--r--  drivers/mmc/card/block.c            |   1
-rw-r--r--  drivers/mmc/card/mmc_test.c         | 271
-rw-r--r--  drivers/mmc/core/Makefile           |   3
-rw-r--r--  drivers/mmc/core/core.c             |  26
-rw-r--r--  drivers/mmc/core/core.h             |   2
-rw-r--r--  drivers/mmc/core/host.c             |   5
-rw-r--r--  drivers/mmc/core/mmc.c              |  86
-rw-r--r--  drivers/mmc/core/quirks.c           |  84
-rw-r--r--  drivers/mmc/core/sd.c               |   1
-rw-r--r--  drivers/mmc/core/sdio.c             |   9
-rw-r--r--  drivers/mmc/host/Kconfig            |  11
-rw-r--r--  drivers/mmc/host/Makefile           |   1
-rw-r--r--  drivers/mmc/host/atmel-mci.c        |  19
-rw-r--r--  drivers/mmc/host/cb710-mmc.c        |   2
-rw-r--r--  drivers/mmc/host/dw_mmc.c           |  83
-rw-r--r--  drivers/mmc/host/dw_mmc.h           |   2
-rw-r--r--  drivers/mmc/host/msm_sdcc.c         |   8
-rw-r--r--  drivers/mmc/host/mxcmmc.c           | 181
-rw-r--r--  drivers/mmc/host/mxs-mmc.c          | 874
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c  | 134
-rw-r--r--  drivers/mmc/host/sdhci-esdhc.h      |   1
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c   |   3
-rw-r--r--  drivers/mmc/host/sdhci-pci.c        |  12
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c        |   3
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c      |   6
-rw-r--r--  drivers/mmc/host/sh_mmcif.c         |  62
-rw-r--r--  drivers/mmc/host/tmio_mmc.c         |  96
-rw-r--r--  drivers/mmc/host/via-sdmmc.c        |   3
29 files changed, 1740 insertions(+), 252 deletions(-)
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 2a876c4099cd..3b1f783bf924 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -58,12 +58,11 @@ config SDIO_UART
 
 config MMC_TEST
 	tristate "MMC host test driver"
-	default n
 	help
 	  Development driver that performs a series of reads and writes
 	  to a memory card in order to expose certain well known bugs
 	  in host controllers. The tests are executed by writing to the
-	  "test" file in sysfs under each card. Note that whatever is
+	  "test" file in debugfs under each card. Note that whatever is
 	  on your card will be overwritten by these tests.
 
 	  This driver is only of interest to those developing or
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index bfc8a8ae55df..61d233a7c118 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -621,6 +621,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	md->disk->private_data = md;
 	md->disk->queue = md->queue.queue;
 	md->disk->driverfs_dev = &card->dev;
+	set_disk_ro(md->disk, md->read_only);
 
 	/*
 	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 21adc27f4132..5ec8eddfcf6e 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -88,6 +88,7 @@ struct mmc_test_area {
  * @sectors: amount of sectors to check in one group
  * @ts: time values of transfer
  * @rate: calculated transfer rate
+ * @iops: I/O operations per second (times 100)
  */
 struct mmc_test_transfer_result {
 	struct list_head link;
@@ -95,6 +96,7 @@ struct mmc_test_transfer_result {
 	unsigned int sectors;
 	struct timespec ts;
 	unsigned int rate;
+	unsigned int iops;
 };
 
 /**
@@ -226,9 +228,10 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
 
 		if (!busy && mmc_test_busy(&cmd)) {
 			busy = 1;
-			printk(KERN_INFO "%s: Warning: Host did not "
-				"wait for busy state to end.\n",
-				mmc_hostname(test->card->host));
+			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+				printk(KERN_INFO "%s: Warning: Host did not "
+					"wait for busy state to end.\n",
+					mmc_hostname(test->card->host));
 		}
 	} while (mmc_test_busy(&cmd));
 
@@ -494,7 +497,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
  */
 static void mmc_test_save_transfer_result(struct mmc_test_card *test,
 	unsigned int count, unsigned int sectors, struct timespec ts,
-	unsigned int rate)
+	unsigned int rate, unsigned int iops)
 {
 	struct mmc_test_transfer_result *tr;
 
@@ -509,6 +512,7 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
 	tr->sectors = sectors;
 	tr->ts = ts;
 	tr->rate = rate;
+	tr->iops = iops;
 
 	list_add_tail(&tr->link, &test->gr->tr_lst);
 }
@@ -519,20 +523,22 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
 				struct timespec *ts1, struct timespec *ts2)
 {
-	unsigned int rate, sectors = bytes >> 9;
+	unsigned int rate, iops, sectors = bytes >> 9;
 	struct timespec ts;
 
 	ts = timespec_sub(*ts2, *ts1);
 
 	rate = mmc_test_rate(bytes, &ts);
+	iops = mmc_test_rate(100, &ts);		/* I/O ops per sec x 100 */
 
 	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
-		"seconds (%u kB/s, %u KiB/s)\n",
+		"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
 		mmc_hostname(test->card->host), sectors, sectors >> 1,
 		(sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
-		(unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
+		(unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+		iops / 100, iops % 100);
 
-	mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
+	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
 }
 
 /*
@@ -542,22 +548,24 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
 				unsigned int count, struct timespec *ts1,
 				struct timespec *ts2)
 {
-	unsigned int rate, sectors = bytes >> 9;
+	unsigned int rate, iops, sectors = bytes >> 9;
 	uint64_t tot = bytes * count;
 	struct timespec ts;
 
 	ts = timespec_sub(*ts2, *ts1);
 
 	rate = mmc_test_rate(tot, &ts);
+	iops = mmc_test_rate(count * 100, &ts);	/* I/O ops per sec x 100 */
 
 	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
-		"%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
+		"%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+		"%u.%02u IOPS)\n",
 		mmc_hostname(test->card->host), count, sectors, count,
 		sectors >> 1, (sectors & 1 ? ".5" : ""),
 		(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
-		rate / 1000, rate / 1024);
+		rate / 1000, rate / 1024, iops / 100, iops % 100);
 
-	mmc_test_save_transfer_result(test, count, sectors, ts, rate);
+	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
 }
 
 /*
@@ -1425,28 +1433,29 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
 }
 
 /*
- * Initialize an area for testing large transfers. The size of the area is the
- * preferred erase size which is a good size for optimal transfer speed. Note
- * that is typically 4MiB for modern cards. The test area is set to the middle
- * of the card because cards may have different charateristics at the front
- * (for FAT file system optimization). Optionally, the area is erased (if the
- * card supports it) which may improve write performance. Optionally, the area
- * is filled with data for subsequent read tests.
+ * Initialize an area for testing large transfers. The test area is set to the
+ * middle of the card because cards may have different charateristics at the
+ * front (for FAT file system optimization). Optionally, the area is erased
+ * (if the card supports it) which may improve write performance. Optionally,
+ * the area is filled with data for subsequent read tests.
  */
 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
 {
 	struct mmc_test_area *t = &test->area;
-	unsigned long min_sz = 64 * 1024;
+	unsigned long min_sz = 64 * 1024, sz;
 	int ret;
 
 	ret = mmc_test_set_blksize(test, 512);
 	if (ret)
 		return ret;
 
-	if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
-		t->max_sz = TEST_AREA_MAX_SIZE;
-	else
-		t->max_sz = (unsigned long)test->card->pref_erase << 9;
+	/* Make the test area size about 4MiB */
+	sz = (unsigned long)test->card->pref_erase << 9;
+	t->max_sz = sz;
+	while (t->max_sz < 4 * 1024 * 1024)
+		t->max_sz += sz;
+	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
+		t->max_sz -= sz;
 
 	t->max_segs = test->card->host->max_segs;
 	t->max_seg_sz = test->card->host->max_seg_size;
@@ -1766,6 +1775,188 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
 	return 0;
 }
 
+static unsigned int rnd_next = 1;
+
+static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
+{
+	uint64_t r;
+
+	rnd_next = rnd_next * 1103515245 + 12345;
+	r = (rnd_next >> 16) & 0x7fff;
+	return (r * rnd_cnt) >> 15;
+}
+
+static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
+			     unsigned long sz)
+{
+	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
+	unsigned int ssz;
+	struct timespec ts1, ts2, ts;
+	int ret;
+
+	ssz = sz >> 9;
+
+	rnd_addr = mmc_test_capacity(test->card) / 4;
+	range1 = rnd_addr / test->card->pref_erase;
+	range2 = range1 / ssz;
+
+	getnstimeofday(&ts1);
+	for (cnt = 0; cnt < UINT_MAX; cnt++) {
+		getnstimeofday(&ts2);
+		ts = timespec_sub(ts2, ts1);
+		if (ts.tv_sec >= 10)
+			break;
+		ea = mmc_test_rnd_num(range1);
+		if (ea == last_ea)
+			ea -= 1;
+		last_ea = ea;
+		dev_addr = rnd_addr + test->card->pref_erase * ea +
+			   ssz * mmc_test_rnd_num(range2);
+		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
+		if (ret)
+			return ret;
+	}
+	if (print)
+		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	return 0;
+}
+
+static int mmc_test_random_perf(struct mmc_test_card *test, int write)
+{
+	unsigned int next;
+	unsigned long sz;
+	int ret;
+
+	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+		/*
+		 * When writing, try to get more consistent results by running
+		 * the test twice with exactly the same I/O but outputting the
+		 * results only for the 2nd run.
+		 */
+		if (write) {
+			next = rnd_next;
+			ret = mmc_test_rnd_perf(test, write, 0, sz);
+			if (ret)
+				return ret;
+			rnd_next = next;
+		}
+		ret = mmc_test_rnd_perf(test, write, 1, sz);
+		if (ret)
+			return ret;
+	}
+	sz = test->area.max_tfr;
+	if (write) {
+		next = rnd_next;
+		ret = mmc_test_rnd_perf(test, write, 0, sz);
+		if (ret)
+			return ret;
+		rnd_next = next;
+	}
+	return mmc_test_rnd_perf(test, write, 1, sz);
+}
+
+/*
+ * Random read performance by transfer size.
+ */
+static int mmc_test_random_read_perf(struct mmc_test_card *test)
+{
+	return mmc_test_random_perf(test, 0);
+}
+
+/*
+ * Random write performance by transfer size.
+ */
+static int mmc_test_random_write_perf(struct mmc_test_card *test)
+{
+	return mmc_test_random_perf(test, 1);
+}
+
+static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
+			     unsigned int tot_sz, int max_scatter)
+{
+	unsigned int dev_addr, i, cnt, sz, ssz;
+	struct timespec ts1, ts2, ts;
+	int ret;
+
+	sz = test->area.max_tfr;
+	/*
+	 * In the case of a maximally scattered transfer, the maximum transfer
+	 * size is further limited by using PAGE_SIZE segments.
+	 */
+	if (max_scatter) {
+		struct mmc_test_area *t = &test->area;
+		unsigned long max_tfr;
+
+		if (t->max_seg_sz >= PAGE_SIZE)
+			max_tfr = t->max_segs * PAGE_SIZE;
+		else
+			max_tfr = t->max_segs * t->max_seg_sz;
+		if (sz > max_tfr)
+			sz = max_tfr;
+	}
+
+	ssz = sz >> 9;
+	dev_addr = mmc_test_capacity(test->card) / 4;
+	if (tot_sz > dev_addr << 9)
+		tot_sz = dev_addr << 9;
+	cnt = tot_sz / sz;
+	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+
+	getnstimeofday(&ts1);
+	for (i = 0; i < cnt; i++) {
+		ret = mmc_test_area_io(test, sz, dev_addr, write,
+				       max_scatter, 0);
+		if (ret)
+			return ret;
+		dev_addr += ssz;
+	}
+	getnstimeofday(&ts2);
+
+	ts = timespec_sub(ts2, ts1);
+	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+
+	return 0;
+}
+
+static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
+{
+	int ret, i;
+
+	for (i = 0; i < 10; i++) {
+		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
+		if (ret)
+			return ret;
+	}
+	for (i = 0; i < 5; i++) {
+		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
+		if (ret)
+			return ret;
+	}
+	for (i = 0; i < 3; i++) {
+		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * Large sequential read performance.
+ */
+static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
+{
+	return mmc_test_large_seq_perf(test, 0);
+}
+
+/*
+ * Large sequential write performance.
+ */
+static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
+{
+	return mmc_test_large_seq_perf(test, 1);
+}
+
 static const struct mmc_test_case mmc_test_cases[] = {
 	{
 		.name = "Basic write (no data verification)",
@@ -2005,6 +2196,34 @@ static const struct mmc_test_case mmc_test_cases[] = {
 		.cleanup = mmc_test_area_cleanup,
 	},
 
+	{
+		.name = "Random read performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_random_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Random write performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_random_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Large sequential read into scattered pages",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_large_seq_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Large sequential write from scattered pages",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_large_seq_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
 };
 
 static DEFINE_MUTEX(mmc_test_lock);
@@ -2148,11 +2367,11 @@ static int mtf_test_show(struct seq_file *sf, void *data)
 		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
 
 		list_for_each_entry(tr, &gr->tr_lst, link) {
-			seq_printf(sf, "%u %d %lu.%09lu %u\n",
+			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
 				tr->count, tr->sectors,
 				(unsigned long)tr->ts.tv_sec,
 				(unsigned long)tr->ts.tv_nsec,
-				tr->rate);
+				tr->rate, tr->iops / 100, tr->iops % 100);
 		}
 	}
 
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 86b479119332..639501970b41 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_MMC) += mmc_core.o
 mmc_core-y := core.o bus.o host.o \
 		   mmc.o mmc_ops.o sd.o sd_ops.o \
 		   sdio.o sdio_ops.o sdio_bus.o \
-		   sdio_cis.o sdio_io.o sdio_irq.o
+		   sdio_cis.o sdio_io.o sdio_irq.o \
+		   quirks.o
 
 mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 150b5f3cd401..1f453acc8682 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -167,8 +167,6 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 
 	WARN_ON(!host->claimed);
 
-	led_trigger_event(host->led, LED_FULL);
-
 	mrq->cmd->error = 0;
 	mrq->cmd->mrq = mrq;
 	if (mrq->data) {
@@ -194,6 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 		}
 	}
 	mmc_host_clk_ungate(host);
+	led_trigger_event(host->led, LED_FULL);
 	host->ops->request(host, mrq);
 }
 
199 198
@@ -528,7 +527,14 @@ int mmc_try_claim_host(struct mmc_host *host)
528} 527}
529EXPORT_SYMBOL(mmc_try_claim_host); 528EXPORT_SYMBOL(mmc_try_claim_host);
530 529
531static void mmc_do_release_host(struct mmc_host *host) 530/**
531 * mmc_do_release_host - release a claimed host
532 * @host: mmc host to release
533 *
534 * If you successfully claimed a host, this function will
535 * release it again.
536 */
537void mmc_do_release_host(struct mmc_host *host)
532{ 538{
533 unsigned long flags; 539 unsigned long flags;
534 540
@@ -543,6 +549,7 @@ static void mmc_do_release_host(struct mmc_host *host)
 		wake_up(&host->wq);
 	}
 }
+EXPORT_SYMBOL(mmc_do_release_host);
 
 void mmc_host_deeper_disable(struct work_struct *work)
 {
@@ -1002,6 +1009,13 @@ static void mmc_power_off(struct mmc_host *host)
 {
 	host->ios.clock = 0;
 	host->ios.vdd = 0;
+
+	/*
+	 * Reset ocr mask to be the highest possible voltage supported for
+	 * this mmc host. This value will be used at next power up.
+	 */
+	host->ocr = 1 << (fls(host->ocr_avail) - 1);
+
 	if (!mmc_host_is_spi(host)) {
 		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
 		host->ios.chip_select = MMC_CS_DONTCARE;
@@ -1495,6 +1509,12 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
 		mmc_hostname(host), __func__, host->f_init);
 #endif
 	mmc_power_up(host);
+
+	/*
+	 * sdio_reset sends CMD52 to reset card. Since we do not know
+	 * if the card is being re-initialized, just send it. CMD52
+	 * should be ignored by SD/eMMC cards.
+	 */
 	sdio_reset(host);
 	mmc_go_idle(host);
 
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index ca1fdde29df6..20b1c0831eac 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -61,6 +61,8 @@ int mmc_attach_mmc(struct mmc_host *host);
 int mmc_attach_sd(struct mmc_host *host);
 int mmc_attach_sdio(struct mmc_host *host);
 
+void mmc_fixup_device(struct mmc_card *card);
+
 /* Module parameters */
 extern int use_spi_crc;
 
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index b3ac6c5bc5c6..461e6a17fb90 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -160,10 +160,7 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
 	 * gate the clock, because there is somebody out there that may still
 	 * be using it.
 	 */
-	if (mmc_card_sdio(card))
-		return false;
-
-	return true;
+	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
 }
 
 /**
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 16006ef153fe..14e95f39a7bf 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -302,6 +302,44 @@ static int mmc_read_ext_csd(struct mmc_card *card)
 	}
 
 	if (card->ext_csd.rev >= 4) {
+		/*
+		 * Enhanced area feature support -- check whether the eMMC
+		 * card has the Enhanced area enabled. If so, export enhanced
+		 * area offset and size to user by adding sysfs interface.
+		 */
+		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
+		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+			u8 hc_erase_grp_sz =
+				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+			u8 hc_wp_grp_sz =
+				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+
+			card->ext_csd.enhanced_area_en = 1;
+			/*
+			 * calculate the enhanced data area offset, in bytes
+			 */
+			card->ext_csd.enhanced_area_offset =
+				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
+				(ext_csd[137] << 8) + ext_csd[136];
+			if (mmc_card_blockaddr(card))
+				card->ext_csd.enhanced_area_offset <<= 9;
+			/*
+			 * calculate the enhanced data area size, in kilobytes
+			 */
+			card->ext_csd.enhanced_area_size =
+				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
+				ext_csd[140];
+			card->ext_csd.enhanced_area_size *=
+				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
+			card->ext_csd.enhanced_area_size <<= 9;
+		} else {
+			/*
+			 * If the enhanced area is not enabled, disable these
+			 * device attributes.
+			 */
+			card->ext_csd.enhanced_area_offset = -EINVAL;
+			card->ext_csd.enhanced_area_size = -EINVAL;
+		}
 		card->ext_csd.sec_trim_mult =
 			ext_csd[EXT_CSD_SEC_TRIM_MULT];
 		card->ext_csd.sec_erase_mult =
@@ -336,6 +374,9 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
+MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
+		card->ext_csd.enhanced_area_offset);
+MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
 
 static struct attribute *mmc_std_attrs[] = {
 	&dev_attr_cid.attr,
@@ -349,6 +390,8 @@ static struct attribute *mmc_std_attrs[] = {
 	&dev_attr_name.attr,
 	&dev_attr_oemid.attr,
 	&dev_attr_serial.attr,
+	&dev_attr_enhanced_area_offset.attr,
+	&dev_attr_enhanced_area_size.attr,
 	NULL,
 };
 
@@ -378,6 +421,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 	int err, ddr = 0;
 	u32 cid[4];
 	unsigned int max_dtr;
+	u32 rocr;
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
@@ -391,7 +435,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 	mmc_go_idle(host);
 
 	/* The extra bit indicates that we support high capacity */
-	err = mmc_send_op_cond(host, ocr | (1 << 30), NULL);
+	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
 	if (err)
 		goto err;
 
@@ -479,11 +523,51 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 		err = mmc_read_ext_csd(card);
 		if (err)
 			goto free_card;
+
+		/* If doing byte addressing, check if required to do sector
+		 * addressing. Handle the case of <2GB cards needing sector
+		 * addressing. See section 8.1 JEDEC Standard JED84-A441;
+		 * ocr register has bit 30 set for sector addressing.
+		 */
+		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
+			mmc_card_set_blockaddr(card);
+
 		/* Erase size depends on CSD and Extended CSD */
 		mmc_set_erase_size(card);
 	}
 
 	/*
+	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
+	 * bit. This bit will be lost everytime after a reset or power off.
+	 */
+	if (card->ext_csd.enhanced_area_en) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_ERASE_GROUP_DEF, 1);
+
+		if (err && err != -EBADMSG)
+			goto free_card;
+
+		if (err) {
+			err = 0;
+			/*
+			 * Just disable enhanced area off & sz
+			 * will try to enable ERASE_GROUP_DEF
+			 * during next time reinit
+			 */
+			card->ext_csd.enhanced_area_offset = -EINVAL;
+			card->ext_csd.enhanced_area_size = -EINVAL;
+		} else {
+			card->ext_csd.erase_group_def = 1;
+			/*
+			 * enable ERASE_GRP_DEF successfully.
+			 * This will affect the erase size, so
+			 * here need to reset erase size
+			 */
+			mmc_set_erase_size(card);
+		}
+	}
+
+	/*
 	 * Activate high speed (if supported)
 	 */
 	if ((card->ext_csd.hs_max_dtr != 0) &&
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
new file mode 100644
index 000000000000..11118b74eb20
--- /dev/null
+++ b/drivers/mmc/core/quirks.c
@@ -0,0 +1,84 @@
+/*
+ * This file contains work-arounds for many known sdio hardware
+ * bugs.
+ *
+ * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
+ * Inspired from pci fixup code:
+ * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mmc/card.h>
+#include <linux/mod_devicetable.h>
+
+/*
+ * The world is not perfect and supplies us with broken mmc/sdio devices.
+ * For at least a part of these bugs we need a work-around
+ */
+
+struct mmc_fixup {
+	u16 vendor, device;	/* You can use SDIO_ANY_ID here of course */
+	void (*vendor_fixup)(struct mmc_card *card, int data);
+	int data;
+};
+
+/*
+ * This hook just adds a quirk unconditionnally
+ */
+static void __maybe_unused add_quirk(struct mmc_card *card, int data)
+{
+	card->quirks |= data;
+}
+
+/*
+ * This hook just removes a quirk unconditionnally
+ */
+static void __maybe_unused remove_quirk(struct mmc_card *card, int data)
+{
+	card->quirks &= ~data;
+}
+
+/*
+ * This hook just adds a quirk for all sdio devices
+ */
+static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
+{
+	if (mmc_card_sdio(card))
+		card->quirks |= data;
+}
+
+#ifndef SDIO_VENDOR_ID_TI
+#define SDIO_VENDOR_ID_TI		0x0097
+#endif
+
+#ifndef SDIO_DEVICE_ID_TI_WL1271
+#define SDIO_DEVICE_ID_TI_WL1271	0x4076
+#endif
+
+static const struct mmc_fixup mmc_fixup_methods[] = {
+	/* by default sdio devices are considered CLK_GATING broken */
+	/* good cards will be whitelisted as they are tested */
+	{ SDIO_ANY_ID, SDIO_ANY_ID,
+		add_quirk_for_sdio_devices, MMC_QUIRK_BROKEN_CLK_GATING },
+	{ SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+		remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING },
+	{ 0 }
+};
+
+void mmc_fixup_device(struct mmc_card *card)
+{
+	const struct mmc_fixup *f;
+
+	for (f = mmc_fixup_methods; f->vendor_fixup; f++) {
+		if ((f->vendor == card->cis.vendor
+		     || f->vendor == (u16) SDIO_ANY_ID) &&
+		    (f->device == card->cis.device
+		     || f->device == (u16) SDIO_ANY_ID)) {
+			dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup);
+			f->vendor_fixup(card, f->data);
+		}
+	}
+}
+EXPORT_SYMBOL(mmc_fixup_device);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d18c32bca99b..6dac89fe0535 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -21,6 +21,7 @@
21#include "core.h" 21#include "core.h"
22#include "bus.h" 22#include "bus.h"
23#include "mmc_ops.h" 23#include "mmc_ops.h"
24#include "sd.h"
24#include "sd_ops.h" 25#include "sd_ops.h"
25 26
26static const unsigned int tran_exp[] = { 27static const unsigned int tran_exp[] = {
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index ebc62ad4cc56..db0f0b44d684 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -395,6 +395,14 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
 		if (err)
 			goto remove;
 
+		/*
+		 * Update oldcard with the new RCA received from the SDIO
+		 * device -- we're doing this so that it's updated in the
+		 * "card" struct when oldcard overwrites that later.
+		 */
+		if (oldcard)
+			oldcard->rca = card->rca;
+
 		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
 	}
 
@@ -458,6 +466,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
 
 		card = oldcard;
 	}
+	mmc_fixup_device(card);
 
 	if (card->type == MMC_TYPE_SD_COMBO) {
 		err = mmc_sd_setup_card(host, card, oldcard != NULL);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 54f91321749a..1a21c6427a19 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -311,7 +311,7 @@ config MMC_MSM
 
 config MMC_MXC
 	tristate "Freescale i.MX2/3 Multimedia Card Interface support"
-	depends on ARCH_MXC
+	depends on MACH_MX21 || MACH_MX27 || ARCH_MX31
 	help
 	  This selects the Freescale i.MX2/3 Multimedia card Interface.
 	  If you have a i.MX platform with a Multimedia Card slot,
@@ -319,6 +319,15 @@ config MMC_MXC
 
 	  If unsure, say N.
 
+config MMC_MXS
+	tristate "Freescale MXS Multimedia Card Interface support"
+	depends on ARCH_MXS && MXS_DMA
+	help
+	  This selects the Freescale SSP MMC controller found on MXS based
+	  platforms like mx23/28.
+
+	  If unsure, say N.
+
 config MMC_TIFM_SD
 	tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e834fb223e9a..30aa6867745f 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
 obj-$(CONFIG_MMC_PXA)		+= pxamci.o
 obj-$(CONFIG_MMC_IMX)		+= imxmmc.o
 obj-$(CONFIG_MMC_MXC)		+= mxcmmc.o
+obj-$(CONFIG_MMC_MXS)		+= mxs-mmc.o
 obj-$(CONFIG_MMC_SDHCI)		+= sdhci.o
 obj-$(CONFIG_MMC_SDHCI_PCI)	+= sdhci-pci.o
 obj-$(CONFIG_MMC_SDHCI_PXA)	+= sdhci-pxa.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index ad2a7a032cdf..80bc9a5c25cc 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -578,7 +578,8 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
 	struct mmc_data *data = host->data;
 
 	if (data)
-		dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+		dma_unmap_sg(host->dma.chan->device->dev,
+			     data->sg, data->sg_len,
 			     ((data->flags & MMC_DATA_WRITE)
 			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
 }
@@ -588,7 +589,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
 	struct dma_chan *chan = host->data_chan;
 
 	if (chan) {
-		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+		dmaengine_terminate_all(chan);
 		atmci_dma_cleanup(host);
 	} else {
 		/* Data transfer was stopped by the interrupt handler */
@@ -684,11 +685,11 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 	else
 		direction = DMA_TO_DEVICE;
 
-	sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction);
-	if (sglen != data->sg_len)
-		goto unmap_exit;
+	sglen = dma_map_sg(chan->device->dev, data->sg,
+			data->sg_len, direction);
+
 	desc = chan->device->device_prep_slave_sg(chan,
-			data->sg, data->sg_len, direction,
+			data->sg, sglen, direction,
 			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc)
 		goto unmap_exit;
@@ -699,7 +700,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 
 	return 0;
 unmap_exit:
-	dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction);
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
 	return -ENOMEM;
 }
 
@@ -709,8 +710,8 @@ static void atmci_submit_data(struct atmel_mci *host)
 	struct dma_async_tx_descriptor *desc = host->dma.data_desc;
 
 	if (chan) {
-		desc->tx_submit(desc);
-		chan->device->device_issue_pending(chan);
+		dmaengine_submit(desc);
+		dma_async_issue_pending(chan);
 	}
 }
 
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 66b4ce587f4b..ce2a47b71dd6 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -205,7 +205,7 @@ static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask)
205 "WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n", 205 "WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n",
206 limit, mask, e, x); 206 limit, mask, e, x);
207#endif 207#endif
208 return 0; 208 return err;
209} 209}
210 210
211static void cb710_mmc_set_transfer_size(struct cb710_slot *slot, 211static void cb710_mmc_set_transfer_size(struct cb710_slot *slot,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 2fcc82577c1b..5a614069cb00 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -32,6 +32,7 @@
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/dw_mmc.h>
 #include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
 
 #include "dw_mmc.h"
 
@@ -562,7 +563,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
 			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 
 		/* enable clock */
-		mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
+		mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE |
+			   SDMMC_CLKEN_LOW_PWR);
 
 		/* inform CIU */
 		mci_send_cmd(slot,
@@ -661,6 +663,7 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
 	struct dw_mci_slot *slot = mmc_priv(mmc);
+	u32 regs;
 
 	/* set default 1 bit mode */
 	slot->ctype = SDMMC_CTYPE_1BIT;
@@ -672,6 +675,16 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	case MMC_BUS_WIDTH_4:
 		slot->ctype = SDMMC_CTYPE_4BIT;
 		break;
+	case MMC_BUS_WIDTH_8:
+		slot->ctype = SDMMC_CTYPE_8BIT;
+		break;
+	}
+
+	/* DDR mode set */
+	if (ios->ddr) {
+		regs = mci_readl(slot->host, UHS_REG);
+		regs |= (0x1 << slot->id) << 16;
+		mci_writel(slot->host, UHS_REG, regs);
 	}
 
 	if (ios->clock) {
@@ -717,7 +730,9 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
 	struct dw_mci_board *brd = slot->host->pdata;
 
 	/* Use platform get_cd function, else try onboard card detect */
-	if (brd->get_cd)
+	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
+		present = 1;
+	else if (brd->get_cd)
 		present = !brd->get_cd(slot->id);
 	else
 		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
@@ -1019,13 +1034,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
 	struct mmc_data *data = host->data;
 	int shift = host->data_shift;
 	u32 status;
-	unsigned int nbytes = 0, len, old_len, count = 0;
+	unsigned int nbytes = 0, len;
 
 	do {
 		len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
-		if (count == 0)
-			old_len = len;
-
 		if (offset + len <= sg->length) {
 			host->pull_data(host, (void *)(buf + offset), len);
 
@@ -1070,7 +1082,6 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
 			tasklet_schedule(&host->tasklet);
 			return;
 		}
-		count++;
 	} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
 	len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
 	host->pio_offset = offset;
@@ -1395,7 +1406,11 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
 	if (host->pdata->setpower)
 		host->pdata->setpower(id, 0);
 
-	mmc->caps = 0;
+	if (host->pdata->caps)
+		mmc->caps = host->pdata->caps;
+	else
+		mmc->caps = 0;
+
 	if (host->pdata->get_bus_wd)
 		if (host->pdata->get_bus_wd(slot->id) >= 4)
 			mmc->caps |= MMC_CAP_4_BIT_DATA;
@@ -1426,6 +1441,13 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
 	}
 #endif /* CONFIG_MMC_DW_IDMAC */
 
+	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
+	if (IS_ERR(host->vmmc)) {
+		printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
+		host->vmmc = NULL;
+	} else
+		regulator_enable(host->vmmc);
+
 	if (dw_mci_get_cd(mmc))
 		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
 	else
@@ -1441,6 +1463,12 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
 	/* Card initially undetected */
 	slot->last_detect_state = 0;
 
+	/*
+	 * Card may have been plugged in prior to boot so we
+	 * need to run the detect tasklet
+	 */
+	tasklet_schedule(&host->card_tasklet);
+
 	return 0;
 }
 
@@ -1619,8 +1647,9 @@ static int dw_mci_probe(struct platform_device *pdev)
 	 */
 	fifo_size = mci_readl(host, FIFOTH);
 	fifo_size = (fifo_size >> 16) & 0x7ff;
-	mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
-				  ((fifo_size/2) << 0)));
+	host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
+			((fifo_size/2) << 0));
+	mci_writel(host, FIFOTH, host->fifoth_val);
 
 	/* disable clock to CIU */
 	mci_writel(host, CLKENA, 0);
@@ -1683,6 +1712,12 @@ err_dmaunmap:
 			  host->sg_cpu, host->sg_dma);
 	iounmap(host->regs);
 
+	if (host->vmmc) {
+		regulator_disable(host->vmmc);
+		regulator_put(host->vmmc);
+	}
+
+
 err_freehost:
 	kfree(host);
 	return ret;
@@ -1714,6 +1749,11 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
 	if (host->use_dma && host->dma_ops->exit)
 		host->dma_ops->exit(host);
 
+	if (host->vmmc) {
+		regulator_disable(host->vmmc);
+		regulator_put(host->vmmc);
+	}
+
 	iounmap(host->regs);
 
 	kfree(host);
@@ -1729,6 +1769,9 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
 	int i, ret;
 	struct dw_mci *host = platform_get_drvdata(pdev);
 
+	if (host->vmmc)
+		regulator_enable(host->vmmc);
+
 	for (i = 0; i < host->num_slots; i++) {
 		struct dw_mci_slot *slot = host->slot[i];
 		if (!slot)
@@ -1744,6 +1787,9 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
 		}
 	}
 
+	if (host->vmmc)
+		regulator_disable(host->vmmc);
+
 	return 0;
 }
 
@@ -1752,6 +1798,23 @@ static int dw_mci_resume(struct platform_device *pdev)
 	int i, ret;
 	struct dw_mci *host = platform_get_drvdata(pdev);
 
+	if (host->dma_ops->init)
+		host->dma_ops->init(host);
+
+	if (!mci_wait_reset(&pdev->dev, host)) {
+		ret = -ENODEV;
+		return ret;
+	}
+
+	/* Restore the old value at FIFOTH register */
+	mci_writel(host, FIFOTH, host->fifoth_val);
+
+	mci_writel(host, RINTSTS, 0xFFFFFFFF);
+	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
+
 	for (i = 0; i < host->num_slots; i++) {
 		struct dw_mci_slot *slot = host->slot[i];
 		if (!slot)
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 5dd55a75233d..23c662af5616 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -43,6 +43,7 @@
 #define SDMMC_USRID		0x068
 #define SDMMC_VERID		0x06c
 #define SDMMC_HCON		0x070
+#define SDMMC_UHS_REG		0x074
 #define SDMMC_BMOD		0x080
 #define SDMMC_PLDMND		0x084
 #define SDMMC_DBADDR		0x088
@@ -51,7 +52,6 @@
 #define SDMMC_DSCADDR		0x094
 #define SDMMC_BUFADDR		0x098
 #define SDMMC_DATA		0x100
-#define SDMMC_DATA_ADR		0x100
 
 /* shift bit field */
 #define _SBF(f, v)		((v) << (f))
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 97c9b3638d57..a4c865a5286b 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -267,14 +267,6 @@ msmsdcc_dma_complete_tlet(unsigned long data)
 	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
 		     host->dma.dir);
 
-	if (host->curr.user_pages) {
-		struct scatterlist *sg = host->dma.sg;
-		int i;
-
-		for (i = 0; i < host->dma.num_ents; i++)
-			flush_dcache_page(sg_page(sg++));
-	}
-
 	host->dma.sg = NULL;
 	host->dma.busy = 0;
 
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 4428594261c5..cc20e0259325 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -32,16 +32,14 @@
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
 
 #include <asm/dma.h>
 #include <asm/irq.h>
 #include <asm/sizes.h>
 #include <mach/mmc.h>
 
-#ifdef CONFIG_ARCH_MX2
-#include <mach/dma-mx1-mx2.h>
-#define HAS_DMA
-#endif
+#include <mach/dma.h>
 
 #define DRIVER_NAME "mxc-mmc"
 
@@ -118,7 +116,8 @@ struct mxcmci_host {
 	void __iomem *base;
 	int irq;
 	int detect_irq;
-	int dma;
+	struct dma_chan *dma;
+	struct dma_async_tx_descriptor *desc;
 	int do_dma;
 	int default_irq_mask;
 	int use_sdio;
@@ -129,7 +128,6 @@ struct mxcmci_host {
 	struct mmc_command *cmd;
 	struct mmc_data *data;
 
-	unsigned int dma_nents;
 	unsigned int datasize;
 	unsigned int dma_dir;
 
@@ -144,6 +142,11 @@ struct mxcmci_host {
 	spinlock_t lock;
 
 	struct regulator *vcc;
+
+	int burstlen;
+	int dmareq;
+	struct dma_slave_config dma_slave_config;
+	struct imx_dma_data dma_data;
 };
 
 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -206,17 +209,16 @@ static void mxcmci_softreset(struct mxcmci_host *host)
 
 	writew(0xff, host->base + MMC_REG_RES_TO);
 }
+static int mxcmci_setup_dma(struct mmc_host *mmc);
 
 static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 {
 	unsigned int nob = data->blocks;
 	unsigned int blksz = data->blksz;
 	unsigned int datasize = nob * blksz;
-#ifdef HAS_DMA
 	struct scatterlist *sg;
-	int i;
-	int ret;
-#endif
+	int i, nents;
+
 	if (data->flags & MMC_DATA_STREAM)
 		nob = 0xffff;
 
@@ -227,7 +229,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 	writew(blksz, host->base + MMC_REG_BLK_LEN);
 	host->datasize = datasize;
 
-#ifdef HAS_DMA
+	if (!mxcmci_use_dma(host))
+		return 0;
+
 	for_each_sg(data->sg, sg, data->sg_len, i) {
 		if (sg->offset & 3 || sg->length & 3) {
 			host->do_dma = 0;
@@ -235,34 +239,30 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 		}
 	}
 
-	if (data->flags & MMC_DATA_READ) {
+	if (data->flags & MMC_DATA_READ)
 		host->dma_dir = DMA_FROM_DEVICE;
-		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
-					     data->sg_len, host->dma_dir);
-
-		ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
-				datasize,
-				host->res->start + MMC_REG_BUFFER_ACCESS,
-				DMA_MODE_READ);
-	} else {
+	else
 		host->dma_dir = DMA_TO_DEVICE;
-		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
-					     data->sg_len, host->dma_dir);
 
-		ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
-				datasize,
-				host->res->start + MMC_REG_BUFFER_ACCESS,
-				DMA_MODE_WRITE);
-	}
+	nents = dma_map_sg(host->dma->device->dev, data->sg,
+				     data->sg_len, host->dma_dir);
+	if (nents != data->sg_len)
+		return -EINVAL;
+
+	host->desc = host->dma->device->device_prep_slave_sg(host->dma,
+		data->sg, data->sg_len, host->dma_dir,
+		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
-	if (ret) {
-		dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret);
-		return ret;
+	if (!host->desc) {
+		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
+				host->dma_dir);
+		host->do_dma = 0;
+		return 0; /* Fall back to PIO */
 	}
 	wmb();
 
-	imx_dma_enable(host->dma);
-#endif /* HAS_DMA */
+	dmaengine_submit(host->desc);
+
 	return 0;
 }
 
@@ -337,13 +337,11 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
 	struct mmc_data *data = host->data;
 	int data_error;
 
-#ifdef HAS_DMA
 	if (mxcmci_use_dma(host)) {
-		imx_dma_disable(host->dma);
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
+		dmaengine_terminate_all(host->dma);
+		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 			host->dma_dir);
 	}
-#endif
 
 	if (stat & STATUS_ERR_MASK) {
 		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
@@ -545,7 +543,6 @@ static void mxcmci_datawork(struct work_struct *work)
 	}
 }
 
-#ifdef HAS_DMA
 static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
 {
 	struct mmc_data *data = host->data;
@@ -568,7 +565,6 @@ static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
 		mxcmci_finish_request(host, host->req);
 	}
 }
-#endif /* HAS_DMA */
 
 static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
 {
@@ -606,12 +602,10 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
 	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
 	spin_unlock_irqrestore(&host->lock, flags);
 
-#ifdef HAS_DMA
 	if (mxcmci_use_dma(host) &&
 	    (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
 		writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
 			host->base + MMC_REG_STATUS);
-#endif
 
 	if (sdio_irq) {
 		writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
@@ -621,14 +615,14 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
 	if (stat & STATUS_END_CMD_RESP)
 		mxcmci_cmd_done(host, stat);
 
-#ifdef HAS_DMA
 	if (mxcmci_use_dma(host) &&
 	    (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
 		mxcmci_data_done(host, stat);
-#endif
+
 	if (host->default_irq_mask &&
 	    (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
 		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+
 	return IRQ_HANDLED;
 }
 
@@ -642,9 +636,10 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
642 636
643 host->req = req; 637 host->req = req;
644 host->cmdat &= ~CMD_DAT_CONT_INIT; 638 host->cmdat &= ~CMD_DAT_CONT_INIT;
645#ifdef HAS_DMA 639
646 host->do_dma = 1; 640 if (host->dma)
647#endif 641 host->do_dma = 1;
642
648 if (req->data) { 643 if (req->data) {
649 error = mxcmci_setup_data(host, req->data); 644 error = mxcmci_setup_data(host, req->data);
650 if (error) { 645 if (error) {
@@ -660,6 +655,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
660 } 655 }
661 656
662 error = mxcmci_start_cmd(host, req->cmd, cmdat); 657 error = mxcmci_start_cmd(host, req->cmd, cmdat);
658
663out: 659out:
664 if (error) 660 if (error)
665 mxcmci_finish_request(host, req); 661 mxcmci_finish_request(host, req);
@@ -698,22 +694,46 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
698 prescaler, divider, clk_in, clk_ios); 694 prescaler, divider, clk_in, clk_ios);
699} 695}
700 696
697static int mxcmci_setup_dma(struct mmc_host *mmc)
698{
699 struct mxcmci_host *host = mmc_priv(mmc);
700 struct dma_slave_config *config = &host->dma_slave_config;
701
702 config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
703 config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
704 config->dst_addr_width = 4;
705 config->src_addr_width = 4;
706 config->dst_maxburst = host->burstlen;
707 config->src_maxburst = host->burstlen;
708
709 return dmaengine_slave_config(host->dma, config);
710}
711
701static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 712static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
702{ 713{
703 struct mxcmci_host *host = mmc_priv(mmc); 714 struct mxcmci_host *host = mmc_priv(mmc);
704#ifdef HAS_DMA 715 int burstlen, ret;
705 unsigned int blen; 716
706 /* 717 /*
707 * use burstlen of 64 in 4 bit mode (--> reg value 0) 718 * use burstlen of 64 in 4 bit mode (--> reg value 0)
708 * use burstlen of 16 in 1 bit mode (--> reg value 16) 719 * use burstlen of 16 in 1 bit mode (--> reg value 16)
709 */ 720 */
710 if (ios->bus_width == MMC_BUS_WIDTH_4) 721 if (ios->bus_width == MMC_BUS_WIDTH_4)
711 blen = 0; 722 burstlen = 64;
712 else 723 else
713 blen = 16; 724 burstlen = 16;
725
726 if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
727 host->burstlen = burstlen;
728 ret = mxcmci_setup_dma(mmc);
729 if (ret) {
730 dev_err(mmc_dev(host->mmc),
731 "failed to config DMA channel. Falling back to PIO\n");
732 dma_release_channel(host->dma);
733 host->do_dma = 0;
734 }
735 }
714 736
715 imx_dma_config_burstlen(host->dma, blen);
716#endif
717 if (ios->bus_width == MMC_BUS_WIDTH_4) 737 if (ios->bus_width == MMC_BUS_WIDTH_4)
718 host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; 738 host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
719 else 739 else
@@ -794,6 +814,18 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
794 host->caps |= MMC_CAP_4_BIT_DATA; 814 host->caps |= MMC_CAP_4_BIT_DATA;
795} 815}
796 816
817static bool filter(struct dma_chan *chan, void *param)
818{
819 struct mxcmci_host *host = param;
820
821 if (!imx_dma_is_general_purpose(chan))
822 return false;
823
824 chan->private = &host->dma_data;
825
826 return true;
827}
828
797static const struct mmc_host_ops mxcmci_ops = { 829static const struct mmc_host_ops mxcmci_ops = {
798 .request = mxcmci_request, 830 .request = mxcmci_request,
799 .set_ios = mxcmci_set_ios, 831 .set_ios = mxcmci_set_ios,
@@ -808,6 +840,7 @@ static int mxcmci_probe(struct platform_device *pdev)
808 struct mxcmci_host *host = NULL; 840 struct mxcmci_host *host = NULL;
809 struct resource *iores, *r; 841 struct resource *iores, *r;
810 int ret = 0, irq; 842 int ret = 0, irq;
843 dma_cap_mask_t mask;
811 844
812 printk(KERN_INFO "i.MX SDHC driver\n"); 845 printk(KERN_INFO "i.MX SDHC driver\n");
813 846
@@ -883,29 +916,23 @@ static int mxcmci_probe(struct platform_device *pdev)
883 916
884 writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR); 917 writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR);
885 918
886#ifdef HAS_DMA
887 host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
888 if (host->dma < 0) {
889 dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
890 ret = -EBUSY;
891 goto out_clk_put;
892 }
893
894 r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 919 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
895 if (!r) { 920 if (r) {
896 ret = -EINVAL; 921 host->dmareq = r->start;
897 goto out_free_dma; 922 host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
923 host->dma_data.priority = DMA_PRIO_LOW;
924 host->dma_data.dma_request = host->dmareq;
925 dma_cap_zero(mask);
926 dma_cap_set(DMA_SLAVE, mask);
927 host->dma = dma_request_channel(mask, filter, host);
928 if (host->dma)
929 mmc->max_seg_size = dma_get_max_seg_size(
930 host->dma->device->dev);
898 } 931 }
899 932
900 ret = imx_dma_config_channel(host->dma, 933 if (!host->dma)
901 IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_FIFO, 934 dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
902 IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, 935
903 r->start, 0);
904 if (ret) {
905 dev_err(mmc_dev(host->mmc), "failed to config DMA channel\n");
906 goto out_free_dma;
907 }
908#endif
909 INIT_WORK(&host->datawork, mxcmci_datawork); 936 INIT_WORK(&host->datawork, mxcmci_datawork);
910 937
911 ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host); 938 ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
@@ -928,9 +955,8 @@ static int mxcmci_probe(struct platform_device *pdev)
928out_free_irq: 955out_free_irq:
929 free_irq(host->irq, host); 956 free_irq(host->irq, host);
930out_free_dma: 957out_free_dma:
931#ifdef HAS_DMA 958 if (host->dma)
932 imx_dma_free(host->dma); 959 dma_release_channel(host->dma);
933#endif
934out_clk_put: 960out_clk_put:
935 clk_disable(host->clk); 961 clk_disable(host->clk);
936 clk_put(host->clk); 962 clk_put(host->clk);
@@ -960,9 +986,10 @@ static int mxcmci_remove(struct platform_device *pdev)
960 986
961 free_irq(host->irq, host); 987 free_irq(host->irq, host);
962 iounmap(host->base); 988 iounmap(host->base);
963#ifdef HAS_DMA 989
964 imx_dma_free(host->dma); 990 if (host->dma)
965#endif 991 dma_release_channel(host->dma);
992
966 clk_disable(host->clk); 993 clk_disable(host->clk);
967 clk_put(host->clk); 994 clk_put(host->clk);
968 995
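The mxcmmc hunks above convert the driver from the legacy i.MX-specific DMA calls (imx_dma_request_by_prio, imx_dma_config_channel, imx_dma_enable) to the generic dmaengine slave API. A minimal sketch of that flow, assuming a channel was already obtained with dma_request_channel() and a filter like the one added above; the function and parameter names here (start_slave_dma, cfg) are placeholders, not part of the driver:

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Sketch only: the generic dmaengine slave sequence the converted driver follows. */
	static int start_slave_dma(struct dma_chan *chan, struct dma_slave_config *cfg,
				   struct scatterlist *sg, unsigned int sg_len,
				   enum dma_data_direction dir)
	{
		struct dma_async_tx_descriptor *desc;

		/* program FIFO address, word width and burst length for the peripheral side */
		if (dmaengine_slave_config(chan, cfg))
			return -EINVAL;

		/* build a descriptor for an already dma_map_sg()-mapped scatterlist */
		desc = chan->device->device_prep_slave_sg(chan, sg, sg_len, dir,
						DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			return -EBUSY;		/* caller may fall back to PIO */

		dmaengine_submit(desc);		/* queue the descriptor ... */
		dma_async_issue_pending(chan);	/* ... and start the engine */
		return 0;
	}

In the same spirit, mxcmci_setup_data() above does not fail hard when no descriptor can be prepared: it unmaps the scatterlist, clears do_dma and returns 0 so the request proceeds via PIO.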
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
new file mode 100644
index 000000000000..99d39a6a1032
--- /dev/null
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -0,0 +1,874 @@
1/*
2 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
3 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
4 *
5 * Copyright 2008 Embedded Alley Solutions, Inc.
6 * Copyright 2009-2011 Freescale Semiconductor, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/platform_device.h>
27#include <linux/delay.h>
28#include <linux/interrupt.h>
29#include <linux/dma-mapping.h>
30#include <linux/dmaengine.h>
31#include <linux/highmem.h>
32#include <linux/clk.h>
33#include <linux/err.h>
34#include <linux/completion.h>
35#include <linux/mmc/host.h>
36#include <linux/mmc/mmc.h>
37#include <linux/mmc/sdio.h>
38#include <linux/gpio.h>
39#include <linux/regulator/consumer.h>
40
41#include <mach/mxs.h>
42#include <mach/common.h>
43#include <mach/dma.h>
44#include <mach/mmc.h>
45
46#define DRIVER_NAME "mxs-mmc"
47
48/* card detect polling timeout */
49#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
50
51#define SSP_VERSION_LATEST 4
52#define ssp_is_old() (host->version < SSP_VERSION_LATEST)
53
54/* SSP registers */
55#define HW_SSP_CTRL0 0x000
56#define BM_SSP_CTRL0_RUN (1 << 29)
57#define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28)
58#define BM_SSP_CTRL0_IGNORE_CRC (1 << 26)
59#define BM_SSP_CTRL0_READ (1 << 25)
60#define BM_SSP_CTRL0_DATA_XFER (1 << 24)
61#define BP_SSP_CTRL0_BUS_WIDTH (22)
62#define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22)
63#define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21)
64#define BM_SSP_CTRL0_LONG_RESP (1 << 19)
65#define BM_SSP_CTRL0_GET_RESP (1 << 17)
66#define BM_SSP_CTRL0_ENABLE (1 << 16)
67#define BP_SSP_CTRL0_XFER_COUNT (0)
68#define BM_SSP_CTRL0_XFER_COUNT (0xffff)
69#define HW_SSP_CMD0 0x010
70#define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25)
71#define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22)
72#define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21)
73#define BM_SSP_CMD0_APPEND_8CYC (1 << 20)
74#define BP_SSP_CMD0_BLOCK_SIZE (16)
75#define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16)
76#define BP_SSP_CMD0_BLOCK_COUNT (8)
77#define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8)
78#define BP_SSP_CMD0_CMD (0)
79#define BM_SSP_CMD0_CMD (0xff)
80#define HW_SSP_CMD1 0x020
81#define HW_SSP_XFER_SIZE 0x030
82#define HW_SSP_BLOCK_SIZE 0x040
83#define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4)
84#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
85#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0)
86#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf)
87#define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070)
88#define BP_SSP_TIMING_TIMEOUT (16)
89#define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
90#define BP_SSP_TIMING_CLOCK_DIVIDE (8)
91#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
92#define BP_SSP_TIMING_CLOCK_RATE (0)
93#define BM_SSP_TIMING_CLOCK_RATE (0xff)
94#define HW_SSP_CTRL1 (ssp_is_old() ? 0x060 : 0x080)
95#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
96#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
97#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
98#define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28)
99#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27)
100#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26)
101#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25)
102#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24)
103#define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23)
104#define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22)
105#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21)
106#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20)
107#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17)
108#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16)
109#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15)
110#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14)
111#define BM_SSP_CTRL1_DMA_ENABLE (1 << 13)
112#define BM_SSP_CTRL1_POLARITY (1 << 9)
113#define BP_SSP_CTRL1_WORD_LENGTH (4)
114#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
115#define BP_SSP_CTRL1_SSP_MODE (0)
116#define BM_SSP_CTRL1_SSP_MODE (0xf)
117#define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0)
118#define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0)
119#define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0)
120#define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0)
121#define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100)
122#define BM_SSP_STATUS_CARD_DETECT (1 << 28)
123#define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
124#define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130)
125#define BP_SSP_VERSION_MAJOR (24)
126
127#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
128
129#define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \
130 BM_SSP_CTRL1_RESP_ERR_IRQ | \
131 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
132 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
133 BM_SSP_CTRL1_DATA_CRC_IRQ | \
134 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
135 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
136 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
137
138#define SSP_PIO_NUM 3
139
140struct mxs_mmc_host {
141 struct mmc_host *mmc;
142 struct mmc_request *mrq;
143 struct mmc_command *cmd;
144 struct mmc_data *data;
145
146 void __iomem *base;
147 int irq;
148 struct resource *res;
149 struct resource *dma_res;
150 struct clk *clk;
151 unsigned int clk_rate;
152
153 struct dma_chan *dmach;
154 struct mxs_dma_data dma_data;
155 unsigned int dma_dir;
156 u32 ssp_pio_words[SSP_PIO_NUM];
157
158 unsigned int version;
159 unsigned char bus_width;
160 spinlock_t lock;
161 int sdio_irq_en;
162};
163
164static int mxs_mmc_get_ro(struct mmc_host *mmc)
165{
166 struct mxs_mmc_host *host = mmc_priv(mmc);
167 struct mxs_mmc_platform_data *pdata =
168 mmc_dev(host->mmc)->platform_data;
169
170 if (!pdata)
171 return -EFAULT;
172
173 if (!gpio_is_valid(pdata->wp_gpio))
174 return -EINVAL;
175
176 return gpio_get_value(pdata->wp_gpio);
177}
178
179static int mxs_mmc_get_cd(struct mmc_host *mmc)
180{
181 struct mxs_mmc_host *host = mmc_priv(mmc);
182
183 return !(readl(host->base + HW_SSP_STATUS) &
184 BM_SSP_STATUS_CARD_DETECT);
185}
186
187static void mxs_mmc_reset(struct mxs_mmc_host *host)
188{
189 u32 ctrl0, ctrl1;
190
191 mxs_reset_block(host->base);
192
193 ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
194 ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
195 BF_SSP(0x7, CTRL1_WORD_LENGTH) |
196 BM_SSP_CTRL1_DMA_ENABLE |
197 BM_SSP_CTRL1_POLARITY |
198 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
199 BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
200 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
201 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
202 BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
203
204 writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
205 BF_SSP(2, TIMING_CLOCK_DIVIDE) |
206 BF_SSP(0, TIMING_CLOCK_RATE),
207 host->base + HW_SSP_TIMING);
208
209 if (host->sdio_irq_en) {
210 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
211 ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
212 }
213
214 writel(ctrl0, host->base + HW_SSP_CTRL0);
215 writel(ctrl1, host->base + HW_SSP_CTRL1);
216}
217
218static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
219 struct mmc_command *cmd);
220
221static void mxs_mmc_request_done(struct mxs_mmc_host *host)
222{
223 struct mmc_command *cmd = host->cmd;
224 struct mmc_data *data = host->data;
225 struct mmc_request *mrq = host->mrq;
226
227 if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
228 if (mmc_resp_type(cmd) & MMC_RSP_136) {
229 cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0);
230 cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1);
231 cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2);
232 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3);
233 } else {
234 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0);
235 }
236 }
237
238 if (data) {
239 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
240 data->sg_len, host->dma_dir);
241 /*
242 * If there was an error on any block, we mark all
243 * data blocks as being in error.
244 */
245 if (!data->error)
246 data->bytes_xfered = data->blocks * data->blksz;
247 else
248 data->bytes_xfered = 0;
249
250 host->data = NULL;
251 if (mrq->stop) {
252 mxs_mmc_start_cmd(host, mrq->stop);
253 return;
254 }
255 }
256
257 host->mrq = NULL;
258 mmc_request_done(host->mmc, mrq);
259}
260
261static void mxs_mmc_dma_irq_callback(void *param)
262{
263 struct mxs_mmc_host *host = param;
264
265 mxs_mmc_request_done(host);
266}
267
268static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
269{
270 struct mxs_mmc_host *host = dev_id;
271 struct mmc_command *cmd = host->cmd;
272 struct mmc_data *data = host->data;
273 u32 stat;
274
275 spin_lock(&host->lock);
276
277 stat = readl(host->base + HW_SSP_CTRL1);
278 writel(stat & MXS_MMC_IRQ_BITS,
279 host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
280
281 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
282 mmc_signal_sdio_irq(host->mmc);
283
284 spin_unlock(&host->lock);
285
286 if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
287 cmd->error = -ETIMEDOUT;
288 else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
289 cmd->error = -EIO;
290
291 if (data) {
292 if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
293 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
294 data->error = -ETIMEDOUT;
295 else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
296 data->error = -EILSEQ;
297 else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
298 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
299 data->error = -EIO;
300 }
301
302 return IRQ_HANDLED;
303}
304
305static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
306 struct mxs_mmc_host *host, unsigned int append)
307{
308 struct dma_async_tx_descriptor *desc;
309 struct mmc_data *data = host->data;
310 struct scatterlist * sgl;
311 unsigned int sg_len;
312
313 if (data) {
314 /* data */
315 dma_map_sg(mmc_dev(host->mmc), data->sg,
316 data->sg_len, host->dma_dir);
317 sgl = data->sg;
318 sg_len = data->sg_len;
319 } else {
320 /* pio */
321 sgl = (struct scatterlist *) host->ssp_pio_words;
322 sg_len = SSP_PIO_NUM;
323 }
324
325 desc = host->dmach->device->device_prep_slave_sg(host->dmach,
326 sgl, sg_len, host->dma_dir, append);
327 if (desc) {
328 desc->callback = mxs_mmc_dma_irq_callback;
329 desc->callback_param = host;
330 } else {
331 if (data)
332 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
333 data->sg_len, host->dma_dir);
334 }
335
336 return desc;
337}
338
339static void mxs_mmc_bc(struct mxs_mmc_host *host)
340{
341 struct mmc_command *cmd = host->cmd;
342 struct dma_async_tx_descriptor *desc;
343 u32 ctrl0, cmd0, cmd1;
344
345 ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
346 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
347 cmd1 = cmd->arg;
348
349 if (host->sdio_irq_en) {
350 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
351 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
352 }
353
354 host->ssp_pio_words[0] = ctrl0;
355 host->ssp_pio_words[1] = cmd0;
356 host->ssp_pio_words[2] = cmd1;
357 host->dma_dir = DMA_NONE;
358 desc = mxs_mmc_prep_dma(host, 0);
359 if (!desc)
360 goto out;
361
362 dmaengine_submit(desc);
363 return;
364
365out:
366 dev_warn(mmc_dev(host->mmc),
367 "%s: failed to prep dma\n", __func__);
368}
369
370static void mxs_mmc_ac(struct mxs_mmc_host *host)
371{
372 struct mmc_command *cmd = host->cmd;
373 struct dma_async_tx_descriptor *desc;
374 u32 ignore_crc, get_resp, long_resp;
375 u32 ctrl0, cmd0, cmd1;
376
377 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
378 0 : BM_SSP_CTRL0_IGNORE_CRC;
379 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
380 BM_SSP_CTRL0_GET_RESP : 0;
381 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
382 BM_SSP_CTRL0_LONG_RESP : 0;
383
384 ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
385 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
386 cmd1 = cmd->arg;
387
388 if (host->sdio_irq_en) {
389 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
390 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
391 }
392
393 host->ssp_pio_words[0] = ctrl0;
394 host->ssp_pio_words[1] = cmd0;
395 host->ssp_pio_words[2] = cmd1;
396 host->dma_dir = DMA_NONE;
397 desc = mxs_mmc_prep_dma(host, 0);
398 if (!desc)
399 goto out;
400
401 dmaengine_submit(desc);
402 return;
403
404out:
405 dev_warn(mmc_dev(host->mmc),
406 "%s: failed to prep dma\n", __func__);
407}
408
409static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
410{
411 const unsigned int ssp_timeout_mul = 4096;
412 /*
413 * Calculate ticks in ms since ns are large numbers
414 * and might overflow
415 */
416 const unsigned int clock_per_ms = clock_rate / 1000;
417 const unsigned int ms = ns / 1000;
418 const unsigned int ticks = ms * clock_per_ms;
419 const unsigned int ssp_ticks = ticks / ssp_timeout_mul;
420
421 WARN_ON(ssp_ticks == 0);
422 return ssp_ticks;
423}
424
425static void mxs_mmc_adtc(struct mxs_mmc_host *host)
426{
427 struct mmc_command *cmd = host->cmd;
428 struct mmc_data *data = cmd->data;
429 struct dma_async_tx_descriptor *desc;
430 struct scatterlist *sgl = data->sg, *sg;
431 unsigned int sg_len = data->sg_len;
432 int i;
433
434 unsigned short dma_data_dir, timeout;
435 unsigned int data_size = 0, log2_blksz;
436 unsigned int blocks = data->blocks;
437
438 u32 ignore_crc, get_resp, long_resp, read;
439 u32 ctrl0, cmd0, cmd1, val;
440
441 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
442 0 : BM_SSP_CTRL0_IGNORE_CRC;
443 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
444 BM_SSP_CTRL0_GET_RESP : 0;
445 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
446 BM_SSP_CTRL0_LONG_RESP : 0;
447
448 if (data->flags & MMC_DATA_WRITE) {
449 dma_data_dir = DMA_TO_DEVICE;
450 read = 0;
451 } else {
452 dma_data_dir = DMA_FROM_DEVICE;
453 read = BM_SSP_CTRL0_READ;
454 }
455
456 ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
457 ignore_crc | get_resp | long_resp |
458 BM_SSP_CTRL0_DATA_XFER | read |
459 BM_SSP_CTRL0_WAIT_FOR_IRQ |
460 BM_SSP_CTRL0_ENABLE;
461
462 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
463
464 /* get logarithm to base 2 of block size for setting register */
465 log2_blksz = ilog2(data->blksz);
466
467 /*
468 * take special care of the case that data size from data->sg
469 * is not equal to blocks x blksz
470 */
471 for_each_sg(sgl, sg, sg_len, i)
472 data_size += sg->length;
473
474 if (data_size != data->blocks * data->blksz)
475 blocks = 1;
476
477 /* xfer count, block size and count need to be set differently */
478 if (ssp_is_old()) {
479 ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
480 cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
481 BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
482 } else {
483 writel(data_size, host->base + HW_SSP_XFER_SIZE);
484 writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
485 BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
486 host->base + HW_SSP_BLOCK_SIZE);
487 }
488
489 if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
490 (cmd->opcode == SD_IO_RW_EXTENDED))
491 cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
492
493 cmd1 = cmd->arg;
494
495 if (host->sdio_irq_en) {
496 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
497 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
498 }
499
500 /* set the timeout count */
501 timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
502 val = readl(host->base + HW_SSP_TIMING);
503 val &= ~(BM_SSP_TIMING_TIMEOUT);
504 val |= BF_SSP(timeout, TIMING_TIMEOUT);
505 writel(val, host->base + HW_SSP_TIMING);
506
507 /* pio */
508 host->ssp_pio_words[0] = ctrl0;
509 host->ssp_pio_words[1] = cmd0;
510 host->ssp_pio_words[2] = cmd1;
511 host->dma_dir = DMA_NONE;
512 desc = mxs_mmc_prep_dma(host, 0);
513 if (!desc)
514 goto out;
515
516 /* append data sg */
517 WARN_ON(host->data != NULL);
518 host->data = data;
519 host->dma_dir = dma_data_dir;
520 desc = mxs_mmc_prep_dma(host, 1);
521 if (!desc)
522 goto out;
523
524 dmaengine_submit(desc);
525 return;
526out:
527 dev_warn(mmc_dev(host->mmc),
528 "%s: failed to prep dma\n", __func__);
529}
530
531static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
532 struct mmc_command *cmd)
533{
534 host->cmd = cmd;
535
536 switch (mmc_cmd_type(cmd)) {
537 case MMC_CMD_BC:
538 mxs_mmc_bc(host);
539 break;
540 case MMC_CMD_BCR:
541 mxs_mmc_ac(host);
542 break;
543 case MMC_CMD_AC:
544 mxs_mmc_ac(host);
545 break;
546 case MMC_CMD_ADTC:
547 mxs_mmc_adtc(host);
548 break;
549 default:
550 dev_warn(mmc_dev(host->mmc),
551 "%s: unknown MMC command\n", __func__);
552 break;
553 }
554}
555
556static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
557{
558 struct mxs_mmc_host *host = mmc_priv(mmc);
559
560 WARN_ON(host->mrq != NULL);
561 host->mrq = mrq;
562 mxs_mmc_start_cmd(host, mrq->cmd);
563}
564
565static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
566{
567 unsigned int ssp_rate, bit_rate;
568 u32 div1, div2;
569 u32 val;
570
571 ssp_rate = clk_get_rate(host->clk);
572
573 for (div1 = 2; div1 < 254; div1 += 2) {
574 div2 = ssp_rate / rate / div1;
575 if (div2 < 0x100)
576 break;
577 }
578
579 if (div1 >= 254) {
580 dev_err(mmc_dev(host->mmc),
581 "%s: cannot set clock to %d\n", __func__, rate);
582 return;
583 }
584
585 if (div2 == 0)
586 bit_rate = ssp_rate / div1;
587 else
588 bit_rate = ssp_rate / div1 / div2;
589
590 val = readl(host->base + HW_SSP_TIMING);
591 val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
592 val |= BF_SSP(div1, TIMING_CLOCK_DIVIDE);
593 val |= BF_SSP(div2 - 1, TIMING_CLOCK_RATE);
594 writel(val, host->base + HW_SSP_TIMING);
595
596 host->clk_rate = bit_rate;
597
598 dev_dbg(mmc_dev(host->mmc),
599 "%s: div1 %d, div2 %d, ssp %d, bit %d, rate %d\n",
600 __func__, div1, div2, ssp_rate, bit_rate, rate);
601}
602
603static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
604{
605 struct mxs_mmc_host *host = mmc_priv(mmc);
606
607 if (ios->bus_width == MMC_BUS_WIDTH_8)
608 host->bus_width = 2;
609 else if (ios->bus_width == MMC_BUS_WIDTH_4)
610 host->bus_width = 1;
611 else
612 host->bus_width = 0;
613
614 if (ios->clock)
615 mxs_mmc_set_clk_rate(host, ios->clock);
616}
617
618static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
619{
620 struct mxs_mmc_host *host = mmc_priv(mmc);
621 unsigned long flags;
622
623 spin_lock_irqsave(&host->lock, flags);
624
625 host->sdio_irq_en = enable;
626
627 if (enable) {
628 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
629 host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
630 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
631 host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
632
633 if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
634 mmc_signal_sdio_irq(host->mmc);
635
636 } else {
637 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
638 host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
639 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
640 host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
641 }
642
643 spin_unlock_irqrestore(&host->lock, flags);
644}
645
646static const struct mmc_host_ops mxs_mmc_ops = {
647 .request = mxs_mmc_request,
648 .get_ro = mxs_mmc_get_ro,
649 .get_cd = mxs_mmc_get_cd,
650 .set_ios = mxs_mmc_set_ios,
651 .enable_sdio_irq = mxs_mmc_enable_sdio_irq,
652};
653
654static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
655{
656 struct mxs_mmc_host *host = param;
657
658 if (!mxs_dma_is_apbh(chan))
659 return false;
660
661 if (chan->chan_id != host->dma_res->start)
662 return false;
663
664 chan->private = &host->dma_data;
665
666 return true;
667}
668
669static int mxs_mmc_probe(struct platform_device *pdev)
670{
671 struct mxs_mmc_host *host;
672 struct mmc_host *mmc;
673 struct resource *iores, *dmares, *r;
674 struct mxs_mmc_platform_data *pdata;
675 int ret = 0, irq_err, irq_dma;
676 dma_cap_mask_t mask;
677
678 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
679 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
680 irq_err = platform_get_irq(pdev, 0);
681 irq_dma = platform_get_irq(pdev, 1);
682 if (!iores || !dmares || irq_err < 0 || irq_dma < 0)
683 return -EINVAL;
684
685 r = request_mem_region(iores->start, resource_size(iores), pdev->name);
686 if (!r)
687 return -EBUSY;
688
689 mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
690 if (!mmc) {
691 ret = -ENOMEM;
692 goto out_release_mem;
693 }
694
695 host = mmc_priv(mmc);
696 host->base = ioremap(r->start, resource_size(r));
697 if (!host->base) {
698 ret = -ENOMEM;
699 goto out_mmc_free;
700 }
701
 702 /* only the major version matters */
703 host->version = readl(host->base + HW_SSP_VERSION) >>
704 BP_SSP_VERSION_MAJOR;
705
706 host->mmc = mmc;
707 host->res = r;
708 host->dma_res = dmares;
709 host->irq = irq_err;
710 host->sdio_irq_en = 0;
711
712 host->clk = clk_get(&pdev->dev, NULL);
713 if (IS_ERR(host->clk)) {
714 ret = PTR_ERR(host->clk);
715 goto out_iounmap;
716 }
717 clk_enable(host->clk);
718
719 mxs_mmc_reset(host);
720
721 dma_cap_zero(mask);
722 dma_cap_set(DMA_SLAVE, mask);
723 host->dma_data.chan_irq = irq_dma;
724 host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
725 if (!host->dmach) {
726 dev_err(mmc_dev(host->mmc),
727 "%s: failed to request dma\n", __func__);
728 goto out_clk_put;
729 }
730
731 /* set mmc core parameters */
732 mmc->ops = &mxs_mmc_ops;
733 mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
734 MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
735
736 pdata = mmc_dev(host->mmc)->platform_data;
737 if (pdata) {
738 if (pdata->flags & SLOTF_8_BIT_CAPABLE)
739 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
740 if (pdata->flags & SLOTF_4_BIT_CAPABLE)
741 mmc->caps |= MMC_CAP_4_BIT_DATA;
742 }
743
744 mmc->f_min = 400000;
745 mmc->f_max = 288000000;
746 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
747
748 mmc->max_segs = 52;
749 mmc->max_blk_size = 1 << 0xf;
750 mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff;
751 mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff;
752 mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
753
754 platform_set_drvdata(pdev, mmc);
755
756 ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
757 if (ret)
758 goto out_free_dma;
759
760 spin_lock_init(&host->lock);
761
762 ret = mmc_add_host(mmc);
763 if (ret)
764 goto out_free_irq;
765
766 dev_info(mmc_dev(host->mmc), "initialized\n");
767
768 return 0;
769
770out_free_irq:
771 free_irq(host->irq, host);
772out_free_dma:
773 if (host->dmach)
774 dma_release_channel(host->dmach);
775out_clk_put:
776 clk_disable(host->clk);
777 clk_put(host->clk);
778out_iounmap:
779 iounmap(host->base);
780out_mmc_free:
781 mmc_free_host(mmc);
782out_release_mem:
783 release_mem_region(iores->start, resource_size(iores));
784 return ret;
785}
786
787static int mxs_mmc_remove(struct platform_device *pdev)
788{
789 struct mmc_host *mmc = platform_get_drvdata(pdev);
790 struct mxs_mmc_host *host = mmc_priv(mmc);
791 struct resource *res = host->res;
792
793 mmc_remove_host(mmc);
794
795 free_irq(host->irq, host);
796
797 platform_set_drvdata(pdev, NULL);
798
799 if (host->dmach)
800 dma_release_channel(host->dmach);
801
802 clk_disable(host->clk);
803 clk_put(host->clk);
804
805 iounmap(host->base);
806
807 mmc_free_host(mmc);
808
809 release_mem_region(res->start, resource_size(res));
810
811 return 0;
812}
813
814#ifdef CONFIG_PM
815static int mxs_mmc_suspend(struct device *dev)
816{
817 struct mmc_host *mmc = dev_get_drvdata(dev);
818 struct mxs_mmc_host *host = mmc_priv(mmc);
819 int ret = 0;
820
821 ret = mmc_suspend_host(mmc);
822
823 clk_disable(host->clk);
824
825 return ret;
826}
827
828static int mxs_mmc_resume(struct device *dev)
829{
830 struct mmc_host *mmc = dev_get_drvdata(dev);
831 struct mxs_mmc_host *host = mmc_priv(mmc);
832 int ret = 0;
833
834 clk_enable(host->clk);
835
836 ret = mmc_resume_host(mmc);
837
838 return ret;
839}
840
841static const struct dev_pm_ops mxs_mmc_pm_ops = {
842 .suspend = mxs_mmc_suspend,
843 .resume = mxs_mmc_resume,
844};
845#endif
846
847static struct platform_driver mxs_mmc_driver = {
848 .probe = mxs_mmc_probe,
849 .remove = mxs_mmc_remove,
850 .driver = {
851 .name = DRIVER_NAME,
852 .owner = THIS_MODULE,
853#ifdef CONFIG_PM
854 .pm = &mxs_mmc_pm_ops,
855#endif
856 },
857};
858
859static int __init mxs_mmc_init(void)
860{
861 return platform_driver_register(&mxs_mmc_driver);
862}
863
864static void __exit mxs_mmc_exit(void)
865{
866 platform_driver_unregister(&mxs_mmc_driver);
867}
868
869module_init(mxs_mmc_init);
870module_exit(mxs_mmc_exit);
871
872MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
873MODULE_AUTHOR("Freescale Semiconductor");
874MODULE_LICENSE("GPL");
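mxs_mmc_set_clk_rate() above programs two cascaded dividers: CLOCK_DIVIDE takes div1 (even, starting at 2) and CLOCK_RATE takes div2 - 1, so the resulting bit rate is ssp_rate / div1 / div2. A standalone user-space sketch of the same search, using hypothetical clock numbers (96 MHz SSP clock, 400 kHz requested identification clock):

	#include <stdio.h>

	/* Same search as mxs_mmc_set_clk_rate(): smallest even div1 for which
	 * div2 = ssp_rate / rate / div1 fits the 8-bit CLOCK_RATE field.
	 * (The driver bails out with an error if div1 reaches 254.) */
	static unsigned int pick_dividers(unsigned int ssp_rate, unsigned int rate,
					  unsigned int *div1, unsigned int *div2)
	{
		for (*div1 = 2; *div1 < 254; *div1 += 2) {
			*div2 = ssp_rate / rate / *div1;
			if (*div2 < 0x100)
				break;
		}
		return *div2 ? ssp_rate / *div1 / *div2 : ssp_rate / *div1;
	}

	int main(void)
	{
		unsigned int div1, div2;
		unsigned int bit_rate = pick_dividers(96000000, 400000, &div1, &div2);

		/* prints: div1=2 div2=120 bit_rate=400000 */
		printf("div1=%u div2=%u bit_rate=%u\n", div1, div2, bit_rate);
		return 0;
	}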
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 9b82910b9dbb..3b5248567973 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -15,9 +15,11 @@
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/gpio.h>
18#include <linux/mmc/host.h> 19#include <linux/mmc/host.h>
19#include <linux/mmc/sdhci-pltfm.h> 20#include <linux/mmc/sdhci-pltfm.h>
20#include <mach/hardware.h> 21#include <mach/hardware.h>
22#include <mach/esdhc.h>
21#include "sdhci.h" 23#include "sdhci.h"
22#include "sdhci-pltfm.h" 24#include "sdhci-pltfm.h"
23#include "sdhci-esdhc.h" 25#include "sdhci-esdhc.h"
@@ -30,6 +32,39 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
30 writel(((readl(base) & ~(mask << shift)) | (val << shift)), base); 32 writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
31} 33}
32 34
35static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
36{
37 /* fake CARD_PRESENT flag on mx25/35 */
38 u32 val = readl(host->ioaddr + reg);
39
40 if (unlikely(reg == SDHCI_PRESENT_STATE)) {
41 struct esdhc_platform_data *boarddata =
42 host->mmc->parent->platform_data;
43
44 if (boarddata && gpio_is_valid(boarddata->cd_gpio)
45 && gpio_get_value(boarddata->cd_gpio))
46 /* no card, if a valid gpio says so... */
 47 val &= ~SDHCI_CARD_PRESENT;
48 else
49 /* ... in all other cases assume card is present */
50 val |= SDHCI_CARD_PRESENT;
51 }
52
53 return val;
54}
55
56static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
57{
58 if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE))
59 /*
60 * these interrupts won't work with a custom card_detect gpio
61 * (only applied to mx25/35)
62 */
63 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
64
65 writel(val, host->ioaddr + reg);
66}
67
33static u16 esdhc_readw_le(struct sdhci_host *host, int reg) 68static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
34{ 69{
35 if (unlikely(reg == SDHCI_HOST_VERSION)) 70 if (unlikely(reg == SDHCI_HOST_VERSION))
@@ -100,10 +135,39 @@ static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
100 return clk_get_rate(pltfm_host->clk) / 256 / 16; 135 return clk_get_rate(pltfm_host->clk) / 256 / 16;
101} 136}
102 137
138static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
139{
140 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
141
142 if (boarddata && gpio_is_valid(boarddata->wp_gpio))
143 return gpio_get_value(boarddata->wp_gpio);
144 else
145 return -ENOSYS;
146}
147
148static struct sdhci_ops sdhci_esdhc_ops = {
149 .read_w = esdhc_readw_le,
150 .write_w = esdhc_writew_le,
151 .write_b = esdhc_writeb_le,
152 .set_clock = esdhc_set_clock,
153 .get_max_clock = esdhc_pltfm_get_max_clock,
154 .get_min_clock = esdhc_pltfm_get_min_clock,
155};
156
157static irqreturn_t cd_irq(int irq, void *data)
158{
159 struct sdhci_host *sdhost = (struct sdhci_host *)data;
160
161 tasklet_schedule(&sdhost->card_tasklet);
162 return IRQ_HANDLED;
163};
164
103static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata) 165static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
104{ 166{
105 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 167 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
168 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
106 struct clk *clk; 169 struct clk *clk;
170 int err;
107 171
108 clk = clk_get(mmc_dev(host->mmc), NULL); 172 clk = clk_get(mmc_dev(host->mmc), NULL);
109 if (IS_ERR(clk)) { 173 if (IS_ERR(clk)) {
@@ -116,32 +180,78 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
116 if (cpu_is_mx35() || cpu_is_mx51()) 180 if (cpu_is_mx35() || cpu_is_mx51())
117 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 181 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
118 182
119 /* Fix errata ENGcm07207 which is present on i.MX25 and i.MX35 */ 183 if (cpu_is_mx25() || cpu_is_mx35()) {
120 if (cpu_is_mx25() || cpu_is_mx35()) 184 /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
121 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; 185 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
186 /* write_protect can't be routed to controller, use gpio */
187 sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
188 }
189
190 if (boarddata) {
191 err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
192 if (err) {
193 dev_warn(mmc_dev(host->mmc),
194 "no write-protect pin available!\n");
195 boarddata->wp_gpio = err;
196 }
197
198 err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD");
199 if (err) {
200 dev_warn(mmc_dev(host->mmc),
201 "no card-detect pin available!\n");
202 goto no_card_detect_pin;
203 }
204
205 /* i.MX5x has issues to be researched */
206 if (!cpu_is_mx25() && !cpu_is_mx35())
207 goto not_supported;
208
209 err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq,
210 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
211 mmc_hostname(host->mmc), host);
212 if (err) {
213 dev_warn(mmc_dev(host->mmc), "request irq error\n");
214 goto no_card_detect_irq;
215 }
216
217 sdhci_esdhc_ops.write_l = esdhc_writel_le;
218 sdhci_esdhc_ops.read_l = esdhc_readl_le;
219 /* Now we have a working card_detect again */
220 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
221 }
222
223 return 0;
122 224
225 no_card_detect_irq:
226 gpio_free(boarddata->cd_gpio);
227 no_card_detect_pin:
228 boarddata->cd_gpio = err;
229 not_supported:
123 return 0; 230 return 0;
124} 231}
125 232
126static void esdhc_pltfm_exit(struct sdhci_host *host) 233static void esdhc_pltfm_exit(struct sdhci_host *host)
127{ 234{
128 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 235 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
236 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
237
238 if (boarddata && gpio_is_valid(boarddata->wp_gpio))
239 gpio_free(boarddata->wp_gpio);
240
241 if (boarddata && gpio_is_valid(boarddata->cd_gpio)) {
242 gpio_free(boarddata->cd_gpio);
243
244 if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION))
245 free_irq(gpio_to_irq(boarddata->cd_gpio), host);
246 }
129 247
130 clk_disable(pltfm_host->clk); 248 clk_disable(pltfm_host->clk);
131 clk_put(pltfm_host->clk); 249 clk_put(pltfm_host->clk);
132} 250}
133 251
134static struct sdhci_ops sdhci_esdhc_ops = {
135 .read_w = esdhc_readw_le,
136 .write_w = esdhc_writew_le,
137 .write_b = esdhc_writeb_le,
138 .set_clock = esdhc_set_clock,
139 .get_max_clock = esdhc_pltfm_get_max_clock,
140 .get_min_clock = esdhc_pltfm_get_min_clock,
141};
142
143struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { 252struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
144 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA, 253 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA
254 | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
145 /* ADMA has issues. Might be fixable */ 255 /* ADMA has issues. Might be fixable */
146 .ops = &sdhci_esdhc_ops, 256 .ops = &sdhci_esdhc_ops,
147 .init = esdhc_pltfm_init, 257 .init = esdhc_pltfm_init,
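The esdhc-imx change keeps SDHCI_QUIRK_BROKEN_CARD_DETECTION as the default and only clears it once a usable card-detect GPIO has been wired up: the GPIO is requested as an input, both edges of its interrupt schedule the SDHCI card tasklet, and esdhc_readl_le() synthesizes the CARD_PRESENT bit from the GPIO level. A hypothetical board-support fragment supplying those GPIOs is shown below; the GPIO numbers are made up, and only the cd_gpio/wp_gpio fields used above are assumed:

	#include <mach/esdhc.h>

	/* Board code, not part of this diff: the driver reads this structure
	 * through host->mmc->parent->platform_data. */
	static struct esdhc_platform_data my_board_esdhc_pdata = {
		.wp_gpio = 57,	/* write-protect switch; an invalid GPIO leaves get_ro at -ENOSYS */
		.cd_gpio = 63,	/* card-detect switch; drives the cd_irq and the PRESENT bit */
	};

The board would then point the eSDHC platform device's dev.platform_data at this structure when registering it.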
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index afaf1bc4913a..c55aae828aac 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -19,7 +19,6 @@
19 */ 19 */
20 20
21#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ 21#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
22 SDHCI_QUIRK_BROKEN_CARD_DETECTION | \
23 SDHCI_QUIRK_NO_BUSY_IRQ | \ 22 SDHCI_QUIRK_NO_BUSY_IRQ | \
24 SDHCI_QUIRK_NONSTANDARD_CLOCK | \ 23 SDHCI_QUIRK_NONSTANDARD_CLOCK | \
25 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ 24 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index fcd0e1fcba44..08161f690ae8 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -73,7 +73,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
73} 73}
74 74
75struct sdhci_of_data sdhci_esdhc = { 75struct sdhci_of_data sdhci_esdhc = {
76 .quirks = ESDHC_DEFAULT_QUIRKS, 76 /* card detection could be handled via GPIO */
77 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
77 .ops = { 78 .ops = {
78 .read_l = sdhci_be32bs_readl, 79 .read_l = sdhci_be32bs_readl,
79 .read_w = esdhc_readw, 80 .read_w = esdhc_readw,
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 0dc905b20eee..2f8d46854acd 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -547,6 +547,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
547 }, 547 },
548 548
549 { 549 {
550 .vendor = PCI_VENDOR_ID_RICOH,
551 .device = 0xe823,
552 .subvendor = PCI_ANY_ID,
553 .subdevice = PCI_ANY_ID,
554 .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
555 },
556
557 {
550 .vendor = PCI_VENDOR_ID_ENE, 558 .vendor = PCI_VENDOR_ID_ENE,
551 .device = PCI_DEVICE_ID_ENE_CB712_SD, 559 .device = PCI_DEVICE_ID_ENE_CB712_SD,
552 .subvendor = PCI_ANY_ID, 560 .subvendor = PCI_ANY_ID,
@@ -900,9 +908,6 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
900{ 908{
901 struct sdhci_pci_slot *slot; 909 struct sdhci_pci_slot *slot;
902 struct sdhci_host *host; 910 struct sdhci_host *host;
903
904 resource_size_t addr;
905
906 int ret; 911 int ret;
907 912
908 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 913 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
@@ -949,7 +954,6 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
949 goto free; 954 goto free;
950 } 955 }
951 956
952 addr = pci_resource_start(pdev, bar);
953 host->ioaddr = pci_ioremap_bar(pdev, bar); 957 host->ioaddr = pci_ioremap_bar(pdev, bar);
954 if (!host->ioaddr) { 958 if (!host->ioaddr) {
955 dev_err(&pdev->dev, "failed to remap registers\n"); 959 dev_err(&pdev->dev, "failed to remap registers\n");
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 5309ab95aada..69e3ee321eb5 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -499,6 +499,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
499 * SDHCI block, or a missing configuration that needs to be set. */ 499 * SDHCI block, or a missing configuration that needs to be set. */
500 host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ; 500 host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
501 501
502 /* This host supports the Auto CMD12 */
503 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
504
502 if (pdata->cd_type == S3C_SDHCI_CD_NONE || 505 if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
503 pdata->cd_type == S3C_SDHCI_CD_PERMANENT) 506 pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
504 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; 507 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 4823ee94a63f..f7e1f964395f 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -169,7 +169,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
169 if (rc) { 169 if (rc) {
170 dev_err(mmc_dev(host->mmc), 170 dev_err(mmc_dev(host->mmc),
171 "failed to allocate wp gpio\n"); 171 "failed to allocate wp gpio\n");
172 goto out_cd; 172 goto out_irq;
173 } 173 }
174 tegra_gpio_enable(plat->wp_gpio); 174 tegra_gpio_enable(plat->wp_gpio);
175 gpio_direction_input(plat->wp_gpio); 175 gpio_direction_input(plat->wp_gpio);
@@ -195,6 +195,9 @@ out_wp:
195 gpio_free(plat->wp_gpio); 195 gpio_free(plat->wp_gpio);
196 } 196 }
197 197
198out_irq:
199 if (gpio_is_valid(plat->cd_gpio))
200 free_irq(gpio_to_irq(plat->cd_gpio), host);
198out_cd: 201out_cd:
199 if (gpio_is_valid(plat->cd_gpio)) { 202 if (gpio_is_valid(plat->cd_gpio)) {
200 tegra_gpio_disable(plat->cd_gpio); 203 tegra_gpio_disable(plat->cd_gpio);
@@ -225,6 +228,7 @@ static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
225 } 228 }
226 229
227 if (gpio_is_valid(plat->cd_gpio)) { 230 if (gpio_is_valid(plat->cd_gpio)) {
231 free_irq(gpio_to_irq(plat->cd_gpio), host);
228 tegra_gpio_disable(plat->cd_gpio); 232 tegra_gpio_disable(plat->cd_gpio);
229 gpio_free(plat->cd_gpio); 233 gpio_free(plat->cd_gpio);
230 } 234 }
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 12884c270171..af97015a2fc7 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -169,7 +169,7 @@ struct sh_mmcif_host {
169 struct dma_chan *chan_rx; 169 struct dma_chan *chan_rx;
170 struct dma_chan *chan_tx; 170 struct dma_chan *chan_tx;
171 struct completion dma_complete; 171 struct completion dma_complete;
172 unsigned int dma_sglen; 172 bool dma_active;
173}; 173};
174 174
175static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, 175static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
@@ -194,10 +194,12 @@ static void mmcif_dma_complete(void *arg)
194 return; 194 return;
195 195
196 if (host->data->flags & MMC_DATA_READ) 196 if (host->data->flags & MMC_DATA_READ)
197 dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen, 197 dma_unmap_sg(host->chan_rx->device->dev,
198 host->data->sg, host->data->sg_len,
198 DMA_FROM_DEVICE); 199 DMA_FROM_DEVICE);
199 else 200 else
200 dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen, 201 dma_unmap_sg(host->chan_tx->device->dev,
202 host->data->sg, host->data->sg_len,
201 DMA_TO_DEVICE); 203 DMA_TO_DEVICE);
202 204
203 complete(&host->dma_complete); 205 complete(&host->dma_complete);
@@ -211,9 +213,10 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
211 dma_cookie_t cookie = -EINVAL; 213 dma_cookie_t cookie = -EINVAL;
212 int ret; 214 int ret;
213 215
214 ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_FROM_DEVICE); 216 ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
217 DMA_FROM_DEVICE);
215 if (ret > 0) { 218 if (ret > 0) {
216 host->dma_sglen = ret; 219 host->dma_active = true;
217 desc = chan->device->device_prep_slave_sg(chan, sg, ret, 220 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
218 DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 221 DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
219 } 222 }
@@ -221,14 +224,9 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
221 if (desc) { 224 if (desc) {
222 desc->callback = mmcif_dma_complete; 225 desc->callback = mmcif_dma_complete;
223 desc->callback_param = host; 226 desc->callback_param = host;
224 cookie = desc->tx_submit(desc); 227 cookie = dmaengine_submit(desc);
225 if (cookie < 0) { 228 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
226 desc = NULL; 229 dma_async_issue_pending(chan);
227 ret = cookie;
228 } else {
229 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
230 chan->device->device_issue_pending(chan);
231 }
232 } 230 }
233 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", 231 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
234 __func__, host->data->sg_len, ret, cookie); 232 __func__, host->data->sg_len, ret, cookie);
@@ -238,7 +236,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
238 if (ret >= 0) 236 if (ret >= 0)
239 ret = -EIO; 237 ret = -EIO;
240 host->chan_rx = NULL; 238 host->chan_rx = NULL;
241 host->dma_sglen = 0; 239 host->dma_active = false;
242 dma_release_channel(chan); 240 dma_release_channel(chan);
243 /* Free the Tx channel too */ 241 /* Free the Tx channel too */
244 chan = host->chan_tx; 242 chan = host->chan_tx;
@@ -263,9 +261,10 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
263 dma_cookie_t cookie = -EINVAL; 261 dma_cookie_t cookie = -EINVAL;
264 int ret; 262 int ret;
265 263
266 ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_TO_DEVICE); 264 ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
265 DMA_TO_DEVICE);
267 if (ret > 0) { 266 if (ret > 0) {
268 host->dma_sglen = ret; 267 host->dma_active = true;
269 desc = chan->device->device_prep_slave_sg(chan, sg, ret, 268 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
270 DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 269 DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
271 } 270 }
@@ -273,14 +272,9 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
273 if (desc) { 272 if (desc) {
274 desc->callback = mmcif_dma_complete; 273 desc->callback = mmcif_dma_complete;
275 desc->callback_param = host; 274 desc->callback_param = host;
276 cookie = desc->tx_submit(desc); 275 cookie = dmaengine_submit(desc);
277 if (cookie < 0) { 276 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
278 desc = NULL; 277 dma_async_issue_pending(chan);
279 ret = cookie;
280 } else {
281 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
282 chan->device->device_issue_pending(chan);
283 }
284 } 278 }
285 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", 279 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
286 __func__, host->data->sg_len, ret, cookie); 280 __func__, host->data->sg_len, ret, cookie);
@@ -290,7 +284,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
290 if (ret >= 0) 284 if (ret >= 0)
291 ret = -EIO; 285 ret = -EIO;
292 host->chan_tx = NULL; 286 host->chan_tx = NULL;
293 host->dma_sglen = 0; 287 host->dma_active = false;
294 dma_release_channel(chan); 288 dma_release_channel(chan);
295 /* Free the Rx channel too */ 289 /* Free the Rx channel too */
296 chan = host->chan_rx; 290 chan = host->chan_rx;
@@ -317,7 +311,7 @@ static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
317static void sh_mmcif_request_dma(struct sh_mmcif_host *host, 311static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
318 struct sh_mmcif_plat_data *pdata) 312 struct sh_mmcif_plat_data *pdata)
319{ 313{
320 host->dma_sglen = 0; 314 host->dma_active = false;
321 315
322 /* We can only either use DMA for both Tx and Rx or not use it at all */ 316 /* We can only either use DMA for both Tx and Rx or not use it at all */
323 if (pdata->dma) { 317 if (pdata->dma) {
@@ -364,7 +358,7 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
364 dma_release_channel(chan); 358 dma_release_channel(chan);
365 } 359 }
366 360
367 host->dma_sglen = 0; 361 host->dma_active = false;
368} 362}
369 363
370static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) 364static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
@@ -753,7 +747,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
753 } 747 }
754 sh_mmcif_get_response(host, cmd); 748 sh_mmcif_get_response(host, cmd);
755 if (host->data) { 749 if (host->data) {
756 if (!host->dma_sglen) { 750 if (!host->dma_active) {
757 ret = sh_mmcif_data_trans(host, mrq, cmd->opcode); 751 ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
758 } else { 752 } else {
759 long time = 753 long time =
@@ -765,7 +759,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
765 ret = time; 759 ret = time;
766 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, 760 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
767 BUF_ACC_DMAREN | BUF_ACC_DMAWEN); 761 BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
768 host->dma_sglen = 0; 762 host->dma_active = false;
769 } 763 }
770 if (ret < 0) 764 if (ret < 0)
771 mrq->data->bytes_xfered = 0; 765 mrq->data->bytes_xfered = 0;
@@ -850,15 +844,15 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
850 struct sh_mmcif_host *host = mmc_priv(mmc); 844 struct sh_mmcif_host *host = mmc_priv(mmc);
851 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; 845 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
852 846
853 if (ios->power_mode == MMC_POWER_OFF) { 847 if (ios->power_mode == MMC_POWER_UP) {
848 if (p->set_pwr)
849 p->set_pwr(host->pd, ios->power_mode);
850 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
854 /* clock stop */ 851 /* clock stop */
855 sh_mmcif_clock_control(host, 0); 852 sh_mmcif_clock_control(host, 0);
856 if (p->down_pwr) 853 if (ios->power_mode == MMC_POWER_OFF && p->down_pwr)
857 p->down_pwr(host->pd); 854 p->down_pwr(host->pd);
858 return; 855 return;
859 } else if (ios->power_mode == MMC_POWER_UP) {
860 if (p->set_pwr)
861 p->set_pwr(host->pd, ios->power_mode);
862 } 856 }
863 857
864 if (ios->clock) 858 if (ios->clock)
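Both the sh_mmcif hunks above and the tmio hunks below swap open-coded calls through the channel's ops for the dmaengine slave helpers. Those helpers are thin inlines, roughly equivalent to the following (paraphrased from <linux/dmaengine.h>):

	/* Roughly what the helpers expand to: */
	static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
	{
		return desc->tx_submit(desc);
	}

	static inline void dma_async_issue_pending(struct dma_chan *chan)
	{
		chan->device->device_issue_pending(chan);
	}

The conversion is therefore mostly cosmetic; the visible functional change in these hunks is that the cookie < 0 fallback is dropped, so a successfully prepared descriptor is submitted and issued unconditionally.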
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e3c6ef208391..ac52eb65395e 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -152,7 +152,6 @@ struct tmio_mmc_host {
152 struct tasklet_struct dma_complete; 152 struct tasklet_struct dma_complete;
153 struct tasklet_struct dma_issue; 153 struct tasklet_struct dma_issue;
154#ifdef CONFIG_TMIO_MMC_DMA 154#ifdef CONFIG_TMIO_MMC_DMA
155 unsigned int dma_sglen;
156 u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN))); 155 u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
157 struct scatterlist bounce_sg; 156 struct scatterlist bounce_sg;
158#endif 157#endif
@@ -220,44 +219,48 @@ static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
220 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 219 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
221} 220}
222 221
223static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags) 222static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt)
224{ 223{
225 kunmap_atomic(virt, KM_BIO_SRC_IRQ); 224 kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
226 local_irq_restore(*flags); 225 local_irq_restore(*flags);
227} 226}
228 227
229#ifdef CONFIG_MMC_DEBUG 228#ifdef CONFIG_MMC_DEBUG
230 229
231#define STATUS_TO_TEXT(a) \ 230#define STATUS_TO_TEXT(a, status, i) \
232 do { \ 231 do { \
233 if (status & TMIO_STAT_##a) \ 232 if (status & TMIO_STAT_##a) { \
233 if (i++) \
234 printk(" | "); \
234 printk(#a); \ 235 printk(#a); \
236 } \
235 } while (0) 237 } while (0)
236 238
237void pr_debug_status(u32 status) 239void pr_debug_status(u32 status)
238{ 240{
241 int i = 0;
239 printk(KERN_DEBUG "status: %08x = ", status); 242 printk(KERN_DEBUG "status: %08x = ", status);
240 STATUS_TO_TEXT(CARD_REMOVE); 243 STATUS_TO_TEXT(CARD_REMOVE, status, i);
241 STATUS_TO_TEXT(CARD_INSERT); 244 STATUS_TO_TEXT(CARD_INSERT, status, i);
242 STATUS_TO_TEXT(SIGSTATE); 245 STATUS_TO_TEXT(SIGSTATE, status, i);
243 STATUS_TO_TEXT(WRPROTECT); 246 STATUS_TO_TEXT(WRPROTECT, status, i);
244 STATUS_TO_TEXT(CARD_REMOVE_A); 247 STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
245 STATUS_TO_TEXT(CARD_INSERT_A); 248 STATUS_TO_TEXT(CARD_INSERT_A, status, i);
246 STATUS_TO_TEXT(SIGSTATE_A); 249 STATUS_TO_TEXT(SIGSTATE_A, status, i);
247 STATUS_TO_TEXT(CMD_IDX_ERR); 250 STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
248 STATUS_TO_TEXT(STOPBIT_ERR); 251 STATUS_TO_TEXT(STOPBIT_ERR, status, i);
249 STATUS_TO_TEXT(ILL_FUNC); 252 STATUS_TO_TEXT(ILL_FUNC, status, i);
250 STATUS_TO_TEXT(CMD_BUSY); 253 STATUS_TO_TEXT(CMD_BUSY, status, i);
251 STATUS_TO_TEXT(CMDRESPEND); 254 STATUS_TO_TEXT(CMDRESPEND, status, i);
252 STATUS_TO_TEXT(DATAEND); 255 STATUS_TO_TEXT(DATAEND, status, i);
253 STATUS_TO_TEXT(CRCFAIL); 256 STATUS_TO_TEXT(CRCFAIL, status, i);
254 STATUS_TO_TEXT(DATATIMEOUT); 257 STATUS_TO_TEXT(DATATIMEOUT, status, i);
255 STATUS_TO_TEXT(CMDTIMEOUT); 258 STATUS_TO_TEXT(CMDTIMEOUT, status, i);
256 STATUS_TO_TEXT(RXOVERFLOW); 259 STATUS_TO_TEXT(RXOVERFLOW, status, i);
257 STATUS_TO_TEXT(TXUNDERRUN); 260 STATUS_TO_TEXT(TXUNDERRUN, status, i);
258 STATUS_TO_TEXT(RXRDY); 261 STATUS_TO_TEXT(RXRDY, status, i);
259 STATUS_TO_TEXT(TXRQ); 262 STATUS_TO_TEXT(TXRQ, status, i);
260 STATUS_TO_TEXT(ILL_ACCESS); 263 STATUS_TO_TEXT(ILL_ACCESS, status, i);
261 printk("\n"); 264 printk("\n");
262} 265}
263 266
@@ -507,7 +510,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
507 510
508 host->sg_off += count; 511 host->sg_off += count;
509 512
510 tmio_mmc_kunmap_atomic(sg_virt, &flags); 513 tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
511 514
512 if (host->sg_off == host->sg_ptr->length) 515 if (host->sg_off == host->sg_ptr->length)
513 tmio_mmc_next_sg(host); 516 tmio_mmc_next_sg(host);
@@ -767,7 +770,7 @@ static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
767 unsigned long flags; 770 unsigned long flags;
768 void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); 771 void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
769 memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); 772 memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
770 tmio_mmc_kunmap_atomic(sg_vaddr, &flags); 773 tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
771 } 774 }
772} 775}
773 776
@@ -825,23 +828,16 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 		sg = host->sg_ptr;
 	}
 
-	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
-	if (ret > 0) {
-		host->dma_sglen = ret;
+	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
+	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
 			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	}
 
 	if (desc) {
 		desc->callback = tmio_dma_complete;
 		desc->callback_param = host;
-		cookie = desc->tx_submit(desc);
-		if (cookie < 0) {
-			desc = NULL;
-			ret = cookie;
-		} else {
-			chan->device->device_issue_pending(chan);
-		}
+		cookie = dmaengine_submit(desc);
+		dma_async_issue_pending(chan);
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 		__func__, host->sg_len, ret, cookie, host->mrq);
@@ -901,26 +897,20 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
 		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
 		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
-		tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
 		host->sg_ptr = &host->bounce_sg;
 		sg = host->sg_ptr;
 	}
 
-	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
-	if (ret > 0) {
-		host->dma_sglen = ret;
+	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
+	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
 			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	}
 
 	if (desc) {
 		desc->callback = tmio_dma_complete;
 		desc->callback_param = host;
-		cookie = desc->tx_submit(desc);
-		if (cookie < 0) {
-			desc = NULL;
-			ret = cookie;
-		}
+		cookie = dmaengine_submit(desc);
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 		__func__, host->sg_len, ret, cookie, host->mrq);
@@ -964,7 +954,7 @@ static void tmio_issue_tasklet_fn(unsigned long priv)
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
 	struct dma_chan *chan = host->chan_tx;
 
-	chan->device->device_issue_pending(chan);
+	dma_async_issue_pending(chan);
 }
 
 static void tmio_tasklet_fn(unsigned long arg)
@@ -978,10 +968,12 @@ static void tmio_tasklet_fn(unsigned long arg)
 		goto out;
 
 	if (host->data->flags & MMC_DATA_READ)
-		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+		dma_unmap_sg(host->chan_rx->device->dev,
+			     host->sg_ptr, host->sg_len,
 			DMA_FROM_DEVICE);
 	else
-		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+		dma_unmap_sg(host->chan_tx->device->dev,
+			     host->sg_ptr, host->sg_len,
 			DMA_TO_DEVICE);
 
 	tmio_mmc_do_data_irq(host);
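The remaining tmio hunks replace the open-coded desc->tx_submit() / device_issue_pending() calls with the dmaengine helpers and map the scatterlist against the DMA channel's device rather than the platform device, which also lets the dma_sglen bookkeeping go away. Condensed, the receive-side submission now follows this pattern (error handling and debug output trimmed; tmio_dma_complete and the host fields are the driver's own):

	struct dma_chan *chan = host->chan_rx;
	struct dma_async_tx_descriptor *desc = NULL;
	int ret;

	/* map against the DMA engine's device, not &host->pdev->dev */
	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
				DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		desc->callback = tmio_dma_complete;
		desc->callback_param = host;
		dmaengine_submit(desc);		/* wraps desc->tx_submit() */
		dma_async_issue_pending(chan);	/* kicks the engine */
	}
	/* on completion the tasklet unmaps with the same device and
	 * host->sg_len, so no separate dma_sglen field is needed */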
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 9ed84ddb4780..8c5b4881ccd6 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -802,12 +802,9 @@ static const struct mmc_host_ops via_sdc_ops = {
 
 static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
 {
-	void __iomem *addrbase;
 	unsigned long flags;
 	u8 gatt;
 
-	addrbase = host->pcictrl_mmiobase;
-
 	spin_lock_irqsave(&host->lock, flags);
 
 	via_save_pcictrlreg(host);