aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/mmc')
-rw-r--r--drivers/mmc/card/Kconfig4
-rw-r--r--drivers/mmc/card/block.c3
-rw-r--r--drivers/mmc/card/mmc_test.c272
-rw-r--r--drivers/mmc/card/queue.c3
-rw-r--r--drivers/mmc/card/sdio_uart.c4
-rw-r--r--drivers/mmc/core/Kconfig11
-rw-r--r--drivers/mmc/core/Makefile3
-rw-r--r--drivers/mmc/core/bus.c9
-rw-r--r--drivers/mmc/core/core.c235
-rw-r--r--drivers/mmc/core/core.h11
-rw-r--r--drivers/mmc/core/debugfs.c5
-rw-r--r--drivers/mmc/core/host.c202
-rw-r--r--drivers/mmc/core/host.h21
-rw-r--r--drivers/mmc/core/mmc.c177
-rw-r--r--drivers/mmc/core/mmc_ops.c103
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/quirks.c84
-rw-r--r--drivers/mmc/core/sd.c17
-rw-r--r--drivers/mmc/core/sd_ops.c14
-rw-r--r--drivers/mmc/core/sdio.c44
-rw-r--r--drivers/mmc/core/sdio_bus.c32
-rw-r--r--drivers/mmc/core/sdio_irq.c2
-rw-r--r--drivers/mmc/host/Kconfig70
-rw-r--r--drivers/mmc/host/Makefile12
-rw-r--r--drivers/mmc/host/at91_mci.c13
-rw-r--r--drivers/mmc/host/atmel-mci.c41
-rw-r--r--drivers/mmc/host/au1xmmc.c2
-rw-r--r--drivers/mmc/host/bfin_sdh.c2
-rw-r--r--drivers/mmc/host/cb710-mmc.c2
-rw-r--r--drivers/mmc/host/davinci_mmc.c80
-rw-r--r--drivers/mmc/host/dw_mmc.c1859
-rw-r--r--drivers/mmc/host/dw_mmc.h168
-rw-r--r--drivers/mmc/host/jz4740_mmc.c5
-rw-r--r--drivers/mmc/host/mmc_spi.c6
-rw-r--r--drivers/mmc/host/mmci.c503
-rw-r--r--drivers/mmc/host/mmci.h23
-rw-r--r--drivers/mmc/host/msm_sdcc.c239
-rw-r--r--drivers/mmc/host/msm_sdcc.h9
-rw-r--r--drivers/mmc/host/mxcmmc.c234
-rw-r--r--drivers/mmc/host/mxs-mmc.c874
-rw-r--r--drivers/mmc/host/of_mmc_spi.c28
-rw-r--r--drivers/mmc/host/omap.c32
-rw-r--r--drivers/mmc/host/omap_hsmmc.c45
-rw-r--r--drivers/mmc/host/s3cmci.c6
-rw-r--r--drivers/mmc/host/sdhci-dove.c70
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c212
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h4
-rw-r--r--drivers/mmc/host/sdhci-of-core.c28
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c4
-rw-r--r--drivers/mmc/host/sdhci-pci.c180
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c6
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h4
-rw-r--r--drivers/mmc/host/sdhci-s3c.c105
-rw-r--r--drivers/mmc/host/sdhci-spear.c2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c261
-rw-r--r--drivers/mmc/host/sdhci.c54
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mmc/host/sdricoh_cs.c4
-rw-r--r--drivers/mmc/host/sh_mmcif.c433
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c171
-rw-r--r--drivers/mmc/host/tmio_mmc.c878
-rw-r--r--drivers/mmc/host/tmio_mmc.h221
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c317
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c897
-rw-r--r--drivers/mmc/host/ushc.c1
-rw-r--r--drivers/mmc/host/via-sdmmc.c8
-rw-r--r--drivers/mmc/host/wbsd.c6
67 files changed, 7642 insertions, 1738 deletions
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 57e4416b9ef0..3b1f783bf924 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -16,6 +16,7 @@ config MMC_BLOCK
16 16
17config MMC_BLOCK_MINORS 17config MMC_BLOCK_MINORS
18 int "Number of minors per block device" 18 int "Number of minors per block device"
19 depends on MMC_BLOCK
19 range 4 256 20 range 4 256
20 default 8 21 default 8
21 help 22 help
@@ -57,12 +58,11 @@ config SDIO_UART
57 58
58config MMC_TEST 59config MMC_TEST
59 tristate "MMC host test driver" 60 tristate "MMC host test driver"
60 default n
61 help 61 help
62 Development driver that performs a series of reads and writes 62 Development driver that performs a series of reads and writes
63 to a memory card in order to expose certain well known bugs 63 to a memory card in order to expose certain well known bugs
64 in host controllers. The tests are executed by writing to the 64 in host controllers. The tests are executed by writing to the
65 "test" file in sysfs under each card. Note that whatever is 65 "test" file in debugfs under each card. Note that whatever is
66 on your card will be overwritten by these tests. 66 on your card will be overwritten by these tests.
67 67
68 This driver is only of interest to those developing or 68 This driver is only of interest to those developing or
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 217f82037fc1..61d233a7c118 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -257,7 +257,7 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
257 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 257 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
258 err = mmc_wait_for_cmd(card->host, &cmd, 0); 258 err = mmc_wait_for_cmd(card->host, &cmd, 0);
259 if (err) 259 if (err)
260 printk(KERN_ERR "%s: error %d sending status comand", 260 printk(KERN_ERR "%s: error %d sending status command",
261 req->rq_disk->disk_name, err); 261 req->rq_disk->disk_name, err);
262 return cmd.resp[0]; 262 return cmd.resp[0];
263} 263}
@@ -621,6 +621,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
621 md->disk->private_data = md; 621 md->disk->private_data = md;
622 md->disk->queue = md->queue.queue; 622 md->disk->queue = md->queue.queue;
623 md->disk->driverfs_dev = &card->dev; 623 md->disk->driverfs_dev = &card->dev;
624 set_disk_ro(md->disk, md->read_only);
624 625
625 /* 626 /*
626 * As discussed on lkml, GENHD_FL_REMOVABLE should: 627 * As discussed on lkml, GENHD_FL_REMOVABLE should:
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 21adc27f4132..abc1a63bcc5e 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -88,6 +88,7 @@ struct mmc_test_area {
88 * @sectors: amount of sectors to check in one group 88 * @sectors: amount of sectors to check in one group
89 * @ts: time values of transfer 89 * @ts: time values of transfer
90 * @rate: calculated transfer rate 90 * @rate: calculated transfer rate
91 * @iops: I/O operations per second (times 100)
91 */ 92 */
92struct mmc_test_transfer_result { 93struct mmc_test_transfer_result {
93 struct list_head link; 94 struct list_head link;
@@ -95,6 +96,7 @@ struct mmc_test_transfer_result {
95 unsigned int sectors; 96 unsigned int sectors;
96 struct timespec ts; 97 struct timespec ts;
97 unsigned int rate; 98 unsigned int rate;
99 unsigned int iops;
98}; 100};
99 101
100/** 102/**
@@ -226,9 +228,10 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
226 228
227 if (!busy && mmc_test_busy(&cmd)) { 229 if (!busy && mmc_test_busy(&cmd)) {
228 busy = 1; 230 busy = 1;
229 printk(KERN_INFO "%s: Warning: Host did not " 231 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
230 "wait for busy state to end.\n", 232 printk(KERN_INFO "%s: Warning: Host did not "
231 mmc_hostname(test->card->host)); 233 "wait for busy state to end.\n",
234 mmc_hostname(test->card->host));
232 } 235 }
233 } while (mmc_test_busy(&cmd)); 236 } while (mmc_test_busy(&cmd));
234 237
@@ -289,7 +292,7 @@ static void mmc_test_free_mem(struct mmc_test_mem *mem)
289} 292}
290 293
291/* 294/*
292 * Allocate a lot of memory, preferrably max_sz but at least min_sz. In case 295 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
293 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do 296 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
294 * not exceed a maximum number of segments and try not to make segments much 297 * not exceed a maximum number of segments and try not to make segments much
295 * bigger than maximum segment size. 298 * bigger than maximum segment size.
@@ -494,7 +497,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
494 */ 497 */
495static void mmc_test_save_transfer_result(struct mmc_test_card *test, 498static void mmc_test_save_transfer_result(struct mmc_test_card *test,
496 unsigned int count, unsigned int sectors, struct timespec ts, 499 unsigned int count, unsigned int sectors, struct timespec ts,
497 unsigned int rate) 500 unsigned int rate, unsigned int iops)
498{ 501{
499 struct mmc_test_transfer_result *tr; 502 struct mmc_test_transfer_result *tr;
500 503
@@ -509,6 +512,7 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
509 tr->sectors = sectors; 512 tr->sectors = sectors;
510 tr->ts = ts; 513 tr->ts = ts;
511 tr->rate = rate; 514 tr->rate = rate;
515 tr->iops = iops;
512 516
513 list_add_tail(&tr->link, &test->gr->tr_lst); 517 list_add_tail(&tr->link, &test->gr->tr_lst);
514} 518}
@@ -519,20 +523,22 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
519static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, 523static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
520 struct timespec *ts1, struct timespec *ts2) 524 struct timespec *ts1, struct timespec *ts2)
521{ 525{
522 unsigned int rate, sectors = bytes >> 9; 526 unsigned int rate, iops, sectors = bytes >> 9;
523 struct timespec ts; 527 struct timespec ts;
524 528
525 ts = timespec_sub(*ts2, *ts1); 529 ts = timespec_sub(*ts2, *ts1);
526 530
527 rate = mmc_test_rate(bytes, &ts); 531 rate = mmc_test_rate(bytes, &ts);
532 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
528 533
529 printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " 534 printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
530 "seconds (%u kB/s, %u KiB/s)\n", 535 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
531 mmc_hostname(test->card->host), sectors, sectors >> 1, 536 mmc_hostname(test->card->host), sectors, sectors >> 1,
532 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec, 537 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
533 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024); 538 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
539 iops / 100, iops % 100);
534 540
535 mmc_test_save_transfer_result(test, 1, sectors, ts, rate); 541 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
536} 542}
537 543
538/* 544/*
@@ -542,22 +548,24 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
542 unsigned int count, struct timespec *ts1, 548 unsigned int count, struct timespec *ts1,
543 struct timespec *ts2) 549 struct timespec *ts2)
544{ 550{
545 unsigned int rate, sectors = bytes >> 9; 551 unsigned int rate, iops, sectors = bytes >> 9;
546 uint64_t tot = bytes * count; 552 uint64_t tot = bytes * count;
547 struct timespec ts; 553 struct timespec ts;
548 554
549 ts = timespec_sub(*ts2, *ts1); 555 ts = timespec_sub(*ts2, *ts1);
550 556
551 rate = mmc_test_rate(tot, &ts); 557 rate = mmc_test_rate(tot, &ts);
558 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
552 559
553 printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " 560 printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
554 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n", 561 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
562 "%u.%02u IOPS)\n",
555 mmc_hostname(test->card->host), count, sectors, count, 563 mmc_hostname(test->card->host), count, sectors, count,
556 sectors >> 1, (sectors & 1 ? ".5" : ""), 564 sectors >> 1, (sectors & 1 ? ".5" : ""),
557 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec, 565 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
558 rate / 1000, rate / 1024); 566 rate / 1000, rate / 1024, iops / 100, iops % 100);
559 567
560 mmc_test_save_transfer_result(test, count, sectors, ts, rate); 568 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
561} 569}
562 570
563/* 571/*
@@ -1425,28 +1433,29 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
1425} 1433}
1426 1434
1427/* 1435/*
1428 * Initialize an area for testing large transfers. The size of the area is the 1436 * Initialize an area for testing large transfers. The test area is set to the
1429 * preferred erase size which is a good size for optimal transfer speed. Note 1437 * middle of the card because cards may have different charateristics at the
1430 * that is typically 4MiB for modern cards. The test area is set to the middle 1438 * front (for FAT file system optimization). Optionally, the area is erased
1431 * of the card because cards may have different charateristics at the front 1439 * (if the card supports it) which may improve write performance. Optionally,
1432 * (for FAT file system optimization). Optionally, the area is erased (if the 1440 * the area is filled with data for subsequent read tests.
1433 * card supports it) which may improve write performance. Optionally, the area
1434 * is filled with data for subsequent read tests.
1435 */ 1441 */
1436static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill) 1442static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1437{ 1443{
1438 struct mmc_test_area *t = &test->area; 1444 struct mmc_test_area *t = &test->area;
1439 unsigned long min_sz = 64 * 1024; 1445 unsigned long min_sz = 64 * 1024, sz;
1440 int ret; 1446 int ret;
1441 1447
1442 ret = mmc_test_set_blksize(test, 512); 1448 ret = mmc_test_set_blksize(test, 512);
1443 if (ret) 1449 if (ret)
1444 return ret; 1450 return ret;
1445 1451
1446 if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9) 1452 /* Make the test area size about 4MiB */
1447 t->max_sz = TEST_AREA_MAX_SIZE; 1453 sz = (unsigned long)test->card->pref_erase << 9;
1448 else 1454 t->max_sz = sz;
1449 t->max_sz = (unsigned long)test->card->pref_erase << 9; 1455 while (t->max_sz < 4 * 1024 * 1024)
1456 t->max_sz += sz;
1457 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1458 t->max_sz -= sz;
1450 1459
1451 t->max_segs = test->card->host->max_segs; 1460 t->max_segs = test->card->host->max_segs;
1452 t->max_seg_sz = test->card->host->max_seg_size; 1461 t->max_seg_sz = test->card->host->max_seg_size;
@@ -1766,6 +1775,187 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1766 return 0; 1775 return 0;
1767} 1776}
1768 1777
1778static unsigned int rnd_next = 1;
1779
1780static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1781{
1782 uint64_t r;
1783
1784 rnd_next = rnd_next * 1103515245 + 12345;
1785 r = (rnd_next >> 16) & 0x7fff;
1786 return (r * rnd_cnt) >> 15;
1787}
1788
1789static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1790 unsigned long sz)
1791{
1792 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1793 unsigned int ssz;
1794 struct timespec ts1, ts2, ts;
1795 int ret;
1796
1797 ssz = sz >> 9;
1798
1799 rnd_addr = mmc_test_capacity(test->card) / 4;
1800 range1 = rnd_addr / test->card->pref_erase;
1801 range2 = range1 / ssz;
1802
1803 getnstimeofday(&ts1);
1804 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1805 getnstimeofday(&ts2);
1806 ts = timespec_sub(ts2, ts1);
1807 if (ts.tv_sec >= 10)
1808 break;
1809 ea = mmc_test_rnd_num(range1);
1810 if (ea == last_ea)
1811 ea -= 1;
1812 last_ea = ea;
1813 dev_addr = rnd_addr + test->card->pref_erase * ea +
1814 ssz * mmc_test_rnd_num(range2);
1815 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1816 if (ret)
1817 return ret;
1818 }
1819 if (print)
1820 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1821 return 0;
1822}
1823
1824static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1825{
1826 unsigned int next;
1827 unsigned long sz;
1828 int ret;
1829
1830 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1831 /*
1832 * When writing, try to get more consistent results by running
1833 * the test twice with exactly the same I/O but outputting the
1834 * results only for the 2nd run.
1835 */
1836 if (write) {
1837 next = rnd_next;
1838 ret = mmc_test_rnd_perf(test, write, 0, sz);
1839 if (ret)
1840 return ret;
1841 rnd_next = next;
1842 }
1843 ret = mmc_test_rnd_perf(test, write, 1, sz);
1844 if (ret)
1845 return ret;
1846 }
1847 sz = test->area.max_tfr;
1848 if (write) {
1849 next = rnd_next;
1850 ret = mmc_test_rnd_perf(test, write, 0, sz);
1851 if (ret)
1852 return ret;
1853 rnd_next = next;
1854 }
1855 return mmc_test_rnd_perf(test, write, 1, sz);
1856}
1857
1858/*
1859 * Random read performance by transfer size.
1860 */
1861static int mmc_test_random_read_perf(struct mmc_test_card *test)
1862{
1863 return mmc_test_random_perf(test, 0);
1864}
1865
1866/*
1867 * Random write performance by transfer size.
1868 */
1869static int mmc_test_random_write_perf(struct mmc_test_card *test)
1870{
1871 return mmc_test_random_perf(test, 1);
1872}
1873
1874static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1875 unsigned int tot_sz, int max_scatter)
1876{
1877 unsigned int dev_addr, i, cnt, sz, ssz;
1878 struct timespec ts1, ts2;
1879 int ret;
1880
1881 sz = test->area.max_tfr;
1882 /*
1883 * In the case of a maximally scattered transfer, the maximum transfer
1884 * size is further limited by using PAGE_SIZE segments.
1885 */
1886 if (max_scatter) {
1887 struct mmc_test_area *t = &test->area;
1888 unsigned long max_tfr;
1889
1890 if (t->max_seg_sz >= PAGE_SIZE)
1891 max_tfr = t->max_segs * PAGE_SIZE;
1892 else
1893 max_tfr = t->max_segs * t->max_seg_sz;
1894 if (sz > max_tfr)
1895 sz = max_tfr;
1896 }
1897
1898 ssz = sz >> 9;
1899 dev_addr = mmc_test_capacity(test->card) / 4;
1900 if (tot_sz > dev_addr << 9)
1901 tot_sz = dev_addr << 9;
1902 cnt = tot_sz / sz;
1903 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
1904
1905 getnstimeofday(&ts1);
1906 for (i = 0; i < cnt; i++) {
1907 ret = mmc_test_area_io(test, sz, dev_addr, write,
1908 max_scatter, 0);
1909 if (ret)
1910 return ret;
1911 dev_addr += ssz;
1912 }
1913 getnstimeofday(&ts2);
1914
1915 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1916
1917 return 0;
1918}
1919
1920static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
1921{
1922 int ret, i;
1923
1924 for (i = 0; i < 10; i++) {
1925 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
1926 if (ret)
1927 return ret;
1928 }
1929 for (i = 0; i < 5; i++) {
1930 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
1931 if (ret)
1932 return ret;
1933 }
1934 for (i = 0; i < 3; i++) {
1935 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
1936 if (ret)
1937 return ret;
1938 }
1939
1940 return ret;
1941}
1942
1943/*
1944 * Large sequential read performance.
1945 */
1946static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
1947{
1948 return mmc_test_large_seq_perf(test, 0);
1949}
1950
1951/*
1952 * Large sequential write performance.
1953 */
1954static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
1955{
1956 return mmc_test_large_seq_perf(test, 1);
1957}
1958
1769static const struct mmc_test_case mmc_test_cases[] = { 1959static const struct mmc_test_case mmc_test_cases[] = {
1770 { 1960 {
1771 .name = "Basic write (no data verification)", 1961 .name = "Basic write (no data verification)",
@@ -2005,6 +2195,34 @@ static const struct mmc_test_case mmc_test_cases[] = {
2005 .cleanup = mmc_test_area_cleanup, 2195 .cleanup = mmc_test_area_cleanup,
2006 }, 2196 },
2007 2197
2198 {
2199 .name = "Random read performance by transfer size",
2200 .prepare = mmc_test_area_prepare,
2201 .run = mmc_test_random_read_perf,
2202 .cleanup = mmc_test_area_cleanup,
2203 },
2204
2205 {
2206 .name = "Random write performance by transfer size",
2207 .prepare = mmc_test_area_prepare,
2208 .run = mmc_test_random_write_perf,
2209 .cleanup = mmc_test_area_cleanup,
2210 },
2211
2212 {
2213 .name = "Large sequential read into scattered pages",
2214 .prepare = mmc_test_area_prepare,
2215 .run = mmc_test_large_seq_read_perf,
2216 .cleanup = mmc_test_area_cleanup,
2217 },
2218
2219 {
2220 .name = "Large sequential write from scattered pages",
2221 .prepare = mmc_test_area_prepare,
2222 .run = mmc_test_large_seq_write_perf,
2223 .cleanup = mmc_test_area_cleanup,
2224 },
2225
2008}; 2226};
2009 2227
2010static DEFINE_MUTEX(mmc_test_lock); 2228static DEFINE_MUTEX(mmc_test_lock);
@@ -2148,11 +2366,11 @@ static int mtf_test_show(struct seq_file *sf, void *data)
2148 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result); 2366 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2149 2367
2150 list_for_each_entry(tr, &gr->tr_lst, link) { 2368 list_for_each_entry(tr, &gr->tr_lst, link) {
2151 seq_printf(sf, "%u %d %lu.%09lu %u\n", 2369 seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
2152 tr->count, tr->sectors, 2370 tr->count, tr->sectors,
2153 (unsigned long)tr->ts.tv_sec, 2371 (unsigned long)tr->ts.tv_sec,
2154 (unsigned long)tr->ts.tv_nsec, 2372 (unsigned long)tr->ts.tv_nsec,
2155 tr->rate); 2373 tr->rate, tr->iops / 100, tr->iops % 100);
2156 } 2374 }
2157 } 2375 }
2158 2376
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 4e42d030e097..2ae727568df9 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,8 +55,7 @@ static int mmc_queue_thread(void *d)
55 55
56 spin_lock_irq(q->queue_lock); 56 spin_lock_irq(q->queue_lock);
57 set_current_state(TASK_INTERRUPTIBLE); 57 set_current_state(TASK_INTERRUPTIBLE);
58 if (!blk_queue_plugged(q)) 58 req = blk_fetch_request(q);
59 req = blk_fetch_request(q);
60 mq->req = req; 59 mq->req = req;
61 spin_unlock_irq(q->queue_lock); 60 spin_unlock_irq(q->queue_lock);
62 61
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index a0716967b7c8..c8c9edb3d7cb 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -956,7 +956,7 @@ static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
956 return 0; 956 return 0;
957} 957}
958 958
959static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file) 959static int sdio_uart_tiocmget(struct tty_struct *tty)
960{ 960{
961 struct sdio_uart_port *port = tty->driver_data; 961 struct sdio_uart_port *port = tty->driver_data;
962 int result; 962 int result;
@@ -970,7 +970,7 @@ static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file)
970 return result; 970 return result;
971} 971}
972 972
973static int sdio_uart_tiocmset(struct tty_struct *tty, struct file *file, 973static int sdio_uart_tiocmset(struct tty_struct *tty,
974 unsigned int set, unsigned int clear) 974 unsigned int set, unsigned int clear)
975{ 975{
976 struct sdio_uart_port *port = tty->driver_data; 976 struct sdio_uart_port *port = tty->driver_data;
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index bb22ffd76ef8..ef103871517f 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -16,3 +16,14 @@ config MMC_UNSAFE_RESUME
16 16
17 This option sets a default which can be overridden by the 17 This option sets a default which can be overridden by the
18 module parameter "removable=0" or "removable=1". 18 module parameter "removable=0" or "removable=1".
19
20config MMC_CLKGATE
21 bool "MMC host clock gating (EXPERIMENTAL)"
22 depends on EXPERIMENTAL
23 help
24 This will attempt to aggressively gate the clock to the MMC card.
25 This is done to save power due to gating off the logic and bus
26 noise when the MMC card is not in use. Your host driver has to
27 support handling this in order for it to be of any use.
28
29 If unsure, say N.
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 86b479119332..639501970b41 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_MMC) += mmc_core.o
6mmc_core-y := core.o bus.o host.o \ 6mmc_core-y := core.o bus.o host.o \
7 mmc.o mmc_ops.o sd.o sd_ops.o \ 7 mmc.o mmc_ops.o sd.o sd_ops.o \
8 sdio.o sdio_ops.o sdio_bus.o \ 8 sdio.o sdio_ops.o sdio_bus.o \
9 sdio_cis.o sdio_io.o sdio_irq.o 9 sdio_cis.o sdio_io.o sdio_irq.o \
10 quirks.o
10 11
11mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o 12mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index af8dc6a2a317..d6d62fd07ee9 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -284,6 +284,7 @@ int mmc_add_card(struct mmc_card *card)
284 type = "SD-combo"; 284 type = "SD-combo";
285 if (mmc_card_blockaddr(card)) 285 if (mmc_card_blockaddr(card))
286 type = "SDHC-combo"; 286 type = "SDHC-combo";
287 break;
287 default: 288 default:
288 type = "?"; 289 type = "?";
289 break; 290 break;
@@ -303,14 +304,14 @@ int mmc_add_card(struct mmc_card *card)
303 type, card->rca); 304 type, card->rca);
304 } 305 }
305 306
306 ret = device_add(&card->dev);
307 if (ret)
308 return ret;
309
310#ifdef CONFIG_DEBUG_FS 307#ifdef CONFIG_DEBUG_FS
311 mmc_add_card_debugfs(card); 308 mmc_add_card_debugfs(card);
312#endif 309#endif
313 310
311 ret = device_add(&card->dev);
312 if (ret)
313 return ret;
314
314 mmc_card_set_present(card); 315 mmc_card_set_present(card);
315 316
316 return 0; 317 return 0;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 31ae07a36576..1f453acc8682 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -22,6 +22,7 @@
22#include <linux/scatterlist.h> 22#include <linux/scatterlist.h>
23#include <linux/log2.h> 23#include <linux/log2.h>
24#include <linux/regulator/consumer.h> 24#include <linux/regulator/consumer.h>
25#include <linux/pm_runtime.h>
25 26
26#include <linux/mmc/card.h> 27#include <linux/mmc/card.h>
27#include <linux/mmc/host.h> 28#include <linux/mmc/host.h>
@@ -130,6 +131,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
130 131
131 if (mrq->done) 132 if (mrq->done)
132 mrq->done(mrq); 133 mrq->done(mrq);
134
135 mmc_host_clk_gate(host);
133 } 136 }
134} 137}
135 138
@@ -164,8 +167,6 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
164 167
165 WARN_ON(!host->claimed); 168 WARN_ON(!host->claimed);
166 169
167 led_trigger_event(host->led, LED_FULL);
168
169 mrq->cmd->error = 0; 170 mrq->cmd->error = 0;
170 mrq->cmd->mrq = mrq; 171 mrq->cmd->mrq = mrq;
171 if (mrq->data) { 172 if (mrq->data) {
@@ -190,6 +191,8 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
190 mrq->stop->mrq = mrq; 191 mrq->stop->mrq = mrq;
191 } 192 }
192 } 193 }
194 mmc_host_clk_ungate(host);
195 led_trigger_event(host->led, LED_FULL);
193 host->ops->request(host, mrq); 196 host->ops->request(host, mrq);
194} 197}
195 198
@@ -295,8 +298,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
295 unsigned int timeout_us, limit_us; 298 unsigned int timeout_us, limit_us;
296 299
297 timeout_us = data->timeout_ns / 1000; 300 timeout_us = data->timeout_ns / 1000;
298 timeout_us += data->timeout_clks * 1000 / 301 if (mmc_host_clk_rate(card->host))
299 (card->host->ios.clock / 1000); 302 timeout_us += data->timeout_clks * 1000 /
303 (mmc_host_clk_rate(card->host) / 1000);
300 304
301 if (data->flags & MMC_DATA_WRITE) 305 if (data->flags & MMC_DATA_WRITE)
302 /* 306 /*
@@ -523,7 +527,14 @@ int mmc_try_claim_host(struct mmc_host *host)
523} 527}
524EXPORT_SYMBOL(mmc_try_claim_host); 528EXPORT_SYMBOL(mmc_try_claim_host);
525 529
526static void mmc_do_release_host(struct mmc_host *host) 530/**
531 * mmc_do_release_host - release a claimed host
532 * @host: mmc host to release
533 *
534 * If you successfully claimed a host, this function will
535 * release it again.
536 */
537void mmc_do_release_host(struct mmc_host *host)
527{ 538{
528 unsigned long flags; 539 unsigned long flags;
529 540
@@ -538,6 +549,7 @@ static void mmc_do_release_host(struct mmc_host *host)
538 wake_up(&host->wq); 549 wake_up(&host->wq);
539 } 550 }
540} 551}
552EXPORT_SYMBOL(mmc_do_release_host);
541 553
542void mmc_host_deeper_disable(struct work_struct *work) 554void mmc_host_deeper_disable(struct work_struct *work)
543{ 555{
@@ -614,6 +626,8 @@ static inline void mmc_set_ios(struct mmc_host *host)
614 ios->power_mode, ios->chip_select, ios->vdd, 626 ios->power_mode, ios->chip_select, ios->vdd,
615 ios->bus_width, ios->timing); 627 ios->bus_width, ios->timing);
616 628
629 if (ios->clock > 0)
630 mmc_set_ungated(host);
617 host->ops->set_ios(host, ios); 631 host->ops->set_ios(host, ios);
618} 632}
619 633
@@ -641,6 +655,61 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
641 mmc_set_ios(host); 655 mmc_set_ios(host);
642} 656}
643 657
658#ifdef CONFIG_MMC_CLKGATE
659/*
660 * This gates the clock by setting it to 0 Hz.
661 */
662void mmc_gate_clock(struct mmc_host *host)
663{
664 unsigned long flags;
665
666 spin_lock_irqsave(&host->clk_lock, flags);
667 host->clk_old = host->ios.clock;
668 host->ios.clock = 0;
669 host->clk_gated = true;
670 spin_unlock_irqrestore(&host->clk_lock, flags);
671 mmc_set_ios(host);
672}
673
674/*
675 * This restores the clock from gating by using the cached
676 * clock value.
677 */
678void mmc_ungate_clock(struct mmc_host *host)
679{
680 /*
681 * We should previously have gated the clock, so the clock shall
682 * be 0 here! The clock may however be 0 during initialization,
683 * when some request operations are performed before setting
684 * the frequency. When ungate is requested in that situation
685 * we just ignore the call.
686 */
687 if (host->clk_old) {
688 BUG_ON(host->ios.clock);
689 /* This call will also set host->clk_gated to false */
690 mmc_set_clock(host, host->clk_old);
691 }
692}
693
694void mmc_set_ungated(struct mmc_host *host)
695{
696 unsigned long flags;
697
698 /*
699 * We've been given a new frequency while the clock is gated,
700 * so make sure we regard this as ungating it.
701 */
702 spin_lock_irqsave(&host->clk_lock, flags);
703 host->clk_gated = false;
704 spin_unlock_irqrestore(&host->clk_lock, flags);
705}
706
707#else
708void mmc_set_ungated(struct mmc_host *host)
709{
710}
711#endif
712
644/* 713/*
645 * Change the bus mode (open drain/push-pull) of a host. 714 * Change the bus mode (open drain/push-pull) of a host.
646 */ 715 */
@@ -940,6 +1009,13 @@ static void mmc_power_off(struct mmc_host *host)
940{ 1009{
941 host->ios.clock = 0; 1010 host->ios.clock = 0;
942 host->ios.vdd = 0; 1011 host->ios.vdd = 0;
1012
1013 /*
1014 * Reset ocr mask to be the highest possible voltage supported for
1015 * this mmc host. This value will be used at next power up.
1016 */
1017 host->ocr = 1 << (fls(host->ocr_avail) - 1);
1018
943 if (!mmc_host_is_spi(host)) { 1019 if (!mmc_host_is_spi(host)) {
944 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 1020 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
945 host->ios.chip_select = MMC_CS_DONTCARE; 1021 host->ios.chip_select = MMC_CS_DONTCARE;
@@ -1424,35 +1500,63 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1424} 1500}
1425EXPORT_SYMBOL(mmc_set_blocklen); 1501EXPORT_SYMBOL(mmc_set_blocklen);
1426 1502
1503static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
1504{
1505 host->f_init = freq;
1506
1507#ifdef CONFIG_MMC_DEBUG
1508 pr_info("%s: %s: trying to init card at %u Hz\n",
1509 mmc_hostname(host), __func__, host->f_init);
1510#endif
1511 mmc_power_up(host);
1512
1513 /*
1514 * sdio_reset sends CMD52 to reset card. Since we do not know
1515 * if the card is being re-initialized, just send it. CMD52
1516 * should be ignored by SD/eMMC cards.
1517 */
1518 sdio_reset(host);
1519 mmc_go_idle(host);
1520
1521 mmc_send_if_cond(host, host->ocr_avail);
1522
1523 /* Order's important: probe SDIO, then SD, then MMC */
1524 if (!mmc_attach_sdio(host))
1525 return 0;
1526 if (!mmc_attach_sd(host))
1527 return 0;
1528 if (!mmc_attach_mmc(host))
1529 return 0;
1530
1531 mmc_power_off(host);
1532 return -EIO;
1533}
1534
1427void mmc_rescan(struct work_struct *work) 1535void mmc_rescan(struct work_struct *work)
1428{ 1536{
1537 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
1429 struct mmc_host *host = 1538 struct mmc_host *host =
1430 container_of(work, struct mmc_host, detect.work); 1539 container_of(work, struct mmc_host, detect.work);
1431 u32 ocr;
1432 int err;
1433 unsigned long flags;
1434 int i; 1540 int i;
1435 const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
1436
1437 spin_lock_irqsave(&host->lock, flags);
1438 1541
1439 if (host->rescan_disable) { 1542 if (host->rescan_disable)
1440 spin_unlock_irqrestore(&host->lock, flags);
1441 return; 1543 return;
1442 }
1443
1444 spin_unlock_irqrestore(&host->lock, flags);
1445
1446 1544
1447 mmc_bus_get(host); 1545 mmc_bus_get(host);
1448 1546
1449 /* if there is a card registered, check whether it is still present */ 1547 /*
1450 if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) 1548 * if there is a _removable_ card registered, check whether it is
1549 * still present
1550 */
1551 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
1552 && !(host->caps & MMC_CAP_NONREMOVABLE))
1451 host->bus_ops->detect(host); 1553 host->bus_ops->detect(host);
1452 1554
1555 /*
1556 * Let mmc_bus_put() free the bus/bus_ops if we've found that
1557 * the card is no longer present.
1558 */
1453 mmc_bus_put(host); 1559 mmc_bus_put(host);
1454
1455
1456 mmc_bus_get(host); 1560 mmc_bus_get(host);
1457 1561
1458 /* if there still is a card present, stop here */ 1562 /* if there still is a card present, stop here */
@@ -1461,8 +1565,6 @@ void mmc_rescan(struct work_struct *work)
1461 goto out; 1565 goto out;
1462 } 1566 }
1463 1567
1464 /* detect a newly inserted card */
1465
1466 /* 1568 /*
1467 * Only we can add a new handler, so it's safe to 1569 * Only we can add a new handler, so it's safe to
1468 * release the lock here. 1570 * release the lock here.
@@ -1472,72 +1574,16 @@ void mmc_rescan(struct work_struct *work)
1472 if (host->ops->get_cd && host->ops->get_cd(host) == 0) 1574 if (host->ops->get_cd && host->ops->get_cd(host) == 0)
1473 goto out; 1575 goto out;
1474 1576
1577 mmc_claim_host(host);
1475 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 1578 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
1476 mmc_claim_host(host); 1579 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
1477 1580 break;
1478 if (freqs[i] >= host->f_min) 1581 if (freqs[i] < host->f_min)
1479 host->f_init = freqs[i]; 1582 break;
1480 else if (!i || freqs[i-1] > host->f_min)
1481 host->f_init = host->f_min;
1482 else {
1483 mmc_release_host(host);
1484 goto out;
1485 }
1486#ifdef CONFIG_MMC_DEBUG
1487 pr_info("%s: %s: trying to init card at %u Hz\n",
1488 mmc_hostname(host), __func__, host->f_init);
1489#endif
1490 mmc_power_up(host);
1491 sdio_reset(host);
1492 mmc_go_idle(host);
1493
1494 mmc_send_if_cond(host, host->ocr_avail);
1495
1496 /*
1497 * First we search for SDIO...
1498 */
1499 err = mmc_send_io_op_cond(host, 0, &ocr);
1500 if (!err) {
1501 if (mmc_attach_sdio(host, ocr)) {
1502 mmc_claim_host(host);
1503 /*
1504 * Try SDMEM (but not MMC) even if SDIO
1505 * is broken.
1506 */
1507 if (mmc_send_app_op_cond(host, 0, &ocr))
1508 goto out_fail;
1509
1510 if (mmc_attach_sd(host, ocr))
1511 mmc_power_off(host);
1512 }
1513 goto out;
1514 }
1515
1516 /*
1517 * ...then normal SD...
1518 */
1519 err = mmc_send_app_op_cond(host, 0, &ocr);
1520 if (!err) {
1521 if (mmc_attach_sd(host, ocr))
1522 mmc_power_off(host);
1523 goto out;
1524 }
1525
1526 /*
1527 * ...and finally MMC.
1528 */
1529 err = mmc_send_op_cond(host, 0, &ocr);
1530 if (!err) {
1531 if (mmc_attach_mmc(host, ocr))
1532 mmc_power_off(host);
1533 goto out;
1534 }
1535
1536out_fail:
1537 mmc_release_host(host);
1538 mmc_power_off(host);
1539 } 1583 }
1540out: 1584 mmc_release_host(host);
1585
1586 out:
1541 if (host->caps & MMC_CAP_NEEDS_POLL) 1587 if (host->caps & MMC_CAP_NEEDS_POLL)
1542 mmc_schedule_delayed_work(&host->detect, HZ); 1588 mmc_schedule_delayed_work(&host->detect, HZ);
1543} 1589}
@@ -1721,6 +1767,18 @@ int mmc_resume_host(struct mmc_host *host)
1721 if (!(host->pm_flags & MMC_PM_KEEP_POWER)) { 1767 if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
1722 mmc_power_up(host); 1768 mmc_power_up(host);
1723 mmc_select_voltage(host, host->ocr); 1769 mmc_select_voltage(host, host->ocr);
1770 /*
1771 * Tell runtime PM core we just powered up the card,
1772 * since it still believes the card is powered off.
1773 * Note that currently runtime PM is only enabled
1774 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
1775 */
1776 if (mmc_card_sdio(host->card) &&
1777 (host->caps & MMC_CAP_POWER_OFF_CARD)) {
1778 pm_runtime_disable(&host->card->dev);
1779 pm_runtime_set_active(&host->card->dev);
1780 pm_runtime_enable(&host->card->dev);
1781 }
1724 } 1782 }
1725 BUG_ON(!host->bus_ops->resume); 1783 BUG_ON(!host->bus_ops->resume);
1726 err = host->bus_ops->resume(host); 1784 err = host->bus_ops->resume(host);
@@ -1773,6 +1831,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
1773 1831
1774 case PM_POST_SUSPEND: 1832 case PM_POST_SUSPEND:
1775 case PM_POST_HIBERNATION: 1833 case PM_POST_HIBERNATION:
1834 case PM_POST_RESTORE:
1776 1835
1777 spin_lock_irqsave(&host->lock, flags); 1836 spin_lock_irqsave(&host->lock, flags);
1778 host->rescan_disable = 0; 1837 host->rescan_disable = 0;
@@ -1789,7 +1848,7 @@ static int __init mmc_init(void)
1789{ 1848{
1790 int ret; 1849 int ret;
1791 1850
1792 workqueue = create_singlethread_workqueue("kmmcd"); 1851 workqueue = alloc_ordered_workqueue("kmmcd", 0);
1793 if (!workqueue) 1852 if (!workqueue)
1794 return -ENOMEM; 1853 return -ENOMEM;
1795 1854
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 77240cd11bcf..20b1c0831eac 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -33,6 +33,9 @@ void mmc_init_erase(struct mmc_card *card);
33 33
34void mmc_set_chip_select(struct mmc_host *host, int mode); 34void mmc_set_chip_select(struct mmc_host *host, int mode);
35void mmc_set_clock(struct mmc_host *host, unsigned int hz); 35void mmc_set_clock(struct mmc_host *host, unsigned int hz);
36void mmc_gate_clock(struct mmc_host *host);
37void mmc_ungate_clock(struct mmc_host *host);
38void mmc_set_ungated(struct mmc_host *host);
36void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); 39void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
37void mmc_set_bus_width(struct mmc_host *host, unsigned int width); 40void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
38void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width, 41void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
@@ -54,9 +57,11 @@ void mmc_rescan(struct work_struct *work);
54void mmc_start_host(struct mmc_host *host); 57void mmc_start_host(struct mmc_host *host);
55void mmc_stop_host(struct mmc_host *host); 58void mmc_stop_host(struct mmc_host *host);
56 59
57int mmc_attach_mmc(struct mmc_host *host, u32 ocr); 60int mmc_attach_mmc(struct mmc_host *host);
58int mmc_attach_sd(struct mmc_host *host, u32 ocr); 61int mmc_attach_sd(struct mmc_host *host);
59int mmc_attach_sdio(struct mmc_host *host, u32 ocr); 62int mmc_attach_sdio(struct mmc_host *host);
63
64void mmc_fixup_device(struct mmc_card *card);
60 65
61/* Module parameters */ 66/* Module parameters */
62extern int use_spi_crc; 67extern int use_spi_crc;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index eed1405fd742..998797ed67a6 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -183,6 +183,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
183 &mmc_clock_fops)) 183 &mmc_clock_fops))
184 goto err_node; 184 goto err_node;
185 185
186#ifdef CONFIG_MMC_CLKGATE
187 if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
188 root, &host->clk_delay))
189 goto err_node;
190#endif
186 return; 191 return;
187 192
188err_node: 193err_node:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 10b8af27e03a..2b200c1cfbba 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (C) 2003 Russell King, All Rights Reserved. 4 * Copyright (C) 2003 Russell King, All Rights Reserved.
5 * Copyright (C) 2007-2008 Pierre Ossman 5 * Copyright (C) 2007-2008 Pierre Ossman
6 * Copyright (C) 2010 Linus Walleij
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -20,6 +21,7 @@
20#include <linux/suspend.h> 21#include <linux/suspend.h>
21 22
22#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24#include <linux/mmc/card.h>
23 25
24#include "core.h" 26#include "core.h"
25#include "host.h" 27#include "host.h"
@@ -50,6 +52,201 @@ void mmc_unregister_host_class(void)
50static DEFINE_IDR(mmc_host_idr); 52static DEFINE_IDR(mmc_host_idr);
51static DEFINE_SPINLOCK(mmc_host_lock); 53static DEFINE_SPINLOCK(mmc_host_lock);
52 54
55#ifdef CONFIG_MMC_CLKGATE
56
57/*
58 * Enabling clock gating will make the core call out to the host
59 * once up and once down when it performs a request or card operation
60 * intermingled in any fashion. The driver will see this through
61 * set_ios() operations with ios.clock field set to 0 to gate (disable)
62 * the block clock, and to the old frequency to enable it again.
63 */
64static void mmc_host_clk_gate_delayed(struct mmc_host *host)
65{
66 unsigned long tick_ns;
67 unsigned long freq = host->ios.clock;
68 unsigned long flags;
69
70 if (!freq) {
71 pr_debug("%s: frequency set to 0 in disable function, "
72 "this means the clock is already disabled.\n",
73 mmc_hostname(host));
74 return;
75 }
76 /*
77 * New requests may have appeared while we were scheduling,
78 * then there is no reason to delay the check before
79 * clk_disable().
80 */
81 spin_lock_irqsave(&host->clk_lock, flags);
82
83 /*
84 * Delay n bus cycles (at least 8 from MMC spec) before attempting
85 * to disable the MCI block clock. The reference count may have
86 * gone up again after this delay due to rescheduling!
87 */
88 if (!host->clk_requests) {
89 spin_unlock_irqrestore(&host->clk_lock, flags);
90 tick_ns = DIV_ROUND_UP(1000000000, freq);
91 ndelay(host->clk_delay * tick_ns);
92 } else {
93 /* New users appeared while waiting for this work */
94 spin_unlock_irqrestore(&host->clk_lock, flags);
95 return;
96 }
97 mmc_claim_host(host);
98 spin_lock_irqsave(&host->clk_lock, flags);
99 if (!host->clk_requests) {
100 spin_unlock_irqrestore(&host->clk_lock, flags);
101 /* This will set host->ios.clock to 0 */
102 mmc_gate_clock(host);
103 spin_lock_irqsave(&host->clk_lock, flags);
104 pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
105 }
106 spin_unlock_irqrestore(&host->clk_lock, flags);
107 mmc_release_host(host);
108}
109
110/*
111 * Internal work. Work to disable the clock at some later point.
112 */
113static void mmc_host_clk_gate_work(struct work_struct *work)
114{
115 struct mmc_host *host = container_of(work, struct mmc_host,
116 clk_gate_work);
117
118 mmc_host_clk_gate_delayed(host);
119}
120
121/**
122 * mmc_host_clk_ungate - ungate hardware MCI clocks
123 * @host: host to ungate.
124 *
125 * Makes sure the host ios.clock is restored to a non-zero value
126 * past this call. Increase clock reference count and ungate clock
127 * if we're the first user.
128 */
129void mmc_host_clk_ungate(struct mmc_host *host)
130{
131 unsigned long flags;
132
133 mmc_claim_host(host);
134 spin_lock_irqsave(&host->clk_lock, flags);
135 if (host->clk_gated) {
136 spin_unlock_irqrestore(&host->clk_lock, flags);
137 mmc_ungate_clock(host);
138 spin_lock_irqsave(&host->clk_lock, flags);
139 pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
140 }
141 host->clk_requests++;
142 spin_unlock_irqrestore(&host->clk_lock, flags);
143 mmc_release_host(host);
144}
145
146/**
147 * mmc_host_may_gate_card - check if this card may be gated
148 * @card: card to check.
149 */
150static bool mmc_host_may_gate_card(struct mmc_card *card)
151{
152 /* If there is no card we may gate it */
153 if (!card)
154 return true;
155 /*
156 * Don't gate SDIO cards! These need to be clocked at all times
157 * since they may be independent systems generating interrupts
158 * and other events. The clock requests counter from the core will
159 * go down to zero since the core does not need it, but we will not
160 * gate the clock, because there is somebody out there that may still
161 * be using it.
162 */
163 return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
164}
165
166/**
167 * mmc_host_clk_gate - gate off hardware MCI clocks
168 * @host: host to gate.
169 *
170 * Calls the host driver with ios.clock set to zero as often as possible
171 * in order to gate off hardware MCI clocks. Decrease clock reference
172 * count and schedule disabling of clock.
173 */
174void mmc_host_clk_gate(struct mmc_host *host)
175{
176 unsigned long flags;
177
178 spin_lock_irqsave(&host->clk_lock, flags);
179 host->clk_requests--;
180 if (mmc_host_may_gate_card(host->card) &&
181 !host->clk_requests)
182 schedule_work(&host->clk_gate_work);
183 spin_unlock_irqrestore(&host->clk_lock, flags);
184}
185
186/**
187 * mmc_host_clk_rate - get current clock frequency setting
188 * @host: host to get the clock frequency for.
189 *
190 * Returns current clock frequency regardless of gating.
191 */
192unsigned int mmc_host_clk_rate(struct mmc_host *host)
193{
194 unsigned long freq;
195 unsigned long flags;
196
197 spin_lock_irqsave(&host->clk_lock, flags);
198 if (host->clk_gated)
199 freq = host->clk_old;
200 else
201 freq = host->ios.clock;
202 spin_unlock_irqrestore(&host->clk_lock, flags);
203 return freq;
204}
205
206/**
207 * mmc_host_clk_init - set up clock gating code
208 * @host: host with potential clock to control
209 */
210static inline void mmc_host_clk_init(struct mmc_host *host)
211{
212 host->clk_requests = 0;
213 /* Hold MCI clock for 8 cycles by default */
214 host->clk_delay = 8;
215 host->clk_gated = false;
216 INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
217 spin_lock_init(&host->clk_lock);
218}
219
220/**
221 * mmc_host_clk_exit - shut down clock gating code
222 * @host: host with potential clock to control
223 */
224static inline void mmc_host_clk_exit(struct mmc_host *host)
225{
226 /*
227 * Wait for any outstanding gate and then make sure we're
228 * ungated before exiting.
229 */
230 if (cancel_work_sync(&host->clk_gate_work))
231 mmc_host_clk_gate_delayed(host);
232 if (host->clk_gated)
233 mmc_host_clk_ungate(host);
234 /* There should be only one user now */
235 WARN_ON(host->clk_requests > 1);
236}
237
238#else
239
240static inline void mmc_host_clk_init(struct mmc_host *host)
241{
242}
243
244static inline void mmc_host_clk_exit(struct mmc_host *host)
245{
246}
247
248#endif
249
53/** 250/**
54 * mmc_alloc_host - initialise the per-host structure. 251 * mmc_alloc_host - initialise the per-host structure.
55 * @extra: sizeof private data structure 252 * @extra: sizeof private data structure
@@ -82,6 +279,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
82 host->class_dev.class = &mmc_host_class; 279 host->class_dev.class = &mmc_host_class;
83 device_initialize(&host->class_dev); 280 device_initialize(&host->class_dev);
84 281
282 mmc_host_clk_init(host);
283
85 spin_lock_init(&host->lock); 284 spin_lock_init(&host->lock);
86 init_waitqueue_head(&host->wq); 285 init_waitqueue_head(&host->wq);
87 INIT_DELAYED_WORK(&host->detect, mmc_rescan); 286 INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -163,6 +362,8 @@ void mmc_remove_host(struct mmc_host *host)
163 device_del(&host->class_dev); 362 device_del(&host->class_dev);
164 363
165 led_trigger_unregister_simple(host->led); 364 led_trigger_unregister_simple(host->led);
365
366 mmc_host_clk_exit(host);
166} 367}
167 368
168EXPORT_SYMBOL(mmc_remove_host); 369EXPORT_SYMBOL(mmc_remove_host);
@@ -183,4 +384,3 @@ void mmc_free_host(struct mmc_host *host)
183} 384}
184 385
185EXPORT_SYMBOL(mmc_free_host); 386EXPORT_SYMBOL(mmc_free_host);
186
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 8c87e1109a34..de199f911928 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -10,10 +10,31 @@
10 */ 10 */
11#ifndef _MMC_CORE_HOST_H 11#ifndef _MMC_CORE_HOST_H
12#define _MMC_CORE_HOST_H 12#define _MMC_CORE_HOST_H
13#include <linux/mmc/host.h>
13 14
14int mmc_register_host_class(void); 15int mmc_register_host_class(void);
15void mmc_unregister_host_class(void); 16void mmc_unregister_host_class(void);
16 17
18#ifdef CONFIG_MMC_CLKGATE
19void mmc_host_clk_ungate(struct mmc_host *host);
20void mmc_host_clk_gate(struct mmc_host *host);
21unsigned int mmc_host_clk_rate(struct mmc_host *host);
22
23#else
24static inline void mmc_host_clk_ungate(struct mmc_host *host)
25{
26}
27
28static inline void mmc_host_clk_gate(struct mmc_host *host)
29{
30}
31
32static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
33{
34 return host->ios.clock;
35}
36#endif
37
17void mmc_host_deeper_disable(struct work_struct *work); 38void mmc_host_deeper_disable(struct work_struct *work);
18 39
19#endif 40#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 77f93c3b8808..772d0d0a541b 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -302,6 +302,44 @@ static int mmc_read_ext_csd(struct mmc_card *card)
302 } 302 }
303 303
304 if (card->ext_csd.rev >= 4) { 304 if (card->ext_csd.rev >= 4) {
305 /*
306 * Enhanced area feature support -- check whether the eMMC
307 * card has the Enhanced area enabled. If so, export enhanced
308 * area offset and size to user by adding sysfs interface.
309 */
310 if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
311 (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
312 u8 hc_erase_grp_sz =
313 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
314 u8 hc_wp_grp_sz =
315 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
316
317 card->ext_csd.enhanced_area_en = 1;
318 /*
319 * calculate the enhanced data area offset, in bytes
320 */
321 card->ext_csd.enhanced_area_offset =
322 (ext_csd[139] << 24) + (ext_csd[138] << 16) +
323 (ext_csd[137] << 8) + ext_csd[136];
324 if (mmc_card_blockaddr(card))
325 card->ext_csd.enhanced_area_offset <<= 9;
326 /*
327 * calculate the enhanced data area size, in kilobytes
328 */
329 card->ext_csd.enhanced_area_size =
330 (ext_csd[142] << 16) + (ext_csd[141] << 8) +
331 ext_csd[140];
332 card->ext_csd.enhanced_area_size *=
333 (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
334 card->ext_csd.enhanced_area_size <<= 9;
335 } else {
336 /*
337 * If the enhanced area is not enabled, disable these
338 * device attributes.
339 */
340 card->ext_csd.enhanced_area_offset = -EINVAL;
341 card->ext_csd.enhanced_area_size = -EINVAL;
342 }
305 card->ext_csd.sec_trim_mult = 343 card->ext_csd.sec_trim_mult =
306 ext_csd[EXT_CSD_SEC_TRIM_MULT]; 344 ext_csd[EXT_CSD_SEC_TRIM_MULT];
307 card->ext_csd.sec_erase_mult = 345 card->ext_csd.sec_erase_mult =
@@ -336,6 +374,9 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
336MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); 374MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
337MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); 375MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
338MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial); 376MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
377MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
378 card->ext_csd.enhanced_area_offset);
379MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
339 380
340static struct attribute *mmc_std_attrs[] = { 381static struct attribute *mmc_std_attrs[] = {
341 &dev_attr_cid.attr, 382 &dev_attr_cid.attr,
@@ -349,6 +390,8 @@ static struct attribute *mmc_std_attrs[] = {
349 &dev_attr_name.attr, 390 &dev_attr_name.attr,
350 &dev_attr_oemid.attr, 391 &dev_attr_oemid.attr,
351 &dev_attr_serial.attr, 392 &dev_attr_serial.attr,
393 &dev_attr_enhanced_area_offset.attr,
394 &dev_attr_enhanced_area_size.attr,
352 NULL, 395 NULL,
353}; 396};
354 397
@@ -378,6 +421,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
378 int err, ddr = 0; 421 int err, ddr = 0;
379 u32 cid[4]; 422 u32 cid[4];
380 unsigned int max_dtr; 423 unsigned int max_dtr;
424 u32 rocr;
381 425
382 BUG_ON(!host); 426 BUG_ON(!host);
383 WARN_ON(!host->claimed); 427 WARN_ON(!host->claimed);
@@ -391,7 +435,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
391 mmc_go_idle(host); 435 mmc_go_idle(host);
392 436
393 /* The extra bit indicates that we support high capacity */ 437 /* The extra bit indicates that we support high capacity */
394 err = mmc_send_op_cond(host, ocr | (1 << 30), NULL); 438 err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
395 if (err) 439 if (err)
396 goto err; 440 goto err;
397 441
@@ -479,11 +523,51 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
479 err = mmc_read_ext_csd(card); 523 err = mmc_read_ext_csd(card);
480 if (err) 524 if (err)
481 goto free_card; 525 goto free_card;
526
527 /* If doing byte addressing, check if required to do sector
528 * addressing. Handle the case of <2GB cards needing sector
529 * addressing. See section 8.1 JEDEC Standard JED84-A441;
530 * ocr register has bit 30 set for sector addressing.
531 */
532 if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
533 mmc_card_set_blockaddr(card);
534
482 /* Erase size depends on CSD and Extended CSD */ 535 /* Erase size depends on CSD and Extended CSD */
483 mmc_set_erase_size(card); 536 mmc_set_erase_size(card);
484 } 537 }
485 538
486 /* 539 /*
540 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
541 * bit. This bit will be lost every time after a reset or power off.
542 */
543 if (card->ext_csd.enhanced_area_en) {
544 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
545 EXT_CSD_ERASE_GROUP_DEF, 1);
546
547 if (err && err != -EBADMSG)
548 goto free_card;
549
550 if (err) {
551 err = 0;
552 /*
553 * Just disable enhanced area off & sz
554 * will try to enable ERASE_GROUP_DEF
555 * during next time reinit
556 */
557 card->ext_csd.enhanced_area_offset = -EINVAL;
558 card->ext_csd.enhanced_area_size = -EINVAL;
559 } else {
560 card->ext_csd.erase_group_def = 1;
561 /*
562 * enable ERASE_GRP_DEF successfully.
563 * This will affect the erase size, so
564 * here need to reset erase size
565 */
566 mmc_set_erase_size(card);
567 }
568 }
569
570 /*
487 * Activate high speed (if supported) 571 * Activate high speed (if supported)
488 */ 572 */
489 if ((card->ext_csd.hs_max_dtr != 0) && 573 if ((card->ext_csd.hs_max_dtr != 0) &&
@@ -534,39 +618,57 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
534 */ 618 */
535 if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) && 619 if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
536 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { 620 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
537 unsigned ext_csd_bit, bus_width; 621 static unsigned ext_csd_bits[][2] = {
538 622 { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
539 if (host->caps & MMC_CAP_8_BIT_DATA) { 623 { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
540 if (ddr) 624 { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
541 ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8; 625 };
542 else 626 static unsigned bus_widths[] = {
543 ext_csd_bit = EXT_CSD_BUS_WIDTH_8; 627 MMC_BUS_WIDTH_8,
544 bus_width = MMC_BUS_WIDTH_8; 628 MMC_BUS_WIDTH_4,
545 } else { 629 MMC_BUS_WIDTH_1
546 if (ddr) 630 };
547 ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4; 631 unsigned idx, bus_width = 0;
548 else 632
549 ext_csd_bit = EXT_CSD_BUS_WIDTH_4; 633 if (host->caps & MMC_CAP_8_BIT_DATA)
550 bus_width = MMC_BUS_WIDTH_4; 634 idx = 0;
635 else
636 idx = 1;
637 for (; idx < ARRAY_SIZE(bus_widths); idx++) {
638 bus_width = bus_widths[idx];
639 if (bus_width == MMC_BUS_WIDTH_1)
640 ddr = 0; /* no DDR for 1-bit width */
641 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
642 EXT_CSD_BUS_WIDTH,
643 ext_csd_bits[idx][0]);
644 if (!err) {
645 mmc_set_bus_width_ddr(card->host,
646 bus_width, MMC_SDR_MODE);
647 /*
648 * If controller can't handle bus width test,
649 * use the highest bus width to maintain
650 * compatibility with previous MMC behavior.
651 */
652 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
653 break;
654 err = mmc_bus_test(card, bus_width);
655 if (!err)
656 break;
657 }
551 } 658 }
552 659
553 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 660 if (!err && ddr) {
554 EXT_CSD_BUS_WIDTH, ext_csd_bit); 661 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
555 662 EXT_CSD_BUS_WIDTH,
556 if (err && err != -EBADMSG) 663 ext_csd_bits[idx][1]);
557 goto free_card; 664 }
558
559 if (err) { 665 if (err) {
560 printk(KERN_WARNING "%s: switch to bus width %d ddr %d " 666 printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
561 "failed\n", mmc_hostname(card->host), 667 "failed\n", mmc_hostname(card->host),
562 1 << bus_width, ddr); 668 1 << bus_width, ddr);
563 err = 0; 669 goto free_card;
564 } else { 670 } else if (ddr) {
565 if (ddr) 671 mmc_card_set_ddr_mode(card);
566 mmc_card_set_ddr_mode(card);
567 else
568 ddr = MMC_SDR_MODE;
569
570 mmc_set_bus_width_ddr(card->host, bus_width, ddr); 672 mmc_set_bus_width_ddr(card->host, bus_width, ddr);
571 } 673 }
572 } 674 }
@@ -737,14 +839,21 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
737/* 839/*
738 * Starting point for MMC card init. 840 * Starting point for MMC card init.
739 */ 841 */
740int mmc_attach_mmc(struct mmc_host *host, u32 ocr) 842int mmc_attach_mmc(struct mmc_host *host)
741{ 843{
742 int err; 844 int err;
845 u32 ocr;
743 846
744 BUG_ON(!host); 847 BUG_ON(!host);
745 WARN_ON(!host->claimed); 848 WARN_ON(!host->claimed);
746 849
850 err = mmc_send_op_cond(host, 0, &ocr);
851 if (err)
852 return err;
853
747 mmc_attach_bus_ops(host); 854 mmc_attach_bus_ops(host);
855 if (host->ocr_avail_mmc)
856 host->ocr_avail = host->ocr_avail_mmc;
748 857
749 /* 858 /*
750 * We need to get OCR a different way for SPI. 859 * We need to get OCR a different way for SPI.
@@ -784,20 +893,20 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
784 goto err; 893 goto err;
785 894
786 mmc_release_host(host); 895 mmc_release_host(host);
787
788 err = mmc_add_card(host->card); 896 err = mmc_add_card(host->card);
897 mmc_claim_host(host);
789 if (err) 898 if (err)
790 goto remove_card; 899 goto remove_card;
791 900
792 return 0; 901 return 0;
793 902
794remove_card: 903remove_card:
904 mmc_release_host(host);
795 mmc_remove_card(host->card); 905 mmc_remove_card(host->card);
796 host->card = NULL;
797 mmc_claim_host(host); 906 mmc_claim_host(host);
907 host->card = NULL;
798err: 908err:
799 mmc_detach_bus(host); 909 mmc_detach_bus(host);
800 mmc_release_host(host);
801 910
802 printk(KERN_ERR "%s: error %d whilst initialising MMC card\n", 911 printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
803 mmc_hostname(host), err); 912 mmc_hostname(host), err);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 326447c9ede8..f3b22bf89cc9 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -105,7 +105,7 @@ int mmc_go_idle(struct mmc_host *host)
105 * that in case of hardware that won't pull up DAT3/nCS otherwise. 105 * that in case of hardware that won't pull up DAT3/nCS otherwise.
106 * 106 *
107 * SPI hosts ignore ios.chip_select; it's managed according to 107 * SPI hosts ignore ios.chip_select; it's managed according to
108 * rules that must accomodate non-MMC slaves which this layer 108 * rules that must accommodate non-MMC slaves which this layer
109 * won't even know about. 109 * won't even know about.
110 */ 110 */
111 if (!mmc_host_is_spi(host)) { 111 if (!mmc_host_is_spi(host)) {
@@ -462,3 +462,104 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
462 return 0; 462 return 0;
463} 463}
464 464
465static int
466mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
467 u8 len)
468{
469 struct mmc_request mrq;
470 struct mmc_command cmd;
471 struct mmc_data data;
472 struct scatterlist sg;
473 u8 *data_buf;
474 u8 *test_buf;
475 int i, err;
476 static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
477 static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
478
479 /* dma onto stack is unsafe/nonportable, but callers to this
480 * routine normally provide temporary on-stack buffers ...
481 */
482 data_buf = kmalloc(len, GFP_KERNEL);
483 if (!data_buf)
484 return -ENOMEM;
485
486 if (len == 8)
487 test_buf = testdata_8bit;
488 else if (len == 4)
489 test_buf = testdata_4bit;
490 else {
491 printk(KERN_ERR "%s: Invalid bus_width %d\n",
492 mmc_hostname(host), len);
493 kfree(data_buf);
494 return -EINVAL;
495 }
496
497 if (opcode == MMC_BUS_TEST_W)
498 memcpy(data_buf, test_buf, len);
499
500 memset(&mrq, 0, sizeof(struct mmc_request));
501 memset(&cmd, 0, sizeof(struct mmc_command));
502 memset(&data, 0, sizeof(struct mmc_data));
503
504 mrq.cmd = &cmd;
505 mrq.data = &data;
506 cmd.opcode = opcode;
507 cmd.arg = 0;
508
509 /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
510 * rely on callers to never use this with "native" calls for reading
511 * CSD or CID. Native versions of those commands use the R2 type,
512 * not R1 plus a data block.
513 */
514 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
515
516 data.blksz = len;
517 data.blocks = 1;
518 if (opcode == MMC_BUS_TEST_R)
519 data.flags = MMC_DATA_READ;
520 else
521 data.flags = MMC_DATA_WRITE;
522
523 data.sg = &sg;
524 data.sg_len = 1;
525 sg_init_one(&sg, data_buf, len);
526 mmc_wait_for_req(host, &mrq);
527 err = 0;
528 if (opcode == MMC_BUS_TEST_R) {
529 for (i = 0; i < len / 4; i++)
530 if ((test_buf[i] ^ data_buf[i]) != 0xff) {
531 err = -EIO;
532 break;
533 }
534 }
535 kfree(data_buf);
536
537 if (cmd.error)
538 return cmd.error;
539 if (data.error)
540 return data.error;
541
542 return err;
543}
544
545int mmc_bus_test(struct mmc_card *card, u8 bus_width)
546{
547 int err, width;
548
549 if (bus_width == MMC_BUS_WIDTH_8)
550 width = 8;
551 else if (bus_width == MMC_BUS_WIDTH_4)
552 width = 4;
553 else if (bus_width == MMC_BUS_WIDTH_1)
554 return 0; /* no need for test */
555 else
556 return -EINVAL;
557
558 /*
559 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
560 * is a problem. This improves chances that the test will work.
561 */
562 mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
563 err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
564 return err;
565}
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 653eb8e84178..e6d44b8a18db 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -26,6 +26,7 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid);
26int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); 26int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
27int mmc_spi_set_crc(struct mmc_host *host, int use_crc); 27int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
28int mmc_card_sleepawake(struct mmc_host *host, int sleep); 28int mmc_card_sleepawake(struct mmc_host *host, int sleep);
29int mmc_bus_test(struct mmc_card *card, u8 bus_width);
29 30
30#endif 31#endif
31 32
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
new file mode 100644
index 000000000000..11118b74eb20
--- /dev/null
+++ b/drivers/mmc/core/quirks.c
@@ -0,0 +1,84 @@
1/*
2 * This file contains work-arounds for many known sdio hardware
3 * bugs.
4 *
5 * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
6 * Inspired from pci fixup code:
7 * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
8 *
9 */
10
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/mmc/card.h>
14#include <linux/mod_devicetable.h>
15
16/*
17 * The world is not perfect and supplies us with broken mmc/sdio devices.
18 * For at least a part of these bugs we need a work-around
19 */
20
21struct mmc_fixup {
22 u16 vendor, device; /* You can use SDIO_ANY_ID here of course */
23 void (*vendor_fixup)(struct mmc_card *card, int data);
24 int data;
25};
26
27/*
28 * This hook just adds a quirk unconditionnally
29 */
30static void __maybe_unused add_quirk(struct mmc_card *card, int data)
31{
32 card->quirks |= data;
33}
34
35/*
36 * This hook just removes a quirk unconditionnally
37 */
38static void __maybe_unused remove_quirk(struct mmc_card *card, int data)
39{
40 card->quirks &= ~data;
41}
42
43/*
44 * This hook just adds a quirk for all sdio devices
45 */
46static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
47{
48 if (mmc_card_sdio(card))
49 card->quirks |= data;
50}
51
52#ifndef SDIO_VENDOR_ID_TI
53#define SDIO_VENDOR_ID_TI 0x0097
54#endif
55
56#ifndef SDIO_DEVICE_ID_TI_WL1271
57#define SDIO_DEVICE_ID_TI_WL1271 0x4076
58#endif
59
60static const struct mmc_fixup mmc_fixup_methods[] = {
61 /* by default sdio devices are considered CLK_GATING broken */
62 /* good cards will be whitelisted as they are tested */
63 { SDIO_ANY_ID, SDIO_ANY_ID,
64 add_quirk_for_sdio_devices, MMC_QUIRK_BROKEN_CLK_GATING },
65 { SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
66 remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING },
67 { 0 }
68};
69
70void mmc_fixup_device(struct mmc_card *card)
71{
72 const struct mmc_fixup *f;
73
74 for (f = mmc_fixup_methods; f->vendor_fixup; f++) {
75 if ((f->vendor == card->cis.vendor
76 || f->vendor == (u16) SDIO_ANY_ID) &&
77 (f->device == card->cis.device
78 || f->device == (u16) SDIO_ANY_ID)) {
79 dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup);
80 f->vendor_fixup(card, f->data);
81 }
82 }
83}
84EXPORT_SYMBOL(mmc_fixup_device);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 49da4dffd28e..6dac89fe0535 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -21,6 +21,7 @@
21#include "core.h" 21#include "core.h"
22#include "bus.h" 22#include "bus.h"
23#include "mmc_ops.h" 23#include "mmc_ops.h"
24#include "sd.h"
24#include "sd_ops.h" 25#include "sd_ops.h"
25 26
26static const unsigned int tran_exp[] = { 27static const unsigned int tran_exp[] = {
@@ -764,14 +765,21 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
764/* 765/*
765 * Starting point for SD card init. 766 * Starting point for SD card init.
766 */ 767 */
767int mmc_attach_sd(struct mmc_host *host, u32 ocr) 768int mmc_attach_sd(struct mmc_host *host)
768{ 769{
769 int err; 770 int err;
771 u32 ocr;
770 772
771 BUG_ON(!host); 773 BUG_ON(!host);
772 WARN_ON(!host->claimed); 774 WARN_ON(!host->claimed);
773 775
776 err = mmc_send_app_op_cond(host, 0, &ocr);
777 if (err)
778 return err;
779
774 mmc_sd_attach_bus_ops(host); 780 mmc_sd_attach_bus_ops(host);
781 if (host->ocr_avail_sd)
782 host->ocr_avail = host->ocr_avail_sd;
775 783
776 /* 784 /*
777 * We need to get OCR a different way for SPI. 785 * We need to get OCR a different way for SPI.
@@ -795,7 +803,8 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
795 ocr &= ~0x7F; 803 ocr &= ~0x7F;
796 } 804 }
797 805
798 if (ocr & MMC_VDD_165_195) { 806 if ((ocr & MMC_VDD_165_195) &&
807 !(host->ocr_avail_sd & MMC_VDD_165_195)) {
799 printk(KERN_WARNING "%s: SD card claims to support the " 808 printk(KERN_WARNING "%s: SD card claims to support the "
800 "incompletely defined 'low voltage range'. This " 809 "incompletely defined 'low voltage range'. This "
801 "will be ignored.\n", mmc_hostname(host)); 810 "will be ignored.\n", mmc_hostname(host));
@@ -820,20 +829,20 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
820 goto err; 829 goto err;
821 830
822 mmc_release_host(host); 831 mmc_release_host(host);
823
824 err = mmc_add_card(host->card); 832 err = mmc_add_card(host->card);
833 mmc_claim_host(host);
825 if (err) 834 if (err)
826 goto remove_card; 835 goto remove_card;
827 836
828 return 0; 837 return 0;
829 838
830remove_card: 839remove_card:
840 mmc_release_host(host);
831 mmc_remove_card(host->card); 841 mmc_remove_card(host->card);
832 host->card = NULL; 842 host->card = NULL;
833 mmc_claim_host(host); 843 mmc_claim_host(host);
834err: 844err:
835 mmc_detach_bus(host); 845 mmc_detach_bus(host);
836 mmc_release_host(host);
837 846
838 printk(KERN_ERR "%s: error %d whilst initialising SD card\n", 847 printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
839 mmc_hostname(host), err); 848 mmc_hostname(host), err);
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 797cdb5887fd..76af349c14b4 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -9,6 +9,7 @@
9 * your option) any later version. 9 * your option) any later version.
10 */ 10 */
11 11
12#include <linux/slab.h>
12#include <linux/types.h> 13#include <linux/types.h>
13#include <linux/scatterlist.h> 14#include <linux/scatterlist.h>
14 15
@@ -252,6 +253,7 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
252 struct mmc_command cmd; 253 struct mmc_command cmd;
253 struct mmc_data data; 254 struct mmc_data data;
254 struct scatterlist sg; 255 struct scatterlist sg;
256 void *data_buf;
255 257
256 BUG_ON(!card); 258 BUG_ON(!card);
257 BUG_ON(!card->host); 259 BUG_ON(!card->host);
@@ -263,6 +265,13 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
263 if (err) 265 if (err)
264 return err; 266 return err;
265 267
268 /* dma onto stack is unsafe/nonportable, but callers to this
269 * routine normally provide temporary on-stack buffers ...
270 */
271 data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
272 if (data_buf == NULL)
273 return -ENOMEM;
274
266 memset(&mrq, 0, sizeof(struct mmc_request)); 275 memset(&mrq, 0, sizeof(struct mmc_request));
267 memset(&cmd, 0, sizeof(struct mmc_command)); 276 memset(&cmd, 0, sizeof(struct mmc_command));
268 memset(&data, 0, sizeof(struct mmc_data)); 277 memset(&data, 0, sizeof(struct mmc_data));
@@ -280,12 +289,15 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
280 data.sg = &sg; 289 data.sg = &sg;
281 data.sg_len = 1; 290 data.sg_len = 1;
282 291
283 sg_init_one(&sg, scr, 8); 292 sg_init_one(&sg, data_buf, 8);
284 293
285 mmc_set_data_timeout(&data, card); 294 mmc_set_data_timeout(&data, card);
286 295
287 mmc_wait_for_req(card->host, &mrq); 296 mmc_wait_for_req(card->host, &mrq);
288 297
298 memcpy(scr, data_buf, sizeof(card->raw_scr));
299 kfree(data_buf);
300
289 if (cmd.error) 301 if (cmd.error)
290 return cmd.error; 302 return cmd.error;
291 if (data.error) 303 if (data.error)
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index efef5f94ac42..db0f0b44d684 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -395,6 +395,14 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
395 if (err) 395 if (err)
396 goto remove; 396 goto remove;
397 397
398 /*
399 * Update oldcard with the new RCA received from the SDIO
400 * device -- we're doing this so that it's updated in the
401 * "card" struct when oldcard overwrites that later.
402 */
403 if (oldcard)
404 oldcard->rca = card->rca;
405
398 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); 406 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
399 } 407 }
400 408
@@ -458,6 +466,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
458 466
459 card = oldcard; 467 card = oldcard;
460 } 468 }
469 mmc_fixup_device(card);
461 470
462 if (card->type == MMC_TYPE_SD_COMBO) { 471 if (card->type == MMC_TYPE_SD_COMBO) {
463 err = mmc_sd_setup_card(host, card, oldcard != NULL); 472 err = mmc_sd_setup_card(host, card, oldcard != NULL);
@@ -627,15 +636,27 @@ static int mmc_sdio_suspend(struct mmc_host *host)
627 636
628static int mmc_sdio_resume(struct mmc_host *host) 637static int mmc_sdio_resume(struct mmc_host *host)
629{ 638{
630 int i, err; 639 int i, err = 0;
631 640
632 BUG_ON(!host); 641 BUG_ON(!host);
633 BUG_ON(!host->card); 642 BUG_ON(!host->card);
634 643
635 /* Basic card reinitialization. */ 644 /* Basic card reinitialization. */
636 mmc_claim_host(host); 645 mmc_claim_host(host);
637 err = mmc_sdio_init_card(host, host->ocr, host->card, 646
647 /* No need to reinitialize powered-resumed nonremovable cards */
648 if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host))
649 err = mmc_sdio_init_card(host, host->ocr, host->card,
638 (host->pm_flags & MMC_PM_KEEP_POWER)); 650 (host->pm_flags & MMC_PM_KEEP_POWER));
651 else if (mmc_card_is_powered_resumed(host)) {
652 /* We may have switched to 1-bit mode during suspend */
653 err = sdio_enable_4bit_bus(host->card);
654 if (err > 0) {
655 mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
656 err = 0;
657 }
658 }
659
639 if (!err && host->sdio_irqs) 660 if (!err && host->sdio_irqs)
640 mmc_signal_sdio_irq(host); 661 mmc_signal_sdio_irq(host);
641 mmc_release_host(host); 662 mmc_release_host(host);
@@ -690,16 +711,22 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
690/* 711/*
691 * Starting point for SDIO card init. 712 * Starting point for SDIO card init.
692 */ 713 */
693int mmc_attach_sdio(struct mmc_host *host, u32 ocr) 714int mmc_attach_sdio(struct mmc_host *host)
694{ 715{
695 int err; 716 int err, i, funcs;
696 int i, funcs; 717 u32 ocr;
697 struct mmc_card *card; 718 struct mmc_card *card;
698 719
699 BUG_ON(!host); 720 BUG_ON(!host);
700 WARN_ON(!host->claimed); 721 WARN_ON(!host->claimed);
701 722
723 err = mmc_send_io_op_cond(host, 0, &ocr);
724 if (err)
725 return err;
726
702 mmc_attach_bus(host, &mmc_sdio_ops); 727 mmc_attach_bus(host, &mmc_sdio_ops);
728 if (host->ocr_avail_sdio)
729 host->ocr_avail = host->ocr_avail_sdio;
703 730
704 /* 731 /*
705 * Sanity check the voltages that the card claims to 732 * Sanity check the voltages that the card claims to
@@ -769,11 +796,10 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
769 pm_runtime_enable(&card->sdio_func[i]->dev); 796 pm_runtime_enable(&card->sdio_func[i]->dev);
770 } 797 }
771 798
772 mmc_release_host(host);
773
774 /* 799 /*
775 * First add the card to the driver model... 800 * First add the card to the driver model...
776 */ 801 */
802 mmc_release_host(host);
777 err = mmc_add_card(host->card); 803 err = mmc_add_card(host->card);
778 if (err) 804 if (err)
779 goto remove_added; 805 goto remove_added;
@@ -787,6 +813,7 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
787 goto remove_added; 813 goto remove_added;
788 } 814 }
789 815
816 mmc_claim_host(host);
790 return 0; 817 return 0;
791 818
792 819
@@ -796,11 +823,12 @@ remove_added:
796 mmc_claim_host(host); 823 mmc_claim_host(host);
797remove: 824remove:
798 /* And with lock if it hasn't been added. */ 825 /* And with lock if it hasn't been added. */
826 mmc_release_host(host);
799 if (host->card) 827 if (host->card)
800 mmc_sdio_remove(host); 828 mmc_sdio_remove(host);
829 mmc_claim_host(host);
801err: 830err:
802 mmc_detach_bus(host); 831 mmc_detach_bus(host);
803 mmc_release_host(host);
804 832
805 printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n", 833 printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n",
806 mmc_hostname(host), err); 834 mmc_hostname(host), err);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 203da443e339..d29b9c36919a 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -197,44 +197,12 @@ out:
197 197
198#ifdef CONFIG_PM_RUNTIME 198#ifdef CONFIG_PM_RUNTIME
199 199
200static int sdio_bus_pm_prepare(struct device *dev)
201{
202 struct sdio_func *func = dev_to_sdio_func(dev);
203
204 /*
205 * Resume an SDIO device which was suspended at run time at this
206 * point, in order to allow standard SDIO suspend/resume paths
207 * to keep working as usual.
208 *
209 * Ultimately, the SDIO driver itself will decide (in its
210 * suspend handler, or lack thereof) whether the card should be
211 * removed or kept, and if kept, at what power state.
212 *
213 * At this point, PM core have increased our use count, so it's
214 * safe to directly resume the device. After system is resumed
215 * again, PM core will drop back its runtime PM use count, and if
216 * needed device will be suspended again.
217 *
218 * The end result is guaranteed to be a power state that is
219 * coherent with the device's runtime PM use count.
220 *
221 * The return value of pm_runtime_resume is deliberately unchecked
222 * since there is little point in failing system suspend if a
223 * device can't be resumed.
224 */
225 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
226 pm_runtime_resume(dev);
227
228 return 0;
229}
230
231static const struct dev_pm_ops sdio_bus_pm_ops = { 200static const struct dev_pm_ops sdio_bus_pm_ops = {
232 SET_RUNTIME_PM_OPS( 201 SET_RUNTIME_PM_OPS(
233 pm_generic_runtime_suspend, 202 pm_generic_runtime_suspend,
234 pm_generic_runtime_resume, 203 pm_generic_runtime_resume,
235 pm_generic_runtime_idle 204 pm_generic_runtime_idle
236 ) 205 )
237 .prepare = sdio_bus_pm_prepare,
238}; 206};
239 207
240#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops) 208#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops)
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index bb192f90e8e9..b3001617e67d 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -45,7 +45,7 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
45 struct sdio_func *func = card->sdio_func[i - 1]; 45 struct sdio_func *func = card->sdio_func[i - 1];
46 if (!func) { 46 if (!func) {
47 printk(KERN_WARNING "%s: pending IRQ for " 47 printk(KERN_WARNING "%s: pending IRQ for "
48 "non-existant function\n", 48 "non-existent function\n",
49 mmc_card_id(card)); 49 mmc_card_id(card));
50 ret = -EINVAL; 50 ret = -EINVAL;
51 } else if (func->irq_handler) { 51 } else if (func->irq_handler) {
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index d618e8673996..94df40531c38 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -83,7 +83,7 @@ config MMC_RICOH_MMC
83 83
84config MMC_SDHCI_OF 84config MMC_SDHCI_OF
85 tristate "SDHCI support on OpenFirmware platforms" 85 tristate "SDHCI support on OpenFirmware platforms"
86 depends on MMC_SDHCI && PPC_OF 86 depends on MMC_SDHCI && OF
87 help 87 help
88 This selects the OF support for Secure Digital Host Controller 88 This selects the OF support for Secure Digital Host Controller
89 Interfaces. 89 Interfaces.
@@ -93,6 +93,7 @@ config MMC_SDHCI_OF
93config MMC_SDHCI_OF_ESDHC 93config MMC_SDHCI_OF_ESDHC
94 bool "SDHCI OF support for the Freescale eSDHC controller" 94 bool "SDHCI OF support for the Freescale eSDHC controller"
95 depends on MMC_SDHCI_OF 95 depends on MMC_SDHCI_OF
96 depends on PPC_OF
96 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER 97 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
97 help 98 help
98 This selects the Freescale eSDHC controller support. 99 This selects the Freescale eSDHC controller support.
@@ -102,6 +103,7 @@ config MMC_SDHCI_OF_ESDHC
102config MMC_SDHCI_OF_HLWD 103config MMC_SDHCI_OF_HLWD
103 bool "SDHCI OF support for the Nintendo Wii SDHCI controllers" 104 bool "SDHCI OF support for the Nintendo Wii SDHCI controllers"
104 depends on MMC_SDHCI_OF 105 depends on MMC_SDHCI_OF
106 depends on PPC_OF
105 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER 107 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
106 help 108 help
107 This selects the Secure Digital Host Controller Interface (SDHCI) 109 This selects the Secure Digital Host Controller Interface (SDHCI)
@@ -140,6 +142,27 @@ config MMC_SDHCI_ESDHC_IMX
140 142
141 If unsure, say N. 143 If unsure, say N.
142 144
145config MMC_SDHCI_DOVE
146 bool "SDHCI support on Marvell's Dove SoC"
147 depends on ARCH_DOVE
148 depends on MMC_SDHCI_PLTFM
149 select MMC_SDHCI_IO_ACCESSORS
150 help
151 This selects the Secure Digital Host Controller Interface in
152 Marvell's Dove SoC.
153
154 If unsure, say N.
155
156config MMC_SDHCI_TEGRA
157 tristate "SDHCI platform support for the Tegra SD/MMC Controller"
158 depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
159 select MMC_SDHCI_IO_ACCESSORS
160 help
161 This selects the Tegra SD/MMC controller. If you have a Tegra
162 platform with SD or MMC devices, say Y or M here.
163
164 If unsure, say N.
165
143config MMC_SDHCI_S3C 166config MMC_SDHCI_S3C
144 tristate "SDHCI support on Samsung S3C SoC" 167 tristate "SDHCI support on Samsung S3C SoC"
145 depends on MMC_SDHCI && PLAT_SAMSUNG 168 depends on MMC_SDHCI && PLAT_SAMSUNG
@@ -202,7 +225,7 @@ config MMC_OMAP
202 225
203config MMC_OMAP_HS 226config MMC_OMAP_HS
204 tristate "TI OMAP High Speed Multimedia Card Interface support" 227 tristate "TI OMAP High Speed Multimedia Card Interface support"
205 depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4 228 depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
206 help 229 help
207 This selects the TI OMAP High Speed Multimedia card Interface. 230 This selects the TI OMAP High Speed Multimedia card Interface.
208 If you have an OMAP2430 or OMAP3 board or OMAP4 board with a 231 If you have an OMAP2430 or OMAP3 board or OMAP4 board with a
@@ -288,7 +311,7 @@ config MMC_MSM
288 311
289config MMC_MXC 312config MMC_MXC
290 tristate "Freescale i.MX2/3 Multimedia Card Interface support" 313 tristate "Freescale i.MX2/3 Multimedia Card Interface support"
291 depends on ARCH_MXC 314 depends on MACH_MX21 || MACH_MX27 || ARCH_MX31
292 help 315 help
293 This selects the Freescale i.MX2/3 Multimedia card Interface. 316 This selects the Freescale i.MX2/3 Multimedia card Interface.
294 If you have a i.MX platform with a Multimedia Card slot, 317 If you have a i.MX platform with a Multimedia Card slot,
@@ -296,6 +319,15 @@ config MMC_MXC
296 319
297 If unsure, say N. 320 If unsure, say N.
298 321
322config MMC_MXS
323 tristate "Freescale MXS Multimedia Card Interface support"
324 depends on ARCH_MXS && MXS_DMA
325 help
326 This selects the Freescale SSP MMC controller found on MXS based
327 platforms like mx23/28.
328
329 If unsure, say N.
330
299config MMC_TIFM_SD 331config MMC_TIFM_SD
300 tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)" 332 tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
301 depends on EXPERIMENTAL && PCI 333 depends on EXPERIMENTAL && PCI
@@ -407,13 +439,25 @@ config MMC_SDRICOH_CS
407 To compile this driver as a module, choose M here: the 439 To compile this driver as a module, choose M here: the
408 module will be called sdricoh_cs. 440 module will be called sdricoh_cs.
409 441
442config MMC_TMIO_CORE
443 tristate
444
410config MMC_TMIO 445config MMC_TMIO
411 tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" 446 tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
412 depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI 447 depends on MFD_TMIO || MFD_ASIC3
448 select MMC_TMIO_CORE
413 help 449 help
414 This provides support for the SD/MMC cell found in TC6393XB, 450 This provides support for the SD/MMC cell found in TC6393XB,
415 T7L66XB and also HTC ASIC3 451 T7L66XB and also HTC ASIC3
416 452
453config MMC_SDHI
454 tristate "SH-Mobile SDHI SD/SDIO controller support"
455 depends on SUPERH || ARCH_SHMOBILE
456 select MMC_TMIO_CORE
457 help
458 This provides support for the SDHI SD/SDIO controller found in
459 SuperH and ARM SH-Mobile SoCs
460
417config MMC_CB710 461config MMC_CB710
418 tristate "ENE CB710 MMC/SD Interface support" 462 tristate "ENE CB710 MMC/SD Interface support"
419 depends on PCI 463 depends on PCI
@@ -458,11 +502,27 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
458 help 502 help
459 If you say yes here SD-Cards may work on the EZkit. 503 If you say yes here SD-Cards may work on the EZkit.
460 504
505config MMC_DW
506 tristate "Synopsys DesignWare Memory Card Interface"
507 depends on ARM
508 help
509 This selects support for the Synopsys DesignWare Mobile Storage IP
510 block, this provides host support for SD and MMC interfaces, in both
511 PIO and external DMA modes.
512
513config MMC_DW_IDMAC
514 bool "Internal DMAC interface"
515 depends on MMC_DW
516 help
517 This selects support for the internal DMAC block within the Synopsys
518 Designware Mobile Storage IP block. This disables the external DMA
519 interface.
520
461config MMC_SH_MMCIF 521config MMC_SH_MMCIF
462 tristate "SuperH Internal MMCIF support" 522 tristate "SuperH Internal MMCIF support"
463 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE) 523 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
464 help 524 help
465 This selects the MMC Host Interface controler (MMCIF). 525 This selects the MMC Host Interface controller (MMCIF).
466 526
467 This driver supports MMCIF in sh7724/sh7757/sh7372. 527 This driver supports MMCIF in sh7724/sh7757/sh7372.
468 528
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 7b645ff43b30..4f1df0aae574 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
6obj-$(CONFIG_MMC_PXA) += pxamci.o 6obj-$(CONFIG_MMC_PXA) += pxamci.o
7obj-$(CONFIG_MMC_IMX) += imxmmc.o 7obj-$(CONFIG_MMC_IMX) += imxmmc.o
8obj-$(CONFIG_MMC_MXC) += mxcmmc.o 8obj-$(CONFIG_MMC_MXC) += mxcmmc.o
9obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
9obj-$(CONFIG_MMC_SDHCI) += sdhci.o 10obj-$(CONFIG_MMC_SDHCI) += sdhci.o
10obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 11obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
11obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o 12obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o
@@ -28,9 +29,16 @@ endif
28obj-$(CONFIG_MMC_S3C) += s3cmci.o 29obj-$(CONFIG_MMC_S3C) += s3cmci.o
29obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o 30obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
30obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o 31obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
31obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 32obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
33tmio_mmc_core-y := tmio_mmc_pio.o
34ifneq ($(CONFIG_MMC_SDHI),n)
35tmio_mmc_core-y += tmio_mmc_dma.o
36endif
37obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
38obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
32obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 39obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
33obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 40obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
41obj-$(CONFIG_MMC_DW) += dw_mmc.o
34obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o 42obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
35obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o 43obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
36obj-$(CONFIG_MMC_USHC) += ushc.o 44obj-$(CONFIG_MMC_USHC) += ushc.o
@@ -39,6 +47,8 @@ obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
39sdhci-platform-y := sdhci-pltfm.o 47sdhci-platform-y := sdhci-pltfm.o
40sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o 48sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
41sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o 49sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
50sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
51sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
42 52
43obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o 53obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
44sdhci-of-y := sdhci-of-core.o 54sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 591ab540b407..d3e6a962f423 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -69,6 +69,7 @@
69#include <linux/highmem.h> 69#include <linux/highmem.h>
70 70
71#include <linux/mmc/host.h> 71#include <linux/mmc/host.h>
72#include <linux/mmc/sdio.h>
72 73
73#include <asm/io.h> 74#include <asm/io.h>
74#include <asm/irq.h> 75#include <asm/irq.h>
@@ -493,10 +494,14 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
493 else if (data->flags & MMC_DATA_WRITE) 494 else if (data->flags & MMC_DATA_WRITE)
494 cmdr |= AT91_MCI_TRCMD_START; 495 cmdr |= AT91_MCI_TRCMD_START;
495 496
496 if (data->flags & MMC_DATA_STREAM) 497 if (cmd->opcode == SD_IO_RW_EXTENDED) {
497 cmdr |= AT91_MCI_TRTYP_STREAM; 498 cmdr |= AT91_MCI_TRTYP_SDIO_BLOCK;
498 if (data->blocks > 1) 499 } else {
499 cmdr |= AT91_MCI_TRTYP_MULTIPLE; 500 if (data->flags & MMC_DATA_STREAM)
501 cmdr |= AT91_MCI_TRTYP_STREAM;
502 if (data->blocks > 1)
503 cmdr |= AT91_MCI_TRTYP_MULTIPLE;
504 }
500 } 505 }
501 else { 506 else {
502 block_length = 0; 507 block_length = 0;
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 301351a5d838..ea3888b65d5d 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -26,6 +26,7 @@
26#include <linux/stat.h> 26#include <linux/stat.h>
27 27
28#include <linux/mmc/host.h> 28#include <linux/mmc/host.h>
29#include <linux/mmc/sdio.h>
29 30
30#include <mach/atmel-mci.h> 31#include <mach/atmel-mci.h>
31#include <linux/atmel-mci.h> 32#include <linux/atmel-mci.h>
@@ -126,7 +127,7 @@ struct atmel_mci_dma {
126 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related 127 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
127 * interrupts must be disabled and @data_status updated with a 128 * interrupts must be disabled and @data_status updated with a
128 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the 129 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
129 * CMDRDY interupt must be disabled and @cmd_status updated with a 130 * CMDRDY interrupt must be disabled and @cmd_status updated with a
130 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the 131 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
131 * bytes_xfered field of @data must be written. This is ensured by 132 * bytes_xfered field of @data must be written. This is ensured by
132 * using barriers. 133 * using barriers.
@@ -532,12 +533,17 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
532 data = cmd->data; 533 data = cmd->data;
533 if (data) { 534 if (data) {
534 cmdr |= MCI_CMDR_START_XFER; 535 cmdr |= MCI_CMDR_START_XFER;
535 if (data->flags & MMC_DATA_STREAM) 536
536 cmdr |= MCI_CMDR_STREAM; 537 if (cmd->opcode == SD_IO_RW_EXTENDED) {
537 else if (data->blocks > 1) 538 cmdr |= MCI_CMDR_SDIO_BLOCK;
538 cmdr |= MCI_CMDR_MULTI_BLOCK; 539 } else {
539 else 540 if (data->flags & MMC_DATA_STREAM)
540 cmdr |= MCI_CMDR_BLOCK; 541 cmdr |= MCI_CMDR_STREAM;
542 else if (data->blocks > 1)
543 cmdr |= MCI_CMDR_MULTI_BLOCK;
544 else
545 cmdr |= MCI_CMDR_BLOCK;
546 }
541 547
542 if (data->flags & MMC_DATA_READ) 548 if (data->flags & MMC_DATA_READ)
543 cmdr |= MCI_CMDR_TRDIR_READ; 549 cmdr |= MCI_CMDR_TRDIR_READ;
@@ -572,7 +578,8 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
572 struct mmc_data *data = host->data; 578 struct mmc_data *data = host->data;
573 579
574 if (data) 580 if (data)
575 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, 581 dma_unmap_sg(host->dma.chan->device->dev,
582 data->sg, data->sg_len,
576 ((data->flags & MMC_DATA_WRITE) 583 ((data->flags & MMC_DATA_WRITE)
577 ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); 584 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
578} 585}
@@ -582,7 +589,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
582 struct dma_chan *chan = host->data_chan; 589 struct dma_chan *chan = host->data_chan;
583 590
584 if (chan) { 591 if (chan) {
585 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 592 dmaengine_terminate_all(chan);
586 atmci_dma_cleanup(host); 593 atmci_dma_cleanup(host);
587 } else { 594 } else {
588 /* Data transfer was stopped by the interrupt handler */ 595 /* Data transfer was stopped by the interrupt handler */
@@ -678,11 +685,11 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
678 else 685 else
679 direction = DMA_TO_DEVICE; 686 direction = DMA_TO_DEVICE;
680 687
681 sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction); 688 sglen = dma_map_sg(chan->device->dev, data->sg,
682 if (sglen != data->sg_len) 689 data->sg_len, direction);
683 goto unmap_exit; 690
684 desc = chan->device->device_prep_slave_sg(chan, 691 desc = chan->device->device_prep_slave_sg(chan,
685 data->sg, data->sg_len, direction, 692 data->sg, sglen, direction,
686 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 693 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
687 if (!desc) 694 if (!desc)
688 goto unmap_exit; 695 goto unmap_exit;
@@ -693,7 +700,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
693 700
694 return 0; 701 return 0;
695unmap_exit: 702unmap_exit:
696 dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction); 703 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
697 return -ENOMEM; 704 return -ENOMEM;
698} 705}
699 706
@@ -703,8 +710,8 @@ static void atmci_submit_data(struct atmel_mci *host)
703 struct dma_async_tx_descriptor *desc = host->dma.data_desc; 710 struct dma_async_tx_descriptor *desc = host->dma.data_desc;
704 711
705 if (chan) { 712 if (chan) {
706 desc->tx_submit(desc); 713 dmaengine_submit(desc);
707 chan->device->device_issue_pending(chan); 714 dma_async_issue_pending(chan);
708 } 715 }
709} 716}
710 717
@@ -1075,7 +1082,7 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1075 /* 1082 /*
1076 * Update the MMC clock rate if necessary. This may be 1083 * Update the MMC clock rate if necessary. This may be
1077 * necessary if set_ios() is called when a different slot is 1084 * necessary if set_ios() is called when a different slot is
1078 * busy transfering data. 1085 * busy transferring data.
1079 */ 1086 */
1080 if (host->need_clock_update) { 1087 if (host->need_clock_update) {
1081 mci_writel(host, MR, host->mode_reg); 1088 mci_writel(host, MR, host->mode_reg);
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 41e5a60493ad..ef72e874ca36 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -192,7 +192,7 @@ static inline void SEND_STOP(struct au1xmmc_host *host)
192 au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host)); 192 au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
193 au_sync(); 193 au_sync();
194 194
195 /* Send the stop commmand */ 195 /* Send the stop command */
196 au_writel(STOP_CMD, HOST_CMD(host)); 196 au_writel(STOP_CMD, HOST_CMD(host));
197} 197}
198 198
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index bac7d62866b7..0371bf502249 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -462,7 +462,7 @@ static int __devinit sdh_probe(struct platform_device *pdev)
462 goto out; 462 goto out;
463 } 463 }
464 464
465 mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev); 465 mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);
466 if (!mmc) { 466 if (!mmc) {
467 ret = -ENOMEM; 467 ret = -ENOMEM;
468 goto out; 468 goto out;
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 66b4ce587f4b..ce2a47b71dd6 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -205,7 +205,7 @@ static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask)
205 "WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n", 205 "WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n",
206 limit, mask, e, x); 206 limit, mask, e, x);
207#endif 207#endif
208 return 0; 208 return err;
209} 209}
210 210
211static void cb710_mmc_set_transfer_size(struct cb710_slot *slot, 211static void cb710_mmc_set_transfer_size(struct cb710_slot *slot,
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index e15547cf701f..0076c7448fe6 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -66,8 +66,8 @@
66#define DAVINCI_MMCBLNC 0x60 66#define DAVINCI_MMCBLNC 0x60
67#define DAVINCI_SDIOCTL 0x64 67#define DAVINCI_SDIOCTL 0x64
68#define DAVINCI_SDIOST0 0x68 68#define DAVINCI_SDIOST0 0x68
69#define DAVINCI_SDIOEN 0x6C 69#define DAVINCI_SDIOIEN 0x6C
70#define DAVINCI_SDIOST 0x70 70#define DAVINCI_SDIOIST 0x70
71#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ 71#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
72 72
73/* DAVINCI_MMCCTL definitions */ 73/* DAVINCI_MMCCTL definitions */
@@ -131,6 +131,14 @@
131#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ 131#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
132#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ 132#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
133 133
134/* DAVINCI_SDIOST0 definitions */
135#define SDIOST0_DAT1_HI BIT(0)
136
137/* DAVINCI_SDIOIEN definitions */
138#define SDIOIEN_IOINTEN BIT(0)
139
140/* DAVINCI_SDIOIST definitions */
141#define SDIOIST_IOINT BIT(0)
134 142
135/* MMCSD Init clock in Hz in opendrain mode */ 143/* MMCSD Init clock in Hz in opendrain mode */
136#define MMCSD_INIT_CLOCK 200000 144#define MMCSD_INIT_CLOCK 200000
@@ -164,7 +172,7 @@ struct mmc_davinci_host {
164 unsigned int mmc_input_clk; 172 unsigned int mmc_input_clk;
165 void __iomem *base; 173 void __iomem *base;
166 struct resource *mem_res; 174 struct resource *mem_res;
167 int irq; 175 int mmc_irq, sdio_irq;
168 unsigned char bus_mode; 176 unsigned char bus_mode;
169 177
170#define DAVINCI_MMC_DATADIR_NONE 0 178#define DAVINCI_MMC_DATADIR_NONE 0
@@ -184,6 +192,7 @@ struct mmc_davinci_host {
184 u32 rxdma, txdma; 192 u32 rxdma, txdma;
185 bool use_dma; 193 bool use_dma;
186 bool do_dma; 194 bool do_dma;
195 bool sdio_int;
187 196
188 /* Scatterlist DMA uses one or more parameter RAM entries: 197 /* Scatterlist DMA uses one or more parameter RAM entries:
189 * the main one (associated with rxdma or txdma) plus zero or 198 * the main one (associated with rxdma or txdma) plus zero or
@@ -480,7 +489,7 @@ static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
480 struct scatterlist *sg; 489 struct scatterlist *sg;
481 unsigned sg_len; 490 unsigned sg_len;
482 unsigned bytes_left = host->bytes_left; 491 unsigned bytes_left = host->bytes_left;
483 const unsigned shift = ffs(rw_threshold) - 1;; 492 const unsigned shift = ffs(rw_threshold) - 1;
484 493
485 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 494 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
486 template = &host->tx_template; 495 template = &host->tx_template;
@@ -866,6 +875,19 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
866{ 875{
867 host->data = NULL; 876 host->data = NULL;
868 877
878 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
879 /*
880 * SDIO Interrupt Detection work-around as suggested by
881 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
882 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
883 */
884 if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
885 SDIOST0_DAT1_HI)) {
886 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
887 mmc_signal_sdio_irq(host->mmc);
888 }
889 }
890
869 if (host->do_dma) { 891 if (host->do_dma) {
870 davinci_abort_dma(host); 892 davinci_abort_dma(host);
871 893
@@ -932,6 +954,21 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
932 mmc_davinci_reset_ctrl(host, 0); 954 mmc_davinci_reset_ctrl(host, 0);
933} 955}
934 956
/*
 * Dedicated SDIO interrupt handler: acknowledge the controller's SDIO
 * interrupt status and forward it to the MMC core.
 */
static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = dev_id;
	unsigned int status;

	status = readl(host->base + DAVINCI_SDIOIST);
	if (status & SDIOIST_IOINT) {
		dev_dbg(mmc_dev(host->mmc),
			"SDIO interrupt status %x\n", status);
		/* Write-1-to-clear the pending SDIO interrupt bit */
		writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
		mmc_signal_sdio_irq(host->mmc);
	}
	return IRQ_HANDLED;
}
971
935static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 972static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
936{ 973{
937 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; 974 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
@@ -1076,11 +1113,32 @@ static int mmc_davinci_get_ro(struct mmc_host *mmc)
1076 return config->get_ro(pdev->id); 1113 return config->get_ro(pdev->id);
1077} 1114}
1078 1115
/*
 * Enable/disable SDIO interrupt reporting for this host.
 *
 * When enabling: if DAT1 is already low the card is signalling an
 * interrupt right now, so ack it and notify the core immediately
 * instead of arming the hardware enable (per the DM355 errata
 * work-around used in mmc_davinci_xfer_done). Otherwise remember the
 * enable in host->sdio_int and unmask the controller interrupt.
 */
static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);

	if (enable) {
		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
			/* interrupt already pending: ack and signal now */
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		} else {
			host->sdio_int = true;
			writel(readl(host->base + DAVINCI_SDIOIEN) |
			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
		}
	} else {
		host->sdio_int = false;
		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
		       host->base + DAVINCI_SDIOIEN);
	}
}
1135
1079static struct mmc_host_ops mmc_davinci_ops = { 1136static struct mmc_host_ops mmc_davinci_ops = {
1080 .request = mmc_davinci_request, 1137 .request = mmc_davinci_request,
1081 .set_ios = mmc_davinci_set_ios, 1138 .set_ios = mmc_davinci_set_ios,
1082 .get_cd = mmc_davinci_get_cd, 1139 .get_cd = mmc_davinci_get_cd,
1083 .get_ro = mmc_davinci_get_ro, 1140 .get_ro = mmc_davinci_get_ro,
1141 .enable_sdio_irq = mmc_davinci_enable_sdio_irq,
1084}; 1142};
1085 1143
1086/*----------------------------------------------------------------------*/ 1144/*----------------------------------------------------------------------*/
@@ -1209,7 +1267,8 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1209 host->nr_sg = MAX_NR_SG; 1267 host->nr_sg = MAX_NR_SG;
1210 1268
1211 host->use_dma = use_dma; 1269 host->use_dma = use_dma;
1212 host->irq = irq; 1270 host->mmc_irq = irq;
1271 host->sdio_irq = platform_get_irq(pdev, 1);
1213 1272
1214 if (host->use_dma && davinci_acquire_dma_channels(host) != 0) 1273 if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
1215 host->use_dma = 0; 1274 host->use_dma = 0;
@@ -1270,6 +1329,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1270 if (ret) 1329 if (ret)
1271 goto out; 1330 goto out;
1272 1331
1332 if (host->sdio_irq >= 0) {
1333 ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
1334 mmc_hostname(mmc), host);
1335 if (!ret)
1336 mmc->caps |= MMC_CAP_SDIO_IRQ;
1337 }
1338
1273 rename_region(mem, mmc_hostname(mmc)); 1339 rename_region(mem, mmc_hostname(mmc));
1274 1340
1275 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", 1341 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
@@ -1313,7 +1379,9 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
1313 mmc_davinci_cpufreq_deregister(host); 1379 mmc_davinci_cpufreq_deregister(host);
1314 1380
1315 mmc_remove_host(host->mmc); 1381 mmc_remove_host(host->mmc);
1316 free_irq(host->irq, host); 1382 free_irq(host->mmc_irq, host);
1383 if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
1384 free_irq(host->sdio_irq, host);
1317 1385
1318 davinci_release_dma_channels(host); 1386 davinci_release_dma_channels(host);
1319 1387
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
new file mode 100644
index 000000000000..87e1f57ec9ba
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.c
@@ -0,0 +1,1859 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/scatterlist.h>
26#include <linux/seq_file.h>
27#include <linux/slab.h>
28#include <linux/stat.h>
29#include <linux/delay.h>
30#include <linux/irq.h>
31#include <linux/mmc/host.h>
32#include <linux/mmc/mmc.h>
33#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
35#include <linux/regulator/consumer.h>
36
37#include "dw_mmc.h"
38
39/* Common flag combinations */
40#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
41 SDMMC_INT_HTO | SDMMC_INT_SBE | \
42 SDMMC_INT_EBE)
43#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
44 SDMMC_INT_RESP_ERR)
45#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
46 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
47#define DW_MCI_SEND_STATUS 1
48#define DW_MCI_RECV_STATUS 2
49#define DW_MCI_DMA_THRESHOLD 16
50
51#ifdef CONFIG_MMC_DW_IDMAC
/* Hardware descriptor for the internal DMA controller (IDMAC), chained mode */
struct idmac_desc {
	u32 des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* disable completion interrupt */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of a transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of a transfer */
#define IDMAC_DES0_CH	BIT(4)	/* des3 chains to the next descriptor */
#define IDMAC_DES0_ER	BIT(5)	/* end of ring: wrap back to base */
#define IDMAC_DES0_OWN	BIT(30)	/* descriptor owned by the DMA engine */
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32 des1;	/* Buffer sizes */
	/* Set buffer-1 size (low 13+ bits) while preserving buffer-2 size */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))

	u32 des2;	/* buffer 1 physical address */

	u32 des3;	/* buffer 2 physical address (next descriptor in chained mode) */
};
70#endif /* CONFIG_MMC_DW_IDMAC */
71
/**
 * struct dw_mci_slot - MMC slot state
 * @mmc: The mmc_host representing this slot.
 * @host: The MMC controller this slot is using.
 * @ctype: Card type for this slot.
 * @mrq: mmc_request currently being processed or waiting to be
 *	processed, or NULL when the slot is idle.
 * @queue_node: List node for placing this node in the @queue list of
 *	&struct dw_mci.
 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 * @flags: Random state bits associated with the slot.
 * @id: Number of this slot.
 * @last_detect_state: Most recently observed card detect state.
 */
struct dw_mci_slot {
	struct mmc_host		*mmc;
	struct dw_mci		*host;

	u32			ctype;

	struct mmc_request	*mrq;
	struct list_head	queue_node;

	unsigned int		clock;
	unsigned long		flags;
/* bits in @flags */
#define DW_MMC_CARD_PRESENT	0	/* card is inserted */
#define DW_MMC_CARD_NEED_INIT	1	/* send SDMMC_CMD_INIT with next command */
	int			id;
	int			last_detect_state;
};
102
103#if defined(CONFIG_DEBUG_FS)
104static int dw_mci_req_show(struct seq_file *s, void *v)
105{
106 struct dw_mci_slot *slot = s->private;
107 struct mmc_request *mrq;
108 struct mmc_command *cmd;
109 struct mmc_command *stop;
110 struct mmc_data *data;
111
112 /* Make sure we get a consistent snapshot */
113 spin_lock_bh(&slot->host->lock);
114 mrq = slot->mrq;
115
116 if (mrq) {
117 cmd = mrq->cmd;
118 data = mrq->data;
119 stop = mrq->stop;
120
121 if (cmd)
122 seq_printf(s,
123 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
124 cmd->opcode, cmd->arg, cmd->flags,
125 cmd->resp[0], cmd->resp[1], cmd->resp[2],
126 cmd->resp[2], cmd->error);
127 if (data)
128 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
129 data->bytes_xfered, data->blocks,
130 data->blksz, data->flags, data->error);
131 if (stop)
132 seq_printf(s,
133 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
134 stop->opcode, stop->arg, stop->flags,
135 stop->resp[0], stop->resp[1], stop->resp[2],
136 stop->resp[2], stop->error);
137 }
138
139 spin_unlock_bh(&slot->host->lock);
140
141 return 0;
142}
143
144static int dw_mci_req_open(struct inode *inode, struct file *file)
145{
146 return single_open(file, dw_mci_req_show, inode->i_private);
147}
148
/* File operations for the per-slot debugfs "req" file */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
156
157static int dw_mci_regs_show(struct seq_file *s, void *v)
158{
159 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
160 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
161 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
162 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
163 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
164 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
165
166 return 0;
167}
168
169static int dw_mci_regs_open(struct inode *inode, struct file *file)
170{
171 return single_open(file, dw_mci_regs_show, inode->i_private);
172}
173
/* File operations for the per-host debugfs "regs" file */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
181
182static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
183{
184 struct mmc_host *mmc = slot->mmc;
185 struct dw_mci *host = slot->host;
186 struct dentry *root;
187 struct dentry *node;
188
189 root = mmc->debugfs_root;
190 if (!root)
191 return;
192
193 node = debugfs_create_file("regs", S_IRUSR, root, host,
194 &dw_mci_regs_fops);
195 if (!node)
196 goto err;
197
198 node = debugfs_create_file("req", S_IRUSR, root, slot,
199 &dw_mci_req_fops);
200 if (!node)
201 goto err;
202
203 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
204 if (!node)
205 goto err;
206
207 node = debugfs_create_x32("pending_events", S_IRUSR, root,
208 (u32 *)&host->pending_events);
209 if (!node)
210 goto err;
211
212 node = debugfs_create_x32("completed_events", S_IRUSR, root,
213 (u32 *)&host->completed_events);
214 if (!node)
215 goto err;
216
217 return;
218
219err:
220 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
221}
222#endif /* defined(CONFIG_DEBUG_FS) */
223
/* Program the data/response timeout counter to its maximum value. */
static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}
229
230static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
231{
232 struct mmc_data *data;
233 u32 cmdr;
234 cmd->error = -EINPROGRESS;
235
236 cmdr = cmd->opcode;
237
238 if (cmdr == MMC_STOP_TRANSMISSION)
239 cmdr |= SDMMC_CMD_STOP;
240 else
241 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
242
243 if (cmd->flags & MMC_RSP_PRESENT) {
244 /* We expect a response, so set this bit */
245 cmdr |= SDMMC_CMD_RESP_EXP;
246 if (cmd->flags & MMC_RSP_136)
247 cmdr |= SDMMC_CMD_RESP_LONG;
248 }
249
250 if (cmd->flags & MMC_RSP_CRC)
251 cmdr |= SDMMC_CMD_RESP_CRC;
252
253 data = cmd->data;
254 if (data) {
255 cmdr |= SDMMC_CMD_DAT_EXP;
256 if (data->flags & MMC_DATA_STREAM)
257 cmdr |= SDMMC_CMD_STRM_MODE;
258 if (data->flags & MMC_DATA_WRITE)
259 cmdr |= SDMMC_CMD_DAT_WR;
260 }
261
262 return cmdr;
263}
264
/*
 * Latch the argument and fire the command. The wmb() orders the CMDARG
 * write before the CMD write that starts the hardware.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(&host->pdev->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();	/* argument must be visible before the start bit */

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
278
/* Issue the pre-computed stop command (see host->stop_cmdr) for @data. */
static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
{
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
}
283
284/* DMA interface functions */
285static void dw_mci_stop_dma(struct dw_mci *host)
286{
287 if (host->use_dma) {
288 host->dma_ops->stop(host);
289 host->dma_ops->cleanup(host);
290 } else {
291 /* Data transfer was stopped by the interrupt handler */
292 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
293 }
294}
295
296#ifdef CONFIG_MMC_DW_IDMAC
297static void dw_mci_dma_cleanup(struct dw_mci *host)
298{
299 struct mmc_data *data = host->data;
300
301 if (data)
302 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
303 ((data->flags & MMC_DATA_WRITE)
304 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
305}
306
/*
 * Halt the internal DMA controller: detach it from the data path
 * (with a DMA reset) and then stop the IDMAC state machine itself.
 */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	mci_writel(host, BMOD, temp);
}
322
323static void dw_mci_idmac_complete_dma(struct dw_mci *host)
324{
325 struct mmc_data *data = host->data;
326
327 dev_vdbg(&host->pdev->dev, "DMA complete\n");
328
329 host->dma_ops->cleanup(host);
330
331 /*
332 * If the card was removed, data will be NULL. No point in trying to
333 * send the stop command or waiting for NBUSY in this case.
334 */
335 if (data) {
336 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
337 tasklet_schedule(&host->tasklet);
338 }
339}
340
/*
 * Fill the IDMAC descriptor ring from an already-DMA-mapped
 * scatterlist: one chained descriptor per segment, then mark the first
 * (FD) and last (LD, interrupt enabled) descriptors.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/*
	 * Set last descriptor.
	 * NOTE(review): the byte-offset arithmetic below assumes
	 * host->sg_cpu is a void */char * pointer — confirm against the
	 * dw_mci declaration in dw_mmc.h.
	 */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	wmb();	/* descriptors must be visible before the IDMAC is started */
}
372
/*
 * Build the descriptor ring for the current transfer and start the
 * IDMAC: select it in CTRL, enable it in BMOD, then write the poll
 * demand register so it begins fetching descriptors.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();	/* CTRL change must land before enabling the engine */

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
394
/*
 * One-time IDMAC setup: build a circular, forward-linked descriptor
 * ring in the pre-allocated host->sg_cpu page and point the hardware
 * at it. Always returns 0.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	/* Enable only the normal summary and Tx/Rx complete interrupts */
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}
419
/* DMA operations used when the controller's internal DMAC is selected */
static struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
427#endif /* CONFIG_MMC_DW_IDMAC */
428
/*
 * Try to hand a data transfer to the DMA engine.
 *
 * Returns 0 on success; a negative error when DMA cannot be used and
 * the caller must fall back to PIO (no channel, transfer too short, or
 * buffers/lengths not word-aligned).
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	struct scatterlist *sg;
	unsigned int i, direction, sg_len;
	u32 temp;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;
	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	/* NOTE(review): a zero return (mapping failure) is not handled */
	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
			    direction);

	dev_vdbg(&host->pdev->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
481
/*
 * Attach @data to the host and start it, preferring DMA. If DMA is
 * refused, fall back to PIO: remember the scatterlist position, enable
 * the FIFO-threshold interrupts and disconnect the DMA interface.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);	/* a previous transfer must have finished */
	host->sg = NULL;
	host->data = data;

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback */
		host->sg = data->sg;
		host->pio_offset = 0;
		if (data->flags & MMC_DATA_READ)
			host->dir_status = DW_MCI_RECV_STATUS;
		else
			host->dir_status = DW_MCI_SEND_STATUS;

		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);
	}
}
509
510static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
511{
512 struct dw_mci *host = slot->host;
513 unsigned long timeout = jiffies + msecs_to_jiffies(500);
514 unsigned int cmd_status = 0;
515
516 mci_writel(host, CMDARG, arg);
517 wmb();
518 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
519
520 while (time_before(jiffies, timeout)) {
521 cmd_status = mci_readl(host, CMD);
522 if (!(cmd_status & SDMMC_CMD_START))
523 return;
524 }
525 dev_err(&slot->mmc->class_dev,
526 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
527 cmd, arg, cmd_status);
528}
529
/*
 * Apply this slot's clock and bus width to the controller.
 *
 * The clock-change sequence is order-critical: the card clock must be
 * disabled, the divider changed, and the clock re-enabled, with a CIU
 * "update clock" command after each register write so the card-clock
 * domain picks the change up.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 div;

	if (slot->clock != host->current_speed) {
		if (host->bus_hz % slot->clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div = ((host->bus_hz / slot->clock) >> 1) + 1;
		else
			div = (host->bus_hz / slot->clock) >> 1;

		dev_info(&slot->mmc->class_dev,
			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
			 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
			 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock */
		mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE |
			   SDMMC_CLKEN_LOW_PWR);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		host->current_speed = slot->clock;
	}

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, slot->ctype);
}
579
/*
 * Start @slot's queued request on the hardware. Caller holds
 * host->lock. Programs byte/block counts, submits the data phase (if
 * any) before firing the command, and pre-computes the stop command so
 * the interrupt path can issue it without extra work.
 */
static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot);

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

	data = mrq->data;
	if (data) {
		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmd = mrq->cmd;
	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();	/* data setup must be visible before the command */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}
626
627static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
628 struct mmc_request *mrq)
629{
630 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
631 host->state);
632
633 spin_lock_bh(&host->lock);
634 slot->mrq = mrq;
635
636 if (host->state == STATE_IDLE) {
637 host->state = STATE_SENDING_CMD;
638 dw_mci_start_request(host, slot);
639 } else {
640 list_add_tail(&slot->queue_node, &host->queue);
641 }
642
643 spin_unlock_bh(&host->lock);
644}
645
646static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
647{
648 struct dw_mci_slot *slot = mmc_priv(mmc);
649 struct dw_mci *host = slot->host;
650
651 WARN_ON(slot->mrq);
652
653 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
654 mrq->cmd->error = -ENOMEDIUM;
655 mmc_request_done(mmc, mrq);
656 return;
657 }
658
659 /* We don't support multiple blocks of weird lengths. */
660 dw_mci_queue_request(host, slot, mrq);
661}
662
663static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
664{
665 struct dw_mci_slot *slot = mmc_priv(mmc);
666 u32 regs;
667
668 /* set default 1 bit mode */
669 slot->ctype = SDMMC_CTYPE_1BIT;
670
671 switch (ios->bus_width) {
672 case MMC_BUS_WIDTH_1:
673 slot->ctype = SDMMC_CTYPE_1BIT;
674 break;
675 case MMC_BUS_WIDTH_4:
676 slot->ctype = SDMMC_CTYPE_4BIT;
677 break;
678 case MMC_BUS_WIDTH_8:
679 slot->ctype = SDMMC_CTYPE_8BIT;
680 break;
681 }
682
683 /* DDR mode set */
684 if (ios->ddr) {
685 regs = mci_readl(slot->host, UHS_REG);
686 regs |= (0x1 << slot->id) << 16;
687 mci_writel(slot->host, UHS_REG, regs);
688 }
689
690 if (ios->clock) {
691 /*
692 * Use mirror of ios->clock to prevent race with mmc
693 * core ios update when finding the minimum.
694 */
695 slot->clock = ios->clock;
696 }
697
698 switch (ios->power_mode) {
699 case MMC_POWER_UP:
700 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
701 break;
702 default:
703 break;
704 }
705}
706
707static int dw_mci_get_ro(struct mmc_host *mmc)
708{
709 int read_only;
710 struct dw_mci_slot *slot = mmc_priv(mmc);
711 struct dw_mci_board *brd = slot->host->pdata;
712
713 /* Use platform get_ro function, else try on board write protect */
714 if (brd->get_ro)
715 read_only = brd->get_ro(slot->id);
716 else
717 read_only =
718 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
719
720 dev_dbg(&mmc->class_dev, "card is %s\n",
721 read_only ? "read-only" : "read-write");
722
723 return read_only;
724}
725
726static int dw_mci_get_cd(struct mmc_host *mmc)
727{
728 int present;
729 struct dw_mci_slot *slot = mmc_priv(mmc);
730 struct dw_mci_board *brd = slot->host->pdata;
731
732 /* Use platform get_cd function, else try onboard card detect */
733 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
734 present = 1;
735 else if (brd->get_cd)
736 present = !brd->get_cd(slot->id);
737 else
738 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
739 == 0 ? 1 : 0;
740
741 if (present)
742 dev_dbg(&mmc->class_dev, "card is present\n");
743 else
744 dev_dbg(&mmc->class_dev, "card is not present\n");
745
746 return present;
747}
748
/* Host operations exported to the MMC core for each slot */
static const struct mmc_host_ops dw_mci_ops = {
	.request	= dw_mci_request,
	.set_ios	= dw_mci_set_ios,
	.get_ro		= dw_mci_get_ro,
	.get_cd		= dw_mci_get_cd,
};
755
/*
 * Finish the current request: clear the host's ownership, start the
 * next queued slot (or go idle), and complete the request to the MMC
 * core. host->lock is dropped around mmc_request_done() because the
 * core may immediately submit a new request from that callback.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(&host->pdev->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
784
/*
 * Consume the command-done status: copy the response words (the
 * controller stores long responses with RESP0 holding the least
 * significant word) and map hardware error bits to errno. On error,
 * abort any associated data phase.
 */
static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);

		if (cmd->data) {
			host->data = NULL;
			dw_mci_stop_dma(host);
		}
	}
}
826
/*
 * Bottom half driving the request state machine.
 *
 * Interrupt handlers post EVENT_* bits into host->pending_events; this
 * tasklet consumes them and walks the request through
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP, with
 * DATA_ERROR as the abort path. The loop repeats until no state
 * transition happens, so several events arriving together are all
 * processed in one run. Runs under host->lock.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	u32 status;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			dw_mci_command_complete(host, host->mrq->cmd);
			/* no data phase (or command failed): request done */
			if (!host->mrq->data || cmd->error) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop)
					send_stop_cmd(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			status = host->data_status;

			if (status & DW_MCI_DATA_ERROR_FLAGS) {
				if (status & SDMMC_INT_DTO) {
					dev_err(&host->pdev->dev,
						"data timeout error\n");
					data->error = -ETIMEDOUT;
				} else if (status & SDMMC_INT_DCRC) {
					dev_err(&host->pdev->dev,
						"data CRC error\n");
					data->error = -EILSEQ;
				} else {
					dev_err(&host->pdev->dev,
						"data FIFO error "
						"(status=%08x)\n",
						status);
					data->error = -EIO;
				}
			} else {
				data->bytes_xfered = data->blocks * data->blksz;
				data->error = 0;
			}

			/* no stop command needed: request is finished */
			if (!data->stop) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_STOP;
			if (!data->error)
				send_stop_cmd(host, data);
			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			host->cmd = NULL;
			dw_mci_command_complete(host, host->mrq->stop);
			dw_mci_request_end(host, host->mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* wait for the aborted transfer to drain */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
948
949static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
950{
951 u16 *pdata = (u16 *)buf;
952
953 WARN_ON(cnt % 2 != 0);
954
955 cnt = cnt >> 1;
956 while (cnt > 0) {
957 mci_writew(host, DATA, *pdata++);
958 cnt--;
959 }
960}
961
962static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
963{
964 u16 *pdata = (u16 *)buf;
965
966 WARN_ON(cnt % 2 != 0);
967
968 cnt = cnt >> 1;
969 while (cnt > 0) {
970 *pdata++ = mci_readw(host, DATA);
971 cnt--;
972 }
973}
974
975static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
976{
977 u32 *pdata = (u32 *)buf;
978
979 WARN_ON(cnt % 4 != 0);
980 WARN_ON((unsigned long)pdata & 0x3);
981
982 cnt = cnt >> 2;
983 while (cnt > 0) {
984 mci_writel(host, DATA, *pdata++);
985 cnt--;
986 }
987}
988
989static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
990{
991 u32 *pdata = (u32 *)buf;
992
993 WARN_ON(cnt % 4 != 0);
994 WARN_ON((unsigned long)pdata & 0x3);
995
996 cnt = cnt >> 2;
997 while (cnt > 0) {
998 *pdata++ = mci_readl(host, DATA);
999 cnt--;
1000 }
1001}
1002
1003static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1004{
1005 u64 *pdata = (u64 *)buf;
1006
1007 WARN_ON(cnt % 8 != 0);
1008
1009 cnt = cnt >> 3;
1010 while (cnt > 0) {
1011 mci_writeq(host, DATA, *pdata++);
1012 cnt--;
1013 }
1014}
1015
1016static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1017{
1018 u64 *pdata = (u64 *)buf;
1019
1020 WARN_ON(cnt % 8 != 0);
1021
1022 cnt = cnt >> 3;
1023 while (cnt > 0) {
1024 *pdata++ = mci_readq(host, DATA);
1025 cnt--;
1026 }
1027}
1028
/*
 * PIO read path, called from the RXDR interrupt: drain the FIFO into
 * the current scatterlist position, advancing across segments, until
 * the FIFO stops signalling data-ready. On a data error, record the
 * status and hand off to the tasklet; when the scatterlist is
 * exhausted, mark the transfer phase complete.
 */
static void dw_mci_read_data_pio(struct dw_mci *host)
{
	struct scatterlist *sg = host->sg;
	void *buf = sg_virt(sg);
	unsigned int offset = host->pio_offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int nbytes = 0, len;

	do {
		/* bytes currently available in the FIFO */
		len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
		if (offset + len <= sg->length) {
			host->pull_data(host, (void *)(buf + offset), len);

			offset += len;
			nbytes += len;

			if (offset == sg->length) {
				flush_dcache_page(sg_page(sg));
				host->sg = sg = sg_next(sg);
				if (!sg)
					goto done;

				offset = 0;
				buf = sg_virt(sg);
			}
		} else {
			/* FIFO data straddles a segment boundary: split it */
			unsigned int remaining = sg->length - offset;
			host->pull_data(host, (void *)(buf + offset),
					remaining);
			nbytes += remaining;

			flush_dcache_page(sg_page(sg));
			host->sg = sg = sg_next(sg);
			if (!sg)
				goto done;

			offset = len - remaining;
			buf = sg_virt(sg);
			host->pull_data(host, buf, offset);
			nbytes += offset;
		}

		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
		if (status & DW_MCI_DATA_ERROR_FLAGS) {
			host->data_status = status;
			data->bytes_xfered += nbytes;
			smp_wmb();	/* publish status before the event bit */

			set_bit(EVENT_DATA_ERROR, &host->pending_events);

			tasklet_schedule(&host->tasklet);
			return;
		}
	} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
	/* NOTE(review): this FCNT read is dead — its result is never used */
	len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
	host->pio_offset = offset;
	data->bytes_xfered += nbytes;
	return;

done:
	data->bytes_xfered += nbytes;
	smp_wmb();	/* publish byte count before signalling completion */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1096
/*
 * PIO write path: refill the controller FIFO from the current request's
 * scatter-gather list.  Called from interrupt context (TXDR handling)
 * while host->sg is non-NULL.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct scatterlist *sg = host->sg;
	void *buf = sg_virt(sg);
	unsigned int offset = host->pio_offset;	/* resume point within sg entry */
	struct mmc_data *data = host->data;
	int shift = host->data_shift;		/* log2 of FIFO word size in bytes */
	u32 status;
	unsigned int nbytes = 0, len;

	do {
		/* Free space in the FIFO, in bytes. */
		len = SDMMC_FIFO_SZ -
			(SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
		if (offset + len <= sg->length) {
			/* Whole chunk comes from the current sg entry. */
			host->push_data(host, (void *)(buf + offset), len);

			offset += len;
			nbytes += len;
			if (offset == sg->length) {
				/* Entry consumed exactly: advance. */
				host->sg = sg = sg_next(sg);
				if (!sg)
					goto done;

				offset = 0;
				buf = sg_virt(sg);
			}
		} else {
			/* Chunk straddles an sg boundary: split the copy. */
			unsigned int remaining = sg->length - offset;

			host->push_data(host, (void *)(buf + offset),
					remaining);
			nbytes += remaining;

			host->sg = sg = sg_next(sg);
			if (!sg)
				goto done;

			/*
			 * NOTE(review): the tail (len - remaining) is pushed
			 * from the next entry unconditionally -- assumes the
			 * next entry is at least that large; TODO confirm.
			 */
			offset = len - remaining;
			buf = sg_virt(sg);
			host->push_data(host, (void *)buf, offset);
			nbytes += offset;
		}

		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);	/* ack TXDR */
		if (status & DW_MCI_DATA_ERROR_FLAGS) {
			host->data_status = status;
			data->bytes_xfered += nbytes;

			/* Publish data_status before the event bit. */
			smp_wmb();

			set_bit(EVENT_DATA_ERROR, &host->pending_events);

			tasklet_schedule(&host->tasklet);
			return;
		}
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	host->pio_offset = offset;
	data->bytes_xfered += nbytes;

	return;

done:
	data->bytes_xfered += nbytes;
	/* Order bytes_xfered update before the completion bit. */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1165
/*
 * Record a command-phase interrupt status and hand completion off to
 * the tasklet.  Only the first status is kept, so an earlier error is
 * not overwritten by a later CMD_DONE.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	/* Make cmd_status visible before the event bit can be observed. */
	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1176
/*
 * Top-level interrupt handler.  Each pending source is acked in RINTSTS
 * and the real work is deferred to the tasklets; PIO data is drained /
 * refilled inline while still in interrupt context.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 status, pending;
	unsigned int pass_count = 0;	/* bounds the status re-poll loop */

	do {
		status = mci_readl(host, RINTSTS);
		pending = mci_readl(host, MINTSTS); /* read-only mask reg */

		/*
		 * DTO fix - version 2.10a and below, and only if internal DMA
		 * is configured.
		 */
		if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
			/*
			 * NOTE(review): STATUS >> 17 looks like the FIFO fill
			 * count; a non-empty FIFO with nothing pending is
			 * treated as a missed data-over -- confirm vs databook.
			 */
			if (!pending &&
			    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
				pending |= SDMMC_INT_DATA_OVER;
		}

		if (!pending)
			break;

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = status;
			/* Publish cmd_status before the event bit. */
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = status;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = status;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* Drain anything left in the FIFO. */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->sg)
				dw_mci_read_data_pio(host);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, status);
		}

		if (pending & SDMMC_INT_CD) {
			/* Card detect changed: let the card tasklet sort it out. */
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			tasklet_schedule(&host->card_tasklet);
		}

	} while (pass_count++ < 5);

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
1267
/*
 * Card-detect tasklet: for every slot, compare the physical card state
 * (dw_mci_get_cd) with the last recorded state and handle insert /
 * remove transitions, failing any request that was in flight when the
 * card went away.
 */
static void dw_mci_tasklet_card(unsigned long data)
{
	struct dw_mci *host = (struct dw_mci *)data;
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;
		u32 ctrl;

		present = dw_mci_get_cd(mmc);
		/* Re-check after each pass in case the card moved again. */
		while (present != slot->last_detect_state) {
			spin_lock(&host->lock);

			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			/* Card change detected */
			slot->last_detect_state = present;

			/* Power up slot */
			if (present != 0) {
				if (host->pdata->setpower)
					host->pdata->setpower(slot->id,
							      mmc->ocr_avail);

				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
			}

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/*
					 * The request is active on the
					 * controller: fail whichever phase
					 * the state machine is in.
					 */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						if (!mrq->stop)
							break;
						/* fall through */
					case STATE_SENDING_STOP:
						mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* Still queued: fail it directly. */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					/* Drop the lock around the core callback. */
					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				if (host->pdata->setpower)
					host->pdata->setpower(slot->id, 0);
				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

				/*
				 * Clear down the FIFO - doing so generates a
				 * block interrupt, hence setting the
				 * scatter-gather pointer to NULL.
				 */
				host->sg = NULL;

				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);

#ifdef CONFIG_MMC_DW_IDMAC
				ctrl = mci_readl(host, BMOD);
				ctrl |= 0x01; /* Software reset of DMA */
				mci_writel(host, BMOD, ctrl);
#endif

			}

			spin_unlock(&host->lock);
			present = dw_mci_get_cd(mmc);
		}

		/* Tell the mmc core to rescan this slot after the debounce delay. */
		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
1378
/*
 * Allocate, configure and register the mmc_host for slot @id.
 * Returns 0 on success or -ENOMEM if the host cannot be allocated.
 * __init is consistent with binding via platform_driver_probe()
 * (see dw_mci_init below).
 */
static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->mmc = mmc;
	slot->host = host;

	mmc->ops = &dw_mci_ops;
	/* 510: presumably 2 * max 8-bit clock divider (255) -- TODO confirm */
	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
	mmc->f_max = host->bus_hz;

	if (host->pdata->get_ocr)
		mmc->ocr_avail = host->pdata->get_ocr(id);
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * Start with slot power disabled, it will be enabled when a card
	 * is detected.
	 */
	if (host->pdata->setpower)
		host->pdata->setpower(id, 0);

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;
	else
		mmc->caps = 0;

	if (host->pdata->get_bus_wd)
		if (host->pdata->get_bus_wd(slot->id) >= 4)
			mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

#ifdef CONFIG_MMC_DW_IDMAC
	/* Transfer geometry is tied to the IDMAC descriptor ring size. */
	mmc->max_segs = host->ring_size;
	mmc->max_blk_size = 65536;
	mmc->max_blk_count = host->ring_size;
	mmc->max_seg_size = 0x1000;
	mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}
#endif /* CONFIG_MMC_DW_IDMAC */

	/* vmmc regulator is optional; fall back to always-on power. */
	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
	if (IS_ERR(host->vmmc)) {
		printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
		host->vmmc = NULL;
	} else
		regulator_enable(host->vmmc);

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	host->slot[id] = slot;
	mmc_add_host(mmc);

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	/* Card initially undetected */
	slot->last_detect_state = 0;

	/*
	 * Card may have been plugged in prior to boot so we
	 * need to run the detect tasklet
	 */
	tasklet_schedule(&host->card_tasklet);

	return 0;
}
1474
1475static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
1476{
1477 /* Shutdown detect IRQ */
1478 if (slot->host->pdata->exit)
1479 slot->host->pdata->exit(id);
1480
1481 /* Debugfs stuff is cleaned up by mmc core */
1482 mmc_remove_host(slot->mmc);
1483 slot->host->slot[id] = NULL;
1484 mmc_free_host(slot->mmc);
1485}
1486
1487static void dw_mci_init_dma(struct dw_mci *host)
1488{
1489 /* Alloc memory for sg translation */
1490 host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
1491 &host->sg_dma, GFP_KERNEL);
1492 if (!host->sg_cpu) {
1493 dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
1494 __func__);
1495 goto no_dma;
1496 }
1497
1498 /* Determine which DMA interface to use */
1499#ifdef CONFIG_MMC_DW_IDMAC
1500 host->dma_ops = &dw_mci_idmac_ops;
1501 dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
1502#endif
1503
1504 if (!host->dma_ops)
1505 goto no_dma;
1506
1507 if (host->dma_ops->init) {
1508 if (host->dma_ops->init(host)) {
1509 dev_err(&host->pdev->dev, "%s: Unable to initialize "
1510 "DMA Controller.\n", __func__);
1511 goto no_dma;
1512 }
1513 } else {
1514 dev_err(&host->pdev->dev, "DMA initialization not found.\n");
1515 goto no_dma;
1516 }
1517
1518 host->use_dma = 1;
1519 return;
1520
1521no_dma:
1522 dev_info(&host->pdev->dev, "Using PIO mode.\n");
1523 host->use_dma = 0;
1524 return;
1525}
1526
1527static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
1528{
1529 unsigned long timeout = jiffies + msecs_to_jiffies(500);
1530 unsigned int ctrl;
1531
1532 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1533 SDMMC_CTRL_DMA_RESET));
1534
1535 /* wait till resets clear */
1536 do {
1537 ctrl = mci_readl(host, CTRL);
1538 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1539 SDMMC_CTRL_DMA_RESET)))
1540 return true;
1541 } while (time_before(jiffies, timeout));
1542
1543 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
1544
1545 return false;
1546}
1547
1548static int dw_mci_probe(struct platform_device *pdev)
1549{
1550 struct dw_mci *host;
1551 struct resource *regs;
1552 struct dw_mci_board *pdata;
1553 int irq, ret, i, width;
1554 u32 fifo_size;
1555
1556 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1557 if (!regs)
1558 return -ENXIO;
1559
1560 irq = platform_get_irq(pdev, 0);
1561 if (irq < 0)
1562 return irq;
1563
1564 host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
1565 if (!host)
1566 return -ENOMEM;
1567
1568 host->pdev = pdev;
1569 host->pdata = pdata = pdev->dev.platform_data;
1570 if (!pdata || !pdata->init) {
1571 dev_err(&pdev->dev,
1572 "Platform data must supply init function\n");
1573 ret = -ENODEV;
1574 goto err_freehost;
1575 }
1576
1577 if (!pdata->select_slot && pdata->num_slots > 1) {
1578 dev_err(&pdev->dev,
1579 "Platform data must supply select_slot function\n");
1580 ret = -ENODEV;
1581 goto err_freehost;
1582 }
1583
1584 if (!pdata->bus_hz) {
1585 dev_err(&pdev->dev,
1586 "Platform data must supply bus speed\n");
1587 ret = -ENODEV;
1588 goto err_freehost;
1589 }
1590
1591 host->bus_hz = pdata->bus_hz;
1592 host->quirks = pdata->quirks;
1593
1594 spin_lock_init(&host->lock);
1595 INIT_LIST_HEAD(&host->queue);
1596
1597 ret = -ENOMEM;
1598 host->regs = ioremap(regs->start, regs->end - regs->start + 1);
1599 if (!host->regs)
1600 goto err_freehost;
1601
1602 host->dma_ops = pdata->dma_ops;
1603 dw_mci_init_dma(host);
1604
1605 /*
1606 * Get the host data width - this assumes that HCON has been set with
1607 * the correct values.
1608 */
1609 i = (mci_readl(host, HCON) >> 7) & 0x7;
1610 if (!i) {
1611 host->push_data = dw_mci_push_data16;
1612 host->pull_data = dw_mci_pull_data16;
1613 width = 16;
1614 host->data_shift = 1;
1615 } else if (i == 2) {
1616 host->push_data = dw_mci_push_data64;
1617 host->pull_data = dw_mci_pull_data64;
1618 width = 64;
1619 host->data_shift = 3;
1620 } else {
1621 /* Check for a reserved value, and warn if it is */
1622 WARN((i != 1),
1623 "HCON reports a reserved host data width!\n"
1624 "Defaulting to 32-bit access.\n");
1625 host->push_data = dw_mci_push_data32;
1626 host->pull_data = dw_mci_pull_data32;
1627 width = 32;
1628 host->data_shift = 2;
1629 }
1630
1631 /* Reset all blocks */
1632 if (!mci_wait_reset(&pdev->dev, host)) {
1633 ret = -ENODEV;
1634 goto err_dmaunmap;
1635 }
1636
1637 /* Clear the interrupts for the host controller */
1638 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1639 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
1640
1641 /* Put in max timeout */
1642 mci_writel(host, TMOUT, 0xFFFFFFFF);
1643
1644 /*
1645 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
1646 * Tx Mark = fifo_size / 2 DMA Size = 8
1647 */
1648 fifo_size = mci_readl(host, FIFOTH);
1649 fifo_size = (fifo_size >> 16) & 0x7ff;
1650 host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
1651 ((fifo_size/2) << 0));
1652 mci_writel(host, FIFOTH, host->fifoth_val);
1653
1654 /* disable clock to CIU */
1655 mci_writel(host, CLKENA, 0);
1656 mci_writel(host, CLKSRC, 0);
1657
1658 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
1659 tasklet_init(&host->card_tasklet,
1660 dw_mci_tasklet_card, (unsigned long)host);
1661
1662 ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
1663 if (ret)
1664 goto err_dmaunmap;
1665
1666 platform_set_drvdata(pdev, host);
1667
1668 if (host->pdata->num_slots)
1669 host->num_slots = host->pdata->num_slots;
1670 else
1671 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
1672
1673 /* We need at least one slot to succeed */
1674 for (i = 0; i < host->num_slots; i++) {
1675 ret = dw_mci_init_slot(host, i);
1676 if (ret) {
1677 ret = -ENODEV;
1678 goto err_init_slot;
1679 }
1680 }
1681
1682 /*
1683 * Enable interrupts for command done, data over, data empty, card det,
1684 * receive ready and error such as transmit, receive timeout, crc error
1685 */
1686 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1687 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
1688 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
1689 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
1690 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
1691
1692 dev_info(&pdev->dev, "DW MMC controller at irq %d, "
1693 "%d bit host data width\n", irq, width);
1694 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
1695 dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
1696
1697 return 0;
1698
1699err_init_slot:
1700 /* De-init any initialized slots */
1701 while (i > 0) {
1702 if (host->slot[i])
1703 dw_mci_cleanup_slot(host->slot[i], i);
1704 i--;
1705 }
1706 free_irq(irq, host);
1707
1708err_dmaunmap:
1709 if (host->use_dma && host->dma_ops->exit)
1710 host->dma_ops->exit(host);
1711 dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
1712 host->sg_cpu, host->sg_dma);
1713 iounmap(host->regs);
1714
1715 if (host->vmmc) {
1716 regulator_disable(host->vmmc);
1717 regulator_put(host->vmmc);
1718 }
1719
1720
1721err_freehost:
1722 kfree(host);
1723 return ret;
1724}
1725
/*
 * Device teardown: quiesce the controller, unregister every slot and
 * release DMA, IRQ, regulator and MMIO resources.
 */
static int __exit dw_mci_remove(struct platform_device *pdev)
{
	struct dw_mci *host = platform_get_drvdata(pdev);
	int i;

	/* Silence the controller before tearing anything down. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	platform_set_drvdata(pdev, NULL);

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(&pdev->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	free_irq(platform_get_irq(pdev, 0), host);
	/*
	 * NOTE(review): sg_cpu is freed before dma_ops->exit() runs --
	 * the reverse of the probe error path.  Confirm exit() does not
	 * touch the descriptor memory.
	 */
	dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (host->vmmc) {
		regulator_disable(host->vmmc);
		regulator_put(host->vmmc);
	}

	iounmap(host->regs);

	kfree(host);
	return 0;
}
1762
1763#ifdef CONFIG_PM
1764/*
1765 * TODO: we should probably disable the clock to the card in the suspend path.
1766 */
/*
 * Suspend: ask the mmc core to suspend every slot; on any failure,
 * resume the slots already suspended and propagate the error.
 */
static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	int i, ret;
	struct dw_mci *host = platform_get_drvdata(pdev);

	/*
	 * NOTE(review): vmmc is enabled here and disabled again at the
	 * end -- presumably to guarantee card power while
	 * mmc_suspend_host() talks to the cards; confirm intentional.
	 */
	if (host->vmmc)
		regulator_enable(host->vmmc);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		ret = mmc_suspend_host(slot->mmc);
		if (ret < 0) {
			/* Roll back: resume every slot suspended so far. */
			while (--i >= 0) {
				slot = host->slot[i];
				if (slot)
					mmc_resume_host(host->slot[i]->mmc);
			}
			return ret;
		}
	}

	if (host->vmmc)
		regulator_disable(host->vmmc);

	return 0;
}
1795
/*
 * Resume: re-initialize DMA, reset the controller, restore the FIFO
 * thresholds and interrupt mask set up at probe time, then resume
 * every registered slot.
 */
static int dw_mci_resume(struct platform_device *pdev)
{
	int i, ret;
	struct dw_mci *host = platform_get_drvdata(pdev);

	/* Re-create DMA state lost across suspend. */
	if (host->dma_ops->init)
		host->dma_ops->init(host);

	if (!mci_wait_reset(&pdev->dev, host)) {
		ret = -ENODEV;
		return ret;
	}

	/* Restore the old value at FIFOTH register */
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* Re-enable the same interrupt set that probe configured. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		ret = mmc_resume_host(host->slot[i]->mmc);
		if (ret < 0)
			return ret;
	}

	return 0;
}
1829#else
1830#define dw_mci_suspend NULL
1831#define dw_mci_resume NULL
1832#endif /* CONFIG_PM */
1833
/* No .probe here: the driver is registered with platform_driver_probe()
 * in dw_mci_init(), which supplies dw_mci_probe directly. */
static struct platform_driver dw_mci_driver = {
	.remove		= __exit_p(dw_mci_remove),
	.suspend	= dw_mci_suspend,
	.resume		= dw_mci_resume,
	.driver		= {
		.name		= "dw_mmc",
	},
};
1842
static int __init dw_mci_init(void)
{
	/*
	 * platform_driver_probe() binds only devices present at
	 * registration time, which is why probe/init_slot can be __init.
	 */
	return platform_driver_probe(&dw_mci_driver, dw_mci_probe);
}
1847
static void __exit dw_mci_exit(void)
{
	/* Unregistering triggers dw_mci_remove() for each bound device. */
	platform_driver_unregister(&dw_mci_driver);
}
1852
1853module_init(dw_mci_init);
1854module_exit(dw_mci_exit);
1855
1856MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
1857MODULE_AUTHOR("NXP Semiconductor VietNam");
1858MODULE_AUTHOR("Imagination Technologies Ltd");
1859MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
new file mode 100644
index 000000000000..23c662af5616
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.h
@@ -0,0 +1,168 @@
/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#ifndef _DW_MMC_H_
#define _DW_MMC_H_

/* Register offsets */
#define SDMMC_CTRL		0x000
#define SDMMC_PWREN		0x004
#define SDMMC_CLKDIV		0x008
#define SDMMC_CLKSRC		0x00c
#define SDMMC_CLKENA		0x010
#define SDMMC_TMOUT		0x014
#define SDMMC_CTYPE		0x018
#define SDMMC_BLKSIZ		0x01c
#define SDMMC_BYTCNT		0x020
#define SDMMC_INTMASK		0x024
#define SDMMC_CMDARG		0x028
#define SDMMC_CMD		0x02c
#define SDMMC_RESP0		0x030
#define SDMMC_RESP1		0x034
#define SDMMC_RESP2		0x038
#define SDMMC_RESP3		0x03c
#define SDMMC_MINTSTS		0x040
#define SDMMC_RINTSTS		0x044
#define SDMMC_STATUS		0x048
#define SDMMC_FIFOTH		0x04c
#define SDMMC_CDETECT		0x050
#define SDMMC_WRTPRT		0x054
#define SDMMC_GPIO		0x058
#define SDMMC_TCBCNT		0x05c
#define SDMMC_TBBCNT		0x060
#define SDMMC_DEBNCE		0x064
#define SDMMC_USRID		0x068
#define SDMMC_VERID		0x06c
#define SDMMC_HCON		0x070
#define SDMMC_UHS_REG		0x074
#define SDMMC_BMOD		0x080
#define SDMMC_PLDMND		0x084
#define SDMMC_DBADDR		0x088
#define SDMMC_IDSTS		0x08c
#define SDMMC_IDINTEN		0x090
#define SDMMC_DSCADDR		0x094
#define SDMMC_BUFADDR		0x098
#define SDMMC_DATA		0x100

/* shift bit field */
#define _SBF(f, v)		((v) << (f))

/* Control register defines */
#define SDMMC_CTRL_USE_IDMAC		BIT(25)
#define SDMMC_CTRL_CEATA_INT_EN		BIT(11)
#define SDMMC_CTRL_SEND_AS_CCSD		BIT(10)
#define SDMMC_CTRL_SEND_CCSD		BIT(9)
#define SDMMC_CTRL_ABRT_READ_DATA	BIT(8)
#define SDMMC_CTRL_SEND_IRQ_RESP	BIT(7)
#define SDMMC_CTRL_READ_WAIT		BIT(6)
#define SDMMC_CTRL_DMA_ENABLE		BIT(5)
#define SDMMC_CTRL_INT_ENABLE		BIT(4)
#define SDMMC_CTRL_DMA_RESET		BIT(2)
#define SDMMC_CTRL_FIFO_RESET		BIT(1)
#define SDMMC_CTRL_RESET		BIT(0)
/* Clock Enable register defines */
#define SDMMC_CLKEN_LOW_PWR		BIT(16)
#define SDMMC_CLKEN_ENABLE		BIT(0)
/* time-out register defines */
#define SDMMC_TMOUT_DATA(n)		_SBF(8, (n))
#define SDMMC_TMOUT_DATA_MSK		0xFFFFFF00
#define SDMMC_TMOUT_RESP(n)		((n) & 0xFF)
#define SDMMC_TMOUT_RESP_MSK		0xFF
/* card-type register defines */
#define SDMMC_CTYPE_8BIT		BIT(16)
#define SDMMC_CTYPE_4BIT		BIT(0)
#define SDMMC_CTYPE_1BIT		0
/* Interrupt status & mask register defines */
#define SDMMC_INT_SDIO			BIT(16)
#define SDMMC_INT_EBE			BIT(15)
#define SDMMC_INT_ACD			BIT(14)
#define SDMMC_INT_SBE			BIT(13)
#define SDMMC_INT_HLE			BIT(12)
#define SDMMC_INT_FRUN			BIT(11)
#define SDMMC_INT_HTO			BIT(10)
#define SDMMC_INT_DTO			BIT(9)
#define SDMMC_INT_RTO			BIT(8)
#define SDMMC_INT_DCRC			BIT(7)
#define SDMMC_INT_RCRC			BIT(6)
#define SDMMC_INT_RXDR			BIT(5)
#define SDMMC_INT_TXDR			BIT(4)
#define SDMMC_INT_DATA_OVER		BIT(3)
#define SDMMC_INT_CMD_DONE		BIT(2)
#define SDMMC_INT_RESP_ERR		BIT(1)
#define SDMMC_INT_CD			BIT(0)
#define SDMMC_INT_ERROR			0xbfc2
/* Command register defines */
#define SDMMC_CMD_START			BIT(31)
#define SDMMC_CMD_CCS_EXP		BIT(23)
#define SDMMC_CMD_CEATA_RD		BIT(22)
#define SDMMC_CMD_UPD_CLK		BIT(21)
#define SDMMC_CMD_INIT			BIT(15)
#define SDMMC_CMD_STOP			BIT(14)
#define SDMMC_CMD_PRV_DAT_WAIT		BIT(13)
#define SDMMC_CMD_SEND_STOP		BIT(12)
#define SDMMC_CMD_STRM_MODE		BIT(11)
#define SDMMC_CMD_DAT_WR		BIT(10)
#define SDMMC_CMD_DAT_EXP		BIT(9)
#define SDMMC_CMD_RESP_CRC		BIT(8)
#define SDMMC_CMD_RESP_LONG		BIT(7)
#define SDMMC_CMD_RESP_EXP		BIT(6)
#define SDMMC_CMD_INDX(n)		((n) & 0x1F)
/* Status register defines */
#define SDMMC_GET_FCNT(x)		(((x)>>17) & 0x1FF)
#define SDMMC_FIFO_SZ			32
/* Internal DMAC interrupt defines */
#define SDMMC_IDMAC_INT_AI		BIT(9)
#define SDMMC_IDMAC_INT_NI		BIT(8)
#define SDMMC_IDMAC_INT_CES		BIT(5)
#define SDMMC_IDMAC_INT_DU		BIT(4)
#define SDMMC_IDMAC_INT_FBE		BIT(2)
#define SDMMC_IDMAC_INT_RI		BIT(1)
#define SDMMC_IDMAC_INT_TI		BIT(0)
/* Internal DMAC bus mode bits */
#define SDMMC_IDMAC_ENABLE		BIT(7)
#define SDMMC_IDMAC_FB			BIT(1)
#define SDMMC_IDMAC_SWRESET		BIT(0)

/*
 * Register access macros.  All macro parameters are parenthesized in
 * the expansion so complex expressions may be passed as 'dev'/'value'
 * without precedence surprises.
 */
#define mci_readl(dev, reg)			\
	__raw_readl((dev)->regs + SDMMC_##reg)
#define mci_writel(dev, reg, value)			\
	__raw_writel((value), (dev)->regs + SDMMC_##reg)

/* 16-bit FIFO access macros */
#define mci_readw(dev, reg)			\
	__raw_readw((dev)->regs + SDMMC_##reg)
#define mci_writew(dev, reg, value)			\
	__raw_writew((value), (dev)->regs + SDMMC_##reg)

/* 64-bit FIFO access macros */
#ifdef readq
#define mci_readq(dev, reg)			\
	__raw_readq((dev)->regs + SDMMC_##reg)
#define mci_writeq(dev, reg, value)			\
	__raw_writeq((value), (dev)->regs + SDMMC_##reg)
#else
/*
 * Dummy readq implementation for architectures that don't define it.
 *
 * We would assume that none of these architectures would configure
 * the IP block with a 64bit FIFO width, so this code will never be
 * executed on those machines. Defining these macros here keeps the
 * rest of the code free from ifdefs.
 */
#define mci_readq(dev, reg)			\
	(*(volatile u64 __force *)((dev)->regs + SDMMC_##reg))
#define mci_writeq(dev, reg, value)			\
	(*(volatile u64 __force *)((dev)->regs + SDMMC_##reg) = (value))
#endif

#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index b3a0ab0e4c2b..74218ad677e4 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/mmc/host.h> 16#include <linux/mmc/host.h>
17#include <linux/err.h>
17#include <linux/io.h> 18#include <linux/io.h>
18#include <linux/irq.h> 19#include <linux/irq.h>
19#include <linux/interrupt.h> 20#include <linux/interrupt.h>
@@ -827,8 +828,8 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)
827 } 828 }
828 829
829 host->clk = clk_get(&pdev->dev, "mmc"); 830 host->clk = clk_get(&pdev->dev, "mmc");
830 if (!host->clk) { 831 if (IS_ERR(host->clk)) {
831 ret = -ENOENT; 832 ret = PTR_ERR(host->clk);
832 dev_err(&pdev->dev, "Failed to get mmc clock\n"); 833 dev_err(&pdev->dev, "Failed to get mmc clock\n");
833 goto err_free_host; 834 goto err_free_host;
834 } 835 }
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index fd877f633dd2..7c1e16aaf17f 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -99,7 +99,7 @@
99#define r1b_timeout (HZ * 3) 99#define r1b_timeout (HZ * 3)
100 100
101/* One of the critical speed parameters is the amount of data which may 101/* One of the critical speed parameters is the amount of data which may
102 * be transfered in one command. If this value is too low, the SD card 102 * be transferred in one command. If this value is too low, the SD card
103 * controller has to do multiple partial block writes (argggh!). With 103 * controller has to do multiple partial block writes (argggh!). With
104 * today (2008) SD cards there is little speed gain if we transfer more 104 * today (2008) SD cards there is little speed gain if we transfer more
105 * than 64 KBytes at a time. So use this value until there is any indication 105 * than 64 KBytes at a time. So use this value until there is any indication
@@ -1516,21 +1516,17 @@ static int __devexit mmc_spi_remove(struct spi_device *spi)
1516 return 0; 1516 return 0;
1517} 1517}
1518 1518
1519#if defined(CONFIG_OF)
1520static struct of_device_id mmc_spi_of_match_table[] __devinitdata = { 1519static struct of_device_id mmc_spi_of_match_table[] __devinitdata = {
1521 { .compatible = "mmc-spi-slot", }, 1520 { .compatible = "mmc-spi-slot", },
1522 {}, 1521 {},
1523}; 1522};
1524#endif
1525 1523
1526static struct spi_driver mmc_spi_driver = { 1524static struct spi_driver mmc_spi_driver = {
1527 .driver = { 1525 .driver = {
1528 .name = "mmc_spi", 1526 .name = "mmc_spi",
1529 .bus = &spi_bus_type, 1527 .bus = &spi_bus_type,
1530 .owner = THIS_MODULE, 1528 .owner = THIS_MODULE,
1531#if defined(CONFIG_OF)
1532 .of_match_table = mmc_spi_of_match_table, 1529 .of_match_table = mmc_spi_of_match_table,
1533#endif
1534 }, 1530 },
1535 .probe = mmc_spi_probe, 1531 .probe = mmc_spi_probe,
1536 .remove = __devexit_p(mmc_spi_remove), 1532 .remove = __devexit_p(mmc_spi_remove),
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 87b4fc6c98c2..b4a7e4fba90f 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver 2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
3 * 3 *
4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. 4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
5 * Copyright (C) 2010 ST-Ericsson AB. 5 * Copyright (C) 2010 ST-Ericsson SA
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -14,17 +14,21 @@
14#include <linux/ioport.h> 14#include <linux/ioport.h>
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/kernel.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
18#include <linux/err.h> 19#include <linux/err.h>
19#include <linux/highmem.h> 20#include <linux/highmem.h>
20#include <linux/log2.h> 21#include <linux/log2.h>
21#include <linux/mmc/host.h> 22#include <linux/mmc/host.h>
23#include <linux/mmc/card.h>
22#include <linux/amba/bus.h> 24#include <linux/amba/bus.h>
23#include <linux/clk.h> 25#include <linux/clk.h>
24#include <linux/scatterlist.h> 26#include <linux/scatterlist.h>
25#include <linux/gpio.h> 27#include <linux/gpio.h>
26#include <linux/amba/mmci.h>
27#include <linux/regulator/consumer.h> 28#include <linux/regulator/consumer.h>
29#include <linux/dmaengine.h>
30#include <linux/dma-mapping.h>
31#include <linux/amba/mmci.h>
28 32
29#include <asm/div64.h> 33#include <asm/div64.h>
30#include <asm/io.h> 34#include <asm/io.h>
@@ -45,6 +49,8 @@ static unsigned int fmax = 515633;
45 * is asserted (likewise for RX) 49 * is asserted (likewise for RX)
46 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY 50 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
47 * is asserted (likewise for RX) 51 * is asserted (likewise for RX)
52 * @sdio: variant supports SDIO
53 * @st_clkdiv: true if using a ST-specific clock divider algorithm
48 */ 54 */
49struct variant_data { 55struct variant_data {
50 unsigned int clkreg; 56 unsigned int clkreg;
@@ -52,6 +58,8 @@ struct variant_data {
52 unsigned int datalength_bits; 58 unsigned int datalength_bits;
53 unsigned int fifosize; 59 unsigned int fifosize;
54 unsigned int fifohalfsize; 60 unsigned int fifohalfsize;
61 bool sdio;
62 bool st_clkdiv;
55}; 63};
56 64
57static struct variant_data variant_arm = { 65static struct variant_data variant_arm = {
@@ -60,11 +68,18 @@ static struct variant_data variant_arm = {
60 .datalength_bits = 16, 68 .datalength_bits = 16,
61}; 69};
62 70
71static struct variant_data variant_arm_extended_fifo = {
72 .fifosize = 128 * 4,
73 .fifohalfsize = 64 * 4,
74 .datalength_bits = 16,
75};
76
63static struct variant_data variant_u300 = { 77static struct variant_data variant_u300 = {
64 .fifosize = 16 * 4, 78 .fifosize = 16 * 4,
65 .fifohalfsize = 8 * 4, 79 .fifohalfsize = 8 * 4,
66 .clkreg_enable = 1 << 13, /* HWFCEN */ 80 .clkreg_enable = 1 << 13, /* HWFCEN */
67 .datalength_bits = 16, 81 .datalength_bits = 16,
82 .sdio = true,
68}; 83};
69 84
70static struct variant_data variant_ux500 = { 85static struct variant_data variant_ux500 = {
@@ -73,7 +88,10 @@ static struct variant_data variant_ux500 = {
73 .clkreg = MCI_CLK_ENABLE, 88 .clkreg = MCI_CLK_ENABLE,
74 .clkreg_enable = 1 << 14, /* HWFCEN */ 89 .clkreg_enable = 1 << 14, /* HWFCEN */
75 .datalength_bits = 24, 90 .datalength_bits = 24,
91 .sdio = true,
92 .st_clkdiv = true,
76}; 93};
94
77/* 95/*
78 * This must be called with host->lock held 96 * This must be called with host->lock held
79 */ 97 */
@@ -86,7 +104,22 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
86 if (desired >= host->mclk) { 104 if (desired >= host->mclk) {
87 clk = MCI_CLK_BYPASS; 105 clk = MCI_CLK_BYPASS;
88 host->cclk = host->mclk; 106 host->cclk = host->mclk;
107 } else if (variant->st_clkdiv) {
108 /*
109 * DB8500 TRM says f = mclk / (clkdiv + 2)
110 * => clkdiv = (mclk / f) - 2
111 * Round the divider up so we don't exceed the max
112 * frequency
113 */
114 clk = DIV_ROUND_UP(host->mclk, desired) - 2;
115 if (clk >= 256)
116 clk = 255;
117 host->cclk = host->mclk / (clk + 2);
89 } else { 118 } else {
119 /*
120 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
121 * => clkdiv = mclk / (2 * f) - 1
122 */
90 clk = host->mclk / (2 * desired) - 1; 123 clk = host->mclk / (2 * desired) - 1;
91 if (clk >= 256) 124 if (clk >= 256)
92 clk = 255; 125 clk = 255;
@@ -117,9 +150,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
117 host->mrq = NULL; 150 host->mrq = NULL;
118 host->cmd = NULL; 151 host->cmd = NULL;
119 152
120 if (mrq->data)
121 mrq->data->bytes_xfered = host->data_xfered;
122
123 /* 153 /*
124 * Need to drop the host lock here; mmc_request_done may call 154 * Need to drop the host lock here; mmc_request_done may call
125 * back into the driver... 155 * back into the driver...
@@ -129,10 +159,26 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
129 spin_lock(&host->lock); 159 spin_lock(&host->lock);
130} 160}
131 161
162static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
163{
164 void __iomem *base = host->base;
165
166 if (host->singleirq) {
167 unsigned int mask0 = readl(base + MMCIMASK0);
168
169 mask0 &= ~MCI_IRQ1MASK;
170 mask0 |= mask;
171
172 writel(mask0, base + MMCIMASK0);
173 }
174
175 writel(mask, base + MMCIMASK1);
176}
177
132static void mmci_stop_data(struct mmci_host *host) 178static void mmci_stop_data(struct mmci_host *host)
133{ 179{
134 writel(0, host->base + MMCIDATACTRL); 180 writel(0, host->base + MMCIDATACTRL);
135 writel(0, host->base + MMCIMASK1); 181 mmci_set_mask1(host, 0);
136 host->data = NULL; 182 host->data = NULL;
137} 183}
138 184
@@ -148,6 +194,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
148 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 194 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
149} 195}
150 196
197/*
198 * All the DMA operation mode stuff goes inside this ifdef.
199 * This assumes that you have a generic DMA device interface,
200 * no custom DMA interfaces are supported.
201 */
202#ifdef CONFIG_DMA_ENGINE
203static void __devinit mmci_dma_setup(struct mmci_host *host)
204{
205 struct mmci_platform_data *plat = host->plat;
206 const char *rxname, *txname;
207 dma_cap_mask_t mask;
208
209 if (!plat || !plat->dma_filter) {
210 dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
211 return;
212 }
213
214 /* Try to acquire a generic DMA engine slave channel */
215 dma_cap_zero(mask);
216 dma_cap_set(DMA_SLAVE, mask);
217
218 /*
219 * If only an RX channel is specified, the driver will
220 * attempt to use it bidirectionally, however if it is
221 * is specified but cannot be located, DMA will be disabled.
222 */
223 if (plat->dma_rx_param) {
224 host->dma_rx_channel = dma_request_channel(mask,
225 plat->dma_filter,
226 plat->dma_rx_param);
227 /* E.g if no DMA hardware is present */
228 if (!host->dma_rx_channel)
229 dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
230 }
231
232 if (plat->dma_tx_param) {
233 host->dma_tx_channel = dma_request_channel(mask,
234 plat->dma_filter,
235 plat->dma_tx_param);
236 if (!host->dma_tx_channel)
237 dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
238 } else {
239 host->dma_tx_channel = host->dma_rx_channel;
240 }
241
242 if (host->dma_rx_channel)
243 rxname = dma_chan_name(host->dma_rx_channel);
244 else
245 rxname = "none";
246
247 if (host->dma_tx_channel)
248 txname = dma_chan_name(host->dma_tx_channel);
249 else
250 txname = "none";
251
252 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
253 rxname, txname);
254
255 /*
256 * Limit the maximum segment size in any SG entry according to
257 * the parameters of the DMA engine device.
258 */
259 if (host->dma_tx_channel) {
260 struct device *dev = host->dma_tx_channel->device->dev;
261 unsigned int max_seg_size = dma_get_max_seg_size(dev);
262
263 if (max_seg_size < host->mmc->max_seg_size)
264 host->mmc->max_seg_size = max_seg_size;
265 }
266 if (host->dma_rx_channel) {
267 struct device *dev = host->dma_rx_channel->device->dev;
268 unsigned int max_seg_size = dma_get_max_seg_size(dev);
269
270 if (max_seg_size < host->mmc->max_seg_size)
271 host->mmc->max_seg_size = max_seg_size;
272 }
273}
274
275/*
276 * This is used in __devinit or __devexit so inline it
277 * so it can be discarded.
278 */
279static inline void mmci_dma_release(struct mmci_host *host)
280{
281 struct mmci_platform_data *plat = host->plat;
282
283 if (host->dma_rx_channel)
284 dma_release_channel(host->dma_rx_channel);
285 if (host->dma_tx_channel && plat->dma_tx_param)
286 dma_release_channel(host->dma_tx_channel);
287 host->dma_rx_channel = host->dma_tx_channel = NULL;
288}
289
290static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
291{
292 struct dma_chan *chan = host->dma_current;
293 enum dma_data_direction dir;
294 u32 status;
295 int i;
296
297 /* Wait up to 1ms for the DMA to complete */
298 for (i = 0; ; i++) {
299 status = readl(host->base + MMCISTATUS);
300 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
301 break;
302 udelay(10);
303 }
304
305 /*
306 * Check to see whether we still have some data left in the FIFO -
307 * this catches DMA controllers which are unable to monitor the
308 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
309 * contiguous buffers. On TX, we'll get a FIFO underrun error.
310 */
311 if (status & MCI_RXDATAAVLBLMASK) {
312 dmaengine_terminate_all(chan);
313 if (!data->error)
314 data->error = -EIO;
315 }
316
317 if (data->flags & MMC_DATA_WRITE) {
318 dir = DMA_TO_DEVICE;
319 } else {
320 dir = DMA_FROM_DEVICE;
321 }
322
323 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
324
325 /*
326 * Use of DMA with scatter-gather is impossible.
327 * Give up with DMA and switch back to PIO mode.
328 */
329 if (status & MCI_RXDATAAVLBLMASK) {
330 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
331 mmci_dma_release(host);
332 }
333}
334
335static void mmci_dma_data_error(struct mmci_host *host)
336{
337 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
338 dmaengine_terminate_all(host->dma_current);
339}
340
341static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
342{
343 struct variant_data *variant = host->variant;
344 struct dma_slave_config conf = {
345 .src_addr = host->phybase + MMCIFIFO,
346 .dst_addr = host->phybase + MMCIFIFO,
347 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
348 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
349 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
350 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
351 };
352 struct mmc_data *data = host->data;
353 struct dma_chan *chan;
354 struct dma_device *device;
355 struct dma_async_tx_descriptor *desc;
356 int nr_sg;
357
358 host->dma_current = NULL;
359
360 if (data->flags & MMC_DATA_READ) {
361 conf.direction = DMA_FROM_DEVICE;
362 chan = host->dma_rx_channel;
363 } else {
364 conf.direction = DMA_TO_DEVICE;
365 chan = host->dma_tx_channel;
366 }
367
368 /* If there's no DMA channel, fall back to PIO */
369 if (!chan)
370 return -EINVAL;
371
372 /* If less than or equal to the fifo size, don't bother with DMA */
373 if (host->size <= variant->fifosize)
374 return -EINVAL;
375
376 device = chan->device;
377 nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
378 if (nr_sg == 0)
379 return -EINVAL;
380
381 dmaengine_slave_config(chan, &conf);
382 desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
383 conf.direction, DMA_CTRL_ACK);
384 if (!desc)
385 goto unmap_exit;
386
387 /* Okay, go for it. */
388 host->dma_current = chan;
389
390 dev_vdbg(mmc_dev(host->mmc),
391 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
392 data->sg_len, data->blksz, data->blocks, data->flags);
393 dmaengine_submit(desc);
394 dma_async_issue_pending(chan);
395
396 datactrl |= MCI_DPSM_DMAENABLE;
397
398 /* Trigger the DMA transfer */
399 writel(datactrl, host->base + MMCIDATACTRL);
400
401 /*
402 * Let the MMCI say when the data is ended and it's time
403 * to fire next DMA request. When that happens, MMCI will
404 * call mmci_data_end()
405 */
406 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
407 host->base + MMCIMASK0);
408 return 0;
409
410unmap_exit:
411 dmaengine_terminate_all(chan);
412 dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
413 return -ENOMEM;
414}
415#else
416/* Blank functions if the DMA engine is not available */
417static inline void mmci_dma_setup(struct mmci_host *host)
418{
419}
420
421static inline void mmci_dma_release(struct mmci_host *host)
422{
423}
424
425static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
426{
427}
428
429static inline void mmci_dma_data_error(struct mmci_host *host)
430{
431}
432
433static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
434{
435 return -ENOSYS;
436}
437#endif
438
151static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) 439static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
152{ 440{
153 struct variant_data *variant = host->variant; 441 struct variant_data *variant = host->variant;
@@ -161,9 +449,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
161 449
162 host->data = data; 450 host->data = data;
163 host->size = data->blksz * data->blocks; 451 host->size = data->blksz * data->blocks;
164 host->data_xfered = 0; 452 data->bytes_xfered = 0;
165
166 mmci_init_sg(host, data);
167 453
168 clks = (unsigned long long)data->timeout_ns * host->cclk; 454 clks = (unsigned long long)data->timeout_ns * host->cclk;
169 do_div(clks, 1000000000UL); 455 do_div(clks, 1000000000UL);
@@ -178,15 +464,29 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
178 BUG_ON(1 << blksz_bits != data->blksz); 464 BUG_ON(1 << blksz_bits != data->blksz);
179 465
180 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 466 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
181 if (data->flags & MMC_DATA_READ) { 467
468 if (data->flags & MMC_DATA_READ)
182 datactrl |= MCI_DPSM_DIRECTION; 469 datactrl |= MCI_DPSM_DIRECTION;
470
471 /*
472 * Attempt to use DMA operation mode, if this
473 * should fail, fall back to PIO mode
474 */
475 if (!mmci_dma_start_data(host, datactrl))
476 return;
477
478 /* IRQ mode, map the SG list for CPU reading/writing */
479 mmci_init_sg(host, data);
480
481 if (data->flags & MMC_DATA_READ) {
183 irqmask = MCI_RXFIFOHALFFULLMASK; 482 irqmask = MCI_RXFIFOHALFFULLMASK;
184 483
185 /* 484 /*
186 * If we have less than a FIFOSIZE of bytes to transfer, 485 * If we have less than the fifo 'half-full' threshold to
187 * trigger a PIO interrupt as soon as any data is available. 486 * transfer, trigger a PIO interrupt as soon as any data
487 * is available.
188 */ 488 */
189 if (host->size < variant->fifosize) 489 if (host->size < variant->fifohalfsize)
190 irqmask |= MCI_RXDATAAVLBLMASK; 490 irqmask |= MCI_RXDATAAVLBLMASK;
191 } else { 491 } else {
192 /* 492 /*
@@ -196,9 +496,14 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
196 irqmask = MCI_TXFIFOHALFEMPTYMASK; 496 irqmask = MCI_TXFIFOHALFEMPTYMASK;
197 } 497 }
198 498
499 /* The ST Micro variants has a special bit to enable SDIO */
500 if (variant->sdio && host->mmc->card)
501 if (mmc_card_sdio(host->mmc->card))
502 datactrl |= MCI_ST_DPSM_SDIOEN;
503
199 writel(datactrl, base + MMCIDATACTRL); 504 writel(datactrl, base + MMCIDATACTRL);
200 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); 505 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
201 writel(irqmask, base + MMCIMASK1); 506 mmci_set_mask1(host, irqmask);
202} 507}
203 508
204static void 509static void
@@ -233,49 +538,56 @@ static void
233mmci_data_irq(struct mmci_host *host, struct mmc_data *data, 538mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
234 unsigned int status) 539 unsigned int status)
235{ 540{
236 if (status & MCI_DATABLOCKEND) { 541 /* First check for errors */
237 host->data_xfered += data->blksz; 542 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
238#ifdef CONFIG_ARCH_U300 543 u32 remain, success;
544
545 /* Terminate the DMA transfer */
546 if (dma_inprogress(host))
547 mmci_dma_data_error(host);
548
239 /* 549 /*
240 * On the U300 some signal or other is 550 * Calculate how far we are into the transfer. Note that
241 * badly routed so that a data write does 551 * the data counter gives the number of bytes transferred
242 * not properly terminate with a MCI_DATAEND 552 * on the MMC bus, not on the host side. On reads, this
243 * status flag. This quirk will make writes 553 * can be as much as a FIFO-worth of data ahead. This
244 * work again. 554 * matters for FIFO overruns only.
245 */ 555 */
246 if (data->flags & MMC_DATA_WRITE) 556 remain = readl(host->base + MMCIDATACNT);
247 status |= MCI_DATAEND; 557 success = data->blksz * data->blocks - remain;
248#endif 558
249 } 559 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
250 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 560 status, success);
251 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status); 561 if (status & MCI_DATACRCFAIL) {
252 if (status & MCI_DATACRCFAIL) 562 /* Last block was not successful */
563 success -= 1;
253 data->error = -EILSEQ; 564 data->error = -EILSEQ;
254 else if (status & MCI_DATATIMEOUT) 565 } else if (status & MCI_DATATIMEOUT) {
255 data->error = -ETIMEDOUT; 566 data->error = -ETIMEDOUT;
256 else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) 567 } else if (status & MCI_TXUNDERRUN) {
568 data->error = -EIO;
569 } else if (status & MCI_RXOVERRUN) {
570 if (success > host->variant->fifosize)
571 success -= host->variant->fifosize;
572 else
573 success = 0;
257 data->error = -EIO; 574 data->error = -EIO;
258 status |= MCI_DATAEND;
259
260 /*
261 * We hit an error condition. Ensure that any data
262 * partially written to a page is properly coherent.
263 */
264 if (data->flags & MMC_DATA_READ) {
265 struct sg_mapping_iter *sg_miter = &host->sg_miter;
266 unsigned long flags;
267
268 local_irq_save(flags);
269 if (sg_miter_next(sg_miter)) {
270 flush_dcache_page(sg_miter->page);
271 sg_miter_stop(sg_miter);
272 }
273 local_irq_restore(flags);
274 } 575 }
576 data->bytes_xfered = round_down(success, data->blksz);
275 } 577 }
276 if (status & MCI_DATAEND) { 578
579 if (status & MCI_DATABLOCKEND)
580 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
581
582 if (status & MCI_DATAEND || data->error) {
583 if (dma_inprogress(host))
584 mmci_dma_unmap(host, data);
277 mmci_stop_data(host); 585 mmci_stop_data(host);
278 586
587 if (!data->error)
588 /* The error clause is handled above, success! */
589 data->bytes_xfered = data->blksz * data->blocks;
590
279 if (!data->stop) { 591 if (!data->stop) {
280 mmci_request_end(host, data->mrq); 592 mmci_request_end(host, data->mrq);
281 } else { 593 } else {
@@ -292,15 +604,15 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
292 604
293 host->cmd = NULL; 605 host->cmd = NULL;
294 606
295 cmd->resp[0] = readl(base + MMCIRESPONSE0);
296 cmd->resp[1] = readl(base + MMCIRESPONSE1);
297 cmd->resp[2] = readl(base + MMCIRESPONSE2);
298 cmd->resp[3] = readl(base + MMCIRESPONSE3);
299
300 if (status & MCI_CMDTIMEOUT) { 607 if (status & MCI_CMDTIMEOUT) {
301 cmd->error = -ETIMEDOUT; 608 cmd->error = -ETIMEDOUT;
302 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 609 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
303 cmd->error = -EILSEQ; 610 cmd->error = -EILSEQ;
611 } else {
612 cmd->resp[0] = readl(base + MMCIRESPONSE0);
613 cmd->resp[1] = readl(base + MMCIRESPONSE1);
614 cmd->resp[2] = readl(base + MMCIRESPONSE2);
615 cmd->resp[3] = readl(base + MMCIRESPONSE3);
304 } 616 }
305 617
306 if (!cmd->data || cmd->error) { 618 if (!cmd->data || cmd->error) {
@@ -356,7 +668,32 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
356 variant->fifosize : variant->fifohalfsize; 668 variant->fifosize : variant->fifohalfsize;
357 count = min(remain, maxcnt); 669 count = min(remain, maxcnt);
358 670
359 writesl(base + MMCIFIFO, ptr, count >> 2); 671 /*
672 * The ST Micro variant for SDIO transfer sizes
673 * less then 8 bytes should have clock H/W flow
674 * control disabled.
675 */
676 if (variant->sdio &&
677 mmc_card_sdio(host->mmc->card)) {
678 if (count < 8)
679 writel(readl(host->base + MMCICLOCK) &
680 ~variant->clkreg_enable,
681 host->base + MMCICLOCK);
682 else
683 writel(readl(host->base + MMCICLOCK) |
684 variant->clkreg_enable,
685 host->base + MMCICLOCK);
686 }
687
688 /*
689 * SDIO especially may want to send something that is
690 * not divisible by 4 (as opposed to card sectors
691 * etc), and the FIFO only accept full 32-bit writes.
692 * So compensate by adding +3 on the count, a single
693 * byte become a 32bit write, 7 bytes will be two
694 * 32bit writes etc.
695 */
696 writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
360 697
361 ptr += count; 698 ptr += count;
362 remain -= count; 699 remain -= count;
@@ -422,9 +759,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
422 if (remain) 759 if (remain)
423 break; 760 break;
424 761
425 if (status & MCI_RXACTIVE)
426 flush_dcache_page(sg_miter->page);
427
428 status = readl(base + MMCISTATUS); 762 status = readl(base + MMCISTATUS);
429 } while (1); 763 } while (1);
430 764
@@ -433,11 +767,11 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
433 local_irq_restore(flags); 767 local_irq_restore(flags);
434 768
435 /* 769 /*
436 * If we're nearing the end of the read, switch to 770 * If we have less than the fifo 'half-full' threshold to transfer,
437 * "any data available" mode. 771 * trigger a PIO interrupt as soon as any data is available.
438 */ 772 */
439 if (status & MCI_RXACTIVE && host->size < variant->fifosize) 773 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
440 writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1); 774 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
441 775
442 /* 776 /*
443 * If we run out of data, disable the data IRQs; this 777 * If we run out of data, disable the data IRQs; this
@@ -446,7 +780,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
446 * stops us racing with our data end IRQ. 780 * stops us racing with our data end IRQ.
447 */ 781 */
448 if (host->size == 0) { 782 if (host->size == 0) {
449 writel(0, base + MMCIMASK1); 783 mmci_set_mask1(host, 0);
450 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); 784 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
451 } 785 }
452 786
@@ -469,6 +803,14 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
469 struct mmc_data *data; 803 struct mmc_data *data;
470 804
471 status = readl(host->base + MMCISTATUS); 805 status = readl(host->base + MMCISTATUS);
806
807 if (host->singleirq) {
808 if (status & readl(host->base + MMCIMASK1))
809 mmci_pio_irq(irq, dev_id);
810
811 status &= ~MCI_IRQ1MASK;
812 }
813
472 status &= readl(host->base + MMCIMASK0); 814 status &= readl(host->base + MMCIMASK0);
473 writel(status, host->base + MMCICLEAR); 815 writel(status, host->base + MMCICLEAR);
474 816
@@ -629,7 +971,8 @@ static const struct mmc_host_ops mmci_ops = {
629 .get_cd = mmci_get_cd, 971 .get_cd = mmci_get_cd,
630}; 972};
631 973
632static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) 974static int __devinit mmci_probe(struct amba_device *dev,
975 const struct amba_id *id)
633{ 976{
634 struct mmci_platform_data *plat = dev->dev.platform_data; 977 struct mmci_platform_data *plat = dev->dev.platform_data;
635 struct variant_data *variant = id->data; 978 struct variant_data *variant = id->data;
@@ -692,6 +1035,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
692 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", 1035 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
693 host->mclk); 1036 host->mclk);
694 } 1037 }
1038 host->phybase = dev->res.start;
695 host->base = ioremap(dev->res.start, resource_size(&dev->res)); 1039 host->base = ioremap(dev->res.start, resource_size(&dev->res));
696 if (!host->base) { 1040 if (!host->base) {
697 ret = -ENOMEM; 1041 ret = -ENOMEM;
@@ -806,19 +1150,27 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
806 if (ret) 1150 if (ret)
807 goto unmap; 1151 goto unmap;
808 1152
809 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host); 1153 if (dev->irq[1] == NO_IRQ)
810 if (ret) 1154 host->singleirq = true;
811 goto irq0_free; 1155 else {
1156 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
1157 DRIVER_NAME " (pio)", host);
1158 if (ret)
1159 goto irq0_free;
1160 }
812 1161
813 writel(MCI_IRQENABLE, host->base + MMCIMASK0); 1162 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
814 1163
815 amba_set_drvdata(dev, mmc); 1164 amba_set_drvdata(dev, mmc);
816 1165
817 mmc_add_host(mmc); 1166 dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
1167 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
1168 amba_rev(dev), (unsigned long long)dev->res.start,
1169 dev->irq[0], dev->irq[1]);
818 1170
819 dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n", 1171 mmci_dma_setup(host);
820 mmc_hostname(mmc), amba_rev(dev), amba_config(dev), 1172
821 (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]); 1173 mmc_add_host(mmc);
822 1174
823 return 0; 1175 return 0;
824 1176
@@ -863,8 +1215,10 @@ static int __devexit mmci_remove(struct amba_device *dev)
863 writel(0, host->base + MMCICOMMAND); 1215 writel(0, host->base + MMCICOMMAND);
864 writel(0, host->base + MMCIDATACTRL); 1216 writel(0, host->base + MMCIDATACTRL);
865 1217
1218 mmci_dma_release(host);
866 free_irq(dev->irq[0], host); 1219 free_irq(dev->irq[0], host);
867 free_irq(dev->irq[1], host); 1220 if (!host->singleirq)
1221 free_irq(dev->irq[1], host);
868 1222
869 if (host->gpio_wp != -ENOSYS) 1223 if (host->gpio_wp != -ENOSYS)
870 gpio_free(host->gpio_wp); 1224 gpio_free(host->gpio_wp);
@@ -929,10 +1283,15 @@ static int mmci_resume(struct amba_device *dev)
929static struct amba_id mmci_ids[] = { 1283static struct amba_id mmci_ids[] = {
930 { 1284 {
931 .id = 0x00041180, 1285 .id = 0x00041180,
932 .mask = 0x000fffff, 1286 .mask = 0xff0fffff,
933 .data = &variant_arm, 1287 .data = &variant_arm,
934 }, 1288 },
935 { 1289 {
1290 .id = 0x01041180,
1291 .mask = 0xff0fffff,
1292 .data = &variant_arm_extended_fifo,
1293 },
1294 {
936 .id = 0x00041181, 1295 .id = 0x00041181,
937 .mask = 0x000fffff, 1296 .mask = 0x000fffff,
938 .data = &variant_arm, 1297 .data = &variant_arm,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 4ae887fc0189..ec9a7bc6d0df 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -137,14 +137,21 @@
137#define MCI_IRQENABLE \ 137#define MCI_IRQENABLE \
138 (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \ 138 (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
139 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ 139 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
140 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK) 140 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK)
141
142/* These interrupts are directed to IRQ1 when two IRQ lines are available */
143#define MCI_IRQ1MASK \
144 (MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \
145 MCI_TXFIFOHALFEMPTYMASK)
141 146
142#define NR_SG 16 147#define NR_SG 16
143 148
144struct clk; 149struct clk;
145struct variant_data; 150struct variant_data;
151struct dma_chan;
146 152
147struct mmci_host { 153struct mmci_host {
154 phys_addr_t phybase;
148 void __iomem *base; 155 void __iomem *base;
149 struct mmc_request *mrq; 156 struct mmc_request *mrq;
150 struct mmc_command *cmd; 157 struct mmc_command *cmd;
@@ -154,8 +161,7 @@ struct mmci_host {
154 int gpio_cd; 161 int gpio_cd;
155 int gpio_wp; 162 int gpio_wp;
156 int gpio_cd_irq; 163 int gpio_cd_irq;
157 164 bool singleirq;
158 unsigned int data_xfered;
159 165
160 spinlock_t lock; 166 spinlock_t lock;
161 167
@@ -175,5 +181,16 @@ struct mmci_host {
175 struct sg_mapping_iter sg_miter; 181 struct sg_mapping_iter sg_miter;
176 unsigned int size; 182 unsigned int size;
177 struct regulator *vcc; 183 struct regulator *vcc;
184
185#ifdef CONFIG_DMA_ENGINE
186 /* DMA stuff */
187 struct dma_chan *dma_current;
188 struct dma_chan *dma_rx_channel;
189 struct dma_chan *dma_tx_channel;
190
191#define dma_inprogress(host) ((host)->dma_current)
192#else
193#define dma_inprogress(host) (0)
194#endif
178}; 195};
179 196
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 1290d14c5839..a4c865a5286b 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -36,6 +36,7 @@
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/memory.h> 37#include <linux/memory.h>
38#include <linux/gfp.h> 38#include <linux/gfp.h>
39#include <linux/gpio.h>
39 40
40#include <asm/cacheflush.h> 41#include <asm/cacheflush.h>
41#include <asm/div64.h> 42#include <asm/div64.h>
@@ -44,6 +45,7 @@
44#include <mach/mmc.h> 45#include <mach/mmc.h>
45#include <mach/msm_iomap.h> 46#include <mach/msm_iomap.h>
46#include <mach/dma.h> 47#include <mach/dma.h>
48#include <mach/clk.h>
47 49
48#include "msm_sdcc.h" 50#include "msm_sdcc.h"
49 51
@@ -126,6 +128,40 @@ static void
126msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, 128msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
127 u32 c); 129 u32 c);
128 130
131static void msmsdcc_reset_and_restore(struct msmsdcc_host *host)
132{
133 u32 mci_clk = 0;
134 u32 mci_mask0 = 0;
135 int ret = 0;
136
137 /* Save the controller state */
138 mci_clk = readl(host->base + MMCICLOCK);
139 mci_mask0 = readl(host->base + MMCIMASK0);
140
141 /* Reset the controller */
142 ret = clk_reset(host->clk, CLK_RESET_ASSERT);
143 if (ret)
144 pr_err("%s: Clock assert failed at %u Hz with err %d\n",
145 mmc_hostname(host->mmc), host->clk_rate, ret);
146
147 ret = clk_reset(host->clk, CLK_RESET_DEASSERT);
148 if (ret)
149 pr_err("%s: Clock deassert failed at %u Hz with err %d\n",
150 mmc_hostname(host->mmc), host->clk_rate, ret);
151
152 pr_info("%s: Controller has been re-initialiazed\n",
153 mmc_hostname(host->mmc));
154
155 /* Restore the contoller state */
156 writel(host->pwr, host->base + MMCIPOWER);
157 writel(mci_clk, host->base + MMCICLOCK);
158 writel(mci_mask0, host->base + MMCIMASK0);
159 ret = clk_set_rate(host->clk, host->clk_rate);
160 if (ret)
161 pr_err("%s: Failed to set clk rate %u Hz (%d)\n",
162 mmc_hostname(host->mmc), host->clk_rate, ret);
163}
164
129static void 165static void
130msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq) 166msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
131{ 167{
@@ -155,7 +191,7 @@ static void
155msmsdcc_stop_data(struct msmsdcc_host *host) 191msmsdcc_stop_data(struct msmsdcc_host *host)
156{ 192{
157 host->curr.data = NULL; 193 host->curr.data = NULL;
158 host->curr.got_dataend = host->curr.got_datablkend = 0; 194 host->curr.got_dataend = 0;
159} 195}
160 196
161uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host) 197uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
@@ -189,61 +225,52 @@ msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
189} 225}
190 226
191static void 227static void
192msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd, 228msmsdcc_dma_complete_tlet(unsigned long data)
193 unsigned int result,
194 struct msm_dmov_errdata *err)
195{ 229{
196 struct msmsdcc_dma_data *dma_data = 230 struct msmsdcc_host *host = (struct msmsdcc_host *)data;
197 container_of(cmd, struct msmsdcc_dma_data, hdr);
198 struct msmsdcc_host *host = dma_data->host;
199 unsigned long flags; 231 unsigned long flags;
200 struct mmc_request *mrq; 232 struct mmc_request *mrq;
233 struct msm_dmov_errdata err;
201 234
202 spin_lock_irqsave(&host->lock, flags); 235 spin_lock_irqsave(&host->lock, flags);
203 host->dma.active = 0; 236 host->dma.active = 0;
204 237
238 err = host->dma.err;
205 mrq = host->curr.mrq; 239 mrq = host->curr.mrq;
206 BUG_ON(!mrq); 240 BUG_ON(!mrq);
207 WARN_ON(!mrq->data); 241 WARN_ON(!mrq->data);
208 242
209 if (!(result & DMOV_RSLT_VALID)) { 243 if (!(host->dma.result & DMOV_RSLT_VALID)) {
210 pr_err("msmsdcc: Invalid DataMover result\n"); 244 pr_err("msmsdcc: Invalid DataMover result\n");
211 goto out; 245 goto out;
212 } 246 }
213 247
214 if (result & DMOV_RSLT_DONE) { 248 if (host->dma.result & DMOV_RSLT_DONE) {
215 host->curr.data_xfered = host->curr.xfer_size; 249 host->curr.data_xfered = host->curr.xfer_size;
216 } else { 250 } else {
217 /* Error or flush */ 251 /* Error or flush */
218 if (result & DMOV_RSLT_ERROR) 252 if (host->dma.result & DMOV_RSLT_ERROR)
219 pr_err("%s: DMA error (0x%.8x)\n", 253 pr_err("%s: DMA error (0x%.8x)\n",
220 mmc_hostname(host->mmc), result); 254 mmc_hostname(host->mmc), host->dma.result);
221 if (result & DMOV_RSLT_FLUSH) 255 if (host->dma.result & DMOV_RSLT_FLUSH)
222 pr_err("%s: DMA channel flushed (0x%.8x)\n", 256 pr_err("%s: DMA channel flushed (0x%.8x)\n",
223 mmc_hostname(host->mmc), result); 257 mmc_hostname(host->mmc), host->dma.result);
224 if (err) 258
225 pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n", 259 pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
226 err->flush[0], err->flush[1], err->flush[2], 260 err.flush[0], err.flush[1], err.flush[2],
227 err->flush[3], err->flush[4], err->flush[5]); 261 err.flush[3], err.flush[4], err.flush[5]);
262
263 msmsdcc_reset_and_restore(host);
228 if (!mrq->data->error) 264 if (!mrq->data->error)
229 mrq->data->error = -EIO; 265 mrq->data->error = -EIO;
230 } 266 }
231 dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents, 267 dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
232 host->dma.dir); 268 host->dma.dir);
233 269
234 if (host->curr.user_pages) {
235 struct scatterlist *sg = host->dma.sg;
236 int i;
237
238 for (i = 0; i < host->dma.num_ents; i++)
239 flush_dcache_page(sg_page(sg++));
240 }
241
242 host->dma.sg = NULL; 270 host->dma.sg = NULL;
243 host->dma.busy = 0; 271 host->dma.busy = 0;
244 272
245 if ((host->curr.got_dataend && host->curr.got_datablkend) 273 if (host->curr.got_dataend || mrq->data->error) {
246 || mrq->data->error) {
247 274
248 /* 275 /*
249 * If we've already gotten our DATAEND / DATABLKEND 276 * If we've already gotten our DATAEND / DATABLKEND
@@ -273,6 +300,22 @@ out:
273 return; 300 return;
274} 301}
275 302
303static void
304msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
305 unsigned int result,
306 struct msm_dmov_errdata *err)
307{
308 struct msmsdcc_dma_data *dma_data =
309 container_of(cmd, struct msmsdcc_dma_data, hdr);
310 struct msmsdcc_host *host = dma_data->host;
311
312 dma_data->result = result;
313 if (err)
314 memcpy(&dma_data->err, err, sizeof(struct msm_dmov_errdata));
315
316 tasklet_schedule(&host->dma_tlet);
317}
318
276static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data) 319static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
277{ 320{
278 if (host->dma.channel == -1) 321 if (host->dma.channel == -1)
@@ -333,14 +376,30 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
333 host->curr.user_pages = 0; 376 host->curr.user_pages = 0;
334 377
335 box = &nc->cmd[0]; 378 box = &nc->cmd[0];
336 for (i = 0; i < host->dma.num_ents; i++) {
337 box->cmd = CMD_MODE_BOX;
338 379
339 /* Initialize sg dma address */ 380 /* location of command block must be 64 bit aligned */
340 sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg)) 381 BUG_ON(host->dma.cmd_busaddr & 0x07);
341 + sg->offset;
342 382
343 if (i == (host->dma.num_ents - 1)) 383 nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
384 host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
385 DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
386 host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
387
388 n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
389 host->dma.num_ents, host->dma.dir);
390 if (n == 0) {
391 printk(KERN_ERR "%s: Unable to map in all sg elements\n",
392 mmc_hostname(host->mmc));
393 host->dma.sg = NULL;
394 host->dma.num_ents = 0;
395 return -ENOMEM;
396 }
397
398 for_each_sg(host->dma.sg, sg, n, i) {
399
400 box->cmd = CMD_MODE_BOX;
401
402 if (i == n - 1)
344 box->cmd |= CMD_LC; 403 box->cmd |= CMD_LC;
345 rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ? 404 rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
346 (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 : 405 (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -368,27 +427,6 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
368 box->cmd |= CMD_DST_CRCI(crci); 427 box->cmd |= CMD_DST_CRCI(crci);
369 } 428 }
370 box++; 429 box++;
371 sg++;
372 }
373
374 /* location of command block must be 64 bit aligned */
375 BUG_ON(host->dma.cmd_busaddr & 0x07);
376
377 nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
378 host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
379 DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
380 host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
381
382 n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
383 host->dma.num_ents, host->dma.dir);
384/* dsb inside dma_map_sg will write nc out to mem as well */
385
386 if (n != host->dma.num_ents) {
387 printk(KERN_ERR "%s: Unable to map in all sg elements\n",
388 mmc_hostname(host->mmc));
389 host->dma.sg = NULL;
390 host->dma.num_ents = 0;
391 return -ENOMEM;
392 } 430 }
393 431
394 return 0; 432 return 0;
@@ -424,6 +462,11 @@ msmsdcc_start_command_deferred(struct msmsdcc_host *host,
424 (cmd->opcode == 53)) 462 (cmd->opcode == 53))
425 *c |= MCI_CSPM_DATCMD; 463 *c |= MCI_CSPM_DATCMD;
426 464
465 if (host->prog_scan && (cmd->opcode == 12)) {
466 *c |= MCI_CPSM_PROGENA;
467 host->prog_enable = true;
468 }
469
427 if (cmd == cmd->mrq->stop) 470 if (cmd == cmd->mrq->stop)
428 *c |= MCI_CSPM_MCIABORT; 471 *c |= MCI_CSPM_MCIABORT;
429 472
@@ -450,7 +493,6 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
450 host->curr.xfer_remain = host->curr.xfer_size; 493 host->curr.xfer_remain = host->curr.xfer_size;
451 host->curr.data_xfered = 0; 494 host->curr.data_xfered = 0;
452 host->curr.got_dataend = 0; 495 host->curr.got_dataend = 0;
453 host->curr.got_datablkend = 0;
454 496
455 memset(&host->pio, 0, sizeof(host->pio)); 497 memset(&host->pio, 0, sizeof(host->pio));
456 498
@@ -494,6 +536,8 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
494 host->cmd_c = c; 536 host->cmd_c = c;
495 } 537 }
496 msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr); 538 msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
539 if (data->flags & MMC_DATA_WRITE)
540 host->prog_scan = true;
497 } else { 541 } else {
498 msmsdcc_writel(host, timeout, MMCIDATATIMER); 542 msmsdcc_writel(host, timeout, MMCIDATATIMER);
499 543
@@ -555,6 +599,9 @@ msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
555 uint32_t *ptr = (uint32_t *) buffer; 599 uint32_t *ptr = (uint32_t *) buffer;
556 int count = 0; 600 int count = 0;
557 601
602 if (remain % 4)
603 remain = ((remain >> 2) + 1) << 2;
604
558 while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) { 605 while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
559 *ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE)); 606 *ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
560 ptr++; 607 ptr++;
@@ -575,13 +622,14 @@ msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
575 char *ptr = buffer; 622 char *ptr = buffer;
576 623
577 do { 624 do {
578 unsigned int count, maxcnt; 625 unsigned int count, maxcnt, sz;
579 626
580 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : 627 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
581 MCI_FIFOHALFSIZE; 628 MCI_FIFOHALFSIZE;
582 count = min(remain, maxcnt); 629 count = min(remain, maxcnt);
583 630
584 writesl(base + MMCIFIFO, ptr, count >> 2); 631 sz = count % 4 ? (count >> 2) + 1 : (count >> 2);
632 writesl(base + MMCIFIFO, ptr, sz);
585 ptr += count; 633 ptr += count;
586 remain -= count; 634 remain -= count;
587 635
@@ -702,10 +750,26 @@ static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
702 msm_dmov_stop_cmd(host->dma.channel, 750 msm_dmov_stop_cmd(host->dma.channel,
703 &host->dma.hdr, 0); 751 &host->dma.hdr, 0);
704 else if (host->curr.data) { /* Non DMA */ 752 else if (host->curr.data) { /* Non DMA */
753 msmsdcc_reset_and_restore(host);
705 msmsdcc_stop_data(host); 754 msmsdcc_stop_data(host);
706 msmsdcc_request_end(host, cmd->mrq); 755 msmsdcc_request_end(host, cmd->mrq);
707 } else /* host->data == NULL */ 756 } else { /* host->data == NULL */
708 msmsdcc_request_end(host, cmd->mrq); 757 if (!cmd->error && host->prog_enable) {
758 if (status & MCI_PROGDONE) {
759 host->prog_scan = false;
760 host->prog_enable = false;
761 msmsdcc_request_end(host, cmd->mrq);
762 } else {
763 host->curr.cmd = cmd;
764 }
765 } else {
766 if (host->prog_enable) {
767 host->prog_scan = false;
768 host->prog_enable = false;
769 }
770 msmsdcc_request_end(host, cmd->mrq);
771 }
772 }
709 } else if (cmd->data) 773 } else if (cmd->data)
710 if (!(cmd->data->flags & MMC_DATA_READ)) 774 if (!(cmd->data->flags & MMC_DATA_READ))
711 msmsdcc_start_data(host, cmd->data, 775 msmsdcc_start_data(host, cmd->data,
@@ -719,7 +783,7 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
719 struct mmc_data *data = host->curr.data; 783 struct mmc_data *data = host->curr.data;
720 784
721 if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL | 785 if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
722 MCI_CMDTIMEOUT) && host->curr.cmd) { 786 MCI_CMDTIMEOUT | MCI_PROGDONE) && host->curr.cmd) {
723 msmsdcc_do_cmdirq(host, status); 787 msmsdcc_do_cmdirq(host, status);
724 } 788 }
725 789
@@ -735,6 +799,7 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
735 msm_dmov_stop_cmd(host->dma.channel, 799 msm_dmov_stop_cmd(host->dma.channel,
736 &host->dma.hdr, 0); 800 &host->dma.hdr, 0);
737 else { 801 else {
802 msmsdcc_reset_and_restore(host);
738 if (host->curr.data) 803 if (host->curr.data)
739 msmsdcc_stop_data(host); 804 msmsdcc_stop_data(host);
740 if (!data->stop) 805 if (!data->stop)
@@ -748,14 +813,10 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
748 if (!host->curr.got_dataend && (status & MCI_DATAEND)) 813 if (!host->curr.got_dataend && (status & MCI_DATAEND))
749 host->curr.got_dataend = 1; 814 host->curr.got_dataend = 1;
750 815
751 if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND))
752 host->curr.got_datablkend = 1;
753
754 /* 816 /*
755 * If DMA is still in progress, we complete via the completion handler 817 * If DMA is still in progress, we complete via the completion handler
756 */ 818 */
757 if (host->curr.got_dataend && host->curr.got_datablkend && 819 if (host->curr.got_dataend && !host->dma.busy) {
758 !host->dma.busy) {
759 /* 820 /*
760 * There appears to be an issue in the controller where 821 * There appears to be an issue in the controller where
761 * if you request a small block transfer (< fifo size), 822 * if you request a small block transfer (< fifo size),
@@ -792,8 +853,7 @@ msmsdcc_irq(int irq, void *dev_id)
792 853
793 do { 854 do {
794 status = msmsdcc_readl(host, MMCISTATUS); 855 status = msmsdcc_readl(host, MMCISTATUS);
795 status &= (msmsdcc_readl(host, MMCIMASK0) | 856 status &= msmsdcc_readl(host, MMCIMASK0);
796 MCI_DATABLOCKENDMASK);
797 msmsdcc_writel(host, status, MMCICLEAR); 857 msmsdcc_writel(host, status, MMCICLEAR);
798 858
799 if (status & MCI_SDIOINTR) 859 if (status & MCI_SDIOINTR)
@@ -874,6 +934,38 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
874 spin_unlock_irqrestore(&host->lock, flags); 934 spin_unlock_irqrestore(&host->lock, flags);
875} 935}
876 936
937static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable)
938{
939 struct msm_mmc_gpio_data *curr;
940 int i, rc = 0;
941
942 if (!host->plat->gpio_data && host->gpio_config_status == enable)
943 return;
944
945 curr = host->plat->gpio_data;
946 for (i = 0; i < curr->size; i++) {
947 if (enable) {
948 rc = gpio_request(curr->gpio[i].no,
949 curr->gpio[i].name);
950 if (rc) {
951 pr_err("%s: gpio_request(%d, %s) failed %d\n",
952 mmc_hostname(host->mmc),
953 curr->gpio[i].no,
954 curr->gpio[i].name, rc);
955 goto free_gpios;
956 }
957 } else {
958 gpio_free(curr->gpio[i].no);
959 }
960 }
961 host->gpio_config_status = enable;
962 return;
963
964free_gpios:
965 for (; i >= 0; i--)
966 gpio_free(curr->gpio[i].no);
967}
968
877static void 969static void
878msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 970msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
879{ 971{
@@ -886,6 +978,8 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
886 978
887 msmsdcc_enable_clocks(host); 979 msmsdcc_enable_clocks(host);
888 980
981 spin_unlock_irqrestore(&host->lock, flags);
982
889 if (ios->clock) { 983 if (ios->clock) {
890 if (ios->clock != host->clk_rate) { 984 if (ios->clock != host->clk_rate) {
891 rc = clk_set_rate(host->clk, ios->clock); 985 rc = clk_set_rate(host->clk, ios->clock);
@@ -912,9 +1006,11 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
912 1006
913 switch (ios->power_mode) { 1007 switch (ios->power_mode) {
914 case MMC_POWER_OFF: 1008 case MMC_POWER_OFF:
1009 msmsdcc_setup_gpio(host, false);
915 break; 1010 break;
916 case MMC_POWER_UP: 1011 case MMC_POWER_UP:
917 pwr |= MCI_PWR_UP; 1012 pwr |= MCI_PWR_UP;
1013 msmsdcc_setup_gpio(host, true);
918 break; 1014 break;
919 case MMC_POWER_ON: 1015 case MMC_POWER_ON:
920 pwr |= MCI_PWR_ON; 1016 pwr |= MCI_PWR_ON;
@@ -931,9 +1027,10 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
931 msmsdcc_writel(host, pwr, MMCIPOWER); 1027 msmsdcc_writel(host, pwr, MMCIPOWER);
932 } 1028 }
933#if BUSCLK_PWRSAVE 1029#if BUSCLK_PWRSAVE
1030 spin_lock_irqsave(&host->lock, flags);
934 msmsdcc_disable_clocks(host, 1); 1031 msmsdcc_disable_clocks(host, 1);
935#endif
936 spin_unlock_irqrestore(&host->lock, flags); 1032 spin_unlock_irqrestore(&host->lock, flags);
1033#endif
937} 1034}
938 1035
939static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable) 1036static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1118,6 +1215,9 @@ msmsdcc_probe(struct platform_device *pdev)
1118 host->dmares = dmares; 1215 host->dmares = dmares;
1119 spin_lock_init(&host->lock); 1216 spin_lock_init(&host->lock);
1120 1217
1218 tasklet_init(&host->dma_tlet, msmsdcc_dma_complete_tlet,
1219 (unsigned long)host);
1220
1121 /* 1221 /*
1122 * Setup DMA 1222 * Setup DMA
1123 */ 1223 */
@@ -1256,9 +1356,6 @@ msmsdcc_probe(struct platform_device *pdev)
1256 if (host->timer.function) 1356 if (host->timer.function)
1257 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc)); 1357 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
1258 1358
1259#if BUSCLK_PWRSAVE
1260 msmsdcc_disable_clocks(host, 1);
1261#endif
1262 return 0; 1359 return 0;
1263 cmd_irq_free: 1360 cmd_irq_free:
1264 free_irq(cmd_irqres->start, host); 1361 free_irq(cmd_irqres->start, host);
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index ff2b0f74f6f4..42d7bbc977c5 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -138,7 +138,7 @@
138#define MCI_IRQENABLE \ 138#define MCI_IRQENABLE \
139 (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \ 139 (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
140 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ 140 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
141 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK) 141 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK|MCI_PROGDONEMASK)
142 142
143/* 143/*
144 * The size of the FIFO in bytes. 144 * The size of the FIFO in bytes.
@@ -172,6 +172,8 @@ struct msmsdcc_dma_data {
172 struct msmsdcc_host *host; 172 struct msmsdcc_host *host;
173 int busy; /* Set if DM is busy */ 173 int busy; /* Set if DM is busy */
174 int active; 174 int active;
175 unsigned int result;
176 struct msm_dmov_errdata err;
175}; 177};
176 178
177struct msmsdcc_pio_data { 179struct msmsdcc_pio_data {
@@ -188,7 +190,6 @@ struct msmsdcc_curr_req {
188 unsigned int xfer_remain; /* Bytes remaining to send */ 190 unsigned int xfer_remain; /* Bytes remaining to send */
189 unsigned int data_xfered; /* Bytes acked by BLKEND irq */ 191 unsigned int data_xfered; /* Bytes acked by BLKEND irq */
190 int got_dataend; 192 int got_dataend;
191 int got_datablkend;
192 int user_pages; 193 int user_pages;
193}; 194};
194 195
@@ -235,13 +236,17 @@ struct msmsdcc_host {
235 int cmdpoll; 236 int cmdpoll;
236 struct msmsdcc_stats stats; 237 struct msmsdcc_stats stats;
237 238
239 struct tasklet_struct dma_tlet;
238 /* Command parameters */ 240 /* Command parameters */
239 unsigned int cmd_timeout; 241 unsigned int cmd_timeout;
240 unsigned int cmd_pio_irqmask; 242 unsigned int cmd_pio_irqmask;
241 unsigned int cmd_datactrl; 243 unsigned int cmd_datactrl;
242 struct mmc_command *cmd_cmd; 244 struct mmc_command *cmd_cmd;
243 u32 cmd_c; 245 u32 cmd_c;
246 bool gpio_config_status;
244 247
248 bool prog_scan;
249 bool prog_enable;
245}; 250};
246 251
247#endif 252#endif
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index bdd2cbb87cba..cc20e0259325 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,16 +31,15 @@
31#include <linux/clk.h> 31#include <linux/clk.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/gpio.h> 33#include <linux/gpio.h>
34#include <linux/regulator/consumer.h>
35#include <linux/dmaengine.h>
34 36
35#include <asm/dma.h> 37#include <asm/dma.h>
36#include <asm/irq.h> 38#include <asm/irq.h>
37#include <asm/sizes.h> 39#include <asm/sizes.h>
38#include <mach/mmc.h> 40#include <mach/mmc.h>
39 41
40#ifdef CONFIG_ARCH_MX2 42#include <mach/dma.h>
41#include <mach/dma-mx1-mx2.h>
42#define HAS_DMA
43#endif
44 43
45#define DRIVER_NAME "mxc-mmc" 44#define DRIVER_NAME "mxc-mmc"
46 45
@@ -117,7 +116,8 @@ struct mxcmci_host {
117 void __iomem *base; 116 void __iomem *base;
118 int irq; 117 int irq;
119 int detect_irq; 118 int detect_irq;
120 int dma; 119 struct dma_chan *dma;
120 struct dma_async_tx_descriptor *desc;
121 int do_dma; 121 int do_dma;
122 int default_irq_mask; 122 int default_irq_mask;
123 int use_sdio; 123 int use_sdio;
@@ -128,7 +128,6 @@ struct mxcmci_host {
128 struct mmc_command *cmd; 128 struct mmc_command *cmd;
129 struct mmc_data *data; 129 struct mmc_data *data;
130 130
131 unsigned int dma_nents;
132 unsigned int datasize; 131 unsigned int datasize;
133 unsigned int dma_dir; 132 unsigned int dma_dir;
134 133
@@ -141,10 +140,54 @@ struct mxcmci_host {
141 140
142 struct work_struct datawork; 141 struct work_struct datawork;
143 spinlock_t lock; 142 spinlock_t lock;
143
144 struct regulator *vcc;
145
146 int burstlen;
147 int dmareq;
148 struct dma_slave_config dma_slave_config;
149 struct imx_dma_data dma_data;
144}; 150};
145 151
146static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); 152static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
147 153
154static inline void mxcmci_init_ocr(struct mxcmci_host *host)
155{
156 host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
157
158 if (IS_ERR(host->vcc)) {
159 host->vcc = NULL;
160 } else {
161 host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
162 if (host->pdata && host->pdata->ocr_avail)
163 dev_warn(mmc_dev(host->mmc),
164 "pdata->ocr_avail will not be used\n");
165 }
166
167 if (host->vcc == NULL) {
168 /* fall-back to platform data */
169 if (host->pdata && host->pdata->ocr_avail)
170 host->mmc->ocr_avail = host->pdata->ocr_avail;
171 else
172 host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
173 }
174}
175
176static inline void mxcmci_set_power(struct mxcmci_host *host,
177 unsigned char power_mode,
178 unsigned int vdd)
179{
180 if (host->vcc) {
181 if (power_mode == MMC_POWER_UP)
182 mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
183 else if (power_mode == MMC_POWER_OFF)
184 mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
185 }
186
187 if (host->pdata && host->pdata->setpower)
188 host->pdata->setpower(mmc_dev(host->mmc), vdd);
189}
190
148static inline int mxcmci_use_dma(struct mxcmci_host *host) 191static inline int mxcmci_use_dma(struct mxcmci_host *host)
149{ 192{
150 return host->do_dma; 193 return host->do_dma;
@@ -166,17 +209,16 @@ static void mxcmci_softreset(struct mxcmci_host *host)
166 209
167 writew(0xff, host->base + MMC_REG_RES_TO); 210 writew(0xff, host->base + MMC_REG_RES_TO);
168} 211}
212static int mxcmci_setup_dma(struct mmc_host *mmc);
169 213
170static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) 214static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
171{ 215{
172 unsigned int nob = data->blocks; 216 unsigned int nob = data->blocks;
173 unsigned int blksz = data->blksz; 217 unsigned int blksz = data->blksz;
174 unsigned int datasize = nob * blksz; 218 unsigned int datasize = nob * blksz;
175#ifdef HAS_DMA
176 struct scatterlist *sg; 219 struct scatterlist *sg;
177 int i; 220 int i, nents;
178 int ret; 221
179#endif
180 if (data->flags & MMC_DATA_STREAM) 222 if (data->flags & MMC_DATA_STREAM)
181 nob = 0xffff; 223 nob = 0xffff;
182 224
@@ -187,7 +229,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
187 writew(blksz, host->base + MMC_REG_BLK_LEN); 229 writew(blksz, host->base + MMC_REG_BLK_LEN);
188 host->datasize = datasize; 230 host->datasize = datasize;
189 231
190#ifdef HAS_DMA 232 if (!mxcmci_use_dma(host))
233 return 0;
234
191 for_each_sg(data->sg, sg, data->sg_len, i) { 235 for_each_sg(data->sg, sg, data->sg_len, i) {
192 if (sg->offset & 3 || sg->length & 3) { 236 if (sg->offset & 3 || sg->length & 3) {
193 host->do_dma = 0; 237 host->do_dma = 0;
@@ -195,34 +239,30 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
195 } 239 }
196 } 240 }
197 241
198 if (data->flags & MMC_DATA_READ) { 242 if (data->flags & MMC_DATA_READ)
199 host->dma_dir = DMA_FROM_DEVICE; 243 host->dma_dir = DMA_FROM_DEVICE;
200 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, 244 else
201 data->sg_len, host->dma_dir);
202
203 ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
204 datasize,
205 host->res->start + MMC_REG_BUFFER_ACCESS,
206 DMA_MODE_READ);
207 } else {
208 host->dma_dir = DMA_TO_DEVICE; 245 host->dma_dir = DMA_TO_DEVICE;
209 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
210 data->sg_len, host->dma_dir);
211 246
212 ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, 247 nents = dma_map_sg(host->dma->device->dev, data->sg,
213 datasize, 248 data->sg_len, host->dma_dir);
214 host->res->start + MMC_REG_BUFFER_ACCESS, 249 if (nents != data->sg_len)
215 DMA_MODE_WRITE); 250 return -EINVAL;
216 } 251
252 host->desc = host->dma->device->device_prep_slave_sg(host->dma,
253 data->sg, data->sg_len, host->dma_dir,
254 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
217 255
218 if (ret) { 256 if (!host->desc) {
219 dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret); 257 dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
220 return ret; 258 host->dma_dir);
259 host->do_dma = 0;
260 return 0; /* Fall back to PIO */
221 } 261 }
222 wmb(); 262 wmb();
223 263
224 imx_dma_enable(host->dma); 264 dmaengine_submit(host->desc);
225#endif /* HAS_DMA */ 265
226 return 0; 266 return 0;
227} 267}
228 268
@@ -297,13 +337,11 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
297 struct mmc_data *data = host->data; 337 struct mmc_data *data = host->data;
298 int data_error; 338 int data_error;
299 339
300#ifdef HAS_DMA
301 if (mxcmci_use_dma(host)) { 340 if (mxcmci_use_dma(host)) {
302 imx_dma_disable(host->dma); 341 dmaengine_terminate_all(host->dma);
303 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents, 342 dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
304 host->dma_dir); 343 host->dma_dir);
305 } 344 }
306#endif
307 345
308 if (stat & STATUS_ERR_MASK) { 346 if (stat & STATUS_ERR_MASK) {
309 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", 347 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
@@ -505,7 +543,6 @@ static void mxcmci_datawork(struct work_struct *work)
505 } 543 }
506} 544}
507 545
508#ifdef HAS_DMA
509static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat) 546static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
510{ 547{
511 struct mmc_data *data = host->data; 548 struct mmc_data *data = host->data;
@@ -528,7 +565,6 @@ static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
528 mxcmci_finish_request(host, host->req); 565 mxcmci_finish_request(host, host->req);
529 } 566 }
530} 567}
531#endif /* HAS_DMA */
532 568
533static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat) 569static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
534{ 570{
@@ -566,12 +602,10 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
566 sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio; 602 sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
567 spin_unlock_irqrestore(&host->lock, flags); 603 spin_unlock_irqrestore(&host->lock, flags);
568 604
569#ifdef HAS_DMA
570 if (mxcmci_use_dma(host) && 605 if (mxcmci_use_dma(host) &&
571 (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE))) 606 (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
572 writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, 607 writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
573 host->base + MMC_REG_STATUS); 608 host->base + MMC_REG_STATUS);
574#endif
575 609
576 if (sdio_irq) { 610 if (sdio_irq) {
577 writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS); 611 writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
@@ -581,14 +615,14 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
581 if (stat & STATUS_END_CMD_RESP) 615 if (stat & STATUS_END_CMD_RESP)
582 mxcmci_cmd_done(host, stat); 616 mxcmci_cmd_done(host, stat);
583 617
584#ifdef HAS_DMA
585 if (mxcmci_use_dma(host) && 618 if (mxcmci_use_dma(host) &&
586 (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) 619 (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
587 mxcmci_data_done(host, stat); 620 mxcmci_data_done(host, stat);
588#endif 621
589 if (host->default_irq_mask && 622 if (host->default_irq_mask &&
590 (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL))) 623 (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
591 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 624 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
625
592 return IRQ_HANDLED; 626 return IRQ_HANDLED;
593} 627}
594 628
@@ -602,9 +636,10 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
602 636
603 host->req = req; 637 host->req = req;
604 host->cmdat &= ~CMD_DAT_CONT_INIT; 638 host->cmdat &= ~CMD_DAT_CONT_INIT;
605#ifdef HAS_DMA 639
606 host->do_dma = 1; 640 if (host->dma)
607#endif 641 host->do_dma = 1;
642
608 if (req->data) { 643 if (req->data) {
609 error = mxcmci_setup_data(host, req->data); 644 error = mxcmci_setup_data(host, req->data);
610 if (error) { 645 if (error) {
@@ -620,6 +655,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
620 } 655 }
621 656
622 error = mxcmci_start_cmd(host, req->cmd, cmdat); 657 error = mxcmci_start_cmd(host, req->cmd, cmdat);
658
623out: 659out:
624 if (error) 660 if (error)
625 mxcmci_finish_request(host, req); 661 mxcmci_finish_request(host, req);
@@ -658,31 +694,55 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
658 prescaler, divider, clk_in, clk_ios); 694 prescaler, divider, clk_in, clk_ios);
659} 695}
660 696
697static int mxcmci_setup_dma(struct mmc_host *mmc)
698{
699 struct mxcmci_host *host = mmc_priv(mmc);
700 struct dma_slave_config *config = &host->dma_slave_config;
701
702 config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
703 config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
704 config->dst_addr_width = 4;
705 config->src_addr_width = 4;
706 config->dst_maxburst = host->burstlen;
707 config->src_maxburst = host->burstlen;
708
709 return dmaengine_slave_config(host->dma, config);
710}
711
661static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 712static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
662{ 713{
663 struct mxcmci_host *host = mmc_priv(mmc); 714 struct mxcmci_host *host = mmc_priv(mmc);
664#ifdef HAS_DMA 715 int burstlen, ret;
665 unsigned int blen; 716
666 /* 717 /*
667 * use burstlen of 64 in 4 bit mode (--> reg value 0) 718 * use burstlen of 64 in 4 bit mode (--> reg value 0)
668 * use burstlen of 16 in 1 bit mode (--> reg value 16) 719 * use burstlen of 16 in 1 bit mode (--> reg value 16)
669 */ 720 */
670 if (ios->bus_width == MMC_BUS_WIDTH_4) 721 if (ios->bus_width == MMC_BUS_WIDTH_4)
671 blen = 0; 722 burstlen = 64;
672 else 723 else
673 blen = 16; 724 burstlen = 16;
725
726 if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
727 host->burstlen = burstlen;
728 ret = mxcmci_setup_dma(mmc);
729 if (ret) {
730 dev_err(mmc_dev(host->mmc),
731 "failed to config DMA channel. Falling back to PIO\n");
732 dma_release_channel(host->dma);
733 host->do_dma = 0;
734 }
735 }
674 736
675 imx_dma_config_burstlen(host->dma, blen);
676#endif
677 if (ios->bus_width == MMC_BUS_WIDTH_4) 737 if (ios->bus_width == MMC_BUS_WIDTH_4)
678 host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; 738 host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
679 else 739 else
680 host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4; 740 host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
681 741
682 if (host->power_mode != ios->power_mode) { 742 if (host->power_mode != ios->power_mode) {
683 if (host->pdata && host->pdata->setpower) 743 mxcmci_set_power(host, ios->power_mode, ios->vdd);
684 host->pdata->setpower(mmc_dev(mmc), ios->vdd);
685 host->power_mode = ios->power_mode; 744 host->power_mode = ios->power_mode;
745
686 if (ios->power_mode == MMC_POWER_ON) 746 if (ios->power_mode == MMC_POWER_ON)
687 host->cmdat |= CMD_DAT_CONT_INIT; 747 host->cmdat |= CMD_DAT_CONT_INIT;
688 } 748 }
@@ -754,6 +814,18 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
754 host->caps |= MMC_CAP_4_BIT_DATA; 814 host->caps |= MMC_CAP_4_BIT_DATA;
755} 815}
756 816
817static bool filter(struct dma_chan *chan, void *param)
818{
819 struct mxcmci_host *host = param;
820
821 if (!imx_dma_is_general_purpose(chan))
822 return false;
823
824 chan->private = &host->dma_data;
825
826 return true;
827}
828
757static const struct mmc_host_ops mxcmci_ops = { 829static const struct mmc_host_ops mxcmci_ops = {
758 .request = mxcmci_request, 830 .request = mxcmci_request,
759 .set_ios = mxcmci_set_ios, 831 .set_ios = mxcmci_set_ios,
@@ -768,6 +840,7 @@ static int mxcmci_probe(struct platform_device *pdev)
768 struct mxcmci_host *host = NULL; 840 struct mxcmci_host *host = NULL;
769 struct resource *iores, *r; 841 struct resource *iores, *r;
770 int ret = 0, irq; 842 int ret = 0, irq;
843 dma_cap_mask_t mask;
771 844
772 printk(KERN_INFO "i.MX SDHC driver\n"); 845 printk(KERN_INFO "i.MX SDHC driver\n");
773 846
@@ -807,10 +880,7 @@ static int mxcmci_probe(struct platform_device *pdev)
807 host->pdata = pdev->dev.platform_data; 880 host->pdata = pdev->dev.platform_data;
808 spin_lock_init(&host->lock); 881 spin_lock_init(&host->lock);
809 882
810 if (host->pdata && host->pdata->ocr_avail) 883 mxcmci_init_ocr(host);
811 mmc->ocr_avail = host->pdata->ocr_avail;
812 else
813 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
814 884
815 if (host->pdata && host->pdata->dat3_card_detect) 885 if (host->pdata && host->pdata->dat3_card_detect)
816 host->default_irq_mask = 886 host->default_irq_mask =
@@ -846,29 +916,23 @@ static int mxcmci_probe(struct platform_device *pdev)
846 916
847 writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR); 917 writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR);
848 918
849#ifdef HAS_DMA
850 host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
851 if (host->dma < 0) {
852 dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
853 ret = -EBUSY;
854 goto out_clk_put;
855 }
856
857 r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 919 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
858 if (!r) { 920 if (r) {
859 ret = -EINVAL; 921 host->dmareq = r->start;
860 goto out_free_dma; 922 host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
923 host->dma_data.priority = DMA_PRIO_LOW;
924 host->dma_data.dma_request = host->dmareq;
925 dma_cap_zero(mask);
926 dma_cap_set(DMA_SLAVE, mask);
927 host->dma = dma_request_channel(mask, filter, host);
928 if (host->dma)
929 mmc->max_seg_size = dma_get_max_seg_size(
930 host->dma->device->dev);
861 } 931 }
862 932
863 ret = imx_dma_config_channel(host->dma, 933 if (!host->dma)
864 IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_FIFO, 934 dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
865 IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, 935
866 r->start, 0);
867 if (ret) {
868 dev_err(mmc_dev(host->mmc), "failed to config DMA channel\n");
869 goto out_free_dma;
870 }
871#endif
872 INIT_WORK(&host->datawork, mxcmci_datawork); 936 INIT_WORK(&host->datawork, mxcmci_datawork);
873 937
874 ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host); 938 ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
@@ -891,9 +955,8 @@ static int mxcmci_probe(struct platform_device *pdev)
891out_free_irq: 955out_free_irq:
892 free_irq(host->irq, host); 956 free_irq(host->irq, host);
893out_free_dma: 957out_free_dma:
894#ifdef HAS_DMA 958 if (host->dma)
895 imx_dma_free(host->dma); 959 dma_release_channel(host->dma);
896#endif
897out_clk_put: 960out_clk_put:
898 clk_disable(host->clk); 961 clk_disable(host->clk);
899 clk_put(host->clk); 962 clk_put(host->clk);
@@ -915,19 +978,22 @@ static int mxcmci_remove(struct platform_device *pdev)
915 978
916 mmc_remove_host(mmc); 979 mmc_remove_host(mmc);
917 980
981 if (host->vcc)
982 regulator_put(host->vcc);
983
918 if (host->pdata && host->pdata->exit) 984 if (host->pdata && host->pdata->exit)
919 host->pdata->exit(&pdev->dev, mmc); 985 host->pdata->exit(&pdev->dev, mmc);
920 986
921 free_irq(host->irq, host); 987 free_irq(host->irq, host);
922 iounmap(host->base); 988 iounmap(host->base);
923#ifdef HAS_DMA 989
924 imx_dma_free(host->dma); 990 if (host->dma)
925#endif 991 dma_release_channel(host->dma);
992
926 clk_disable(host->clk); 993 clk_disable(host->clk);
927 clk_put(host->clk); 994 clk_put(host->clk);
928 995
929 release_mem_region(host->res->start, resource_size(host->res)); 996 release_mem_region(host->res->start, resource_size(host->res));
930 release_resource(host->res);
931 997
932 mmc_free_host(mmc); 998 mmc_free_host(mmc);
933 999
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
new file mode 100644
index 000000000000..99d39a6a1032
--- /dev/null
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -0,0 +1,874 @@
1/*
2 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
3 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
4 *
5 * Copyright 2008 Embedded Alley Solutions, Inc.
6 * Copyright 2009-2011 Freescale Semiconductor, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/platform_device.h>
27#include <linux/delay.h>
28#include <linux/interrupt.h>
29#include <linux/dma-mapping.h>
30#include <linux/dmaengine.h>
31#include <linux/highmem.h>
32#include <linux/clk.h>
33#include <linux/err.h>
34#include <linux/completion.h>
35#include <linux/mmc/host.h>
36#include <linux/mmc/mmc.h>
37#include <linux/mmc/sdio.h>
38#include <linux/gpio.h>
39#include <linux/regulator/consumer.h>
40
41#include <mach/mxs.h>
42#include <mach/common.h>
43#include <mach/dma.h>
44#include <mach/mmc.h>
45
46#define DRIVER_NAME "mxs-mmc"
47
48/* card detect polling timeout */
49#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
50
51#define SSP_VERSION_LATEST 4
52#define ssp_is_old() (host->version < SSP_VERSION_LATEST)
53
54/* SSP registers */
55#define HW_SSP_CTRL0 0x000
56#define BM_SSP_CTRL0_RUN (1 << 29)
57#define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28)
58#define BM_SSP_CTRL0_IGNORE_CRC (1 << 26)
59#define BM_SSP_CTRL0_READ (1 << 25)
60#define BM_SSP_CTRL0_DATA_XFER (1 << 24)
61#define BP_SSP_CTRL0_BUS_WIDTH (22)
62#define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22)
63#define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21)
64#define BM_SSP_CTRL0_LONG_RESP (1 << 19)
65#define BM_SSP_CTRL0_GET_RESP (1 << 17)
66#define BM_SSP_CTRL0_ENABLE (1 << 16)
67#define BP_SSP_CTRL0_XFER_COUNT (0)
68#define BM_SSP_CTRL0_XFER_COUNT (0xffff)
69#define HW_SSP_CMD0 0x010
70#define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25)
71#define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22)
72#define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21)
73#define BM_SSP_CMD0_APPEND_8CYC (1 << 20)
74#define BP_SSP_CMD0_BLOCK_SIZE (16)
75#define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16)
76#define BP_SSP_CMD0_BLOCK_COUNT (8)
77#define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8)
78#define BP_SSP_CMD0_CMD (0)
79#define BM_SSP_CMD0_CMD (0xff)
80#define HW_SSP_CMD1 0x020
81#define HW_SSP_XFER_SIZE 0x030
82#define HW_SSP_BLOCK_SIZE 0x040
83#define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4)
84#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
85#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0)
86#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf)
87#define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070)
88#define BP_SSP_TIMING_TIMEOUT (16)
89#define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
90#define BP_SSP_TIMING_CLOCK_DIVIDE (8)
91#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
92#define BP_SSP_TIMING_CLOCK_RATE (0)
93#define BM_SSP_TIMING_CLOCK_RATE (0xff)
94#define HW_SSP_CTRL1 (ssp_is_old() ? 0x060 : 0x080)
95#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
96#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
97#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
98#define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28)
99#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27)
100#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26)
101#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25)
102#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24)
103#define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23)
104#define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22)
105#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21)
106#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20)
107#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17)
108#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16)
109#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15)
110#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14)
111#define BM_SSP_CTRL1_DMA_ENABLE (1 << 13)
112#define BM_SSP_CTRL1_POLARITY (1 << 9)
113#define BP_SSP_CTRL1_WORD_LENGTH (4)
114#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
115#define BP_SSP_CTRL1_SSP_MODE (0)
116#define BM_SSP_CTRL1_SSP_MODE (0xf)
117#define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0)
118#define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0)
119#define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0)
120#define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0)
121#define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100)
122#define BM_SSP_STATUS_CARD_DETECT (1 << 28)
123#define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
124#define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130)
125#define BP_SSP_VERSION_MAJOR (24)
126
127#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
128
129#define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \
130 BM_SSP_CTRL1_RESP_ERR_IRQ | \
131 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
132 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
133 BM_SSP_CTRL1_DATA_CRC_IRQ | \
134 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
135 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
136 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
137
138#define SSP_PIO_NUM 3
139
140struct mxs_mmc_host {
141 struct mmc_host *mmc;
142 struct mmc_request *mrq;
143 struct mmc_command *cmd;
144 struct mmc_data *data;
145
146 void __iomem *base;
147 int irq;
148 struct resource *res;
149 struct resource *dma_res;
150 struct clk *clk;
151 unsigned int clk_rate;
152
153 struct dma_chan *dmach;
154 struct mxs_dma_data dma_data;
155 unsigned int dma_dir;
156 u32 ssp_pio_words[SSP_PIO_NUM];
157
158 unsigned int version;
159 unsigned char bus_width;
160 spinlock_t lock;
161 int sdio_irq_en;
162};
163
164static int mxs_mmc_get_ro(struct mmc_host *mmc)
165{
166 struct mxs_mmc_host *host = mmc_priv(mmc);
167 struct mxs_mmc_platform_data *pdata =
168 mmc_dev(host->mmc)->platform_data;
169
170 if (!pdata)
171 return -EFAULT;
172
173 if (!gpio_is_valid(pdata->wp_gpio))
174 return -EINVAL;
175
176 return gpio_get_value(pdata->wp_gpio);
177}
178
179static int mxs_mmc_get_cd(struct mmc_host *mmc)
180{
181 struct mxs_mmc_host *host = mmc_priv(mmc);
182
183 return !(readl(host->base + HW_SSP_STATUS) &
184 BM_SSP_STATUS_CARD_DETECT);
185}
186
/*
 * Soft-reset the SSP block and reprogram its baseline configuration:
 * SD/MMC mode, 8-bit word length, DMA enable, IRQ polarity and the
 * error/timeout interrupt enables.  SDIO IRQ detection is re-applied
 * if a card driver had it enabled before the reset.
 */
static void mxs_mmc_reset(struct mxs_mmc_host *host)
{
	u32 ctrl0, ctrl1;

	mxs_reset_block(host->base);

	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
	/* 0x3 = SD/MMC mode, 0x7 = 8-bit word length */
	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
		BF_SSP(0x7, CTRL1_WORD_LENGTH) |
		BM_SSP_CTRL1_DMA_ENABLE |
		BM_SSP_CTRL1_POLARITY |
		BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
		BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_ERR_IRQ_EN;

	/* Maximum data timeout, clock divider 2, rate field 0. */
	writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
	       BF_SSP(2, TIMING_CLOCK_DIVIDE) |
	       BF_SSP(0, TIMING_CLOCK_RATE),
	       host->base + HW_SSP_TIMING);

	/* Restore SDIO interrupt state wiped out by the block reset. */
	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
	}

	writel(ctrl0, host->base + HW_SSP_CTRL0);
	writel(ctrl1, host->base + HW_SSP_CTRL1);
}
217
218static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
219 struct mmc_command *cmd);
220
/*
 * Complete the current command: latch the response registers, unmap
 * and account the data transfer, then either chain the stop command
 * or hand the request back to the MMC core.  Invoked from the DMA
 * completion callback.
 */
static void mxs_mmc_request_done(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mmc_request *mrq = host->mrq;

	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
		/* 136-bit responses arrive least-significant word first. */
		if (mmc_resp_type(cmd) & MMC_RSP_136) {
			cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0);
			cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1);
			cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2);
			cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3);
		} else {
			cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0);
		}
	}

	if (data) {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			     data->sg_len, host->dma_dir);
		/*
		 * If there was an error on any block, we mark all
		 * data blocks as being in error.
		 */
		if (!data->error)
			data->bytes_xfered = data->blocks * data->blksz;
		else
			data->bytes_xfered = 0;

		host->data = NULL;
		/* Chain the stop command before finishing the request. */
		if (mrq->stop) {
			mxs_mmc_start_cmd(host, mrq->stop);
			return;
		}
	}

	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}
260
/* DMA completion callback: finish the in-flight request. */
static void mxs_mmc_dma_irq_callback(void *param)
{
	struct mxs_mmc_host *host = param;

	mxs_mmc_request_done(host);
}
267
268static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
269{
270 struct mxs_mmc_host *host = dev_id;
271 struct mmc_command *cmd = host->cmd;
272 struct mmc_data *data = host->data;
273 u32 stat;
274
275 spin_lock(&host->lock);
276
277 stat = readl(host->base + HW_SSP_CTRL1);
278 writel(stat & MXS_MMC_IRQ_BITS,
279 host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
280
281 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
282 mmc_signal_sdio_irq(host->mmc);
283
284 spin_unlock(&host->lock);
285
286 if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
287 cmd->error = -ETIMEDOUT;
288 else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
289 cmd->error = -EIO;
290
291 if (data) {
292 if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
293 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
294 data->error = -ETIMEDOUT;
295 else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
296 data->error = -EILSEQ;
297 else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
298 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
299 data->error = -EIO;
300 }
301
302 return IRQ_HANDLED;
303}
304
/*
 * Build a slave-DMA descriptor in one of two modes:
 *  - host->data set: map and transfer the request's scatterlist;
 *  - host->data NULL: "PIO" mode -- ssp_pio_words is passed in place
 *    of a scatterlist; the mxs-dma engine interprets it as SSP
 *    register writes (relies on the mxs-dma driver's contract --
 *    TODO confirm against mach/dma.h).
 * Returns NULL on failure, with any mapped sg unmapped again.
 */
static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
	struct mxs_mmc_host *host, unsigned int append)
{
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = host->data;
	struct scatterlist *sgl;
	unsigned int sg_len;

	if (data) {
		/* data */
		dma_map_sg(mmc_dev(host->mmc), data->sg,
			   data->sg_len, host->dma_dir);
		sgl = data->sg;
		sg_len = data->sg_len;
	} else {
		/* pio */
		sgl = (struct scatterlist *) host->ssp_pio_words;
		sg_len = SSP_PIO_NUM;
	}

	desc = host->dmach->device->device_prep_slave_sg(host->dmach,
				sgl, sg_len, host->dma_dir, append);
	if (desc) {
		desc->callback = mxs_mmc_dma_irq_callback;
		desc->callback_param = host;
	} else {
		/* Undo the mapping done above on prep failure. */
		if (data)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, host->dma_dir);
	}

	return desc;
}
338
339static void mxs_mmc_bc(struct mxs_mmc_host *host)
340{
341 struct mmc_command *cmd = host->cmd;
342 struct dma_async_tx_descriptor *desc;
343 u32 ctrl0, cmd0, cmd1;
344
345 ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
346 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
347 cmd1 = cmd->arg;
348
349 if (host->sdio_irq_en) {
350 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
351 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
352 }
353
354 host->ssp_pio_words[0] = ctrl0;
355 host->ssp_pio_words[1] = cmd0;
356 host->ssp_pio_words[2] = cmd1;
357 host->dma_dir = DMA_NONE;
358 desc = mxs_mmc_prep_dma(host, 0);
359 if (!desc)
360 goto out;
361
362 dmaengine_submit(desc);
363 return;
364
365out:
366 dev_warn(mmc_dev(host->mmc),
367 "%s: failed to prep dma\n", __func__);
368}
369
370static void mxs_mmc_ac(struct mxs_mmc_host *host)
371{
372 struct mmc_command *cmd = host->cmd;
373 struct dma_async_tx_descriptor *desc;
374 u32 ignore_crc, get_resp, long_resp;
375 u32 ctrl0, cmd0, cmd1;
376
377 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
378 0 : BM_SSP_CTRL0_IGNORE_CRC;
379 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
380 BM_SSP_CTRL0_GET_RESP : 0;
381 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
382 BM_SSP_CTRL0_LONG_RESP : 0;
383
384 ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
385 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
386 cmd1 = cmd->arg;
387
388 if (host->sdio_irq_en) {
389 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
390 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
391 }
392
393 host->ssp_pio_words[0] = ctrl0;
394 host->ssp_pio_words[1] = cmd0;
395 host->ssp_pio_words[2] = cmd1;
396 host->dma_dir = DMA_NONE;
397 desc = mxs_mmc_prep_dma(host, 0);
398 if (!desc)
399 goto out;
400
401 dmaengine_submit(desc);
402 return;
403
404out:
405 dev_warn(mmc_dev(host->mmc),
406 "%s: failed to prep dma\n", __func__);
407}
408
/*
 * Convert a timeout in nanoseconds to SSP timeout ticks; the SSP
 * timeout counter advances once every 4096 SSP clocks.  Intermediate
 * math is scaled down to avoid 32-bit overflow on large ns values.
 *
 * NOTE(review): ns / 1000 yields microseconds, not the milliseconds
 * the original comment claimed, so the computed tick count is larger
 * than nominal; this matches long-standing mainline behavior and is
 * preserved as-is -- confirm before "fixing".
 */
static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
{
	const unsigned int ssp_timeout_mul = 4096;
	unsigned int clocks_per_unit = clock_rate / 1000;
	unsigned int units = ns / 1000;
	unsigned int clocks = units * clocks_per_unit;
	unsigned int ticks = clocks / ssp_timeout_mul;

	WARN_ON(ticks == 0);
	return ticks;
}
424
/*
 * Issue an addressed data-transfer command (read or write with a data
 * phase).  Builds the CTRL0/CMD0/CMD1 PIO words, programs transfer
 * size and block count (register layout differs between SSP versions),
 * sets the data timeout, then chains a PIO descriptor followed by the
 * data scatterlist on the DMA channel.
 */
static void mxs_mmc_adtc(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl = data->sg, *sg;
	unsigned int sg_len = data->sg_len;
	int i;

	unsigned short dma_data_dir, timeout;
	unsigned int data_size = 0, log2_blksz;
	unsigned int blocks = data->blocks;

	u32 ignore_crc, get_resp, long_resp, read;
	u32 ctrl0, cmd0, cmd1, val;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		read = 0;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		read = BM_SSP_CTRL0_READ;
	}

	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
		ignore_crc | get_resp | long_resp |
		BM_SSP_CTRL0_DATA_XFER | read |
		BM_SSP_CTRL0_WAIT_FOR_IRQ |
		BM_SSP_CTRL0_ENABLE;

	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);

	/* get logarithm to base 2 of block size for setting register */
	log2_blksz = ilog2(data->blksz);

	/*
	 * take special care of the case that data size from data->sg
	 * is not equal to blocks x blksz
	 */
	for_each_sg(sgl, sg, sg_len, i)
		data_size += sg->length;

	if (data_size != data->blocks * data->blksz)
		blocks = 1;

	/*
	 * xfer count, block size and count need to be set differently:
	 * old SSPs pack them into CTRL0/CMD0, newer ones have dedicated
	 * XFER_SIZE/BLOCK_SIZE registers.
	 */
	if (ssp_is_old()) {
		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
	} else {
		writel(data_size, host->base + HW_SSP_XFER_SIZE);
		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
		       host->base + HW_SSP_BLOCK_SIZE);
	}

	/* STOP and SDIO extended transfers get 8 extra clock cycles. */
	if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
	    (cmd->opcode == SD_IO_RW_EXTENDED))
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	/* set the timeout count */
	timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
	val = readl(host->base + HW_SSP_TIMING);
	val &= ~(BM_SSP_TIMING_TIMEOUT);
	val |= BF_SSP(timeout, TIMING_TIMEOUT);
	writel(val, host->base + HW_SSP_TIMING);

	/* pio: push the three register words first */
	host->ssp_pio_words[0] = ctrl0;
	host->ssp_pio_words[1] = cmd0;
	host->ssp_pio_words[2] = cmd1;
	host->dma_dir = DMA_NONE;
	desc = mxs_mmc_prep_dma(host, 0);
	if (!desc)
		goto out;

	/* append data sg */
	WARN_ON(host->data != NULL);
	host->data = data;
	host->dma_dir = dma_data_dir;
	desc = mxs_mmc_prep_dma(host, 1);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	return;
out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}
530
531static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
532 struct mmc_command *cmd)
533{
534 host->cmd = cmd;
535
536 switch (mmc_cmd_type(cmd)) {
537 case MMC_CMD_BC:
538 mxs_mmc_bc(host);
539 break;
540 case MMC_CMD_BCR:
541 mxs_mmc_ac(host);
542 break;
543 case MMC_CMD_AC:
544 mxs_mmc_ac(host);
545 break;
546 case MMC_CMD_ADTC:
547 mxs_mmc_adtc(host);
548 break;
549 default:
550 dev_warn(mmc_dev(host->mmc),
551 "%s: unknown MMC command\n", __func__);
552 break;
553 }
554}
555
/* mmc_host_ops.request: accept one request at a time and start it. */
static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	/* The core must not issue a new request before the last completes. */
	WARN_ON(host->mrq != NULL);
	host->mrq = mrq;
	mxs_mmc_start_cmd(host, mrq->cmd);
}
564
/*
 * Program the SSP clock dividers so the bus clock approximates 'rate'.
 * The SSP divides its input clock by DIVIDE (even, 2..254) and then by
 * RATE+1; search for the smallest even div1 whose companion div2 fits
 * in 8 bits.
 */
static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
{
	unsigned int ssp_rate, bit_rate;
	u32 div1, div2;
	u32 val;

	ssp_rate = clk_get_rate(host->clk);

	for (div1 = 2; div1 < 254; div1 += 2) {
		div2 = ssp_rate / rate / div1;
		if (div2 < 0x100)
			break;
	}

	if (div1 >= 254) {
		dev_err(mmc_dev(host->mmc),
			"%s: cannot set clock to %d\n", __func__, rate);
		return;
	}

	if (div2 == 0)
		bit_rate = ssp_rate / div1;
	else
		bit_rate = ssp_rate / div1 / div2;

	val = readl(host->base + HW_SSP_TIMING);
	val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
	val |= BF_SSP(div1, TIMING_CLOCK_DIVIDE);
	/*
	 * NOTE(review): when div2 == 0 this writes (u32)-1 masked to 0xff
	 * into CLOCK_RATE while bit_rate assumes no second divide -- looks
	 * inconsistent; confirm against the SSP reference manual.
	 */
	val |= BF_SSP(div2 - 1, TIMING_CLOCK_RATE);
	writel(val, host->base + HW_SSP_TIMING);

	/* Cached for data-timeout tick calculations in mxs_mmc_adtc(). */
	host->clk_rate = bit_rate;

	dev_dbg(mmc_dev(host->mmc),
		"%s: div1 %d, div2 %d, ssp %d, bit %d, rate %d\n",
		__func__, div1, div2, ssp_rate, bit_rate, rate);
}
602
603static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
604{
605 struct mxs_mmc_host *host = mmc_priv(mmc);
606
607 if (ios->bus_width == MMC_BUS_WIDTH_8)
608 host->bus_width = 2;
609 else if (ios->bus_width == MMC_BUS_WIDTH_4)
610 host->bus_width = 1;
611 else
612 host->bus_width = 0;
613
614 if (ios->clock)
615 mxs_mmc_set_clk_rate(host, ios->clock);
616}
617
/*
 * mmc_host_ops.enable_sdio_irq: switch SDIO card-interrupt detection
 * on or off; an irqsave lock makes it safe from any context.
 */
static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	/* Remembered so mxs_mmc_reset()/command setup can re-apply it. */
	host->sdio_irq_en = enable;

	if (enable) {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);

		/* Report an IRQ that may already be pending in STATUS. */
		if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
			mmc_signal_sdio_irq(host->mmc);

	} else {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
645
/* Host operations: request submission, WP/CD queries, bus setup, SDIO IRQ. */
static const struct mmc_host_ops mxs_mmc_ops = {
	.request = mxs_mmc_request,
	.get_ro = mxs_mmc_get_ro,
	.get_cd = mxs_mmc_get_cd,
	.set_ios = mxs_mmc_set_ios,
	.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
};
653
654static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
655{
656 struct mxs_mmc_host *host = param;
657
658 if (!mxs_dma_is_apbh(chan))
659 return false;
660
661 if (chan->chan_id != host->dma_res->start)
662 return false;
663
664 chan->private = &host->dma_data;
665
666 return true;
667}
668
669static int mxs_mmc_probe(struct platform_device *pdev)
670{
671 struct mxs_mmc_host *host;
672 struct mmc_host *mmc;
673 struct resource *iores, *dmares, *r;
674 struct mxs_mmc_platform_data *pdata;
675 int ret = 0, irq_err, irq_dma;
676 dma_cap_mask_t mask;
677
678 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
679 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
680 irq_err = platform_get_irq(pdev, 0);
681 irq_dma = platform_get_irq(pdev, 1);
682 if (!iores || !dmares || irq_err < 0 || irq_dma < 0)
683 return -EINVAL;
684
685 r = request_mem_region(iores->start, resource_size(iores), pdev->name);
686 if (!r)
687 return -EBUSY;
688
689 mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
690 if (!mmc) {
691 ret = -ENOMEM;
692 goto out_release_mem;
693 }
694
695 host = mmc_priv(mmc);
696 host->base = ioremap(r->start, resource_size(r));
697 if (!host->base) {
698 ret = -ENOMEM;
699 goto out_mmc_free;
700 }
701
702 /* only major verion does matter */
703 host->version = readl(host->base + HW_SSP_VERSION) >>
704 BP_SSP_VERSION_MAJOR;
705
706 host->mmc = mmc;
707 host->res = r;
708 host->dma_res = dmares;
709 host->irq = irq_err;
710 host->sdio_irq_en = 0;
711
712 host->clk = clk_get(&pdev->dev, NULL);
713 if (IS_ERR(host->clk)) {
714 ret = PTR_ERR(host->clk);
715 goto out_iounmap;
716 }
717 clk_enable(host->clk);
718
719 mxs_mmc_reset(host);
720
721 dma_cap_zero(mask);
722 dma_cap_set(DMA_SLAVE, mask);
723 host->dma_data.chan_irq = irq_dma;
724 host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
725 if (!host->dmach) {
726 dev_err(mmc_dev(host->mmc),
727 "%s: failed to request dma\n", __func__);
728 goto out_clk_put;
729 }
730
731 /* set mmc core parameters */
732 mmc->ops = &mxs_mmc_ops;
733 mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
734 MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
735
736 pdata = mmc_dev(host->mmc)->platform_data;
737 if (pdata) {
738 if (pdata->flags & SLOTF_8_BIT_CAPABLE)
739 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
740 if (pdata->flags & SLOTF_4_BIT_CAPABLE)
741 mmc->caps |= MMC_CAP_4_BIT_DATA;
742 }
743
744 mmc->f_min = 400000;
745 mmc->f_max = 288000000;
746 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
747
748 mmc->max_segs = 52;
749 mmc->max_blk_size = 1 << 0xf;
750 mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff;
751 mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff;
752 mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
753
754 platform_set_drvdata(pdev, mmc);
755
756 ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
757 if (ret)
758 goto out_free_dma;
759
760 spin_lock_init(&host->lock);
761
762 ret = mmc_add_host(mmc);
763 if (ret)
764 goto out_free_irq;
765
766 dev_info(mmc_dev(host->mmc), "initialized\n");
767
768 return 0;
769
770out_free_irq:
771 free_irq(host->irq, host);
772out_free_dma:
773 if (host->dmach)
774 dma_release_channel(host->dmach);
775out_clk_put:
776 clk_disable(host->clk);
777 clk_put(host->clk);
778out_iounmap:
779 iounmap(host->base);
780out_mmc_free:
781 mmc_free_host(mmc);
782out_release_mem:
783 release_mem_region(iores->start, resource_size(iores));
784 return ret;
785}
786
/*
 * Remove: unwind probe in reverse order.  The mem resource pointer is
 * saved up front because mmc_free_host() releases host storage.
 */
static int mxs_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct resource *res = host->res;

	mmc_remove_host(mmc);

	free_irq(host->irq, host);

	platform_set_drvdata(pdev, NULL);

	if (host->dmach)
		dma_release_channel(host->dmach);

	clk_disable(host->clk);
	clk_put(host->clk);

	iounmap(host->base);

	mmc_free_host(mmc);

	release_mem_region(res->start, resource_size(res));

	return 0;
}
813
814#ifdef CONFIG_PM
/* Suspend: let the core quiesce the card, then gate the SSP clock. */
static int mxs_mmc_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	int ret = 0;

	ret = mmc_suspend_host(mmc);

	/* Clock is gated even if mmc_suspend_host() returned an error. */
	clk_disable(host->clk);

	return ret;
}
827
/* Resume: ungate the SSP clock before letting the core re-scan. */
static int mxs_mmc_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	int ret = 0;

	clk_enable(host->clk);

	ret = mmc_resume_host(mmc);

	return ret;
}
840
/* System sleep hooks (compiled only with CONFIG_PM). */
static const struct dev_pm_ops mxs_mmc_pm_ops = {
	.suspend = mxs_mmc_suspend,
	.resume = mxs_mmc_resume,
};
845#endif
846
/* Platform driver glue; PM ops are wired in only under CONFIG_PM. */
static struct platform_driver mxs_mmc_driver = {
	.probe = mxs_mmc_probe,
	.remove = mxs_mmc_remove,
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &mxs_mmc_pm_ops,
#endif
	},
};
858
/* Module entry: register the platform driver. */
static int __init mxs_mmc_init(void)
{
	return platform_driver_register(&mxs_mmc_driver);
}
863
/* Module exit: unregister the platform driver. */
static void __exit mxs_mmc_exit(void)
{
	platform_driver_unregister(&mxs_mmc_driver);
}
868
869module_init(mxs_mmc_init);
870module_exit(mxs_mmc_exit);
871
872MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
873MODULE_AUTHOR("Freescale Semiconductor");
874MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 1247e5de9faa..e2aecb7f1d5c 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -15,9 +15,11 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/irq.h>
18#include <linux/gpio.h> 19#include <linux/gpio.h>
19#include <linux/of.h> 20#include <linux/of.h>
20#include <linux/of_gpio.h> 21#include <linux/of_gpio.h>
22#include <linux/of_irq.h>
21#include <linux/spi/spi.h> 23#include <linux/spi/spi.h>
22#include <linux/spi/mmc_spi.h> 24#include <linux/spi/mmc_spi.h>
23#include <linux/mmc/core.h> 25#include <linux/mmc/core.h>
@@ -34,6 +36,7 @@ enum {
34struct of_mmc_spi { 36struct of_mmc_spi {
35 int gpios[NUM_GPIOS]; 37 int gpios[NUM_GPIOS];
36 bool alow_gpios[NUM_GPIOS]; 38 bool alow_gpios[NUM_GPIOS];
39 int detect_irq;
37 struct mmc_spi_platform_data pdata; 40 struct mmc_spi_platform_data pdata;
38}; 41};
39 42
@@ -61,6 +64,22 @@ static int of_mmc_spi_get_ro(struct device *dev)
61 return of_mmc_spi_read_gpio(dev, WP_GPIO); 64 return of_mmc_spi_read_gpio(dev, WP_GPIO);
62} 65}
63 66
/*
 * pdata->init hook: attach the DT-provided card-detect interrupt to
 * the handler supplied by the mmc_spi core.  The hard-irq handler is
 * left NULL, so the supplied handler runs in thread context.
 */
static int of_mmc_spi_init(struct device *dev,
		irqreturn_t (*irqhandler)(int, void *), void *mmc)
{
	struct of_mmc_spi *oms = to_of_mmc_spi(dev);

	return request_threaded_irq(oms->detect_irq, NULL, irqhandler, 0,
				    dev_name(dev), mmc);
}
75
/* pdata->exit hook: release the card-detect interrupt. */
static void of_mmc_spi_exit(struct device *dev, void *mmc)
{
	struct of_mmc_spi *oms = to_of_mmc_spi(dev);

	free_irq(oms->detect_irq, mmc);
}
82
64struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) 83struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
65{ 84{
66 struct device *dev = &spi->dev; 85 struct device *dev = &spi->dev;
@@ -121,8 +140,13 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
121 if (gpio_is_valid(oms->gpios[WP_GPIO])) 140 if (gpio_is_valid(oms->gpios[WP_GPIO]))
122 oms->pdata.get_ro = of_mmc_spi_get_ro; 141 oms->pdata.get_ro = of_mmc_spi_get_ro;
123 142
124 /* We don't support interrupts yet, let's poll. */ 143 oms->detect_irq = irq_of_parse_and_map(np, 0);
125 oms->pdata.caps |= MMC_CAP_NEEDS_POLL; 144 if (oms->detect_irq != NO_IRQ) {
145 oms->pdata.init = of_mmc_spi_init;
146 oms->pdata.exit = of_mmc_spi_exit;
147 } else {
148 oms->pdata.caps |= MMC_CAP_NEEDS_POLL;
149 }
126 150
127 dev->platform_data = &oms->pdata; 151 dev->platform_data = &oms->pdata;
128 return dev->platform_data; 152 return dev->platform_data;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 0c7e37f496ef..a6c329040140 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -173,6 +173,8 @@ struct mmc_omap_host {
173 struct omap_mmc_platform_data *pdata; 173 struct omap_mmc_platform_data *pdata;
174}; 174};
175 175
176static struct workqueue_struct *mmc_omap_wq;
177
176static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot) 178static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
177{ 179{
178 unsigned long tick_ns; 180 unsigned long tick_ns;
@@ -289,7 +291,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
289 host->next_slot = new_slot; 291 host->next_slot = new_slot;
290 host->mmc = new_slot->mmc; 292 host->mmc = new_slot->mmc;
291 spin_unlock_irqrestore(&host->slot_lock, flags); 293 spin_unlock_irqrestore(&host->slot_lock, flags);
292 schedule_work(&host->slot_release_work); 294 queue_work(mmc_omap_wq, &host->slot_release_work);
293 return; 295 return;
294 } 296 }
295 297
@@ -457,7 +459,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
457 } 459 }
458 460
459 host->stop_data = data; 461 host->stop_data = data;
460 schedule_work(&host->send_stop_work); 462 queue_work(mmc_omap_wq, &host->send_stop_work);
461} 463}
462 464
463static void 465static void
@@ -637,7 +639,7 @@ mmc_omap_cmd_timer(unsigned long data)
637 OMAP_MMC_WRITE(host, IE, 0); 639 OMAP_MMC_WRITE(host, IE, 0);
638 disable_irq(host->irq); 640 disable_irq(host->irq);
639 host->abort = 1; 641 host->abort = 1;
640 schedule_work(&host->cmd_abort_work); 642 queue_work(mmc_omap_wq, &host->cmd_abort_work);
641 } 643 }
642 spin_unlock_irqrestore(&host->slot_lock, flags); 644 spin_unlock_irqrestore(&host->slot_lock, flags);
643} 645}
@@ -826,11 +828,11 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
826 host->abort = 1; 828 host->abort = 1;
827 OMAP_MMC_WRITE(host, IE, 0); 829 OMAP_MMC_WRITE(host, IE, 0);
828 disable_irq_nosync(host->irq); 830 disable_irq_nosync(host->irq);
829 schedule_work(&host->cmd_abort_work); 831 queue_work(mmc_omap_wq, &host->cmd_abort_work);
830 return IRQ_HANDLED; 832 return IRQ_HANDLED;
831 } 833 }
832 834
833 if (end_command) 835 if (end_command && host->cmd)
834 mmc_omap_cmd_done(host, host->cmd); 836 mmc_omap_cmd_done(host, host->cmd);
835 if (host->data != NULL) { 837 if (host->data != NULL) {
836 if (transfer_error) 838 if (transfer_error)
@@ -1387,7 +1389,7 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
1387 1389
1388 tasklet_kill(&slot->cover_tasklet); 1390 tasklet_kill(&slot->cover_tasklet);
1389 del_timer_sync(&slot->cover_timer); 1391 del_timer_sync(&slot->cover_timer);
1390 flush_scheduled_work(); 1392 flush_workqueue(mmc_omap_wq);
1391 1393
1392 mmc_remove_host(mmc); 1394 mmc_remove_host(mmc);
1393 mmc_free_host(mmc); 1395 mmc_free_host(mmc);
@@ -1415,7 +1417,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1415 if (res == NULL || irq < 0) 1417 if (res == NULL || irq < 0)
1416 return -ENXIO; 1418 return -ENXIO;
1417 1419
1418 res = request_mem_region(res->start, res->end - res->start + 1, 1420 res = request_mem_region(res->start, resource_size(res),
1419 pdev->name); 1421 pdev->name);
1420 if (res == NULL) 1422 if (res == NULL)
1421 return -EBUSY; 1423 return -EBUSY;
@@ -1455,7 +1457,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1455 1457
1456 host->irq = irq; 1458 host->irq = irq;
1457 host->phys_base = host->mem_res->start; 1459 host->phys_base = host->mem_res->start;
1458 host->virt_base = ioremap(res->start, res->end - res->start + 1); 1460 host->virt_base = ioremap(res->start, resource_size(res));
1459 if (!host->virt_base) 1461 if (!host->virt_base)
1460 goto err_ioremap; 1462 goto err_ioremap;
1461 1463
@@ -1512,7 +1514,7 @@ err_free_mmc_host:
1512err_ioremap: 1514err_ioremap:
1513 kfree(host); 1515 kfree(host);
1514err_free_mem_region: 1516err_free_mem_region:
1515 release_mem_region(res->start, res->end - res->start + 1); 1517 release_mem_region(res->start, resource_size(res));
1516 return ret; 1518 return ret;
1517} 1519}
1518 1520
@@ -1608,12 +1610,22 @@ static struct platform_driver mmc_omap_driver = {
1608 1610
1609static int __init mmc_omap_init(void) 1611static int __init mmc_omap_init(void)
1610{ 1612{
1611 return platform_driver_probe(&mmc_omap_driver, mmc_omap_probe); 1613 int ret;
1614
1615 mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1616 if (!mmc_omap_wq)
1617 return -ENOMEM;
1618
1619 ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
1620 if (ret)
1621 destroy_workqueue(mmc_omap_wq);
1622 return ret;
1612} 1623}
1613 1624
1614static void __exit mmc_omap_exit(void) 1625static void __exit mmc_omap_exit(void)
1615{ 1626{
1616 platform_driver_unregister(&mmc_omap_driver); 1627 platform_driver_unregister(&mmc_omap_driver);
1628 destroy_workqueue(mmc_omap_wq);
1617} 1629}
1618 1630
1619module_init(mmc_omap_init); 1631module_init(mmc_omap_init);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 5d46021cbb57..259ece047afc 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -118,7 +118,7 @@
118 118
119#define MMC_TIMEOUT_MS 20 119#define MMC_TIMEOUT_MS 20
120#define OMAP_MMC_MASTER_CLOCK 96000000 120#define OMAP_MMC_MASTER_CLOCK 96000000
121#define DRIVER_NAME "mmci-omap-hs" 121#define DRIVER_NAME "omap_hsmmc"
122 122
123/* Timeouts for entering power saving states on inactivity, msec */ 123/* Timeouts for entering power saving states on inactivity, msec */
124#define OMAP_MMC_DISABLED_TIMEOUT 100 124#define OMAP_MMC_DISABLED_TIMEOUT 100
@@ -260,7 +260,7 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on,
260 return ret; 260 return ret;
261} 261}
262 262
263static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on, 263static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on,
264 int vdd) 264 int vdd)
265{ 265{
266 struct omap_hsmmc_host *host = 266 struct omap_hsmmc_host *host =
@@ -316,6 +316,12 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
316 return ret; 316 return ret;
317} 317}
318 318
319static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on,
320 int vdd)
321{
322 return 0;
323}
324
319static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, 325static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
320 int vdd, int cardsleep) 326 int vdd, int cardsleep)
321{ 327{
@@ -326,7 +332,7 @@ static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
326 return regulator_set_mode(host->vcc, mode); 332 return regulator_set_mode(host->vcc, mode);
327} 333}
328 334
329static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep, 335static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep,
330 int vdd, int cardsleep) 336 int vdd, int cardsleep)
331{ 337{
332 struct omap_hsmmc_host *host = 338 struct omap_hsmmc_host *host =
@@ -365,6 +371,12 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
365 return regulator_enable(host->vcc_aux); 371 return regulator_enable(host->vcc_aux);
366} 372}
367 373
374static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep,
375 int vdd, int cardsleep)
376{
377 return 0;
378}
379
368static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) 380static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
369{ 381{
370 struct regulator *reg; 382 struct regulator *reg;
@@ -379,10 +391,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
379 break; 391 break;
380 case OMAP_MMC2_DEVID: 392 case OMAP_MMC2_DEVID:
381 case OMAP_MMC3_DEVID: 393 case OMAP_MMC3_DEVID:
394 case OMAP_MMC5_DEVID:
382 /* Off-chip level shifting, or none */ 395 /* Off-chip level shifting, or none */
383 mmc_slot(host).set_power = omap_hsmmc_23_set_power; 396 mmc_slot(host).set_power = omap_hsmmc_235_set_power;
384 mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep; 397 mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep;
385 break; 398 break;
399 case OMAP_MMC4_DEVID:
400 mmc_slot(host).set_power = omap_hsmmc_4_set_power;
401 mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep;
386 default: 402 default:
387 pr_err("MMC%d configuration not supported!\n", host->id); 403 pr_err("MMC%d configuration not supported!\n", host->id);
388 return -EINVAL; 404 return -EINVAL;
@@ -1555,7 +1571,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1555 break; 1571 break;
1556 } 1572 }
1557 1573
1558 if (host->id == OMAP_MMC1_DEVID) { 1574 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1559 /* Only MMC1 can interface at 3V without some flavor 1575 /* Only MMC1 can interface at 3V without some flavor
1560 * of external transceiver; but they all handle 1.8V. 1576 * of external transceiver; but they all handle 1.8V.
1561 */ 1577 */
@@ -1647,7 +1663,7 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
1647 u32 hctl, capa, value; 1663 u32 hctl, capa, value;
1648 1664
1649 /* Only MMC1 supports 3.0V */ 1665 /* Only MMC1 supports 3.0V */
1650 if (host->id == OMAP_MMC1_DEVID) { 1666 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1651 hctl = SDVS30; 1667 hctl = SDVS30;
1652 capa = VS30 | VS18; 1668 capa = VS30 | VS18;
1653 } else { 1669 } else {
@@ -2031,8 +2047,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2031 2047
2032 res->start += pdata->reg_offset; 2048 res->start += pdata->reg_offset;
2033 res->end += pdata->reg_offset; 2049 res->end += pdata->reg_offset;
2034 res = request_mem_region(res->start, res->end - res->start + 1, 2050 res = request_mem_region(res->start, resource_size(res), pdev->name);
2035 pdev->name);
2036 if (res == NULL) 2051 if (res == NULL)
2037 return -EBUSY; 2052 return -EBUSY;
2038 2053
@@ -2101,14 +2116,14 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2101 /* we start off in DISABLED state */ 2116 /* we start off in DISABLED state */
2102 host->dpm_state = DISABLED; 2117 host->dpm_state = DISABLED;
2103 2118
2104 if (mmc_host_enable(host->mmc) != 0) { 2119 if (clk_enable(host->iclk) != 0) {
2105 clk_put(host->iclk); 2120 clk_put(host->iclk);
2106 clk_put(host->fclk); 2121 clk_put(host->fclk);
2107 goto err1; 2122 goto err1;
2108 } 2123 }
2109 2124
2110 if (clk_enable(host->iclk) != 0) { 2125 if (mmc_host_enable(host->mmc) != 0) {
2111 mmc_host_disable(host->mmc); 2126 clk_disable(host->iclk);
2112 clk_put(host->iclk); 2127 clk_put(host->iclk);
2113 clk_put(host->fclk); 2128 clk_put(host->fclk);
2114 goto err1; 2129 goto err1;
@@ -2271,7 +2286,7 @@ err1:
2271err_alloc: 2286err_alloc:
2272 omap_hsmmc_gpio_free(pdata); 2287 omap_hsmmc_gpio_free(pdata);
2273err: 2288err:
2274 release_mem_region(res->start, res->end - res->start + 1); 2289 release_mem_region(res->start, resource_size(res));
2275 return ret; 2290 return ret;
2276} 2291}
2277 2292
@@ -2290,7 +2305,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2290 free_irq(host->irq, host); 2305 free_irq(host->irq, host);
2291 if (mmc_slot(host).card_detect_irq) 2306 if (mmc_slot(host).card_detect_irq)
2292 free_irq(mmc_slot(host).card_detect_irq, host); 2307 free_irq(mmc_slot(host).card_detect_irq, host);
2293 flush_scheduled_work(); 2308 flush_work_sync(&host->mmc_carddetect_work);
2294 2309
2295 mmc_host_disable(host->mmc); 2310 mmc_host_disable(host->mmc);
2296 clk_disable(host->iclk); 2311 clk_disable(host->iclk);
@@ -2308,7 +2323,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2308 2323
2309 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2324 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2310 if (res) 2325 if (res)
2311 release_mem_region(res->start, res->end - res->start + 1); 2326 release_mem_region(res->start, resource_size(res));
2312 platform_set_drvdata(pdev, NULL); 2327 platform_set_drvdata(pdev, NULL);
2313 2328
2314 return 0; 2329 return 0;
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 1ccd4b256cee..a04f87d7ee3d 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -874,7 +874,7 @@ static void finalize_request(struct s3cmci_host *host)
874 if (!mrq->data) 874 if (!mrq->data)
875 goto request_done; 875 goto request_done;
876 876
877 /* Calulate the amout of bytes transfer if there was no error */ 877 /* Calculate the amout of bytes transfer if there was no error */
878 if (mrq->data->error == 0) { 878 if (mrq->data->error == 0) {
879 mrq->data->bytes_xfered = 879 mrq->data->bytes_xfered =
880 (mrq->data->blocks * mrq->data->blksz); 880 (mrq->data->blocks * mrq->data->blksz);
@@ -882,7 +882,7 @@ static void finalize_request(struct s3cmci_host *host)
882 mrq->data->bytes_xfered = 0; 882 mrq->data->bytes_xfered = 0;
883 } 883 }
884 884
885 /* If we had an error while transfering data we flush the 885 /* If we had an error while transferring data we flush the
886 * DMA channel and the fifo to clear out any garbage. */ 886 * DMA channel and the fifo to clear out any garbage. */
887 if (mrq->data->error != 0) { 887 if (mrq->data->error != 0) {
888 if (s3cmci_host_usedma(host)) 888 if (s3cmci_host_usedma(host))
@@ -980,7 +980,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
980 980
981 if ((data->blksz & 3) != 0) { 981 if ((data->blksz & 3) != 0) {
982 /* We cannot deal with unaligned blocks with more than 982 /* We cannot deal with unaligned blocks with more than
983 * one block being transfered. */ 983 * one block being transferred. */
984 984
985 if (data->blocks > 1) { 985 if (data->blocks > 1) {
986 pr_warning("%s: can't do non-word sized block transfers (blksz %d)\n", __func__, data->blksz); 986 pr_warning("%s: can't do non-word sized block transfers (blksz %d)\n", __func__, data->blksz);
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
new file mode 100644
index 000000000000..2aeef4ffed8c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -0,0 +1,70 @@
1/*
2 * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
3 *
4 * Author: Saeed Bishara <saeed@marvell.com>
5 * Mike Rapoport <mike@compulab.co.il>
6 * Based on sdhci-cns3xxx.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/io.h>
23#include <linux/mmc/host.h>
24
25#include "sdhci.h"
26#include "sdhci-pltfm.h"
27
28static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
29{
30 u16 ret;
31
32 switch (reg) {
33 case SDHCI_HOST_VERSION:
34 case SDHCI_SLOT_INT_STATUS:
35 /* those registers don't exist */
36 return 0;
37 default:
38 ret = readw(host->ioaddr + reg);
39 }
40 return ret;
41}
42
43static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
44{
45 u32 ret;
46
47 switch (reg) {
48 case SDHCI_CAPABILITIES:
49 ret = readl(host->ioaddr + reg);
50 /* Mask the support for 3.0V */
51 ret &= ~SDHCI_CAN_VDD_300;
52 break;
53 default:
54 ret = readl(host->ioaddr + reg);
55 }
56 return ret;
57}
58
59static struct sdhci_ops sdhci_dove_ops = {
60 .read_w = sdhci_dove_readw,
61 .read_l = sdhci_dove_readl,
62};
63
64struct sdhci_pltfm_data sdhci_dove_pdata = {
65 .ops = &sdhci_dove_ops,
66 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
67 SDHCI_QUIRK_NO_BUSY_IRQ |
68 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
69 SDHCI_QUIRK_FORCE_DMA,
70};
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 9b82910b9dbb..a19967d0bfc4 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -15,13 +15,41 @@
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/gpio.h>
19#include <linux/slab.h>
18#include <linux/mmc/host.h> 20#include <linux/mmc/host.h>
19#include <linux/mmc/sdhci-pltfm.h> 21#include <linux/mmc/sdhci-pltfm.h>
22#include <linux/mmc/mmc.h>
23#include <linux/mmc/sdio.h>
20#include <mach/hardware.h> 24#include <mach/hardware.h>
25#include <mach/esdhc.h>
21#include "sdhci.h" 26#include "sdhci.h"
22#include "sdhci-pltfm.h" 27#include "sdhci-pltfm.h"
23#include "sdhci-esdhc.h" 28#include "sdhci-esdhc.h"
24 29
30/* VENDOR SPEC register */
31#define SDHCI_VENDOR_SPEC 0xC0
32#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
33
34#define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0)
35/*
36 * The CMDTYPE of the CMD register (offset 0xE) should be set to
37 * "11" when the STOP CMD12 is issued on imx53 to abort one
38 * open ended multi-blk IO. Otherwise the TC INT wouldn't
39 * be generated.
40 * In exact block transfer, the controller doesn't complete the
41 * operations automatically as required at the end of the
42 * transfer and remains on hold if the abort command is not sent.
43 * As a result, the TC flag is not asserted and SW received timeout
44 * exeception. Bit1 of Vendor Spec registor is used to fix it.
45 */
46#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1)
47
48struct pltfm_imx_data {
49 int flags;
50 u32 scratchpad;
51};
52
25static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) 53static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
26{ 54{
27 void __iomem *base = host->ioaddr + (reg & ~0x3); 55 void __iomem *base = host->ioaddr + (reg & ~0x3);
@@ -30,6 +58,56 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
30 writel(((readl(base) & ~(mask << shift)) | (val << shift)), base); 58 writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
31} 59}
32 60
61static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
62{
63 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
64 struct pltfm_imx_data *imx_data = pltfm_host->priv;
65
66 /* fake CARD_PRESENT flag on mx25/35 */
67 u32 val = readl(host->ioaddr + reg);
68
69 if (unlikely((reg == SDHCI_PRESENT_STATE)
70 && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) {
71 struct esdhc_platform_data *boarddata =
72 host->mmc->parent->platform_data;
73
74 if (boarddata && gpio_is_valid(boarddata->cd_gpio)
75 && gpio_get_value(boarddata->cd_gpio))
76 /* no card, if a valid gpio says so... */
77 val &= SDHCI_CARD_PRESENT;
78 else
79 /* ... in all other cases assume card is present */
80 val |= SDHCI_CARD_PRESENT;
81 }
82
83 return val;
84}
85
86static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
87{
88 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
89 struct pltfm_imx_data *imx_data = pltfm_host->priv;
90
91 if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)
92 && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP)))
93 /*
94 * these interrupts won't work with a custom card_detect gpio
95 * (only applied to mx25/35)
96 */
97 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
98
99 if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
100 && (reg == SDHCI_INT_STATUS)
101 && (val & SDHCI_INT_DATA_END))) {
102 u32 v;
103 v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
104 v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK;
105 writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
106 }
107
108 writel(val, host->ioaddr + reg);
109}
110
33static u16 esdhc_readw_le(struct sdhci_host *host, int reg) 111static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
34{ 112{
35 if (unlikely(reg == SDHCI_HOST_VERSION)) 113 if (unlikely(reg == SDHCI_HOST_VERSION))
@@ -41,6 +119,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
41static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) 119static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
42{ 120{
43 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 121 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
122 struct pltfm_imx_data *imx_data = pltfm_host->priv;
44 123
45 switch (reg) { 124 switch (reg) {
46 case SDHCI_TRANSFER_MODE: 125 case SDHCI_TRANSFER_MODE:
@@ -48,10 +127,22 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
48 * Postpone this write, we must do it together with a 127 * Postpone this write, we must do it together with a
49 * command write that is down below. 128 * command write that is down below.
50 */ 129 */
51 pltfm_host->scratchpad = val; 130 if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
131 && (host->cmd->opcode == SD_IO_RW_EXTENDED)
132 && (host->cmd->data->blocks > 1)
133 && (host->cmd->data->flags & MMC_DATA_READ)) {
134 u32 v;
135 v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
136 v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK;
137 writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
138 }
139 imx_data->scratchpad = val;
52 return; 140 return;
53 case SDHCI_COMMAND: 141 case SDHCI_COMMAND:
54 writel(val << 16 | pltfm_host->scratchpad, 142 if ((host->cmd->opcode == MMC_STOP_TRANSMISSION)
143 && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
144 val |= SDHCI_CMD_ABORTCMD;
145 writel(val << 16 | imx_data->scratchpad,
55 host->ioaddr + SDHCI_TRANSFER_MODE); 146 host->ioaddr + SDHCI_TRANSFER_MODE);
56 return; 147 return;
57 case SDHCI_BLOCK_SIZE: 148 case SDHCI_BLOCK_SIZE:
@@ -100,10 +191,42 @@ static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
100 return clk_get_rate(pltfm_host->clk) / 256 / 16; 191 return clk_get_rate(pltfm_host->clk) / 256 / 16;
101} 192}
102 193
194static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
195{
196 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
197
198 if (boarddata && gpio_is_valid(boarddata->wp_gpio))
199 return gpio_get_value(boarddata->wp_gpio);
200 else
201 return -ENOSYS;
202}
203
204static struct sdhci_ops sdhci_esdhc_ops = {
205 .read_l = esdhc_readl_le,
206 .read_w = esdhc_readw_le,
207 .write_l = esdhc_writel_le,
208 .write_w = esdhc_writew_le,
209 .write_b = esdhc_writeb_le,
210 .set_clock = esdhc_set_clock,
211 .get_max_clock = esdhc_pltfm_get_max_clock,
212 .get_min_clock = esdhc_pltfm_get_min_clock,
213};
214
215static irqreturn_t cd_irq(int irq, void *data)
216{
217 struct sdhci_host *sdhost = (struct sdhci_host *)data;
218
219 tasklet_schedule(&sdhost->card_tasklet);
220 return IRQ_HANDLED;
221};
222
103static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata) 223static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
104{ 224{
105 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 225 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
226 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
106 struct clk *clk; 227 struct clk *clk;
228 int err;
229 struct pltfm_imx_data *imx_data;
107 230
108 clk = clk_get(mmc_dev(host->mmc), NULL); 231 clk = clk_get(mmc_dev(host->mmc), NULL);
109 if (IS_ERR(clk)) { 232 if (IS_ERR(clk)) {
@@ -113,35 +236,94 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
113 clk_enable(clk); 236 clk_enable(clk);
114 pltfm_host->clk = clk; 237 pltfm_host->clk = clk;
115 238
116 if (cpu_is_mx35() || cpu_is_mx51()) 239 imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL);
240 if (!imx_data) {
241 clk_disable(pltfm_host->clk);
242 clk_put(pltfm_host->clk);
243 return -ENOMEM;
244 }
245 pltfm_host->priv = imx_data;
246
247 if (!cpu_is_mx25())
117 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 248 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
118 249
119 /* Fix errata ENGcm07207 which is present on i.MX25 and i.MX35 */ 250 if (cpu_is_mx25() || cpu_is_mx35()) {
120 if (cpu_is_mx25() || cpu_is_mx35()) 251 /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
121 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; 252 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
253 /* write_protect can't be routed to controller, use gpio */
254 sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
255 }
256
257 if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51()))
258 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
259
260 if (boarddata) {
261 err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
262 if (err) {
263 dev_warn(mmc_dev(host->mmc),
264 "no write-protect pin available!\n");
265 boarddata->wp_gpio = err;
266 }
122 267
268 err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD");
269 if (err) {
270 dev_warn(mmc_dev(host->mmc),
271 "no card-detect pin available!\n");
272 goto no_card_detect_pin;
273 }
274
275 /* i.MX5x has issues to be researched */
276 if (!cpu_is_mx25() && !cpu_is_mx35())
277 goto not_supported;
278
279 err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq,
280 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
281 mmc_hostname(host->mmc), host);
282 if (err) {
283 dev_warn(mmc_dev(host->mmc), "request irq error\n");
284 goto no_card_detect_irq;
285 }
286
287 imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP;
288 /* Now we have a working card_detect again */
289 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
290 }
291
292 return 0;
293
294 no_card_detect_irq:
295 gpio_free(boarddata->cd_gpio);
296 no_card_detect_pin:
297 boarddata->cd_gpio = err;
298 not_supported:
299 kfree(imx_data);
123 return 0; 300 return 0;
124} 301}
125 302
126static void esdhc_pltfm_exit(struct sdhci_host *host) 303static void esdhc_pltfm_exit(struct sdhci_host *host)
127{ 304{
128 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 305 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
306 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
307 struct pltfm_imx_data *imx_data = pltfm_host->priv;
308
309 if (boarddata && gpio_is_valid(boarddata->wp_gpio))
310 gpio_free(boarddata->wp_gpio);
311
312 if (boarddata && gpio_is_valid(boarddata->cd_gpio)) {
313 gpio_free(boarddata->cd_gpio);
314
315 if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION))
316 free_irq(gpio_to_irq(boarddata->cd_gpio), host);
317 }
129 318
130 clk_disable(pltfm_host->clk); 319 clk_disable(pltfm_host->clk);
131 clk_put(pltfm_host->clk); 320 clk_put(pltfm_host->clk);
321 kfree(imx_data);
132} 322}
133 323
134static struct sdhci_ops sdhci_esdhc_ops = {
135 .read_w = esdhc_readw_le,
136 .write_w = esdhc_writew_le,
137 .write_b = esdhc_writeb_le,
138 .set_clock = esdhc_set_clock,
139 .get_max_clock = esdhc_pltfm_get_max_clock,
140 .get_min_clock = esdhc_pltfm_get_min_clock,
141};
142
143struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { 324struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
144 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA, 325 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA
326 | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
145 /* ADMA has issues. Might be fixable */ 327 /* ADMA has issues. Might be fixable */
146 .ops = &sdhci_esdhc_ops, 328 .ops = &sdhci_esdhc_ops,
147 .init = esdhc_pltfm_init, 329 .init = esdhc_pltfm_init,
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index afaf1bc4913a..c3b08f111942 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -19,13 +19,11 @@
19 */ 19 */
20 20
21#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ 21#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
22 SDHCI_QUIRK_BROKEN_CARD_DETECTION | \
23 SDHCI_QUIRK_NO_BUSY_IRQ | \ 22 SDHCI_QUIRK_NO_BUSY_IRQ | \
24 SDHCI_QUIRK_NONSTANDARD_CLOCK | \ 23 SDHCI_QUIRK_NONSTANDARD_CLOCK | \
25 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ 24 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
26 SDHCI_QUIRK_PIO_NEEDS_DELAY | \ 25 SDHCI_QUIRK_PIO_NEEDS_DELAY | \
27 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \ 26 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
28 SDHCI_QUIRK_NO_CARD_NO_RESET)
29 27
30#define ESDHC_SYSTEM_CONTROL 0x2c 28#define ESDHC_SYSTEM_CONTROL 0x2c
31#define ESDHC_CLOCK_MASK 0x0000fff0 29#define ESDHC_CLOCK_MASK 0x0000fff0
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index c51b71174c1d..f9b611fc773e 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -13,6 +13,7 @@
13 * your option) any later version. 13 * your option) any later version.
14 */ 14 */
15 15
16#include <linux/err.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/io.h> 19#include <linux/io.h>
@@ -20,8 +21,12 @@
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/of.h> 22#include <linux/of.h>
22#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#include <linux/of_address.h>
25#include <linux/of_irq.h>
23#include <linux/mmc/host.h> 26#include <linux/mmc/host.h>
27#ifdef CONFIG_PPC
24#include <asm/machdep.h> 28#include <asm/machdep.h>
29#endif
25#include "sdhci-of.h" 30#include "sdhci-of.h"
26#include "sdhci.h" 31#include "sdhci.h"
27 32
@@ -112,20 +117,27 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
112 return true; 117 return true;
113 118
114 /* Old device trees don't have the wp-inverted property. */ 119 /* Old device trees don't have the wp-inverted property. */
120#ifdef CONFIG_PPC
115 return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds); 121 return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
122#else
123 return false;
124#endif
116} 125}
117 126
118static int __devinit sdhci_of_probe(struct platform_device *ofdev, 127static int __devinit sdhci_of_probe(struct platform_device *ofdev)
119 const struct of_device_id *match)
120{ 128{
121 struct device_node *np = ofdev->dev.of_node; 129 struct device_node *np = ofdev->dev.of_node;
122 struct sdhci_of_data *sdhci_of_data = match->data; 130 struct sdhci_of_data *sdhci_of_data;
123 struct sdhci_host *host; 131 struct sdhci_host *host;
124 struct sdhci_of_host *of_host; 132 struct sdhci_of_host *of_host;
125 const u32 *clk; 133 const __be32 *clk;
126 int size; 134 int size;
127 int ret; 135 int ret;
128 136
137 if (!ofdev->dev.of_match)
138 return -EINVAL;
139 sdhci_of_data = ofdev->dev.of_match->data;
140
129 if (!of_device_is_available(np)) 141 if (!of_device_is_available(np))
130 return -ENODEV; 142 return -ENODEV;
131 143
@@ -166,7 +178,7 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev,
166 178
167 clk = of_get_property(np, "clock-frequency", &size); 179 clk = of_get_property(np, "clock-frequency", &size);
168 if (clk && size == sizeof(*clk) && *clk) 180 if (clk && size == sizeof(*clk) && *clk)
169 of_host->clock = *clk; 181 of_host->clock = be32_to_cpup(clk);
170 182
171 ret = sdhci_add_host(host); 183 ret = sdhci_add_host(host);
172 if (ret) 184 if (ret)
@@ -208,7 +220,7 @@ static const struct of_device_id sdhci_of_match[] = {
208}; 220};
209MODULE_DEVICE_TABLE(of, sdhci_of_match); 221MODULE_DEVICE_TABLE(of, sdhci_of_match);
210 222
211static struct of_platform_driver sdhci_of_driver = { 223static struct platform_driver sdhci_of_driver = {
212 .driver = { 224 .driver = {
213 .name = "sdhci-of", 225 .name = "sdhci-of",
214 .owner = THIS_MODULE, 226 .owner = THIS_MODULE,
@@ -222,13 +234,13 @@ static struct of_platform_driver sdhci_of_driver = {
222 234
223static int __init sdhci_of_init(void) 235static int __init sdhci_of_init(void)
224{ 236{
225 return of_register_platform_driver(&sdhci_of_driver); 237 return platform_driver_register(&sdhci_of_driver);
226} 238}
227module_init(sdhci_of_init); 239module_init(sdhci_of_init);
228 240
229static void __exit sdhci_of_exit(void) 241static void __exit sdhci_of_exit(void)
230{ 242{
231 of_unregister_platform_driver(&sdhci_of_driver); 243 platform_driver_unregister(&sdhci_of_driver);
232} 244}
233module_exit(sdhci_of_exit); 245module_exit(sdhci_of_exit);
234 246
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index fcd0e1fcba44..ba40d6d035c7 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -73,7 +73,9 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
73} 73}
74 74
75struct sdhci_of_data sdhci_esdhc = { 75struct sdhci_of_data sdhci_esdhc = {
76 .quirks = ESDHC_DEFAULT_QUIRKS, 76 /* card detection could be handled via GPIO */
77 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
78 | SDHCI_QUIRK_NO_CARD_NO_RESET,
77 .ops = { 79 .ops = {
78 .read_l = sdhci_be32bs_readl, 80 .read_l = sdhci_be32bs_readl,
79 .read_w = esdhc_readw, 81 .read_w = esdhc_readw,
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 3d9c2460d437..f8b5f37007b2 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -176,6 +176,74 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
176 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 176 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
177}; 177};
178 178
179/* O2Micro extra registers */
180#define O2_SD_LOCK_WP 0xD3
181#define O2_SD_MULTI_VCC3V 0xEE
182#define O2_SD_CLKREQ 0xEC
183#define O2_SD_CAPS 0xE0
184#define O2_SD_ADMA1 0xE2
185#define O2_SD_ADMA2 0xE7
186#define O2_SD_INF_MOD 0xF1
187
188static int o2_probe(struct sdhci_pci_chip *chip)
189{
190 int ret;
191 u8 scratch;
192
193 switch (chip->pdev->device) {
194 case PCI_DEVICE_ID_O2_8220:
195 case PCI_DEVICE_ID_O2_8221:
196 case PCI_DEVICE_ID_O2_8320:
197 case PCI_DEVICE_ID_O2_8321:
198 /* This extra setup is required due to broken ADMA. */
199 ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
200 if (ret)
201 return ret;
202 scratch &= 0x7f;
203 pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
204
205 /* Set Multi 3 to VCC3V# */
206 pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
207
208 /* Disable CLK_REQ# support after media DET */
209 ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
210 if (ret)
211 return ret;
212 scratch |= 0x20;
213 pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
214
215 /* Choose capabilities, enable SDMA. We have to write 0x01
216 * to the capabilities register first to unlock it.
217 */
218 ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
219 if (ret)
220 return ret;
221 scratch |= 0x01;
222 pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
223 pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
224
225 /* Disable ADMA1/2 */
226 pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
227 pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
228
229 /* Disable the infinite transfer mode */
230 ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
231 if (ret)
232 return ret;
233 scratch |= 0x08;
234 pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
235
236 /* Lock WP */
237 ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
238 if (ret)
239 return ret;
240 scratch |= 0x80;
241 pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
242 }
243
244 return 0;
245}
246
179static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) 247static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
180{ 248{
181 u8 scratch; 249 u8 scratch;
@@ -204,6 +272,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
204static int jmicron_probe(struct sdhci_pci_chip *chip) 272static int jmicron_probe(struct sdhci_pci_chip *chip)
205{ 273{
206 int ret; 274 int ret;
275 u16 mmcdev = 0;
207 276
208 if (chip->pdev->revision == 0) { 277 if (chip->pdev->revision == 0) {
209 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR | 278 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
@@ -225,12 +294,17 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
225 * 2. The MMC interface has a lower subfunction number 294 * 2. The MMC interface has a lower subfunction number
226 * than the SD interface. 295 * than the SD interface.
227 */ 296 */
228 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) { 297 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
298 mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
299 else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
300 mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
301
302 if (mmcdev) {
229 struct pci_dev *sd_dev; 303 struct pci_dev *sd_dev;
230 304
231 sd_dev = NULL; 305 sd_dev = NULL;
232 while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON, 306 while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
233 PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) { 307 mmcdev, sd_dev)) != NULL) {
234 if ((PCI_SLOT(chip->pdev->devfn) == 308 if ((PCI_SLOT(chip->pdev->devfn) ==
235 PCI_SLOT(sd_dev->devfn)) && 309 PCI_SLOT(sd_dev->devfn)) &&
236 (chip->pdev->bus == sd_dev->bus)) 310 (chip->pdev->bus == sd_dev->bus))
@@ -290,13 +364,25 @@ static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
290 slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; 364 slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
291 } 365 }
292 366
367 /* JM388 MMC doesn't support 1.8V while SD supports it */
368 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
369 slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
370 MMC_VDD_29_30 | MMC_VDD_30_31 |
371 MMC_VDD_165_195; /* allow 1.8V */
372 slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
373 MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
374 }
375
293 /* 376 /*
294 * The secondary interface requires a bit set to get the 377 * The secondary interface requires a bit set to get the
295 * interrupts. 378 * interrupts.
296 */ 379 */
297 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) 380 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
381 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
298 jmicron_enable_mmc(slot->host, 1); 382 jmicron_enable_mmc(slot->host, 1);
299 383
384 slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
385
300 return 0; 386 return 0;
301} 387}
302 388
@@ -305,7 +391,8 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
305 if (dead) 391 if (dead)
306 return; 392 return;
307 393
308 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) 394 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
395 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
309 jmicron_enable_mmc(slot->host, 0); 396 jmicron_enable_mmc(slot->host, 0);
310} 397}
311 398
@@ -313,7 +400,8 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
313{ 400{
314 int i; 401 int i;
315 402
316 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { 403 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
404 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
317 for (i = 0;i < chip->num_slots;i++) 405 for (i = 0;i < chip->num_slots;i++)
318 jmicron_enable_mmc(chip->slots[i]->host, 0); 406 jmicron_enable_mmc(chip->slots[i]->host, 0);
319 } 407 }
@@ -325,7 +413,8 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
325{ 413{
326 int ret, i; 414 int ret, i;
327 415
328 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { 416 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
417 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
329 for (i = 0;i < chip->num_slots;i++) 418 for (i = 0;i < chip->num_slots;i++)
330 jmicron_enable_mmc(chip->slots[i]->host, 1); 419 jmicron_enable_mmc(chip->slots[i]->host, 1);
331 } 420 }
@@ -339,6 +428,10 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
339 return 0; 428 return 0;
340} 429}
341 430
431static const struct sdhci_pci_fixes sdhci_o2 = {
432 .probe = o2_probe,
433};
434
342static const struct sdhci_pci_fixes sdhci_jmicron = { 435static const struct sdhci_pci_fixes sdhci_jmicron = {
343 .probe = jmicron_probe, 436 .probe = jmicron_probe,
344 437
@@ -454,6 +547,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
454 }, 547 },
455 548
456 { 549 {
550 .vendor = PCI_VENDOR_ID_RICOH,
551 .device = 0xe823,
552 .subvendor = PCI_ANY_ID,
553 .subdevice = PCI_ANY_ID,
554 .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
555 },
556
557 {
457 .vendor = PCI_VENDOR_ID_ENE, 558 .vendor = PCI_VENDOR_ID_ENE,
458 .device = PCI_DEVICE_ID_ENE_CB712_SD, 559 .device = PCI_DEVICE_ID_ENE_CB712_SD,
459 .subvendor = PCI_ANY_ID, 560 .subvendor = PCI_ANY_ID,
@@ -510,6 +611,22 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
510 }, 611 },
511 612
512 { 613 {
614 .vendor = PCI_VENDOR_ID_JMICRON,
615 .device = PCI_DEVICE_ID_JMICRON_JMB388_SD,
616 .subvendor = PCI_ANY_ID,
617 .subdevice = PCI_ANY_ID,
618 .driver_data = (kernel_ulong_t)&sdhci_jmicron,
619 },
620
621 {
622 .vendor = PCI_VENDOR_ID_JMICRON,
623 .device = PCI_DEVICE_ID_JMICRON_JMB388_ESD,
624 .subvendor = PCI_ANY_ID,
625 .subdevice = PCI_ANY_ID,
626 .driver_data = (kernel_ulong_t)&sdhci_jmicron,
627 },
628
629 {
513 .vendor = PCI_VENDOR_ID_SYSKONNECT, 630 .vendor = PCI_VENDOR_ID_SYSKONNECT,
514 .device = 0x8000, 631 .device = 0x8000,
515 .subvendor = PCI_ANY_ID, 632 .subvendor = PCI_ANY_ID,
@@ -589,6 +706,46 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
589 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, 706 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
590 }, 707 },
591 708
709 {
710 .vendor = PCI_VENDOR_ID_O2,
711 .device = PCI_DEVICE_ID_O2_8120,
712 .subvendor = PCI_ANY_ID,
713 .subdevice = PCI_ANY_ID,
714 .driver_data = (kernel_ulong_t)&sdhci_o2,
715 },
716
717 {
718 .vendor = PCI_VENDOR_ID_O2,
719 .device = PCI_DEVICE_ID_O2_8220,
720 .subvendor = PCI_ANY_ID,
721 .subdevice = PCI_ANY_ID,
722 .driver_data = (kernel_ulong_t)&sdhci_o2,
723 },
724
725 {
726 .vendor = PCI_VENDOR_ID_O2,
727 .device = PCI_DEVICE_ID_O2_8221,
728 .subvendor = PCI_ANY_ID,
729 .subdevice = PCI_ANY_ID,
730 .driver_data = (kernel_ulong_t)&sdhci_o2,
731 },
732
733 {
734 .vendor = PCI_VENDOR_ID_O2,
735 .device = PCI_DEVICE_ID_O2_8320,
736 .subvendor = PCI_ANY_ID,
737 .subdevice = PCI_ANY_ID,
738 .driver_data = (kernel_ulong_t)&sdhci_o2,
739 },
740
741 {
742 .vendor = PCI_VENDOR_ID_O2,
743 .device = PCI_DEVICE_ID_O2_8321,
744 .subvendor = PCI_ANY_ID,
745 .subdevice = PCI_ANY_ID,
746 .driver_data = (kernel_ulong_t)&sdhci_o2,
747 },
748
592 { /* Generic SD host controller */ 749 { /* Generic SD host controller */
593 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) 750 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
594 }, 751 },
@@ -751,9 +908,6 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
751{ 908{
752 struct sdhci_pci_slot *slot; 909 struct sdhci_pci_slot *slot;
753 struct sdhci_host *host; 910 struct sdhci_host *host;
754
755 resource_size_t addr;
756
757 int ret; 911 int ret;
758 912
759 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 913 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
@@ -800,10 +954,10 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
800 goto free; 954 goto free;
801 } 955 }
802 956
803 addr = pci_resource_start(pdev, bar);
804 host->ioaddr = pci_ioremap_bar(pdev, bar); 957 host->ioaddr = pci_ioremap_bar(pdev, bar);
805 if (!host->ioaddr) { 958 if (!host->ioaddr) {
806 dev_err(&pdev->dev, "failed to remap registers\n"); 959 dev_err(&pdev->dev, "failed to remap registers\n");
960 ret = -ENOMEM;
807 goto release; 961 goto release;
808 } 962 }
809 963
@@ -863,16 +1017,14 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
863 struct sdhci_pci_chip *chip; 1017 struct sdhci_pci_chip *chip;
864 struct sdhci_pci_slot *slot; 1018 struct sdhci_pci_slot *slot;
865 1019
866 u8 slots, rev, first_bar; 1020 u8 slots, first_bar;
867 int ret, i; 1021 int ret, i;
868 1022
869 BUG_ON(pdev == NULL); 1023 BUG_ON(pdev == NULL);
870 BUG_ON(ent == NULL); 1024 BUG_ON(ent == NULL);
871 1025
872 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);
873
874 dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", 1026 dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
875 (int)pdev->vendor, (int)pdev->device, (int)rev); 1027 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
876 1028
877 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); 1029 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
878 if (ret) 1030 if (ret)
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 0502f89f662b..dbab0407f4b6 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -170,6 +170,12 @@ static const struct platform_device_id sdhci_pltfm_ids[] = {
170#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX 170#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
171 { "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata }, 171 { "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
172#endif 172#endif
173#ifdef CONFIG_MMC_SDHCI_DOVE
174 { "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata },
175#endif
176#ifdef CONFIG_MMC_SDHCI_TEGRA
177 { "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
178#endif
173 { }, 179 { },
174}; 180};
175MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids); 181MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index c1bfe48af56a..2b37016ad0ac 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -17,10 +17,12 @@
17 17
18struct sdhci_pltfm_host { 18struct sdhci_pltfm_host {
19 struct clk *clk; 19 struct clk *clk;
20 u32 scratchpad; /* to handle quirks across io-accessor calls */ 20 void *priv; /* to handle quirks across io-accessor calls */
21}; 21};
22 22
23extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; 23extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
24extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata; 24extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
25extern struct sdhci_pltfm_data sdhci_dove_pdata;
26extern struct sdhci_pltfm_data sdhci_tegra_pdata;
25 27
26#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ 28#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index aacb862ecc8a..69e3ee321eb5 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -130,6 +130,15 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
130 if (!clksrc) 130 if (!clksrc)
131 return UINT_MAX; 131 return UINT_MAX;
132 132
133 /*
134 * Clock divider's step is different as 1 from that of host controller
135 * when 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL.
136 */
137 if (ourhost->pdata->clk_type) {
138 rate = clk_round_rate(clksrc, wanted);
139 return wanted - rate;
140 }
141
133 rate = clk_get_rate(clksrc); 142 rate = clk_get_rate(clksrc);
134 143
135 for (div = 1; div < 256; div *= 2) { 144 for (div = 1; div < 256; div *= 2) {
@@ -232,10 +241,79 @@ static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
232 return min; 241 return min;
233} 242}
234 243
244/* sdhci_cmu_get_max_clk - callback to get maximum clock frequency.*/
245static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
246{
247 struct sdhci_s3c *ourhost = to_s3c(host);
248
249 return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX);
250}
251
252/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
253static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
254{
255 struct sdhci_s3c *ourhost = to_s3c(host);
256
257 /*
258 * initial clock can be in the frequency range of
259 * 100KHz-400KHz, so we set it as max value.
260 */
261 return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], 400000);
262}
263
264/* sdhci_cmu_set_clock - callback on clock change.*/
265static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
266{
267 struct sdhci_s3c *ourhost = to_s3c(host);
268
269 /* don't bother if the clock is going off */
270 if (clock == 0)
271 return;
272
273 sdhci_s3c_set_clock(host, clock);
274
275 clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
276
277 host->clock = clock;
278}
279
280/**
281 * sdhci_s3c_platform_8bit_width - support 8bit buswidth
282 * @host: The SDHCI host being queried
283 * @width: MMC_BUS_WIDTH_ macro for the bus width being requested
284 *
285 * We have 8-bit width support but is not a v3 controller.
286 * So we add platform_8bit_width() and support 8bit width.
287 */
288static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
289{
290 u8 ctrl;
291
292 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
293
294 switch (width) {
295 case MMC_BUS_WIDTH_8:
296 ctrl |= SDHCI_CTRL_8BITBUS;
297 ctrl &= ~SDHCI_CTRL_4BITBUS;
298 break;
299 case MMC_BUS_WIDTH_4:
300 ctrl |= SDHCI_CTRL_4BITBUS;
301 ctrl &= ~SDHCI_CTRL_8BITBUS;
302 break;
303 default:
304 break;
305 }
306
307 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
308
309 return 0;
310}
311
235static struct sdhci_ops sdhci_s3c_ops = { 312static struct sdhci_ops sdhci_s3c_ops = {
236 .get_max_clock = sdhci_s3c_get_max_clk, 313 .get_max_clock = sdhci_s3c_get_max_clk,
237 .set_clock = sdhci_s3c_set_clock, 314 .set_clock = sdhci_s3c_set_clock,
238 .get_min_clock = sdhci_s3c_get_min_clock, 315 .get_min_clock = sdhci_s3c_get_min_clock,
316 .platform_8bit_width = sdhci_s3c_platform_8bit_width,
239}; 317};
240 318
241static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 319static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
@@ -361,6 +439,13 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
361 439
362 clks++; 440 clks++;
363 sc->clk_bus[ptr] = clk; 441 sc->clk_bus[ptr] = clk;
442
443 /*
444 * save current clock index to know which clock bus
445 * is used later in overriding functions.
446 */
447 sc->cur_clk = ptr;
448
364 clk_enable(clk); 449 clk_enable(clk);
365 450
366 dev_info(dev, "clock source %d: %s (%ld Hz)\n", 451 dev_info(dev, "clock source %d: %s (%ld Hz)\n",
@@ -414,6 +499,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
414 * SDHCI block, or a missing configuration that needs to be set. */ 499 * SDHCI block, or a missing configuration that needs to be set. */
415 host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ; 500 host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
416 501
502 /* This host supports the Auto CMD12 */
503 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
504
417 if (pdata->cd_type == S3C_SDHCI_CD_NONE || 505 if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
418 pdata->cd_type == S3C_SDHCI_CD_PERMANENT) 506 pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
419 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; 507 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
@@ -421,12 +509,29 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
421 if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT) 509 if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
422 host->mmc->caps = MMC_CAP_NONREMOVABLE; 510 host->mmc->caps = MMC_CAP_NONREMOVABLE;
423 511
512 if (pdata->host_caps)
513 host->mmc->caps |= pdata->host_caps;
514
424 host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR | 515 host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
425 SDHCI_QUIRK_32BIT_DMA_SIZE); 516 SDHCI_QUIRK_32BIT_DMA_SIZE);
426 517
427 /* HSMMC on Samsung SoCs uses SDCLK as timeout clock */ 518 /* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
428 host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; 519 host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
429 520
521 /*
522 * If controller does not have internal clock divider,
523 * we can use overriding functions instead of default.
524 */
525 if (pdata->clk_type) {
526 sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
527 sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
528 sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
529 }
530
531 /* It supports additional host capabilities if needed */
532 if (pdata->host_caps)
533 host->mmc->caps |= pdata->host_caps;
534
430 ret = sdhci_add_host(host); 535 ret = sdhci_add_host(host);
431 if (ret) { 536 if (ret) {
432 dev_err(dev, "sdhci_add_host() failed\n"); 537 dev_err(dev, "sdhci_add_host() failed\n");
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index d70c54c7b70a..60a4c97d3d18 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -50,7 +50,7 @@ static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
50 /* val == 1 -> card removed, val == 0 -> card inserted */ 50 /* val == 1 -> card removed, val == 0 -> card inserted */
51 /* if card removed - set irq for low level, else vice versa */ 51 /* if card removed - set irq for low level, else vice versa */
52 gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH; 52 gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
53 set_irq_type(irq, gpio_irq_type); 53 irq_set_irq_type(irq, gpio_irq_type);
54 54
55 if (sdhci->data->card_power_gpio >= 0) { 55 if (sdhci->data->card_power_gpio >= 0) {
56 if (!sdhci->data->power_always_enb) { 56 if (!sdhci->data->power_always_enb) {
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
new file mode 100644
index 000000000000..f7e1f964395f
--- /dev/null
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -0,0 +1,261 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/io.h>
20#include <linux/gpio.h>
21#include <linux/mmc/card.h>
22#include <linux/mmc/host.h>
23
24#include <mach/gpio.h>
25#include <mach/sdhci.h>
26
27#include "sdhci.h"
28#include "sdhci-pltfm.h"
29
30static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
31{
32 u32 val;
33
34 if (unlikely(reg == SDHCI_PRESENT_STATE)) {
35 /* Use wp_gpio here instead? */
36 val = readl(host->ioaddr + reg);
37 return val | SDHCI_WRITE_PROTECT;
38 }
39
40 return readl(host->ioaddr + reg);
41}
42
43static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
44{
45 if (unlikely(reg == SDHCI_HOST_VERSION)) {
46 /* Erratum: Version register is invalid in HW. */
47 return SDHCI_SPEC_200;
48 }
49
50 return readw(host->ioaddr + reg);
51}
52
53static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
54{
55 /* Seems like we're getting spurious timeout and crc errors, so
56 * disable signalling of them. In case of real errors software
57 * timers should take care of eventually detecting them.
58 */
59 if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
60 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
61
62 writel(val, host->ioaddr + reg);
63
64 if (unlikely(reg == SDHCI_INT_ENABLE)) {
65 /* Erratum: Must enable block gap interrupt detection */
66 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
67 if (val & SDHCI_INT_CARD_INT)
68 gap_ctrl |= 0x8;
69 else
70 gap_ctrl &= ~0x8;
71 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
72 }
73}
74
75static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
76{
77 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
78 struct tegra_sdhci_platform_data *plat;
79
80 plat = pdev->dev.platform_data;
81
82 if (!gpio_is_valid(plat->wp_gpio))
83 return -1;
84
85 return gpio_get_value(plat->wp_gpio);
86}
87
88static irqreturn_t carddetect_irq(int irq, void *data)
89{
90 struct sdhci_host *sdhost = (struct sdhci_host *)data;
91
92 tasklet_schedule(&sdhost->card_tasklet);
93 return IRQ_HANDLED;
94};
95
96static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
97{
98 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
99 struct tegra_sdhci_platform_data *plat;
100 u32 ctrl;
101
102 plat = pdev->dev.platform_data;
103
104 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
105 if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
106 ctrl &= ~SDHCI_CTRL_4BITBUS;
107 ctrl |= SDHCI_CTRL_8BITBUS;
108 } else {
109 ctrl &= ~SDHCI_CTRL_8BITBUS;
110 if (bus_width == MMC_BUS_WIDTH_4)
111 ctrl |= SDHCI_CTRL_4BITBUS;
112 else
113 ctrl &= ~SDHCI_CTRL_4BITBUS;
114 }
115 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
116 return 0;
117}
118
119
120static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
121 struct sdhci_pltfm_data *pdata)
122{
123 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
124 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
125 struct tegra_sdhci_platform_data *plat;
126 struct clk *clk;
127 int rc;
128
129 plat = pdev->dev.platform_data;
130 if (plat == NULL) {
131 dev_err(mmc_dev(host->mmc), "missing platform data\n");
132 return -ENXIO;
133 }
134
135 if (gpio_is_valid(plat->power_gpio)) {
136 rc = gpio_request(plat->power_gpio, "sdhci_power");
137 if (rc) {
138 dev_err(mmc_dev(host->mmc),
139 "failed to allocate power gpio\n");
140 goto out;
141 }
142 tegra_gpio_enable(plat->power_gpio);
143 gpio_direction_output(plat->power_gpio, 1);
144 }
145
146 if (gpio_is_valid(plat->cd_gpio)) {
147 rc = gpio_request(plat->cd_gpio, "sdhci_cd");
148 if (rc) {
149 dev_err(mmc_dev(host->mmc),
150 "failed to allocate cd gpio\n");
151 goto out_power;
152 }
153 tegra_gpio_enable(plat->cd_gpio);
154 gpio_direction_input(plat->cd_gpio);
155
156 rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
157 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
158 mmc_hostname(host->mmc), host);
159
160 if (rc) {
161 dev_err(mmc_dev(host->mmc), "request irq error\n");
162 goto out_cd;
163 }
164
165 }
166
167 if (gpio_is_valid(plat->wp_gpio)) {
168 rc = gpio_request(plat->wp_gpio, "sdhci_wp");
169 if (rc) {
170 dev_err(mmc_dev(host->mmc),
171 "failed to allocate wp gpio\n");
172 goto out_irq;
173 }
174 tegra_gpio_enable(plat->wp_gpio);
175 gpio_direction_input(plat->wp_gpio);
176 }
177
178 clk = clk_get(mmc_dev(host->mmc), NULL);
179 if (IS_ERR(clk)) {
180 dev_err(mmc_dev(host->mmc), "clk err\n");
181 rc = PTR_ERR(clk);
182 goto out_wp;
183 }
184 clk_enable(clk);
185 pltfm_host->clk = clk;
186
187 if (plat->is_8bit)
188 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
189
190 return 0;
191
192out_wp:
193 if (gpio_is_valid(plat->wp_gpio)) {
194 tegra_gpio_disable(plat->wp_gpio);
195 gpio_free(plat->wp_gpio);
196 }
197
198out_irq:
199 if (gpio_is_valid(plat->cd_gpio))
200 free_irq(gpio_to_irq(plat->cd_gpio), host);
201out_cd:
202 if (gpio_is_valid(plat->cd_gpio)) {
203 tegra_gpio_disable(plat->cd_gpio);
204 gpio_free(plat->cd_gpio);
205 }
206
207out_power:
208 if (gpio_is_valid(plat->power_gpio)) {
209 tegra_gpio_disable(plat->power_gpio);
210 gpio_free(plat->power_gpio);
211 }
212
213out:
214 return rc;
215}
216
217static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
218{
219 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
220 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
221 struct tegra_sdhci_platform_data *plat;
222
223 plat = pdev->dev.platform_data;
224
225 if (gpio_is_valid(plat->wp_gpio)) {
226 tegra_gpio_disable(plat->wp_gpio);
227 gpio_free(plat->wp_gpio);
228 }
229
230 if (gpio_is_valid(plat->cd_gpio)) {
231 free_irq(gpio_to_irq(plat->cd_gpio), host);
232 tegra_gpio_disable(plat->cd_gpio);
233 gpio_free(plat->cd_gpio);
234 }
235
236 if (gpio_is_valid(plat->power_gpio)) {
237 tegra_gpio_disable(plat->power_gpio);
238 gpio_free(plat->power_gpio);
239 }
240
241 clk_disable(pltfm_host->clk);
242 clk_put(pltfm_host->clk);
243}
244
245static struct sdhci_ops tegra_sdhci_ops = {
246 .get_ro = tegra_sdhci_get_ro,
247 .read_l = tegra_sdhci_readl,
248 .read_w = tegra_sdhci_readw,
249 .write_l = tegra_sdhci_writel,
250 .platform_8bit_width = tegra_sdhci_8bit,
251};
252
253struct sdhci_pltfm_data sdhci_tegra_pdata = {
254 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
255 SDHCI_QUIRK_SINGLE_POWER_WRITE |
256 SDHCI_QUIRK_NO_HISPD_BIT |
257 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
258 .ops = &tegra_sdhci_ops,
259 .init = tegra_sdhci_pltfm_init,
260 .exit = tegra_sdhci_pltfm_exit,
261};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a25db426c910..5d20661bc357 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/leds.h> 24#include <linux/leds.h>
25 25
26#include <linux/mmc/mmc.h>
26#include <linux/mmc/host.h> 27#include <linux/mmc/host.h>
27 28
28#include "sdhci.h" 29#include "sdhci.h"
@@ -77,8 +78,11 @@ static void sdhci_dumpregs(struct sdhci_host *host)
77 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", 78 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
78 sdhci_readw(host, SDHCI_ACMD12_ERR), 79 sdhci_readw(host, SDHCI_ACMD12_ERR),
79 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 80 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
80 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n", 81 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
81 sdhci_readl(host, SDHCI_CAPABILITIES), 82 sdhci_readl(host, SDHCI_CAPABILITIES),
83 sdhci_readl(host, SDHCI_CAPABILITIES_1));
84 printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
85 sdhci_readw(host, SDHCI_COMMAND),
82 sdhci_readl(host, SDHCI_MAX_CURRENT)); 86 sdhci_readl(host, SDHCI_MAX_CURRENT));
83 87
84 if (host->flags & SDHCI_USE_ADMA) 88 if (host->flags & SDHCI_USE_ADMA)
@@ -1330,6 +1334,13 @@ static void sdhci_tasklet_finish(unsigned long param)
1330 1334
1331 host = (struct sdhci_host*)param; 1335 host = (struct sdhci_host*)param;
1332 1336
1337 /*
1338 * If this tasklet gets rescheduled while running, it will
1339 * be run again afterwards but without any active request.
1340 */
1341 if (!host->mrq)
1342 return;
1343
1333 spin_lock_irqsave(&host->lock, flags); 1344 spin_lock_irqsave(&host->lock, flags);
1334 1345
1335 del_timer(&host->timer); 1346 del_timer(&host->timer);
@@ -1341,7 +1352,7 @@ static void sdhci_tasklet_finish(unsigned long param)
1341 * upon error conditions. 1352 * upon error conditions.
1342 */ 1353 */
1343 if (!(host->flags & SDHCI_DEVICE_DEAD) && 1354 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
1344 (mrq->cmd->error || 1355 ((mrq->cmd && mrq->cmd->error) ||
1345 (mrq->data && (mrq->data->error || 1356 (mrq->data && (mrq->data->error ||
1346 (mrq->data->stop && mrq->data->stop->error))) || 1357 (mrq->data->stop && mrq->data->stop->error))) ||
1347 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { 1358 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
@@ -1518,7 +1529,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1518 1529
1519 if (intmask & SDHCI_INT_DATA_TIMEOUT) 1530 if (intmask & SDHCI_INT_DATA_TIMEOUT)
1520 host->data->error = -ETIMEDOUT; 1531 host->data->error = -ETIMEDOUT;
1521 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) 1532 else if (intmask & SDHCI_INT_DATA_END_BIT)
1533 host->data->error = -EILSEQ;
1534 else if ((intmask & SDHCI_INT_DATA_CRC) &&
1535 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
1536 != MMC_BUS_TEST_R)
1522 host->data->error = -EILSEQ; 1537 host->data->error = -EILSEQ;
1523 else if (intmask & SDHCI_INT_ADMA_ERROR) { 1538 else if (intmask & SDHCI_INT_ADMA_ERROR) {
1524 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc)); 1539 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
@@ -1736,7 +1751,7 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1736int sdhci_add_host(struct sdhci_host *host) 1751int sdhci_add_host(struct sdhci_host *host)
1737{ 1752{
1738 struct mmc_host *mmc; 1753 struct mmc_host *mmc;
1739 unsigned int caps; 1754 unsigned int caps, ocr_avail;
1740 int ret; 1755 int ret;
1741 1756
1742 WARN_ON(host == NULL); 1757 WARN_ON(host == NULL);
@@ -1890,13 +1905,26 @@ int sdhci_add_host(struct sdhci_host *host)
1890 mmc_card_is_removable(mmc)) 1905 mmc_card_is_removable(mmc))
1891 mmc->caps |= MMC_CAP_NEEDS_POLL; 1906 mmc->caps |= MMC_CAP_NEEDS_POLL;
1892 1907
1893 mmc->ocr_avail = 0; 1908 ocr_avail = 0;
1894 if (caps & SDHCI_CAN_VDD_330) 1909 if (caps & SDHCI_CAN_VDD_330)
1895 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; 1910 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
1896 if (caps & SDHCI_CAN_VDD_300) 1911 if (caps & SDHCI_CAN_VDD_300)
1897 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; 1912 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
1898 if (caps & SDHCI_CAN_VDD_180) 1913 if (caps & SDHCI_CAN_VDD_180)
1899 mmc->ocr_avail |= MMC_VDD_165_195; 1914 ocr_avail |= MMC_VDD_165_195;
1915
1916 mmc->ocr_avail = ocr_avail;
1917 mmc->ocr_avail_sdio = ocr_avail;
1918 if (host->ocr_avail_sdio)
1919 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
1920 mmc->ocr_avail_sd = ocr_avail;
1921 if (host->ocr_avail_sd)
1922 mmc->ocr_avail_sd &= host->ocr_avail_sd;
1923 else /* normal SD controllers don't support 1.8V */
1924 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
1925 mmc->ocr_avail_mmc = ocr_avail;
1926 if (host->ocr_avail_mmc)
1927 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
1900 1928
1901 if (mmc->ocr_avail == 0) { 1929 if (mmc->ocr_avail == 0) {
1902 printk(KERN_ERR "%s: Hardware doesn't report any " 1930 printk(KERN_ERR "%s: Hardware doesn't report any "
@@ -1928,10 +1956,14 @@ int sdhci_add_host(struct sdhci_host *host)
1928 * of bytes. When doing hardware scatter/gather, each entry cannot 1956 * of bytes. When doing hardware scatter/gather, each entry cannot
1929 * be larger than 64 KiB though. 1957 * be larger than 64 KiB though.
1930 */ 1958 */
1931 if (host->flags & SDHCI_USE_ADMA) 1959 if (host->flags & SDHCI_USE_ADMA) {
1932 mmc->max_seg_size = 65536; 1960 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
1933 else 1961 mmc->max_seg_size = 65535;
1962 else
1963 mmc->max_seg_size = 65536;
1964 } else {
1934 mmc->max_seg_size = mmc->max_req_size; 1965 mmc->max_seg_size = mmc->max_req_size;
1966 }
1935 1967
1936 /* 1968 /*
1937 * Maximum block size. This varies from controller to controller and 1969 * Maximum block size. This varies from controller to controller and
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e42d7f00c060..25e8bde600d1 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -45,6 +45,7 @@
45#define SDHCI_CMD_CRC 0x08 45#define SDHCI_CMD_CRC 0x08
46#define SDHCI_CMD_INDEX 0x10 46#define SDHCI_CMD_INDEX 0x10
47#define SDHCI_CMD_DATA 0x20 47#define SDHCI_CMD_DATA 0x20
48#define SDHCI_CMD_ABORTCMD 0xC0
48 49
49#define SDHCI_CMD_RESP_NONE 0x00 50#define SDHCI_CMD_RESP_NONE 0x00
50#define SDHCI_CMD_RESP_LONG 0x01 51#define SDHCI_CMD_RESP_LONG 0x01
@@ -52,6 +53,7 @@
52#define SDHCI_CMD_RESP_SHORT_BUSY 0x03 53#define SDHCI_CMD_RESP_SHORT_BUSY 0x03
53 54
54#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff)) 55#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
56#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f)
55 57
56#define SDHCI_RESPONSE 0x10 58#define SDHCI_RESPONSE 0x10
57 59
@@ -165,7 +167,7 @@
165#define SDHCI_CAN_VDD_180 0x04000000 167#define SDHCI_CAN_VDD_180 0x04000000
166#define SDHCI_CAN_64BIT 0x10000000 168#define SDHCI_CAN_64BIT 0x10000000
167 169
168/* 44-47 reserved for more caps */ 170#define SDHCI_CAPABILITIES_1 0x44
169 171
170#define SDHCI_MAX_CURRENT 0x48 172#define SDHCI_MAX_CURRENT 0x48
171 173
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index f472c2714eb8..bbc298fd2a15 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -446,7 +446,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
446 mmc->max_seg_size = 1024 * 512; 446 mmc->max_seg_size = 1024 * 512;
447 mmc->max_blk_size = 512; 447 mmc->max_blk_size = 512;
448 448
449 /* reset the controler */ 449 /* reset the controller */
450 if (sdricoh_reset(host)) { 450 if (sdricoh_reset(host)) {
451 dev_dbg(dev, "could not reset\n"); 451 dev_dbg(dev, "could not reset\n");
452 result = -EIO; 452 result = -EIO;
@@ -478,7 +478,7 @@ static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev)
478 dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device" 478 dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device"
479 " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]); 479 " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]);
480 480
481 /* search pci cardbus bridge that contains the mmc controler */ 481 /* search pci cardbus bridge that contains the mmc controller */
482 /* the io region is already claimed by yenta_socket... */ 482 /* the io region is already claimed by yenta_socket... */
483 while ((pci_dev = 483 while ((pci_dev =
484 pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, 484 pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index ddd09840520b..af97015a2fc7 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -16,16 +16,19 @@
16 * 16 *
17 */ 17 */
18 18
19#include <linux/clk.h>
20#include <linux/completion.h>
21#include <linux/delay.h>
19#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
20#include <linux/mmc/host.h> 23#include <linux/dmaengine.h>
21#include <linux/mmc/card.h> 24#include <linux/mmc/card.h>
22#include <linux/mmc/core.h> 25#include <linux/mmc/core.h>
26#include <linux/mmc/host.h>
23#include <linux/mmc/mmc.h> 27#include <linux/mmc/mmc.h>
24#include <linux/mmc/sdio.h> 28#include <linux/mmc/sdio.h>
25#include <linux/delay.h>
26#include <linux/platform_device.h>
27#include <linux/clk.h>
28#include <linux/mmc/sh_mmcif.h> 29#include <linux/mmc/sh_mmcif.h>
30#include <linux/pagemap.h>
31#include <linux/platform_device.h>
29 32
30#define DRIVER_NAME "sh_mmcif" 33#define DRIVER_NAME "sh_mmcif"
31#define DRIVER_VERSION "2010-04-28" 34#define DRIVER_VERSION "2010-04-28"
@@ -62,25 +65,6 @@
62/* CE_BLOCK_SET */ 65/* CE_BLOCK_SET */
63#define BLOCK_SIZE_MASK 0x0000ffff 66#define BLOCK_SIZE_MASK 0x0000ffff
64 67
65/* CE_CLK_CTRL */
66#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */
67#define CLK_CLEAR ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
68#define CLK_SUP_PCLK ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
69#define SRSPTO_256 ((1 << 13) | (0 << 12)) /* resp timeout */
70#define SRBSYTO_29 ((1 << 11) | (1 << 10) | \
71 (1 << 9) | (1 << 8)) /* resp busy timeout */
72#define SRWDTO_29 ((1 << 7) | (1 << 6) | \
73 (1 << 5) | (1 << 4)) /* read/write timeout */
74#define SCCSTO_29 ((1 << 3) | (1 << 2) | \
75 (1 << 1) | (1 << 0)) /* ccs timeout */
76
77/* CE_BUF_ACC */
78#define BUF_ACC_DMAWEN (1 << 25)
79#define BUF_ACC_DMAREN (1 << 24)
80#define BUF_ACC_BUSW_32 (0 << 17)
81#define BUF_ACC_BUSW_16 (1 << 17)
82#define BUF_ACC_ATYP (1 << 16)
83
84/* CE_INT */ 68/* CE_INT */
85#define INT_CCSDE (1 << 29) 69#define INT_CCSDE (1 << 29)
86#define INT_CMD12DRE (1 << 26) 70#define INT_CMD12DRE (1 << 26)
@@ -165,10 +149,6 @@
165 STS2_AC12BSYTO | STS2_RSPBSYTO | \ 149 STS2_AC12BSYTO | STS2_RSPBSYTO | \
166 STS2_AC12RSPTO | STS2_RSPTO) 150 STS2_AC12RSPTO | STS2_RSPTO)
167 151
168/* CE_VERSION */
169#define SOFT_RST_ON (1 << 31)
170#define SOFT_RST_OFF (0 << 31)
171
172#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */ 152#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */
173#define CLKDEV_MMC_DATA 20000000 /* 20MHz */ 153#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
174#define CLKDEV_INIT 400000 /* 400 KHz */ 154#define CLKDEV_INIT 400000 /* 400 KHz */
@@ -176,18 +156,21 @@
176struct sh_mmcif_host { 156struct sh_mmcif_host {
177 struct mmc_host *mmc; 157 struct mmc_host *mmc;
178 struct mmc_data *data; 158 struct mmc_data *data;
179 struct mmc_command *cmd;
180 struct platform_device *pd; 159 struct platform_device *pd;
181 struct clk *hclk; 160 struct clk *hclk;
182 unsigned int clk; 161 unsigned int clk;
183 int bus_width; 162 int bus_width;
184 u16 wait_int; 163 bool sd_error;
185 u16 sd_error;
186 long timeout; 164 long timeout;
187 void __iomem *addr; 165 void __iomem *addr;
188 wait_queue_head_t intr_wait; 166 struct completion intr_wait;
189};
190 167
168 /* DMA support */
169 struct dma_chan *chan_rx;
170 struct dma_chan *chan_tx;
171 struct completion dma_complete;
172 bool dma_active;
173};
191 174
192static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, 175static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
193 unsigned int reg, u32 val) 176 unsigned int reg, u32 val)
@@ -201,6 +184,182 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
201 writel(~val & readl(host->addr + reg), host->addr + reg); 184 writel(~val & readl(host->addr + reg), host->addr + reg);
202} 185}
203 186
187static void mmcif_dma_complete(void *arg)
188{
189 struct sh_mmcif_host *host = arg;
190 dev_dbg(&host->pd->dev, "Command completed\n");
191
192 if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
193 dev_name(&host->pd->dev)))
194 return;
195
196 if (host->data->flags & MMC_DATA_READ)
197 dma_unmap_sg(host->chan_rx->device->dev,
198 host->data->sg, host->data->sg_len,
199 DMA_FROM_DEVICE);
200 else
201 dma_unmap_sg(host->chan_tx->device->dev,
202 host->data->sg, host->data->sg_len,
203 DMA_TO_DEVICE);
204
205 complete(&host->dma_complete);
206}
207
208static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
209{
210 struct scatterlist *sg = host->data->sg;
211 struct dma_async_tx_descriptor *desc = NULL;
212 struct dma_chan *chan = host->chan_rx;
213 dma_cookie_t cookie = -EINVAL;
214 int ret;
215
216 ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
217 DMA_FROM_DEVICE);
218 if (ret > 0) {
219 host->dma_active = true;
220 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
221 DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
222 }
223
224 if (desc) {
225 desc->callback = mmcif_dma_complete;
226 desc->callback_param = host;
227 cookie = dmaengine_submit(desc);
228 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
229 dma_async_issue_pending(chan);
230 }
231 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
232 __func__, host->data->sg_len, ret, cookie);
233
234 if (!desc) {
235 /* DMA failed, fall back to PIO */
236 if (ret >= 0)
237 ret = -EIO;
238 host->chan_rx = NULL;
239 host->dma_active = false;
240 dma_release_channel(chan);
241 /* Free the Tx channel too */
242 chan = host->chan_tx;
243 if (chan) {
244 host->chan_tx = NULL;
245 dma_release_channel(chan);
246 }
247 dev_warn(&host->pd->dev,
248 "DMA failed: %d, falling back to PIO\n", ret);
249 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
250 }
251
252 dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
253 desc, cookie, host->data->sg_len);
254}
255
256static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
257{
258 struct scatterlist *sg = host->data->sg;
259 struct dma_async_tx_descriptor *desc = NULL;
260 struct dma_chan *chan = host->chan_tx;
261 dma_cookie_t cookie = -EINVAL;
262 int ret;
263
264 ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
265 DMA_TO_DEVICE);
266 if (ret > 0) {
267 host->dma_active = true;
268 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
269 DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
270 }
271
272 if (desc) {
273 desc->callback = mmcif_dma_complete;
274 desc->callback_param = host;
275 cookie = dmaengine_submit(desc);
276 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
277 dma_async_issue_pending(chan);
278 }
279 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
280 __func__, host->data->sg_len, ret, cookie);
281
282 if (!desc) {
283 /* DMA failed, fall back to PIO */
284 if (ret >= 0)
285 ret = -EIO;
286 host->chan_tx = NULL;
287 host->dma_active = false;
288 dma_release_channel(chan);
289 /* Free the Rx channel too */
290 chan = host->chan_rx;
291 if (chan) {
292 host->chan_rx = NULL;
293 dma_release_channel(chan);
294 }
295 dev_warn(&host->pd->dev,
296 "DMA failed: %d, falling back to PIO\n", ret);
297 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
298 }
299
300 dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
301 desc, cookie);
302}
303
304static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
305{
306 dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
307 chan->private = arg;
308 return true;
309}
310
311static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
312 struct sh_mmcif_plat_data *pdata)
313{
314 host->dma_active = false;
315
316 /* We can only either use DMA for both Tx and Rx or not use it at all */
317 if (pdata->dma) {
318 dma_cap_mask_t mask;
319
320 dma_cap_zero(mask);
321 dma_cap_set(DMA_SLAVE, mask);
322
323 host->chan_tx = dma_request_channel(mask, sh_mmcif_filter,
324 &pdata->dma->chan_priv_tx);
325 dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
326 host->chan_tx);
327
328 if (!host->chan_tx)
329 return;
330
331 host->chan_rx = dma_request_channel(mask, sh_mmcif_filter,
332 &pdata->dma->chan_priv_rx);
333 dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
334 host->chan_rx);
335
336 if (!host->chan_rx) {
337 dma_release_channel(host->chan_tx);
338 host->chan_tx = NULL;
339 return;
340 }
341
342 init_completion(&host->dma_complete);
343 }
344}
345
346static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
347{
348 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
349 /* Descriptors are freed automatically */
350 if (host->chan_tx) {
351 struct dma_chan *chan = host->chan_tx;
352 host->chan_tx = NULL;
353 dma_release_channel(chan);
354 }
355 if (host->chan_rx) {
356 struct dma_chan *chan = host->chan_rx;
357 host->chan_rx = NULL;
358 dma_release_channel(chan);
359 }
360
361 host->dma_active = false;
362}
204 363
205static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) 364static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
206{ 365{
@@ -239,13 +398,12 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
239 u32 state1, state2; 398 u32 state1, state2;
240 int ret, timeout = 10000000; 399 int ret, timeout = 10000000;
241 400
242 host->sd_error = 0; 401 host->sd_error = false;
243 host->wait_int = 0;
244 402
245 state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); 403 state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
246 state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); 404 state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
247 pr_debug("%s: ERR HOST_STS1 = %08x\n", DRIVER_NAME, state1); 405 dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
248 pr_debug("%s: ERR HOST_STS2 = %08x\n", DRIVER_NAME, state2); 406 dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);
249 407
250 if (state1 & STS1_CMDSEQ) { 408 if (state1 & STS1_CMDSEQ) {
251 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); 409 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
@@ -253,8 +411,8 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
253 while (1) { 411 while (1) {
254 timeout--; 412 timeout--;
255 if (timeout < 0) { 413 if (timeout < 0) {
256 pr_err(DRIVER_NAME": Forceed end of " \ 414 dev_err(&host->pd->dev,
257 "command sequence timeout err\n"); 415 "Forceed end of command sequence timeout err\n");
258 return -EIO; 416 return -EIO;
259 } 417 }
260 if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) 418 if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
@@ -263,18 +421,18 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
263 mdelay(1); 421 mdelay(1);
264 } 422 }
265 sh_mmcif_sync_reset(host); 423 sh_mmcif_sync_reset(host);
266 pr_debug(DRIVER_NAME": Forced end of command sequence\n"); 424 dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
267 return -EIO; 425 return -EIO;
268 } 426 }
269 427
270 if (state2 & STS2_CRC_ERR) { 428 if (state2 & STS2_CRC_ERR) {
271 pr_debug(DRIVER_NAME": Happened CRC error\n"); 429 dev_dbg(&host->pd->dev, ": Happened CRC error\n");
272 ret = -EIO; 430 ret = -EIO;
273 } else if (state2 & STS2_TIMEOUT_ERR) { 431 } else if (state2 & STS2_TIMEOUT_ERR) {
274 pr_debug(DRIVER_NAME": Happened Timeout error\n"); 432 dev_dbg(&host->pd->dev, ": Happened Timeout error\n");
275 ret = -ETIMEDOUT; 433 ret = -ETIMEDOUT;
276 } else { 434 } else {
277 pr_debug(DRIVER_NAME": Happened End/Index error\n"); 435 dev_dbg(&host->pd->dev, ": Happened End/Index error\n");
278 ret = -EIO; 436 ret = -EIO;
279 } 437 }
280 return ret; 438 return ret;
@@ -287,17 +445,13 @@ static int sh_mmcif_single_read(struct sh_mmcif_host *host,
287 long time; 445 long time;
288 u32 blocksize, i, *p = sg_virt(data->sg); 446 u32 blocksize, i, *p = sg_virt(data->sg);
289 447
290 host->wait_int = 0;
291
292 /* buf read enable */ 448 /* buf read enable */
293 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 449 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
294 time = wait_event_interruptible_timeout(host->intr_wait, 450 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
295 host->wait_int == 1 || 451 host->timeout);
296 host->sd_error == 1, host->timeout); 452 if (time <= 0 || host->sd_error)
297 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
298 return sh_mmcif_error_manage(host); 453 return sh_mmcif_error_manage(host);
299 454
300 host->wait_int = 0;
301 blocksize = (BLOCK_SIZE_MASK & 455 blocksize = (BLOCK_SIZE_MASK &
302 sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3; 456 sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
303 for (i = 0; i < blocksize / 4; i++) 457 for (i = 0; i < blocksize / 4; i++)
@@ -305,13 +459,11 @@ static int sh_mmcif_single_read(struct sh_mmcif_host *host,
305 459
306 /* buffer read end */ 460 /* buffer read end */
307 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); 461 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
308 time = wait_event_interruptible_timeout(host->intr_wait, 462 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
309 host->wait_int == 1 || 463 host->timeout);
310 host->sd_error == 1, host->timeout); 464 if (time <= 0 || host->sd_error)
311 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
312 return sh_mmcif_error_manage(host); 465 return sh_mmcif_error_manage(host);
313 466
314 host->wait_int = 0;
315 return 0; 467 return 0;
316} 468}
317 469
@@ -326,19 +478,15 @@ static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
326 MMCIF_CE_BLOCK_SET); 478 MMCIF_CE_BLOCK_SET);
327 for (j = 0; j < data->sg_len; j++) { 479 for (j = 0; j < data->sg_len; j++) {
328 p = sg_virt(data->sg); 480 p = sg_virt(data->sg);
329 host->wait_int = 0;
330 for (sec = 0; sec < data->sg->length / blocksize; sec++) { 481 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
331 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 482 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
332 /* buf read enable */ 483 /* buf read enable */
333 time = wait_event_interruptible_timeout(host->intr_wait, 484 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
334 host->wait_int == 1 || 485 host->timeout);
335 host->sd_error == 1, host->timeout);
336 486
337 if (host->wait_int != 1 && 487 if (time <= 0 || host->sd_error)
338 (time == 0 || host->sd_error != 0))
339 return sh_mmcif_error_manage(host); 488 return sh_mmcif_error_manage(host);
340 489
341 host->wait_int = 0;
342 for (i = 0; i < blocksize / 4; i++) 490 for (i = 0; i < blocksize / 4; i++)
343 *p++ = sh_mmcif_readl(host->addr, 491 *p++ = sh_mmcif_readl(host->addr,
344 MMCIF_CE_DATA); 492 MMCIF_CE_DATA);
@@ -356,17 +504,14 @@ static int sh_mmcif_single_write(struct sh_mmcif_host *host,
356 long time; 504 long time;
357 u32 blocksize, i, *p = sg_virt(data->sg); 505 u32 blocksize, i, *p = sg_virt(data->sg);
358 506
359 host->wait_int = 0;
360 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 507 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
361 508
362 /* buf write enable */ 509 /* buf write enable */
363 time = wait_event_interruptible_timeout(host->intr_wait, 510 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
364 host->wait_int == 1 || 511 host->timeout);
365 host->sd_error == 1, host->timeout); 512 if (time <= 0 || host->sd_error)
366 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
367 return sh_mmcif_error_manage(host); 513 return sh_mmcif_error_manage(host);
368 514
369 host->wait_int = 0;
370 blocksize = (BLOCK_SIZE_MASK & 515 blocksize = (BLOCK_SIZE_MASK &
371 sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3; 516 sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
372 for (i = 0; i < blocksize / 4; i++) 517 for (i = 0; i < blocksize / 4; i++)
@@ -375,13 +520,11 @@ static int sh_mmcif_single_write(struct sh_mmcif_host *host,
375 /* buffer write end */ 520 /* buffer write end */
376 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); 521 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
377 522
378 time = wait_event_interruptible_timeout(host->intr_wait, 523 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
379 host->wait_int == 1 || 524 host->timeout);
380 host->sd_error == 1, host->timeout); 525 if (time <= 0 || host->sd_error)
381 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
382 return sh_mmcif_error_manage(host); 526 return sh_mmcif_error_manage(host);
383 527
384 host->wait_int = 0;
385 return 0; 528 return 0;
386} 529}
387 530
@@ -397,19 +540,15 @@ static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
397 540
398 for (j = 0; j < data->sg_len; j++) { 541 for (j = 0; j < data->sg_len; j++) {
399 p = sg_virt(data->sg); 542 p = sg_virt(data->sg);
400 host->wait_int = 0;
401 for (sec = 0; sec < data->sg->length / blocksize; sec++) { 543 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
402 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 544 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
403 /* buf write enable*/ 545 /* buf write enable*/
404 time = wait_event_interruptible_timeout(host->intr_wait, 546 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
405 host->wait_int == 1 || 547 host->timeout);
406 host->sd_error == 1, host->timeout);
407 548
408 if (host->wait_int != 1 && 549 if (time <= 0 || host->sd_error)
409 (time == 0 || host->sd_error != 0))
410 return sh_mmcif_error_manage(host); 550 return sh_mmcif_error_manage(host);
411 551
412 host->wait_int = 0;
413 for (i = 0; i < blocksize / 4; i++) 552 for (i = 0; i < blocksize / 4; i++)
414 sh_mmcif_writel(host->addr, 553 sh_mmcif_writel(host->addr,
415 MMCIF_CE_DATA, *p++); 554 MMCIF_CE_DATA, *p++);
@@ -457,7 +596,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
457 tmp |= CMD_SET_RTYP_17B; 596 tmp |= CMD_SET_RTYP_17B;
458 break; 597 break;
459 default: 598 default:
460 pr_err(DRIVER_NAME": Not support type response.\n"); 599 dev_err(&host->pd->dev, "Unsupported response type.\n");
461 break; 600 break;
462 } 601 }
463 switch (opc) { 602 switch (opc) {
@@ -485,7 +624,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
485 tmp |= CMD_SET_DATW_8; 624 tmp |= CMD_SET_DATW_8;
486 break; 625 break;
487 default: 626 default:
488 pr_err(DRIVER_NAME": Not support bus width.\n"); 627 dev_err(&host->pd->dev, "Unsupported bus width.\n");
489 break; 628 break;
490 } 629 }
491 } 630 }
@@ -513,10 +652,10 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
513 return opc = ((opc << 24) | tmp); 652 return opc = ((opc << 24) | tmp);
514} 653}
515 654
516static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host, 655static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
517 struct mmc_request *mrq, u32 opc) 656 struct mmc_request *mrq, u32 opc)
518{ 657{
519 u32 ret; 658 int ret;
520 659
521 switch (opc) { 660 switch (opc) {
522 case MMC_READ_MULTIPLE_BLOCK: 661 case MMC_READ_MULTIPLE_BLOCK:
@@ -533,7 +672,7 @@ static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host,
533 ret = sh_mmcif_single_read(host, mrq); 672 ret = sh_mmcif_single_read(host, mrq);
534 break; 673 break;
535 default: 674 default:
536 pr_err(DRIVER_NAME": NOT SUPPORT CMD = d'%08d\n", opc); 675 dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
537 ret = -EINVAL; 676 ret = -EINVAL;
538 break; 677 break;
539 } 678 }
@@ -547,8 +686,6 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
547 int ret = 0, mask = 0; 686 int ret = 0, mask = 0;
548 u32 opc = cmd->opcode; 687 u32 opc = cmd->opcode;
549 688
550 host->cmd = cmd;
551
552 switch (opc) { 689 switch (opc) {
553 /* respons busy check */ 690 /* respons busy check */
554 case MMC_SWITCH: 691 case MMC_SWITCH:
@@ -579,13 +716,12 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
579 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask); 716 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
580 /* set arg */ 717 /* set arg */
581 sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg); 718 sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
582 host->wait_int = 0;
583 /* set cmd */ 719 /* set cmd */
584 sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc); 720 sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
585 721
586 time = wait_event_interruptible_timeout(host->intr_wait, 722 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
587 host->wait_int == 1 || host->sd_error == 1, host->timeout); 723 host->timeout);
588 if (host->wait_int != 1 && time == 0) { 724 if (time <= 0) {
589 cmd->error = sh_mmcif_error_manage(host); 725 cmd->error = sh_mmcif_error_manage(host);
590 return; 726 return;
591 } 727 }
@@ -597,26 +733,34 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
597 cmd->error = -ETIMEDOUT; 733 cmd->error = -ETIMEDOUT;
598 break; 734 break;
599 default: 735 default:
600 pr_debug("%s: Cmd(d'%d) err\n", 736 dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
601 DRIVER_NAME, cmd->opcode); 737 cmd->opcode);
602 cmd->error = sh_mmcif_error_manage(host); 738 cmd->error = sh_mmcif_error_manage(host);
603 break; 739 break;
604 } 740 }
605 host->sd_error = 0; 741 host->sd_error = false;
606 host->wait_int = 0;
607 return; 742 return;
608 } 743 }
609 if (!(cmd->flags & MMC_RSP_PRESENT)) { 744 if (!(cmd->flags & MMC_RSP_PRESENT)) {
610 cmd->error = ret; 745 cmd->error = 0;
611 host->wait_int = 0;
612 return; 746 return;
613 } 747 }
614 if (host->wait_int == 1) { 748 sh_mmcif_get_response(host, cmd);
615 sh_mmcif_get_response(host, cmd);
616 host->wait_int = 0;
617 }
618 if (host->data) { 749 if (host->data) {
619 ret = sh_mmcif_data_trans(host, mrq, cmd->opcode); 750 if (!host->dma_active) {
751 ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
752 } else {
753 long time =
754 wait_for_completion_interruptible_timeout(&host->dma_complete,
755 host->timeout);
756 if (!time)
757 ret = -ETIMEDOUT;
758 else if (time < 0)
759 ret = time;
760 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
761 BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
762 host->dma_active = false;
763 }
620 if (ret < 0) 764 if (ret < 0)
621 mrq->data->bytes_xfered = 0; 765 mrq->data->bytes_xfered = 0;
622 else 766 else
@@ -636,20 +780,18 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
636 else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) 780 else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
637 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); 781 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
638 else { 782 else {
639 pr_err(DRIVER_NAME": not support stop cmd\n"); 783 dev_err(&host->pd->dev, "unsupported stop cmd\n");
640 cmd->error = sh_mmcif_error_manage(host); 784 cmd->error = sh_mmcif_error_manage(host);
641 return; 785 return;
642 } 786 }
643 787
644 time = wait_event_interruptible_timeout(host->intr_wait, 788 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
645 host->wait_int == 1 || 789 host->timeout);
646 host->sd_error == 1, host->timeout); 790 if (time <= 0 || host->sd_error) {
647 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) {
648 cmd->error = sh_mmcif_error_manage(host); 791 cmd->error = sh_mmcif_error_manage(host);
649 return; 792 return;
650 } 793 }
651 sh_mmcif_get_cmd12response(host, cmd); 794 sh_mmcif_get_cmd12response(host, cmd);
652 host->wait_int = 0;
653 cmd->error = 0; 795 cmd->error = 0;
654} 796}
655 797
@@ -676,6 +818,15 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
676 break; 818 break;
677 } 819 }
678 host->data = mrq->data; 820 host->data = mrq->data;
821 if (mrq->data) {
822 if (mrq->data->flags & MMC_DATA_READ) {
823 if (host->chan_rx)
824 sh_mmcif_start_dma_rx(host);
825 } else {
826 if (host->chan_tx)
827 sh_mmcif_start_dma_tx(host);
828 }
829 }
679 sh_mmcif_start_cmd(host, mrq, mrq->cmd); 830 sh_mmcif_start_cmd(host, mrq, mrq->cmd);
680 host->data = NULL; 831 host->data = NULL;
681 832
@@ -693,15 +844,15 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
693 struct sh_mmcif_host *host = mmc_priv(mmc); 844 struct sh_mmcif_host *host = mmc_priv(mmc);
694 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; 845 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
695 846
696 if (ios->power_mode == MMC_POWER_OFF) { 847 if (ios->power_mode == MMC_POWER_UP) {
848 if (p->set_pwr)
849 p->set_pwr(host->pd, ios->power_mode);
850 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
697 /* clock stop */ 851 /* clock stop */
698 sh_mmcif_clock_control(host, 0); 852 sh_mmcif_clock_control(host, 0);
699 if (p->down_pwr) 853 if (ios->power_mode == MMC_POWER_OFF && p->down_pwr)
700 p->down_pwr(host->pd); 854 p->down_pwr(host->pd);
701 return; 855 return;
702 } else if (ios->power_mode == MMC_POWER_UP) {
703 if (p->set_pwr)
704 p->set_pwr(host->pd, ios->power_mode);
705 } 856 }
706 857
707 if (ios->clock) 858 if (ios->clock)
@@ -735,7 +886,7 @@ static void sh_mmcif_detect(struct mmc_host *mmc)
735static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) 886static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
736{ 887{
737 struct sh_mmcif_host *host = dev_id; 888 struct sh_mmcif_host *host = dev_id;
738 u32 state = 0; 889 u32 state;
739 int err = 0; 890 int err = 0;
740 891
741 state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); 892 state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
@@ -774,17 +925,19 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
774 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); 925 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
775 err = 1; 926 err = 1;
776 } else { 927 } else {
777 pr_debug("%s: Not support int\n", DRIVER_NAME); 928 dev_dbg(&host->pd->dev, "Not support int\n");
778 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); 929 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
779 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); 930 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
780 err = 1; 931 err = 1;
781 } 932 }
782 if (err) { 933 if (err) {
783 host->sd_error = 1; 934 host->sd_error = true;
784 pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state); 935 dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
785 } 936 }
786 host->wait_int = 1; 937 if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
787 wake_up(&host->intr_wait); 938 complete(&host->intr_wait);
939 else
940 dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
788 941
789 return IRQ_HANDLED; 942 return IRQ_HANDLED;
790} 943}
@@ -793,8 +946,8 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
793{ 946{
794 int ret = 0, irq[2]; 947 int ret = 0, irq[2];
795 struct mmc_host *mmc; 948 struct mmc_host *mmc;
796 struct sh_mmcif_host *host = NULL; 949 struct sh_mmcif_host *host;
797 struct sh_mmcif_plat_data *pd = NULL; 950 struct sh_mmcif_plat_data *pd;
798 struct resource *res; 951 struct resource *res;
799 void __iomem *reg; 952 void __iomem *reg;
800 char clk_name[8]; 953 char clk_name[8];
@@ -802,7 +955,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
802 irq[0] = platform_get_irq(pdev, 0); 955 irq[0] = platform_get_irq(pdev, 0);
803 irq[1] = platform_get_irq(pdev, 1); 956 irq[1] = platform_get_irq(pdev, 1);
804 if (irq[0] < 0 || irq[1] < 0) { 957 if (irq[0] < 0 || irq[1] < 0) {
805 pr_err(DRIVER_NAME": Get irq error\n"); 958 dev_err(&pdev->dev, "Get irq error\n");
806 return -ENXIO; 959 return -ENXIO;
807 } 960 }
808 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 961 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -815,7 +968,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
815 dev_err(&pdev->dev, "ioremap error.\n"); 968 dev_err(&pdev->dev, "ioremap error.\n");
816 return -ENOMEM; 969 return -ENOMEM;
817 } 970 }
818 pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data); 971 pd = pdev->dev.platform_data;
819 if (!pd) { 972 if (!pd) {
820 dev_err(&pdev->dev, "sh_mmcif plat data error.\n"); 973 dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
821 ret = -ENXIO; 974 ret = -ENXIO;
@@ -842,7 +995,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
842 host->clk = clk_get_rate(host->hclk); 995 host->clk = clk_get_rate(host->hclk);
843 host->pd = pdev; 996 host->pd = pdev;
844 997
845 init_waitqueue_head(&host->intr_wait); 998 init_completion(&host->intr_wait);
846 999
847 mmc->ops = &sh_mmcif_ops; 1000 mmc->ops = &sh_mmcif_ops;
848 mmc->f_max = host->clk; 1001 mmc->f_max = host->clk;
@@ -858,33 +1011,37 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
858 mmc->caps = MMC_CAP_MMC_HIGHSPEED; 1011 mmc->caps = MMC_CAP_MMC_HIGHSPEED;
859 if (pd->caps) 1012 if (pd->caps)
860 mmc->caps |= pd->caps; 1013 mmc->caps |= pd->caps;
861 mmc->max_segs = 128; 1014 mmc->max_segs = 32;
862 mmc->max_blk_size = 512; 1015 mmc->max_blk_size = 512;
863 mmc->max_blk_count = 65535; 1016 mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
864 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1017 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
865 mmc->max_seg_size = mmc->max_req_size; 1018 mmc->max_seg_size = mmc->max_req_size;
866 1019
867 sh_mmcif_sync_reset(host); 1020 sh_mmcif_sync_reset(host);
868 platform_set_drvdata(pdev, host); 1021 platform_set_drvdata(pdev, host);
1022
1023 /* See if we also get DMA */
1024 sh_mmcif_request_dma(host, pd);
1025
869 mmc_add_host(mmc); 1026 mmc_add_host(mmc);
870 1027
871 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host); 1028 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
872 if (ret) { 1029 if (ret) {
873 pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n"); 1030 dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
874 goto clean_up2; 1031 goto clean_up2;
875 } 1032 }
876 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host); 1033 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
877 if (ret) { 1034 if (ret) {
878 free_irq(irq[0], host); 1035 free_irq(irq[0], host);
879 pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n"); 1036 dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
880 goto clean_up2; 1037 goto clean_up2;
881 } 1038 }
882 1039
883 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); 1040 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
884 sh_mmcif_detect(host->mmc); 1041 sh_mmcif_detect(host->mmc);
885 1042
886 pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION); 1043 dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
887 pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME, 1044 dev_dbg(&pdev->dev, "chip ver H'%04x\n",
888 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); 1045 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
889 return ret; 1046 return ret;
890 1047
@@ -903,20 +1060,22 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
903 struct sh_mmcif_host *host = platform_get_drvdata(pdev); 1060 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
904 int irq[2]; 1061 int irq[2];
905 1062
906 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); 1063 mmc_remove_host(host->mmc);
907 1064 sh_mmcif_release_dma(host);
908 irq[0] = platform_get_irq(pdev, 0);
909 irq[1] = platform_get_irq(pdev, 1);
910 1065
911 if (host->addr) 1066 if (host->addr)
912 iounmap(host->addr); 1067 iounmap(host->addr);
913 1068
914 platform_set_drvdata(pdev, NULL); 1069 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
915 mmc_remove_host(host->mmc); 1070
1071 irq[0] = platform_get_irq(pdev, 0);
1072 irq[1] = platform_get_irq(pdev, 1);
916 1073
917 free_irq(irq[0], host); 1074 free_irq(irq[0], host);
918 free_irq(irq[1], host); 1075 free_irq(irq[1], host);
919 1076
1077 platform_set_drvdata(pdev, NULL);
1078
920 clk_disable(host->hclk); 1079 clk_disable(host->hclk);
921 mmc_free_host(host->mmc); 1080 mmc_free_host(host->mmc);
922 1081
@@ -947,5 +1106,5 @@ module_exit(sh_mmcif_exit);
947 1106
948MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver"); 1107MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
949MODULE_LICENSE("GPL"); 1108MODULE_LICENSE("GPL");
950MODULE_ALIAS(DRIVER_NAME); 1109MODULE_ALIAS("platform:" DRIVER_NAME);
951MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>"); 1110MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
new file mode 100644
index 000000000000..cc701236d16f
--- /dev/null
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -0,0 +1,171 @@
1/*
2 * SuperH Mobile SDHI
3 *
4 * Copyright (C) 2009 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Based on "Compaq ASIC3 support":
11 *
12 * Copyright 2001 Compaq Computer Corporation.
13 * Copyright 2004-2005 Phil Blundell
14 * Copyright 2007-2008 OpenedHand Ltd.
15 *
16 * Authors: Phil Blundell <pb@handhelds.org>,
17 * Samuel Ortiz <sameo@openedhand.com>
18 *
19 */
20
21#include <linux/kernel.h>
22#include <linux/clk.h>
23#include <linux/slab.h>
24#include <linux/platform_device.h>
25#include <linux/mmc/host.h>
26#include <linux/mmc/sh_mobile_sdhi.h>
27#include <linux/mfd/tmio.h>
28#include <linux/sh_dma.h>
29
30#include "tmio_mmc.h"
31
32struct sh_mobile_sdhi {
33 struct clk *clk;
34 struct tmio_mmc_data mmc_data;
35 struct sh_dmae_slave param_tx;
36 struct sh_dmae_slave param_rx;
37 struct tmio_mmc_dma dma_priv;
38};
39
40static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state)
41{
42 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
43
44 if (p && p->set_pwr)
45 p->set_pwr(pdev, state);
46}
47
48static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
49{
50 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
51
52 if (p && p->get_cd)
53 return p->get_cd(pdev);
54 else
55 return -ENOSYS;
56}
57
58static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
59{
60 struct sh_mobile_sdhi *priv;
61 struct tmio_mmc_data *mmc_data;
62 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
63 struct tmio_mmc_host *host;
64 char clk_name[8];
65 int ret;
66
67 priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
68 if (priv == NULL) {
69 dev_err(&pdev->dev, "kzalloc failed\n");
70 return -ENOMEM;
71 }
72
73 mmc_data = &priv->mmc_data;
74
75 snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
76 priv->clk = clk_get(&pdev->dev, clk_name);
77 if (IS_ERR(priv->clk)) {
78 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
79 ret = PTR_ERR(priv->clk);
80 goto eclkget;
81 }
82
83 clk_enable(priv->clk);
84
85 mmc_data->hclk = clk_get_rate(priv->clk);
86 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
87 mmc_data->get_cd = sh_mobile_sdhi_get_cd;
88 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
89 if (p) {
90 mmc_data->flags = p->tmio_flags;
91 mmc_data->ocr_mask = p->tmio_ocr_mask;
92 mmc_data->capabilities |= p->tmio_caps;
93
94 if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
95 priv->param_tx.slave_id = p->dma_slave_tx;
96 priv->param_rx.slave_id = p->dma_slave_rx;
97 priv->dma_priv.chan_priv_tx = &priv->param_tx;
98 priv->dma_priv.chan_priv_rx = &priv->param_rx;
99 priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
100 mmc_data->dma = &priv->dma_priv;
101 }
102 }
103
104 /*
105 * All SDHI blocks support 2-byte and larger block sizes in 4-bit
106 * bus width mode.
107 */
108 mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
109
110 /*
111 * All SDHI blocks support SDIO IRQ signalling.
112 */
113 mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
114
115 ret = tmio_mmc_host_probe(&host, pdev, mmc_data);
116 if (ret < 0)
117 goto eprobe;
118
119 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
120 (unsigned long)host->ctl, host->irq);
121
122 return ret;
123
124eprobe:
125 clk_disable(priv->clk);
126 clk_put(priv->clk);
127eclkget:
128 kfree(priv);
129 return ret;
130}
131
132static int sh_mobile_sdhi_remove(struct platform_device *pdev)
133{
134 struct mmc_host *mmc = platform_get_drvdata(pdev);
135 struct tmio_mmc_host *host = mmc_priv(mmc);
136 struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
137
138 tmio_mmc_host_remove(host);
139 clk_disable(priv->clk);
140 clk_put(priv->clk);
141 kfree(priv);
142
143 return 0;
144}
145
146static struct platform_driver sh_mobile_sdhi_driver = {
147 .driver = {
148 .name = "sh_mobile_sdhi",
149 .owner = THIS_MODULE,
150 },
151 .probe = sh_mobile_sdhi_probe,
152 .remove = __devexit_p(sh_mobile_sdhi_remove),
153};
154
155static int __init sh_mobile_sdhi_init(void)
156{
157 return platform_driver_register(&sh_mobile_sdhi_driver);
158}
159
160static void __exit sh_mobile_sdhi_exit(void)
161{
162 platform_driver_unregister(&sh_mobile_sdhi_driver);
163}
164
165module_init(sh_mobile_sdhi_init);
166module_exit(sh_mobile_sdhi_exit);
167
168MODULE_DESCRIPTION("SuperH Mobile SDHI driver");
169MODULE_AUTHOR("Magnus Damm");
170MODULE_LICENSE("GPL v2");
171MODULE_ALIAS("platform:sh_mobile_sdhi");
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e7765a89593e..79c568461d59 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * linux/drivers/mmc/tmio_mmc.c 2 * linux/drivers/mmc/host/tmio_mmc.c
3 * 3 *
4 * Copyright (C) 2004 Ian Molton 4 * Copyright (C) 2007 Ian Molton
5 * Copyright (C) 2007 Ian Molton 5 * Copyright (C) 2004 Ian Molton
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -11,781 +11,22 @@
11 * Driver for the MMC / SD / SDIO cell found in: 11 * Driver for the MMC / SD / SDIO cell found in:
12 * 12 *
13 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 13 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
14 *
15 * This driver draws mainly on scattered spec sheets, Reverse engineering
16 * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
17 * support). (Further 4 bit support from a later datasheet).
18 *
19 * TODO:
20 * Investigate using a workqueue for PIO transfers
21 * Eliminate FIXMEs
22 * SDIO support
23 * Better Power management
24 * Handle MMC errors better
25 * double buffer support
26 *
27 */ 14 */
28#include <linux/module.h> 15
29#include <linux/irq.h>
30#include <linux/device.h> 16#include <linux/device.h>
31#include <linux/delay.h>
32#include <linux/dmaengine.h>
33#include <linux/mmc/host.h>
34#include <linux/mfd/core.h> 17#include <linux/mfd/core.h>
35#include <linux/mfd/tmio.h> 18#include <linux/mfd/tmio.h>
19#include <linux/mmc/host.h>
20#include <linux/module.h>
21#include <linux/pagemap.h>
22#include <linux/scatterlist.h>
36 23
37#include "tmio_mmc.h" 24#include "tmio_mmc.h"
38 25
39static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
40{
41 u32 clk = 0, clock;
42
43 if (new_clock) {
44 for (clock = host->mmc->f_min, clk = 0x80000080;
45 new_clock >= (clock<<1); clk >>= 1)
46 clock <<= 1;
47 clk |= 0x100;
48 }
49
50 if (host->set_clk_div)
51 host->set_clk_div(host->pdev, (clk>>22) & 1);
52
53 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
54}
55
56static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
57{
58 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
59 msleep(10);
60 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
61 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
62 msleep(10);
63}
64
65static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
66{
67 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
68 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
69 msleep(10);
70 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
71 msleep(10);
72}
73
74static void reset(struct tmio_mmc_host *host)
75{
76 /* FIXME - should we set stop clock reg here */
77 sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
78 sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
79 msleep(10);
80 sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
81 sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
82 msleep(10);
83}
84
85static void
86tmio_mmc_finish_request(struct tmio_mmc_host *host)
87{
88 struct mmc_request *mrq = host->mrq;
89
90 host->mrq = NULL;
91 host->cmd = NULL;
92 host->data = NULL;
93
94 mmc_request_done(host->mmc, mrq);
95}
96
97/* These are the bitmasks the tmio chip requires to implement the MMC response
98 * types. Note that R1 and R6 are the same in this scheme. */
99#define APP_CMD 0x0040
100#define RESP_NONE 0x0300
101#define RESP_R1 0x0400
102#define RESP_R1B 0x0500
103#define RESP_R2 0x0600
104#define RESP_R3 0x0700
105#define DATA_PRESENT 0x0800
106#define TRANSFER_READ 0x1000
107#define TRANSFER_MULTI 0x2000
108#define SECURITY_CMD 0x4000
109
110static int
111tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
112{
113 struct mmc_data *data = host->data;
114 int c = cmd->opcode;
115
116 /* Command 12 is handled by hardware */
117 if (cmd->opcode == 12 && !cmd->arg) {
118 sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
119 return 0;
120 }
121
122 switch (mmc_resp_type(cmd)) {
123 case MMC_RSP_NONE: c |= RESP_NONE; break;
124 case MMC_RSP_R1: c |= RESP_R1; break;
125 case MMC_RSP_R1B: c |= RESP_R1B; break;
126 case MMC_RSP_R2: c |= RESP_R2; break;
127 case MMC_RSP_R3: c |= RESP_R3; break;
128 default:
129 pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
130 return -EINVAL;
131 }
132
133 host->cmd = cmd;
134
135/* FIXME - this seems to be ok commented out but the spec suggest this bit
136 * should be set when issuing app commands.
137 * if(cmd->flags & MMC_FLAG_ACMD)
138 * c |= APP_CMD;
139 */
140 if (data) {
141 c |= DATA_PRESENT;
142 if (data->blocks > 1) {
143 sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
144 c |= TRANSFER_MULTI;
145 }
146 if (data->flags & MMC_DATA_READ)
147 c |= TRANSFER_READ;
148 }
149
150 enable_mmc_irqs(host, TMIO_MASK_CMD);
151
152 /* Fire off the command */
153 sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
154 sd_ctrl_write16(host, CTL_SD_CMD, c);
155
156 return 0;
157}
158
159/*
160 * This chip always returns (at least?) as much data as you ask for.
161 * I'm unsure what happens if you ask for less than a block. This should be
162 * looked into to ensure that a funny length read doesnt hose the controller.
163 */
164static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
165{
166 struct mmc_data *data = host->data;
167 void *sg_virt;
168 unsigned short *buf;
169 unsigned int count;
170 unsigned long flags;
171
172 if (!data) {
173 pr_debug("Spurious PIO IRQ\n");
174 return;
175 }
176
177 sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
178 buf = (unsigned short *)(sg_virt + host->sg_off);
179
180 count = host->sg_ptr->length - host->sg_off;
181 if (count > data->blksz)
182 count = data->blksz;
183
184 pr_debug("count: %08x offset: %08x flags %08x\n",
185 count, host->sg_off, data->flags);
186
187 /* Transfer the data */
188 if (data->flags & MMC_DATA_READ)
189 sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
190 else
191 sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
192
193 host->sg_off += count;
194
195 tmio_mmc_kunmap_atomic(sg_virt, &flags);
196
197 if (host->sg_off == host->sg_ptr->length)
198 tmio_mmc_next_sg(host);
199
200 return;
201}
202
203static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
204{
205 struct mmc_data *data = host->data;
206 struct mmc_command *stop;
207
208 host->data = NULL;
209
210 if (!data) {
211 dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
212 return;
213 }
214 stop = data->stop;
215
216 /* FIXME - return correct transfer count on errors */
217 if (!data->error)
218 data->bytes_xfered = data->blocks * data->blksz;
219 else
220 data->bytes_xfered = 0;
221
222 pr_debug("Completed data request\n");
223
224 /*
225 * FIXME: other drivers allow an optional stop command of any given type
226 * which we dont do, as the chip can auto generate them.
227 * Perhaps we can be smarter about when to use auto CMD12 and
228 * only issue the auto request when we know this is the desired
229 * stop command, allowing fallback to the stop command the
230 * upper layers expect. For now, we do what works.
231 */
232
233 if (data->flags & MMC_DATA_READ) {
234 if (!host->chan_rx)
235 disable_mmc_irqs(host, TMIO_MASK_READOP);
236 dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
237 host->mrq);
238 } else {
239 if (!host->chan_tx)
240 disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
241 dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
242 host->mrq);
243 }
244
245 if (stop) {
246 if (stop->opcode == 12 && !stop->arg)
247 sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
248 else
249 BUG();
250 }
251
252 tmio_mmc_finish_request(host);
253}
254
255static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
256{
257 struct mmc_data *data = host->data;
258
259 if (!data)
260 return;
261
262 if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
263 /*
264 * Has all data been written out yet? Testing on SuperH showed,
265 * that in most cases the first interrupt comes already with the
266 * BUSY status bit clear, but on some operations, like mount or
267 * in the beginning of a write / sync / umount, there is one
268 * DATAEND interrupt with the BUSY bit set, in this cases
269 * waiting for one more interrupt fixes the problem.
270 */
271 if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
272 disable_mmc_irqs(host, TMIO_STAT_DATAEND);
273 tasklet_schedule(&host->dma_complete);
274 }
275 } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
276 disable_mmc_irqs(host, TMIO_STAT_DATAEND);
277 tasklet_schedule(&host->dma_complete);
278 } else {
279 tmio_mmc_do_data_irq(host);
280 }
281}
282
283static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
284 unsigned int stat)
285{
286 struct mmc_command *cmd = host->cmd;
287 int i, addr;
288
289 if (!host->cmd) {
290 pr_debug("Spurious CMD irq\n");
291 return;
292 }
293
294 host->cmd = NULL;
295
296 /* This controller is sicker than the PXA one. Not only do we need to
297 * drop the top 8 bits of the first response word, we also need to
298 * modify the order of the response for short response command types.
299 */
300
301 for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
302 cmd->resp[i] = sd_ctrl_read32(host, addr);
303
304 if (cmd->flags & MMC_RSP_136) {
305 cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
306 cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
307 cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
308 cmd->resp[3] <<= 8;
309 } else if (cmd->flags & MMC_RSP_R3) {
310 cmd->resp[0] = cmd->resp[3];
311 }
312
313 if (stat & TMIO_STAT_CMDTIMEOUT)
314 cmd->error = -ETIMEDOUT;
315 else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
316 cmd->error = -EILSEQ;
317
318 /* If there is data to handle we enable data IRQs here, and
319 * we will ultimatley finish the request in the data_end handler.
320 * If theres no data or we encountered an error, finish now.
321 */
322 if (host->data && !cmd->error) {
323 if (host->data->flags & MMC_DATA_READ) {
324 if (!host->chan_rx)
325 enable_mmc_irqs(host, TMIO_MASK_READOP);
326 } else {
327 struct dma_chan *chan = host->chan_tx;
328 if (!chan)
329 enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
330 else
331 tasklet_schedule(&host->dma_issue);
332 }
333 } else {
334 tmio_mmc_finish_request(host);
335 }
336
337 return;
338}
339
340static irqreturn_t tmio_mmc_irq(int irq, void *devid)
341{
342 struct tmio_mmc_host *host = devid;
343 unsigned int ireg, irq_mask, status;
344
345 pr_debug("MMC IRQ begin\n");
346
347 status = sd_ctrl_read32(host, CTL_STATUS);
348 irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
349 ireg = status & TMIO_MASK_IRQ & ~irq_mask;
350
351 pr_debug_status(status);
352 pr_debug_status(ireg);
353
354 if (!ireg) {
355 disable_mmc_irqs(host, status & ~irq_mask);
356
357 pr_warning("tmio_mmc: Spurious irq, disabling! "
358 "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
359 pr_debug_status(status);
360
361 goto out;
362 }
363
364 while (ireg) {
365 /* Card insert / remove attempts */
366 if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
367 ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
368 TMIO_STAT_CARD_REMOVE);
369 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
370 }
371
372 /* CRC and other errors */
373/* if (ireg & TMIO_STAT_ERR_IRQ)
374 * handled |= tmio_error_irq(host, irq, stat);
375 */
376
377 /* Command completion */
378 if (ireg & TMIO_MASK_CMD) {
379 ack_mmc_irqs(host, TMIO_MASK_CMD);
380 tmio_mmc_cmd_irq(host, status);
381 }
382
383 /* Data transfer */
384 if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
385 ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
386 tmio_mmc_pio_irq(host);
387 }
388
389 /* Data transfer completion */
390 if (ireg & TMIO_STAT_DATAEND) {
391 ack_mmc_irqs(host, TMIO_STAT_DATAEND);
392 tmio_mmc_data_irq(host);
393 }
394
395 /* Check status - keep going until we've handled it all */
396 status = sd_ctrl_read32(host, CTL_STATUS);
397 irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
398 ireg = status & TMIO_MASK_IRQ & ~irq_mask;
399
400 pr_debug("Status at end of loop: %08x\n", status);
401 pr_debug_status(status);
402 }
403 pr_debug("MMC IRQ end\n");
404
405out:
406 return IRQ_HANDLED;
407}
408
409#ifdef CONFIG_TMIO_MMC_DMA
410static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
411{
412#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
413 /* Switch DMA mode on or off - SuperH specific? */
414 sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
415#endif
416}
417
418static void tmio_dma_complete(void *arg)
419{
420 struct tmio_mmc_host *host = arg;
421
422 dev_dbg(&host->pdev->dev, "Command completed\n");
423
424 if (!host->data)
425 dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
426 else
427 enable_mmc_irqs(host, TMIO_STAT_DATAEND);
428}
429
430static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
431{
432 struct scatterlist *sg = host->sg_ptr;
433 struct dma_async_tx_descriptor *desc = NULL;
434 struct dma_chan *chan = host->chan_rx;
435 int ret;
436
437 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
438 if (ret > 0) {
439 host->dma_sglen = ret;
440 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
441 DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
442 }
443
444 if (desc) {
445 host->desc = desc;
446 desc->callback = tmio_dma_complete;
447 desc->callback_param = host;
448 host->cookie = desc->tx_submit(desc);
449 if (host->cookie < 0) {
450 host->desc = NULL;
451 ret = host->cookie;
452 } else {
453 chan->device->device_issue_pending(chan);
454 }
455 }
456 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
457 __func__, host->sg_len, ret, host->cookie, host->mrq);
458
459 if (!host->desc) {
460 /* DMA failed, fall back to PIO */
461 if (ret >= 0)
462 ret = -EIO;
463 host->chan_rx = NULL;
464 dma_release_channel(chan);
465 /* Free the Tx channel too */
466 chan = host->chan_tx;
467 if (chan) {
468 host->chan_tx = NULL;
469 dma_release_channel(chan);
470 }
471 dev_warn(&host->pdev->dev,
472 "DMA failed: %d, falling back to PIO\n", ret);
473 tmio_mmc_enable_dma(host, false);
474 reset(host);
475 /* Fail this request, let above layers recover */
476 host->mrq->cmd->error = ret;
477 tmio_mmc_finish_request(host);
478 }
479
480 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
481 desc, host->cookie, host->sg_len);
482
483 return ret > 0 ? 0 : ret;
484}
485
486static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
487{
488 struct scatterlist *sg = host->sg_ptr;
489 struct dma_async_tx_descriptor *desc = NULL;
490 struct dma_chan *chan = host->chan_tx;
491 int ret;
492
493 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
494 if (ret > 0) {
495 host->dma_sglen = ret;
496 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
497 DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
498 }
499
500 if (desc) {
501 host->desc = desc;
502 desc->callback = tmio_dma_complete;
503 desc->callback_param = host;
504 host->cookie = desc->tx_submit(desc);
505 if (host->cookie < 0) {
506 host->desc = NULL;
507 ret = host->cookie;
508 }
509 }
510 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
511 __func__, host->sg_len, ret, host->cookie, host->mrq);
512
513 if (!host->desc) {
514 /* DMA failed, fall back to PIO */
515 if (ret >= 0)
516 ret = -EIO;
517 host->chan_tx = NULL;
518 dma_release_channel(chan);
519 /* Free the Rx channel too */
520 chan = host->chan_rx;
521 if (chan) {
522 host->chan_rx = NULL;
523 dma_release_channel(chan);
524 }
525 dev_warn(&host->pdev->dev,
526 "DMA failed: %d, falling back to PIO\n", ret);
527 tmio_mmc_enable_dma(host, false);
528 reset(host);
529 /* Fail this request, let above layers recover */
530 host->mrq->cmd->error = ret;
531 tmio_mmc_finish_request(host);
532 }
533
534 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
535 desc, host->cookie);
536
537 return ret > 0 ? 0 : ret;
538}
539
540static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
541 struct mmc_data *data)
542{
543 if (data->flags & MMC_DATA_READ) {
544 if (host->chan_rx)
545 return tmio_mmc_start_dma_rx(host);
546 } else {
547 if (host->chan_tx)
548 return tmio_mmc_start_dma_tx(host);
549 }
550
551 return 0;
552}
553
554static void tmio_issue_tasklet_fn(unsigned long priv)
555{
556 struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
557 struct dma_chan *chan = host->chan_tx;
558
559 chan->device->device_issue_pending(chan);
560}
561
562static void tmio_tasklet_fn(unsigned long arg)
563{
564 struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
565
566 if (host->data->flags & MMC_DATA_READ)
567 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
568 DMA_FROM_DEVICE);
569 else
570 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
571 DMA_TO_DEVICE);
572
573 tmio_mmc_do_data_irq(host);
574}
575
576/* It might be necessary to make filter MFD specific */
577static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
578{
579 dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
580 chan->private = arg;
581 return true;
582}
583
584static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
585 struct tmio_mmc_data *pdata)
586{
587 host->cookie = -EINVAL;
588 host->desc = NULL;
589
590 /* We can only either use DMA for both Tx and Rx or not use it at all */
591 if (pdata->dma) {
592 dma_cap_mask_t mask;
593
594 dma_cap_zero(mask);
595 dma_cap_set(DMA_SLAVE, mask);
596
597 host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
598 pdata->dma->chan_priv_tx);
599 dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
600 host->chan_tx);
601
602 if (!host->chan_tx)
603 return;
604
605 host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
606 pdata->dma->chan_priv_rx);
607 dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
608 host->chan_rx);
609
610 if (!host->chan_rx) {
611 dma_release_channel(host->chan_tx);
612 host->chan_tx = NULL;
613 return;
614 }
615
616 tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
617 tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);
618
619 tmio_mmc_enable_dma(host, true);
620 }
621}
622
623static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
624{
625 if (host->chan_tx) {
626 struct dma_chan *chan = host->chan_tx;
627 host->chan_tx = NULL;
628 dma_release_channel(chan);
629 }
630 if (host->chan_rx) {
631 struct dma_chan *chan = host->chan_rx;
632 host->chan_rx = NULL;
633 dma_release_channel(chan);
634 }
635
636 host->cookie = -EINVAL;
637 host->desc = NULL;
638}
639#else
640static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
641 struct mmc_data *data)
642{
643 return 0;
644}
645
646static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
647 struct tmio_mmc_data *pdata)
648{
649 host->chan_tx = NULL;
650 host->chan_rx = NULL;
651}
652
653static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
654{
655}
656#endif
657
658static int tmio_mmc_start_data(struct tmio_mmc_host *host,
659 struct mmc_data *data)
660{
661 struct mfd_cell *cell = host->pdev->dev.platform_data;
662 struct tmio_mmc_data *pdata = cell->driver_data;
663
664 pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
665 data->blksz, data->blocks);
666
667 /* Some hardware cannot perform 2 byte requests in 4 bit mode */
668 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
669 int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;
670
671 if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
672 pr_err("%s: %d byte block unsupported in 4 bit mode\n",
673 mmc_hostname(host->mmc), data->blksz);
674 return -EINVAL;
675 }
676 }
677
678 tmio_mmc_init_sg(host, data);
679 host->data = data;
680
681 /* Set transfer length / blocksize */
682 sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
683 sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
684
685 return tmio_mmc_start_dma(host, data);
686}
687
688/* Process requests from the MMC layer */
689static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
690{
691 struct tmio_mmc_host *host = mmc_priv(mmc);
692 int ret;
693
694 if (host->mrq)
695 pr_debug("request not null\n");
696
697 host->mrq = mrq;
698
699 if (mrq->data) {
700 ret = tmio_mmc_start_data(host, mrq->data);
701 if (ret)
702 goto fail;
703 }
704
705 ret = tmio_mmc_start_command(host, mrq->cmd);
706 if (!ret)
707 return;
708
709fail:
710 mrq->cmd->error = ret;
711 mmc_request_done(mmc, mrq);
712}
713
714/* Set MMC clock / power.
715 * Note: This controller uses a simple divider scheme therefore it cannot
716 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
717 * MMC wont run that fast, it has to be clocked at 12MHz which is the next
718 * slowest setting.
719 */
720static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
721{
722 struct tmio_mmc_host *host = mmc_priv(mmc);
723
724 if (ios->clock)
725 tmio_mmc_set_clock(host, ios->clock);
726
727 /* Power sequence - OFF -> ON -> UP */
728 switch (ios->power_mode) {
729 case MMC_POWER_OFF: /* power down SD bus */
730 if (host->set_pwr)
731 host->set_pwr(host->pdev, 0);
732 tmio_mmc_clk_stop(host);
733 break;
734 case MMC_POWER_ON: /* power up SD bus */
735 if (host->set_pwr)
736 host->set_pwr(host->pdev, 1);
737 break;
738 case MMC_POWER_UP: /* start bus clock */
739 tmio_mmc_clk_start(host);
740 break;
741 }
742
743 switch (ios->bus_width) {
744 case MMC_BUS_WIDTH_1:
745 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
746 break;
747 case MMC_BUS_WIDTH_4:
748 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
749 break;
750 }
751
752 /* Let things settle. delay taken from winCE driver */
753 udelay(140);
754}
755
756static int tmio_mmc_get_ro(struct mmc_host *mmc)
757{
758 struct tmio_mmc_host *host = mmc_priv(mmc);
759 struct mfd_cell *cell = host->pdev->dev.platform_data;
760 struct tmio_mmc_data *pdata = cell->driver_data;
761
762 return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
763 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
764}
765
766static int tmio_mmc_get_cd(struct mmc_host *mmc)
767{
768 struct tmio_mmc_host *host = mmc_priv(mmc);
769 struct mfd_cell *cell = host->pdev->dev.platform_data;
770 struct tmio_mmc_data *pdata = cell->driver_data;
771
772 if (!pdata->get_cd)
773 return -ENOSYS;
774 else
775 return pdata->get_cd(host->pdev);
776}
777
778static const struct mmc_host_ops tmio_mmc_ops = {
779 .request = tmio_mmc_request,
780 .set_ios = tmio_mmc_set_ios,
781 .get_ro = tmio_mmc_get_ro,
782 .get_cd = tmio_mmc_get_cd,
783};
784
785#ifdef CONFIG_PM 26#ifdef CONFIG_PM
786static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) 27static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
787{ 28{
788 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 29 const struct mfd_cell *cell = mfd_get_cell(dev);
789 struct mmc_host *mmc = platform_get_drvdata(dev); 30 struct mmc_host *mmc = platform_get_drvdata(dev);
790 int ret; 31 int ret;
791 32
@@ -800,7 +41,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
800 41
801static int tmio_mmc_resume(struct platform_device *dev) 42static int tmio_mmc_resume(struct platform_device *dev)
802{ 43{
803 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 44 const struct mfd_cell *cell = mfd_get_cell(dev);
804 struct mmc_host *mmc = platform_get_drvdata(dev); 45 struct mmc_host *mmc = platform_get_drvdata(dev);
805 int ret = 0; 46 int ret = 0;
806 47
@@ -821,125 +62,54 @@ out:
821#define tmio_mmc_resume NULL 62#define tmio_mmc_resume NULL
822#endif 63#endif
823 64
824static int __devinit tmio_mmc_probe(struct platform_device *dev) 65static int __devinit tmio_mmc_probe(struct platform_device *pdev)
825{ 66{
826 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 67 const struct mfd_cell *cell = mfd_get_cell(pdev);
827 struct tmio_mmc_data *pdata; 68 struct tmio_mmc_data *pdata;
828 struct resource *res_ctl;
829 struct tmio_mmc_host *host; 69 struct tmio_mmc_host *host;
830 struct mmc_host *mmc;
831 int ret = -EINVAL; 70 int ret = -EINVAL;
832 u32 irq_mask = TMIO_MASK_CMD;
833 71
834 if (dev->num_resources != 2) 72 if (pdev->num_resources != 2)
835 goto out; 73 goto out;
836 74
837 res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); 75 pdata = mfd_get_data(pdev);
838 if (!res_ctl)
839 goto out;
840
841 pdata = cell->driver_data;
842 if (!pdata || !pdata->hclk) 76 if (!pdata || !pdata->hclk)
843 goto out; 77 goto out;
844 78
845 ret = -ENOMEM;
846
847 mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
848 if (!mmc)
849 goto out;
850
851 host = mmc_priv(mmc);
852 host->mmc = mmc;
853 host->pdev = dev;
854 platform_set_drvdata(dev, mmc);
855
856 host->set_pwr = pdata->set_pwr;
857 host->set_clk_div = pdata->set_clk_div;
858
859 /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
860 host->bus_shift = resource_size(res_ctl) >> 10;
861
862 host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
863 if (!host->ctl)
864 goto host_free;
865
866 mmc->ops = &tmio_mmc_ops;
867 mmc->caps = MMC_CAP_4_BIT_DATA;
868 mmc->caps |= pdata->capabilities;
869 mmc->f_max = pdata->hclk;
870 mmc->f_min = mmc->f_max / 512;
871 if (pdata->ocr_mask)
872 mmc->ocr_avail = pdata->ocr_mask;
873 else
874 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
875
876 /* Tell the MFD core we are ready to be enabled */ 79 /* Tell the MFD core we are ready to be enabled */
877 if (cell->enable) { 80 if (cell->enable) {
878 ret = cell->enable(dev); 81 ret = cell->enable(pdev);
879 if (ret) 82 if (ret)
880 goto unmap_ctl; 83 goto out;
881 } 84 }
882 85
883 tmio_mmc_clk_stop(host); 86 ret = tmio_mmc_host_probe(&host, pdev, pdata);
884 reset(host);
885
886 ret = platform_get_irq(dev, 0);
887 if (ret >= 0)
888 host->irq = ret;
889 else
890 goto cell_disable;
891
892 disable_mmc_irqs(host, TMIO_MASK_ALL);
893
894 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
895 IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
896 if (ret) 87 if (ret)
897 goto cell_disable; 88 goto cell_disable;
898 89
899 /* See if we also get DMA */
900 tmio_mmc_request_dma(host, pdata);
901
902 mmc_add_host(mmc);
903
904 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), 90 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
905 (unsigned long)host->ctl, host->irq); 91 (unsigned long)host->ctl, host->irq);
906 92
907 /* Unmask the IRQs we want to know about */
908 if (!host->chan_rx)
909 irq_mask |= TMIO_MASK_READOP;
910 if (!host->chan_tx)
911 irq_mask |= TMIO_MASK_WRITEOP;
912 enable_mmc_irqs(host, irq_mask);
913
914 return 0; 93 return 0;
915 94
916cell_disable: 95cell_disable:
917 if (cell->disable) 96 if (cell->disable)
918 cell->disable(dev); 97 cell->disable(pdev);
919unmap_ctl:
920 iounmap(host->ctl);
921host_free:
922 mmc_free_host(mmc);
923out: 98out:
924 return ret; 99 return ret;
925} 100}
926 101
927static int __devexit tmio_mmc_remove(struct platform_device *dev) 102static int __devexit tmio_mmc_remove(struct platform_device *pdev)
928{ 103{
929 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 104 const struct mfd_cell *cell = mfd_get_cell(pdev);
930 struct mmc_host *mmc = platform_get_drvdata(dev); 105 struct mmc_host *mmc = platform_get_drvdata(pdev);
931 106
932 platform_set_drvdata(dev, NULL); 107 platform_set_drvdata(pdev, NULL);
933 108
934 if (mmc) { 109 if (mmc) {
935 struct tmio_mmc_host *host = mmc_priv(mmc); 110 tmio_mmc_host_remove(mmc_priv(mmc));
936 mmc_remove_host(mmc);
937 tmio_mmc_release_dma(host);
938 free_irq(host->irq, host);
939 if (cell->disable) 111 if (cell->disable)
940 cell->disable(dev); 112 cell->disable(pdev);
941 iounmap(host->ctl);
942 mmc_free_host(mmc);
943 } 113 }
944 114
945 return 0; 115 return 0;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 0fedc78e3ea5..099ed49a259b 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -1,58 +1,30 @@
1/* Definitons for use with the tmio_mmc.c 1/*
2 * linux/drivers/mmc/host/tmio_mmc.h
2 * 3 *
3 * (c) 2004 Ian Molton <spyro@f2s.com> 4 * Copyright (C) 2007 Ian Molton
4 * (c) 2007 Ian Molton <spyro@f2s.com> 5 * Copyright (C) 2004 Ian Molton
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
9 * 10 *
11 * Driver for the MMC / SD / SDIO cell found in:
12 *
13 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
10 */ 14 */
11 15
16#ifndef TMIO_MMC_H
17#define TMIO_MMC_H
18
12#include <linux/highmem.h> 19#include <linux/highmem.h>
13#include <linux/interrupt.h> 20#include <linux/mmc/tmio.h>
14#include <linux/dmaengine.h> 21#include <linux/pagemap.h>
15 22
16#define CTL_SD_CMD 0x00 23/* Definitions for values the CTRL_SDIO_STATUS register can take. */
17#define CTL_ARG_REG 0x04 24#define TMIO_SDIO_STAT_IOIRQ 0x0001
18#define CTL_STOP_INTERNAL_ACTION 0x08 25#define TMIO_SDIO_STAT_EXPUB52 0x4000
19#define CTL_XFER_BLK_COUNT 0xa 26#define TMIO_SDIO_STAT_EXWT 0x8000
20#define CTL_RESPONSE 0x0c 27#define TMIO_SDIO_MASK_ALL 0xc007
21#define CTL_STATUS 0x1c
22#define CTL_IRQ_MASK 0x20
23#define CTL_SD_CARD_CLK_CTL 0x24
24#define CTL_SD_XFER_LEN 0x26
25#define CTL_SD_MEM_CARD_OPT 0x28
26#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
27#define CTL_SD_DATA_PORT 0x30
28#define CTL_TRANSACTION_CTL 0x34
29#define CTL_RESET_SD 0xe0
30#define CTL_SDIO_REGS 0x100
31#define CTL_CLK_AND_WAIT_CTL 0x138
32#define CTL_RESET_SDIO 0x1e0
33
34/* Definitions for values the CTRL_STATUS register can take. */
35#define TMIO_STAT_CMDRESPEND 0x00000001
36#define TMIO_STAT_DATAEND 0x00000004
37#define TMIO_STAT_CARD_REMOVE 0x00000008
38#define TMIO_STAT_CARD_INSERT 0x00000010
39#define TMIO_STAT_SIGSTATE 0x00000020
40#define TMIO_STAT_WRPROTECT 0x00000080
41#define TMIO_STAT_CARD_REMOVE_A 0x00000100
42#define TMIO_STAT_CARD_INSERT_A 0x00000200
43#define TMIO_STAT_SIGSTATE_A 0x00000400
44#define TMIO_STAT_CMD_IDX_ERR 0x00010000
45#define TMIO_STAT_CRCFAIL 0x00020000
46#define TMIO_STAT_STOPBIT_ERR 0x00040000
47#define TMIO_STAT_DATATIMEOUT 0x00080000
48#define TMIO_STAT_RXOVERFLOW 0x00100000
49#define TMIO_STAT_TXUNDERRUN 0x00200000
50#define TMIO_STAT_CMDTIMEOUT 0x00400000
51#define TMIO_STAT_RXRDY 0x01000000
52#define TMIO_STAT_TXRQ 0x02000000
53#define TMIO_STAT_ILL_FUNC 0x20000000
54#define TMIO_STAT_CMD_BUSY 0x40000000
55#define TMIO_STAT_ILL_ACCESS 0x80000000
56 28
57/* Define some IRQ masks */ 29/* Define some IRQ masks */
58/* This is the mask used at reset by the chip */ 30/* This is the mask used at reset by the chip */
@@ -63,28 +35,7 @@
63 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) 35 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
64#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) 36#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
65 37
66 38struct tmio_mmc_data;
67#define enable_mmc_irqs(host, i) \
68 do { \
69 u32 mask;\
70 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
71 mask &= ~((i) & TMIO_MASK_IRQ); \
72 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
73 } while (0)
74
75#define disable_mmc_irqs(host, i) \
76 do { \
77 u32 mask;\
78 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
79 mask |= ((i) & TMIO_MASK_IRQ); \
80 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
81 } while (0)
82
83#define ack_mmc_irqs(host, i) \
84 do { \
85 sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
86 } while (0)
87
88 39
89struct tmio_mmc_host { 40struct tmio_mmc_host {
90 void __iomem *ctl; 41 void __iomem *ctl;
@@ -94,6 +45,7 @@ struct tmio_mmc_host {
94 struct mmc_data *data; 45 struct mmc_data *data;
95 struct mmc_host *mmc; 46 struct mmc_host *mmc;
96 int irq; 47 int irq;
48 unsigned int sdio_irq_enabled;
97 49
98 /* Callbacks for clock / power control */ 50 /* Callbacks for clock / power control */
99 void (*set_pwr)(struct platform_device *host, int state); 51 void (*set_pwr)(struct platform_device *host, int state);
@@ -101,128 +53,71 @@ struct tmio_mmc_host {
101 53
102 /* pio related stuff */ 54 /* pio related stuff */
103 struct scatterlist *sg_ptr; 55 struct scatterlist *sg_ptr;
56 struct scatterlist *sg_orig;
104 unsigned int sg_len; 57 unsigned int sg_len;
105 unsigned int sg_off; 58 unsigned int sg_off;
106 59
107 struct platform_device *pdev; 60 struct platform_device *pdev;
61 struct tmio_mmc_data *pdata;
108 62
109 /* DMA support */ 63 /* DMA support */
64 bool force_pio;
110 struct dma_chan *chan_rx; 65 struct dma_chan *chan_rx;
111 struct dma_chan *chan_tx; 66 struct dma_chan *chan_tx;
112 struct tasklet_struct dma_complete; 67 struct tasklet_struct dma_complete;
113 struct tasklet_struct dma_issue; 68 struct tasklet_struct dma_issue;
114#ifdef CONFIG_TMIO_MMC_DMA 69 struct scatterlist bounce_sg;
115 struct dma_async_tx_descriptor *desc; 70 u8 *bounce_buf;
116 unsigned int dma_sglen;
117 dma_cookie_t cookie;
118#endif
119};
120
121#include <linux/io.h>
122
123static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
124{
125 return readw(host->ctl + (addr << host->bus_shift));
126}
127
128static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
129 u16 *buf, int count)
130{
131 readsw(host->ctl + (addr << host->bus_shift), buf, count);
132}
133
134static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
135{
136 return readw(host->ctl + (addr << host->bus_shift)) |
137 readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
138}
139
140static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
141 u16 val)
142{
143 writew(val, host->ctl + (addr << host->bus_shift));
144}
145
146static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
147 u16 *buf, int count)
148{
149 writesw(host->ctl + (addr << host->bus_shift), buf, count);
150}
151 71
152static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, 72 /* Track lost interrupts */
153 u32 val) 73 struct delayed_work delayed_reset_work;
154{ 74 spinlock_t lock;
155 writew(val, host->ctl + (addr << host->bus_shift)); 75 unsigned long last_req_ts;
156 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); 76};
157}
158
159#include <linux/scatterlist.h>
160#include <linux/blkdev.h>
161 77
162static inline void tmio_mmc_init_sg(struct tmio_mmc_host *host, 78int tmio_mmc_host_probe(struct tmio_mmc_host **host,
163 struct mmc_data *data) 79 struct platform_device *pdev,
164{ 80 struct tmio_mmc_data *pdata);
165 host->sg_len = data->sg_len; 81void tmio_mmc_host_remove(struct tmio_mmc_host *host);
166 host->sg_ptr = data->sg; 82void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
167 host->sg_off = 0;
168}
169 83
170static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host) 84void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
171{ 85void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
172 host->sg_ptr = sg_next(host->sg_ptr);
173 host->sg_off = 0;
174 return --host->sg_len;
175}
176 86
177static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, 87static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
178 unsigned long *flags) 88 unsigned long *flags)
179{ 89{
180 local_irq_save(*flags); 90 local_irq_save(*flags);
181 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 91 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
182} 92}
183 93
184static inline void tmio_mmc_kunmap_atomic(void *virt, 94static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
185 unsigned long *flags) 95 unsigned long *flags, void *virt)
186{ 96{
187 kunmap_atomic(virt, KM_BIO_SRC_IRQ); 97 kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
188 local_irq_restore(*flags); 98 local_irq_restore(*flags);
189} 99}
190 100
191#ifdef CONFIG_MMC_DEBUG 101#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
102void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
103void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
104void tmio_mmc_release_dma(struct tmio_mmc_host *host);
105#else
106static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
107 struct mmc_data *data)
108{
109}
192 110
193#define STATUS_TO_TEXT(a) \ 111static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
194 do { \ 112 struct tmio_mmc_data *pdata)
195 if (status & TMIO_STAT_##a) \ 113{
196 printk(#a); \ 114 host->chan_tx = NULL;
197 } while (0) 115 host->chan_rx = NULL;
116}
198 117
199void pr_debug_status(u32 status) 118static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
200{ 119{
201 printk(KERN_DEBUG "status: %08x = ", status);
202 STATUS_TO_TEXT(CARD_REMOVE);
203 STATUS_TO_TEXT(CARD_INSERT);
204 STATUS_TO_TEXT(SIGSTATE);
205 STATUS_TO_TEXT(WRPROTECT);
206 STATUS_TO_TEXT(CARD_REMOVE_A);
207 STATUS_TO_TEXT(CARD_INSERT_A);
208 STATUS_TO_TEXT(SIGSTATE_A);
209 STATUS_TO_TEXT(CMD_IDX_ERR);
210 STATUS_TO_TEXT(STOPBIT_ERR);
211 STATUS_TO_TEXT(ILL_FUNC);
212 STATUS_TO_TEXT(CMD_BUSY);
213 STATUS_TO_TEXT(CMDRESPEND);
214 STATUS_TO_TEXT(DATAEND);
215 STATUS_TO_TEXT(CRCFAIL);
216 STATUS_TO_TEXT(DATATIMEOUT);
217 STATUS_TO_TEXT(CMDTIMEOUT);
218 STATUS_TO_TEXT(RXOVERFLOW);
219 STATUS_TO_TEXT(TXUNDERRUN);
220 STATUS_TO_TEXT(RXRDY);
221 STATUS_TO_TEXT(TXRQ);
222 STATUS_TO_TEXT(ILL_ACCESS);
223 printk("\n");
224} 120}
121#endif
225 122
226#else
227#define pr_debug_status(s) do { } while (0)
228#endif 123#endif
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
new file mode 100644
index 000000000000..d3de74ab633e
--- /dev/null
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -0,0 +1,317 @@
1/*
2 * linux/drivers/mmc/tmio_mmc_dma.c
3 *
4 * Copyright (C) 2010-2011 Guennadi Liakhovetski
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * DMA function for TMIO MMC implementations
11 */
12
13#include <linux/device.h>
14#include <linux/dmaengine.h>
15#include <linux/mfd/tmio.h>
16#include <linux/mmc/host.h>
17#include <linux/mmc/tmio.h>
18#include <linux/pagemap.h>
19#include <linux/scatterlist.h>
20
21#include "tmio_mmc.h"
22
23#define TMIO_MMC_MIN_DMA_LEN 8
24
/*
 * Switch the controller between DMA and PIO transfer mode.
 * Register 0xd8 is outside the common TMIO register map; per the original
 * comment it is believed to be SuperH/SH-Mobile specific, hence the #ifdef.
 */
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
#endif
}
32
33static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
34{
35 struct scatterlist *sg = host->sg_ptr, *sg_tmp;
36 struct dma_async_tx_descriptor *desc = NULL;
37 struct dma_chan *chan = host->chan_rx;
38 struct tmio_mmc_data *pdata = host->pdata;
39 dma_cookie_t cookie;
40 int ret, i;
41 bool aligned = true, multiple = true;
42 unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
43
44 for_each_sg(sg, sg_tmp, host->sg_len, i) {
45 if (sg_tmp->offset & align)
46 aligned = false;
47 if (sg_tmp->length & align) {
48 multiple = false;
49 break;
50 }
51 }
52
53 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
54 (align & PAGE_MASK))) || !multiple) {
55 ret = -EINVAL;
56 goto pio;
57 }
58
59 if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
60 host->force_pio = true;
61 return;
62 }
63
64 tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);
65
66 /* The only sg element can be unaligned, use our bounce buffer then */
67 if (!aligned) {
68 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
69 host->sg_ptr = &host->bounce_sg;
70 sg = host->sg_ptr;
71 }
72
73 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
74 if (ret > 0)
75 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
76 DMA_FROM_DEVICE, DMA_CTRL_ACK);
77
78 if (desc) {
79 cookie = dmaengine_submit(desc);
80 if (cookie < 0) {
81 desc = NULL;
82 ret = cookie;
83 }
84 }
85 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
86 __func__, host->sg_len, ret, cookie, host->mrq);
87
88pio:
89 if (!desc) {
90 /* DMA failed, fall back to PIO */
91 if (ret >= 0)
92 ret = -EIO;
93 host->chan_rx = NULL;
94 dma_release_channel(chan);
95 /* Free the Tx channel too */
96 chan = host->chan_tx;
97 if (chan) {
98 host->chan_tx = NULL;
99 dma_release_channel(chan);
100 }
101 dev_warn(&host->pdev->dev,
102 "DMA failed: %d, falling back to PIO\n", ret);
103 tmio_mmc_enable_dma(host, false);
104 }
105
106 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
107 desc, cookie, host->sg_len);
108}
109
110static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
111{
112 struct scatterlist *sg = host->sg_ptr, *sg_tmp;
113 struct dma_async_tx_descriptor *desc = NULL;
114 struct dma_chan *chan = host->chan_tx;
115 struct tmio_mmc_data *pdata = host->pdata;
116 dma_cookie_t cookie;
117 int ret, i;
118 bool aligned = true, multiple = true;
119 unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
120
121 for_each_sg(sg, sg_tmp, host->sg_len, i) {
122 if (sg_tmp->offset & align)
123 aligned = false;
124 if (sg_tmp->length & align) {
125 multiple = false;
126 break;
127 }
128 }
129
130 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
131 (align & PAGE_MASK))) || !multiple) {
132 ret = -EINVAL;
133 goto pio;
134 }
135
136 if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
137 host->force_pio = true;
138 return;
139 }
140
141 tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);
142
143 /* The only sg element can be unaligned, use our bounce buffer then */
144 if (!aligned) {
145 unsigned long flags;
146 void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
147 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
148 memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
149 tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
150 host->sg_ptr = &host->bounce_sg;
151 sg = host->sg_ptr;
152 }
153
154 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
155 if (ret > 0)
156 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
157 DMA_TO_DEVICE, DMA_CTRL_ACK);
158
159 if (desc) {
160 cookie = dmaengine_submit(desc);
161 if (cookie < 0) {
162 desc = NULL;
163 ret = cookie;
164 }
165 }
166 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
167 __func__, host->sg_len, ret, cookie, host->mrq);
168
169pio:
170 if (!desc) {
171 /* DMA failed, fall back to PIO */
172 if (ret >= 0)
173 ret = -EIO;
174 host->chan_tx = NULL;
175 dma_release_channel(chan);
176 /* Free the Rx channel too */
177 chan = host->chan_rx;
178 if (chan) {
179 host->chan_rx = NULL;
180 dma_release_channel(chan);
181 }
182 dev_warn(&host->pdev->dev,
183 "DMA failed: %d, falling back to PIO\n", ret);
184 tmio_mmc_enable_dma(host, false);
185 }
186
187 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
188 desc, cookie);
189}
190
191void tmio_mmc_start_dma(struct tmio_mmc_host *host,
192 struct mmc_data *data)
193{
194 if (data->flags & MMC_DATA_READ) {
195 if (host->chan_rx)
196 tmio_mmc_start_dma_rx(host);
197 } else {
198 if (host->chan_tx)
199 tmio_mmc_start_dma_tx(host);
200 }
201}
202
203static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
204{
205 struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
206 struct dma_chan *chan = NULL;
207
208 spin_lock_irq(&host->lock);
209
210 if (host && host->data) {
211 if (host->data->flags & MMC_DATA_READ)
212 chan = host->chan_rx;
213 else
214 chan = host->chan_tx;
215 }
216
217 spin_unlock_irq(&host->lock);
218
219 tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
220
221 if (chan)
222 dma_async_issue_pending(chan);
223}
224
/*
 * DMA completion tasklet: unmap the scatterlist in the direction of the
 * finished transfer and run the common data-end handling.  host->data is
 * checked under the lock because the request may already have been torn
 * down by another path before the tasklet ran.
 */
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	/* Common completion handling, shared with the PIO path */
	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}
247
248/* It might be necessary to make filter MFD specific */
249static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
250{
251 dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
252 chan->private = arg;
253 return true;
254}
255
256void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
257{
258 /* We can only either use DMA for both Tx and Rx or not use it at all */
259 if (pdata->dma) {
260 dma_cap_mask_t mask;
261
262 dma_cap_zero(mask);
263 dma_cap_set(DMA_SLAVE, mask);
264
265 host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
266 pdata->dma->chan_priv_tx);
267 dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
268 host->chan_tx);
269
270 if (!host->chan_tx)
271 return;
272
273 host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
274 pdata->dma->chan_priv_rx);
275 dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
276 host->chan_rx);
277
278 if (!host->chan_rx)
279 goto ereqrx;
280
281 host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
282 if (!host->bounce_buf)
283 goto ebouncebuf;
284
285 tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
286 tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
287
288 tmio_mmc_enable_dma(host, true);
289
290 return;
291ebouncebuf:
292 dma_release_channel(host->chan_rx);
293 host->chan_rx = NULL;
294ereqrx:
295 dma_release_channel(host->chan_tx);
296 host->chan_tx = NULL;
297 return;
298 }
299}
300
301void tmio_mmc_release_dma(struct tmio_mmc_host *host)
302{
303 if (host->chan_tx) {
304 struct dma_chan *chan = host->chan_tx;
305 host->chan_tx = NULL;
306 dma_release_channel(chan);
307 }
308 if (host->chan_rx) {
309 struct dma_chan *chan = host->chan_rx;
310 host->chan_rx = NULL;
311 dma_release_channel(chan);
312 }
313 if (host->bounce_buf) {
314 free_pages((unsigned long)host->bounce_buf, 0);
315 host->bounce_buf = NULL;
316 }
317}
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
new file mode 100644
index 000000000000..710339a85c84
--- /dev/null
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -0,0 +1,897 @@
1/*
2 * linux/drivers/mmc/host/tmio_mmc_pio.c
3 *
4 * Copyright (C) 2011 Guennadi Liakhovetski
5 * Copyright (C) 2007 Ian Molton
6 * Copyright (C) 2004 Ian Molton
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Driver for the MMC / SD / SDIO IP found in:
13 *
14 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
15 *
16 * This driver draws mainly on scattered spec sheets, Reverse engineering
17 * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
18 * support). (Further 4 bit support from a later datasheet).
19 *
20 * TODO:
21 * Investigate using a workqueue for PIO transfers
22 * Eliminate FIXMEs
23 * SDIO support
24 * Better Power management
25 * Handle MMC errors better
26 * double buffer support
27 *
28 */
29
30#include <linux/delay.h>
31#include <linux/device.h>
32#include <linux/highmem.h>
33#include <linux/interrupt.h>
34#include <linux/io.h>
35#include <linux/irq.h>
36#include <linux/mfd/tmio.h>
37#include <linux/mmc/host.h>
38#include <linux/mmc/tmio.h>
39#include <linux/module.h>
40#include <linux/pagemap.h>
41#include <linux/platform_device.h>
42#include <linux/scatterlist.h>
43#include <linux/workqueue.h>
44#include <linux/spinlock.h>
45
46#include "tmio_mmc.h"
47
/* 16-bit MMIO read; bus_shift spaces registers out on wide-bus variants. */
static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift));
}
52
/* Repeatedly read the same 16-bit register into buf, 'count' times. */
static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	readsw(host->ctl + (addr << host->bus_shift), buf, count);
}
58
/* 32-bit read composed of two 16-bit reads: low word first, then high. */
static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift)) |
	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
64
/* 16-bit MMIO write; bus_shift spaces registers out on wide-bus variants. */
static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
}
69
/* Repeatedly write 'count' 16-bit words from buf to the same register. */
static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	writesw(host->ctl + (addr << host->bus_shift), buf, count);
}
75
/* 32-bit write split into two 16-bit writes: low word first, then high. */
static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
81
/* Unmask the interrupts in 'i' by clearing their bits in CTL_IRQ_MASK. */
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}
87
/* Mask the interrupts in 'i' by setting their bits in CTL_IRQ_MASK. */
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}
93
/* Acknowledge the status bits in 'i' by writing their complement to CTL_STATUS. */
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}
98
/*
 * Latch a request's scatterlist into the host's PIO cursor state.
 * sg_orig keeps the original list head while sg_ptr/sg_off advance.
 */
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}
106
/* Advance to the next scatterlist element; returns the remaining count. */
static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}
113
/*
 * Debug helper: decode a CTL_STATUS value into the symbolic names of all
 * set bits, " | "-separated.  Compiles to a no-op without CONFIG_MMC_DEBUG.
 */
#ifdef CONFIG_MMC_DEBUG

/* Print the bit's name, prefixed by a separator for all but the first hit */
#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s) do { } while (0)
#endif
156
/*
 * Enable/disable the SDIO card interrupt.  On enable: switch transaction
 * control on first, then unmask only the IOIRQ bit; disable reverses that
 * order (mask everything, then switch transaction control off).
 * host->sdio_irq_enabled mirrors the current state.
 */
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
	} else {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
		host->sdio_irq_enabled = 0;
	}
}
172
/*
 * Program the SD clock divider for 'new_clock' Hz.  Starting at f_min with
 * the largest divider bit set (0x80), halve the divider while the target is
 * at least double the current frequency; 0x100 is the clock-enable bit
 * (cf. tmio_mmc_clk_start/stop).  new_clock == 0 leaves clk == 0, i.e. the
 * clock disabled.
 */
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
				new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	/* NOTE(review): bit 22 appears to select a platform clock divider -
	 * inferred from the set_clk_div() call; confirm against the datasheet */
	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
189
/*
 * Gate the SD bus clock.  A register window larger than 0x100 is treated as
 * having the CLK_AND_WAIT control, which is switched off first; then the
 * clock-enable bit (0x0100) is cleared in CTL_SD_CARD_CLK_CTL.
 */
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}
204
/*
 * Ungate the SD bus clock: set the clock-enable bit (0x0100), then switch
 * the CLK_AND_WAIT control on where present (register window > 0x100).
 * Mirror image of tmio_mmc_clk_stop(), with the steps in reverse order.
 */
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}
219
/*
 * Hard-reset the SD (and, where the register window indicates one, SDIO)
 * part of the controller: pulse the reset registers low then high, with
 * 10 ms settling delays.
 */
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
235
/*
 * Lost-interrupt watchdog (delayed work): if a request is still pending
 * roughly 2 seconds after it was issued (host->last_req_ts), fail whichever
 * stage was in flight with -ETIMEDOUT, reset the controller and complete
 * the request.  All request state is torn down under host->lock; the reset
 * and completion happen after the lock is dropped.
 */
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/* request already finished */
	if (!mrq
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	/* Mark the most advanced stage still in flight as timed out */
	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->mrq = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	mmc_request_done(host->mmc, mrq);
}
276
/*
 * Complete the current mmc_request: clear all per-request state, cancel the
 * lost-interrupt watchdog, then notify the MMC core.  Returns silently when
 * no request is pending.
 */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	if (!mrq)
		return;

	/* Clear state before mmc_request_done() - it may start a new request */
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	mmc_request_done(host->mmc, mrq);
}
293
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
/* The bits below are OR-ed into the command opcode before it is written to
 * CTL_SD_CMD in tmio_mmc_start_command(). */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
306
/*
 * Issue one MMC command: build the controller's command word from the
 * opcode, the response-type bits and the data-transfer bits, then write
 * the argument and command registers.  host->data must already be set up
 * (via tmio_mmc_start_data()) when the command carries data.
 * Returns 0 on success, -EINVAL for an unsupported response type.
 */
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	/* Published before the IRQs are unmasked; consumed by tmio_mmc_cmd_irq(). */
	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggest this bit
 * should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			/*
			 * NOTE(review): 0x100 here presumably enables the
			 * chip's automatic stop (CMD12) generation for
			 * multi-block transfers - see the auto-CMD12 remark
			 * in tmio_mmc_do_data_irq().
			 */
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	/* Unmask the command-completion interrupts before firing the command. */
	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
354
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
/*
 * PIO data interrupt: move at most one block between the current
 * scatterlist entry and the 16-bit data port, then advance the
 * scatterlist position.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	/* PIO interrupts are only legitimate when DMA is absent or forced off. */
	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	/* Remaining bytes in this sg entry, capped at one block. */
	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data, 16 bits at a time through the data port */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	/* This sg entry is exhausted - move on to the next one. */
	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}
401
402static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
403{
404 if (host->sg_ptr == &host->bounce_sg) {
405 unsigned long flags;
406 void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
407 memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
408 tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
409 }
410}
411
/* needs to be called with host->lock held */
/*
 * Complete the data phase of the current request: fill in bytes_xfered,
 * copy back the DMA bounce buffer for reads, handle the (auto-generated)
 * stop command and finish the request.
 */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we dont do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		/* DMA reads may have gone through the bounce buffer. */
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	/* Only the hardware-generated CMD12 stop is supported (see FIXME). */
	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}
462
/*
 * DATAEND interrupt: either hand completion off to the DMA tasklet
 * (DMA transfers) or complete the data phase directly (PIO).
 */
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed,
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set, in this cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		/* DMA read done - let the tasklet finish the request. */
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		/* PIO: complete right here (lock is already held). */
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
495
/*
 * Command-completion interrupt: read back and reorder the response,
 * record timeout/CRC errors, then either enable the data-phase IRQs /
 * kick the DMA tasklet, or finish the request if there is no data phase.
 */
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	/* Raw response: four 32-bit reads, highest word first. */
	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		/* 136-bit response: shift the whole value left by one byte. */
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		/* Short response lives in the last word read. */
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimatley finish the request in the data_end handler.
	 * If theres no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		tmio_mmc_finish_request(host);
	}

out:
	spin_unlock(&host->lock);
}
556
/*
 * Top-level interrupt handler.  SDIO interrupts (when enabled via
 * TMIO_MMC_SDIO_IRQ) are checked first; otherwise the handler loops over
 * the MMC status bits - card detect, command completion, PIO data and
 * data end - until no unmasked events remain.
 */
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, irq_mask, status;
	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

	pr_debug("MMC IRQ begin\n");

	/* Pending = raw status, limited to the events we care about and unmasked. */
	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	sdio_ireg = 0;
	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

		/* Acknowledge the SDIO events we just sampled. */
		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

		if (sdio_ireg && !host->sdio_irq_enabled) {
			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
				   sdio_status, sdio_irq_mask, sdio_ireg);
			tmio_mmc_enable_sdio_irq(host->mmc, 0);
			goto out;
		}

		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
			mmc_signal_sdio_irq(host->mmc);

		/* An SDIO event was handled - no MMC event to process. */
		if (sdio_ireg)
			goto out;
	}

	pr_debug_status(status);
	pr_debug_status(ireg);

	if (!ireg) {
		/* Nothing we expected is pending - mask whatever fired. */
		tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);

		pr_warning("tmio_mmc: Spurious irq, disabling! "
			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		}

		/* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

		/* Command completion */
		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
			tmio_mmc_ack_mmc_irqs(host,
				TMIO_STAT_CMDRESPEND |
				TMIO_STAT_CMDTIMEOUT);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}
652
653static int tmio_mmc_start_data(struct tmio_mmc_host *host,
654 struct mmc_data *data)
655{
656 struct tmio_mmc_data *pdata = host->pdata;
657
658 pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
659 data->blksz, data->blocks);
660
661 /* Some hardware cannot perform 2 byte requests in 4 bit mode */
662 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
663 int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;
664
665 if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
666 pr_err("%s: %d byte block unsupported in 4 bit mode\n",
667 mmc_hostname(host->mmc), data->blksz);
668 return -EINVAL;
669 }
670 }
671
672 tmio_mmc_init_sg(host, data);
673 host->data = data;
674
675 /* Set transfer length / blocksize */
676 sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
677 sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
678
679 tmio_mmc_start_dma(host, data);
680
681 return 0;
682}
683
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret;

	if (host->mrq)
		pr_debug("request not null\n");

	/*
	 * Timestamp for the watchdog (tmio_mmc_reset_work()); the barrier
	 * orders the timestamp write before host->mrq becomes visible.
	 */
	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		/* Arm the watchdog that recovers from a lost hardware IRQ. */
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	/* Could not even start the request - fail it back to the core. */
	host->mrq = NULL;
	host->force_pio = false;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}
716
717/* Set MMC clock / power.
718 * Note: This controller uses a simple divider scheme therefore it cannot
719 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
720 * MMC wont run that fast, it has to be clocked at 12MHz which is the next
721 * slowest setting.
722 */
723static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
724{
725 struct tmio_mmc_host *host = mmc_priv(mmc);
726
727 if (ios->clock)
728 tmio_mmc_set_clock(host, ios->clock);
729
730 /* Power sequence - OFF -> UP -> ON */
731 if (ios->power_mode == MMC_POWER_UP) {
732 /* power up SD bus */
733 if (host->set_pwr)
734 host->set_pwr(host->pdev, 1);
735 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
736 /* power down SD bus */
737 if (ios->power_mode == MMC_POWER_OFF && host->set_pwr)
738 host->set_pwr(host->pdev, 0);
739 tmio_mmc_clk_stop(host);
740 } else {
741 /* start bus clock */
742 tmio_mmc_clk_start(host);
743 }
744
745 switch (ios->bus_width) {
746 case MMC_BUS_WIDTH_1:
747 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
748 break;
749 case MMC_BUS_WIDTH_4:
750 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
751 break;
752 }
753
754 /* Let things settle. delay taken from winCE driver */
755 udelay(140);
756}
757
758static int tmio_mmc_get_ro(struct mmc_host *mmc)
759{
760 struct tmio_mmc_host *host = mmc_priv(mmc);
761 struct tmio_mmc_data *pdata = host->pdata;
762
763 return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
764 !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
765}
766
767static int tmio_mmc_get_cd(struct mmc_host *mmc)
768{
769 struct tmio_mmc_host *host = mmc_priv(mmc);
770 struct tmio_mmc_data *pdata = host->pdata;
771
772 if (!pdata->get_cd)
773 return -ENOSYS;
774 else
775 return pdata->get_cd(host->pdev);
776}
777
/* Operations this driver exposes to the MMC core. */
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
785
/*
 * Common probe helper for tmio-type MMC controllers: map the control
 * registers, set up the mmc_host limits, reset the hardware, install the
 * interrupt handler, attach DMA if available and register with the MMC
 * core.  On success *host is set to the new host instance and 0 is
 * returned; on failure a negative errno is returned and everything
 * acquired so far is released.
 */
int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
				  struct platform_device *pdev,
				  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	_host->bus_shift = resource_size(res_ctl) >> 10;

	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	/* Minimum frequency: hclk through the largest clock divider (512). */
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Bring the controller into a known state before taking interrupts. */
	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto unmap_ctl;

	_host->irq = ret;

	/* Mask everything; individual events are unmasked below / per request. */
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED |
			  IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host);
	if (ret)
		goto unmap_ctl;

	spin_lock_init(&_host->lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	mmc_add_host(mmc);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;

	tmio_mmc_enable_mmc_irqs(_host, irq_mask);

	*host = _host;

	return 0;

unmap_ctl:
	iounmap(_host->ctl);
host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
884EXPORT_SYMBOL(tmio_mmc_host_probe);
885
/*
 * Tear down a host created by tmio_mmc_host_probe().  Order matters:
 * unregister from the MMC core first (no new requests), then make sure
 * the timeout worker has finished, release DMA, free the interrupt and
 * finally unmap and free the host.
 */
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	mmc_remove_host(host->mmc);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);
	free_irq(host->irq, host);
	iounmap(host->ctl);
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
896
897MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index f8f65df9b017..f08f944ac53c 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -19,7 +19,6 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/usb.h> 20#include <linux/usb.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/usb.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
24#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
25#include <linux/mmc/host.h> 24#include <linux/mmc/host.h>
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 9ed84ddb4780..4dfe2c02ea91 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -802,12 +802,9 @@ static const struct mmc_host_ops via_sdc_ops = {
802 802
803static void via_reset_pcictrl(struct via_crdr_mmc_host *host) 803static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
804{ 804{
805 void __iomem *addrbase;
806 unsigned long flags; 805 unsigned long flags;
807 u8 gatt; 806 u8 gatt;
808 807
809 addrbase = host->pcictrl_mmiobase;
810
811 spin_lock_irqsave(&host->lock, flags); 808 spin_lock_irqsave(&host->lock, flags);
812 809
813 via_save_pcictrlreg(host); 810 via_save_pcictrlreg(host);
@@ -1090,14 +1087,13 @@ static int __devinit via_sd_probe(struct pci_dev *pcidev,
1090 struct mmc_host *mmc; 1087 struct mmc_host *mmc;
1091 struct via_crdr_mmc_host *sdhost; 1088 struct via_crdr_mmc_host *sdhost;
1092 u32 base, len; 1089 u32 base, len;
1093 u8 rev, gatt; 1090 u8 gatt;
1094 int ret; 1091 int ret;
1095 1092
1096 pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev);
1097 pr_info(DRV_NAME 1093 pr_info(DRV_NAME
1098 ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", 1094 ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n",
1099 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, 1095 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1100 (int)rev); 1096 (int)pcidev->revision);
1101 1097
1102 ret = pci_enable_device(pcidev); 1098 ret = pci_enable_device(pcidev);
1103 if (ret) 1099 if (ret)
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 7fca0a386ba0..62e5a4d171e1 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -484,7 +484,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
484 484
485 /* 485 /*
486 * Check that we aren't being called after the 486 * Check that we aren't being called after the
487 * entire buffer has been transfered. 487 * entire buffer has been transferred.
488 */ 488 */
489 if (host->num_sg == 0) 489 if (host->num_sg == 0)
490 return; 490 return;
@@ -828,7 +828,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
828 /* 828 /*
829 * If this is a data transfer the request 829 * If this is a data transfer the request
830 * will be finished after the data has 830 * will be finished after the data has
831 * transfered. 831 * transferred.
832 */ 832 */
833 if (cmd->data && !cmd->error) { 833 if (cmd->data && !cmd->error) {
834 /* 834 /*
@@ -904,7 +904,7 @@ static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
904 setup &= ~WBSD_DAT3_H; 904 setup &= ~WBSD_DAT3_H;
905 905
906 /* 906 /*
907 * We cannot resume card detection immediatly 907 * We cannot resume card detection immediately
908 * because of capacitance and delays in the chip. 908 * because of capacitance and delays in the chip.
909 */ 909 */
910 mod_timer(&host->ignore_timer, jiffies + HZ / 100); 910 mod_timer(&host->ignore_timer, jiffies + HZ / 100);