aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/mmc')
-rw-r--r--drivers/mmc/Makefile4
-rw-r--r--drivers/mmc/card/Kconfig21
-rw-r--r--drivers/mmc/card/Makefile4
-rw-r--r--drivers/mmc/card/block.c781
-rw-r--r--drivers/mmc/card/mmc_test.c811
-rw-r--r--drivers/mmc/card/queue.c37
-rw-r--r--drivers/mmc/card/queue.h3
-rw-r--r--drivers/mmc/card/sdio_uart.c4
-rw-r--r--drivers/mmc/core/Kconfig11
-rw-r--r--drivers/mmc/core/Makefile7
-rw-r--r--drivers/mmc/core/bus.c78
-rw-r--r--drivers/mmc/core/bus.h2
-rw-r--r--drivers/mmc/core/core.c379
-rw-r--r--drivers/mmc/core/core.h17
-rw-r--r--drivers/mmc/core/debugfs.c41
-rw-r--r--drivers/mmc/core/host.c210
-rw-r--r--drivers/mmc/core/host.h21
-rw-r--r--drivers/mmc/core/mmc.c400
-rw-r--r--drivers/mmc/core/mmc_ops.c169
-rw-r--r--drivers/mmc/core/mmc_ops.h2
-rw-r--r--drivers/mmc/core/quirks.c79
-rw-r--r--drivers/mmc/core/sd.c432
-rw-r--r--drivers/mmc/core/sd.h2
-rw-r--r--drivers/mmc/core/sd_ops.c63
-rw-r--r--drivers/mmc/core/sdio.c160
-rw-r--r--drivers/mmc/core/sdio_bus.c66
-rw-r--r--drivers/mmc/core/sdio_irq.c35
-rw-r--r--drivers/mmc/core/sdio_ops.c18
-rw-r--r--drivers/mmc/host/Kconfig140
-rw-r--r--drivers/mmc/host/Makefile20
-rw-r--r--drivers/mmc/host/at91_mci.c24
-rw-r--r--drivers/mmc/host/atmel-mci.c48
-rw-r--r--drivers/mmc/host/au1xmmc.c6
-rw-r--r--drivers/mmc/host/bfin_sdh.c4
-rw-r--r--drivers/mmc/host/cb710-mmc.c56
-rw-r--r--drivers/mmc/host/davinci_mmc.c88
-rw-r--r--drivers/mmc/host/dw_mmc.c1859
-rw-r--r--drivers/mmc/host/dw_mmc.h168
-rw-r--r--drivers/mmc/host/imxmmc.c3
-rw-r--r--drivers/mmc/host/jz4740_mmc.c8
-rw-r--r--drivers/mmc/host/mmc_spi.c30
-rw-r--r--drivers/mmc/host/mmci.c633
-rw-r--r--drivers/mmc/host/mmci.h87
-rw-r--r--drivers/mmc/host/msm_sdcc.c242
-rw-r--r--drivers/mmc/host/msm_sdcc.h9
-rw-r--r--drivers/mmc/host/mvsdio.c3
-rw-r--r--drivers/mmc/host/mxcmmc.c237
-rw-r--r--drivers/mmc/host/mxs-mmc.c874
-rw-r--r--drivers/mmc/host/of_mmc_spi.c33
-rw-r--r--drivers/mmc/host/omap.c35
-rw-r--r--drivers/mmc/host/omap_hsmmc.c125
-rw-r--r--drivers/mmc/host/pxamci.c43
-rw-r--r--drivers/mmc/host/s3cmci.c9
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c2
-rw-r--r--drivers/mmc/host/sdhci-dove.c70
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c331
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h81
-rw-r--r--drivers/mmc/host/sdhci-of-core.c31
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c72
-rw-r--r--drivers/mmc/host/sdhci-pci.c337
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c50
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h12
-rw-r--r--drivers/mmc/host/sdhci-pxa.c303
-rw-r--r--drivers/mmc/host/sdhci-s3c.c105
-rw-r--r--drivers/mmc/host/sdhci-spear.c2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c263
-rw-r--r--drivers/mmc/host/sdhci.c1016
-rw-r--r--drivers/mmc/host/sdhci.h222
-rw-r--r--drivers/mmc/host/sdricoh_cs.c11
-rw-r--r--drivers/mmc/host/sh_mmcif.c556
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c216
-rw-r--r--drivers/mmc/host/tifm_sd.c3
-rw-r--r--drivers/mmc/host/tmio_mmc.c880
-rw-r--r--drivers/mmc/host/tmio_mmc.h233
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c320
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c1037
-rw-r--r--drivers/mmc/host/ushc.c579
-rw-r--r--drivers/mmc/host/via-sdmmc.c11
-rw-r--r--drivers/mmc/host/vub300.c2503
-rw-r--r--drivers/mmc/host/wbsd.c9
80 files changed, 15293 insertions, 2603 deletions
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 9979f5e9765b..12eef393e216 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -2,9 +2,7 @@
2# Makefile for the kernel mmc device drivers. 2# Makefile for the kernel mmc device drivers.
3# 3#
4 4
5ifeq ($(CONFIG_MMC_DEBUG),y) 5subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG
6 EXTRA_CFLAGS += -DDEBUG
7endif
8 6
9obj-$(CONFIG_MMC) += core/ 7obj-$(CONFIG_MMC) += core/
10obj-$(CONFIG_MMC) += card/ 8obj-$(CONFIG_MMC) += card/
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3f2a912659af..3b1f783bf924 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -14,6 +14,24 @@ config MMC_BLOCK
14 mount the filesystem. Almost everyone wishing MMC support 14 mount the filesystem. Almost everyone wishing MMC support
15 should say Y or M here. 15 should say Y or M here.
16 16
17config MMC_BLOCK_MINORS
18 int "Number of minors per block device"
19 depends on MMC_BLOCK
20 range 4 256
21 default 8
22 help
23 Number of minors per block device. One is needed for every
24 partition on the disk (plus one for the whole disk).
25
26 Number of total MMC minors available is 256, so your number
27 of supported block devices will be limited to 256 divided
28 by this number.
29
30 Default is 8 to be backwards compatible with previous
31 hardwired device numbering.
32
33 If unsure, say 8 here.
34
17config MMC_BLOCK_BOUNCE 35config MMC_BLOCK_BOUNCE
18 bool "Use bounce buffer for simple hosts" 36 bool "Use bounce buffer for simple hosts"
19 depends on MMC_BLOCK 37 depends on MMC_BLOCK
@@ -40,12 +58,11 @@ config SDIO_UART
40 58
41config MMC_TEST 59config MMC_TEST
42 tristate "MMC host test driver" 60 tristate "MMC host test driver"
43 default n
44 help 61 help
45 Development driver that performs a series of reads and writes 62 Development driver that performs a series of reads and writes
46 to a memory card in order to expose certain well known bugs 63 to a memory card in order to expose certain well known bugs
47 in host controllers. The tests are executed by writing to the 64 in host controllers. The tests are executed by writing to the
48 "test" file in sysfs under each card. Note that whatever is 65 "test" file in debugfs under each card. Note that whatever is
49 on your card will be overwritten by these tests. 66 on your card will be overwritten by these tests.
50 67
51 This driver is only of interest to those developing or 68 This driver is only of interest to those developing or
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index 0d407514f67d..c73b406a06cd 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -2,10 +2,6 @@
2# Makefile for MMC/SD card drivers 2# Makefile for MMC/SD card drivers
3# 3#
4 4
5ifeq ($(CONFIG_MMC_DEBUG),y)
6 EXTRA_CFLAGS += -DDEBUG
7endif
8
9obj-$(CONFIG_MMC_BLOCK) += mmc_block.o 5obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
10mmc_block-objs := block.o queue.o 6mmc_block-objs := block.o queue.o
11obj-$(CONFIG_MMC_TEST) += mmc_test.o 7obj-$(CONFIG_MMC_TEST) += mmc_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index d545f79f6000..f85e42224559 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -29,10 +29,13 @@
29#include <linux/kdev_t.h> 29#include <linux/kdev_t.h>
30#include <linux/blkdev.h> 30#include <linux/blkdev.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/smp_lock.h>
33#include <linux/scatterlist.h> 32#include <linux/scatterlist.h>
34#include <linux/string_helpers.h> 33#include <linux/string_helpers.h>
34#include <linux/delay.h>
35#include <linux/capability.h>
36#include <linux/compat.h>
35 37
38#include <linux/mmc/ioctl.h>
36#include <linux/mmc/card.h> 39#include <linux/mmc/card.h>
37#include <linux/mmc/host.h> 40#include <linux/mmc/host.h>
38#include <linux/mmc/mmc.h> 41#include <linux/mmc/mmc.h>
@@ -44,14 +47,35 @@
44#include "queue.h" 47#include "queue.h"
45 48
46MODULE_ALIAS("mmc:block"); 49MODULE_ALIAS("mmc:block");
50#ifdef MODULE_PARAM_PREFIX
51#undef MODULE_PARAM_PREFIX
52#endif
53#define MODULE_PARAM_PREFIX "mmcblk."
54
55#define INAND_CMD38_ARG_EXT_CSD 113
56#define INAND_CMD38_ARG_ERASE 0x00
57#define INAND_CMD38_ARG_TRIM 0x01
58#define INAND_CMD38_ARG_SECERASE 0x80
59#define INAND_CMD38_ARG_SECTRIM1 0x81
60#define INAND_CMD38_ARG_SECTRIM2 0x88
61
62static DEFINE_MUTEX(block_mutex);
63
64/*
65 * The defaults come from config options but can be overriden by module
66 * or bootarg options.
67 */
68static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
47 69
48/* 70/*
49 * max 8 partitions per card 71 * We've only got one major, so number of mmcblk devices is
72 * limited to 256 / number of minors per device.
50 */ 73 */
51#define MMC_SHIFT 3 74static int max_devices;
52#define MMC_NUM_MINORS (256 >> MMC_SHIFT)
53 75
54static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS); 76/* 256 minors, so at most 256 separate devices */
77static DECLARE_BITMAP(dev_use, 256);
78static DECLARE_BITMAP(name_use, 256);
55 79
56/* 80/*
57 * There is one mmc_blk_data per slot. 81 * There is one mmc_blk_data per slot.
@@ -60,13 +84,31 @@ struct mmc_blk_data {
60 spinlock_t lock; 84 spinlock_t lock;
61 struct gendisk *disk; 85 struct gendisk *disk;
62 struct mmc_queue queue; 86 struct mmc_queue queue;
87 struct list_head part;
88
89 unsigned int flags;
90#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
91#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
63 92
64 unsigned int usage; 93 unsigned int usage;
65 unsigned int read_only; 94 unsigned int read_only;
95 unsigned int part_type;
96 unsigned int name_idx;
97
98 /*
99 * Only set in main mmc_blk_data associated
100 * with mmc_card with mmc_set_drvdata, and keeps
101 * track of the current selected device partition.
102 */
103 unsigned int part_curr;
104 struct device_attribute force_ro;
66}; 105};
67 106
68static DEFINE_MUTEX(open_lock); 107static DEFINE_MUTEX(open_lock);
69 108
109module_param(perdev_minors, int, 0444);
110MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
111
70static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) 112static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
71{ 113{
72 struct mmc_blk_data *md; 114 struct mmc_blk_data *md;
@@ -82,17 +124,22 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
82 return md; 124 return md;
83} 125}
84 126
127static inline int mmc_get_devidx(struct gendisk *disk)
128{
129 int devmaj = MAJOR(disk_devt(disk));
130 int devidx = MINOR(disk_devt(disk)) / perdev_minors;
131
132 if (!devmaj)
133 devidx = disk->first_minor / perdev_minors;
134 return devidx;
135}
136
85static void mmc_blk_put(struct mmc_blk_data *md) 137static void mmc_blk_put(struct mmc_blk_data *md)
86{ 138{
87 mutex_lock(&open_lock); 139 mutex_lock(&open_lock);
88 md->usage--; 140 md->usage--;
89 if (md->usage == 0) { 141 if (md->usage == 0) {
90 int devmaj = MAJOR(disk_devt(md->disk)); 142 int devidx = mmc_get_devidx(md->disk);
91 int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
92
93 if (!devmaj)
94 devidx = md->disk->first_minor >> MMC_SHIFT;
95
96 blk_cleanup_queue(md->queue.queue); 143 blk_cleanup_queue(md->queue.queue);
97 144
98 __clear_bit(devidx, dev_use); 145 __clear_bit(devidx, dev_use);
@@ -103,12 +150,44 @@ static void mmc_blk_put(struct mmc_blk_data *md)
103 mutex_unlock(&open_lock); 150 mutex_unlock(&open_lock);
104} 151}
105 152
153static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
154 char *buf)
155{
156 int ret;
157 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
158
159 ret = snprintf(buf, PAGE_SIZE, "%d",
160 get_disk_ro(dev_to_disk(dev)) ^
161 md->read_only);
162 mmc_blk_put(md);
163 return ret;
164}
165
166static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
167 const char *buf, size_t count)
168{
169 int ret;
170 char *end;
171 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
172 unsigned long set = simple_strtoul(buf, &end, 0);
173 if (end == buf) {
174 ret = -EINVAL;
175 goto out;
176 }
177
178 set_disk_ro(dev_to_disk(dev), set || md->read_only);
179 ret = count;
180out:
181 mmc_blk_put(md);
182 return ret;
183}
184
106static int mmc_blk_open(struct block_device *bdev, fmode_t mode) 185static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
107{ 186{
108 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); 187 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
109 int ret = -ENXIO; 188 int ret = -ENXIO;
110 189
111 lock_kernel(); 190 mutex_lock(&block_mutex);
112 if (md) { 191 if (md) {
113 if (md->usage == 2) 192 if (md->usage == 2)
114 check_disk_change(bdev); 193 check_disk_change(bdev);
@@ -119,7 +198,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
119 ret = -EROFS; 198 ret = -EROFS;
120 } 199 }
121 } 200 }
122 unlock_kernel(); 201 mutex_unlock(&block_mutex);
123 202
124 return ret; 203 return ret;
125} 204}
@@ -128,9 +207,9 @@ static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
128{ 207{
129 struct mmc_blk_data *md = disk->private_data; 208 struct mmc_blk_data *md = disk->private_data;
130 209
131 lock_kernel(); 210 mutex_lock(&block_mutex);
132 mmc_blk_put(md); 211 mmc_blk_put(md);
133 unlock_kernel(); 212 mutex_unlock(&block_mutex);
134 return 0; 213 return 0;
135} 214}
136 215
@@ -143,35 +222,255 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
143 return 0; 222 return 0;
144} 223}
145 224
225struct mmc_blk_ioc_data {
226 struct mmc_ioc_cmd ic;
227 unsigned char *buf;
228 u64 buf_bytes;
229};
230
231static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
232 struct mmc_ioc_cmd __user *user)
233{
234 struct mmc_blk_ioc_data *idata;
235 int err;
236
237 idata = kzalloc(sizeof(*idata), GFP_KERNEL);
238 if (!idata) {
239 err = -ENOMEM;
240 goto out;
241 }
242
243 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
244 err = -EFAULT;
245 goto idata_err;
246 }
247
248 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
249 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
250 err = -EOVERFLOW;
251 goto idata_err;
252 }
253
254 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
255 if (!idata->buf) {
256 err = -ENOMEM;
257 goto idata_err;
258 }
259
260 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
261 idata->ic.data_ptr, idata->buf_bytes)) {
262 err = -EFAULT;
263 goto copy_err;
264 }
265
266 return idata;
267
268copy_err:
269 kfree(idata->buf);
270idata_err:
271 kfree(idata);
272out:
273 return ERR_PTR(err);
274}
275
276static int mmc_blk_ioctl_cmd(struct block_device *bdev,
277 struct mmc_ioc_cmd __user *ic_ptr)
278{
279 struct mmc_blk_ioc_data *idata;
280 struct mmc_blk_data *md;
281 struct mmc_card *card;
282 struct mmc_command cmd = {0};
283 struct mmc_data data = {0};
284 struct mmc_request mrq = {0};
285 struct scatterlist sg;
286 int err;
287
288 /*
289 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
290 * whole block device, not on a partition. This prevents overspray
291 * between sibling partitions.
292 */
293 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
294 return -EPERM;
295
296 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
297 if (IS_ERR(idata))
298 return PTR_ERR(idata);
299
300 cmd.opcode = idata->ic.opcode;
301 cmd.arg = idata->ic.arg;
302 cmd.flags = idata->ic.flags;
303
304 data.sg = &sg;
305 data.sg_len = 1;
306 data.blksz = idata->ic.blksz;
307 data.blocks = idata->ic.blocks;
308
309 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
310
311 if (idata->ic.write_flag)
312 data.flags = MMC_DATA_WRITE;
313 else
314 data.flags = MMC_DATA_READ;
315
316 mrq.cmd = &cmd;
317 mrq.data = &data;
318
319 md = mmc_blk_get(bdev->bd_disk);
320 if (!md) {
321 err = -EINVAL;
322 goto cmd_done;
323 }
324
325 card = md->queue.card;
326 if (IS_ERR(card)) {
327 err = PTR_ERR(card);
328 goto cmd_done;
329 }
330
331 mmc_claim_host(card->host);
332
333 if (idata->ic.is_acmd) {
334 err = mmc_app_cmd(card->host, card);
335 if (err)
336 goto cmd_rel_host;
337 }
338
339 /* data.flags must already be set before doing this. */
340 mmc_set_data_timeout(&data, card);
341 /* Allow overriding the timeout_ns for empirical tuning. */
342 if (idata->ic.data_timeout_ns)
343 data.timeout_ns = idata->ic.data_timeout_ns;
344
345 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
346 /*
347 * Pretend this is a data transfer and rely on the host driver
348 * to compute timeout. When all host drivers support
349 * cmd.cmd_timeout for R1B, this can be changed to:
350 *
351 * mrq.data = NULL;
352 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
353 */
354 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
355 }
356
357 mmc_wait_for_req(card->host, &mrq);
358
359 if (cmd.error) {
360 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
361 __func__, cmd.error);
362 err = cmd.error;
363 goto cmd_rel_host;
364 }
365 if (data.error) {
366 dev_err(mmc_dev(card->host), "%s: data error %d\n",
367 __func__, data.error);
368 err = data.error;
369 goto cmd_rel_host;
370 }
371
372 /*
373 * According to the SD specs, some commands require a delay after
374 * issuing the command.
375 */
376 if (idata->ic.postsleep_min_us)
377 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
378
379 if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
380 err = -EFAULT;
381 goto cmd_rel_host;
382 }
383
384 if (!idata->ic.write_flag) {
385 if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
386 idata->buf, idata->buf_bytes)) {
387 err = -EFAULT;
388 goto cmd_rel_host;
389 }
390 }
391
392cmd_rel_host:
393 mmc_release_host(card->host);
394
395cmd_done:
396 mmc_blk_put(md);
397 kfree(idata->buf);
398 kfree(idata);
399 return err;
400}
401
402static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
403 unsigned int cmd, unsigned long arg)
404{
405 int ret = -EINVAL;
406 if (cmd == MMC_IOC_CMD)
407 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
408 return ret;
409}
410
411#ifdef CONFIG_COMPAT
412static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
413 unsigned int cmd, unsigned long arg)
414{
415 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
416}
417#endif
418
146static const struct block_device_operations mmc_bdops = { 419static const struct block_device_operations mmc_bdops = {
147 .open = mmc_blk_open, 420 .open = mmc_blk_open,
148 .release = mmc_blk_release, 421 .release = mmc_blk_release,
149 .getgeo = mmc_blk_getgeo, 422 .getgeo = mmc_blk_getgeo,
150 .owner = THIS_MODULE, 423 .owner = THIS_MODULE,
424 .ioctl = mmc_blk_ioctl,
425#ifdef CONFIG_COMPAT
426 .compat_ioctl = mmc_blk_compat_ioctl,
427#endif
151}; 428};
152 429
153struct mmc_blk_request { 430struct mmc_blk_request {
154 struct mmc_request mrq; 431 struct mmc_request mrq;
432 struct mmc_command sbc;
155 struct mmc_command cmd; 433 struct mmc_command cmd;
156 struct mmc_command stop; 434 struct mmc_command stop;
157 struct mmc_data data; 435 struct mmc_data data;
158}; 436};
159 437
438static inline int mmc_blk_part_switch(struct mmc_card *card,
439 struct mmc_blk_data *md)
440{
441 int ret;
442 struct mmc_blk_data *main_md = mmc_get_drvdata(card);
443 if (main_md->part_curr == md->part_type)
444 return 0;
445
446 if (mmc_card_mmc(card)) {
447 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
448 card->ext_csd.part_config |= md->part_type;
449
450 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
451 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
452 card->ext_csd.part_time);
453 if (ret)
454 return ret;
455}
456
457 main_md->part_curr = md->part_type;
458 return 0;
459}
460
160static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) 461static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
161{ 462{
162 int err; 463 int err;
163 u32 result; 464 u32 result;
164 __be32 *blocks; 465 __be32 *blocks;
165 466
166 struct mmc_request mrq; 467 struct mmc_request mrq = {0};
167 struct mmc_command cmd; 468 struct mmc_command cmd = {0};
168 struct mmc_data data; 469 struct mmc_data data = {0};
169 unsigned int timeout_us; 470 unsigned int timeout_us;
170 471
171 struct scatterlist sg; 472 struct scatterlist sg;
172 473
173 memset(&cmd, 0, sizeof(struct mmc_command));
174
175 cmd.opcode = MMC_APP_CMD; 474 cmd.opcode = MMC_APP_CMD;
176 cmd.arg = card->rca << 16; 475 cmd.arg = card->rca << 16;
177 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 476 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
@@ -188,8 +487,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
188 cmd.arg = 0; 487 cmd.arg = 0;
189 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 488 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
190 489
191 memset(&data, 0, sizeof(struct mmc_data));
192
193 data.timeout_ns = card->csd.tacc_ns * 100; 490 data.timeout_ns = card->csd.tacc_ns * 100;
194 data.timeout_clks = card->csd.tacc_clks * 100; 491 data.timeout_clks = card->csd.tacc_clks * 100;
195 492
@@ -208,8 +505,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
208 data.sg = &sg; 505 data.sg = &sg;
209 data.sg_len = 1; 506 data.sg_len = 1;
210 507
211 memset(&mrq, 0, sizeof(struct mmc_request));
212
213 mrq.cmd = &cmd; 508 mrq.cmd = &cmd;
214 mrq.data = &data; 509 mrq.data = &data;
215 510
@@ -232,17 +527,16 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
232 527
233static u32 get_card_status(struct mmc_card *card, struct request *req) 528static u32 get_card_status(struct mmc_card *card, struct request *req)
234{ 529{
235 struct mmc_command cmd; 530 struct mmc_command cmd = {0};
236 int err; 531 int err;
237 532
238 memset(&cmd, 0, sizeof(struct mmc_command));
239 cmd.opcode = MMC_SEND_STATUS; 533 cmd.opcode = MMC_SEND_STATUS;
240 if (!mmc_host_is_spi(card->host)) 534 if (!mmc_host_is_spi(card->host))
241 cmd.arg = card->rca << 16; 535 cmd.arg = card->rca << 16;
242 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 536 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
243 err = mmc_wait_for_cmd(card->host, &cmd, 0); 537 err = mmc_wait_for_cmd(card->host, &cmd, 0);
244 if (err) 538 if (err)
245 printk(KERN_ERR "%s: error %d sending status comand", 539 printk(KERN_ERR "%s: error %d sending status command",
246 req->rq_disk->disk_name, err); 540 req->rq_disk->disk_name, err);
247 return cmd.resp[0]; 541 return cmd.resp[0];
248} 542}
@@ -254,8 +548,6 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
254 unsigned int from, nr, arg; 548 unsigned int from, nr, arg;
255 int err = 0; 549 int err = 0;
256 550
257 mmc_claim_host(card->host);
258
259 if (!mmc_can_erase(card)) { 551 if (!mmc_can_erase(card)) {
260 err = -EOPNOTSUPP; 552 err = -EOPNOTSUPP;
261 goto out; 553 goto out;
@@ -269,14 +561,22 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
269 else 561 else
270 arg = MMC_ERASE_ARG; 562 arg = MMC_ERASE_ARG;
271 563
564 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
565 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
566 INAND_CMD38_ARG_EXT_CSD,
567 arg == MMC_TRIM_ARG ?
568 INAND_CMD38_ARG_TRIM :
569 INAND_CMD38_ARG_ERASE,
570 0);
571 if (err)
572 goto out;
573 }
272 err = mmc_erase(card, from, nr, arg); 574 err = mmc_erase(card, from, nr, arg);
273out: 575out:
274 spin_lock_irq(&md->lock); 576 spin_lock_irq(&md->lock);
275 __blk_end_request(req, err, blk_rq_bytes(req)); 577 __blk_end_request(req, err, blk_rq_bytes(req));
276 spin_unlock_irq(&md->lock); 578 spin_unlock_irq(&md->lock);
277 579
278 mmc_release_host(card->host);
279
280 return err ? 0 : 1; 580 return err ? 0 : 1;
281} 581}
282 582
@@ -288,8 +588,6 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
288 unsigned int from, nr, arg; 588 unsigned int from, nr, arg;
289 int err = 0; 589 int err = 0;
290 590
291 mmc_claim_host(card->host);
292
293 if (!mmc_can_secure_erase_trim(card)) { 591 if (!mmc_can_secure_erase_trim(card)) {
294 err = -EOPNOTSUPP; 592 err = -EOPNOTSUPP;
295 goto out; 593 goto out;
@@ -303,19 +601,74 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
303 else 601 else
304 arg = MMC_SECURE_ERASE_ARG; 602 arg = MMC_SECURE_ERASE_ARG;
305 603
604 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
605 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
606 INAND_CMD38_ARG_EXT_CSD,
607 arg == MMC_SECURE_TRIM1_ARG ?
608 INAND_CMD38_ARG_SECTRIM1 :
609 INAND_CMD38_ARG_SECERASE,
610 0);
611 if (err)
612 goto out;
613 }
306 err = mmc_erase(card, from, nr, arg); 614 err = mmc_erase(card, from, nr, arg);
307 if (!err && arg == MMC_SECURE_TRIM1_ARG) 615 if (!err && arg == MMC_SECURE_TRIM1_ARG) {
616 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
617 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
618 INAND_CMD38_ARG_EXT_CSD,
619 INAND_CMD38_ARG_SECTRIM2,
620 0);
621 if (err)
622 goto out;
623 }
308 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); 624 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
625 }
309out: 626out:
310 spin_lock_irq(&md->lock); 627 spin_lock_irq(&md->lock);
311 __blk_end_request(req, err, blk_rq_bytes(req)); 628 __blk_end_request(req, err, blk_rq_bytes(req));
312 spin_unlock_irq(&md->lock); 629 spin_unlock_irq(&md->lock);
313 630
314 mmc_release_host(card->host);
315
316 return err ? 0 : 1; 631 return err ? 0 : 1;
317} 632}
318 633
634static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
635{
636 struct mmc_blk_data *md = mq->data;
637
638 /*
639 * No-op, only service this because we need REQ_FUA for reliable
640 * writes.
641 */
642 spin_lock_irq(&md->lock);
643 __blk_end_request_all(req, 0);
644 spin_unlock_irq(&md->lock);
645
646 return 1;
647}
648
649/*
650 * Reformat current write as a reliable write, supporting
651 * both legacy and the enhanced reliable write MMC cards.
652 * In each transfer we'll handle only as much as a single
653 * reliable write can handle, thus finish the request in
654 * partial completions.
655 */
656static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
657 struct mmc_card *card,
658 struct request *req)
659{
660 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
661 /* Legacy mode imposes restrictions on transfers. */
662 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
663 brq->data.blocks = 1;
664
665 if (brq->data.blocks > card->ext_csd.rel_sectors)
666 brq->data.blocks = card->ext_csd.rel_sectors;
667 else if (brq->data.blocks < card->ext_csd.rel_sectors)
668 brq->data.blocks = 1;
669 }
670}
671
319static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) 672static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
320{ 673{
321 struct mmc_blk_data *md = mq->data; 674 struct mmc_blk_data *md = mq->data;
@@ -323,10 +676,17 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
323 struct mmc_blk_request brq; 676 struct mmc_blk_request brq;
324 int ret = 1, disable_multi = 0; 677 int ret = 1, disable_multi = 0;
325 678
326 mmc_claim_host(card->host); 679 /*
680 * Reliable writes are used to implement Forced Unit Access and
681 * REQ_META accesses, and are supported only on MMCs.
682 */
683 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
684 (req->cmd_flags & REQ_META)) &&
685 (rq_data_dir(req) == WRITE) &&
686 (md->flags & MMC_BLK_REL_WR);
327 687
328 do { 688 do {
329 struct mmc_command cmd; 689 struct mmc_command cmd = {0};
330 u32 readcmd, writecmd, status = 0; 690 u32 readcmd, writecmd, status = 0;
331 691
332 memset(&brq, 0, sizeof(struct mmc_blk_request)); 692 memset(&brq, 0, sizeof(struct mmc_blk_request));
@@ -359,12 +719,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
359 if (disable_multi && brq.data.blocks > 1) 719 if (disable_multi && brq.data.blocks > 1)
360 brq.data.blocks = 1; 720 brq.data.blocks = 1;
361 721
362 if (brq.data.blocks > 1) { 722 if (brq.data.blocks > 1 || do_rel_wr) {
363 /* SPI multiblock writes terminate using a special 723 /* SPI multiblock writes terminate using a special
364 * token, not a STOP_TRANSMISSION request. 724 * token, not a STOP_TRANSMISSION request.
365 */ 725 */
366 if (!mmc_host_is_spi(card->host) 726 if (!mmc_host_is_spi(card->host) ||
367 || rq_data_dir(req) == READ) 727 rq_data_dir(req) == READ)
368 brq.mrq.stop = &brq.stop; 728 brq.mrq.stop = &brq.stop;
369 readcmd = MMC_READ_MULTIPLE_BLOCK; 729 readcmd = MMC_READ_MULTIPLE_BLOCK;
370 writecmd = MMC_WRITE_MULTIPLE_BLOCK; 730 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
@@ -373,7 +733,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
373 readcmd = MMC_READ_SINGLE_BLOCK; 733 readcmd = MMC_READ_SINGLE_BLOCK;
374 writecmd = MMC_WRITE_BLOCK; 734 writecmd = MMC_WRITE_BLOCK;
375 } 735 }
376
377 if (rq_data_dir(req) == READ) { 736 if (rq_data_dir(req) == READ) {
378 brq.cmd.opcode = readcmd; 737 brq.cmd.opcode = readcmd;
379 brq.data.flags |= MMC_DATA_READ; 738 brq.data.flags |= MMC_DATA_READ;
@@ -382,6 +741,38 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
382 brq.data.flags |= MMC_DATA_WRITE; 741 brq.data.flags |= MMC_DATA_WRITE;
383 } 742 }
384 743
744 if (do_rel_wr)
745 mmc_apply_rel_rw(&brq, card, req);
746
747 /*
748 * Pre-defined multi-block transfers are preferable to
749 * open ended-ones (and necessary for reliable writes).
750 * However, it is not sufficient to just send CMD23,
751 * and avoid the final CMD12, as on an error condition
752 * CMD12 (stop) needs to be sent anyway. This, coupled
753 * with Auto-CMD23 enhancements provided by some
754 * hosts, means that the complexity of dealing
755 * with this is best left to the host. If CMD23 is
756 * supported by card and host, we'll fill sbc in and let
757 * the host deal with handling it correctly. This means
758 * that for hosts that don't expose MMC_CAP_CMD23, no
759 * change of behavior will be observed.
760 *
761 * N.B: Some MMC cards experience perf degradation.
762 * We'll avoid using CMD23-bounded multiblock writes for
763 * these, while retaining features like reliable writes.
764 */
765
766 if ((md->flags & MMC_BLK_CMD23) &&
767 mmc_op_multi(brq.cmd.opcode) &&
768 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
769 brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
770 brq.sbc.arg = brq.data.blocks |
771 (do_rel_wr ? (1 << 31) : 0);
772 brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
773 brq.mrq.sbc = &brq.sbc;
774 }
775
385 mmc_set_data_timeout(&brq.data, card); 776 mmc_set_data_timeout(&brq.data, card);
386 777
387 brq.data.sg = mq->sg; 778 brq.data.sg = mq->sg;
@@ -417,7 +808,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
417 * until later as we need to wait for the card to leave 808 * until later as we need to wait for the card to leave
418 * programming mode even when things go wrong. 809 * programming mode even when things go wrong.
419 */ 810 */
420 if (brq.cmd.error || brq.data.error || brq.stop.error) { 811 if (brq.sbc.error || brq.cmd.error ||
812 brq.data.error || brq.stop.error) {
421 if (brq.data.blocks > 1 && rq_data_dir(req) == READ) { 813 if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
422 /* Redo read one sector at a time */ 814 /* Redo read one sector at a time */
423 printk(KERN_WARNING "%s: retrying using single " 815 printk(KERN_WARNING "%s: retrying using single "
@@ -428,6 +820,13 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
428 status = get_card_status(card, req); 820 status = get_card_status(card, req);
429 } 821 }
430 822
823 if (brq.sbc.error) {
824 printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
825 "command, response %#x, card status %#x\n",
826 req->rq_disk->disk_name, brq.sbc.error,
827 brq.sbc.resp[0], status);
828 }
829
431 if (brq.cmd.error) { 830 if (brq.cmd.error) {
432 printk(KERN_ERR "%s: error %d sending read/write " 831 printk(KERN_ERR "%s: error %d sending read/write "
433 "command, response %#x, card status %#x\n", 832 "command, response %#x, card status %#x\n",
@@ -506,8 +905,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
506 spin_unlock_irq(&md->lock); 905 spin_unlock_irq(&md->lock);
507 } while (ret); 906 } while (ret);
508 907
509 mmc_release_host(card->host);
510
511 return 1; 908 return 1;
512 909
513 cmd_err: 910 cmd_err:
@@ -534,8 +931,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
534 spin_unlock_irq(&md->lock); 931 spin_unlock_irq(&md->lock);
535 } 932 }
536 933
537 mmc_release_host(card->host);
538
539 spin_lock_irq(&md->lock); 934 spin_lock_irq(&md->lock);
540 while (ret) 935 while (ret)
541 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); 936 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
@@ -546,14 +941,31 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
546 941
547static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) 942static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
548{ 943{
944 int ret;
945 struct mmc_blk_data *md = mq->data;
946 struct mmc_card *card = md->queue.card;
947
948 mmc_claim_host(card->host);
949 ret = mmc_blk_part_switch(card, md);
950 if (ret) {
951 ret = 0;
952 goto out;
953 }
954
549 if (req->cmd_flags & REQ_DISCARD) { 955 if (req->cmd_flags & REQ_DISCARD) {
550 if (req->cmd_flags & REQ_SECURE) 956 if (req->cmd_flags & REQ_SECURE)
551 return mmc_blk_issue_secdiscard_rq(mq, req); 957 ret = mmc_blk_issue_secdiscard_rq(mq, req);
552 else 958 else
553 return mmc_blk_issue_discard_rq(mq, req); 959 ret = mmc_blk_issue_discard_rq(mq, req);
960 } else if (req->cmd_flags & REQ_FLUSH) {
961 ret = mmc_blk_issue_flush(mq, req);
554 } else { 962 } else {
555 return mmc_blk_issue_rw_rq(mq, req); 963 ret = mmc_blk_issue_rw_rq(mq, req);
556 } 964 }
965
966out:
967 mmc_release_host(card->host);
968 return ret;
557} 969}
558 970
559static inline int mmc_blk_readonly(struct mmc_card *card) 971static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -562,13 +974,17 @@ static inline int mmc_blk_readonly(struct mmc_card *card)
562 !(card->csd.cmdclass & CCC_BLOCK_WRITE); 974 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
563} 975}
564 976
565static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) 977static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
978 struct device *parent,
979 sector_t size,
980 bool default_ro,
981 const char *subname)
566{ 982{
567 struct mmc_blk_data *md; 983 struct mmc_blk_data *md;
568 int devidx, ret; 984 int devidx, ret;
569 985
570 devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS); 986 devidx = find_first_zero_bit(dev_use, max_devices);
571 if (devidx >= MMC_NUM_MINORS) 987 if (devidx >= max_devices)
572 return ERR_PTR(-ENOSPC); 988 return ERR_PTR(-ENOSPC);
573 __set_bit(devidx, dev_use); 989 __set_bit(devidx, dev_use);
574 990
@@ -578,6 +994,19 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
578 goto out; 994 goto out;
579 } 995 }
580 996
997 /*
998 * !subname implies we are creating main mmc_blk_data that will be
999 * associated with mmc_card with mmc_set_drvdata. Due to device
1000 * partitions, devidx will not coincide with a per-physical card
1001 * index anymore so we keep track of a name index.
1002 */
1003 if (!subname) {
1004 md->name_idx = find_first_zero_bit(name_use, max_devices);
1005 __set_bit(md->name_idx, name_use);
1006 }
1007 else
1008 md->name_idx = ((struct mmc_blk_data *)
1009 dev_to_disk(parent)->private_data)->name_idx;
581 1010
582 /* 1011 /*
583 * Set the read-only status based on the supported commands 1012 * Set the read-only status based on the supported commands
@@ -585,16 +1014,17 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
585 */ 1014 */
586 md->read_only = mmc_blk_readonly(card); 1015 md->read_only = mmc_blk_readonly(card);
587 1016
588 md->disk = alloc_disk(1 << MMC_SHIFT); 1017 md->disk = alloc_disk(perdev_minors);
589 if (md->disk == NULL) { 1018 if (md->disk == NULL) {
590 ret = -ENOMEM; 1019 ret = -ENOMEM;
591 goto err_kfree; 1020 goto err_kfree;
592 } 1021 }
593 1022
594 spin_lock_init(&md->lock); 1023 spin_lock_init(&md->lock);
1024 INIT_LIST_HEAD(&md->part);
595 md->usage = 1; 1025 md->usage = 1;
596 1026
597 ret = mmc_init_queue(&md->queue, card, &md->lock); 1027 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
598 if (ret) 1028 if (ret)
599 goto err_putdisk; 1029 goto err_putdisk;
600 1030
@@ -602,11 +1032,12 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
602 md->queue.data = md; 1032 md->queue.data = md;
603 1033
604 md->disk->major = MMC_BLOCK_MAJOR; 1034 md->disk->major = MMC_BLOCK_MAJOR;
605 md->disk->first_minor = devidx << MMC_SHIFT; 1035 md->disk->first_minor = devidx * perdev_minors;
606 md->disk->fops = &mmc_bdops; 1036 md->disk->fops = &mmc_bdops;
607 md->disk->private_data = md; 1037 md->disk->private_data = md;
608 md->disk->queue = md->queue.queue; 1038 md->disk->queue = md->queue.queue;
609 md->disk->driverfs_dev = &card->dev; 1039 md->disk->driverfs_dev = parent;
1040 set_disk_ro(md->disk, md->read_only || default_ro);
610 1041
611 /* 1042 /*
612 * As discussed on lkml, GENHD_FL_REMOVABLE should: 1043 * As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -620,65 +1051,204 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
620 * messages to tell when the card is present. 1051 * messages to tell when the card is present.
621 */ 1052 */
622 1053
623 sprintf(md->disk->disk_name, "mmcblk%d", devidx); 1054 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
1055 "mmcblk%d%s", md->name_idx, subname ? subname : "");
624 1056
625 blk_queue_logical_block_size(md->queue.queue, 512); 1057 blk_queue_logical_block_size(md->queue.queue, 512);
1058 set_capacity(md->disk, size);
1059
1060 if (mmc_host_cmd23(card->host)) {
1061 if (mmc_card_mmc(card) ||
1062 (mmc_card_sd(card) &&
1063 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1064 md->flags |= MMC_BLK_CMD23;
1065 }
1066
1067 if (mmc_card_mmc(card) &&
1068 md->flags & MMC_BLK_CMD23 &&
1069 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
1070 card->ext_csd.rel_sectors)) {
1071 md->flags |= MMC_BLK_REL_WR;
1072 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
1073 }
1074
1075 return md;
1076
1077 err_putdisk:
1078 put_disk(md->disk);
1079 err_kfree:
1080 kfree(md);
1081 out:
1082 return ERR_PTR(ret);
1083}
1084
1085static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
1086{
1087 sector_t size;
1088 struct mmc_blk_data *md;
626 1089
627 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 1090 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
628 /* 1091 /*
629 * The EXT_CSD sector count is in number or 512 byte 1092 * The EXT_CSD sector count is in number or 512 byte
630 * sectors. 1093 * sectors.
631 */ 1094 */
632 set_capacity(md->disk, card->ext_csd.sectors); 1095 size = card->ext_csd.sectors;
633 } else { 1096 } else {
634 /* 1097 /*
635 * The CSD capacity field is in units of read_blkbits. 1098 * The CSD capacity field is in units of read_blkbits.
636 * set_capacity takes units of 512 bytes. 1099 * set_capacity takes units of 512 bytes.
637 */ 1100 */
638 set_capacity(md->disk, 1101 size = card->csd.capacity << (card->csd.read_blkbits - 9);
639 card->csd.capacity << (card->csd.read_blkbits - 9));
640 } 1102 }
1103
1104 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
641 return md; 1105 return md;
1106}
642 1107
643 err_putdisk: 1108static int mmc_blk_alloc_part(struct mmc_card *card,
644 put_disk(md->disk); 1109 struct mmc_blk_data *md,
645 err_kfree: 1110 unsigned int part_type,
646 kfree(md); 1111 sector_t size,
647 out: 1112 bool default_ro,
648 return ERR_PTR(ret); 1113 const char *subname)
1114{
1115 char cap_str[10];
1116 struct mmc_blk_data *part_md;
1117
1118 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
1119 subname);
1120 if (IS_ERR(part_md))
1121 return PTR_ERR(part_md);
1122 part_md->part_type = part_type;
1123 list_add(&part_md->part, &md->part);
1124
1125 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
1126 cap_str, sizeof(cap_str));
1127 printk(KERN_INFO "%s: %s %s partition %u %s\n",
1128 part_md->disk->disk_name, mmc_card_id(card),
1129 mmc_card_name(card), part_md->part_type, cap_str);
1130 return 0;
1131}
1132
1133static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
1134{
1135 int ret = 0;
1136
1137 if (!mmc_card_mmc(card))
1138 return 0;
1139
1140 if (card->ext_csd.boot_size) {
1141 ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
1142 card->ext_csd.boot_size >> 9,
1143 true,
1144 "boot0");
1145 if (ret)
1146 return ret;
1147 ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
1148 card->ext_csd.boot_size >> 9,
1149 true,
1150 "boot1");
1151 if (ret)
1152 return ret;
1153 }
1154
1155 return ret;
649} 1156}
650 1157
651static int 1158static int
652mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) 1159mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
653{ 1160{
654 struct mmc_command cmd;
655 int err; 1161 int err;
656 1162
657 /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
658 if (mmc_card_blockaddr(card))
659 return 0;
660
661 mmc_claim_host(card->host); 1163 mmc_claim_host(card->host);
662 cmd.opcode = MMC_SET_BLOCKLEN; 1164 err = mmc_set_blocklen(card, 512);
663 cmd.arg = 512;
664 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
665 err = mmc_wait_for_cmd(card->host, &cmd, 5);
666 mmc_release_host(card->host); 1165 mmc_release_host(card->host);
667 1166
668 if (err) { 1167 if (err) {
669 printk(KERN_ERR "%s: unable to set block size to %d: %d\n", 1168 printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
670 md->disk->disk_name, cmd.arg, err); 1169 md->disk->disk_name, err);
671 return -EINVAL; 1170 return -EINVAL;
672 } 1171 }
673 1172
674 return 0; 1173 return 0;
675} 1174}
676 1175
1176static void mmc_blk_remove_req(struct mmc_blk_data *md)
1177{
1178 if (md) {
1179 if (md->disk->flags & GENHD_FL_UP) {
1180 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
1181
1182 /* Stop new requests from getting into the queue */
1183 del_gendisk(md->disk);
1184 }
1185
1186 /* Then flush out any already in there */
1187 mmc_cleanup_queue(&md->queue);
1188 mmc_blk_put(md);
1189 }
1190}
1191
1192static void mmc_blk_remove_parts(struct mmc_card *card,
1193 struct mmc_blk_data *md)
1194{
1195 struct list_head *pos, *q;
1196 struct mmc_blk_data *part_md;
1197
1198 __clear_bit(md->name_idx, name_use);
1199 list_for_each_safe(pos, q, &md->part) {
1200 part_md = list_entry(pos, struct mmc_blk_data, part);
1201 list_del(pos);
1202 mmc_blk_remove_req(part_md);
1203 }
1204}
1205
1206static int mmc_add_disk(struct mmc_blk_data *md)
1207{
1208 int ret;
1209
1210 add_disk(md->disk);
1211 md->force_ro.show = force_ro_show;
1212 md->force_ro.store = force_ro_store;
1213 sysfs_attr_init(&md->force_ro.attr);
1214 md->force_ro.attr.name = "force_ro";
1215 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
1216 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
1217 if (ret)
1218 del_gendisk(md->disk);
1219
1220 return ret;
1221}
1222
1223static const struct mmc_fixup blk_fixups[] =
1224{
1225 MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1226 MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1227 MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1228 MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1229 MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1230
1231 /*
1232 * Some MMC cards experience performance degradation with CMD23
1233 * instead of CMD12-bounded multiblock transfers. For now we'll
1234 * black list what's bad...
1235 * - Certain Toshiba cards.
1236 *
1237 * N.B. This doesn't affect SD cards.
1238 */
1239 MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1240 MMC_QUIRK_BLK_NO_CMD23),
1241 MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1242 MMC_QUIRK_BLK_NO_CMD23),
1243 MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1244 MMC_QUIRK_BLK_NO_CMD23),
1245 END_FIXUP
1246};
1247
677static int mmc_blk_probe(struct mmc_card *card) 1248static int mmc_blk_probe(struct mmc_card *card)
678{ 1249{
679 struct mmc_blk_data *md; 1250 struct mmc_blk_data *md, *part_md;
680 int err; 1251 int err;
681
682 char cap_str[10]; 1252 char cap_str[10];
683 1253
684 /* 1254 /*
@@ -701,14 +1271,24 @@ static int mmc_blk_probe(struct mmc_card *card)
701 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 1271 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
702 cap_str, md->read_only ? "(ro)" : ""); 1272 cap_str, md->read_only ? "(ro)" : "");
703 1273
1274 if (mmc_blk_alloc_parts(card, md))
1275 goto out;
1276
704 mmc_set_drvdata(card, md); 1277 mmc_set_drvdata(card, md);
705 add_disk(md->disk); 1278 mmc_fixup_device(card, blk_fixups);
1279
1280 if (mmc_add_disk(md))
1281 goto out;
1282
1283 list_for_each_entry(part_md, &md->part, part) {
1284 if (mmc_add_disk(part_md))
1285 goto out;
1286 }
706 return 0; 1287 return 0;
707 1288
708 out: 1289 out:
709 mmc_cleanup_queue(&md->queue); 1290 mmc_blk_remove_parts(card, md);
710 mmc_blk_put(md); 1291 mmc_blk_remove_req(md);
711
712 return err; 1292 return err;
713} 1293}
714 1294
@@ -716,36 +1296,46 @@ static void mmc_blk_remove(struct mmc_card *card)
716{ 1296{
717 struct mmc_blk_data *md = mmc_get_drvdata(card); 1297 struct mmc_blk_data *md = mmc_get_drvdata(card);
718 1298
719 if (md) { 1299 mmc_blk_remove_parts(card, md);
720 /* Stop new requests from getting into the queue */ 1300 mmc_claim_host(card->host);
721 del_gendisk(md->disk); 1301 mmc_blk_part_switch(card, md);
722 1302 mmc_release_host(card->host);
723 /* Then flush out any already in there */ 1303 mmc_blk_remove_req(md);
724 mmc_cleanup_queue(&md->queue);
725
726 mmc_blk_put(md);
727 }
728 mmc_set_drvdata(card, NULL); 1304 mmc_set_drvdata(card, NULL);
729} 1305}
730 1306
731#ifdef CONFIG_PM 1307#ifdef CONFIG_PM
732static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) 1308static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
733{ 1309{
1310 struct mmc_blk_data *part_md;
734 struct mmc_blk_data *md = mmc_get_drvdata(card); 1311 struct mmc_blk_data *md = mmc_get_drvdata(card);
735 1312
736 if (md) { 1313 if (md) {
737 mmc_queue_suspend(&md->queue); 1314 mmc_queue_suspend(&md->queue);
1315 list_for_each_entry(part_md, &md->part, part) {
1316 mmc_queue_suspend(&part_md->queue);
1317 }
738 } 1318 }
739 return 0; 1319 return 0;
740} 1320}
741 1321
742static int mmc_blk_resume(struct mmc_card *card) 1322static int mmc_blk_resume(struct mmc_card *card)
743{ 1323{
1324 struct mmc_blk_data *part_md;
744 struct mmc_blk_data *md = mmc_get_drvdata(card); 1325 struct mmc_blk_data *md = mmc_get_drvdata(card);
745 1326
746 if (md) { 1327 if (md) {
747 mmc_blk_set_blksize(md, card); 1328 mmc_blk_set_blksize(md, card);
1329
1330 /*
1331 * Resume involves the card going into idle state,
1332 * so current partition is always the main one.
1333 */
1334 md->part_curr = md->part_type;
748 mmc_queue_resume(&md->queue); 1335 mmc_queue_resume(&md->queue);
1336 list_for_each_entry(part_md, &md->part, part) {
1337 mmc_queue_resume(&part_md->queue);
1338 }
749 } 1339 }
750 return 0; 1340 return 0;
751} 1341}
@@ -768,6 +1358,11 @@ static int __init mmc_blk_init(void)
768{ 1358{
769 int res; 1359 int res;
770 1360
1361 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
1362 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
1363
1364 max_devices = 256 / perdev_minors;
1365
771 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); 1366 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
772 if (res) 1367 if (res)
773 goto out; 1368 goto out;
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 5dd8576b5c18..233cdfae92f4 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -17,6 +17,11 @@
17 17
18#include <linux/scatterlist.h> 18#include <linux/scatterlist.h>
19#include <linux/swap.h> /* For nr_free_buffer_pages() */ 19#include <linux/swap.h> /* For nr_free_buffer_pages() */
20#include <linux/list.h>
21
22#include <linux/debugfs.h>
23#include <linux/uaccess.h>
24#include <linux/seq_file.h>
20 25
21#define RESULT_OK 0 26#define RESULT_OK 0
22#define RESULT_FAIL 1 27#define RESULT_FAIL 1
@@ -56,7 +61,9 @@ struct mmc_test_mem {
56 * struct mmc_test_area - information for performance tests. 61 * struct mmc_test_area - information for performance tests.
57 * @max_sz: test area size (in bytes) 62 * @max_sz: test area size (in bytes)
58 * @dev_addr: address on card at which to do performance tests 63 * @dev_addr: address on card at which to do performance tests
59 * @max_segs: maximum segments in scatterlist @sg 64 * @max_tfr: maximum transfer size allowed by driver (in bytes)
65 * @max_segs: maximum segments allowed by driver in scatterlist @sg
66 * @max_seg_sz: maximum segment size allowed by driver
60 * @blocks: number of (512 byte) blocks currently mapped by @sg 67 * @blocks: number of (512 byte) blocks currently mapped by @sg
61 * @sg_len: length of currently mapped scatterlist @sg 68 * @sg_len: length of currently mapped scatterlist @sg
62 * @mem: allocated memory 69 * @mem: allocated memory
@@ -65,7 +72,9 @@ struct mmc_test_mem {
65struct mmc_test_area { 72struct mmc_test_area {
66 unsigned long max_sz; 73 unsigned long max_sz;
67 unsigned int dev_addr; 74 unsigned int dev_addr;
75 unsigned int max_tfr;
68 unsigned int max_segs; 76 unsigned int max_segs;
77 unsigned int max_seg_sz;
69 unsigned int blocks; 78 unsigned int blocks;
70 unsigned int sg_len; 79 unsigned int sg_len;
71 struct mmc_test_mem *mem; 80 struct mmc_test_mem *mem;
@@ -73,12 +82,59 @@ struct mmc_test_area {
73}; 82};
74 83
75/** 84/**
85 * struct mmc_test_transfer_result - transfer results for performance tests.
86 * @link: double-linked list
87 * @count: amount of group of sectors to check
88 * @sectors: amount of sectors to check in one group
89 * @ts: time values of transfer
90 * @rate: calculated transfer rate
91 * @iops: I/O operations per second (times 100)
92 */
93struct mmc_test_transfer_result {
94 struct list_head link;
95 unsigned int count;
96 unsigned int sectors;
97 struct timespec ts;
98 unsigned int rate;
99 unsigned int iops;
100};
101
102/**
103 * struct mmc_test_general_result - results for tests.
104 * @link: double-linked list
105 * @card: card under test
106 * @testcase: number of test case
107 * @result: result of test run
108 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
109 */
110struct mmc_test_general_result {
111 struct list_head link;
112 struct mmc_card *card;
113 int testcase;
114 int result;
115 struct list_head tr_lst;
116};
117
118/**
119 * struct mmc_test_dbgfs_file - debugfs related file.
120 * @link: double-linked list
121 * @card: card under test
122 * @file: file created under debugfs
123 */
124struct mmc_test_dbgfs_file {
125 struct list_head link;
126 struct mmc_card *card;
127 struct dentry *file;
128};
129
130/**
76 * struct mmc_test_card - test information. 131 * struct mmc_test_card - test information.
77 * @card: card under test 132 * @card: card under test
78 * @scratch: transfer buffer 133 * @scratch: transfer buffer
79 * @buffer: transfer buffer 134 * @buffer: transfer buffer
80 * @highmem: buffer for highmem tests 135 * @highmem: buffer for highmem tests
81 * @area: information for performance tests 136 * @area: information for performance tests
137 * @gr: pointer to results of current testcase
82 */ 138 */
83struct mmc_test_card { 139struct mmc_test_card {
84 struct mmc_card *card; 140 struct mmc_card *card;
@@ -88,7 +144,8 @@ struct mmc_test_card {
88#ifdef CONFIG_HIGHMEM 144#ifdef CONFIG_HIGHMEM
89 struct page *highmem; 145 struct page *highmem;
90#endif 146#endif
91 struct mmc_test_area area; 147 struct mmc_test_area area;
148 struct mmc_test_general_result *gr;
92}; 149};
93 150
94/*******************************************************************/ 151/*******************************************************************/
@@ -100,17 +157,7 @@ struct mmc_test_card {
100 */ 157 */
101static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) 158static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
102{ 159{
103 struct mmc_command cmd; 160 return mmc_set_blocklen(test->card, size);
104 int ret;
105
106 cmd.opcode = MMC_SET_BLOCKLEN;
107 cmd.arg = size;
108 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
109 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
110 if (ret)
111 return ret;
112
113 return 0;
114} 161}
115 162
116/* 163/*
@@ -165,7 +212,7 @@ static int mmc_test_busy(struct mmc_command *cmd)
165static int mmc_test_wait_busy(struct mmc_test_card *test) 212static int mmc_test_wait_busy(struct mmc_test_card *test)
166{ 213{
167 int ret, busy; 214 int ret, busy;
168 struct mmc_command cmd; 215 struct mmc_command cmd = {0};
169 216
170 busy = 0; 217 busy = 0;
171 do { 218 do {
@@ -181,9 +228,10 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
181 228
182 if (!busy && mmc_test_busy(&cmd)) { 229 if (!busy && mmc_test_busy(&cmd)) {
183 busy = 1; 230 busy = 1;
184 printk(KERN_INFO "%s: Warning: Host did not " 231 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
185 "wait for busy state to end.\n", 232 printk(KERN_INFO "%s: Warning: Host did not "
186 mmc_hostname(test->card->host)); 233 "wait for busy state to end.\n",
234 mmc_hostname(test->card->host));
187 } 235 }
188 } while (mmc_test_busy(&cmd)); 236 } while (mmc_test_busy(&cmd));
189 237
@@ -198,18 +246,13 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
198{ 246{
199 int ret; 247 int ret;
200 248
201 struct mmc_request mrq; 249 struct mmc_request mrq = {0};
202 struct mmc_command cmd; 250 struct mmc_command cmd = {0};
203 struct mmc_command stop; 251 struct mmc_command stop = {0};
204 struct mmc_data data; 252 struct mmc_data data = {0};
205 253
206 struct scatterlist sg; 254 struct scatterlist sg;
207 255
208 memset(&mrq, 0, sizeof(struct mmc_request));
209 memset(&cmd, 0, sizeof(struct mmc_command));
210 memset(&data, 0, sizeof(struct mmc_data));
211 memset(&stop, 0, sizeof(struct mmc_command));
212
213 mrq.cmd = &cmd; 256 mrq.cmd = &cmd;
214 mrq.data = &data; 257 mrq.data = &data;
215 mrq.stop = &stop; 258 mrq.stop = &stop;
@@ -244,28 +287,39 @@ static void mmc_test_free_mem(struct mmc_test_mem *mem)
244} 287}
245 288
246/* 289/*
247 * Allocate a lot of memory, preferrably max_sz but at least min_sz. In case 290 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
248 * there isn't much memory do not exceed 1/16th total lowmem pages. 291 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
292 * not exceed a maximum number of segments and try not to make segments much
293 * bigger than maximum segment size.
249 */ 294 */
250static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz, 295static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
251 unsigned long max_sz) 296 unsigned long max_sz,
297 unsigned int max_segs,
298 unsigned int max_seg_sz)
252{ 299{
253 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE); 300 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
254 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE); 301 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
302 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
255 unsigned long page_cnt = 0; 303 unsigned long page_cnt = 0;
256 unsigned long limit = nr_free_buffer_pages() >> 4; 304 unsigned long limit = nr_free_buffer_pages() >> 4;
257 struct mmc_test_mem *mem; 305 struct mmc_test_mem *mem;
258 306
259 if (max_page_cnt > limit) 307 if (max_page_cnt > limit)
260 max_page_cnt = limit; 308 max_page_cnt = limit;
261 if (max_page_cnt < min_page_cnt) 309 if (min_page_cnt > max_page_cnt)
262 max_page_cnt = min_page_cnt; 310 min_page_cnt = max_page_cnt;
311
312 if (max_seg_page_cnt > max_page_cnt)
313 max_seg_page_cnt = max_page_cnt;
314
315 if (max_segs > max_page_cnt)
316 max_segs = max_page_cnt;
263 317
264 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL); 318 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
265 if (!mem) 319 if (!mem)
266 return NULL; 320 return NULL;
267 321
268 mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_page_cnt, 322 mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
269 GFP_KERNEL); 323 GFP_KERNEL);
270 if (!mem->arr) 324 if (!mem->arr)
271 goto out_free; 325 goto out_free;
@@ -276,7 +330,7 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
276 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN | 330 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
277 __GFP_NORETRY; 331 __GFP_NORETRY;
278 332
279 order = get_order(max_page_cnt << PAGE_SHIFT); 333 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
280 while (1) { 334 while (1) {
281 page = alloc_pages(flags, order); 335 page = alloc_pages(flags, order);
282 if (page || !order) 336 if (page || !order)
@@ -295,6 +349,11 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
295 break; 349 break;
296 max_page_cnt -= 1UL << order; 350 max_page_cnt -= 1UL << order;
297 page_cnt += 1UL << order; 351 page_cnt += 1UL << order;
352 if (mem->cnt >= max_segs) {
353 if (page_cnt < min_page_cnt)
354 goto out_free;
355 break;
356 }
298 } 357 }
299 358
300 return mem; 359 return mem;
@@ -310,7 +369,8 @@ out_free:
310 */ 369 */
311static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz, 370static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
312 struct scatterlist *sglist, int repeat, 371 struct scatterlist *sglist, int repeat,
313 unsigned int max_segs, unsigned int *sg_len) 372 unsigned int max_segs, unsigned int max_seg_sz,
373 unsigned int *sg_len)
314{ 374{
315 struct scatterlist *sg = NULL; 375 struct scatterlist *sg = NULL;
316 unsigned int i; 376 unsigned int i;
@@ -322,8 +382,10 @@ static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
322 for (i = 0; i < mem->cnt; i++) { 382 for (i = 0; i < mem->cnt; i++) {
323 unsigned long len = PAGE_SIZE << mem->arr[i].order; 383 unsigned long len = PAGE_SIZE << mem->arr[i].order;
324 384
325 if (sz < len) 385 if (len > sz)
326 len = sz; 386 len = sz;
387 if (len > max_seg_sz)
388 len = max_seg_sz;
327 if (sg) 389 if (sg)
328 sg = sg_next(sg); 390 sg = sg_next(sg);
329 else 391 else
@@ -355,6 +417,7 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
355 unsigned long sz, 417 unsigned long sz,
356 struct scatterlist *sglist, 418 struct scatterlist *sglist,
357 unsigned int max_segs, 419 unsigned int max_segs,
420 unsigned int max_seg_sz,
358 unsigned int *sg_len) 421 unsigned int *sg_len)
359{ 422{
360 struct scatterlist *sg = NULL; 423 struct scatterlist *sg = NULL;
@@ -365,7 +428,7 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
365 sg_init_table(sglist, max_segs); 428 sg_init_table(sglist, max_segs);
366 429
367 *sg_len = 0; 430 *sg_len = 0;
368 while (sz && i) { 431 while (sz) {
369 base = page_address(mem->arr[--i].page); 432 base = page_address(mem->arr[--i].page);
370 cnt = 1 << mem->arr[i].order; 433 cnt = 1 << mem->arr[i].order;
371 while (sz && cnt) { 434 while (sz && cnt) {
@@ -374,7 +437,9 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
374 continue; 437 continue;
375 last_addr = addr; 438 last_addr = addr;
376 len = PAGE_SIZE; 439 len = PAGE_SIZE;
377 if (sz < len) 440 if (len > max_seg_sz)
441 len = max_seg_sz;
442 if (len > sz)
378 len = sz; 443 len = sz;
379 if (sg) 444 if (sg)
380 sg = sg_next(sg); 445 sg = sg_next(sg);
@@ -386,6 +451,8 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
386 sz -= len; 451 sz -= len;
387 *sg_len += 1; 452 *sg_len += 1;
388 } 453 }
454 if (i == 0)
455 i = mem->cnt;
389 } 456 }
390 457
391 if (sg) 458 if (sg)
@@ -421,23 +488,52 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
421} 488}
422 489
423/* 490/*
491 * Save transfer results for future usage
492 */
493static void mmc_test_save_transfer_result(struct mmc_test_card *test,
494 unsigned int count, unsigned int sectors, struct timespec ts,
495 unsigned int rate, unsigned int iops)
496{
497 struct mmc_test_transfer_result *tr;
498
499 if (!test->gr)
500 return;
501
502 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
503 if (!tr)
504 return;
505
506 tr->count = count;
507 tr->sectors = sectors;
508 tr->ts = ts;
509 tr->rate = rate;
510 tr->iops = iops;
511
512 list_add_tail(&tr->link, &test->gr->tr_lst);
513}
514
515/*
424 * Print the transfer rate. 516 * Print the transfer rate.
425 */ 517 */
426static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, 518static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
427 struct timespec *ts1, struct timespec *ts2) 519 struct timespec *ts1, struct timespec *ts2)
428{ 520{
429 unsigned int rate, sectors = bytes >> 9; 521 unsigned int rate, iops, sectors = bytes >> 9;
430 struct timespec ts; 522 struct timespec ts;
431 523
432 ts = timespec_sub(*ts2, *ts1); 524 ts = timespec_sub(*ts2, *ts1);
433 525
434 rate = mmc_test_rate(bytes, &ts); 526 rate = mmc_test_rate(bytes, &ts);
527 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
435 528
436 printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " 529 printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
437 "seconds (%u kB/s, %u KiB/s)\n", 530 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
438 mmc_hostname(test->card->host), sectors, sectors >> 1, 531 mmc_hostname(test->card->host), sectors, sectors >> 1,
439 (sectors == 1 ? ".5" : ""), (unsigned long)ts.tv_sec, 532 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
440 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024); 533 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
534 iops / 100, iops % 100);
535
536 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
441} 537}
442 538
443/* 539/*
@@ -447,20 +543,24 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
447 unsigned int count, struct timespec *ts1, 543 unsigned int count, struct timespec *ts1,
448 struct timespec *ts2) 544 struct timespec *ts2)
449{ 545{
450 unsigned int rate, sectors = bytes >> 9; 546 unsigned int rate, iops, sectors = bytes >> 9;
451 uint64_t tot = bytes * count; 547 uint64_t tot = bytes * count;
452 struct timespec ts; 548 struct timespec ts;
453 549
454 ts = timespec_sub(*ts2, *ts1); 550 ts = timespec_sub(*ts2, *ts1);
455 551
456 rate = mmc_test_rate(tot, &ts); 552 rate = mmc_test_rate(tot, &ts);
553 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
457 554
458 printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " 555 printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
459 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n", 556 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
557 "%u.%02u IOPS)\n",
460 mmc_hostname(test->card->host), count, sectors, count, 558 mmc_hostname(test->card->host), count, sectors, count,
461 sectors >> 1, (sectors == 1 ? ".5" : ""), 559 sectors >> 1, (sectors & 1 ? ".5" : ""),
462 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec, 560 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
463 rate / 1000, rate / 1024); 561 rate / 1000, rate / 1024, iops / 100, iops % 100);
562
563 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
464} 564}
465 565
466/* 566/*
@@ -626,15 +726,10 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
626 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 726 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
627 unsigned blocks, unsigned blksz, int write) 727 unsigned blocks, unsigned blksz, int write)
628{ 728{
629 struct mmc_request mrq; 729 struct mmc_request mrq = {0};
630 struct mmc_command cmd; 730 struct mmc_command cmd = {0};
631 struct mmc_command stop; 731 struct mmc_command stop = {0};
632 struct mmc_data data; 732 struct mmc_data data = {0};
633
634 memset(&mrq, 0, sizeof(struct mmc_request));
635 memset(&cmd, 0, sizeof(struct mmc_command));
636 memset(&data, 0, sizeof(struct mmc_data));
637 memset(&stop, 0, sizeof(struct mmc_command));
638 733
639 mrq.cmd = &cmd; 734 mrq.cmd = &cmd;
640 mrq.data = &data; 735 mrq.data = &data;
@@ -656,18 +751,13 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
656static int mmc_test_broken_transfer(struct mmc_test_card *test, 751static int mmc_test_broken_transfer(struct mmc_test_card *test,
657 unsigned blocks, unsigned blksz, int write) 752 unsigned blocks, unsigned blksz, int write)
658{ 753{
659 struct mmc_request mrq; 754 struct mmc_request mrq = {0};
660 struct mmc_command cmd; 755 struct mmc_command cmd = {0};
661 struct mmc_command stop; 756 struct mmc_command stop = {0};
662 struct mmc_data data; 757 struct mmc_data data = {0};
663 758
664 struct scatterlist sg; 759 struct scatterlist sg;
665 760
666 memset(&mrq, 0, sizeof(struct mmc_request));
667 memset(&cmd, 0, sizeof(struct mmc_command));
668 memset(&data, 0, sizeof(struct mmc_data));
669 memset(&stop, 0, sizeof(struct mmc_command));
670
671 mrq.cmd = &cmd; 761 mrq.cmd = &cmd;
672 mrq.data = &data; 762 mrq.data = &data;
673 mrq.stop = &stop; 763 mrq.stop = &stop;
@@ -1215,16 +1305,22 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1215 int max_scatter) 1305 int max_scatter)
1216{ 1306{
1217 struct mmc_test_area *t = &test->area; 1307 struct mmc_test_area *t = &test->area;
1308 int err;
1218 1309
1219 t->blocks = sz >> 9; 1310 t->blocks = sz >> 9;
1220 1311
1221 if (max_scatter) { 1312 if (max_scatter) {
1222 return mmc_test_map_sg_max_scatter(t->mem, sz, t->sg, 1313 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1223 t->max_segs, &t->sg_len); 1314 t->max_segs, t->max_seg_sz,
1224 } else {
1225 return mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1226 &t->sg_len); 1315 &t->sg_len);
1316 } else {
1317 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1318 t->max_seg_sz, &t->sg_len);
1227 } 1319 }
1320 if (err)
1321 printk(KERN_INFO "%s: Failed to map sg list\n",
1322 mmc_hostname(test->card->host));
1323 return err;
1228} 1324}
1229 1325
1230/* 1326/*
@@ -1249,6 +1345,22 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1249 struct timespec ts1, ts2; 1345 struct timespec ts1, ts2;
1250 int ret; 1346 int ret;
1251 1347
1348 /*
1349 * In the case of a maximally scattered transfer, the maximum transfer
1350 * size is further limited by using PAGE_SIZE segments.
1351 */
1352 if (max_scatter) {
1353 struct mmc_test_area *t = &test->area;
1354 unsigned long max_tfr;
1355
1356 if (t->max_seg_sz >= PAGE_SIZE)
1357 max_tfr = t->max_segs * PAGE_SIZE;
1358 else
1359 max_tfr = t->max_segs * t->max_seg_sz;
1360 if (sz > max_tfr)
1361 sz = max_tfr;
1362 }
1363
1252 ret = mmc_test_area_map(test, sz, max_scatter); 1364 ret = mmc_test_area_map(test, sz, max_scatter);
1253 if (ret) 1365 if (ret)
1254 return ret; 1366 return ret;
@@ -1274,8 +1386,9 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1274 */ 1386 */
1275static int mmc_test_area_fill(struct mmc_test_card *test) 1387static int mmc_test_area_fill(struct mmc_test_card *test)
1276{ 1388{
1277 return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr, 1389 struct mmc_test_area *t = &test->area;
1278 1, 0, 0); 1390
1391 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1279} 1392}
1280 1393
1281/* 1394/*
@@ -1288,7 +1401,7 @@ static int mmc_test_area_erase(struct mmc_test_card *test)
1288 if (!mmc_can_erase(test->card)) 1401 if (!mmc_can_erase(test->card))
1289 return 0; 1402 return 0;
1290 1403
1291 return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9, 1404 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1292 MMC_ERASE_ARG); 1405 MMC_ERASE_ARG);
1293} 1406}
1294 1407
@@ -1306,38 +1419,52 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
1306} 1419}
1307 1420
1308/* 1421/*
1309 * Initialize an area for testing large transfers. The size of the area is the 1422 * Initialize an area for testing large transfers. The test area is set to the
1310 * preferred erase size which is a good size for optimal transfer speed. Note 1423 * middle of the card because cards may have different charateristics at the
1311 * that is typically 4MiB for modern cards. The test area is set to the middle 1424 * front (for FAT file system optimization). Optionally, the area is erased
1312 * of the card because cards may have different charateristics at the front 1425 * (if the card supports it) which may improve write performance. Optionally,
1313 * (for FAT file system optimization). Optionally, the area is erased (if the 1426 * the area is filled with data for subsequent read tests.
1314 * card supports it) which may improve write performance. Optionally, the area
1315 * is filled with data for subsequent read tests.
1316 */ 1427 */
1317static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill) 1428static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1318{ 1429{
1319 struct mmc_test_area *t = &test->area; 1430 struct mmc_test_area *t = &test->area;
1320 unsigned long min_sz = 64 * 1024; 1431 unsigned long min_sz = 64 * 1024, sz;
1321 int ret; 1432 int ret;
1322 1433
1323 ret = mmc_test_set_blksize(test, 512); 1434 ret = mmc_test_set_blksize(test, 512);
1324 if (ret) 1435 if (ret)
1325 return ret; 1436 return ret;
1326 1437
1327 if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9) 1438 /* Make the test area size about 4MiB */
1328 t->max_sz = TEST_AREA_MAX_SIZE; 1439 sz = (unsigned long)test->card->pref_erase << 9;
1329 else 1440 t->max_sz = sz;
1330 t->max_sz = (unsigned long)test->card->pref_erase << 9; 1441 while (t->max_sz < 4 * 1024 * 1024)
1442 t->max_sz += sz;
1443 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1444 t->max_sz -= sz;
1445
1446 t->max_segs = test->card->host->max_segs;
1447 t->max_seg_sz = test->card->host->max_seg_size;
1448
1449 t->max_tfr = t->max_sz;
1450 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1451 t->max_tfr = test->card->host->max_blk_count << 9;
1452 if (t->max_tfr > test->card->host->max_req_size)
1453 t->max_tfr = test->card->host->max_req_size;
1454 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1455 t->max_tfr = t->max_segs * t->max_seg_sz;
1456
1331 /* 1457 /*
1332 * Try to allocate enough memory for the whole area. Less is OK 1458 * Try to allocate enough memory for a max. sized transfer. Less is OK
1333 * because the same memory can be mapped into the scatterlist more than 1459 * because the same memory can be mapped into the scatterlist more than
1334 * once. 1460 * once. Also, take into account the limits imposed on scatterlist
1461 * segments by the host driver.
1335 */ 1462 */
1336 t->mem = mmc_test_alloc_mem(min_sz, t->max_sz); 1463 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1464 t->max_seg_sz);
1337 if (!t->mem) 1465 if (!t->mem)
1338 return -ENOMEM; 1466 return -ENOMEM;
1339 1467
1340 t->max_segs = DIV_ROUND_UP(t->max_sz, PAGE_SIZE);
1341 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL); 1468 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1342 if (!t->sg) { 1469 if (!t->sg) {
1343 ret = -ENOMEM; 1470 ret = -ENOMEM;
@@ -1401,8 +1528,10 @@ static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1401static int mmc_test_best_performance(struct mmc_test_card *test, int write, 1528static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1402 int max_scatter) 1529 int max_scatter)
1403{ 1530{
1404 return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr, 1531 struct mmc_test_area *t = &test->area;
1405 write, max_scatter, 1); 1532
1533 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1534 max_scatter, 1);
1406} 1535}
1407 1536
1408/* 1537/*
@@ -1442,17 +1571,19 @@ static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1442 */ 1571 */
1443static int mmc_test_profile_read_perf(struct mmc_test_card *test) 1572static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1444{ 1573{
1574 struct mmc_test_area *t = &test->area;
1445 unsigned long sz; 1575 unsigned long sz;
1446 unsigned int dev_addr; 1576 unsigned int dev_addr;
1447 int ret; 1577 int ret;
1448 1578
1449 for (sz = 512; sz < test->area.max_sz; sz <<= 1) { 1579 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1450 dev_addr = test->area.dev_addr + (sz >> 9); 1580 dev_addr = t->dev_addr + (sz >> 9);
1451 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1581 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1452 if (ret) 1582 if (ret)
1453 return ret; 1583 return ret;
1454 } 1584 }
1455 dev_addr = test->area.dev_addr; 1585 sz = t->max_tfr;
1586 dev_addr = t->dev_addr;
1456 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1587 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1457} 1588}
1458 1589
@@ -1461,6 +1592,7 @@ static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1461 */ 1592 */
1462static int mmc_test_profile_write_perf(struct mmc_test_card *test) 1593static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1463{ 1594{
1595 struct mmc_test_area *t = &test->area;
1464 unsigned long sz; 1596 unsigned long sz;
1465 unsigned int dev_addr; 1597 unsigned int dev_addr;
1466 int ret; 1598 int ret;
@@ -1468,8 +1600,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1468 ret = mmc_test_area_erase(test); 1600 ret = mmc_test_area_erase(test);
1469 if (ret) 1601 if (ret)
1470 return ret; 1602 return ret;
1471 for (sz = 512; sz < test->area.max_sz; sz <<= 1) { 1603 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1472 dev_addr = test->area.dev_addr + (sz >> 9); 1604 dev_addr = t->dev_addr + (sz >> 9);
1473 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1605 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1474 if (ret) 1606 if (ret)
1475 return ret; 1607 return ret;
@@ -1477,7 +1609,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1477 ret = mmc_test_area_erase(test); 1609 ret = mmc_test_area_erase(test);
1478 if (ret) 1610 if (ret)
1479 return ret; 1611 return ret;
1480 dev_addr = test->area.dev_addr; 1612 sz = t->max_tfr;
1613 dev_addr = t->dev_addr;
1481 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1614 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1482} 1615}
1483 1616
@@ -1486,6 +1619,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1486 */ 1619 */
1487static int mmc_test_profile_trim_perf(struct mmc_test_card *test) 1620static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1488{ 1621{
1622 struct mmc_test_area *t = &test->area;
1489 unsigned long sz; 1623 unsigned long sz;
1490 unsigned int dev_addr; 1624 unsigned int dev_addr;
1491 struct timespec ts1, ts2; 1625 struct timespec ts1, ts2;
@@ -1497,8 +1631,8 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1497 if (!mmc_can_erase(test->card)) 1631 if (!mmc_can_erase(test->card))
1498 return RESULT_UNSUP_HOST; 1632 return RESULT_UNSUP_HOST;
1499 1633
1500 for (sz = 512; sz < test->area.max_sz; sz <<= 1) { 1634 for (sz = 512; sz < t->max_sz; sz <<= 1) {
1501 dev_addr = test->area.dev_addr + (sz >> 9); 1635 dev_addr = t->dev_addr + (sz >> 9);
1502 getnstimeofday(&ts1); 1636 getnstimeofday(&ts1);
1503 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1637 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1504 if (ret) 1638 if (ret)
@@ -1506,7 +1640,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1506 getnstimeofday(&ts2); 1640 getnstimeofday(&ts2);
1507 mmc_test_print_rate(test, sz, &ts1, &ts2); 1641 mmc_test_print_rate(test, sz, &ts1, &ts2);
1508 } 1642 }
1509 dev_addr = test->area.dev_addr; 1643 dev_addr = t->dev_addr;
1510 getnstimeofday(&ts1); 1644 getnstimeofday(&ts1);
1511 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1645 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1512 if (ret) 1646 if (ret)
@@ -1516,29 +1650,66 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1516 return 0; 1650 return 0;
1517} 1651}
1518 1652
1653static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1654{
1655 struct mmc_test_area *t = &test->area;
1656 unsigned int dev_addr, i, cnt;
1657 struct timespec ts1, ts2;
1658 int ret;
1659
1660 cnt = t->max_sz / sz;
1661 dev_addr = t->dev_addr;
1662 getnstimeofday(&ts1);
1663 for (i = 0; i < cnt; i++) {
1664 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1665 if (ret)
1666 return ret;
1667 dev_addr += (sz >> 9);
1668 }
1669 getnstimeofday(&ts2);
1670 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1671 return 0;
1672}
1673
1519/* 1674/*
1520 * Consecutive read performance by transfer size. 1675 * Consecutive read performance by transfer size.
1521 */ 1676 */
1522static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) 1677static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1523{ 1678{
1679 struct mmc_test_area *t = &test->area;
1524 unsigned long sz; 1680 unsigned long sz;
1681 int ret;
1682
1683 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1684 ret = mmc_test_seq_read_perf(test, sz);
1685 if (ret)
1686 return ret;
1687 }
1688 sz = t->max_tfr;
1689 return mmc_test_seq_read_perf(test, sz);
1690}
1691
1692static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1693{
1694 struct mmc_test_area *t = &test->area;
1525 unsigned int dev_addr, i, cnt; 1695 unsigned int dev_addr, i, cnt;
1526 struct timespec ts1, ts2; 1696 struct timespec ts1, ts2;
1527 int ret; 1697 int ret;
1528 1698
1529 for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { 1699 ret = mmc_test_area_erase(test);
1530 cnt = test->area.max_sz / sz; 1700 if (ret)
1531 dev_addr = test->area.dev_addr; 1701 return ret;
1532 getnstimeofday(&ts1); 1702 cnt = t->max_sz / sz;
1533 for (i = 0; i < cnt; i++) { 1703 dev_addr = t->dev_addr;
1534 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); 1704 getnstimeofday(&ts1);
1535 if (ret) 1705 for (i = 0; i < cnt; i++) {
1536 return ret; 1706 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1537 dev_addr += (sz >> 9); 1707 if (ret)
1538 } 1708 return ret;
1539 getnstimeofday(&ts2); 1709 dev_addr += (sz >> 9);
1540 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1541 } 1710 }
1711 getnstimeofday(&ts2);
1712 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1542 return 0; 1713 return 0;
1543} 1714}
1544 1715
@@ -1547,28 +1718,17 @@ static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1547 */ 1718 */
1548static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) 1719static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1549{ 1720{
1721 struct mmc_test_area *t = &test->area;
1550 unsigned long sz; 1722 unsigned long sz;
1551 unsigned int dev_addr, i, cnt;
1552 struct timespec ts1, ts2;
1553 int ret; 1723 int ret;
1554 1724
1555 for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { 1725 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1556 ret = mmc_test_area_erase(test); 1726 ret = mmc_test_seq_write_perf(test, sz);
1557 if (ret) 1727 if (ret)
1558 return ret; 1728 return ret;
1559 cnt = test->area.max_sz / sz;
1560 dev_addr = test->area.dev_addr;
1561 getnstimeofday(&ts1);
1562 for (i = 0; i < cnt; i++) {
1563 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1564 if (ret)
1565 return ret;
1566 dev_addr += (sz >> 9);
1567 }
1568 getnstimeofday(&ts2);
1569 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1570 } 1729 }
1571 return 0; 1730 sz = t->max_tfr;
1731 return mmc_test_seq_write_perf(test, sz);
1572} 1732}
1573 1733
1574/* 1734/*
@@ -1576,6 +1736,7 @@ static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1576 */ 1736 */
1577static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) 1737static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1578{ 1738{
1739 struct mmc_test_area *t = &test->area;
1579 unsigned long sz; 1740 unsigned long sz;
1580 unsigned int dev_addr, i, cnt; 1741 unsigned int dev_addr, i, cnt;
1581 struct timespec ts1, ts2; 1742 struct timespec ts1, ts2;
@@ -1587,15 +1748,15 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1587 if (!mmc_can_erase(test->card)) 1748 if (!mmc_can_erase(test->card))
1588 return RESULT_UNSUP_HOST; 1749 return RESULT_UNSUP_HOST;
1589 1750
1590 for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { 1751 for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1591 ret = mmc_test_area_erase(test); 1752 ret = mmc_test_area_erase(test);
1592 if (ret) 1753 if (ret)
1593 return ret; 1754 return ret;
1594 ret = mmc_test_area_fill(test); 1755 ret = mmc_test_area_fill(test);
1595 if (ret) 1756 if (ret)
1596 return ret; 1757 return ret;
1597 cnt = test->area.max_sz / sz; 1758 cnt = t->max_sz / sz;
1598 dev_addr = test->area.dev_addr; 1759 dev_addr = t->dev_addr;
1599 getnstimeofday(&ts1); 1760 getnstimeofday(&ts1);
1600 for (i = 0; i < cnt; i++) { 1761 for (i = 0; i < cnt; i++) {
1601 ret = mmc_erase(test->card, dev_addr, sz >> 9, 1762 ret = mmc_erase(test->card, dev_addr, sz >> 9,
@@ -1610,6 +1771,189 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1610 return 0; 1771 return 0;
1611} 1772}
1612 1773
1774static unsigned int rnd_next = 1;
1775
1776static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1777{
1778 uint64_t r;
1779
1780 rnd_next = rnd_next * 1103515245 + 12345;
1781 r = (rnd_next >> 16) & 0x7fff;
1782 return (r * rnd_cnt) >> 15;
1783}
1784
1785static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1786 unsigned long sz)
1787{
1788 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1789 unsigned int ssz;
1790 struct timespec ts1, ts2, ts;
1791 int ret;
1792
1793 ssz = sz >> 9;
1794
1795 rnd_addr = mmc_test_capacity(test->card) / 4;
1796 range1 = rnd_addr / test->card->pref_erase;
1797 range2 = range1 / ssz;
1798
1799 getnstimeofday(&ts1);
1800 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1801 getnstimeofday(&ts2);
1802 ts = timespec_sub(ts2, ts1);
1803 if (ts.tv_sec >= 10)
1804 break;
1805 ea = mmc_test_rnd_num(range1);
1806 if (ea == last_ea)
1807 ea -= 1;
1808 last_ea = ea;
1809 dev_addr = rnd_addr + test->card->pref_erase * ea +
1810 ssz * mmc_test_rnd_num(range2);
1811 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1812 if (ret)
1813 return ret;
1814 }
1815 if (print)
1816 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1817 return 0;
1818}
1819
1820static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1821{
1822 struct mmc_test_area *t = &test->area;
1823 unsigned int next;
1824 unsigned long sz;
1825 int ret;
1826
1827 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1828 /*
1829 * When writing, try to get more consistent results by running
1830 * the test twice with exactly the same I/O but outputting the
1831 * results only for the 2nd run.
1832 */
1833 if (write) {
1834 next = rnd_next;
1835 ret = mmc_test_rnd_perf(test, write, 0, sz);
1836 if (ret)
1837 return ret;
1838 rnd_next = next;
1839 }
1840 ret = mmc_test_rnd_perf(test, write, 1, sz);
1841 if (ret)
1842 return ret;
1843 }
1844 sz = t->max_tfr;
1845 if (write) {
1846 next = rnd_next;
1847 ret = mmc_test_rnd_perf(test, write, 0, sz);
1848 if (ret)
1849 return ret;
1850 rnd_next = next;
1851 }
1852 return mmc_test_rnd_perf(test, write, 1, sz);
1853}
1854
1855/*
1856 * Random read performance by transfer size.
1857 */
1858static int mmc_test_random_read_perf(struct mmc_test_card *test)
1859{
1860 return mmc_test_random_perf(test, 0);
1861}
1862
1863/*
1864 * Random write performance by transfer size.
1865 */
1866static int mmc_test_random_write_perf(struct mmc_test_card *test)
1867{
1868 return mmc_test_random_perf(test, 1);
1869}
1870
1871static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1872 unsigned int tot_sz, int max_scatter)
1873{
1874 struct mmc_test_area *t = &test->area;
1875 unsigned int dev_addr, i, cnt, sz, ssz;
1876 struct timespec ts1, ts2;
1877 int ret;
1878
1879 sz = t->max_tfr;
1880
1881 /*
1882 * In the case of a maximally scattered transfer, the maximum transfer
1883 * size is further limited by using PAGE_SIZE segments.
1884 */
1885 if (max_scatter) {
1886 unsigned long max_tfr;
1887
1888 if (t->max_seg_sz >= PAGE_SIZE)
1889 max_tfr = t->max_segs * PAGE_SIZE;
1890 else
1891 max_tfr = t->max_segs * t->max_seg_sz;
1892 if (sz > max_tfr)
1893 sz = max_tfr;
1894 }
1895
1896 ssz = sz >> 9;
1897 dev_addr = mmc_test_capacity(test->card) / 4;
1898 if (tot_sz > dev_addr << 9)
1899 tot_sz = dev_addr << 9;
1900 cnt = tot_sz / sz;
1901 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
1902
1903 getnstimeofday(&ts1);
1904 for (i = 0; i < cnt; i++) {
1905 ret = mmc_test_area_io(test, sz, dev_addr, write,
1906 max_scatter, 0);
1907 if (ret)
1908 return ret;
1909 dev_addr += ssz;
1910 }
1911 getnstimeofday(&ts2);
1912
1913 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1914
1915 return 0;
1916}
1917
1918static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
1919{
1920 int ret, i;
1921
1922 for (i = 0; i < 10; i++) {
1923 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
1924 if (ret)
1925 return ret;
1926 }
1927 for (i = 0; i < 5; i++) {
1928 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
1929 if (ret)
1930 return ret;
1931 }
1932 for (i = 0; i < 3; i++) {
1933 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
1934 if (ret)
1935 return ret;
1936 }
1937
1938 return ret;
1939}
1940
1941/*
1942 * Large sequential read performance.
1943 */
1944static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
1945{
1946 return mmc_test_large_seq_perf(test, 0);
1947}
1948
1949/*
1950 * Large sequential write performance.
1951 */
1952static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
1953{
1954 return mmc_test_large_seq_perf(test, 1);
1955}
1956
1613static const struct mmc_test_case mmc_test_cases[] = { 1957static const struct mmc_test_case mmc_test_cases[] = {
1614 { 1958 {
1615 .name = "Basic write (no data verification)", 1959 .name = "Basic write (no data verification)",
@@ -1849,10 +2193,40 @@ static const struct mmc_test_case mmc_test_cases[] = {
1849 .cleanup = mmc_test_area_cleanup, 2193 .cleanup = mmc_test_area_cleanup,
1850 }, 2194 },
1851 2195
2196 {
2197 .name = "Random read performance by transfer size",
2198 .prepare = mmc_test_area_prepare,
2199 .run = mmc_test_random_read_perf,
2200 .cleanup = mmc_test_area_cleanup,
2201 },
2202
2203 {
2204 .name = "Random write performance by transfer size",
2205 .prepare = mmc_test_area_prepare,
2206 .run = mmc_test_random_write_perf,
2207 .cleanup = mmc_test_area_cleanup,
2208 },
2209
2210 {
2211 .name = "Large sequential read into scattered pages",
2212 .prepare = mmc_test_area_prepare,
2213 .run = mmc_test_large_seq_read_perf,
2214 .cleanup = mmc_test_area_cleanup,
2215 },
2216
2217 {
2218 .name = "Large sequential write from scattered pages",
2219 .prepare = mmc_test_area_prepare,
2220 .run = mmc_test_large_seq_write_perf,
2221 .cleanup = mmc_test_area_cleanup,
2222 },
2223
1852}; 2224};
1853 2225
1854static DEFINE_MUTEX(mmc_test_lock); 2226static DEFINE_MUTEX(mmc_test_lock);
1855 2227
2228static LIST_HEAD(mmc_test_result);
2229
1856static void mmc_test_run(struct mmc_test_card *test, int testcase) 2230static void mmc_test_run(struct mmc_test_card *test, int testcase)
1857{ 2231{
1858 int i, ret; 2232 int i, ret;
@@ -1863,6 +2237,8 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
1863 mmc_claim_host(test->card->host); 2237 mmc_claim_host(test->card->host);
1864 2238
1865 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { 2239 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
2240 struct mmc_test_general_result *gr;
2241
1866 if (testcase && ((i + 1) != testcase)) 2242 if (testcase && ((i + 1) != testcase))
1867 continue; 2243 continue;
1868 2244
@@ -1881,6 +2257,25 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
1881 } 2257 }
1882 } 2258 }
1883 2259
2260 gr = kzalloc(sizeof(struct mmc_test_general_result),
2261 GFP_KERNEL);
2262 if (gr) {
2263 INIT_LIST_HEAD(&gr->tr_lst);
2264
2265 /* Assign data what we know already */
2266 gr->card = test->card;
2267 gr->testcase = i;
2268
2269 /* Append container to global one */
2270 list_add_tail(&gr->link, &mmc_test_result);
2271
2272 /*
2273 * Save the pointer to created container in our private
2274 * structure.
2275 */
2276 test->gr = gr;
2277 }
2278
1884 ret = mmc_test_cases[i].run(test); 2279 ret = mmc_test_cases[i].run(test);
1885 switch (ret) { 2280 switch (ret) {
1886 case RESULT_OK: 2281 case RESULT_OK:
@@ -1906,6 +2301,10 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
1906 mmc_hostname(test->card->host), ret); 2301 mmc_hostname(test->card->host), ret);
1907 } 2302 }
1908 2303
2304 /* Save the result */
2305 if (gr)
2306 gr->result = ret;
2307
1909 if (mmc_test_cases[i].cleanup) { 2308 if (mmc_test_cases[i].cleanup) {
1910 ret = mmc_test_cases[i].cleanup(test); 2309 ret = mmc_test_cases[i].cleanup(test);
1911 if (ret) { 2310 if (ret) {
@@ -1923,30 +2322,95 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
1923 mmc_hostname(test->card->host)); 2322 mmc_hostname(test->card->host));
1924} 2323}
1925 2324
1926static ssize_t mmc_test_show(struct device *dev, 2325static void mmc_test_free_result(struct mmc_card *card)
1927 struct device_attribute *attr, char *buf) 2326{
2327 struct mmc_test_general_result *gr, *grs;
2328
2329 mutex_lock(&mmc_test_lock);
2330
2331 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2332 struct mmc_test_transfer_result *tr, *trs;
2333
2334 if (card && gr->card != card)
2335 continue;
2336
2337 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2338 list_del(&tr->link);
2339 kfree(tr);
2340 }
2341
2342 list_del(&gr->link);
2343 kfree(gr);
2344 }
2345
2346 mutex_unlock(&mmc_test_lock);
2347}
2348
2349static LIST_HEAD(mmc_test_file_test);
2350
2351static int mtf_test_show(struct seq_file *sf, void *data)
1928{ 2352{
2353 struct mmc_card *card = (struct mmc_card *)sf->private;
2354 struct mmc_test_general_result *gr;
2355
1929 mutex_lock(&mmc_test_lock); 2356 mutex_lock(&mmc_test_lock);
2357
2358 list_for_each_entry(gr, &mmc_test_result, link) {
2359 struct mmc_test_transfer_result *tr;
2360
2361 if (gr->card != card)
2362 continue;
2363
2364 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2365
2366 list_for_each_entry(tr, &gr->tr_lst, link) {
2367 seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
2368 tr->count, tr->sectors,
2369 (unsigned long)tr->ts.tv_sec,
2370 (unsigned long)tr->ts.tv_nsec,
2371 tr->rate, tr->iops / 100, tr->iops % 100);
2372 }
2373 }
2374
1930 mutex_unlock(&mmc_test_lock); 2375 mutex_unlock(&mmc_test_lock);
1931 2376
1932 return 0; 2377 return 0;
1933} 2378}
1934 2379
1935static ssize_t mmc_test_store(struct device *dev, 2380static int mtf_test_open(struct inode *inode, struct file *file)
1936 struct device_attribute *attr, const char *buf, size_t count)
1937{ 2381{
1938 struct mmc_card *card; 2382 return single_open(file, mtf_test_show, inode->i_private);
2383}
2384
2385static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2386 size_t count, loff_t *pos)
2387{
2388 struct seq_file *sf = (struct seq_file *)file->private_data;
2389 struct mmc_card *card = (struct mmc_card *)sf->private;
1939 struct mmc_test_card *test; 2390 struct mmc_test_card *test;
1940 int testcase; 2391 char lbuf[12];
2392 long testcase;
2393
2394 if (count >= sizeof(lbuf))
2395 return -EINVAL;
1941 2396
1942 card = container_of(dev, struct mmc_card, dev); 2397 if (copy_from_user(lbuf, buf, count))
2398 return -EFAULT;
2399 lbuf[count] = '\0';
1943 2400
1944 testcase = simple_strtol(buf, NULL, 10); 2401 if (strict_strtol(lbuf, 10, &testcase))
2402 return -EINVAL;
1945 2403
1946 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); 2404 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
1947 if (!test) 2405 if (!test)
1948 return -ENOMEM; 2406 return -ENOMEM;
1949 2407
2408 /*
2409 * Remove all test cases associated with given card. Thus we have only
2410 * actual data of the last run.
2411 */
2412 mmc_test_free_result(card);
2413
1950 test->card = card; 2414 test->card = card;
1951 2415
1952 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 2416 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
@@ -1973,16 +2437,78 @@ static ssize_t mmc_test_store(struct device *dev,
1973 return count; 2437 return count;
1974} 2438}
1975 2439
1976static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, mmc_test_show, mmc_test_store); 2440static const struct file_operations mmc_test_fops_test = {
2441 .open = mtf_test_open,
2442 .read = seq_read,
2443 .write = mtf_test_write,
2444 .llseek = seq_lseek,
2445 .release = single_release,
2446};
2447
2448static void mmc_test_free_file_test(struct mmc_card *card)
2449{
2450 struct mmc_test_dbgfs_file *df, *dfs;
2451
2452 mutex_lock(&mmc_test_lock);
2453
2454 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2455 if (card && df->card != card)
2456 continue;
2457 debugfs_remove(df->file);
2458 list_del(&df->link);
2459 kfree(df);
2460 }
2461
2462 mutex_unlock(&mmc_test_lock);
2463}
2464
2465static int mmc_test_register_file_test(struct mmc_card *card)
2466{
2467 struct dentry *file = NULL;
2468 struct mmc_test_dbgfs_file *df;
2469 int ret = 0;
2470
2471 mutex_lock(&mmc_test_lock);
2472
2473 if (card->debugfs_root)
2474 file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
2475 card->debugfs_root, card, &mmc_test_fops_test);
2476
2477 if (IS_ERR_OR_NULL(file)) {
2478 dev_err(&card->dev,
2479 "Can't create file. Perhaps debugfs is disabled.\n");
2480 ret = -ENODEV;
2481 goto err;
2482 }
2483
2484 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2485 if (!df) {
2486 debugfs_remove(file);
2487 dev_err(&card->dev,
2488 "Can't allocate memory for internal usage.\n");
2489 ret = -ENOMEM;
2490 goto err;
2491 }
2492
2493 df->card = card;
2494 df->file = file;
2495
2496 list_add(&df->link, &mmc_test_file_test);
2497
2498err:
2499 mutex_unlock(&mmc_test_lock);
2500
2501 return ret;
2502}
1977 2503
1978static int mmc_test_probe(struct mmc_card *card) 2504static int mmc_test_probe(struct mmc_card *card)
1979{ 2505{
1980 int ret; 2506 int ret;
1981 2507
1982 if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD)) 2508 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
1983 return -ENODEV; 2509 return -ENODEV;
1984 2510
1985 ret = device_create_file(&card->dev, &dev_attr_test); 2511 ret = mmc_test_register_file_test(card);
1986 if (ret) 2512 if (ret)
1987 return ret; 2513 return ret;
1988 2514
@@ -1993,7 +2519,8 @@ static int mmc_test_probe(struct mmc_card *card)
1993 2519
1994static void mmc_test_remove(struct mmc_card *card) 2520static void mmc_test_remove(struct mmc_card *card)
1995{ 2521{
1996 device_remove_file(&card->dev, &dev_attr_test); 2522 mmc_test_free_result(card);
2523 mmc_test_free_file_test(card);
1997} 2524}
1998 2525
1999static struct mmc_driver mmc_driver = { 2526static struct mmc_driver mmc_driver = {
@@ -2011,6 +2538,10 @@ static int __init mmc_test_init(void)
2011 2538
2012static void __exit mmc_test_exit(void) 2539static void __exit mmc_test_exit(void)
2013{ 2540{
2541 /* Clear stalled data if card is still plugged */
2542 mmc_test_free_result(NULL);
2543 mmc_test_free_file_test(NULL);
2544
2014 mmc_unregister_driver(&mmc_driver); 2545 mmc_unregister_driver(&mmc_driver);
2015} 2546}
2016 2547
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index e876678176be..6413afa318d2 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,8 +55,7 @@ static int mmc_queue_thread(void *d)
55 55
56 spin_lock_irq(q->queue_lock); 56 spin_lock_irq(q->queue_lock);
57 set_current_state(TASK_INTERRUPTIBLE); 57 set_current_state(TASK_INTERRUPTIBLE);
58 if (!blk_queue_plugged(q)) 58 req = blk_fetch_request(q);
59 req = blk_fetch_request(q);
60 mq->req = req; 59 mq->req = req;
61 spin_unlock_irq(q->queue_lock); 60 spin_unlock_irq(q->queue_lock);
62 61
@@ -107,10 +106,12 @@ static void mmc_request(struct request_queue *q)
107 * @mq: mmc queue 106 * @mq: mmc queue
108 * @card: mmc card to attach this queue 107 * @card: mmc card to attach this queue
109 * @lock: queue lock 108 * @lock: queue lock
109 * @subname: partition subname
110 * 110 *
111 * Initialise a MMC card request queue. 111 * Initialise a MMC card request queue.
112 */ 112 */
113int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock) 113int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
114 spinlock_t *lock, const char *subname)
114{ 115{
115 struct mmc_host *host = card->host; 116 struct mmc_host *host = card->host;
116 u64 limit = BLK_BOUNCE_HIGH; 117 u64 limit = BLK_BOUNCE_HIGH;
@@ -128,26 +129,20 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
128 mq->req = NULL; 129 mq->req = NULL;
129 130
130 blk_queue_prep_rq(mq->queue, mmc_prep_request); 131 blk_queue_prep_rq(mq->queue, mmc_prep_request);
131 blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
132 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); 132 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
133 if (mmc_can_erase(card)) { 133 if (mmc_can_erase(card)) {
134 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue); 134 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
135 mq->queue->limits.max_discard_sectors = UINT_MAX; 135 mq->queue->limits.max_discard_sectors = UINT_MAX;
136 if (card->erased_byte == 0) 136 if (card->erased_byte == 0)
137 mq->queue->limits.discard_zeroes_data = 1; 137 mq->queue->limits.discard_zeroes_data = 1;
138 if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) { 138 mq->queue->limits.discard_granularity = card->pref_erase << 9;
139 mq->queue->limits.discard_granularity =
140 card->erase_size << 9;
141 mq->queue->limits.discard_alignment =
142 card->erase_size << 9;
143 }
144 if (mmc_can_secure_erase_trim(card)) 139 if (mmc_can_secure_erase_trim(card))
145 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, 140 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
146 mq->queue); 141 mq->queue);
147 } 142 }
148 143
149#ifdef CONFIG_MMC_BLOCK_BOUNCE 144#ifdef CONFIG_MMC_BLOCK_BOUNCE
150 if (host->max_hw_segs == 1) { 145 if (host->max_segs == 1) {
151 unsigned int bouncesz; 146 unsigned int bouncesz;
152 147
153 bouncesz = MMC_QUEUE_BOUNCESZ; 148 bouncesz = MMC_QUEUE_BOUNCESZ;
@@ -197,21 +192,23 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
197 blk_queue_bounce_limit(mq->queue, limit); 192 blk_queue_bounce_limit(mq->queue, limit);
198 blk_queue_max_hw_sectors(mq->queue, 193 blk_queue_max_hw_sectors(mq->queue,
199 min(host->max_blk_count, host->max_req_size / 512)); 194 min(host->max_blk_count, host->max_req_size / 512));
200 blk_queue_max_segments(mq->queue, host->max_hw_segs); 195 blk_queue_max_segments(mq->queue, host->max_segs);
201 blk_queue_max_segment_size(mq->queue, host->max_seg_size); 196 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
202 197
203 mq->sg = kmalloc(sizeof(struct scatterlist) * 198 mq->sg = kmalloc(sizeof(struct scatterlist) *
204 host->max_phys_segs, GFP_KERNEL); 199 host->max_segs, GFP_KERNEL);
205 if (!mq->sg) { 200 if (!mq->sg) {
206 ret = -ENOMEM; 201 ret = -ENOMEM;
207 goto cleanup_queue; 202 goto cleanup_queue;
208 } 203 }
209 sg_init_table(mq->sg, host->max_phys_segs); 204 sg_init_table(mq->sg, host->max_segs);
210 } 205 }
211 206
212 init_MUTEX(&mq->thread_sem); 207 sema_init(&mq->thread_sem, 1);
208
209 mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
210 host->index, subname ? subname : "");
213 211
214 mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
215 if (IS_ERR(mq->thread)) { 212 if (IS_ERR(mq->thread)) {
216 ret = PTR_ERR(mq->thread); 213 ret = PTR_ERR(mq->thread);
217 goto free_bounce_sg; 214 goto free_bounce_sg;
@@ -343,18 +340,14 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
343 */ 340 */
344void mmc_queue_bounce_pre(struct mmc_queue *mq) 341void mmc_queue_bounce_pre(struct mmc_queue *mq)
345{ 342{
346 unsigned long flags;
347
348 if (!mq->bounce_buf) 343 if (!mq->bounce_buf)
349 return; 344 return;
350 345
351 if (rq_data_dir(mq->req) != WRITE) 346 if (rq_data_dir(mq->req) != WRITE)
352 return; 347 return;
353 348
354 local_irq_save(flags);
355 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, 349 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
356 mq->bounce_buf, mq->sg[0].length); 350 mq->bounce_buf, mq->sg[0].length);
357 local_irq_restore(flags);
358} 351}
359 352
360/* 353/*
@@ -363,17 +356,13 @@ void mmc_queue_bounce_pre(struct mmc_queue *mq)
363 */ 356 */
364void mmc_queue_bounce_post(struct mmc_queue *mq) 357void mmc_queue_bounce_post(struct mmc_queue *mq)
365{ 358{
366 unsigned long flags;
367
368 if (!mq->bounce_buf) 359 if (!mq->bounce_buf)
369 return; 360 return;
370 361
371 if (rq_data_dir(mq->req) != READ) 362 if (rq_data_dir(mq->req) != READ)
372 return; 363 return;
373 364
374 local_irq_save(flags);
375 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, 365 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
376 mq->bounce_buf, mq->sg[0].length); 366 mq->bounce_buf, mq->sg[0].length);
377 local_irq_restore(flags);
378} 367}
379 368
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 64e66e0d4994..6223ef8dc9cd 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -19,7 +19,8 @@ struct mmc_queue {
19 unsigned int bounce_sg_len; 19 unsigned int bounce_sg_len;
20}; 20};
21 21
22extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *); 22extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
23 const char *);
23extern void mmc_cleanup_queue(struct mmc_queue *); 24extern void mmc_cleanup_queue(struct mmc_queue *);
24extern void mmc_queue_suspend(struct mmc_queue *); 25extern void mmc_queue_suspend(struct mmc_queue *);
25extern void mmc_queue_resume(struct mmc_queue *); 26extern void mmc_queue_resume(struct mmc_queue *);
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index a0716967b7c8..c8c9edb3d7cb 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -956,7 +956,7 @@ static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
956 return 0; 956 return 0;
957} 957}
958 958
959static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file) 959static int sdio_uart_tiocmget(struct tty_struct *tty)
960{ 960{
961 struct sdio_uart_port *port = tty->driver_data; 961 struct sdio_uart_port *port = tty->driver_data;
962 int result; 962 int result;
@@ -970,7 +970,7 @@ static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file)
970 return result; 970 return result;
971} 971}
972 972
973static int sdio_uart_tiocmset(struct tty_struct *tty, struct file *file, 973static int sdio_uart_tiocmset(struct tty_struct *tty,
974 unsigned int set, unsigned int clear) 974 unsigned int set, unsigned int clear)
975{ 975{
976 struct sdio_uart_port *port = tty->driver_data; 976 struct sdio_uart_port *port = tty->driver_data;
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index bb22ffd76ef8..ef103871517f 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -16,3 +16,14 @@ config MMC_UNSAFE_RESUME
16 16
17 This option sets a default which can be overridden by the 17 This option sets a default which can be overridden by the
18 module parameter "removable=0" or "removable=1". 18 module parameter "removable=0" or "removable=1".
19
20config MMC_CLKGATE
21 bool "MMC host clock gating (EXPERIMENTAL)"
22 depends on EXPERIMENTAL
23 help
24 This will attempt to aggressively gate the clock to the MMC card.
25 This is done to save power due to gating off the logic and bus
26 noise when the MMC card is not in use. Your host driver has to
27 support handling this in order for it to be of any use.
28
29 If unsure, say N.
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 889e5f898f6f..639501970b41 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -2,14 +2,11 @@
2# Makefile for the kernel mmc core. 2# Makefile for the kernel mmc core.
3# 3#
4 4
5ifeq ($(CONFIG_MMC_DEBUG),y)
6 EXTRA_CFLAGS += -DDEBUG
7endif
8
9obj-$(CONFIG_MMC) += mmc_core.o 5obj-$(CONFIG_MMC) += mmc_core.o
10mmc_core-y := core.o bus.o host.o \ 6mmc_core-y := core.o bus.o host.o \
11 mmc.o mmc_ops.o sd.o sd_ops.o \ 7 mmc.o mmc_ops.o sd.o sd_ops.o \
12 sdio.o sdio_ops.o sdio_bus.o \ 8 sdio.o sdio_ops.o sdio_bus.o \
13 sdio_cis.o sdio_io.o sdio_irq.o 9 sdio_cis.o sdio_io.o sdio_irq.o \
10 quirks.o
14 11
15mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o 12mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 7cd9749dc21d..393d817ed040 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -14,6 +14,7 @@
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/pm_runtime.h>
17 18
18#include <linux/mmc/card.h> 19#include <linux/mmc/card.h>
19#include <linux/mmc/host.h> 20#include <linux/mmc/host.h>
@@ -22,13 +23,12 @@
22#include "sdio_cis.h" 23#include "sdio_cis.h"
23#include "bus.h" 24#include "bus.h"
24 25
25#define dev_to_mmc_card(d) container_of(d, struct mmc_card, dev)
26#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv) 26#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
27 27
28static ssize_t mmc_type_show(struct device *dev, 28static ssize_t mmc_type_show(struct device *dev,
29 struct device_attribute *attr, char *buf) 29 struct device_attribute *attr, char *buf)
30{ 30{
31 struct mmc_card *card = dev_to_mmc_card(dev); 31 struct mmc_card *card = mmc_dev_to_card(dev);
32 32
33 switch (card->type) { 33 switch (card->type) {
34 case MMC_TYPE_MMC: 34 case MMC_TYPE_MMC:
@@ -62,7 +62,7 @@ static int mmc_bus_match(struct device *dev, struct device_driver *drv)
62static int 62static int
63mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 63mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
64{ 64{
65 struct mmc_card *card = dev_to_mmc_card(dev); 65 struct mmc_card *card = mmc_dev_to_card(dev);
66 const char *type; 66 const char *type;
67 int retval = 0; 67 int retval = 0;
68 68
@@ -105,7 +105,7 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
105static int mmc_bus_probe(struct device *dev) 105static int mmc_bus_probe(struct device *dev)
106{ 106{
107 struct mmc_driver *drv = to_mmc_driver(dev->driver); 107 struct mmc_driver *drv = to_mmc_driver(dev->driver);
108 struct mmc_card *card = dev_to_mmc_card(dev); 108 struct mmc_card *card = mmc_dev_to_card(dev);
109 109
110 return drv->probe(card); 110 return drv->probe(card);
111} 111}
@@ -113,7 +113,7 @@ static int mmc_bus_probe(struct device *dev)
113static int mmc_bus_remove(struct device *dev) 113static int mmc_bus_remove(struct device *dev)
114{ 114{
115 struct mmc_driver *drv = to_mmc_driver(dev->driver); 115 struct mmc_driver *drv = to_mmc_driver(dev->driver);
116 struct mmc_card *card = dev_to_mmc_card(dev); 116 struct mmc_card *card = mmc_dev_to_card(dev);
117 117
118 drv->remove(card); 118 drv->remove(card);
119 119
@@ -123,7 +123,7 @@ static int mmc_bus_remove(struct device *dev)
123static int mmc_bus_suspend(struct device *dev, pm_message_t state) 123static int mmc_bus_suspend(struct device *dev, pm_message_t state)
124{ 124{
125 struct mmc_driver *drv = to_mmc_driver(dev->driver); 125 struct mmc_driver *drv = to_mmc_driver(dev->driver);
126 struct mmc_card *card = dev_to_mmc_card(dev); 126 struct mmc_card *card = mmc_dev_to_card(dev);
127 int ret = 0; 127 int ret = 0;
128 128
129 if (dev->driver && drv->suspend) 129 if (dev->driver && drv->suspend)
@@ -134,7 +134,7 @@ static int mmc_bus_suspend(struct device *dev, pm_message_t state)
134static int mmc_bus_resume(struct device *dev) 134static int mmc_bus_resume(struct device *dev)
135{ 135{
136 struct mmc_driver *drv = to_mmc_driver(dev->driver); 136 struct mmc_driver *drv = to_mmc_driver(dev->driver);
137 struct mmc_card *card = dev_to_mmc_card(dev); 137 struct mmc_card *card = mmc_dev_to_card(dev);
138 int ret = 0; 138 int ret = 0;
139 139
140 if (dev->driver && drv->resume) 140 if (dev->driver && drv->resume)
@@ -142,6 +142,41 @@ static int mmc_bus_resume(struct device *dev)
142 return ret; 142 return ret;
143} 143}
144 144
145#ifdef CONFIG_PM_RUNTIME
146
147static int mmc_runtime_suspend(struct device *dev)
148{
149 struct mmc_card *card = mmc_dev_to_card(dev);
150
151 return mmc_power_save_host(card->host);
152}
153
154static int mmc_runtime_resume(struct device *dev)
155{
156 struct mmc_card *card = mmc_dev_to_card(dev);
157
158 return mmc_power_restore_host(card->host);
159}
160
161static int mmc_runtime_idle(struct device *dev)
162{
163 return pm_runtime_suspend(dev);
164}
165
166static const struct dev_pm_ops mmc_bus_pm_ops = {
167 .runtime_suspend = mmc_runtime_suspend,
168 .runtime_resume = mmc_runtime_resume,
169 .runtime_idle = mmc_runtime_idle,
170};
171
172#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops)
173
174#else /* !CONFIG_PM_RUNTIME */
175
176#define MMC_PM_OPS_PTR NULL
177
178#endif /* !CONFIG_PM_RUNTIME */
179
145static struct bus_type mmc_bus_type = { 180static struct bus_type mmc_bus_type = {
146 .name = "mmc", 181 .name = "mmc",
147 .dev_attrs = mmc_dev_attrs, 182 .dev_attrs = mmc_dev_attrs,
@@ -151,6 +186,7 @@ static struct bus_type mmc_bus_type = {
151 .remove = mmc_bus_remove, 186 .remove = mmc_bus_remove,
152 .suspend = mmc_bus_suspend, 187 .suspend = mmc_bus_suspend,
153 .resume = mmc_bus_resume, 188 .resume = mmc_bus_resume,
189 .pm = MMC_PM_OPS_PTR,
154}; 190};
155 191
156int mmc_register_bus(void) 192int mmc_register_bus(void)
@@ -189,7 +225,7 @@ EXPORT_SYMBOL(mmc_unregister_driver);
189 225
190static void mmc_release_card(struct device *dev) 226static void mmc_release_card(struct device *dev)
191{ 227{
192 struct mmc_card *card = dev_to_mmc_card(dev); 228 struct mmc_card *card = mmc_dev_to_card(dev);
193 229
194 sdio_free_common_cis(card); 230 sdio_free_common_cis(card);
195 231
@@ -238,8 +274,12 @@ int mmc_add_card(struct mmc_card *card)
238 break; 274 break;
239 case MMC_TYPE_SD: 275 case MMC_TYPE_SD:
240 type = "SD"; 276 type = "SD";
241 if (mmc_card_blockaddr(card)) 277 if (mmc_card_blockaddr(card)) {
242 type = "SDHC"; 278 if (mmc_card_ext_capacity(card))
279 type = "SDXC";
280 else
281 type = "SDHC";
282 }
243 break; 283 break;
244 case MMC_TYPE_SDIO: 284 case MMC_TYPE_SDIO:
245 type = "SDIO"; 285 type = "SDIO";
@@ -248,31 +288,35 @@ int mmc_add_card(struct mmc_card *card)
248 type = "SD-combo"; 288 type = "SD-combo";
249 if (mmc_card_blockaddr(card)) 289 if (mmc_card_blockaddr(card))
250 type = "SDHC-combo"; 290 type = "SDHC-combo";
291 break;
251 default: 292 default:
252 type = "?"; 293 type = "?";
253 break; 294 break;
254 } 295 }
255 296
256 if (mmc_host_is_spi(card->host)) { 297 if (mmc_host_is_spi(card->host)) {
257 printk(KERN_INFO "%s: new %s%s card on SPI\n", 298 printk(KERN_INFO "%s: new %s%s%s card on SPI\n",
258 mmc_hostname(card->host), 299 mmc_hostname(card->host),
259 mmc_card_highspeed(card) ? "high speed " : "", 300 mmc_card_highspeed(card) ? "high speed " : "",
301 mmc_card_ddr_mode(card) ? "DDR " : "",
260 type); 302 type);
261 } else { 303 } else {
262 printk(KERN_INFO "%s: new %s%s card at address %04x\n", 304 printk(KERN_INFO "%s: new %s%s%s card at address %04x\n",
263 mmc_hostname(card->host), 305 mmc_hostname(card->host),
264 mmc_card_highspeed(card) ? "high speed " : "", 306 mmc_sd_card_uhs(card) ? "ultra high speed " :
307 (mmc_card_highspeed(card) ? "high speed " : ""),
308 mmc_card_ddr_mode(card) ? "DDR " : "",
265 type, card->rca); 309 type, card->rca);
266 } 310 }
267 311
268 ret = device_add(&card->dev);
269 if (ret)
270 return ret;
271
272#ifdef CONFIG_DEBUG_FS 312#ifdef CONFIG_DEBUG_FS
273 mmc_add_card_debugfs(card); 313 mmc_add_card_debugfs(card);
274#endif 314#endif
275 315
316 ret = device_add(&card->dev);
317 if (ret)
318 return ret;
319
276 mmc_card_set_present(card); 320 mmc_card_set_present(card);
277 321
278 return 0; 322 return 0;
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 18178766ab46..00a19710b6b4 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -14,7 +14,7 @@
14#define MMC_DEV_ATTR(name, fmt, args...) \ 14#define MMC_DEV_ATTR(name, fmt, args...) \
15static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \ 15static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
16{ \ 16{ \
17 struct mmc_card *card = container_of(dev, struct mmc_card, dev); \ 17 struct mmc_card *card = mmc_dev_to_card(dev); \
18 return sprintf(buf, fmt, args); \ 18 return sprintf(buf, fmt, args); \
19} \ 19} \
20static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL) 20static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 09eee6df0653..7843efe22359 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -22,6 +22,7 @@
22#include <linux/scatterlist.h> 22#include <linux/scatterlist.h>
23#include <linux/log2.h> 23#include <linux/log2.h>
24#include <linux/regulator/consumer.h> 24#include <linux/regulator/consumer.h>
25#include <linux/pm_runtime.h>
25 26
26#include <linux/mmc/card.h> 27#include <linux/mmc/card.h>
27#include <linux/mmc/host.h> 28#include <linux/mmc/host.h>
@@ -58,6 +59,7 @@ int mmc_assume_removable;
58#else 59#else
59int mmc_assume_removable = 1; 60int mmc_assume_removable = 1;
60#endif 61#endif
62EXPORT_SYMBOL(mmc_assume_removable);
61module_param_named(removable, mmc_assume_removable, bool, 0644); 63module_param_named(removable, mmc_assume_removable, bool, 0644);
62MODULE_PARM_DESC( 64MODULE_PARM_DESC(
63 removable, 65 removable,
@@ -129,6 +131,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
129 131
130 if (mrq->done) 132 if (mrq->done)
131 mrq->done(mrq); 133 mrq->done(mrq);
134
135 mmc_host_clk_gate(host);
132 } 136 }
133} 137}
134 138
@@ -163,8 +167,6 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
163 167
164 WARN_ON(!host->claimed); 168 WARN_ON(!host->claimed);
165 169
166 led_trigger_event(host->led, LED_FULL);
167
168 mrq->cmd->error = 0; 170 mrq->cmd->error = 0;
169 mrq->cmd->mrq = mrq; 171 mrq->cmd->mrq = mrq;
170 if (mrq->data) { 172 if (mrq->data) {
@@ -189,6 +191,8 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
189 mrq->stop->mrq = mrq; 191 mrq->stop->mrq = mrq;
190 } 192 }
191 } 193 }
194 mmc_host_clk_ungate(host);
195 led_trigger_event(host->led, LED_FULL);
192 host->ops->request(host, mrq); 196 host->ops->request(host, mrq);
193} 197}
194 198
@@ -232,12 +236,10 @@ EXPORT_SYMBOL(mmc_wait_for_req);
232 */ 236 */
233int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) 237int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
234{ 238{
235 struct mmc_request mrq; 239 struct mmc_request mrq = {0};
236 240
237 WARN_ON(!host->claimed); 241 WARN_ON(!host->claimed);
238 242
239 memset(&mrq, 0, sizeof(struct mmc_request));
240
241 memset(cmd->resp, 0, sizeof(cmd->resp)); 243 memset(cmd->resp, 0, sizeof(cmd->resp));
242 cmd->retries = retries; 244 cmd->retries = retries;
243 245
@@ -294,8 +296,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
294 unsigned int timeout_us, limit_us; 296 unsigned int timeout_us, limit_us;
295 297
296 timeout_us = data->timeout_ns / 1000; 298 timeout_us = data->timeout_ns / 1000;
297 timeout_us += data->timeout_clks * 1000 / 299 if (mmc_host_clk_rate(card->host))
298 (card->host->ios.clock / 1000); 300 timeout_us += data->timeout_clks * 1000 /
301 (mmc_host_clk_rate(card->host) / 1000);
299 302
300 if (data->flags & MMC_DATA_WRITE) 303 if (data->flags & MMC_DATA_WRITE)
301 /* 304 /*
@@ -522,7 +525,14 @@ int mmc_try_claim_host(struct mmc_host *host)
522} 525}
523EXPORT_SYMBOL(mmc_try_claim_host); 526EXPORT_SYMBOL(mmc_try_claim_host);
524 527
525static void mmc_do_release_host(struct mmc_host *host) 528/**
529 * mmc_do_release_host - release a claimed host
530 * @host: mmc host to release
531 *
532 * If you successfully claimed a host, this function will
533 * release it again.
534 */
535void mmc_do_release_host(struct mmc_host *host)
526{ 536{
527 unsigned long flags; 537 unsigned long flags;
528 538
@@ -537,6 +547,7 @@ static void mmc_do_release_host(struct mmc_host *host)
537 wake_up(&host->wq); 547 wake_up(&host->wq);
538 } 548 }
539} 549}
550EXPORT_SYMBOL(mmc_do_release_host);
540 551
541void mmc_host_deeper_disable(struct work_struct *work) 552void mmc_host_deeper_disable(struct work_struct *work)
542{ 553{
@@ -613,6 +624,8 @@ static inline void mmc_set_ios(struct mmc_host *host)
613 ios->power_mode, ios->chip_select, ios->vdd, 624 ios->power_mode, ios->chip_select, ios->vdd,
614 ios->bus_width, ios->timing); 625 ios->bus_width, ios->timing);
615 626
627 if (ios->clock > 0)
628 mmc_set_ungated(host);
616 host->ops->set_ios(host, ios); 629 host->ops->set_ios(host, ios);
617} 630}
618 631
@@ -640,6 +653,61 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
640 mmc_set_ios(host); 653 mmc_set_ios(host);
641} 654}
642 655
656#ifdef CONFIG_MMC_CLKGATE
657/*
658 * This gates the clock by setting it to 0 Hz.
659 */
660void mmc_gate_clock(struct mmc_host *host)
661{
662 unsigned long flags;
663
664 spin_lock_irqsave(&host->clk_lock, flags);
665 host->clk_old = host->ios.clock;
666 host->ios.clock = 0;
667 host->clk_gated = true;
668 spin_unlock_irqrestore(&host->clk_lock, flags);
669 mmc_set_ios(host);
670}
671
672/*
673 * This restores the clock from gating by using the cached
674 * clock value.
675 */
676void mmc_ungate_clock(struct mmc_host *host)
677{
678 /*
679 * We should previously have gated the clock, so the clock shall
680 * be 0 here! The clock may however be 0 during initialization,
681 * when some request operations are performed before setting
682 * the frequency. When ungate is requested in that situation
683 * we just ignore the call.
684 */
685 if (host->clk_old) {
686 BUG_ON(host->ios.clock);
687 /* This call will also set host->clk_gated to false */
688 mmc_set_clock(host, host->clk_old);
689 }
690}
691
692void mmc_set_ungated(struct mmc_host *host)
693{
694 unsigned long flags;
695
696 /*
697 * We've been given a new frequency while the clock is gated,
698 * so make sure we regard this as ungating it.
699 */
700 spin_lock_irqsave(&host->clk_lock, flags);
701 host->clk_gated = false;
702 spin_unlock_irqrestore(&host->clk_lock, flags);
703}
704
705#else
706void mmc_set_ungated(struct mmc_host *host)
707{
708}
709#endif
710
643/* 711/*
644 * Change the bus mode (open drain/push-pull) of a host. 712 * Change the bus mode (open drain/push-pull) of a host.
645 */ 713 */
@@ -771,8 +839,9 @@ EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
771 839
772/** 840/**
773 * mmc_regulator_set_ocr - set regulator to match host->ios voltage 841 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
774 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) 842 * @mmc: the host to regulate
775 * @supply: regulator to use 843 * @supply: regulator to use
844 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
776 * 845 *
777 * Returns zero on success, else negative errno. 846 * Returns zero on success, else negative errno.
778 * 847 *
@@ -780,15 +849,12 @@ EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
780 * a particular supply voltage. This would normally be called from the 849 * a particular supply voltage. This would normally be called from the
781 * set_ios() method. 850 * set_ios() method.
782 */ 851 */
783int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit) 852int mmc_regulator_set_ocr(struct mmc_host *mmc,
853 struct regulator *supply,
854 unsigned short vdd_bit)
784{ 855{
785 int result = 0; 856 int result = 0;
786 int min_uV, max_uV; 857 int min_uV, max_uV;
787 int enabled;
788
789 enabled = regulator_is_enabled(supply);
790 if (enabled < 0)
791 return enabled;
792 858
793 if (vdd_bit) { 859 if (vdd_bit) {
794 int tmp; 860 int tmp;
@@ -819,17 +885,25 @@ int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit)
819 else 885 else
820 result = 0; 886 result = 0;
821 887
822 if (result == 0 && !enabled) 888 if (result == 0 && !mmc->regulator_enabled) {
823 result = regulator_enable(supply); 889 result = regulator_enable(supply);
824 } else if (enabled) { 890 if (!result)
891 mmc->regulator_enabled = true;
892 }
893 } else if (mmc->regulator_enabled) {
825 result = regulator_disable(supply); 894 result = regulator_disable(supply);
895 if (result == 0)
896 mmc->regulator_enabled = false;
826 } 897 }
827 898
899 if (result)
900 dev_err(mmc_dev(mmc),
901 "could not set regulator OCR (%d)\n", result);
828 return result; 902 return result;
829} 903}
830EXPORT_SYMBOL(mmc_regulator_set_ocr); 904EXPORT_SYMBOL(mmc_regulator_set_ocr);
831 905
832#endif 906#endif /* CONFIG_REGULATOR */
833 907
834/* 908/*
835 * Mask off any voltages we don't support and select 909 * Mask off any voltages we don't support and select
@@ -858,6 +932,38 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
858 return ocr; 932 return ocr;
859} 933}
860 934
935int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
936{
937 struct mmc_command cmd = {0};
938 int err = 0;
939
940 BUG_ON(!host);
941
942 /*
943 * Send CMD11 only if the request is to switch the card to
944 * 1.8V signalling.
945 */
946 if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
947 cmd.opcode = SD_SWITCH_VOLTAGE;
948 cmd.arg = 0;
949 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
950
951 err = mmc_wait_for_cmd(host, &cmd, 0);
952 if (err)
953 return err;
954
955 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
956 return -EIO;
957 }
958
959 host->ios.signal_voltage = signal_voltage;
960
961 if (host->ops->start_signal_voltage_switch)
962 err = host->ops->start_signal_voltage_switch(host, &host->ios);
963
964 return err;
965}
966
861/* 967/*
862 * Select timing parameters for host. 968 * Select timing parameters for host.
863 */ 969 */
@@ -868,6 +974,15 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
868} 974}
869 975
870/* 976/*
977 * Select appropriate driver type for host.
978 */
979void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
980{
981 host->ios.drv_type = drv_type;
982 mmc_set_ios(host);
983}
984
985/*
871 * Apply power to the MMC stack. This is a two-stage process. 986 * Apply power to the MMC stack. This is a two-stage process.
872 * First, we enable power to the card without the clock running. 987 * First, we enable power to the card without the clock running.
873 * We then wait a bit for the power to stabilise. Finally, 988 * We then wait a bit for the power to stabilise. Finally,
@@ -907,12 +1022,7 @@ static void mmc_power_up(struct mmc_host *host)
907 */ 1022 */
908 mmc_delay(10); 1023 mmc_delay(10);
909 1024
910 if (host->f_min > 400000) { 1025 host->ios.clock = host->f_init;
911 pr_warning("%s: Minimum clock frequency too high for "
912 "identification mode\n", mmc_hostname(host));
913 host->ios.clock = host->f_min;
914 } else
915 host->ios.clock = 400000;
916 1026
917 host->ios.power_mode = MMC_POWER_ON; 1027 host->ios.power_mode = MMC_POWER_ON;
918 mmc_set_ios(host); 1028 mmc_set_ios(host);
@@ -928,6 +1038,13 @@ static void mmc_power_off(struct mmc_host *host)
928{ 1038{
929 host->ios.clock = 0; 1039 host->ios.clock = 0;
930 host->ios.vdd = 0; 1040 host->ios.vdd = 0;
1041
1042 /*
1043 * Reset ocr mask to be the highest possible voltage supported for
1044 * this mmc host. This value will be used at next power up.
1045 */
1046 host->ocr = 1 << (fls(host->ocr_avail) - 1);
1047
931 if (!mmc_host_is_spi(host)) { 1048 if (!mmc_host_is_spi(host)) {
932 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 1049 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
933 host->ios.chip_select = MMC_CS_DONTCARE; 1050 host->ios.chip_select = MMC_CS_DONTCARE;
@@ -1099,9 +1216,8 @@ void mmc_init_erase(struct mmc_card *card)
1099 } 1216 }
1100} 1217}
1101 1218
1102static void mmc_set_mmc_erase_timeout(struct mmc_card *card, 1219static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1103 struct mmc_command *cmd, 1220 unsigned int arg, unsigned int qty)
1104 unsigned int arg, unsigned int qty)
1105{ 1221{
1106 unsigned int erase_timeout; 1222 unsigned int erase_timeout;
1107 1223
@@ -1129,7 +1245,7 @@ static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
1129 */ 1245 */
1130 timeout_clks <<= 1; 1246 timeout_clks <<= 1;
1131 timeout_us += (timeout_clks * 1000) / 1247 timeout_us += (timeout_clks * 1000) /
1132 (card->host->ios.clock / 1000); 1248 (mmc_host_clk_rate(card->host) / 1000);
1133 1249
1134 erase_timeout = timeout_us / 1000; 1250 erase_timeout = timeout_us / 1000;
1135 1251
@@ -1158,44 +1274,48 @@ static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
1158 if (mmc_host_is_spi(card->host) && erase_timeout < 1000) 1274 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1159 erase_timeout = 1000; 1275 erase_timeout = 1000;
1160 1276
1161 cmd->erase_timeout = erase_timeout; 1277 return erase_timeout;
1162} 1278}
1163 1279
1164static void mmc_set_sd_erase_timeout(struct mmc_card *card, 1280static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1165 struct mmc_command *cmd, unsigned int arg, 1281 unsigned int arg,
1166 unsigned int qty) 1282 unsigned int qty)
1167{ 1283{
1284 unsigned int erase_timeout;
1285
1168 if (card->ssr.erase_timeout) { 1286 if (card->ssr.erase_timeout) {
1169 /* Erase timeout specified in SD Status Register (SSR) */ 1287 /* Erase timeout specified in SD Status Register (SSR) */
1170 cmd->erase_timeout = card->ssr.erase_timeout * qty + 1288 erase_timeout = card->ssr.erase_timeout * qty +
1171 card->ssr.erase_offset; 1289 card->ssr.erase_offset;
1172 } else { 1290 } else {
1173 /* 1291 /*
1174 * Erase timeout not specified in SD Status Register (SSR) so 1292 * Erase timeout not specified in SD Status Register (SSR) so
1175 * use 250ms per write block. 1293 * use 250ms per write block.
1176 */ 1294 */
1177 cmd->erase_timeout = 250 * qty; 1295 erase_timeout = 250 * qty;
1178 } 1296 }
1179 1297
1180 /* Must not be less than 1 second */ 1298 /* Must not be less than 1 second */
1181 if (cmd->erase_timeout < 1000) 1299 if (erase_timeout < 1000)
1182 cmd->erase_timeout = 1000; 1300 erase_timeout = 1000;
1301
1302 return erase_timeout;
1183} 1303}
1184 1304
1185static void mmc_set_erase_timeout(struct mmc_card *card, 1305static unsigned int mmc_erase_timeout(struct mmc_card *card,
1186 struct mmc_command *cmd, unsigned int arg, 1306 unsigned int arg,
1187 unsigned int qty) 1307 unsigned int qty)
1188{ 1308{
1189 if (mmc_card_sd(card)) 1309 if (mmc_card_sd(card))
1190 mmc_set_sd_erase_timeout(card, cmd, arg, qty); 1310 return mmc_sd_erase_timeout(card, arg, qty);
1191 else 1311 else
1192 mmc_set_mmc_erase_timeout(card, cmd, arg, qty); 1312 return mmc_mmc_erase_timeout(card, arg, qty);
1193} 1313}
1194 1314
1195static int mmc_do_erase(struct mmc_card *card, unsigned int from, 1315static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1196 unsigned int to, unsigned int arg) 1316 unsigned int to, unsigned int arg)
1197{ 1317{
1198 struct mmc_command cmd; 1318 struct mmc_command cmd = {0};
1199 unsigned int qty = 0; 1319 unsigned int qty = 0;
1200 int err; 1320 int err;
1201 1321
@@ -1229,7 +1349,6 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1229 to <<= 9; 1349 to <<= 9;
1230 } 1350 }
1231 1351
1232 memset(&cmd, 0, sizeof(struct mmc_command));
1233 if (mmc_card_sd(card)) 1352 if (mmc_card_sd(card))
1234 cmd.opcode = SD_ERASE_WR_BLK_START; 1353 cmd.opcode = SD_ERASE_WR_BLK_START;
1235 else 1354 else
@@ -1263,7 +1382,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1263 cmd.opcode = MMC_ERASE; 1382 cmd.opcode = MMC_ERASE;
1264 cmd.arg = arg; 1383 cmd.arg = arg;
1265 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 1384 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1266 mmc_set_erase_timeout(card, &cmd, arg, qty); 1385 cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
1267 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1386 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1268 if (err) { 1387 if (err) {
1269 printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n", 1388 printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
@@ -1397,33 +1516,77 @@ int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1397} 1516}
1398EXPORT_SYMBOL(mmc_erase_group_aligned); 1517EXPORT_SYMBOL(mmc_erase_group_aligned);
1399 1518
1519int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1520{
1521 struct mmc_command cmd = {0};
1522
1523 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
1524 return 0;
1525
1526 cmd.opcode = MMC_SET_BLOCKLEN;
1527 cmd.arg = blocklen;
1528 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1529 return mmc_wait_for_cmd(card->host, &cmd, 5);
1530}
1531EXPORT_SYMBOL(mmc_set_blocklen);
1532
1533static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
1534{
1535 host->f_init = freq;
1536
1537#ifdef CONFIG_MMC_DEBUG
1538 pr_info("%s: %s: trying to init card at %u Hz\n",
1539 mmc_hostname(host), __func__, host->f_init);
1540#endif
1541 mmc_power_up(host);
1542
1543 /*
1544 * sdio_reset sends CMD52 to reset card. Since we do not know
1545 * if the card is being re-initialized, just send it. CMD52
1546 * should be ignored by SD/eMMC cards.
1547 */
1548 sdio_reset(host);
1549 mmc_go_idle(host);
1550
1551 mmc_send_if_cond(host, host->ocr_avail);
1552
1553 /* Order's important: probe SDIO, then SD, then MMC */
1554 if (!mmc_attach_sdio(host))
1555 return 0;
1556 if (!mmc_attach_sd(host))
1557 return 0;
1558 if (!mmc_attach_mmc(host))
1559 return 0;
1560
1561 mmc_power_off(host);
1562 return -EIO;
1563}
1564
1400void mmc_rescan(struct work_struct *work) 1565void mmc_rescan(struct work_struct *work)
1401{ 1566{
1567 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
1402 struct mmc_host *host = 1568 struct mmc_host *host =
1403 container_of(work, struct mmc_host, detect.work); 1569 container_of(work, struct mmc_host, detect.work);
1404 u32 ocr; 1570 int i;
1405 int err;
1406 unsigned long flags;
1407
1408 spin_lock_irqsave(&host->lock, flags);
1409 1571
1410 if (host->rescan_disable) { 1572 if (host->rescan_disable)
1411 spin_unlock_irqrestore(&host->lock, flags);
1412 return; 1573 return;
1413 }
1414
1415 spin_unlock_irqrestore(&host->lock, flags);
1416
1417 1574
1418 mmc_bus_get(host); 1575 mmc_bus_get(host);
1419 1576
1420 /* if there is a card registered, check whether it is still present */ 1577 /*
1421 if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) 1578 * if there is a _removable_ card registered, check whether it is
1579 * still present
1580 */
1581 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
1582 && !(host->caps & MMC_CAP_NONREMOVABLE))
1422 host->bus_ops->detect(host); 1583 host->bus_ops->detect(host);
1423 1584
1585 /*
1586 * Let mmc_bus_put() free the bus/bus_ops if we've found that
1587 * the card is no longer present.
1588 */
1424 mmc_bus_put(host); 1589 mmc_bus_put(host);
1425
1426
1427 mmc_bus_get(host); 1590 mmc_bus_get(host);
1428 1591
1429 /* if there still is a card present, stop here */ 1592 /* if there still is a card present, stop here */
@@ -1432,8 +1595,6 @@ void mmc_rescan(struct work_struct *work)
1432 goto out; 1595 goto out;
1433 } 1596 }
1434 1597
1435 /* detect a newly inserted card */
1436
1437 /* 1598 /*
1438 * Only we can add a new handler, so it's safe to 1599 * Only we can add a new handler, so it's safe to
1439 * release the lock here. 1600 * release the lock here.
@@ -1444,55 +1605,15 @@ void mmc_rescan(struct work_struct *work)
1444 goto out; 1605 goto out;
1445 1606
1446 mmc_claim_host(host); 1607 mmc_claim_host(host);
1447 1608 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
1448 mmc_power_up(host); 1609 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
1449 sdio_reset(host); 1610 break;
1450 mmc_go_idle(host); 1611 if (freqs[i] <= host->f_min)
1451 1612 break;
1452 mmc_send_if_cond(host, host->ocr_avail);
1453
1454 /*
1455 * First we search for SDIO...
1456 */
1457 err = mmc_send_io_op_cond(host, 0, &ocr);
1458 if (!err) {
1459 if (mmc_attach_sdio(host, ocr)) {
1460 mmc_claim_host(host);
1461 /* try SDMEM (but not MMC) even if SDIO is broken */
1462 if (mmc_send_app_op_cond(host, 0, &ocr))
1463 goto out_fail;
1464
1465 if (mmc_attach_sd(host, ocr))
1466 mmc_power_off(host);
1467 }
1468 goto out;
1469 }
1470
1471 /*
1472 * ...then normal SD...
1473 */
1474 err = mmc_send_app_op_cond(host, 0, &ocr);
1475 if (!err) {
1476 if (mmc_attach_sd(host, ocr))
1477 mmc_power_off(host);
1478 goto out;
1479 }
1480
1481 /*
1482 * ...and finally MMC.
1483 */
1484 err = mmc_send_op_cond(host, 0, &ocr);
1485 if (!err) {
1486 if (mmc_attach_mmc(host, ocr))
1487 mmc_power_off(host);
1488 goto out;
1489 } 1613 }
1490
1491out_fail:
1492 mmc_release_host(host); 1614 mmc_release_host(host);
1493 mmc_power_off(host);
1494 1615
1495out: 1616 out:
1496 if (host->caps & MMC_CAP_NEEDS_POLL) 1617 if (host->caps & MMC_CAP_NEEDS_POLL)
1497 mmc_schedule_delayed_work(&host->detect, HZ); 1618 mmc_schedule_delayed_work(&host->detect, HZ);
1498} 1619}
@@ -1514,7 +1635,7 @@ void mmc_stop_host(struct mmc_host *host)
1514 1635
1515 if (host->caps & MMC_CAP_DISABLE) 1636 if (host->caps & MMC_CAP_DISABLE)
1516 cancel_delayed_work(&host->disable); 1637 cancel_delayed_work(&host->disable);
1517 cancel_delayed_work(&host->detect); 1638 cancel_delayed_work_sync(&host->detect);
1518 mmc_flush_scheduled_work(); 1639 mmc_flush_scheduled_work();
1519 1640
1520 /* clear pm flags now and let card drivers set them as needed */ 1641 /* clear pm flags now and let card drivers set them as needed */
@@ -1538,37 +1659,45 @@ void mmc_stop_host(struct mmc_host *host)
1538 mmc_power_off(host); 1659 mmc_power_off(host);
1539} 1660}
1540 1661
1541void mmc_power_save_host(struct mmc_host *host) 1662int mmc_power_save_host(struct mmc_host *host)
1542{ 1663{
1664 int ret = 0;
1665
1543 mmc_bus_get(host); 1666 mmc_bus_get(host);
1544 1667
1545 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 1668 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
1546 mmc_bus_put(host); 1669 mmc_bus_put(host);
1547 return; 1670 return -EINVAL;
1548 } 1671 }
1549 1672
1550 if (host->bus_ops->power_save) 1673 if (host->bus_ops->power_save)
1551 host->bus_ops->power_save(host); 1674 ret = host->bus_ops->power_save(host);
1552 1675
1553 mmc_bus_put(host); 1676 mmc_bus_put(host);
1554 1677
1555 mmc_power_off(host); 1678 mmc_power_off(host);
1679
1680 return ret;
1556} 1681}
1557EXPORT_SYMBOL(mmc_power_save_host); 1682EXPORT_SYMBOL(mmc_power_save_host);
1558 1683
1559void mmc_power_restore_host(struct mmc_host *host) 1684int mmc_power_restore_host(struct mmc_host *host)
1560{ 1685{
1686 int ret;
1687
1561 mmc_bus_get(host); 1688 mmc_bus_get(host);
1562 1689
1563 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 1690 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
1564 mmc_bus_put(host); 1691 mmc_bus_put(host);
1565 return; 1692 return -EINVAL;
1566 } 1693 }
1567 1694
1568 mmc_power_up(host); 1695 mmc_power_up(host);
1569 host->bus_ops->power_restore(host); 1696 ret = host->bus_ops->power_restore(host);
1570 1697
1571 mmc_bus_put(host); 1698 mmc_bus_put(host);
1699
1700 return ret;
1572} 1701}
1573EXPORT_SYMBOL(mmc_power_restore_host); 1702EXPORT_SYMBOL(mmc_power_restore_host);
1574 1703
@@ -1647,7 +1776,7 @@ int mmc_suspend_host(struct mmc_host *host)
1647 } 1776 }
1648 mmc_bus_put(host); 1777 mmc_bus_put(host);
1649 1778
1650 if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER)) 1779 if (!err && !mmc_card_keep_power(host))
1651 mmc_power_off(host); 1780 mmc_power_off(host);
1652 1781
1653 return err; 1782 return err;
@@ -1665,9 +1794,21 @@ int mmc_resume_host(struct mmc_host *host)
1665 1794
1666 mmc_bus_get(host); 1795 mmc_bus_get(host);
1667 if (host->bus_ops && !host->bus_dead) { 1796 if (host->bus_ops && !host->bus_dead) {
1668 if (!(host->pm_flags & MMC_PM_KEEP_POWER)) { 1797 if (!mmc_card_keep_power(host)) {
1669 mmc_power_up(host); 1798 mmc_power_up(host);
1670 mmc_select_voltage(host, host->ocr); 1799 mmc_select_voltage(host, host->ocr);
1800 /*
1801 * Tell runtime PM core we just powered up the card,
1802 * since it still believes the card is powered off.
1803 * Note that currently runtime PM is only enabled
1804 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
1805 */
1806 if (mmc_card_sdio(host->card) &&
1807 (host->caps & MMC_CAP_POWER_OFF_CARD)) {
1808 pm_runtime_disable(&host->card->dev);
1809 pm_runtime_set_active(&host->card->dev);
1810 pm_runtime_enable(&host->card->dev);
1811 }
1671 } 1812 }
1672 BUG_ON(!host->bus_ops->resume); 1813 BUG_ON(!host->bus_ops->resume);
1673 err = host->bus_ops->resume(host); 1814 err = host->bus_ops->resume(host);
@@ -1678,6 +1819,7 @@ int mmc_resume_host(struct mmc_host *host)
1678 err = 0; 1819 err = 0;
1679 } 1820 }
1680 } 1821 }
1822 host->pm_flags &= ~MMC_PM_KEEP_POWER;
1681 mmc_bus_put(host); 1823 mmc_bus_put(host);
1682 1824
1683 return err; 1825 return err;
@@ -1720,6 +1862,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
1720 1862
1721 case PM_POST_SUSPEND: 1863 case PM_POST_SUSPEND:
1722 case PM_POST_HIBERNATION: 1864 case PM_POST_HIBERNATION:
1865 case PM_POST_RESTORE:
1723 1866
1724 spin_lock_irqsave(&host->lock, flags); 1867 spin_lock_irqsave(&host->lock, flags);
1725 host->rescan_disable = 0; 1868 host->rescan_disable = 0;
@@ -1736,7 +1879,7 @@ static int __init mmc_init(void)
1736{ 1879{
1737 int ret; 1880 int ret;
1738 1881
1739 workqueue = create_singlethread_workqueue("kmmcd"); 1882 workqueue = alloc_ordered_workqueue("kmmcd", 0);
1740 if (!workqueue) 1883 if (!workqueue)
1741 return -ENOMEM; 1884 return -ENOMEM;
1742 1885
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 9d9eef50e5d1..d9411ed2a39b 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -22,8 +22,8 @@ struct mmc_bus_ops {
22 void (*detect)(struct mmc_host *); 22 void (*detect)(struct mmc_host *);
23 int (*suspend)(struct mmc_host *); 23 int (*suspend)(struct mmc_host *);
24 int (*resume)(struct mmc_host *); 24 int (*resume)(struct mmc_host *);
25 void (*power_save)(struct mmc_host *); 25 int (*power_save)(struct mmc_host *);
26 void (*power_restore)(struct mmc_host *); 26 int (*power_restore)(struct mmc_host *);
27}; 27};
28 28
29void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); 29void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -33,10 +33,16 @@ void mmc_init_erase(struct mmc_card *card);
33 33
34void mmc_set_chip_select(struct mmc_host *host, int mode); 34void mmc_set_chip_select(struct mmc_host *host, int mode);
35void mmc_set_clock(struct mmc_host *host, unsigned int hz); 35void mmc_set_clock(struct mmc_host *host, unsigned int hz);
36void mmc_gate_clock(struct mmc_host *host);
37void mmc_ungate_clock(struct mmc_host *host);
38void mmc_set_ungated(struct mmc_host *host);
36void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); 39void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
37void mmc_set_bus_width(struct mmc_host *host, unsigned int width); 40void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
38u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); 41u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
42int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage,
43 bool cmd11);
39void mmc_set_timing(struct mmc_host *host, unsigned int timing); 44void mmc_set_timing(struct mmc_host *host, unsigned int timing);
45void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
40 46
41static inline void mmc_delay(unsigned int ms) 47static inline void mmc_delay(unsigned int ms)
42{ 48{
@@ -52,13 +58,12 @@ void mmc_rescan(struct work_struct *work);
52void mmc_start_host(struct mmc_host *host); 58void mmc_start_host(struct mmc_host *host);
53void mmc_stop_host(struct mmc_host *host); 59void mmc_stop_host(struct mmc_host *host);
54 60
55int mmc_attach_mmc(struct mmc_host *host, u32 ocr); 61int mmc_attach_mmc(struct mmc_host *host);
56int mmc_attach_sd(struct mmc_host *host, u32 ocr); 62int mmc_attach_sd(struct mmc_host *host);
57int mmc_attach_sdio(struct mmc_host *host, u32 ocr); 63int mmc_attach_sdio(struct mmc_host *host);
58 64
59/* Module parameters */ 65/* Module parameters */
60extern int use_spi_crc; 66extern int use_spi_crc;
61extern int mmc_assume_removable;
62 67
63/* Debugfs information for hosts and cards */ 68/* Debugfs information for hosts and cards */
64void mmc_add_host_debugfs(struct mmc_host *host); 69void mmc_add_host_debugfs(struct mmc_host *host);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 53cb380c0987..998797ed67a6 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -134,6 +134,33 @@ static const struct file_operations mmc_ios_fops = {
134 .release = single_release, 134 .release = single_release,
135}; 135};
136 136
137static int mmc_clock_opt_get(void *data, u64 *val)
138{
139 struct mmc_host *host = data;
140
141 *val = host->ios.clock;
142
143 return 0;
144}
145
146static int mmc_clock_opt_set(void *data, u64 val)
147{
148 struct mmc_host *host = data;
149
150 /* We need this check due to input value is u64 */
151 if (val > host->f_max)
152 return -EINVAL;
153
154 mmc_claim_host(host);
155 mmc_set_clock(host, (unsigned int) val);
156 mmc_release_host(host);
157
158 return 0;
159}
160
161DEFINE_SIMPLE_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
162 "%llu\n");
163
137void mmc_add_host_debugfs(struct mmc_host *host) 164void mmc_add_host_debugfs(struct mmc_host *host)
138{ 165{
139 struct dentry *root; 166 struct dentry *root;
@@ -150,11 +177,20 @@ void mmc_add_host_debugfs(struct mmc_host *host)
150 host->debugfs_root = root; 177 host->debugfs_root = root;
151 178
152 if (!debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops)) 179 if (!debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops))
153 goto err_ios; 180 goto err_node;
181
182 if (!debugfs_create_file("clock", S_IRUSR | S_IWUSR, root, host,
183 &mmc_clock_fops))
184 goto err_node;
154 185
186#ifdef CONFIG_MMC_CLKGATE
187 if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
188 root, &host->clk_delay))
189 goto err_node;
190#endif
155 return; 191 return;
156 192
157err_ios: 193err_node:
158 debugfs_remove_recursive(root); 194 debugfs_remove_recursive(root);
159 host->debugfs_root = NULL; 195 host->debugfs_root = NULL;
160err_root: 196err_root:
@@ -245,6 +281,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
245 .open = mmc_ext_csd_open, 281 .open = mmc_ext_csd_open,
246 .read = mmc_ext_csd_read, 282 .read = mmc_ext_csd_read,
247 .release = mmc_ext_csd_release, 283 .release = mmc_ext_csd_release,
284 .llseek = default_llseek,
248}; 285};
249 286
250void mmc_add_card_debugfs(struct mmc_card *card) 287void mmc_add_card_debugfs(struct mmc_card *card)
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index d80cfdc8edd2..b29d3e8fd3a2 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (C) 2003 Russell King, All Rights Reserved. 4 * Copyright (C) 2003 Russell King, All Rights Reserved.
5 * Copyright (C) 2007-2008 Pierre Ossman 5 * Copyright (C) 2007-2008 Pierre Ossman
6 * Copyright (C) 2010 Linus Walleij
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -20,6 +21,7 @@
20#include <linux/suspend.h> 21#include <linux/suspend.h>
21 22
22#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24#include <linux/mmc/card.h>
23 25
24#include "core.h" 26#include "core.h"
25#include "host.h" 27#include "host.h"
@@ -50,6 +52,202 @@ void mmc_unregister_host_class(void)
50static DEFINE_IDR(mmc_host_idr); 52static DEFINE_IDR(mmc_host_idr);
51static DEFINE_SPINLOCK(mmc_host_lock); 53static DEFINE_SPINLOCK(mmc_host_lock);
52 54
55#ifdef CONFIG_MMC_CLKGATE
56
57/*
58 * Enabling clock gating will make the core call out to the host
59 * once up and once down when it performs a request or card operation
60 * intermingled in any fashion. The driver will see this through
61 * set_ios() operations with ios.clock field set to 0 to gate (disable)
62 * the block clock, and to the old frequency to enable it again.
63 */
64static void mmc_host_clk_gate_delayed(struct mmc_host *host)
65{
66 unsigned long tick_ns;
67 unsigned long freq = host->ios.clock;
68 unsigned long flags;
69
70 if (!freq) {
71 pr_debug("%s: frequency set to 0 in disable function, "
72 "this means the clock is already disabled.\n",
73 mmc_hostname(host));
74 return;
75 }
76 /*
77 * New requests may have appeared while we were scheduling,
78 * then there is no reason to delay the check before
79 * clk_disable().
80 */
81 spin_lock_irqsave(&host->clk_lock, flags);
82
83 /*
84 * Delay n bus cycles (at least 8 from MMC spec) before attempting
85 * to disable the MCI block clock. The reference count may have
86 * gone up again after this delay due to rescheduling!
87 */
88 if (!host->clk_requests) {
89 spin_unlock_irqrestore(&host->clk_lock, flags);
90 tick_ns = DIV_ROUND_UP(1000000000, freq);
91 ndelay(host->clk_delay * tick_ns);
92 } else {
93 /* New users appeared while waiting for this work */
94 spin_unlock_irqrestore(&host->clk_lock, flags);
95 return;
96 }
97 mutex_lock(&host->clk_gate_mutex);
98 spin_lock_irqsave(&host->clk_lock, flags);
99 if (!host->clk_requests) {
100 spin_unlock_irqrestore(&host->clk_lock, flags);
101 /* This will set host->ios.clock to 0 */
102 mmc_gate_clock(host);
103 spin_lock_irqsave(&host->clk_lock, flags);
104 pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
105 }
106 spin_unlock_irqrestore(&host->clk_lock, flags);
107 mutex_unlock(&host->clk_gate_mutex);
108}
109
110/*
111 * Internal work. Work to disable the clock at some later point.
112 */
113static void mmc_host_clk_gate_work(struct work_struct *work)
114{
115 struct mmc_host *host = container_of(work, struct mmc_host,
116 clk_gate_work);
117
118 mmc_host_clk_gate_delayed(host);
119}
120
121/**
122 * mmc_host_clk_ungate - ungate hardware MCI clocks
123 * @host: host to ungate.
124 *
125 * Makes sure the host ios.clock is restored to a non-zero value
126 * past this call. Increase clock reference count and ungate clock
127 * if we're the first user.
128 */
129void mmc_host_clk_ungate(struct mmc_host *host)
130{
131 unsigned long flags;
132
133 mutex_lock(&host->clk_gate_mutex);
134 spin_lock_irqsave(&host->clk_lock, flags);
135 if (host->clk_gated) {
136 spin_unlock_irqrestore(&host->clk_lock, flags);
137 mmc_ungate_clock(host);
138 spin_lock_irqsave(&host->clk_lock, flags);
139 pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
140 }
141 host->clk_requests++;
142 spin_unlock_irqrestore(&host->clk_lock, flags);
143 mutex_unlock(&host->clk_gate_mutex);
144}
145
146/**
147 * mmc_host_may_gate_card - check if this card may be gated
148 * @card: card to check.
149 */
150static bool mmc_host_may_gate_card(struct mmc_card *card)
151{
152 /* If there is no card we may gate it */
153 if (!card)
154 return true;
155 /*
156 * Don't gate SDIO cards! These need to be clocked at all times
157 * since they may be independent systems generating interrupts
158 * and other events. The clock requests counter from the core will
159 * go down to zero since the core does not need it, but we will not
160 * gate the clock, because there is somebody out there that may still
161 * be using it.
162 */
163 return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
164}
165
166/**
167 * mmc_host_clk_gate - gate off hardware MCI clocks
168 * @host: host to gate.
169 *
170 * Calls the host driver with ios.clock set to zero as often as possible
171 * in order to gate off hardware MCI clocks. Decrease clock reference
172 * count and schedule disabling of clock.
173 */
174void mmc_host_clk_gate(struct mmc_host *host)
175{
176 unsigned long flags;
177
178 spin_lock_irqsave(&host->clk_lock, flags);
179 host->clk_requests--;
180 if (mmc_host_may_gate_card(host->card) &&
181 !host->clk_requests)
182 schedule_work(&host->clk_gate_work);
183 spin_unlock_irqrestore(&host->clk_lock, flags);
184}
185
186/**
187 * mmc_host_clk_rate - get current clock frequency setting
188 * @host: host to get the clock frequency for.
189 *
190 * Returns current clock frequency regardless of gating.
191 */
192unsigned int mmc_host_clk_rate(struct mmc_host *host)
193{
194 unsigned long freq;
195 unsigned long flags;
196
197 spin_lock_irqsave(&host->clk_lock, flags);
198 if (host->clk_gated)
199 freq = host->clk_old;
200 else
201 freq = host->ios.clock;
202 spin_unlock_irqrestore(&host->clk_lock, flags);
203 return freq;
204}
205
206/**
207 * mmc_host_clk_init - set up clock gating code
208 * @host: host with potential clock to control
209 */
210static inline void mmc_host_clk_init(struct mmc_host *host)
211{
212 host->clk_requests = 0;
213 /* Hold MCI clock for 8 cycles by default */
214 host->clk_delay = 8;
215 host->clk_gated = false;
216 INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
217 spin_lock_init(&host->clk_lock);
218 mutex_init(&host->clk_gate_mutex);
219}
220
221/**
222 * mmc_host_clk_exit - shut down clock gating code
223 * @host: host with potential clock to control
224 */
225static inline void mmc_host_clk_exit(struct mmc_host *host)
226{
227 /*
228 * Wait for any outstanding gate and then make sure we're
229 * ungated before exiting.
230 */
231 if (cancel_work_sync(&host->clk_gate_work))
232 mmc_host_clk_gate_delayed(host);
233 if (host->clk_gated)
234 mmc_host_clk_ungate(host);
235 /* There should be only one user now */
236 WARN_ON(host->clk_requests > 1);
237}
238
239#else
240
241static inline void mmc_host_clk_init(struct mmc_host *host)
242{
243}
244
245static inline void mmc_host_clk_exit(struct mmc_host *host)
246{
247}
248
249#endif
250
53/** 251/**
54 * mmc_alloc_host - initialise the per-host structure. 252 * mmc_alloc_host - initialise the per-host structure.
55 * @extra: sizeof private data structure 253 * @extra: sizeof private data structure
@@ -82,6 +280,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
82 host->class_dev.class = &mmc_host_class; 280 host->class_dev.class = &mmc_host_class;
83 device_initialize(&host->class_dev); 281 device_initialize(&host->class_dev);
84 282
283 mmc_host_clk_init(host);
284
85 spin_lock_init(&host->lock); 285 spin_lock_init(&host->lock);
86 init_waitqueue_head(&host->wq); 286 init_waitqueue_head(&host->wq);
87 INIT_DELAYED_WORK(&host->detect, mmc_rescan); 287 INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -94,8 +294,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
94 * By default, hosts do not support SGIO or large requests. 294 * By default, hosts do not support SGIO or large requests.
95 * They have to set these according to their abilities. 295 * They have to set these according to their abilities.
96 */ 296 */
97 host->max_hw_segs = 1; 297 host->max_segs = 1;
98 host->max_phys_segs = 1;
99 host->max_seg_size = PAGE_CACHE_SIZE; 298 host->max_seg_size = PAGE_CACHE_SIZE;
100 299
101 host->max_req_size = PAGE_CACHE_SIZE; 300 host->max_req_size = PAGE_CACHE_SIZE;
@@ -126,12 +325,12 @@ int mmc_add_host(struct mmc_host *host)
126 WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && 325 WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
127 !host->ops->enable_sdio_irq); 326 !host->ops->enable_sdio_irq);
128 327
129 led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
130
131 err = device_add(&host->class_dev); 328 err = device_add(&host->class_dev);
132 if (err) 329 if (err)
133 return err; 330 return err;
134 331
332 led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
333
135#ifdef CONFIG_DEBUG_FS 334#ifdef CONFIG_DEBUG_FS
136 mmc_add_host_debugfs(host); 335 mmc_add_host_debugfs(host);
137#endif 336#endif
@@ -164,6 +363,8 @@ void mmc_remove_host(struct mmc_host *host)
164 device_del(&host->class_dev); 363 device_del(&host->class_dev);
165 364
166 led_trigger_unregister_simple(host->led); 365 led_trigger_unregister_simple(host->led);
366
367 mmc_host_clk_exit(host);
167} 368}
168 369
169EXPORT_SYMBOL(mmc_remove_host); 370EXPORT_SYMBOL(mmc_remove_host);
@@ -184,4 +385,3 @@ void mmc_free_host(struct mmc_host *host)
184} 385}
185 386
186EXPORT_SYMBOL(mmc_free_host); 387EXPORT_SYMBOL(mmc_free_host);
187
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 8c87e1109a34..de199f911928 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -10,10 +10,31 @@
10 */ 10 */
11#ifndef _MMC_CORE_HOST_H 11#ifndef _MMC_CORE_HOST_H
12#define _MMC_CORE_HOST_H 12#define _MMC_CORE_HOST_H
13#include <linux/mmc/host.h>
13 14
14int mmc_register_host_class(void); 15int mmc_register_host_class(void);
15void mmc_unregister_host_class(void); 16void mmc_unregister_host_class(void);
16 17
18#ifdef CONFIG_MMC_CLKGATE
19void mmc_host_clk_ungate(struct mmc_host *host);
20void mmc_host_clk_gate(struct mmc_host *host);
21unsigned int mmc_host_clk_rate(struct mmc_host *host);
22
23#else
24static inline void mmc_host_clk_ungate(struct mmc_host *host)
25{
26}
27
28static inline void mmc_host_clk_gate(struct mmc_host *host)
29{
30}
31
32static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
33{
34 return host->ios.clock;
35}
36#endif
37
17void mmc_host_deeper_disable(struct work_struct *work); 38void mmc_host_deeper_disable(struct work_struct *work);
18 39
19#endif 40#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 6909a54c39be..aa7d1d79b8c5 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -20,6 +20,7 @@
20#include "core.h" 20#include "core.h"
21#include "bus.h" 21#include "bus.h"
22#include "mmc_ops.h" 22#include "mmc_ops.h"
23#include "sd_ops.h"
23 24
24static const unsigned int tran_exp[] = { 25static const unsigned int tran_exp[] = {
25 10000, 100000, 1000000, 10000000, 26 10000, 100000, 1000000, 10000000,
@@ -173,14 +174,17 @@ static int mmc_decode_csd(struct mmc_card *card)
173} 174}
174 175
175/* 176/*
176 * Read and decode extended CSD. 177 * Read extended CSD.
177 */ 178 */
178static int mmc_read_ext_csd(struct mmc_card *card) 179static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
179{ 180{
180 int err; 181 int err;
181 u8 *ext_csd; 182 u8 *ext_csd;
182 183
183 BUG_ON(!card); 184 BUG_ON(!card);
185 BUG_ON(!new_ext_csd);
186
187 *new_ext_csd = NULL;
184 188
185 if (card->csd.mmca_vsn < CSD_SPEC_VER_4) 189 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
186 return 0; 190 return 0;
@@ -198,12 +202,15 @@ static int mmc_read_ext_csd(struct mmc_card *card)
198 202
199 err = mmc_send_ext_csd(card, ext_csd); 203 err = mmc_send_ext_csd(card, ext_csd);
200 if (err) { 204 if (err) {
205 kfree(ext_csd);
206 *new_ext_csd = NULL;
207
201 /* If the host or the card can't do the switch, 208 /* If the host or the card can't do the switch,
202 * fail more gracefully. */ 209 * fail more gracefully. */
203 if ((err != -EINVAL) 210 if ((err != -EINVAL)
204 && (err != -ENOSYS) 211 && (err != -ENOSYS)
205 && (err != -EFAULT)) 212 && (err != -EFAULT))
206 goto out; 213 return err;
207 214
208 /* 215 /*
209 * High capacity cards should have this "magic" size 216 * High capacity cards should have this "magic" size
@@ -221,17 +228,31 @@ static int mmc_read_ext_csd(struct mmc_card *card)
221 mmc_hostname(card->host)); 228 mmc_hostname(card->host));
222 err = 0; 229 err = 0;
223 } 230 }
231 } else
232 *new_ext_csd = ext_csd;
224 233
225 goto out; 234 return err;
226 } 235}
236
237/*
238 * Decode extended CSD.
239 */
240static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
241{
242 int err = 0;
243
244 BUG_ON(!card);
245
246 if (!ext_csd)
247 return 0;
227 248
228 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ 249 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
250 card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
229 if (card->csd.structure == 3) { 251 if (card->csd.structure == 3) {
230 int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE]; 252 if (card->ext_csd.raw_ext_csd_structure > 2) {
231 if (ext_csd_struct > 2) {
232 printk(KERN_ERR "%s: unrecognised EXT_CSD structure " 253 printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
233 "version %d\n", mmc_hostname(card->host), 254 "version %d\n", mmc_hostname(card->host),
234 ext_csd_struct); 255 card->ext_csd.raw_ext_csd_structure);
235 err = -EINVAL; 256 err = -EINVAL;
236 goto out; 257 goto out;
237 } 258 }
@@ -245,6 +266,10 @@ static int mmc_read_ext_csd(struct mmc_card *card)
245 goto out; 266 goto out;
246 } 267 }
247 268
269 card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
270 card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
271 card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
272 card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
248 if (card->ext_csd.rev >= 2) { 273 if (card->ext_csd.rev >= 2) {
249 card->ext_csd.sectors = 274 card->ext_csd.sectors =
250 ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | 275 ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
@@ -256,8 +281,23 @@ static int mmc_read_ext_csd(struct mmc_card *card)
256 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) 281 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
257 mmc_card_set_blockaddr(card); 282 mmc_card_set_blockaddr(card);
258 } 283 }
259 284 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
260 switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { 285 switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
286 case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
287 EXT_CSD_CARD_TYPE_26:
288 card->ext_csd.hs_max_dtr = 52000000;
289 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
290 break;
291 case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
292 EXT_CSD_CARD_TYPE_26:
293 card->ext_csd.hs_max_dtr = 52000000;
294 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
295 break;
296 case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
297 EXT_CSD_CARD_TYPE_26:
298 card->ext_csd.hs_max_dtr = 52000000;
299 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
300 break;
261 case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: 301 case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
262 card->ext_csd.hs_max_dtr = 52000000; 302 card->ext_csd.hs_max_dtr = 52000000;
263 break; 303 break;
@@ -271,8 +311,17 @@ static int mmc_read_ext_csd(struct mmc_card *card)
271 mmc_hostname(card->host)); 311 mmc_hostname(card->host));
272 } 312 }
273 313
314 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
315 card->ext_csd.raw_erase_timeout_mult =
316 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
317 card->ext_csd.raw_hc_erase_grp_size =
318 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
274 if (card->ext_csd.rev >= 3) { 319 if (card->ext_csd.rev >= 3) {
275 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; 320 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
321 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
322
323 /* EXT_CSD value is in units of 10ms, but we store in ms */
324 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
276 325
277 /* Sleep / awake timeout in 100ns units */ 326 /* Sleep / awake timeout in 100ns units */
278 if (sa_shift > 0 && sa_shift <= 0x17) 327 if (sa_shift > 0 && sa_shift <= 0x17)
@@ -284,9 +333,65 @@ static int mmc_read_ext_csd(struct mmc_card *card)
284 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; 333 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
285 card->ext_csd.hc_erase_size = 334 card->ext_csd.hc_erase_size =
286 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10; 335 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
336
337 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
338
339 /*
340 * There are two boot regions of equal size, defined in
341 * multiples of 128K.
342 */
343 card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
287 } 344 }
288 345
346 card->ext_csd.raw_hc_erase_gap_size =
347 ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
348 card->ext_csd.raw_sec_trim_mult =
349 ext_csd[EXT_CSD_SEC_TRIM_MULT];
350 card->ext_csd.raw_sec_erase_mult =
351 ext_csd[EXT_CSD_SEC_ERASE_MULT];
352 card->ext_csd.raw_sec_feature_support =
353 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
354 card->ext_csd.raw_trim_mult =
355 ext_csd[EXT_CSD_TRIM_MULT];
289 if (card->ext_csd.rev >= 4) { 356 if (card->ext_csd.rev >= 4) {
357 /*
358 * Enhanced area feature support -- check whether the eMMC
359 * card has the Enhanced area enabled. If so, export enhanced
360 * area offset and size to user by adding sysfs interface.
361 */
362 if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
363 (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
364 u8 hc_erase_grp_sz =
365 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
366 u8 hc_wp_grp_sz =
367 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
368
369 card->ext_csd.enhanced_area_en = 1;
370 /*
371 * calculate the enhanced data area offset, in bytes
372 */
373 card->ext_csd.enhanced_area_offset =
374 (ext_csd[139] << 24) + (ext_csd[138] << 16) +
375 (ext_csd[137] << 8) + ext_csd[136];
376 if (mmc_card_blockaddr(card))
377 card->ext_csd.enhanced_area_offset <<= 9;
378 /*
379 * calculate the enhanced data area size, in kilobytes
380 */
381 card->ext_csd.enhanced_area_size =
382 (ext_csd[142] << 16) + (ext_csd[141] << 8) +
383 ext_csd[140];
384 card->ext_csd.enhanced_area_size *=
385 (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
386 card->ext_csd.enhanced_area_size <<= 9;
387 } else {
388 /*
389 * If the enhanced area is not enabled, disable these
390 * device attributes.
391 */
392 card->ext_csd.enhanced_area_offset = -EINVAL;
393 card->ext_csd.enhanced_area_size = -EINVAL;
394 }
290 card->ext_csd.sec_trim_mult = 395 card->ext_csd.sec_trim_mult =
291 ext_csd[EXT_CSD_SEC_TRIM_MULT]; 396 ext_csd[EXT_CSD_SEC_TRIM_MULT];
292 card->ext_csd.sec_erase_mult = 397 card->ext_csd.sec_erase_mult =
@@ -297,14 +402,83 @@ static int mmc_read_ext_csd(struct mmc_card *card)
297 ext_csd[EXT_CSD_TRIM_MULT]; 402 ext_csd[EXT_CSD_TRIM_MULT];
298 } 403 }
299 404
405 if (card->ext_csd.rev >= 5)
406 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
407
300 if (ext_csd[EXT_CSD_ERASED_MEM_CONT]) 408 if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
301 card->erased_byte = 0xFF; 409 card->erased_byte = 0xFF;
302 else 410 else
303 card->erased_byte = 0x0; 411 card->erased_byte = 0x0;
304 412
305out: 413out:
414 return err;
415}
416
417static inline void mmc_free_ext_csd(u8 *ext_csd)
418{
306 kfree(ext_csd); 419 kfree(ext_csd);
420}
421
422
423static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
424{
425 u8 *bw_ext_csd;
426 int err;
307 427
428 if (bus_width == MMC_BUS_WIDTH_1)
429 return 0;
430
431 err = mmc_get_ext_csd(card, &bw_ext_csd);
432
433 if (err || bw_ext_csd == NULL) {
434 if (bus_width != MMC_BUS_WIDTH_1)
435 err = -EINVAL;
436 goto out;
437 }
438
439 if (bus_width == MMC_BUS_WIDTH_1)
440 goto out;
441
442 /* only compare read only fields */
443 err = (!(card->ext_csd.raw_partition_support ==
444 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
445 (card->ext_csd.raw_erased_mem_count ==
446 bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
447 (card->ext_csd.rev ==
448 bw_ext_csd[EXT_CSD_REV]) &&
449 (card->ext_csd.raw_ext_csd_structure ==
450 bw_ext_csd[EXT_CSD_STRUCTURE]) &&
451 (card->ext_csd.raw_card_type ==
452 bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
453 (card->ext_csd.raw_s_a_timeout ==
454 bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
455 (card->ext_csd.raw_hc_erase_gap_size ==
456 bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
457 (card->ext_csd.raw_erase_timeout_mult ==
458 bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
459 (card->ext_csd.raw_hc_erase_grp_size ==
460 bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
461 (card->ext_csd.raw_sec_trim_mult ==
462 bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
463 (card->ext_csd.raw_sec_erase_mult ==
464 bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
465 (card->ext_csd.raw_sec_feature_support ==
466 bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
467 (card->ext_csd.raw_trim_mult ==
468 bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
469 (card->ext_csd.raw_sectors[0] ==
470 bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
471 (card->ext_csd.raw_sectors[1] ==
472 bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
473 (card->ext_csd.raw_sectors[2] ==
474 bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
475 (card->ext_csd.raw_sectors[3] ==
476 bw_ext_csd[EXT_CSD_SEC_CNT + 3]));
477 if (err)
478 err = -EINVAL;
479
480out:
481 mmc_free_ext_csd(bw_ext_csd);
308 return err; 482 return err;
309} 483}
310 484
@@ -321,6 +495,9 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
321MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); 495MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
322MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); 496MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
323MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial); 497MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
498MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
499 card->ext_csd.enhanced_area_offset);
500MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
324 501
325static struct attribute *mmc_std_attrs[] = { 502static struct attribute *mmc_std_attrs[] = {
326 &dev_attr_cid.attr, 503 &dev_attr_cid.attr,
@@ -334,6 +511,8 @@ static struct attribute *mmc_std_attrs[] = {
334 &dev_attr_name.attr, 511 &dev_attr_name.attr,
335 &dev_attr_oemid.attr, 512 &dev_attr_oemid.attr,
336 &dev_attr_serial.attr, 513 &dev_attr_serial.attr,
514 &dev_attr_enhanced_area_offset.attr,
515 &dev_attr_enhanced_area_size.attr,
337 NULL, 516 NULL,
338}; 517};
339 518
@@ -360,9 +539,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
360 struct mmc_card *oldcard) 539 struct mmc_card *oldcard)
361{ 540{
362 struct mmc_card *card; 541 struct mmc_card *card;
363 int err; 542 int err, ddr = 0;
364 u32 cid[4]; 543 u32 cid[4];
365 unsigned int max_dtr; 544 unsigned int max_dtr;
545 u32 rocr;
546 u8 *ext_csd = NULL;
366 547
367 BUG_ON(!host); 548 BUG_ON(!host);
368 WARN_ON(!host->claimed); 549 WARN_ON(!host->claimed);
@@ -376,7 +557,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
376 mmc_go_idle(host); 557 mmc_go_idle(host);
377 558
378 /* The extra bit indicates that we support high capacity */ 559 /* The extra bit indicates that we support high capacity */
379 err = mmc_send_op_cond(host, ocr | (1 << 30), NULL); 560 err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
380 if (err) 561 if (err)
381 goto err; 562 goto err;
382 563
@@ -461,20 +642,76 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
461 /* 642 /*
462 * Fetch and process extended CSD. 643 * Fetch and process extended CSD.
463 */ 644 */
464 err = mmc_read_ext_csd(card); 645
646 err = mmc_get_ext_csd(card, &ext_csd);
647 if (err)
648 goto free_card;
649 err = mmc_read_ext_csd(card, ext_csd);
465 if (err) 650 if (err)
466 goto free_card; 651 goto free_card;
652
653 /* If doing byte addressing, check if required to do sector
654 * addressing. Handle the case of <2GB cards needing sector
655 * addressing. See section 8.1 JEDEC Standard JED84-A441;
656 * ocr register has bit 30 set for sector addressing.
657 */
658 if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
659 mmc_card_set_blockaddr(card);
660
467 /* Erase size depends on CSD and Extended CSD */ 661 /* Erase size depends on CSD and Extended CSD */
468 mmc_set_erase_size(card); 662 mmc_set_erase_size(card);
469 } 663 }
470 664
471 /* 665 /*
666 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
667 * bit. This bit will be lost every time after a reset or power off.
668 */
669 if (card->ext_csd.enhanced_area_en) {
670 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
671 EXT_CSD_ERASE_GROUP_DEF, 1, 0);
672
673 if (err && err != -EBADMSG)
674 goto free_card;
675
676 if (err) {
677 err = 0;
678 /*
679 * Just disable enhanced area off & sz
680 * will try to enable ERASE_GROUP_DEF
681 * during next time reinit
682 */
683 card->ext_csd.enhanced_area_offset = -EINVAL;
684 card->ext_csd.enhanced_area_size = -EINVAL;
685 } else {
686 card->ext_csd.erase_group_def = 1;
687 /*
688 * enable ERASE_GRP_DEF successfully.
689 * This will affect the erase size, so
690 * here need to reset erase size
691 */
692 mmc_set_erase_size(card);
693 }
694 }
695
696 /*
697 * Ensure eMMC user default partition is enabled
698 */
699 if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
700 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
701 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
702 card->ext_csd.part_config,
703 card->ext_csd.part_time);
704 if (err && err != -EBADMSG)
705 goto free_card;
706 }
707
708 /*
472 * Activate high speed (if supported) 709 * Activate high speed (if supported)
473 */ 710 */
474 if ((card->ext_csd.hs_max_dtr != 0) && 711 if ((card->ext_csd.hs_max_dtr != 0) &&
475 (host->caps & MMC_CAP_MMC_HIGHSPEED)) { 712 (host->caps & MMC_CAP_MMC_HIGHSPEED)) {
476 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 713 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
477 EXT_CSD_HS_TIMING, 1); 714 EXT_CSD_HS_TIMING, 1, 0);
478 if (err && err != -EBADMSG) 715 if (err && err != -EBADMSG)
479 goto free_card; 716 goto free_card;
480 717
@@ -503,32 +740,102 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
503 mmc_set_clock(host, max_dtr); 740 mmc_set_clock(host, max_dtr);
504 741
505 /* 742 /*
506 * Activate wide bus (if supported). 743 * Indicate DDR mode (if supported).
744 */
745 if (mmc_card_highspeed(card)) {
746 if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
747 && ((host->caps & (MMC_CAP_1_8V_DDR |
748 MMC_CAP_UHS_DDR50))
749 == (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
750 ddr = MMC_1_8V_DDR_MODE;
751 else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
752 && ((host->caps & (MMC_CAP_1_2V_DDR |
753 MMC_CAP_UHS_DDR50))
754 == (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
755 ddr = MMC_1_2V_DDR_MODE;
756 }
757
758 /*
759 * Activate wide bus and DDR (if supported).
507 */ 760 */
508 if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) && 761 if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
509 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { 762 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
510 unsigned ext_csd_bit, bus_width; 763 static unsigned ext_csd_bits[][2] = {
511 764 { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
512 if (host->caps & MMC_CAP_8_BIT_DATA) { 765 { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
513 ext_csd_bit = EXT_CSD_BUS_WIDTH_8; 766 { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
514 bus_width = MMC_BUS_WIDTH_8; 767 };
515 } else { 768 static unsigned bus_widths[] = {
516 ext_csd_bit = EXT_CSD_BUS_WIDTH_4; 769 MMC_BUS_WIDTH_8,
517 bus_width = MMC_BUS_WIDTH_4; 770 MMC_BUS_WIDTH_4,
771 MMC_BUS_WIDTH_1
772 };
773 unsigned idx, bus_width = 0;
774
775 if (host->caps & MMC_CAP_8_BIT_DATA)
776 idx = 0;
777 else
778 idx = 1;
779 for (; idx < ARRAY_SIZE(bus_widths); idx++) {
780 bus_width = bus_widths[idx];
781 if (bus_width == MMC_BUS_WIDTH_1)
782 ddr = 0; /* no DDR for 1-bit width */
783 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
784 EXT_CSD_BUS_WIDTH,
785 ext_csd_bits[idx][0],
786 0);
787 if (!err) {
788 mmc_set_bus_width(card->host, bus_width);
789
790 /*
791 * If controller can't handle bus width test,
792 * compare ext_csd previously read in 1 bit mode
793 * against ext_csd at new bus width
794 */
795 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
796 err = mmc_compare_ext_csds(card,
797 bus_width);
798 else
799 err = mmc_bus_test(card, bus_width);
800 if (!err)
801 break;
802 }
518 } 803 }
519 804
520 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 805 if (!err && ddr) {
521 EXT_CSD_BUS_WIDTH, ext_csd_bit); 806 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
522 807 EXT_CSD_BUS_WIDTH,
523 if (err && err != -EBADMSG) 808 ext_csd_bits[idx][1],
524 goto free_card; 809 0);
525 810 }
526 if (err) { 811 if (err) {
527 printk(KERN_WARNING "%s: switch to bus width %d " 812 printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
528 "failed\n", mmc_hostname(card->host), 813 "failed\n", mmc_hostname(card->host),
529 1 << bus_width); 814 1 << bus_width, ddr);
530 err = 0; 815 goto free_card;
531 } else { 816 } else if (ddr) {
817 /*
818 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
819 * signaling.
820 *
821 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
822 *
823 * 1.8V vccq at 3.3V core voltage (vcc) is not required
824 * in the JEDEC spec for DDR.
825 *
826 * Do not force change in vccq since we are obviously
827 * working and no change to vccq is needed.
828 *
829 * WARNING: eMMC rules are NOT the same as SD DDR
830 */
831 if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
832 err = mmc_set_signal_voltage(host,
833 MMC_SIGNAL_VOLTAGE_120, 0);
834 if (err)
835 goto err;
836 }
837 mmc_card_set_ddr_mode(card);
838 mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
532 mmc_set_bus_width(card->host, bus_width); 839 mmc_set_bus_width(card->host, bus_width);
533 } 840 }
534 } 841 }
@@ -536,12 +843,14 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
536 if (!oldcard) 843 if (!oldcard)
537 host->card = card; 844 host->card = card;
538 845
846 mmc_free_ext_csd(ext_csd);
539 return 0; 847 return 0;
540 848
541free_card: 849free_card:
542 if (!oldcard) 850 if (!oldcard)
543 mmc_remove_card(card); 851 mmc_remove_card(card);
544err: 852err:
853 mmc_free_ext_csd(ext_csd);
545 854
546 return err; 855 return err;
547} 856}
@@ -623,12 +932,16 @@ static int mmc_resume(struct mmc_host *host)
623 return err; 932 return err;
624} 933}
625 934
626static void mmc_power_restore(struct mmc_host *host) 935static int mmc_power_restore(struct mmc_host *host)
627{ 936{
937 int ret;
938
628 host->card->state &= ~MMC_STATE_HIGHSPEED; 939 host->card->state &= ~MMC_STATE_HIGHSPEED;
629 mmc_claim_host(host); 940 mmc_claim_host(host);
630 mmc_init_card(host, host->ocr, host->card); 941 ret = mmc_init_card(host, host->ocr, host->card);
631 mmc_release_host(host); 942 mmc_release_host(host);
943
944 return ret;
632} 945}
633 946
634static int mmc_sleep(struct mmc_host *host) 947static int mmc_sleep(struct mmc_host *host)
@@ -685,7 +998,7 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
685{ 998{
686 const struct mmc_bus_ops *bus_ops; 999 const struct mmc_bus_ops *bus_ops;
687 1000
688 if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable) 1001 if (!mmc_card_is_removable(host))
689 bus_ops = &mmc_ops_unsafe; 1002 bus_ops = &mmc_ops_unsafe;
690 else 1003 else
691 bus_ops = &mmc_ops; 1004 bus_ops = &mmc_ops;
@@ -695,14 +1008,21 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
695/* 1008/*
696 * Starting point for MMC card init. 1009 * Starting point for MMC card init.
697 */ 1010 */
698int mmc_attach_mmc(struct mmc_host *host, u32 ocr) 1011int mmc_attach_mmc(struct mmc_host *host)
699{ 1012{
700 int err; 1013 int err;
1014 u32 ocr;
701 1015
702 BUG_ON(!host); 1016 BUG_ON(!host);
703 WARN_ON(!host->claimed); 1017 WARN_ON(!host->claimed);
704 1018
1019 err = mmc_send_op_cond(host, 0, &ocr);
1020 if (err)
1021 return err;
1022
705 mmc_attach_bus_ops(host); 1023 mmc_attach_bus_ops(host);
1024 if (host->ocr_avail_mmc)
1025 host->ocr_avail = host->ocr_avail_mmc;
706 1026
707 /* 1027 /*
708 * We need to get OCR a different way for SPI. 1028 * We need to get OCR a different way for SPI.
@@ -742,20 +1062,20 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
742 goto err; 1062 goto err;
743 1063
744 mmc_release_host(host); 1064 mmc_release_host(host);
745
746 err = mmc_add_card(host->card); 1065 err = mmc_add_card(host->card);
1066 mmc_claim_host(host);
747 if (err) 1067 if (err)
748 goto remove_card; 1068 goto remove_card;
749 1069
750 return 0; 1070 return 0;
751 1071
752remove_card: 1072remove_card:
1073 mmc_release_host(host);
753 mmc_remove_card(host->card); 1074 mmc_remove_card(host->card);
754 host->card = NULL;
755 mmc_claim_host(host); 1075 mmc_claim_host(host);
1076 host->card = NULL;
756err: 1077err:
757 mmc_detach_bus(host); 1078 mmc_detach_bus(host);
758 mmc_release_host(host);
759 1079
760 printk(KERN_ERR "%s: error %d whilst initialising MMC card\n", 1080 printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
761 mmc_hostname(host), err); 1081 mmc_hostname(host), err);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 326447c9ede8..845ce7c533b9 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -23,12 +23,10 @@
23static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) 23static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
24{ 24{
25 int err; 25 int err;
26 struct mmc_command cmd; 26 struct mmc_command cmd = {0};
27 27
28 BUG_ON(!host); 28 BUG_ON(!host);
29 29
30 memset(&cmd, 0, sizeof(struct mmc_command));
31
32 cmd.opcode = MMC_SELECT_CARD; 30 cmd.opcode = MMC_SELECT_CARD;
33 31
34 if (card) { 32 if (card) {
@@ -60,15 +58,13 @@ int mmc_deselect_cards(struct mmc_host *host)
60 58
61int mmc_card_sleepawake(struct mmc_host *host, int sleep) 59int mmc_card_sleepawake(struct mmc_host *host, int sleep)
62{ 60{
63 struct mmc_command cmd; 61 struct mmc_command cmd = {0};
64 struct mmc_card *card = host->card; 62 struct mmc_card *card = host->card;
65 int err; 63 int err;
66 64
67 if (sleep) 65 if (sleep)
68 mmc_deselect_cards(host); 66 mmc_deselect_cards(host);
69 67
70 memset(&cmd, 0, sizeof(struct mmc_command));
71
72 cmd.opcode = MMC_SLEEP_AWAKE; 68 cmd.opcode = MMC_SLEEP_AWAKE;
73 cmd.arg = card->rca << 16; 69 cmd.arg = card->rca << 16;
74 if (sleep) 70 if (sleep)
@@ -97,7 +93,7 @@ int mmc_card_sleepawake(struct mmc_host *host, int sleep)
97int mmc_go_idle(struct mmc_host *host) 93int mmc_go_idle(struct mmc_host *host)
98{ 94{
99 int err; 95 int err;
100 struct mmc_command cmd; 96 struct mmc_command cmd = {0};
101 97
102 /* 98 /*
103 * Non-SPI hosts need to prevent chipselect going active during 99 * Non-SPI hosts need to prevent chipselect going active during
@@ -105,7 +101,7 @@ int mmc_go_idle(struct mmc_host *host)
105 * that in case of hardware that won't pull up DAT3/nCS otherwise. 101 * that in case of hardware that won't pull up DAT3/nCS otherwise.
106 * 102 *
107 * SPI hosts ignore ios.chip_select; it's managed according to 103 * SPI hosts ignore ios.chip_select; it's managed according to
108 * rules that must accomodate non-MMC slaves which this layer 104 * rules that must accommodate non-MMC slaves which this layer
109 * won't even know about. 105 * won't even know about.
110 */ 106 */
111 if (!mmc_host_is_spi(host)) { 107 if (!mmc_host_is_spi(host)) {
@@ -113,8 +109,6 @@ int mmc_go_idle(struct mmc_host *host)
113 mmc_delay(1); 109 mmc_delay(1);
114 } 110 }
115 111
116 memset(&cmd, 0, sizeof(struct mmc_command));
117
118 cmd.opcode = MMC_GO_IDLE_STATE; 112 cmd.opcode = MMC_GO_IDLE_STATE;
119 cmd.arg = 0; 113 cmd.arg = 0;
120 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC; 114 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
@@ -135,13 +129,11 @@ int mmc_go_idle(struct mmc_host *host)
135 129
136int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) 130int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
137{ 131{
138 struct mmc_command cmd; 132 struct mmc_command cmd = {0};
139 int i, err = 0; 133 int i, err = 0;
140 134
141 BUG_ON(!host); 135 BUG_ON(!host);
142 136
143 memset(&cmd, 0, sizeof(struct mmc_command));
144
145 cmd.opcode = MMC_SEND_OP_COND; 137 cmd.opcode = MMC_SEND_OP_COND;
146 cmd.arg = mmc_host_is_spi(host) ? 0 : ocr; 138 cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
147 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR; 139 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
@@ -178,13 +170,11 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
178int mmc_all_send_cid(struct mmc_host *host, u32 *cid) 170int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
179{ 171{
180 int err; 172 int err;
181 struct mmc_command cmd; 173 struct mmc_command cmd = {0};
182 174
183 BUG_ON(!host); 175 BUG_ON(!host);
184 BUG_ON(!cid); 176 BUG_ON(!cid);
185 177
186 memset(&cmd, 0, sizeof(struct mmc_command));
187
188 cmd.opcode = MMC_ALL_SEND_CID; 178 cmd.opcode = MMC_ALL_SEND_CID;
189 cmd.arg = 0; 179 cmd.arg = 0;
190 cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; 180 cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
@@ -201,13 +191,11 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
201int mmc_set_relative_addr(struct mmc_card *card) 191int mmc_set_relative_addr(struct mmc_card *card)
202{ 192{
203 int err; 193 int err;
204 struct mmc_command cmd; 194 struct mmc_command cmd = {0};
205 195
206 BUG_ON(!card); 196 BUG_ON(!card);
207 BUG_ON(!card->host); 197 BUG_ON(!card->host);
208 198
209 memset(&cmd, 0, sizeof(struct mmc_command));
210
211 cmd.opcode = MMC_SET_RELATIVE_ADDR; 199 cmd.opcode = MMC_SET_RELATIVE_ADDR;
212 cmd.arg = card->rca << 16; 200 cmd.arg = card->rca << 16;
213 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 201 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
@@ -223,13 +211,11 @@ static int
223mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode) 211mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
224{ 212{
225 int err; 213 int err;
226 struct mmc_command cmd; 214 struct mmc_command cmd = {0};
227 215
228 BUG_ON(!host); 216 BUG_ON(!host);
229 BUG_ON(!cxd); 217 BUG_ON(!cxd);
230 218
231 memset(&cmd, 0, sizeof(struct mmc_command));
232
233 cmd.opcode = opcode; 219 cmd.opcode = opcode;
234 cmd.arg = arg; 220 cmd.arg = arg;
235 cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; 221 cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
@@ -247,9 +233,9 @@ static int
247mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, 233mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
248 u32 opcode, void *buf, unsigned len) 234 u32 opcode, void *buf, unsigned len)
249{ 235{
250 struct mmc_request mrq; 236 struct mmc_request mrq = {0};
251 struct mmc_command cmd; 237 struct mmc_command cmd = {0};
252 struct mmc_data data; 238 struct mmc_data data = {0};
253 struct scatterlist sg; 239 struct scatterlist sg;
254 void *data_buf; 240 void *data_buf;
255 241
@@ -260,10 +246,6 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
260 if (data_buf == NULL) 246 if (data_buf == NULL)
261 return -ENOMEM; 247 return -ENOMEM;
262 248
263 memset(&mrq, 0, sizeof(struct mmc_request));
264 memset(&cmd, 0, sizeof(struct mmc_command));
265 memset(&data, 0, sizeof(struct mmc_data));
266
267 mrq.cmd = &cmd; 249 mrq.cmd = &cmd;
268 mrq.data = &data; 250 mrq.data = &data;
269 251
@@ -355,11 +337,9 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
355 337
356int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) 338int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
357{ 339{
358 struct mmc_command cmd; 340 struct mmc_command cmd = {0};
359 int err; 341 int err;
360 342
361 memset(&cmd, 0, sizeof(struct mmc_command));
362
363 cmd.opcode = MMC_SPI_READ_OCR; 343 cmd.opcode = MMC_SPI_READ_OCR;
364 cmd.arg = highcap ? (1 << 30) : 0; 344 cmd.arg = highcap ? (1 << 30) : 0;
365 cmd.flags = MMC_RSP_SPI_R3; 345 cmd.flags = MMC_RSP_SPI_R3;
@@ -372,11 +352,9 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
372 352
373int mmc_spi_set_crc(struct mmc_host *host, int use_crc) 353int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
374{ 354{
375 struct mmc_command cmd; 355 struct mmc_command cmd = {0};
376 int err; 356 int err;
377 357
378 memset(&cmd, 0, sizeof(struct mmc_command));
379
380 cmd.opcode = MMC_SPI_CRC_ON_OFF; 358 cmd.opcode = MMC_SPI_CRC_ON_OFF;
381 cmd.flags = MMC_RSP_SPI_R1; 359 cmd.flags = MMC_RSP_SPI_R1;
382 cmd.arg = use_crc; 360 cmd.arg = use_crc;
@@ -387,23 +365,34 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
387 return err; 365 return err;
388} 366}
389 367
390int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value) 368/**
369 * mmc_switch - modify EXT_CSD register
370 * @card: the MMC card associated with the data transfer
371 * @set: cmd set values
372 * @index: EXT_CSD register index
373 * @value: value to program into EXT_CSD register
374 * @timeout_ms: timeout (ms) for operation performed by register write,
375 * timeout of zero implies maximum possible timeout
376 *
377 * Modifies the EXT_CSD register for selected card.
378 */
379int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
380 unsigned int timeout_ms)
391{ 381{
392 int err; 382 int err;
393 struct mmc_command cmd; 383 struct mmc_command cmd = {0};
394 u32 status; 384 u32 status;
395 385
396 BUG_ON(!card); 386 BUG_ON(!card);
397 BUG_ON(!card->host); 387 BUG_ON(!card->host);
398 388
399 memset(&cmd, 0, sizeof(struct mmc_command));
400
401 cmd.opcode = MMC_SWITCH; 389 cmd.opcode = MMC_SWITCH;
402 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 390 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
403 (index << 16) | 391 (index << 16) |
404 (value << 8) | 392 (value << 8) |
405 set; 393 set;
406 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 394 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
395 cmd.cmd_timeout_ms = timeout_ms;
407 396
408 err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); 397 err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
409 if (err) 398 if (err)
@@ -433,17 +422,16 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
433 422
434 return 0; 423 return 0;
435} 424}
425EXPORT_SYMBOL_GPL(mmc_switch);
436 426
437int mmc_send_status(struct mmc_card *card, u32 *status) 427int mmc_send_status(struct mmc_card *card, u32 *status)
438{ 428{
439 int err; 429 int err;
440 struct mmc_command cmd; 430 struct mmc_command cmd = {0};
441 431
442 BUG_ON(!card); 432 BUG_ON(!card);
443 BUG_ON(!card->host); 433 BUG_ON(!card->host);
444 434
445 memset(&cmd, 0, sizeof(struct mmc_command));
446
447 cmd.opcode = MMC_SEND_STATUS; 435 cmd.opcode = MMC_SEND_STATUS;
448 if (!mmc_host_is_spi(card->host)) 436 if (!mmc_host_is_spi(card->host))
449 cmd.arg = card->rca << 16; 437 cmd.arg = card->rca << 16;
@@ -462,3 +450,100 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
462 return 0; 450 return 0;
463} 451}
464 452
453static int
454mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
455 u8 len)
456{
457 struct mmc_request mrq = {0};
458 struct mmc_command cmd = {0};
459 struct mmc_data data = {0};
460 struct scatterlist sg;
461 u8 *data_buf;
462 u8 *test_buf;
463 int i, err;
464 static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
465 static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
466
467 /* dma onto stack is unsafe/nonportable, but callers to this
468 * routine normally provide temporary on-stack buffers ...
469 */
470 data_buf = kmalloc(len, GFP_KERNEL);
471 if (!data_buf)
472 return -ENOMEM;
473
474 if (len == 8)
475 test_buf = testdata_8bit;
476 else if (len == 4)
477 test_buf = testdata_4bit;
478 else {
479 printk(KERN_ERR "%s: Invalid bus_width %d\n",
480 mmc_hostname(host), len);
481 kfree(data_buf);
482 return -EINVAL;
483 }
484
485 if (opcode == MMC_BUS_TEST_W)
486 memcpy(data_buf, test_buf, len);
487
488 mrq.cmd = &cmd;
489 mrq.data = &data;
490 cmd.opcode = opcode;
491 cmd.arg = 0;
492
493 /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
494 * rely on callers to never use this with "native" calls for reading
495 * CSD or CID. Native versions of those commands use the R2 type,
496 * not R1 plus a data block.
497 */
498 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
499
500 data.blksz = len;
501 data.blocks = 1;
502 if (opcode == MMC_BUS_TEST_R)
503 data.flags = MMC_DATA_READ;
504 else
505 data.flags = MMC_DATA_WRITE;
506
507 data.sg = &sg;
508 data.sg_len = 1;
509 sg_init_one(&sg, data_buf, len);
510 mmc_wait_for_req(host, &mrq);
511 err = 0;
512 if (opcode == MMC_BUS_TEST_R) {
513 for (i = 0; i < len / 4; i++)
514 if ((test_buf[i] ^ data_buf[i]) != 0xff) {
515 err = -EIO;
516 break;
517 }
518 }
519 kfree(data_buf);
520
521 if (cmd.error)
522 return cmd.error;
523 if (data.error)
524 return data.error;
525
526 return err;
527}
528
529int mmc_bus_test(struct mmc_card *card, u8 bus_width)
530{
531 int err, width;
532
533 if (bus_width == MMC_BUS_WIDTH_8)
534 width = 8;
535 else if (bus_width == MMC_BUS_WIDTH_4)
536 width = 4;
537 else if (bus_width == MMC_BUS_WIDTH_1)
538 return 0; /* no need for test */
539 else
540 return -EINVAL;
541
542 /*
543 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
544 * is a problem. This improves chances that the test will work.
545 */
546 mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
547 err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
548 return err;
549}
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 653eb8e84178..9276946fa5b7 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -20,12 +20,12 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid);
20int mmc_set_relative_addr(struct mmc_card *card); 20int mmc_set_relative_addr(struct mmc_card *card);
21int mmc_send_csd(struct mmc_card *card, u32 *csd); 21int mmc_send_csd(struct mmc_card *card, u32 *csd);
22int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); 22int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
23int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value);
24int mmc_send_status(struct mmc_card *card, u32 *status); 23int mmc_send_status(struct mmc_card *card, u32 *status);
25int mmc_send_cid(struct mmc_host *host, u32 *cid); 24int mmc_send_cid(struct mmc_host *host, u32 *cid);
26int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); 25int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
27int mmc_spi_set_crc(struct mmc_host *host, int use_crc); 26int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
28int mmc_card_sleepawake(struct mmc_host *host, int sleep); 27int mmc_card_sleepawake(struct mmc_host *host, int sleep);
28int mmc_bus_test(struct mmc_card *card, u8 bus_width);
29 29
30#endif 30#endif
31 31
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
new file mode 100644
index 000000000000..3a596217029e
--- /dev/null
+++ b/drivers/mmc/core/quirks.c
@@ -0,0 +1,79 @@
1/*
2 * This file contains work-arounds for many known SD/MMC
3 * and SDIO hardware bugs.
4 *
5 * Copyright (c) 2011 Andrei Warkentin <andreiw@motorola.com>
6 * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
7 * Inspired from pci fixup code:
8 * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
9 *
10 */
11
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/mmc/card.h>
15
16#ifndef SDIO_VENDOR_ID_TI
17#define SDIO_VENDOR_ID_TI 0x0097
18#endif
19
20#ifndef SDIO_DEVICE_ID_TI_WL1271
21#define SDIO_DEVICE_ID_TI_WL1271 0x4076
22#endif
23
24/*
25 * This hook just adds a quirk for all sdio devices
26 */
27static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
28{
29 if (mmc_card_sdio(card))
30 card->quirks |= data;
31}
32
33static const struct mmc_fixup mmc_fixup_methods[] = {
34 /* by default sdio devices are considered CLK_GATING broken */
35 /* good cards will be whitelisted as they are tested */
36 SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
37 add_quirk_for_sdio_devices,
38 MMC_QUIRK_BROKEN_CLK_GATING),
39
40 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
41 remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
42
43 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
44 add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
45
46 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
47 add_quirk, MMC_QUIRK_DISABLE_CD),
48
49 END_FIXUP
50};
51
52void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table)
53{
54 const struct mmc_fixup *f;
55 u64 rev = cid_rev_card(card);
56
57 /* Non-core specific workarounds. */
58 if (!table)
59 table = mmc_fixup_methods;
60
61 for (f = table; f->vendor_fixup; f++) {
62 if ((f->manfid == CID_MANFID_ANY ||
63 f->manfid == card->cid.manfid) &&
64 (f->oemid == CID_OEMID_ANY ||
65 f->oemid == card->cid.oemid) &&
66 (f->name == CID_NAME_ANY ||
67 !strncmp(f->name, card->cid.prod_name,
68 sizeof(card->cid.prod_name))) &&
69 (f->cis_vendor == card->cis.vendor ||
70 f->cis_vendor == (u16) SDIO_ANY_ID) &&
71 (f->cis_device == card->cis.device ||
72 f->cis_device == (u16) SDIO_ANY_ID) &&
73 rev >= f->rev_start && rev <= f->rev_end) {
74 dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup);
75 f->vendor_fixup(card, f->data);
76 }
77 }
78}
79EXPORT_SYMBOL(mmc_fixup_device);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 0f5241085557..ff2774128aa9 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -21,6 +21,7 @@
21#include "core.h" 21#include "core.h"
22#include "bus.h" 22#include "bus.h"
23#include "mmc_ops.h" 23#include "mmc_ops.h"
24#include "sd.h"
24#include "sd_ops.h" 25#include "sd_ops.h"
25 26
26static const unsigned int tran_exp[] = { 27static const unsigned int tran_exp[] = {
@@ -129,7 +130,7 @@ static int mmc_decode_csd(struct mmc_card *card)
129 break; 130 break;
130 case 1: 131 case 1:
131 /* 132 /*
132 * This is a block-addressed SDHC card. Most 133 * This is a block-addressed SDHC or SDXC card. Most
133 * interesting fields are unused and have fixed 134 * interesting fields are unused and have fixed
134 * values. To avoid getting tripped by buggy cards, 135 * values. To avoid getting tripped by buggy cards,
135 * we assume those fixed values ourselves. 136 * we assume those fixed values ourselves.
@@ -143,6 +144,11 @@ static int mmc_decode_csd(struct mmc_card *card)
143 e = UNSTUFF_BITS(resp, 96, 3); 144 e = UNSTUFF_BITS(resp, 96, 3);
144 csd->max_dtr = tran_exp[e] * tran_mant[m]; 145 csd->max_dtr = tran_exp[e] * tran_mant[m];
145 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); 146 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
147 csd->c_size = UNSTUFF_BITS(resp, 48, 22);
148
149 /* SDXC cards have a minimum C_SIZE of 0x00FFFF */
150 if (csd->c_size >= 0xFFFF)
151 mmc_card_set_ext_capacity(card);
146 152
147 m = UNSTUFF_BITS(resp, 48, 22); 153 m = UNSTUFF_BITS(resp, 48, 22);
148 csd->capacity = (1 + m) << 10; 154 csd->capacity = (1 + m) << 10;
@@ -188,12 +194,17 @@ static int mmc_decode_scr(struct mmc_card *card)
188 194
189 scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4); 195 scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
190 scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); 196 scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);
197 if (scr->sda_vsn == SCR_SPEC_VER_2)
198 /* Check if Physical Layer Spec v3.0 is supported */
199 scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1);
191 200
192 if (UNSTUFF_BITS(resp, 55, 1)) 201 if (UNSTUFF_BITS(resp, 55, 1))
193 card->erased_byte = 0xFF; 202 card->erased_byte = 0xFF;
194 else 203 else
195 card->erased_byte = 0x0; 204 card->erased_byte = 0x0;
196 205
206 if (scr->sda_spec3)
207 scr->cmds = UNSTUFF_BITS(resp, 32, 2);
197 return 0; 208 return 0;
198} 209}
199 210
@@ -273,29 +284,74 @@ static int mmc_read_switch(struct mmc_card *card)
273 status = kmalloc(64, GFP_KERNEL); 284 status = kmalloc(64, GFP_KERNEL);
274 if (!status) { 285 if (!status) {
275 printk(KERN_ERR "%s: could not allocate a buffer for " 286 printk(KERN_ERR "%s: could not allocate a buffer for "
276 "switch capabilities.\n", mmc_hostname(card->host)); 287 "switch capabilities.\n",
288 mmc_hostname(card->host));
277 return -ENOMEM; 289 return -ENOMEM;
278 } 290 }
279 291
292 /* Find out the supported Bus Speed Modes. */
280 err = mmc_sd_switch(card, 0, 0, 1, status); 293 err = mmc_sd_switch(card, 0, 0, 1, status);
281 if (err) { 294 if (err) {
282 /* If the host or the card can't do the switch, 295 /*
283 * fail more gracefully. */ 296 * If the host or the card can't do the switch,
284 if ((err != -EINVAL) 297 * fail more gracefully.
285 && (err != -ENOSYS) 298 */
286 && (err != -EFAULT)) 299 if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
287 goto out; 300 goto out;
288 301
289 printk(KERN_WARNING "%s: problem reading switch " 302 printk(KERN_WARNING "%s: problem reading Bus Speed modes.\n",
290 "capabilities, performance might suffer.\n",
291 mmc_hostname(card->host)); 303 mmc_hostname(card->host));
292 err = 0; 304 err = 0;
293 305
294 goto out; 306 goto out;
295 } 307 }
296 308
297 if (status[13] & 0x02) 309 if (card->scr.sda_spec3) {
298 card->sw_caps.hs_max_dtr = 50000000; 310 card->sw_caps.sd3_bus_mode = status[13];
311
312 /* Find out Driver Strengths supported by the card */
313 err = mmc_sd_switch(card, 0, 2, 1, status);
314 if (err) {
315 /*
316 * If the host or the card can't do the switch,
317 * fail more gracefully.
318 */
319 if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
320 goto out;
321
322 printk(KERN_WARNING "%s: problem reading "
323 "Driver Strength.\n",
324 mmc_hostname(card->host));
325 err = 0;
326
327 goto out;
328 }
329
330 card->sw_caps.sd3_drv_type = status[9];
331
332 /* Find out Current Limits supported by the card */
333 err = mmc_sd_switch(card, 0, 3, 1, status);
334 if (err) {
335 /*
336 * If the host or the card can't do the switch,
337 * fail more gracefully.
338 */
339 if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
340 goto out;
341
342 printk(KERN_WARNING "%s: problem reading "
343 "Current Limit.\n",
344 mmc_hostname(card->host));
345 err = 0;
346
347 goto out;
348 }
349
350 card->sw_caps.sd3_curr_limit = status[7];
351 } else {
352 if (status[13] & 0x02)
353 card->sw_caps.hs_max_dtr = 50000000;
354 }
299 355
300out: 356out:
301 kfree(status); 357 kfree(status);
@@ -351,6 +407,232 @@ out:
351 return err; 407 return err;
352} 408}
353 409
410static int sd_select_driver_type(struct mmc_card *card, u8 *status)
411{
412 int host_drv_type = 0, card_drv_type = 0;
413 int err;
414
415 /*
416 * If the host doesn't support any of the Driver Types A,C or D,
417 * default Driver Type B is used.
418 */
419 if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C
420 | MMC_CAP_DRIVER_TYPE_D)))
421 return 0;
422
423 if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) {
424 host_drv_type = MMC_SET_DRIVER_TYPE_A;
425 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
426 card_drv_type = MMC_SET_DRIVER_TYPE_A;
427 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
428 card_drv_type = MMC_SET_DRIVER_TYPE_B;
429 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
430 card_drv_type = MMC_SET_DRIVER_TYPE_C;
431 } else if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) {
432 host_drv_type = MMC_SET_DRIVER_TYPE_C;
433 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
434 card_drv_type = MMC_SET_DRIVER_TYPE_C;
435 } else if (!(card->host->caps & MMC_CAP_DRIVER_TYPE_D)) {
436 /*
437 * If we are here, that means only the default driver type
438 * B is supported by the host.
439 */
440 host_drv_type = MMC_SET_DRIVER_TYPE_B;
441 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
442 card_drv_type = MMC_SET_DRIVER_TYPE_B;
443 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
444 card_drv_type = MMC_SET_DRIVER_TYPE_C;
445 }
446
447 err = mmc_sd_switch(card, 1, 2, card_drv_type, status);
448 if (err)
449 return err;
450
451 if ((status[15] & 0xF) != card_drv_type) {
452 printk(KERN_WARNING "%s: Problem setting driver strength!\n",
453 mmc_hostname(card->host));
454 return 0;
455 }
456
457 mmc_set_driver_type(card->host, host_drv_type);
458
459 return 0;
460}
461
462static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
463{
464 unsigned int bus_speed = 0, timing = 0;
465 int err;
466
467 /*
468 * If the host doesn't support any of the UHS-I modes, fallback on
469 * default speed.
470 */
471 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
472 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
473 return 0;
474
475 if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
476 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
477 bus_speed = UHS_SDR104_BUS_SPEED;
478 timing = MMC_TIMING_UHS_SDR104;
479 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
480 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
481 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
482 bus_speed = UHS_DDR50_BUS_SPEED;
483 timing = MMC_TIMING_UHS_DDR50;
484 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
485 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
486 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
487 SD_MODE_UHS_SDR50)) {
488 bus_speed = UHS_SDR50_BUS_SPEED;
489 timing = MMC_TIMING_UHS_SDR50;
490 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
491 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
492 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
493 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
494 bus_speed = UHS_SDR25_BUS_SPEED;
495 timing = MMC_TIMING_UHS_SDR25;
496 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
497 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
498 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
499 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
500 SD_MODE_UHS_SDR12)) {
501 bus_speed = UHS_SDR12_BUS_SPEED;
502 timing = MMC_TIMING_UHS_SDR12;
503 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
504 }
505
506 card->sd_bus_speed = bus_speed;
507 err = mmc_sd_switch(card, 1, 0, bus_speed, status);
508 if (err)
509 return err;
510
511 if ((status[16] & 0xF) != bus_speed)
512 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
513 mmc_hostname(card->host));
514 else {
515 mmc_set_timing(card->host, timing);
516 mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
517 }
518
519 return 0;
520}
521
522static int sd_set_current_limit(struct mmc_card *card, u8 *status)
523{
524 int current_limit = 0;
525 int err;
526
527 /*
528 * Current limit switch is only defined for SDR50, SDR104, and DDR50
529 * bus speed modes. For other bus speed modes, we set the default
530 * current limit of 200mA.
531 */
532 if ((card->sd_bus_speed == UHS_SDR50_BUS_SPEED) ||
533 (card->sd_bus_speed == UHS_SDR104_BUS_SPEED) ||
534 (card->sd_bus_speed == UHS_DDR50_BUS_SPEED)) {
535 if (card->host->caps & MMC_CAP_MAX_CURRENT_800) {
536 if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800)
537 current_limit = SD_SET_CURRENT_LIMIT_800;
538 else if (card->sw_caps.sd3_curr_limit &
539 SD_MAX_CURRENT_600)
540 current_limit = SD_SET_CURRENT_LIMIT_600;
541 else if (card->sw_caps.sd3_curr_limit &
542 SD_MAX_CURRENT_400)
543 current_limit = SD_SET_CURRENT_LIMIT_400;
544 else if (card->sw_caps.sd3_curr_limit &
545 SD_MAX_CURRENT_200)
546 current_limit = SD_SET_CURRENT_LIMIT_200;
547 } else if (card->host->caps & MMC_CAP_MAX_CURRENT_600) {
548 if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600)
549 current_limit = SD_SET_CURRENT_LIMIT_600;
550 else if (card->sw_caps.sd3_curr_limit &
551 SD_MAX_CURRENT_400)
552 current_limit = SD_SET_CURRENT_LIMIT_400;
553 else if (card->sw_caps.sd3_curr_limit &
554 SD_MAX_CURRENT_200)
555 current_limit = SD_SET_CURRENT_LIMIT_200;
556 } else if (card->host->caps & MMC_CAP_MAX_CURRENT_400) {
557 if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400)
558 current_limit = SD_SET_CURRENT_LIMIT_400;
559 else if (card->sw_caps.sd3_curr_limit &
560 SD_MAX_CURRENT_200)
561 current_limit = SD_SET_CURRENT_LIMIT_200;
562 } else if (card->host->caps & MMC_CAP_MAX_CURRENT_200) {
563 if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200)
564 current_limit = SD_SET_CURRENT_LIMIT_200;
565 }
566 } else
567 current_limit = SD_SET_CURRENT_LIMIT_200;
568
569 err = mmc_sd_switch(card, 1, 3, current_limit, status);
570 if (err)
571 return err;
572
573 if (((status[15] >> 4) & 0x0F) != current_limit)
574 printk(KERN_WARNING "%s: Problem setting current limit!\n",
575 mmc_hostname(card->host));
576
577 return 0;
578}
579
580/*
581 * UHS-I specific initialization procedure
582 */
583static int mmc_sd_init_uhs_card(struct mmc_card *card)
584{
585 int err;
586 u8 *status;
587
588 if (!card->scr.sda_spec3)
589 return 0;
590
591 if (!(card->csd.cmdclass & CCC_SWITCH))
592 return 0;
593
594 status = kmalloc(64, GFP_KERNEL);
595 if (!status) {
596 printk(KERN_ERR "%s: could not allocate a buffer for "
597 "switch capabilities.\n", mmc_hostname(card->host));
598 return -ENOMEM;
599 }
600
601 /* Set 4-bit bus width */
602 if ((card->host->caps & MMC_CAP_4_BIT_DATA) &&
603 (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
604 err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
605 if (err)
606 goto out;
607
608 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
609 }
610
611 /* Set the driver strength for the card */
612 err = sd_select_driver_type(card, status);
613 if (err)
614 goto out;
615
616 /* Set bus speed mode of the card */
617 err = sd_set_bus_speed_mode(card, status);
618 if (err)
619 goto out;
620
621 /* Set current limit for the card */
622 err = sd_set_current_limit(card, status);
623 if (err)
624 goto out;
625
626 /* SPI mode doesn't define CMD19 */
627 if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
628 err = card->host->ops->execute_tuning(card->host);
629
630out:
631 kfree(status);
632
633 return err;
634}
635
354MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1], 636MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
355 card->raw_cid[2], card->raw_cid[3]); 637 card->raw_cid[2], card->raw_cid[3]);
356MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], 638MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
@@ -399,7 +681,7 @@ struct device_type sd_type = {
399/* 681/*
400 * Fetch CID from card. 682 * Fetch CID from card.
401 */ 683 */
402int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid) 684int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
403{ 685{
404 int err; 686 int err;
405 687
@@ -419,12 +701,39 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid)
419 */ 701 */
420 err = mmc_send_if_cond(host, ocr); 702 err = mmc_send_if_cond(host, ocr);
421 if (!err) 703 if (!err)
422 ocr |= 1 << 30; 704 ocr |= SD_OCR_CCS;
705
706 /*
707 * If the host supports one of UHS-I modes, request the card
708 * to switch to 1.8V signaling level.
709 */
710 if (host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
711 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))
712 ocr |= SD_OCR_S18R;
713
714 /* If the host can supply more than 150mA, XPC should be set to 1. */
715 if (host->caps & (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 |
716 MMC_CAP_SET_XPC_180))
717 ocr |= SD_OCR_XPC;
423 718
424 err = mmc_send_app_op_cond(host, ocr, NULL); 719try_again:
720 err = mmc_send_app_op_cond(host, ocr, rocr);
425 if (err) 721 if (err)
426 return err; 722 return err;
427 723
724 /*
725 * In case CCS and S18A in the response is set, start Signal Voltage
726 * Switch procedure. SPI mode doesn't support CMD11.
727 */
728 if (!mmc_host_is_spi(host) && rocr &&
729 ((*rocr & 0x41000000) == 0x41000000)) {
730 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true);
731 if (err) {
732 ocr &= ~SD_OCR_S18R;
733 goto try_again;
734 }
735 }
736
428 if (mmc_host_is_spi(host)) 737 if (mmc_host_is_spi(host))
429 err = mmc_send_cid(host, cid); 738 err = mmc_send_cid(host, cid);
430 else 739 else
@@ -552,11 +861,12 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
552 struct mmc_card *card; 861 struct mmc_card *card;
553 int err; 862 int err;
554 u32 cid[4]; 863 u32 cid[4];
864 u32 rocr = 0;
555 865
556 BUG_ON(!host); 866 BUG_ON(!host);
557 WARN_ON(!host->claimed); 867 WARN_ON(!host->claimed);
558 868
559 err = mmc_sd_get_cid(host, ocr, cid); 869 err = mmc_sd_get_cid(host, ocr, cid, &rocr);
560 if (err) 870 if (err)
561 return err; 871 return err;
562 872
@@ -609,30 +919,47 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
609 if (err) 919 if (err)
610 goto free_card; 920 goto free_card;
611 921
612 /* 922 /* Initialization sequence for UHS-I cards */
613 * Attempt to change to high-speed (if supported) 923 if (rocr & SD_ROCR_S18A) {
614 */ 924 err = mmc_sd_init_uhs_card(card);
615 err = mmc_sd_switch_hs(card); 925 if (err)
616 if (err > 0) 926 goto free_card;
617 mmc_sd_go_highspeed(card);
618 else if (err)
619 goto free_card;
620 927
621 /* 928 /* Card is an ultra-high-speed card */
622 * Set bus speed. 929 mmc_sd_card_set_uhs(card);
623 */
624 mmc_set_clock(host, mmc_sd_get_max_clock(card));
625 930
626 /* 931 /*
627 * Switch to wider bus (if supported). 932 * Since initialization is now complete, enable preset
628 */ 933 * value registers for UHS-I cards.
629 if ((host->caps & MMC_CAP_4_BIT_DATA) && 934 */
630 (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) { 935 if (host->ops->enable_preset_value)
631 err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4); 936 host->ops->enable_preset_value(host, true);
632 if (err) 937 } else {
938 /*
939 * Attempt to change to high-speed (if supported)
940 */
941 err = mmc_sd_switch_hs(card);
942 if (err > 0)
943 mmc_sd_go_highspeed(card);
944 else if (err)
633 goto free_card; 945 goto free_card;
634 946
635 mmc_set_bus_width(host, MMC_BUS_WIDTH_4); 947 /*
948 * Set bus speed.
949 */
950 mmc_set_clock(host, mmc_sd_get_max_clock(card));
951
952 /*
953 * Switch to wider bus (if supported).
954 */
955 if ((host->caps & MMC_CAP_4_BIT_DATA) &&
956 (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
957 err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
958 if (err)
959 goto free_card;
960
961 mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
962 }
636 } 963 }
637 964
638 host->card = card; 965 host->card = card;
@@ -722,12 +1049,16 @@ static int mmc_sd_resume(struct mmc_host *host)
722 return err; 1049 return err;
723} 1050}
724 1051
725static void mmc_sd_power_restore(struct mmc_host *host) 1052static int mmc_sd_power_restore(struct mmc_host *host)
726{ 1053{
1054 int ret;
1055
727 host->card->state &= ~MMC_STATE_HIGHSPEED; 1056 host->card->state &= ~MMC_STATE_HIGHSPEED;
728 mmc_claim_host(host); 1057 mmc_claim_host(host);
729 mmc_sd_init_card(host, host->ocr, host->card); 1058 ret = mmc_sd_init_card(host, host->ocr, host->card);
730 mmc_release_host(host); 1059 mmc_release_host(host);
1060
1061 return ret;
731} 1062}
732 1063
733static const struct mmc_bus_ops mmc_sd_ops = { 1064static const struct mmc_bus_ops mmc_sd_ops = {
@@ -750,7 +1081,7 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
750{ 1081{
751 const struct mmc_bus_ops *bus_ops; 1082 const struct mmc_bus_ops *bus_ops;
752 1083
753 if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable) 1084 if (!mmc_card_is_removable(host))
754 bus_ops = &mmc_sd_ops_unsafe; 1085 bus_ops = &mmc_sd_ops_unsafe;
755 else 1086 else
756 bus_ops = &mmc_sd_ops; 1087 bus_ops = &mmc_sd_ops;
@@ -760,14 +1091,30 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
760/* 1091/*
761 * Starting point for SD card init. 1092 * Starting point for SD card init.
762 */ 1093 */
763int mmc_attach_sd(struct mmc_host *host, u32 ocr) 1094int mmc_attach_sd(struct mmc_host *host)
764{ 1095{
765 int err; 1096 int err;
1097 u32 ocr;
766 1098
767 BUG_ON(!host); 1099 BUG_ON(!host);
768 WARN_ON(!host->claimed); 1100 WARN_ON(!host->claimed);
769 1101
1102 /* Make sure we are at 3.3V signalling voltage */
1103 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
1104 if (err)
1105 return err;
1106
1107 /* Disable preset value enable if already set since last time */
1108 if (host->ops->enable_preset_value)
1109 host->ops->enable_preset_value(host, false);
1110
1111 err = mmc_send_app_op_cond(host, 0, &ocr);
1112 if (err)
1113 return err;
1114
770 mmc_sd_attach_bus_ops(host); 1115 mmc_sd_attach_bus_ops(host);
1116 if (host->ocr_avail_sd)
1117 host->ocr_avail = host->ocr_avail_sd;
771 1118
772 /* 1119 /*
773 * We need to get OCR a different way for SPI. 1120 * We need to get OCR a different way for SPI.
@@ -791,7 +1138,8 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
791 ocr &= ~0x7F; 1138 ocr &= ~0x7F;
792 } 1139 }
793 1140
794 if (ocr & MMC_VDD_165_195) { 1141 if ((ocr & MMC_VDD_165_195) &&
1142 !(host->ocr_avail_sd & MMC_VDD_165_195)) {
795 printk(KERN_WARNING "%s: SD card claims to support the " 1143 printk(KERN_WARNING "%s: SD card claims to support the "
796 "incompletely defined 'low voltage range'. This " 1144 "incompletely defined 'low voltage range'. This "
797 "will be ignored.\n", mmc_hostname(host)); 1145 "will be ignored.\n", mmc_hostname(host));
@@ -816,20 +1164,20 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
816 goto err; 1164 goto err;
817 1165
818 mmc_release_host(host); 1166 mmc_release_host(host);
819
820 err = mmc_add_card(host->card); 1167 err = mmc_add_card(host->card);
1168 mmc_claim_host(host);
821 if (err) 1169 if (err)
822 goto remove_card; 1170 goto remove_card;
823 1171
824 return 0; 1172 return 0;
825 1173
826remove_card: 1174remove_card:
1175 mmc_release_host(host);
827 mmc_remove_card(host->card); 1176 mmc_remove_card(host->card);
828 host->card = NULL; 1177 host->card = NULL;
829 mmc_claim_host(host); 1178 mmc_claim_host(host);
830err: 1179err:
831 mmc_detach_bus(host); 1180 mmc_detach_bus(host);
832 mmc_release_host(host);
833 1181
834 printk(KERN_ERR "%s: error %d whilst initialising SD card\n", 1182 printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
835 mmc_hostname(host), err); 1183 mmc_hostname(host), err);
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index 3d8800fa7600..4b34b24f3f76 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -5,7 +5,7 @@
5 5
6extern struct device_type sd_type; 6extern struct device_type sd_type;
7 7
8int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid); 8int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);
9int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card); 9int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card);
10void mmc_decode_cid(struct mmc_card *card); 10void mmc_decode_cid(struct mmc_card *card);
11int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, 11int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 797cdb5887fd..021fed153804 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -9,6 +9,7 @@
9 * your option) any later version. 9 * your option) any later version.
10 */ 10 */
11 11
12#include <linux/slab.h>
12#include <linux/types.h> 13#include <linux/types.h>
13#include <linux/scatterlist.h> 14#include <linux/scatterlist.h>
14 15
@@ -20,10 +21,10 @@
20#include "core.h" 21#include "core.h"
21#include "sd_ops.h" 22#include "sd_ops.h"
22 23
23static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) 24int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
24{ 25{
25 int err; 26 int err;
26 struct mmc_command cmd; 27 struct mmc_command cmd = {0};
27 28
28 BUG_ON(!host); 29 BUG_ON(!host);
29 BUG_ON(card && (card->host != host)); 30 BUG_ON(card && (card->host != host));
@@ -48,6 +49,7 @@ static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
48 49
49 return 0; 50 return 0;
50} 51}
52EXPORT_SYMBOL_GPL(mmc_app_cmd);
51 53
52/** 54/**
53 * mmc_wait_for_app_cmd - start an application command and wait for 55 * mmc_wait_for_app_cmd - start an application command and wait for
@@ -65,7 +67,7 @@ static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
65int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, 67int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
66 struct mmc_command *cmd, int retries) 68 struct mmc_command *cmd, int retries)
67{ 69{
68 struct mmc_request mrq; 70 struct mmc_request mrq = {0};
69 71
70 int i, err; 72 int i, err;
71 73
@@ -118,13 +120,11 @@ EXPORT_SYMBOL(mmc_wait_for_app_cmd);
118int mmc_app_set_bus_width(struct mmc_card *card, int width) 120int mmc_app_set_bus_width(struct mmc_card *card, int width)
119{ 121{
120 int err; 122 int err;
121 struct mmc_command cmd; 123 struct mmc_command cmd = {0};
122 124
123 BUG_ON(!card); 125 BUG_ON(!card);
124 BUG_ON(!card->host); 126 BUG_ON(!card->host);
125 127
126 memset(&cmd, 0, sizeof(struct mmc_command));
127
128 cmd.opcode = SD_APP_SET_BUS_WIDTH; 128 cmd.opcode = SD_APP_SET_BUS_WIDTH;
129 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 129 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
130 130
@@ -148,13 +148,11 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
148 148
149int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) 149int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
150{ 150{
151 struct mmc_command cmd; 151 struct mmc_command cmd = {0};
152 int i, err = 0; 152 int i, err = 0;
153 153
154 BUG_ON(!host); 154 BUG_ON(!host);
155 155
156 memset(&cmd, 0, sizeof(struct mmc_command));
157
158 cmd.opcode = SD_APP_OP_COND; 156 cmd.opcode = SD_APP_OP_COND;
159 if (mmc_host_is_spi(host)) 157 if (mmc_host_is_spi(host))
160 cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */ 158 cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
@@ -193,7 +191,7 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
193 191
194int mmc_send_if_cond(struct mmc_host *host, u32 ocr) 192int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
195{ 193{
196 struct mmc_command cmd; 194 struct mmc_command cmd = {0};
197 int err; 195 int err;
198 static const u8 test_pattern = 0xAA; 196 static const u8 test_pattern = 0xAA;
199 u8 result_pattern; 197 u8 result_pattern;
@@ -225,13 +223,11 @@ int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
225int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca) 223int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
226{ 224{
227 int err; 225 int err;
228 struct mmc_command cmd; 226 struct mmc_command cmd = {0};
229 227
230 BUG_ON(!host); 228 BUG_ON(!host);
231 BUG_ON(!rca); 229 BUG_ON(!rca);
232 230
233 memset(&cmd, 0, sizeof(struct mmc_command));
234
235 cmd.opcode = SD_SEND_RELATIVE_ADDR; 231 cmd.opcode = SD_SEND_RELATIVE_ADDR;
236 cmd.arg = 0; 232 cmd.arg = 0;
237 cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; 233 cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;
@@ -248,10 +244,11 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
248int mmc_app_send_scr(struct mmc_card *card, u32 *scr) 244int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
249{ 245{
250 int err; 246 int err;
251 struct mmc_request mrq; 247 struct mmc_request mrq = {0};
252 struct mmc_command cmd; 248 struct mmc_command cmd = {0};
253 struct mmc_data data; 249 struct mmc_data data = {0};
254 struct scatterlist sg; 250 struct scatterlist sg;
251 void *data_buf;
255 252
256 BUG_ON(!card); 253 BUG_ON(!card);
257 BUG_ON(!card->host); 254 BUG_ON(!card->host);
@@ -263,9 +260,12 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
263 if (err) 260 if (err)
264 return err; 261 return err;
265 262
266 memset(&mrq, 0, sizeof(struct mmc_request)); 263 /* dma onto stack is unsafe/nonportable, but callers to this
267 memset(&cmd, 0, sizeof(struct mmc_command)); 264 * routine normally provide temporary on-stack buffers ...
268 memset(&data, 0, sizeof(struct mmc_data)); 265 */
266 data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
267 if (data_buf == NULL)
268 return -ENOMEM;
269 269
270 mrq.cmd = &cmd; 270 mrq.cmd = &cmd;
271 mrq.data = &data; 271 mrq.data = &data;
@@ -280,12 +280,15 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
280 data.sg = &sg; 280 data.sg = &sg;
281 data.sg_len = 1; 281 data.sg_len = 1;
282 282
283 sg_init_one(&sg, scr, 8); 283 sg_init_one(&sg, data_buf, 8);
284 284
285 mmc_set_data_timeout(&data, card); 285 mmc_set_data_timeout(&data, card);
286 286
287 mmc_wait_for_req(card->host, &mrq); 287 mmc_wait_for_req(card->host, &mrq);
288 288
289 memcpy(scr, data_buf, sizeof(card->raw_scr));
290 kfree(data_buf);
291
289 if (cmd.error) 292 if (cmd.error)
290 return cmd.error; 293 return cmd.error;
291 if (data.error) 294 if (data.error)
@@ -300,9 +303,9 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
300int mmc_sd_switch(struct mmc_card *card, int mode, int group, 303int mmc_sd_switch(struct mmc_card *card, int mode, int group,
301 u8 value, u8 *resp) 304 u8 value, u8 *resp)
302{ 305{
303 struct mmc_request mrq; 306 struct mmc_request mrq = {0};
304 struct mmc_command cmd; 307 struct mmc_command cmd = {0};
305 struct mmc_data data; 308 struct mmc_data data = {0};
306 struct scatterlist sg; 309 struct scatterlist sg;
307 310
308 BUG_ON(!card); 311 BUG_ON(!card);
@@ -313,10 +316,6 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
313 mode = !!mode; 316 mode = !!mode;
314 value &= 0xF; 317 value &= 0xF;
315 318
316 memset(&mrq, 0, sizeof(struct mmc_request));
317 memset(&cmd, 0, sizeof(struct mmc_command));
318 memset(&data, 0, sizeof(struct mmc_data));
319
320 mrq.cmd = &cmd; 319 mrq.cmd = &cmd;
321 mrq.data = &data; 320 mrq.data = &data;
322 321
@@ -349,9 +348,9 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
349int mmc_app_sd_status(struct mmc_card *card, void *ssr) 348int mmc_app_sd_status(struct mmc_card *card, void *ssr)
350{ 349{
351 int err; 350 int err;
352 struct mmc_request mrq; 351 struct mmc_request mrq = {0};
353 struct mmc_command cmd; 352 struct mmc_command cmd = {0};
354 struct mmc_data data; 353 struct mmc_data data = {0};
355 struct scatterlist sg; 354 struct scatterlist sg;
356 355
357 BUG_ON(!card); 356 BUG_ON(!card);
@@ -364,10 +363,6 @@ int mmc_app_sd_status(struct mmc_card *card, void *ssr)
364 if (err) 363 if (err)
365 return err; 364 return err;
366 365
367 memset(&mrq, 0, sizeof(struct mmc_request));
368 memset(&cmd, 0, sizeof(struct mmc_command));
369 memset(&data, 0, sizeof(struct mmc_data));
370
371 mrq.cmd = &cmd; 366 mrq.cmd = &cmd;
372 mrq.data = &data; 367 mrq.data = &data;
373 368
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index f332c52968b7..262fff019177 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -10,11 +10,13 @@
10 */ 10 */
11 11
12#include <linux/err.h> 12#include <linux/err.h>
13#include <linux/pm_runtime.h>
13 14
14#include <linux/mmc/host.h> 15#include <linux/mmc/host.h>
15#include <linux/mmc/card.h> 16#include <linux/mmc/card.h>
16#include <linux/mmc/sdio.h> 17#include <linux/mmc/sdio.h>
17#include <linux/mmc/sdio_func.h> 18#include <linux/mmc/sdio_func.h>
19#include <linux/mmc/sdio_ids.h>
18 20
19#include "core.h" 21#include "core.h"
20#include "bus.h" 22#include "bus.h"
@@ -30,6 +32,11 @@ static int sdio_read_fbr(struct sdio_func *func)
30 int ret; 32 int ret;
31 unsigned char data; 33 unsigned char data;
32 34
35 if (mmc_card_nonstd_func_interface(func->card)) {
36 func->class = SDIO_CLASS_NONE;
37 return 0;
38 }
39
33 ret = mmc_io_rw_direct(func->card, 0, 0, 40 ret = mmc_io_rw_direct(func->card, 0, 0,
34 SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data); 41 SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data);
35 if (ret) 42 if (ret)
@@ -180,7 +187,7 @@ static int sdio_disable_cd(struct mmc_card *card)
180 int ret; 187 int ret;
181 u8 ctrl; 188 u8 ctrl;
182 189
183 if (!card->cccr.disable_cd) 190 if (!mmc_card_disable_cd(card))
184 return 0; 191 return 0;
185 192
186 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl); 193 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
@@ -362,8 +369,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
362 goto err; 369 goto err;
363 } 370 }
364 371
365 if (ocr & R4_MEMORY_PRESENT 372 if ((ocr & R4_MEMORY_PRESENT) &&
366 && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) { 373 mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid, NULL) == 0) {
367 card->type = MMC_TYPE_SD_COMBO; 374 card->type = MMC_TYPE_SD_COMBO;
368 375
369 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || 376 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
@@ -394,6 +401,14 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
394 if (err) 401 if (err)
395 goto remove; 402 goto remove;
396 403
404 /*
405 * Update oldcard with the new RCA received from the SDIO
406 * device -- we're doing this so that it's updated in the
407 * "card" struct when oldcard overwrites that later.
408 */
409 if (oldcard)
410 oldcard->rca = card->rca;
411
397 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); 412 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
398 } 413 }
399 414
@@ -456,8 +471,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
456 return -ENOENT; 471 return -ENOENT;
457 472
458 card = oldcard; 473 card = oldcard;
459 return 0;
460 } 474 }
475 mmc_fixup_device(card, NULL);
461 476
462 if (card->type == MMC_TYPE_SD_COMBO) { 477 if (card->type == MMC_TYPE_SD_COMBO) {
463 err = mmc_sd_setup_card(host, card, oldcard != NULL); 478 err = mmc_sd_setup_card(host, card, oldcard != NULL);
@@ -546,6 +561,13 @@ static void mmc_sdio_detect(struct mmc_host *host)
546 BUG_ON(!host); 561 BUG_ON(!host);
547 BUG_ON(!host->card); 562 BUG_ON(!host->card);
548 563
564 /* Make sure card is powered before detecting it */
565 if (host->caps & MMC_CAP_POWER_OFF_CARD) {
566 err = pm_runtime_get_sync(&host->card->dev);
567 if (err < 0)
568 goto out;
569 }
570
549 mmc_claim_host(host); 571 mmc_claim_host(host);
550 572
551 /* 573 /*
@@ -555,6 +577,21 @@ static void mmc_sdio_detect(struct mmc_host *host)
555 577
556 mmc_release_host(host); 578 mmc_release_host(host);
557 579
580 /*
581 * Tell PM core it's OK to power off the card now.
582 *
583 * The _sync variant is used in order to ensure that the card
584 * is left powered off in case an error occurred, and the card
585 * is going to be removed.
586 *
587 * Since there is no specific reason to believe a new user
588 * is about to show up at this point, the _sync variant is
589 * desirable anyway.
590 */
591 if (host->caps & MMC_CAP_POWER_OFF_CARD)
592 pm_runtime_put_sync(&host->card->dev);
593
594out:
558 if (err) { 595 if (err) {
559 mmc_sdio_remove(host); 596 mmc_sdio_remove(host);
560 597
@@ -594,7 +631,7 @@ static int mmc_sdio_suspend(struct mmc_host *host)
594 } 631 }
595 } 632 }
596 633
597 if (!err && host->pm_flags & MMC_PM_KEEP_POWER) { 634 if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
598 mmc_claim_host(host); 635 mmc_claim_host(host);
599 sdio_disable_wide(host->card); 636 sdio_disable_wide(host->card);
600 mmc_release_host(host); 637 mmc_release_host(host);
@@ -605,23 +642,27 @@ static int mmc_sdio_suspend(struct mmc_host *host)
605 642
606static int mmc_sdio_resume(struct mmc_host *host) 643static int mmc_sdio_resume(struct mmc_host *host)
607{ 644{
608 int i, err; 645 int i, err = 0;
609 646
610 BUG_ON(!host); 647 BUG_ON(!host);
611 BUG_ON(!host->card); 648 BUG_ON(!host->card);
612 649
613 /* Basic card reinitialization. */ 650 /* Basic card reinitialization. */
614 mmc_claim_host(host); 651 mmc_claim_host(host);
615 err = mmc_sdio_init_card(host, host->ocr, host->card, 652
616 (host->pm_flags & MMC_PM_KEEP_POWER)); 653 /* No need to reinitialize powered-resumed nonremovable cards */
617 if (!err) { 654 if (mmc_card_is_removable(host) || !mmc_card_keep_power(host))
618 /* We may have switched to 1-bit mode during suspend. */ 655 err = mmc_sdio_init_card(host, host->ocr, host->card,
656 mmc_card_keep_power(host));
657 else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
658 /* We may have switched to 1-bit mode during suspend */
619 err = sdio_enable_4bit_bus(host->card); 659 err = sdio_enable_4bit_bus(host->card);
620 if (err > 0) { 660 if (err > 0) {
621 mmc_set_bus_width(host, MMC_BUS_WIDTH_4); 661 mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
622 err = 0; 662 err = 0;
623 } 663 }
624 } 664 }
665
625 if (!err && host->sdio_irqs) 666 if (!err && host->sdio_irqs)
626 mmc_signal_sdio_irq(host); 667 mmc_signal_sdio_irq(host);
627 mmc_release_host(host); 668 mmc_release_host(host);
@@ -647,27 +688,90 @@ static int mmc_sdio_resume(struct mmc_host *host)
647 return err; 688 return err;
648} 689}
649 690
691static int mmc_sdio_power_restore(struct mmc_host *host)
692{
693 int ret;
694 u32 ocr;
695
696 BUG_ON(!host);
697 BUG_ON(!host->card);
698
699 mmc_claim_host(host);
700
701 /*
702 * Reset the card by performing the same steps that are taken by
703 * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
704 *
705 * sdio_reset() is technically not needed. Having just powered up the
706 * hardware, it should already be in reset state. However, some
707 * platforms (such as SD8686 on OLPC) do not instantly cut power,
708 * meaning that a reset is required when restoring power soon after
709 * powering off. It is harmless in other cases.
710 *
711 * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
712 * is not necessary for non-removable cards. However, it is required
713 * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
714 * harmless in other situations.
715 *
716 * With these steps taken, mmc_select_voltage() is also required to
717 * restore the correct voltage setting of the card.
718 */
719 sdio_reset(host);
720 mmc_go_idle(host);
721 mmc_send_if_cond(host, host->ocr_avail);
722
723 ret = mmc_send_io_op_cond(host, 0, &ocr);
724 if (ret)
725 goto out;
726
727 if (host->ocr_avail_sdio)
728 host->ocr_avail = host->ocr_avail_sdio;
729
730 host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
731 if (!host->ocr) {
732 ret = -EINVAL;
733 goto out;
734 }
735
736 ret = mmc_sdio_init_card(host, host->ocr, host->card,
737 mmc_card_keep_power(host));
738 if (!ret && host->sdio_irqs)
739 mmc_signal_sdio_irq(host);
740
741out:
742 mmc_release_host(host);
743
744 return ret;
745}
746
650static const struct mmc_bus_ops mmc_sdio_ops = { 747static const struct mmc_bus_ops mmc_sdio_ops = {
651 .remove = mmc_sdio_remove, 748 .remove = mmc_sdio_remove,
652 .detect = mmc_sdio_detect, 749 .detect = mmc_sdio_detect,
653 .suspend = mmc_sdio_suspend, 750 .suspend = mmc_sdio_suspend,
654 .resume = mmc_sdio_resume, 751 .resume = mmc_sdio_resume,
752 .power_restore = mmc_sdio_power_restore,
655}; 753};
656 754
657 755
658/* 756/*
659 * Starting point for SDIO card init. 757 * Starting point for SDIO card init.
660 */ 758 */
661int mmc_attach_sdio(struct mmc_host *host, u32 ocr) 759int mmc_attach_sdio(struct mmc_host *host)
662{ 760{
663 int err; 761 int err, i, funcs;
664 int i, funcs; 762 u32 ocr;
665 struct mmc_card *card; 763 struct mmc_card *card;
666 764
667 BUG_ON(!host); 765 BUG_ON(!host);
668 WARN_ON(!host->claimed); 766 WARN_ON(!host->claimed);
669 767
768 err = mmc_send_io_op_cond(host, 0, &ocr);
769 if (err)
770 return err;
771
670 mmc_attach_bus(host, &mmc_sdio_ops); 772 mmc_attach_bus(host, &mmc_sdio_ops);
773 if (host->ocr_avail_sdio)
774 host->ocr_avail = host->ocr_avail_sdio;
671 775
672 /* 776 /*
673 * Sanity check the voltages that the card claims to 777 * Sanity check the voltages that the card claims to
@@ -699,6 +803,23 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
699 card = host->card; 803 card = host->card;
700 804
701 /* 805 /*
806 * Enable runtime PM only if supported by host+card+board
807 */
808 if (host->caps & MMC_CAP_POWER_OFF_CARD) {
809 /*
810 * Let runtime PM core know our card is active
811 */
812 err = pm_runtime_set_active(&card->dev);
813 if (err)
814 goto remove;
815
816 /*
817 * Enable runtime PM for this card
818 */
819 pm_runtime_enable(&card->dev);
820 }
821
822 /*
702 * The number of functions on the card is encoded inside 823 * The number of functions on the card is encoded inside
703 * the ocr. 824 * the ocr.
704 */ 825 */
@@ -712,13 +833,18 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
712 err = sdio_init_func(host->card, i + 1); 833 err = sdio_init_func(host->card, i + 1);
713 if (err) 834 if (err)
714 goto remove; 835 goto remove;
715 }
716 836
717 mmc_release_host(host); 837 /*
838 * Enable Runtime PM for this func (if supported)
839 */
840 if (host->caps & MMC_CAP_POWER_OFF_CARD)
841 pm_runtime_enable(&card->sdio_func[i]->dev);
842 }
718 843
719 /* 844 /*
720 * First add the card to the driver model... 845 * First add the card to the driver model...
721 */ 846 */
847 mmc_release_host(host);
722 err = mmc_add_card(host->card); 848 err = mmc_add_card(host->card);
723 if (err) 849 if (err)
724 goto remove_added; 850 goto remove_added;
@@ -732,6 +858,7 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
732 goto remove_added; 858 goto remove_added;
733 } 859 }
734 860
861 mmc_claim_host(host);
735 return 0; 862 return 0;
736 863
737 864
@@ -741,11 +868,12 @@ remove_added:
741 mmc_claim_host(host); 868 mmc_claim_host(host);
742remove: 869remove:
743 /* And with lock if it hasn't been added. */ 870 /* And with lock if it hasn't been added. */
871 mmc_release_host(host);
744 if (host->card) 872 if (host->card)
745 mmc_sdio_remove(host); 873 mmc_sdio_remove(host);
874 mmc_claim_host(host);
746err: 875err:
747 mmc_detach_bus(host); 876 mmc_detach_bus(host);
748 mmc_release_host(host);
749 877
750 printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n", 878 printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n",
751 mmc_hostname(host), err); 879 mmc_hostname(host), err);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 4a890dcb95ab..d2565df8a7fb 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -14,8 +14,10 @@
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/pm_runtime.h>
17 18
18#include <linux/mmc/card.h> 19#include <linux/mmc/card.h>
20#include <linux/mmc/host.h>
19#include <linux/mmc/sdio_func.h> 21#include <linux/mmc/sdio_func.h>
20 22
21#include "sdio_cis.h" 23#include "sdio_cis.h"
@@ -125,21 +127,51 @@ static int sdio_bus_probe(struct device *dev)
125 if (!id) 127 if (!id)
126 return -ENODEV; 128 return -ENODEV;
127 129
130 /* Unbound SDIO functions are always suspended.
131 * During probe, the function is set active and the usage count
132 * is incremented. If the driver supports runtime PM,
133 * it should call pm_runtime_put_noidle() in its probe routine and
134 * pm_runtime_get_noresume() in its remove routine.
135 */
136 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
137 ret = pm_runtime_get_sync(dev);
138 if (ret < 0)
139 goto out;
140 }
141
128 /* Set the default block size so the driver is sure it's something 142 /* Set the default block size so the driver is sure it's something
129 * sensible. */ 143 * sensible. */
130 sdio_claim_host(func); 144 sdio_claim_host(func);
131 ret = sdio_set_block_size(func, 0); 145 ret = sdio_set_block_size(func, 0);
132 sdio_release_host(func); 146 sdio_release_host(func);
133 if (ret) 147 if (ret)
134 return ret; 148 goto disable_runtimepm;
149
150 ret = drv->probe(func, id);
151 if (ret)
152 goto disable_runtimepm;
153
154 return 0;
135 155
136 return drv->probe(func, id); 156disable_runtimepm:
157 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
158 pm_runtime_put_noidle(dev);
159out:
160 return ret;
137} 161}
138 162
139static int sdio_bus_remove(struct device *dev) 163static int sdio_bus_remove(struct device *dev)
140{ 164{
141 struct sdio_driver *drv = to_sdio_driver(dev->driver); 165 struct sdio_driver *drv = to_sdio_driver(dev->driver);
142 struct sdio_func *func = dev_to_sdio_func(dev); 166 struct sdio_func *func = dev_to_sdio_func(dev);
167 int ret = 0;
168
169 /* Make sure card is powered before invoking ->remove() */
170 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
171 ret = pm_runtime_get_sync(dev);
172 if (ret < 0)
173 goto out;
174 }
143 175
144 drv->remove(func); 176 drv->remove(func);
145 177
@@ -151,9 +183,36 @@ static int sdio_bus_remove(struct device *dev)
151 sdio_release_host(func); 183 sdio_release_host(func);
152 } 184 }
153 185
154 return 0; 186 /* First, undo the increment made directly above */
187 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
188 pm_runtime_put_noidle(dev);
189
190 /* Then undo the runtime PM settings in sdio_bus_probe() */
191 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
192 pm_runtime_put_sync(dev);
193
194out:
195 return ret;
155} 196}
156 197
198#ifdef CONFIG_PM_RUNTIME
199
200static const struct dev_pm_ops sdio_bus_pm_ops = {
201 SET_RUNTIME_PM_OPS(
202 pm_generic_runtime_suspend,
203 pm_generic_runtime_resume,
204 pm_generic_runtime_idle
205 )
206};
207
208#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops)
209
210#else /* !CONFIG_PM_RUNTIME */
211
212#define SDIO_PM_OPS_PTR NULL
213
214#endif /* !CONFIG_PM_RUNTIME */
215
157static struct bus_type sdio_bus_type = { 216static struct bus_type sdio_bus_type = {
158 .name = "sdio", 217 .name = "sdio",
159 .dev_attrs = sdio_dev_attrs, 218 .dev_attrs = sdio_dev_attrs,
@@ -161,6 +220,7 @@ static struct bus_type sdio_bus_type = {
161 .uevent = sdio_bus_uevent, 220 .uevent = sdio_bus_uevent,
162 .probe = sdio_bus_probe, 221 .probe = sdio_bus_probe,
163 .remove = sdio_bus_remove, 222 .remove = sdio_bus_remove,
223 .pm = SDIO_PM_OPS_PTR,
164}; 224};
165 225
166int sdio_register_bus(void) 226int sdio_register_bus(void)
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index bb192f90e8e9..03ead028d2ce 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -31,6 +31,17 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
31{ 31{
32 int i, ret, count; 32 int i, ret, count;
33 unsigned char pending; 33 unsigned char pending;
34 struct sdio_func *func;
35
36 /*
37 * Optimization, if there is only 1 function interrupt registered
38 * call irq handler directly
39 */
40 func = card->sdio_single_irq;
41 if (func) {
42 func->irq_handler(func);
43 return 1;
44 }
34 45
35 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending); 46 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
36 if (ret) { 47 if (ret) {
@@ -42,10 +53,10 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
42 count = 0; 53 count = 0;
43 for (i = 1; i <= 7; i++) { 54 for (i = 1; i <= 7; i++) {
44 if (pending & (1 << i)) { 55 if (pending & (1 << i)) {
45 struct sdio_func *func = card->sdio_func[i - 1]; 56 func = card->sdio_func[i - 1];
46 if (!func) { 57 if (!func) {
47 printk(KERN_WARNING "%s: pending IRQ for " 58 printk(KERN_WARNING "%s: pending IRQ for "
48 "non-existant function\n", 59 "non-existent function\n",
49 mmc_card_id(card)); 60 mmc_card_id(card));
50 ret = -EINVAL; 61 ret = -EINVAL;
51 } else if (func->irq_handler) { 62 } else if (func->irq_handler) {
@@ -186,6 +197,24 @@ static int sdio_card_irq_put(struct mmc_card *card)
186 return 0; 197 return 0;
187} 198}
188 199
200/* If there is only 1 function registered set sdio_single_irq */
201static void sdio_single_irq_set(struct mmc_card *card)
202{
203 struct sdio_func *func;
204 int i;
205
206 card->sdio_single_irq = NULL;
207 if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
208 card->host->sdio_irqs == 1)
209 for (i = 0; i < card->sdio_funcs; i++) {
210 func = card->sdio_func[i];
211 if (func && func->irq_handler) {
212 card->sdio_single_irq = func;
213 break;
214 }
215 }
216}
217
189/** 218/**
190 * sdio_claim_irq - claim the IRQ for a SDIO function 219 * sdio_claim_irq - claim the IRQ for a SDIO function
191 * @func: SDIO function 220 * @func: SDIO function
@@ -227,6 +256,7 @@ int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
227 ret = sdio_card_irq_get(func->card); 256 ret = sdio_card_irq_get(func->card);
228 if (ret) 257 if (ret)
229 func->irq_handler = NULL; 258 func->irq_handler = NULL;
259 sdio_single_irq_set(func->card);
230 260
231 return ret; 261 return ret;
232} 262}
@@ -251,6 +281,7 @@ int sdio_release_irq(struct sdio_func *func)
251 if (func->irq_handler) { 281 if (func->irq_handler) {
252 func->irq_handler = NULL; 282 func->irq_handler = NULL;
253 sdio_card_irq_put(func->card); 283 sdio_card_irq_put(func->card);
284 sdio_single_irq_set(func->card);
254 } 285 }
255 286
256 ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg); 287 ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index dea36d9c22e6..f087d876c573 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -21,13 +21,11 @@
21 21
22int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) 22int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
23{ 23{
24 struct mmc_command cmd; 24 struct mmc_command cmd = {0};
25 int i, err = 0; 25 int i, err = 0;
26 26
27 BUG_ON(!host); 27 BUG_ON(!host);
28 28
29 memset(&cmd, 0, sizeof(struct mmc_command));
30
31 cmd.opcode = SD_IO_SEND_OP_COND; 29 cmd.opcode = SD_IO_SEND_OP_COND;
32 cmd.arg = ocr; 30 cmd.arg = ocr;
33 cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR; 31 cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR;
@@ -70,7 +68,7 @@ int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
70static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn, 68static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
71 unsigned addr, u8 in, u8 *out) 69 unsigned addr, u8 in, u8 *out)
72{ 70{
73 struct mmc_command cmd; 71 struct mmc_command cmd = {0};
74 int err; 72 int err;
75 73
76 BUG_ON(!host); 74 BUG_ON(!host);
@@ -80,8 +78,6 @@ static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
80 if (addr & ~0x1FFFF) 78 if (addr & ~0x1FFFF)
81 return -EINVAL; 79 return -EINVAL;
82 80
83 memset(&cmd, 0, sizeof(struct mmc_command));
84
85 cmd.opcode = SD_IO_RW_DIRECT; 81 cmd.opcode = SD_IO_RW_DIRECT;
86 cmd.arg = write ? 0x80000000 : 0x00000000; 82 cmd.arg = write ? 0x80000000 : 0x00000000;
87 cmd.arg |= fn << 28; 83 cmd.arg |= fn << 28;
@@ -125,9 +121,9 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
125int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, 121int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
126 unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz) 122 unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
127{ 123{
128 struct mmc_request mrq; 124 struct mmc_request mrq = {0};
129 struct mmc_command cmd; 125 struct mmc_command cmd = {0};
130 struct mmc_data data; 126 struct mmc_data data = {0};
131 struct scatterlist sg; 127 struct scatterlist sg;
132 128
133 BUG_ON(!card); 129 BUG_ON(!card);
@@ -140,10 +136,6 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
140 if (addr & ~0x1FFFF) 136 if (addr & ~0x1FFFF)
141 return -EINVAL; 137 return -EINVAL;
142 138
143 memset(&mrq, 0, sizeof(struct mmc_request));
144 memset(&cmd, 0, sizeof(struct mmc_command));
145 memset(&data, 0, sizeof(struct mmc_data));
146
147 mrq.cmd = &cmd; 139 mrq.cmd = &cmd;
148 mrq.data = &data; 140 mrq.data = &data;
149 141
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 68d12794cfd9..56dbf3f6ad08 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -83,7 +83,7 @@ config MMC_RICOH_MMC
83 83
84config MMC_SDHCI_OF 84config MMC_SDHCI_OF
85 tristate "SDHCI support on OpenFirmware platforms" 85 tristate "SDHCI support on OpenFirmware platforms"
86 depends on MMC_SDHCI && PPC_OF 86 depends on MMC_SDHCI && OF
87 help 87 help
88 This selects the OF support for Secure Digital Host Controller 88 This selects the OF support for Secure Digital Host Controller
89 Interfaces. 89 Interfaces.
@@ -93,6 +93,7 @@ config MMC_SDHCI_OF
93config MMC_SDHCI_OF_ESDHC 93config MMC_SDHCI_OF_ESDHC
94 bool "SDHCI OF support for the Freescale eSDHC controller" 94 bool "SDHCI OF support for the Freescale eSDHC controller"
95 depends on MMC_SDHCI_OF 95 depends on MMC_SDHCI_OF
96 depends on PPC_OF
96 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER 97 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
97 help 98 help
98 This selects the Freescale eSDHC controller support. 99 This selects the Freescale eSDHC controller support.
@@ -102,6 +103,7 @@ config MMC_SDHCI_OF_ESDHC
102config MMC_SDHCI_OF_HLWD 103config MMC_SDHCI_OF_HLWD
103 bool "SDHCI OF support for the Nintendo Wii SDHCI controllers" 104 bool "SDHCI OF support for the Nintendo Wii SDHCI controllers"
104 depends on MMC_SDHCI_OF 105 depends on MMC_SDHCI_OF
106 depends on PPC_OF
105 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER 107 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
106 help 108 help
107 This selects the Secure Digital Host Controller Interface (SDHCI) 109 This selects the Secure Digital Host Controller Interface (SDHCI)
@@ -130,6 +132,37 @@ config MMC_SDHCI_CNS3XXX
130 132
131 If unsure, say N. 133 If unsure, say N.
132 134
135config MMC_SDHCI_ESDHC_IMX
136 bool "SDHCI platform support for the Freescale eSDHC i.MX controller"
137 depends on MMC_SDHCI_PLTFM && (ARCH_MX25 || ARCH_MX35 || ARCH_MX5)
138 select MMC_SDHCI_IO_ACCESSORS
139 help
140 This selects the Freescale eSDHC controller support on the platform
141 bus, found on platforms like mx35/51.
142
143 If unsure, say N.
144
145config MMC_SDHCI_DOVE
146 bool "SDHCI support on Marvell's Dove SoC"
147 depends on ARCH_DOVE
148 depends on MMC_SDHCI_PLTFM
149 select MMC_SDHCI_IO_ACCESSORS
150 help
151 This selects the Secure Digital Host Controller Interface in
152 Marvell's Dove SoC.
153
154 If unsure, say N.
155
156config MMC_SDHCI_TEGRA
157 bool "SDHCI platform support for the Tegra SD/MMC Controller"
158 depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
159 select MMC_SDHCI_IO_ACCESSORS
160 help
161 This selects the Tegra SD/MMC controller. If you have a Tegra
162 platform with SD or MMC devices, say Y or M here.
163
164 If unsure, say N.
165
133config MMC_SDHCI_S3C 166config MMC_SDHCI_S3C
134 tristate "SDHCI support on Samsung S3C SoC" 167 tristate "SDHCI support on Samsung S3C SoC"
135 depends on MMC_SDHCI && PLAT_SAMSUNG 168 depends on MMC_SDHCI && PLAT_SAMSUNG
@@ -145,6 +178,18 @@ config MMC_SDHCI_S3C
145 178
146 If unsure, say N. 179 If unsure, say N.
147 180
181config MMC_SDHCI_PXA
182 tristate "Marvell PXA168/PXA910/MMP2 SD Host Controller support"
183 depends on ARCH_PXA || ARCH_MMP
184 select MMC_SDHCI
185 select MMC_SDHCI_IO_ACCESSORS
186 help
187 This selects the Marvell(R) PXA168/PXA910/MMP2 SD Host Controller.
188 If you have a PXA168/PXA910/MMP2 platform with SD Host Controller
189 and a card slot, say Y or M here.
190
191 If unsure, say N.
192
148config MMC_SDHCI_SPEAR 193config MMC_SDHCI_SPEAR
149 tristate "SDHCI support on ST SPEAr platform" 194 tristate "SDHCI support on ST SPEAr platform"
150 depends on MMC_SDHCI && PLAT_SPEAR 195 depends on MMC_SDHCI && PLAT_SPEAR
@@ -180,7 +225,7 @@ config MMC_OMAP
180 225
181config MMC_OMAP_HS 226config MMC_OMAP_HS
182 tristate "TI OMAP High Speed Multimedia Card Interface support" 227 tristate "TI OMAP High Speed Multimedia Card Interface support"
183 depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4 228 depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
184 help 229 help
185 This selects the TI OMAP High Speed Multimedia card Interface. 230 This selects the TI OMAP High Speed Multimedia card Interface.
186 If you have an OMAP2430 or OMAP3 board or OMAP4 board with a 231 If you have an OMAP2430 or OMAP3 board or OMAP4 board with a
@@ -237,7 +282,7 @@ endchoice
237 282
238config MMC_ATMELMCI_DMA 283config MMC_ATMELMCI_DMA
239 bool "Atmel MCI DMA support (EXPERIMENTAL)" 284 bool "Atmel MCI DMA support (EXPERIMENTAL)"
240 depends on MMC_ATMELMCI && AVR32 && DMA_ENGINE && EXPERIMENTAL 285 depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE && EXPERIMENTAL
241 help 286 help
242 Say Y here to have the Atmel MCI driver use a DMA engine to 287 Say Y here to have the Atmel MCI driver use a DMA engine to
243 do data transfers and thus increase the throughput and 288 do data transfers and thus increase the throughput and
@@ -266,7 +311,7 @@ config MMC_MSM
266 311
267config MMC_MXC 312config MMC_MXC
268 tristate "Freescale i.MX2/3 Multimedia Card Interface support" 313 tristate "Freescale i.MX2/3 Multimedia Card Interface support"
269 depends on ARCH_MXC 314 depends on MACH_MX21 || MACH_MX27 || ARCH_MX31
270 help 315 help
271 This selects the Freescale i.MX2/3 Multimedia card Interface. 316 This selects the Freescale i.MX2/3 Multimedia card Interface.
272 If you have a i.MX platform with a Multimedia Card slot, 317 If you have a i.MX platform with a Multimedia Card slot,
@@ -274,6 +319,15 @@ config MMC_MXC
274 319
275 If unsure, say N. 320 If unsure, say N.
276 321
322config MMC_MXS
323 tristate "Freescale MXS Multimedia Card Interface support"
324 depends on ARCH_MXS && MXS_DMA
325 help
326 This selects the Freescale SSP MMC controller found on MXS based
327 platforms like mx23/28.
328
329 If unsure, say N.
330
277config MMC_TIFM_SD 331config MMC_TIFM_SD
278 tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)" 332 tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
279 depends on EXPERIMENTAL && PCI 333 depends on EXPERIMENTAL && PCI
@@ -385,16 +439,29 @@ config MMC_SDRICOH_CS
385 To compile this driver as a module, choose M here: the 439 To compile this driver as a module, choose M here: the
386 module will be called sdricoh_cs. 440 module will be called sdricoh_cs.
387 441
442config MMC_TMIO_CORE
443 tristate
444
388config MMC_TMIO 445config MMC_TMIO
389 tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" 446 tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
390 depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI 447 depends on MFD_TMIO || MFD_ASIC3
448 select MMC_TMIO_CORE
391 help 449 help
392 This provides support for the SD/MMC cell found in TC6393XB, 450 This provides support for the SD/MMC cell found in TC6393XB,
393 T7L66XB and also HTC ASIC3 451 T7L66XB and also HTC ASIC3
394 452
453config MMC_SDHI
454 tristate "SH-Mobile SDHI SD/SDIO controller support"
455 depends on SUPERH || ARCH_SHMOBILE
456 select MMC_TMIO_CORE
457 help
458 This provides support for the SDHI SD/SDIO controller found in
459 SuperH and ARM SH-Mobile SoCs
460
395config MMC_CB710 461config MMC_CB710
396 tristate "ENE CB710 MMC/SD Interface support" 462 tristate "ENE CB710 MMC/SD Interface support"
397 depends on PCI 463 depends on PCI
464 select MISC_DEVICES
398 select CB710_CORE 465 select CB710_CORE
399 help 466 help
400 This option enables support for MMC/SD part of ENE CB710/720 Flash 467 This option enables support for MMC/SD part of ENE CB710/720 Flash
@@ -435,11 +502,27 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
435 help 502 help
436 If you say yes here SD-Cards may work on the EZkit. 503 If you say yes here SD-Cards may work on the EZkit.
437 504
505config MMC_DW
506 tristate "Synopsys DesignWare Memory Card Interface"
507 depends on ARM
508 help
509 This selects support for the Synopsys DesignWare Mobile Storage IP
510 block, this provides host support for SD and MMC interfaces, in both
511 PIO and external DMA modes.
512
513config MMC_DW_IDMAC
514 bool "Internal DMAC interface"
515 depends on MMC_DW
516 help
517 This selects support for the internal DMAC block within the Synopsys
518 Designware Mobile Storage IP block. This disables the external DMA
519 interface.
520
438config MMC_SH_MMCIF 521config MMC_SH_MMCIF
439 tristate "SuperH Internal MMCIF support" 522 tristate "SuperH Internal MMCIF support"
440 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE) 523 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
441 help 524 help
442 This selects the MMC Host Interface controler (MMCIF). 525 This selects the MMC Host Interface controller (MMCIF).
443 526
444 This driver supports MMCIF in sh7724/sh7757/sh7372. 527 This driver supports MMCIF in sh7724/sh7757/sh7372.
445 528
@@ -451,3 +534,48 @@ config MMC_JZ4740
451 SoCs. 534 SoCs.
452 If you have a board based on such a SoC and with a SD/MMC slot, 535 If you have a board based on such a SoC and with a SD/MMC slot,
453 say Y or M here. 536 say Y or M here.
537
538config MMC_VUB300
539 tristate "VUB300 USB to SDIO/SD/MMC Host Controller support"
540 depends on USB
541 help
542 This selects support for Elan Digital Systems' VUB300 chip.
543
544 The VUB300 is a USB-SDIO Host Controller Interface chip
545 that enables the host computer to use SDIO/SD/MMC cards
546 via a USB 2.0 or USB 1.1 host.
547
548 The VUB300 chip will be found in both physically separate
549 USB to SDIO/SD/MMC adapters and embedded on some motherboards.
550
551 The VUB300 chip supports SD and MMC memory cards in addition
552 to single and multifunction SDIO cards.
553
554 Some SDIO cards will need a firmware file to be loaded and
555 sent to VUB300 chip in order to achieve better data throughput.
556 Download these "Offload Pseudocode" from Elan Digital Systems'
557 web-site http://www.elandigitalsystems.com/support/downloads.php
558 and put them in /lib/firmware. Note that without these additional
559 firmware files the VUB300 chip will still function, but not at
560 the best obtainable data rate.
561
562 To compile this mmc host controller driver as a module,
563 choose M here: the module will be called vub300.
564
565 If you have a computer with an embedded VUB300 chip
566 or if you intend connecting a USB adapter based on a
567 VUB300 chip say Y or M here.
568
569config MMC_USHC
570 tristate "USB SD Host Controller (USHC) support"
571 depends on USB
572 help
573 This selects support for USB SD Host Controllers based on
574 the Cypress Astoria chip with firmware compliant with CSR's
575 USB SD Host Controller specification (CS-118793-SP).
576
577 CSR boards with this device include: USB<>SDIO (M1985v2),
578 and Ultrasira.
579
580 Note: These controllers only support SDIO cards and do not
581 support MMC or SD memory cards.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 840bcb52d82f..58a5cf73d6e9 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -2,16 +2,14 @@
2# Makefile for MMC/SD host controller drivers 2# Makefile for MMC/SD host controller drivers
3# 3#
4 4
5ifeq ($(CONFIG_MMC_DEBUG),y)
6 EXTRA_CFLAGS += -DDEBUG
7endif
8
9obj-$(CONFIG_MMC_ARMMMCI) += mmci.o 5obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
10obj-$(CONFIG_MMC_PXA) += pxamci.o 6obj-$(CONFIG_MMC_PXA) += pxamci.o
11obj-$(CONFIG_MMC_IMX) += imxmmc.o 7obj-$(CONFIG_MMC_IMX) += imxmmc.o
12obj-$(CONFIG_MMC_MXC) += mxcmmc.o 8obj-$(CONFIG_MMC_MXC) += mxcmmc.o
9obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
13obj-$(CONFIG_MMC_SDHCI) += sdhci.o 10obj-$(CONFIG_MMC_SDHCI) += sdhci.o
14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 11obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
12obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o
15obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o 13obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
16obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o 14obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
17obj-$(CONFIG_MMC_WBSD) += wbsd.o 15obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -31,15 +29,27 @@ endif
31obj-$(CONFIG_MMC_S3C) += s3cmci.o 29obj-$(CONFIG_MMC_S3C) += s3cmci.o
32obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o 30obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
33obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o 31obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
34obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 32obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
33tmio_mmc_core-y := tmio_mmc_pio.o
34ifneq ($(CONFIG_MMC_SDHI),n)
35tmio_mmc_core-y += tmio_mmc_dma.o
36endif
37obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
38obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
35obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 39obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
36obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 40obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
41obj-$(CONFIG_MMC_DW) += dw_mmc.o
37obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o 42obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
38obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o 43obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
44obj-$(CONFIG_MMC_VUB300) += vub300.o
45obj-$(CONFIG_MMC_USHC) += ushc.o
39 46
40obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o 47obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
41sdhci-platform-y := sdhci-pltfm.o 48sdhci-platform-y := sdhci-pltfm.o
42sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o 49sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
50sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
51sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
52sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
43 53
44obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o 54obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
45sdhci-of-y := sdhci-of-core.o 55sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 87226cd202a5..d3e6a962f423 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -69,6 +69,7 @@
69#include <linux/highmem.h> 69#include <linux/highmem.h>
70 70
71#include <linux/mmc/host.h> 71#include <linux/mmc/host.h>
72#include <linux/mmc/sdio.h>
72 73
73#include <asm/io.h> 74#include <asm/io.h>
74#include <asm/irq.h> 75#include <asm/irq.h>
@@ -493,10 +494,14 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
493 else if (data->flags & MMC_DATA_WRITE) 494 else if (data->flags & MMC_DATA_WRITE)
494 cmdr |= AT91_MCI_TRCMD_START; 495 cmdr |= AT91_MCI_TRCMD_START;
495 496
496 if (data->flags & MMC_DATA_STREAM) 497 if (cmd->opcode == SD_IO_RW_EXTENDED) {
497 cmdr |= AT91_MCI_TRTYP_STREAM; 498 cmdr |= AT91_MCI_TRTYP_SDIO_BLOCK;
498 if (data->blocks > 1) 499 } else {
499 cmdr |= AT91_MCI_TRTYP_MULTIPLE; 500 if (data->flags & MMC_DATA_STREAM)
501 cmdr |= AT91_MCI_TRTYP_STREAM;
502 if (data->blocks > 1)
503 cmdr |= AT91_MCI_TRTYP_MULTIPLE;
504 }
500 } 505 }
501 else { 506 else {
502 block_length = 0; 507 block_length = 0;
@@ -928,7 +933,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
928 if (!res) 933 if (!res)
929 return -ENXIO; 934 return -ENXIO;
930 935
931 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME)) 936 if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME))
932 return -EBUSY; 937 return -EBUSY;
933 938
934 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev); 939 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
@@ -947,8 +952,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
947 mmc->max_blk_size = MCI_MAXBLKSIZE; 952 mmc->max_blk_size = MCI_MAXBLKSIZE;
948 mmc->max_blk_count = MCI_BLKATONCE; 953 mmc->max_blk_count = MCI_BLKATONCE;
949 mmc->max_req_size = MCI_BUFSIZE; 954 mmc->max_req_size = MCI_BUFSIZE;
950 mmc->max_phys_segs = MCI_BLKATONCE; 955 mmc->max_segs = MCI_BLKATONCE;
951 mmc->max_hw_segs = MCI_BLKATONCE;
952 mmc->max_seg_size = MCI_BUFSIZE; 956 mmc->max_seg_size = MCI_BUFSIZE;
953 957
954 host = mmc_priv(mmc); 958 host = mmc_priv(mmc);
@@ -1017,7 +1021,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
1017 /* 1021 /*
1018 * Map I/O region 1022 * Map I/O region
1019 */ 1023 */
1020 host->baseaddr = ioremap(res->start, res->end - res->start + 1); 1024 host->baseaddr = ioremap(res->start, resource_size(res));
1021 if (!host->baseaddr) { 1025 if (!host->baseaddr) {
1022 ret = -ENOMEM; 1026 ret = -ENOMEM;
1023 goto fail1; 1027 goto fail1;
@@ -1093,7 +1097,7 @@ fail4b:
1093fail5: 1097fail5:
1094 mmc_free_host(mmc); 1098 mmc_free_host(mmc);
1095fail6: 1099fail6:
1096 release_mem_region(res->start, res->end - res->start + 1); 1100 release_mem_region(res->start, resource_size(res));
1097 dev_err(&pdev->dev, "probe failed, err %d\n", ret); 1101 dev_err(&pdev->dev, "probe failed, err %d\n", ret);
1098 return ret; 1102 return ret;
1099} 1103}
@@ -1138,7 +1142,7 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
1138 1142
1139 iounmap(host->baseaddr); 1143 iounmap(host->baseaddr);
1140 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1144 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1141 release_mem_region(res->start, res->end - res->start + 1); 1145 release_mem_region(res->start, resource_size(res));
1142 1146
1143 mmc_free_host(mmc); 1147 mmc_free_host(mmc);
1144 platform_set_drvdata(pdev, NULL); 1148 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 95ef864ad8f9..aa8039f473c4 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -26,6 +26,7 @@
26#include <linux/stat.h> 26#include <linux/stat.h>
27 27
28#include <linux/mmc/host.h> 28#include <linux/mmc/host.h>
29#include <linux/mmc/sdio.h>
29 30
30#include <mach/atmel-mci.h> 31#include <mach/atmel-mci.h>
31#include <linux/atmel-mci.h> 32#include <linux/atmel-mci.h>
@@ -126,7 +127,7 @@ struct atmel_mci_dma {
126 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related 127 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
127 * interrupts must be disabled and @data_status updated with a 128 * interrupts must be disabled and @data_status updated with a
128 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the 129 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
129 * CMDRDY interupt must be disabled and @cmd_status updated with a 130 * CMDRDY interrupt must be disabled and @cmd_status updated with a
130 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the 131 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
131 * bytes_xfered field of @data must be written. This is ensured by 132 * bytes_xfered field of @data must be written. This is ensured by
132 * using barriers. 133 * using barriers.
@@ -532,12 +533,17 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
532 data = cmd->data; 533 data = cmd->data;
533 if (data) { 534 if (data) {
534 cmdr |= MCI_CMDR_START_XFER; 535 cmdr |= MCI_CMDR_START_XFER;
535 if (data->flags & MMC_DATA_STREAM) 536
536 cmdr |= MCI_CMDR_STREAM; 537 if (cmd->opcode == SD_IO_RW_EXTENDED) {
537 else if (data->blocks > 1) 538 cmdr |= MCI_CMDR_SDIO_BLOCK;
538 cmdr |= MCI_CMDR_MULTI_BLOCK; 539 } else {
539 else 540 if (data->flags & MMC_DATA_STREAM)
540 cmdr |= MCI_CMDR_BLOCK; 541 cmdr |= MCI_CMDR_STREAM;
542 else if (data->blocks > 1)
543 cmdr |= MCI_CMDR_MULTI_BLOCK;
544 else
545 cmdr |= MCI_CMDR_BLOCK;
546 }
541 547
542 if (data->flags & MMC_DATA_READ) 548 if (data->flags & MMC_DATA_READ)
543 cmdr |= MCI_CMDR_TRDIR_READ; 549 cmdr |= MCI_CMDR_TRDIR_READ;
@@ -572,7 +578,8 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
572 struct mmc_data *data = host->data; 578 struct mmc_data *data = host->data;
573 579
574 if (data) 580 if (data)
575 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, 581 dma_unmap_sg(host->dma.chan->device->dev,
582 data->sg, data->sg_len,
576 ((data->flags & MMC_DATA_WRITE) 583 ((data->flags & MMC_DATA_WRITE)
577 ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); 584 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
578} 585}
@@ -582,7 +589,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
582 struct dma_chan *chan = host->data_chan; 589 struct dma_chan *chan = host->data_chan;
583 590
584 if (chan) { 591 if (chan) {
585 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 592 dmaengine_terminate_all(chan);
586 atmci_dma_cleanup(host); 593 atmci_dma_cleanup(host);
587 } else { 594 } else {
588 /* Data transfer was stopped by the interrupt handler */ 595 /* Data transfer was stopped by the interrupt handler */
@@ -678,11 +685,11 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
678 else 685 else
679 direction = DMA_TO_DEVICE; 686 direction = DMA_TO_DEVICE;
680 687
681 sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction); 688 sglen = dma_map_sg(chan->device->dev, data->sg,
682 if (sglen != data->sg_len) 689 data->sg_len, direction);
683 goto unmap_exit; 690
684 desc = chan->device->device_prep_slave_sg(chan, 691 desc = chan->device->device_prep_slave_sg(chan,
685 data->sg, data->sg_len, direction, 692 data->sg, sglen, direction,
686 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 693 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
687 if (!desc) 694 if (!desc)
688 goto unmap_exit; 695 goto unmap_exit;
@@ -693,7 +700,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
693 700
694 return 0; 701 return 0;
695unmap_exit: 702unmap_exit:
696 dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction); 703 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
697 return -ENOMEM; 704 return -ENOMEM;
698} 705}
699 706
@@ -703,8 +710,8 @@ static void atmci_submit_data(struct atmel_mci *host)
703 struct dma_async_tx_descriptor *desc = host->dma.data_desc; 710 struct dma_async_tx_descriptor *desc = host->dma.data_desc;
704 711
705 if (chan) { 712 if (chan) {
706 desc->tx_submit(desc); 713 dmaengine_submit(desc);
707 chan->device->device_issue_pending(chan); 714 dma_async_issue_pending(chan);
708 } 715 }
709} 716}
710 717
@@ -1075,7 +1082,7 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1075 /* 1082 /*
1076 * Update the MMC clock rate if necessary. This may be 1083 * Update the MMC clock rate if necessary. This may be
1077 * necessary if set_ios() is called when a different slot is 1084 * necessary if set_ios() is called when a different slot is
1078 * busy transfering data. 1085 * busy transferring data.
1079 */ 1086 */
1080 if (host->need_clock_update) { 1087 if (host->need_clock_update) {
1081 mci_writel(host, MR, host->mode_reg); 1088 mci_writel(host, MR, host->mode_reg);
@@ -1618,8 +1625,7 @@ static int __init atmci_init_slot(struct atmel_mci *host,
1618 if (slot_data->bus_width >= 4) 1625 if (slot_data->bus_width >= 4)
1619 mmc->caps |= MMC_CAP_4_BIT_DATA; 1626 mmc->caps |= MMC_CAP_4_BIT_DATA;
1620 1627
1621 mmc->max_hw_segs = 64; 1628 mmc->max_segs = 64;
1622 mmc->max_phys_segs = 64;
1623 mmc->max_req_size = 32768 * 512; 1629 mmc->max_req_size = 32768 * 512;
1624 mmc->max_blk_size = 32768; 1630 mmc->max_blk_size = 32768;
1625 mmc->max_blk_count = 512; 1631 mmc->max_blk_count = 512;
@@ -1777,7 +1783,7 @@ static int __init atmci_probe(struct platform_device *pdev)
1777 } 1783 }
1778 1784
1779 ret = -ENOMEM; 1785 ret = -ENOMEM;
1780 host->regs = ioremap(regs->start, regs->end - regs->start + 1); 1786 host->regs = ioremap(regs->start, resource_size(regs));
1781 if (!host->regs) 1787 if (!host->regs)
1782 goto err_ioremap; 1788 goto err_ioremap;
1783 1789
@@ -1893,5 +1899,5 @@ late_initcall(atmci_init); /* try to load after dma driver when built-in */
1893module_exit(atmci_exit); 1899module_exit(atmci_exit);
1894 1900
1895MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); 1901MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
1896MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>"); 1902MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1897MODULE_LICENSE("GPL v2"); 1903MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index c8da5d30a861..ef72e874ca36 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -192,7 +192,7 @@ static inline void SEND_STOP(struct au1xmmc_host *host)
192 au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host)); 192 au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
193 au_sync(); 193 au_sync();
194 194
195 /* Send the stop commmand */ 195 /* Send the stop command */
196 au_writel(STOP_CMD, HOST_CMD(host)); 196 au_writel(STOP_CMD, HOST_CMD(host));
197} 197}
198 198
@@ -964,7 +964,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
964 goto out1; 964 goto out1;
965 } 965 }
966 966
967 host->ioarea = request_mem_region(r->start, r->end - r->start + 1, 967 host->ioarea = request_mem_region(r->start, resource_size(r),
968 pdev->name); 968 pdev->name);
969 if (!host->ioarea) { 969 if (!host->ioarea) {
970 dev_err(&pdev->dev, "mmio already in use\n"); 970 dev_err(&pdev->dev, "mmio already in use\n");
@@ -998,7 +998,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
998 mmc->f_max = 24000000; 998 mmc->f_max = 24000000;
999 999
1000 mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; 1000 mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
1001 mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; 1001 mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
1002 1002
1003 mmc->max_blk_size = 2048; 1003 mmc->max_blk_size = 2048;
1004 mmc->max_blk_count = 512; 1004 mmc->max_blk_count = 512;
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 4b0e677d7295..0371bf502249 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -462,14 +462,14 @@ static int __devinit sdh_probe(struct platform_device *pdev)
462 goto out; 462 goto out;
463 } 463 }
464 464
465 mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev); 465 mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);
466 if (!mmc) { 466 if (!mmc) {
467 ret = -ENOMEM; 467 ret = -ENOMEM;
468 goto out; 468 goto out;
469 } 469 }
470 470
471 mmc->ops = &sdh_ops; 471 mmc->ops = &sdh_ops;
472 mmc->max_phys_segs = 32; 472 mmc->max_segs = 32;
473 mmc->max_seg_size = 1 << 16; 473 mmc->max_seg_size = 1 << 16;
474 mmc->max_blk_size = 1 << 11; 474 mmc->max_blk_size = 1 << 11;
475 mmc->max_blk_count = 1 << 11; 475 mmc->max_blk_count = 1 << 11;
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index ca3bdc831900..ce2a47b71dd6 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -25,7 +25,7 @@ static const u8 cb710_src_freq_mhz[16] = {
25 50, 55, 60, 65, 70, 75, 80, 85 25 50, 55, 60, 65, 70, 75, 80, 85
26}; 26};
27 27
28static void cb710_mmc_set_clock(struct mmc_host *mmc, int hz) 28static void cb710_mmc_select_clock_divider(struct mmc_host *mmc, int hz)
29{ 29{
30 struct cb710_slot *slot = cb710_mmc_to_slot(mmc); 30 struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
31 struct pci_dev *pdev = cb710_slot_to_chip(slot)->pdev; 31 struct pci_dev *pdev = cb710_slot_to_chip(slot)->pdev;
@@ -33,8 +33,11 @@ static void cb710_mmc_set_clock(struct mmc_host *mmc, int hz)
33 u32 divider_idx; 33 u32 divider_idx;
34 int src_hz; 34 int src_hz;
35 35
36 /* this is magic, unverifiable for me, unless I get 36 /* on CB710 in HP nx9500:
37 * MMC card with cables connected to bus signals */ 37 * src_freq_idx == 0
38 * indexes 1-7 work as written in the table
39 * indexes 0,8-15 give no clock output
40 */
38 pci_read_config_dword(pdev, 0x48, &src_freq_idx); 41 pci_read_config_dword(pdev, 0x48, &src_freq_idx);
39 src_freq_idx = (src_freq_idx >> 16) & 0xF; 42 src_freq_idx = (src_freq_idx >> 16) & 0xF;
40 src_hz = cb710_src_freq_mhz[src_freq_idx] * 1000000; 43 src_hz = cb710_src_freq_mhz[src_freq_idx] * 1000000;
@@ -46,13 +49,15 @@ static void cb710_mmc_set_clock(struct mmc_host *mmc, int hz)
46 49
47 if (src_freq_idx) 50 if (src_freq_idx)
48 divider_idx |= 0x8; 51 divider_idx |= 0x8;
52 else if (divider_idx == 0)
53 divider_idx = 1;
49 54
50 cb710_pci_update_config_reg(pdev, 0x40, ~0xF0000000, divider_idx << 28); 55 cb710_pci_update_config_reg(pdev, 0x40, ~0xF0000000, divider_idx << 28);
51 56
52 dev_dbg(cb710_slot_dev(slot), 57 dev_dbg(cb710_slot_dev(slot),
53 "clock set to %d Hz, wanted %d Hz; flag = %d\n", 58 "clock set to %d Hz, wanted %d Hz; src_freq_idx = %d, divider_idx = %d|%d\n",
54 src_hz >> cb710_clock_divider_log2[divider_idx & 7], 59 src_hz >> cb710_clock_divider_log2[divider_idx & 7],
55 hz, (divider_idx & 8) != 0); 60 hz, src_freq_idx, divider_idx & 7, divider_idx & 8);
56} 61}
57 62
58static void __cb710_mmc_enable_irq(struct cb710_slot *slot, 63static void __cb710_mmc_enable_irq(struct cb710_slot *slot,
@@ -95,16 +100,8 @@ static void cb710_mmc_reset_events(struct cb710_slot *slot)
95 cb710_write_port_8(slot, CB710_MMC_STATUS2_PORT, 0xFF); 100 cb710_write_port_8(slot, CB710_MMC_STATUS2_PORT, 0xFF);
96} 101}
97 102
98static int cb710_mmc_is_card_inserted(struct cb710_slot *slot)
99{
100 return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
101 & CB710_MMC_S3_CARD_DETECTED;
102}
103
104static void cb710_mmc_enable_4bit_data(struct cb710_slot *slot, int enable) 103static void cb710_mmc_enable_4bit_data(struct cb710_slot *slot, int enable)
105{ 104{
106 dev_dbg(cb710_slot_dev(slot), "configuring %d-data-line%s mode\n",
107 enable ? 4 : 1, enable ? "s" : "");
108 if (enable) 105 if (enable)
109 cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 106 cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT,
110 CB710_MMC_C1_4BIT_DATA_BUS, 0); 107 CB710_MMC_C1_4BIT_DATA_BUS, 0);
@@ -208,7 +205,7 @@ static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask)
208 "WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n", 205 "WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n",
209 limit, mask, e, x); 206 limit, mask, e, x);
210#endif 207#endif
211 return 0; 208 return err;
212} 209}
213 210
214static void cb710_mmc_set_transfer_size(struct cb710_slot *slot, 211static void cb710_mmc_set_transfer_size(struct cb710_slot *slot,
@@ -494,13 +491,8 @@ static void cb710_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
494 reader->mrq = mrq; 491 reader->mrq = mrq;
495 cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0); 492 cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0);
496 493
497 if (cb710_mmc_is_card_inserted(slot)) { 494 if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop)
498 if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop) 495 cb710_mmc_command(mmc, mrq->stop);
499 cb710_mmc_command(mmc, mrq->stop);
500 mdelay(1);
501 } else {
502 mrq->cmd->error = -ENOMEDIUM;
503 }
504 496
505 tasklet_schedule(&reader->finish_req_tasklet); 497 tasklet_schedule(&reader->finish_req_tasklet);
506} 498}
@@ -512,7 +504,7 @@ static int cb710_mmc_powerup(struct cb710_slot *slot)
512#endif 504#endif
513 int err; 505 int err;
514 506
515 /* a lot of magic; see comment in cb710_mmc_set_clock() */ 507 /* a lot of magic for now */
516 dev_dbg(cb710_slot_dev(slot), "bus powerup\n"); 508 dev_dbg(cb710_slot_dev(slot), "bus powerup\n");
517 cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); 509 cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
518 err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); 510 err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
@@ -572,13 +564,7 @@ static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
572 struct cb710_mmc_reader *reader = mmc_priv(mmc); 564 struct cb710_mmc_reader *reader = mmc_priv(mmc);
573 int err; 565 int err;
574 566
575 cb710_mmc_set_clock(mmc, ios->clock); 567 cb710_mmc_select_clock_divider(mmc, ios->clock);
576
577 if (!cb710_mmc_is_card_inserted(slot)) {
578 dev_dbg(cb710_slot_dev(slot),
579 "no card inserted - ignoring bus powerup request\n");
580 ios->power_mode = MMC_POWER_OFF;
581 }
582 568
583 if (ios->power_mode != reader->last_power_mode) 569 if (ios->power_mode != reader->last_power_mode)
584 switch (ios->power_mode) { 570 switch (ios->power_mode) {
@@ -619,6 +605,14 @@ static int cb710_mmc_get_ro(struct mmc_host *mmc)
619 & CB710_MMC_S3_WRITE_PROTECTED; 605 & CB710_MMC_S3_WRITE_PROTECTED;
620} 606}
621 607
608static int cb710_mmc_get_cd(struct mmc_host *mmc)
609{
610 struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
611
612 return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
613 & CB710_MMC_S3_CARD_DETECTED;
614}
615
622static int cb710_mmc_irq_handler(struct cb710_slot *slot) 616static int cb710_mmc_irq_handler(struct cb710_slot *slot)
623{ 617{
624 struct mmc_host *mmc = cb710_slot_to_mmc(slot); 618 struct mmc_host *mmc = cb710_slot_to_mmc(slot);
@@ -664,7 +658,8 @@ static void cb710_mmc_finish_request_tasklet(unsigned long data)
664static const struct mmc_host_ops cb710_mmc_host = { 658static const struct mmc_host_ops cb710_mmc_host = {
665 .request = cb710_mmc_request, 659 .request = cb710_mmc_request,
666 .set_ios = cb710_mmc_set_ios, 660 .set_ios = cb710_mmc_set_ios,
667 .get_ro = cb710_mmc_get_ro 661 .get_ro = cb710_mmc_get_ro,
662 .get_cd = cb710_mmc_get_cd,
668}; 663};
669 664
670#ifdef CONFIG_PM 665#ifdef CONFIG_PM
@@ -746,6 +741,7 @@ static int __devinit cb710_mmc_init(struct platform_device *pdev)
746err_free_mmc: 741err_free_mmc:
747 dev_dbg(cb710_slot_dev(slot), "mmc_add_host() failed: %d\n", err); 742 dev_dbg(cb710_slot_dev(slot), "mmc_add_host() failed: %d\n", err);
748 743
744 cb710_set_irq_handler(slot, NULL);
749 mmc_free_host(mmc); 745 mmc_free_host(mmc);
750 return err; 746 return err;
751} 747}
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 33d9f1b00862..0076c7448fe6 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -66,8 +66,8 @@
66#define DAVINCI_MMCBLNC 0x60 66#define DAVINCI_MMCBLNC 0x60
67#define DAVINCI_SDIOCTL 0x64 67#define DAVINCI_SDIOCTL 0x64
68#define DAVINCI_SDIOST0 0x68 68#define DAVINCI_SDIOST0 0x68
69#define DAVINCI_SDIOEN 0x6C 69#define DAVINCI_SDIOIEN 0x6C
70#define DAVINCI_SDIOST 0x70 70#define DAVINCI_SDIOIST 0x70
71#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ 71#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
72 72
73/* DAVINCI_MMCCTL definitions */ 73/* DAVINCI_MMCCTL definitions */
@@ -131,6 +131,14 @@
131#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ 131#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
132#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ 132#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
133 133
134/* DAVINCI_SDIOST0 definitions */
135#define SDIOST0_DAT1_HI BIT(0)
136
137/* DAVINCI_SDIOIEN definitions */
138#define SDIOIEN_IOINTEN BIT(0)
139
140/* DAVINCI_SDIOIST definitions */
141#define SDIOIST_IOINT BIT(0)
134 142
135/* MMCSD Init clock in Hz in opendrain mode */ 143/* MMCSD Init clock in Hz in opendrain mode */
136#define MMCSD_INIT_CLOCK 200000 144#define MMCSD_INIT_CLOCK 200000
@@ -138,7 +146,7 @@
138/* 146/*
139 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, 147 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
140 * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only 148 * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
141 * for drivers with max_hw_segs == 1, making the segments bigger (64KB) 149 * for drivers with max_segs == 1, making the segments bigger (64KB)
142 * than the page or two that's otherwise typical. nr_sg (passed from 150 * than the page or two that's otherwise typical. nr_sg (passed from
143 * platform data) == 16 gives at least the same throughput boost, using 151 * platform data) == 16 gives at least the same throughput boost, using
144 * EDMA transfer linkage instead of spending CPU time copying pages. 152 * EDMA transfer linkage instead of spending CPU time copying pages.
@@ -164,7 +172,7 @@ struct mmc_davinci_host {
164 unsigned int mmc_input_clk; 172 unsigned int mmc_input_clk;
165 void __iomem *base; 173 void __iomem *base;
166 struct resource *mem_res; 174 struct resource *mem_res;
167 int irq; 175 int mmc_irq, sdio_irq;
168 unsigned char bus_mode; 176 unsigned char bus_mode;
169 177
170#define DAVINCI_MMC_DATADIR_NONE 0 178#define DAVINCI_MMC_DATADIR_NONE 0
@@ -184,6 +192,7 @@ struct mmc_davinci_host {
184 u32 rxdma, txdma; 192 u32 rxdma, txdma;
185 bool use_dma; 193 bool use_dma;
186 bool do_dma; 194 bool do_dma;
195 bool sdio_int;
187 196
188 /* Scatterlist DMA uses one or more parameter RAM entries: 197 /* Scatterlist DMA uses one or more parameter RAM entries:
189 * the main one (associated with rxdma or txdma) plus zero or 198 * the main one (associated with rxdma or txdma) plus zero or
@@ -480,7 +489,7 @@ static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
480 struct scatterlist *sg; 489 struct scatterlist *sg;
481 unsigned sg_len; 490 unsigned sg_len;
482 unsigned bytes_left = host->bytes_left; 491 unsigned bytes_left = host->bytes_left;
483 const unsigned shift = ffs(rw_threshold) - 1;; 492 const unsigned shift = ffs(rw_threshold) - 1;
484 493
485 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 494 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
486 template = &host->tx_template; 495 template = &host->tx_template;
@@ -866,6 +875,19 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
866{ 875{
867 host->data = NULL; 876 host->data = NULL;
868 877
878 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
879 /*
880 * SDIO Interrupt Detection work-around as suggested by
881 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
882 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
883 */
884 if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
885 SDIOST0_DAT1_HI)) {
886 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
887 mmc_signal_sdio_irq(host->mmc);
888 }
889 }
890
869 if (host->do_dma) { 891 if (host->do_dma) {
870 davinci_abort_dma(host); 892 davinci_abort_dma(host);
871 893
@@ -932,6 +954,21 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
932 mmc_davinci_reset_ctrl(host, 0); 954 mmc_davinci_reset_ctrl(host, 0);
933} 955}
934 956
957static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
958{
959 struct mmc_davinci_host *host = dev_id;
960 unsigned int status;
961
962 status = readl(host->base + DAVINCI_SDIOIST);
963 if (status & SDIOIST_IOINT) {
964 dev_dbg(mmc_dev(host->mmc),
965 "SDIO interrupt status %x\n", status);
966 writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
967 mmc_signal_sdio_irq(host->mmc);
968 }
969 return IRQ_HANDLED;
970}
971
935static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 972static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
936{ 973{
937 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; 974 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
@@ -1076,11 +1113,32 @@ static int mmc_davinci_get_ro(struct mmc_host *mmc)
1076 return config->get_ro(pdev->id); 1113 return config->get_ro(pdev->id);
1077} 1114}
1078 1115
1116static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1117{
1118 struct mmc_davinci_host *host = mmc_priv(mmc);
1119
1120 if (enable) {
1121 if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
1122 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
1123 mmc_signal_sdio_irq(host->mmc);
1124 } else {
1125 host->sdio_int = true;
1126 writel(readl(host->base + DAVINCI_SDIOIEN) |
1127 SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
1128 }
1129 } else {
1130 host->sdio_int = false;
1131 writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
1132 host->base + DAVINCI_SDIOIEN);
1133 }
1134}
1135
1079static struct mmc_host_ops mmc_davinci_ops = { 1136static struct mmc_host_ops mmc_davinci_ops = {
1080 .request = mmc_davinci_request, 1137 .request = mmc_davinci_request,
1081 .set_ios = mmc_davinci_set_ios, 1138 .set_ios = mmc_davinci_set_ios,
1082 .get_cd = mmc_davinci_get_cd, 1139 .get_cd = mmc_davinci_get_cd,
1083 .get_ro = mmc_davinci_get_ro, 1140 .get_ro = mmc_davinci_get_ro,
1141 .enable_sdio_irq = mmc_davinci_enable_sdio_irq,
1084}; 1142};
1085 1143
1086/*----------------------------------------------------------------------*/ 1144/*----------------------------------------------------------------------*/
@@ -1209,7 +1267,8 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1209 host->nr_sg = MAX_NR_SG; 1267 host->nr_sg = MAX_NR_SG;
1210 1268
1211 host->use_dma = use_dma; 1269 host->use_dma = use_dma;
1212 host->irq = irq; 1270 host->mmc_irq = irq;
1271 host->sdio_irq = platform_get_irq(pdev, 1);
1213 1272
1214 if (host->use_dma && davinci_acquire_dma_channels(host) != 0) 1273 if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
1215 host->use_dma = 0; 1274 host->use_dma = 0;
@@ -1239,8 +1298,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1239 * Each hw_seg uses one EDMA parameter RAM slot, always one 1298 * Each hw_seg uses one EDMA parameter RAM slot, always one
1240 * channel and then usually some linked slots. 1299 * channel and then usually some linked slots.
1241 */ 1300 */
1242 mmc->max_hw_segs = 1 + host->n_link; 1301 mmc->max_segs = 1 + host->n_link;
1243 mmc->max_phys_segs = mmc->max_hw_segs;
1244 1302
1245 /* EDMA limit per hw segment (one or two MBytes) */ 1303 /* EDMA limit per hw segment (one or two MBytes) */
1246 mmc->max_seg_size = MAX_CCNT * rw_threshold; 1304 mmc->max_seg_size = MAX_CCNT * rw_threshold;
@@ -1250,8 +1308,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1250 mmc->max_blk_count = 65535; /* NBLK is 16 bits */ 1308 mmc->max_blk_count = 65535; /* NBLK is 16 bits */
1251 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1309 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1252 1310
1253 dev_dbg(mmc_dev(host->mmc), "max_phys_segs=%d\n", mmc->max_phys_segs); 1311 dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
1254 dev_dbg(mmc_dev(host->mmc), "max_hw_segs=%d\n", mmc->max_hw_segs);
1255 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); 1312 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
1256 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); 1313 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
1257 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); 1314 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);
@@ -1272,6 +1329,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1272 if (ret) 1329 if (ret)
1273 goto out; 1330 goto out;
1274 1331
1332 if (host->sdio_irq >= 0) {
1333 ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
1334 mmc_hostname(mmc), host);
1335 if (!ret)
1336 mmc->caps |= MMC_CAP_SDIO_IRQ;
1337 }
1338
1275 rename_region(mem, mmc_hostname(mmc)); 1339 rename_region(mem, mmc_hostname(mmc));
1276 1340
1277 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", 1341 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
@@ -1315,7 +1379,9 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
1315 mmc_davinci_cpufreq_deregister(host); 1379 mmc_davinci_cpufreq_deregister(host);
1316 1380
1317 mmc_remove_host(host->mmc); 1381 mmc_remove_host(host->mmc);
1318 free_irq(host->irq, host); 1382 free_irq(host->mmc_irq, host);
1383 if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
1384 free_irq(host->sdio_irq, host);
1319 1385
1320 davinci_release_dma_channels(host); 1386 davinci_release_dma_channels(host);
1321 1387
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
new file mode 100644
index 000000000000..66dcddb9c205
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.c
@@ -0,0 +1,1859 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/scatterlist.h>
26#include <linux/seq_file.h>
27#include <linux/slab.h>
28#include <linux/stat.h>
29#include <linux/delay.h>
30#include <linux/irq.h>
31#include <linux/mmc/host.h>
32#include <linux/mmc/mmc.h>
33#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
35#include <linux/regulator/consumer.h>
36
37#include "dw_mmc.h"
38
39/* Common flag combinations */
40#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
41 SDMMC_INT_HTO | SDMMC_INT_SBE | \
42 SDMMC_INT_EBE)
43#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
44 SDMMC_INT_RESP_ERR)
45#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
46 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
47#define DW_MCI_SEND_STATUS 1
48#define DW_MCI_RECV_STATUS 2
49#define DW_MCI_DMA_THRESHOLD 16
50
51#ifdef CONFIG_MMC_DW_IDMAC
52struct idmac_desc {
53 u32 des0; /* Control Descriptor */
54#define IDMAC_DES0_DIC BIT(1)
55#define IDMAC_DES0_LD BIT(2)
56#define IDMAC_DES0_FD BIT(3)
57#define IDMAC_DES0_CH BIT(4)
58#define IDMAC_DES0_ER BIT(5)
59#define IDMAC_DES0_CES BIT(30)
60#define IDMAC_DES0_OWN BIT(31)
61
62 u32 des1; /* Buffer sizes */
63#define IDMAC_SET_BUFFER1_SIZE(d, s) \
64 ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
65
66 u32 des2; /* buffer 1 physical address */
67
68 u32 des3; /* buffer 2 physical address */
69};
70#endif /* CONFIG_MMC_DW_IDMAC */
71
72/**
73 * struct dw_mci_slot - MMC slot state
74 * @mmc: The mmc_host representing this slot.
75 * @host: The MMC controller this slot is using.
76 * @ctype: Card type for this slot.
77 * @mrq: mmc_request currently being processed or waiting to be
78 * processed, or NULL when the slot is idle.
79 * @queue_node: List node for placing this node in the @queue list of
80 * &struct dw_mci.
81 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
82 * @flags: Random state bits associated with the slot.
83 * @id: Number of this slot.
84 * @last_detect_state: Most recently observed card detect state.
85 */
86struct dw_mci_slot {
87 struct mmc_host *mmc;
88 struct dw_mci *host;
89
90 u32 ctype;
91
92 struct mmc_request *mrq;
93 struct list_head queue_node;
94
95 unsigned int clock;
96 unsigned long flags;
97#define DW_MMC_CARD_PRESENT 0
98#define DW_MMC_CARD_NEED_INIT 1
99 int id;
100 int last_detect_state;
101};
102
103#if defined(CONFIG_DEBUG_FS)
104static int dw_mci_req_show(struct seq_file *s, void *v)
105{
106 struct dw_mci_slot *slot = s->private;
107 struct mmc_request *mrq;
108 struct mmc_command *cmd;
109 struct mmc_command *stop;
110 struct mmc_data *data;
111
112 /* Make sure we get a consistent snapshot */
113 spin_lock_bh(&slot->host->lock);
114 mrq = slot->mrq;
115
116 if (mrq) {
117 cmd = mrq->cmd;
118 data = mrq->data;
119 stop = mrq->stop;
120
121 if (cmd)
122 seq_printf(s,
123 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
124 cmd->opcode, cmd->arg, cmd->flags,
125 cmd->resp[0], cmd->resp[1], cmd->resp[2],
126 cmd->resp[2], cmd->error);
127 if (data)
128 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
129 data->bytes_xfered, data->blocks,
130 data->blksz, data->flags, data->error);
131 if (stop)
132 seq_printf(s,
133 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
134 stop->opcode, stop->arg, stop->flags,
135 stop->resp[0], stop->resp[1], stop->resp[2],
136 stop->resp[2], stop->error);
137 }
138
139 spin_unlock_bh(&slot->host->lock);
140
141 return 0;
142}
143
144static int dw_mci_req_open(struct inode *inode, struct file *file)
145{
146 return single_open(file, dw_mci_req_show, inode->i_private);
147}
148
149static const struct file_operations dw_mci_req_fops = {
150 .owner = THIS_MODULE,
151 .open = dw_mci_req_open,
152 .read = seq_read,
153 .llseek = seq_lseek,
154 .release = single_release,
155};
156
157static int dw_mci_regs_show(struct seq_file *s, void *v)
158{
159 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
160 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
161 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
162 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
163 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
164 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
165
166 return 0;
167}
168
169static int dw_mci_regs_open(struct inode *inode, struct file *file)
170{
171 return single_open(file, dw_mci_regs_show, inode->i_private);
172}
173
174static const struct file_operations dw_mci_regs_fops = {
175 .owner = THIS_MODULE,
176 .open = dw_mci_regs_open,
177 .read = seq_read,
178 .llseek = seq_lseek,
179 .release = single_release,
180};
181
182static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
183{
184 struct mmc_host *mmc = slot->mmc;
185 struct dw_mci *host = slot->host;
186 struct dentry *root;
187 struct dentry *node;
188
189 root = mmc->debugfs_root;
190 if (!root)
191 return;
192
193 node = debugfs_create_file("regs", S_IRUSR, root, host,
194 &dw_mci_regs_fops);
195 if (!node)
196 goto err;
197
198 node = debugfs_create_file("req", S_IRUSR, root, slot,
199 &dw_mci_req_fops);
200 if (!node)
201 goto err;
202
203 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
204 if (!node)
205 goto err;
206
207 node = debugfs_create_x32("pending_events", S_IRUSR, root,
208 (u32 *)&host->pending_events);
209 if (!node)
210 goto err;
211
212 node = debugfs_create_x32("completed_events", S_IRUSR, root,
213 (u32 *)&host->completed_events);
214 if (!node)
215 goto err;
216
217 return;
218
219err:
220 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
221}
222#endif /* defined(CONFIG_DEBUG_FS) */
223
224static void dw_mci_set_timeout(struct dw_mci *host)
225{
226 /* timeout (maximum) */
227 mci_writel(host, TMOUT, 0xffffffff);
228}
229
230static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
231{
232 struct mmc_data *data;
233 u32 cmdr;
234 cmd->error = -EINPROGRESS;
235
236 cmdr = cmd->opcode;
237
238 if (cmdr == MMC_STOP_TRANSMISSION)
239 cmdr |= SDMMC_CMD_STOP;
240 else
241 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
242
243 if (cmd->flags & MMC_RSP_PRESENT) {
244 /* We expect a response, so set this bit */
245 cmdr |= SDMMC_CMD_RESP_EXP;
246 if (cmd->flags & MMC_RSP_136)
247 cmdr |= SDMMC_CMD_RESP_LONG;
248 }
249
250 if (cmd->flags & MMC_RSP_CRC)
251 cmdr |= SDMMC_CMD_RESP_CRC;
252
253 data = cmd->data;
254 if (data) {
255 cmdr |= SDMMC_CMD_DAT_EXP;
256 if (data->flags & MMC_DATA_STREAM)
257 cmdr |= SDMMC_CMD_STRM_MODE;
258 if (data->flags & MMC_DATA_WRITE)
259 cmdr |= SDMMC_CMD_DAT_WR;
260 }
261
262 return cmdr;
263}
264
265static void dw_mci_start_command(struct dw_mci *host,
266 struct mmc_command *cmd, u32 cmd_flags)
267{
268 host->cmd = cmd;
269 dev_vdbg(&host->pdev->dev,
270 "start command: ARGR=0x%08x CMDR=0x%08x\n",
271 cmd->arg, cmd_flags);
272
273 mci_writel(host, CMDARG, cmd->arg);
274 wmb();
275
276 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
277}
278
279static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
280{
281 dw_mci_start_command(host, data->stop, host->stop_cmdr);
282}
283
284/* DMA interface functions */
285static void dw_mci_stop_dma(struct dw_mci *host)
286{
287 if (host->use_dma) {
288 host->dma_ops->stop(host);
289 host->dma_ops->cleanup(host);
290 } else {
291 /* Data transfer was stopped by the interrupt handler */
292 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
293 }
294}
295
296#ifdef CONFIG_MMC_DW_IDMAC
297static void dw_mci_dma_cleanup(struct dw_mci *host)
298{
299 struct mmc_data *data = host->data;
300
301 if (data)
302 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
303 ((data->flags & MMC_DATA_WRITE)
304 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
305}
306
307static void dw_mci_idmac_stop_dma(struct dw_mci *host)
308{
309 u32 temp;
310
311 /* Disable and reset the IDMAC interface */
312 temp = mci_readl(host, CTRL);
313 temp &= ~SDMMC_CTRL_USE_IDMAC;
314 temp |= SDMMC_CTRL_DMA_RESET;
315 mci_writel(host, CTRL, temp);
316
317 /* Stop the IDMAC running */
318 temp = mci_readl(host, BMOD);
319 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
320 mci_writel(host, BMOD, temp);
321}
322
323static void dw_mci_idmac_complete_dma(struct dw_mci *host)
324{
325 struct mmc_data *data = host->data;
326
327 dev_vdbg(&host->pdev->dev, "DMA complete\n");
328
329 host->dma_ops->cleanup(host);
330
331 /*
332 * If the card was removed, data will be NULL. No point in trying to
333 * send the stop command or waiting for NBUSY in this case.
334 */
335 if (data) {
336 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
337 tasklet_schedule(&host->tasklet);
338 }
339}
340
341static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
342 unsigned int sg_len)
343{
344 int i;
345 struct idmac_desc *desc = host->sg_cpu;
346
347 for (i = 0; i < sg_len; i++, desc++) {
348 unsigned int length = sg_dma_len(&data->sg[i]);
349 u32 mem_addr = sg_dma_address(&data->sg[i]);
350
351 /* Set the OWN bit and disable interrupts for this descriptor */
352 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
353
354 /* Buffer length */
355 IDMAC_SET_BUFFER1_SIZE(desc, length);
356
357 /* Physical address to DMA to/from */
358 desc->des2 = mem_addr;
359 }
360
361 /* Set first descriptor */
362 desc = host->sg_cpu;
363 desc->des0 |= IDMAC_DES0_FD;
364
365 /* Set last descriptor */
366 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
367 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
368 desc->des0 |= IDMAC_DES0_LD;
369
370 wmb();
371}
372
373static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
374{
375 u32 temp;
376
377 dw_mci_translate_sglist(host, host->data, sg_len);
378
379 /* Select IDMAC interface */
380 temp = mci_readl(host, CTRL);
381 temp |= SDMMC_CTRL_USE_IDMAC;
382 mci_writel(host, CTRL, temp);
383
384 wmb();
385
386 /* Enable the IDMAC */
387 temp = mci_readl(host, BMOD);
388 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
389 mci_writel(host, BMOD, temp);
390
391 /* Start it running */
392 mci_writel(host, PLDMND, 1);
393}
394
395static int dw_mci_idmac_init(struct dw_mci *host)
396{
397 struct idmac_desc *p;
398 int i;
399
400 /* Number of descriptors in the ring buffer */
401 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
402
403 /* Forward link the descriptor list */
404 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
405 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
406
407 /* Set the last descriptor as the end-of-ring descriptor */
408 p->des3 = host->sg_dma;
409 p->des0 = IDMAC_DES0_ER;
410
411 /* Mask out interrupts - get Tx & Rx complete only */
412 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
413 SDMMC_IDMAC_INT_TI);
414
415 /* Set the descriptor base address */
416 mci_writel(host, DBADDR, host->sg_dma);
417 return 0;
418}
419
420static struct dw_mci_dma_ops dw_mci_idmac_ops = {
421 .init = dw_mci_idmac_init,
422 .start = dw_mci_idmac_start_dma,
423 .stop = dw_mci_idmac_stop_dma,
424 .complete = dw_mci_idmac_complete_dma,
425 .cleanup = dw_mci_dma_cleanup,
426};
427#endif /* CONFIG_MMC_DW_IDMAC */
428
429static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
430{
431 struct scatterlist *sg;
432 unsigned int i, direction, sg_len;
433 u32 temp;
434
435 /* If we don't have a channel, we can't do DMA */
436 if (!host->use_dma)
437 return -ENODEV;
438
439 /*
440 * We don't do DMA on "complex" transfers, i.e. with
441 * non-word-aligned buffers or lengths. Also, we don't bother
442 * with all the DMA setup overhead for short transfers.
443 */
444 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
445 return -EINVAL;
446 if (data->blksz & 3)
447 return -EINVAL;
448
449 for_each_sg(data->sg, sg, data->sg_len, i) {
450 if (sg->offset & 3 || sg->length & 3)
451 return -EINVAL;
452 }
453
454 if (data->flags & MMC_DATA_READ)
455 direction = DMA_FROM_DEVICE;
456 else
457 direction = DMA_TO_DEVICE;
458
459 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
460 direction);
461
462 dev_vdbg(&host->pdev->dev,
463 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
464 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
465 sg_len);
466
467 /* Enable the DMA interface */
468 temp = mci_readl(host, CTRL);
469 temp |= SDMMC_CTRL_DMA_ENABLE;
470 mci_writel(host, CTRL, temp);
471
472 /* Disable RX/TX IRQs, let DMA handle it */
473 temp = mci_readl(host, INTMASK);
474 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
475 mci_writel(host, INTMASK, temp);
476
477 host->dma_ops->start(host, sg_len);
478
479 return 0;
480}
481
482static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
483{
484 u32 temp;
485
486 data->error = -EINPROGRESS;
487
488 WARN_ON(host->data);
489 host->sg = NULL;
490 host->data = data;
491
492 if (dw_mci_submit_data_dma(host, data)) {
493 host->sg = data->sg;
494 host->pio_offset = 0;
495 if (data->flags & MMC_DATA_READ)
496 host->dir_status = DW_MCI_RECV_STATUS;
497 else
498 host->dir_status = DW_MCI_SEND_STATUS;
499
500 temp = mci_readl(host, INTMASK);
501 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
502 mci_writel(host, INTMASK, temp);
503
504 temp = mci_readl(host, CTRL);
505 temp &= ~SDMMC_CTRL_DMA_ENABLE;
506 mci_writel(host, CTRL, temp);
507 }
508}
509
510static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
511{
512 struct dw_mci *host = slot->host;
513 unsigned long timeout = jiffies + msecs_to_jiffies(500);
514 unsigned int cmd_status = 0;
515
516 mci_writel(host, CMDARG, arg);
517 wmb();
518 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
519
520 while (time_before(jiffies, timeout)) {
521 cmd_status = mci_readl(host, CMD);
522 if (!(cmd_status & SDMMC_CMD_START))
523 return;
524 }
525 dev_err(&slot->mmc->class_dev,
526 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
527 cmd, arg, cmd_status);
528}
529
530static void dw_mci_setup_bus(struct dw_mci_slot *slot)
531{
532 struct dw_mci *host = slot->host;
533 u32 div;
534
535 if (slot->clock != host->current_speed) {
536 if (host->bus_hz % slot->clock)
537 /*
538 * move the + 1 after the divide to prevent
539 * over-clocking the card.
540 */
541 div = ((host->bus_hz / slot->clock) >> 1) + 1;
542 else
543 div = (host->bus_hz / slot->clock) >> 1;
544
545 dev_info(&slot->mmc->class_dev,
546 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
547 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
548 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
549
550 /* disable clock */
551 mci_writel(host, CLKENA, 0);
552 mci_writel(host, CLKSRC, 0);
553
554 /* inform CIU */
555 mci_send_cmd(slot,
556 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
557
558 /* set clock to desired speed */
559 mci_writel(host, CLKDIV, div);
560
561 /* inform CIU */
562 mci_send_cmd(slot,
563 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
564
565 /* enable clock */
566 mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE |
567 SDMMC_CLKEN_LOW_PWR);
568
569 /* inform CIU */
570 mci_send_cmd(slot,
571 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
572
573 host->current_speed = slot->clock;
574 }
575
576 /* Set the current slot bus width */
577 mci_writel(host, CTYPE, slot->ctype);
578}
579
580static void dw_mci_start_request(struct dw_mci *host,
581 struct dw_mci_slot *slot)
582{
583 struct mmc_request *mrq;
584 struct mmc_command *cmd;
585 struct mmc_data *data;
586 u32 cmdflags;
587
588 mrq = slot->mrq;
589 if (host->pdata->select_slot)
590 host->pdata->select_slot(slot->id);
591
592 /* Slot specific timing and width adjustment */
593 dw_mci_setup_bus(slot);
594
595 host->cur_slot = slot;
596 host->mrq = mrq;
597
598 host->pending_events = 0;
599 host->completed_events = 0;
600 host->data_status = 0;
601
602 data = mrq->data;
603 if (data) {
604 dw_mci_set_timeout(host);
605 mci_writel(host, BYTCNT, data->blksz*data->blocks);
606 mci_writel(host, BLKSIZ, data->blksz);
607 }
608
609 cmd = mrq->cmd;
610 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
611
612 /* this is the first command, send the initialization clock */
613 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
614 cmdflags |= SDMMC_CMD_INIT;
615
616 if (data) {
617 dw_mci_submit_data(host, data);
618 wmb();
619 }
620
621 dw_mci_start_command(host, cmd, cmdflags);
622
623 if (mrq->stop)
624 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
625}
626
627static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
628 struct mmc_request *mrq)
629{
630 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
631 host->state);
632
633 spin_lock_bh(&host->lock);
634 slot->mrq = mrq;
635
636 if (host->state == STATE_IDLE) {
637 host->state = STATE_SENDING_CMD;
638 dw_mci_start_request(host, slot);
639 } else {
640 list_add_tail(&slot->queue_node, &host->queue);
641 }
642
643 spin_unlock_bh(&host->lock);
644}
645
646static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
647{
648 struct dw_mci_slot *slot = mmc_priv(mmc);
649 struct dw_mci *host = slot->host;
650
651 WARN_ON(slot->mrq);
652
653 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
654 mrq->cmd->error = -ENOMEDIUM;
655 mmc_request_done(mmc, mrq);
656 return;
657 }
658
659 /* We don't support multiple blocks of weird lengths. */
660 dw_mci_queue_request(host, slot, mrq);
661}
662
663static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
664{
665 struct dw_mci_slot *slot = mmc_priv(mmc);
666 u32 regs;
667
668 /* set default 1 bit mode */
669 slot->ctype = SDMMC_CTYPE_1BIT;
670
671 switch (ios->bus_width) {
672 case MMC_BUS_WIDTH_1:
673 slot->ctype = SDMMC_CTYPE_1BIT;
674 break;
675 case MMC_BUS_WIDTH_4:
676 slot->ctype = SDMMC_CTYPE_4BIT;
677 break;
678 case MMC_BUS_WIDTH_8:
679 slot->ctype = SDMMC_CTYPE_8BIT;
680 break;
681 }
682
683 /* DDR mode set */
684 if (ios->ddr) {
685 regs = mci_readl(slot->host, UHS_REG);
686 regs |= (0x1 << slot->id) << 16;
687 mci_writel(slot->host, UHS_REG, regs);
688 }
689
690 if (ios->clock) {
691 /*
692 * Use mirror of ios->clock to prevent race with mmc
693 * core ios update when finding the minimum.
694 */
695 slot->clock = ios->clock;
696 }
697
698 switch (ios->power_mode) {
699 case MMC_POWER_UP:
700 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
701 break;
702 default:
703 break;
704 }
705}
706
707static int dw_mci_get_ro(struct mmc_host *mmc)
708{
709 int read_only;
710 struct dw_mci_slot *slot = mmc_priv(mmc);
711 struct dw_mci_board *brd = slot->host->pdata;
712
713 /* Use platform get_ro function, else try on board write protect */
714 if (brd->get_ro)
715 read_only = brd->get_ro(slot->id);
716 else
717 read_only =
718 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
719
720 dev_dbg(&mmc->class_dev, "card is %s\n",
721 read_only ? "read-only" : "read-write");
722
723 return read_only;
724}
725
726static int dw_mci_get_cd(struct mmc_host *mmc)
727{
728 int present;
729 struct dw_mci_slot *slot = mmc_priv(mmc);
730 struct dw_mci_board *brd = slot->host->pdata;
731
732 /* Use platform get_cd function, else try onboard card detect */
733 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
734 present = 1;
735 else if (brd->get_cd)
736 present = !brd->get_cd(slot->id);
737 else
738 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
739 == 0 ? 1 : 0;
740
741 if (present)
742 dev_dbg(&mmc->class_dev, "card is present\n");
743 else
744 dev_dbg(&mmc->class_dev, "card is not present\n");
745
746 return present;
747}
748
749static const struct mmc_host_ops dw_mci_ops = {
750 .request = dw_mci_request,
751 .set_ios = dw_mci_set_ios,
752 .get_ro = dw_mci_get_ro,
753 .get_cd = dw_mci_get_cd,
754};
755
756static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
757 __releases(&host->lock)
758 __acquires(&host->lock)
759{
760 struct dw_mci_slot *slot;
761 struct mmc_host *prev_mmc = host->cur_slot->mmc;
762
763 WARN_ON(host->cmd || host->data);
764
765 host->cur_slot->mrq = NULL;
766 host->mrq = NULL;
767 if (!list_empty(&host->queue)) {
768 slot = list_entry(host->queue.next,
769 struct dw_mci_slot, queue_node);
770 list_del(&slot->queue_node);
771 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
772 mmc_hostname(slot->mmc));
773 host->state = STATE_SENDING_CMD;
774 dw_mci_start_request(host, slot);
775 } else {
776 dev_vdbg(&host->pdev->dev, "list empty\n");
777 host->state = STATE_IDLE;
778 }
779
780 spin_unlock(&host->lock);
781 mmc_request_done(prev_mmc, mrq);
782 spin_lock(&host->lock);
783}
784
785static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
786{
787 u32 status = host->cmd_status;
788
789 host->cmd_status = 0;
790
791 /* Read the response from the card (up to 16 bytes) */
792 if (cmd->flags & MMC_RSP_PRESENT) {
793 if (cmd->flags & MMC_RSP_136) {
794 cmd->resp[3] = mci_readl(host, RESP0);
795 cmd->resp[2] = mci_readl(host, RESP1);
796 cmd->resp[1] = mci_readl(host, RESP2);
797 cmd->resp[0] = mci_readl(host, RESP3);
798 } else {
799 cmd->resp[0] = mci_readl(host, RESP0);
800 cmd->resp[1] = 0;
801 cmd->resp[2] = 0;
802 cmd->resp[3] = 0;
803 }
804 }
805
806 if (status & SDMMC_INT_RTO)
807 cmd->error = -ETIMEDOUT;
808 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
809 cmd->error = -EILSEQ;
810 else if (status & SDMMC_INT_RESP_ERR)
811 cmd->error = -EIO;
812 else
813 cmd->error = 0;
814
815 if (cmd->error) {
816 /* newer ip versions need a delay between retries */
817 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
818 mdelay(20);
819
820 if (cmd->data) {
821 host->data = NULL;
822 dw_mci_stop_dma(host);
823 }
824 }
825}
826
827static void dw_mci_tasklet_func(unsigned long priv)
828{
829 struct dw_mci *host = (struct dw_mci *)priv;
830 struct mmc_data *data;
831 struct mmc_command *cmd;
832 enum dw_mci_state state;
833 enum dw_mci_state prev_state;
834 u32 status;
835
836 spin_lock(&host->lock);
837
838 state = host->state;
839 data = host->data;
840
841 do {
842 prev_state = state;
843
844 switch (state) {
845 case STATE_IDLE:
846 break;
847
848 case STATE_SENDING_CMD:
849 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
850 &host->pending_events))
851 break;
852
853 cmd = host->cmd;
854 host->cmd = NULL;
855 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
856 dw_mci_command_complete(host, host->mrq->cmd);
857 if (!host->mrq->data || cmd->error) {
858 dw_mci_request_end(host, host->mrq);
859 goto unlock;
860 }
861
862 prev_state = state = STATE_SENDING_DATA;
863 /* fall through */
864
865 case STATE_SENDING_DATA:
866 if (test_and_clear_bit(EVENT_DATA_ERROR,
867 &host->pending_events)) {
868 dw_mci_stop_dma(host);
869 if (data->stop)
870 send_stop_cmd(host, data);
871 state = STATE_DATA_ERROR;
872 break;
873 }
874
875 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
876 &host->pending_events))
877 break;
878
879 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
880 prev_state = state = STATE_DATA_BUSY;
881 /* fall through */
882
883 case STATE_DATA_BUSY:
884 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
885 &host->pending_events))
886 break;
887
888 host->data = NULL;
889 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
890 status = host->data_status;
891
892 if (status & DW_MCI_DATA_ERROR_FLAGS) {
893 if (status & SDMMC_INT_DTO) {
894 dev_err(&host->pdev->dev,
895 "data timeout error\n");
896 data->error = -ETIMEDOUT;
897 } else if (status & SDMMC_INT_DCRC) {
898 dev_err(&host->pdev->dev,
899 "data CRC error\n");
900 data->error = -EILSEQ;
901 } else {
902 dev_err(&host->pdev->dev,
903 "data FIFO error "
904 "(status=%08x)\n",
905 status);
906 data->error = -EIO;
907 }
908 } else {
909 data->bytes_xfered = data->blocks * data->blksz;
910 data->error = 0;
911 }
912
913 if (!data->stop) {
914 dw_mci_request_end(host, host->mrq);
915 goto unlock;
916 }
917
918 prev_state = state = STATE_SENDING_STOP;
919 if (!data->error)
920 send_stop_cmd(host, data);
921 /* fall through */
922
923 case STATE_SENDING_STOP:
924 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
925 &host->pending_events))
926 break;
927
928 host->cmd = NULL;
929 dw_mci_command_complete(host, host->mrq->stop);
930 dw_mci_request_end(host, host->mrq);
931 goto unlock;
932
933 case STATE_DATA_ERROR:
934 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
935 &host->pending_events))
936 break;
937
938 state = STATE_DATA_BUSY;
939 break;
940 }
941 } while (state != prev_state);
942
943 host->state = state;
944unlock:
945 spin_unlock(&host->lock);
946
947}
948
949static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
950{
951 u16 *pdata = (u16 *)buf;
952
953 WARN_ON(cnt % 2 != 0);
954
955 cnt = cnt >> 1;
956 while (cnt > 0) {
957 mci_writew(host, DATA, *pdata++);
958 cnt--;
959 }
960}
961
962static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
963{
964 u16 *pdata = (u16 *)buf;
965
966 WARN_ON(cnt % 2 != 0);
967
968 cnt = cnt >> 1;
969 while (cnt > 0) {
970 *pdata++ = mci_readw(host, DATA);
971 cnt--;
972 }
973}
974
975static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
976{
977 u32 *pdata = (u32 *)buf;
978
979 WARN_ON(cnt % 4 != 0);
980 WARN_ON((unsigned long)pdata & 0x3);
981
982 cnt = cnt >> 2;
983 while (cnt > 0) {
984 mci_writel(host, DATA, *pdata++);
985 cnt--;
986 }
987}
988
989static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
990{
991 u32 *pdata = (u32 *)buf;
992
993 WARN_ON(cnt % 4 != 0);
994 WARN_ON((unsigned long)pdata & 0x3);
995
996 cnt = cnt >> 2;
997 while (cnt > 0) {
998 *pdata++ = mci_readl(host, DATA);
999 cnt--;
1000 }
1001}
1002
1003static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1004{
1005 u64 *pdata = (u64 *)buf;
1006
1007 WARN_ON(cnt % 8 != 0);
1008
1009 cnt = cnt >> 3;
1010 while (cnt > 0) {
1011 mci_writeq(host, DATA, *pdata++);
1012 cnt--;
1013 }
1014}
1015
1016static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1017{
1018 u64 *pdata = (u64 *)buf;
1019
1020 WARN_ON(cnt % 8 != 0);
1021
1022 cnt = cnt >> 3;
1023 while (cnt > 0) {
1024 *pdata++ = mci_readq(host, DATA);
1025 cnt--;
1026 }
1027}
1028
1029static void dw_mci_read_data_pio(struct dw_mci *host)
1030{
1031 struct scatterlist *sg = host->sg;
1032 void *buf = sg_virt(sg);
1033 unsigned int offset = host->pio_offset;
1034 struct mmc_data *data = host->data;
1035 int shift = host->data_shift;
1036 u32 status;
1037 unsigned int nbytes = 0, len;
1038
1039 do {
1040 len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
1041 if (offset + len <= sg->length) {
1042 host->pull_data(host, (void *)(buf + offset), len);
1043
1044 offset += len;
1045 nbytes += len;
1046
1047 if (offset == sg->length) {
1048 flush_dcache_page(sg_page(sg));
1049 host->sg = sg = sg_next(sg);
1050 if (!sg)
1051 goto done;
1052
1053 offset = 0;
1054 buf = sg_virt(sg);
1055 }
1056 } else {
1057 unsigned int remaining = sg->length - offset;
1058 host->pull_data(host, (void *)(buf + offset),
1059 remaining);
1060 nbytes += remaining;
1061
1062 flush_dcache_page(sg_page(sg));
1063 host->sg = sg = sg_next(sg);
1064 if (!sg)
1065 goto done;
1066
1067 offset = len - remaining;
1068 buf = sg_virt(sg);
1069 host->pull_data(host, buf, offset);
1070 nbytes += offset;
1071 }
1072
1073 status = mci_readl(host, MINTSTS);
1074 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1075 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1076 host->data_status = status;
1077 data->bytes_xfered += nbytes;
1078 smp_wmb();
1079
1080 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1081
1082 tasklet_schedule(&host->tasklet);
1083 return;
1084 }
1085 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
1086 len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
1087 host->pio_offset = offset;
1088 data->bytes_xfered += nbytes;
1089 return;
1090
1091done:
1092 data->bytes_xfered += nbytes;
1093 smp_wmb();
1094 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1095}
1096
1097static void dw_mci_write_data_pio(struct dw_mci *host)
1098{
1099 struct scatterlist *sg = host->sg;
1100 void *buf = sg_virt(sg);
1101 unsigned int offset = host->pio_offset;
1102 struct mmc_data *data = host->data;
1103 int shift = host->data_shift;
1104 u32 status;
1105 unsigned int nbytes = 0, len;
1106
1107 do {
1108 len = SDMMC_FIFO_SZ -
1109 (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
1110 if (offset + len <= sg->length) {
1111 host->push_data(host, (void *)(buf + offset), len);
1112
1113 offset += len;
1114 nbytes += len;
1115 if (offset == sg->length) {
1116 host->sg = sg = sg_next(sg);
1117 if (!sg)
1118 goto done;
1119
1120 offset = 0;
1121 buf = sg_virt(sg);
1122 }
1123 } else {
1124 unsigned int remaining = sg->length - offset;
1125
1126 host->push_data(host, (void *)(buf + offset),
1127 remaining);
1128 nbytes += remaining;
1129
1130 host->sg = sg = sg_next(sg);
1131 if (!sg)
1132 goto done;
1133
1134 offset = len - remaining;
1135 buf = sg_virt(sg);
1136 host->push_data(host, (void *)buf, offset);
1137 nbytes += offset;
1138 }
1139
1140 status = mci_readl(host, MINTSTS);
1141 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1142 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1143 host->data_status = status;
1144 data->bytes_xfered += nbytes;
1145
1146 smp_wmb();
1147
1148 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1149
1150 tasklet_schedule(&host->tasklet);
1151 return;
1152 }
1153 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1154
1155 host->pio_offset = offset;
1156 data->bytes_xfered += nbytes;
1157
1158 return;
1159
1160done:
1161 data->bytes_xfered += nbytes;
1162 smp_wmb();
1163 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1164}
1165
1166static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1167{
1168 if (!host->cmd_status)
1169 host->cmd_status = status;
1170
1171 smp_wmb();
1172
1173 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1174 tasklet_schedule(&host->tasklet);
1175}
1176
/*
 * Top-level interrupt handler.  Loops (bounded to 5 passes) over the
 * masked interrupt status, acknowledges each pending condition by
 * writing it back to RINTSTS, records the raw status for the tasklet,
 * and schedules the appropriate bottom half.  PIO data is drained or
 * filled directly from interrupt context via the *_data_pio helpers.
 */
1177static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1178{
1179	struct dw_mci *host = dev_id;
1180	u32 status, pending;
1181	unsigned int pass_count = 0;
1182
1183	do {
1184		status = mci_readl(host, RINTSTS);
1185		pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1186
1187		/*
1188		 * DTO fix - version 2.10a and below, and only if internal DMA
1189		 * is configured.
1190		 */
1191		if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1192			if (!pending &&
1193			    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1194				pending |= SDMMC_INT_DATA_OVER;
1195		}
1196
1197		if (!pending)
1198			break;
1199
		/* Command-phase errors: latch status, signal CMD_COMPLETE. */
1200		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1201			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1202			host->cmd_status = status;
1203			smp_wmb();
1204			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1205			tasklet_schedule(&host->tasklet);
1206		}
1207
1208		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1209			/* if there is an error report DATA_ERROR */
1210			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1211			host->data_status = status;
1212			smp_wmb();
1213			set_bit(EVENT_DATA_ERROR, &host->pending_events);
1214			tasklet_schedule(&host->tasklet);
1215		}
1216
		/*
		 * Data transfer over: on a read, drain any bytes still in
		 * the FIFO before flagging completion.
		 */
1217		if (pending & SDMMC_INT_DATA_OVER) {
1218			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1219			if (!host->data_status)
1220				host->data_status = status;
1221			smp_wmb();
1222			if (host->dir_status == DW_MCI_RECV_STATUS) {
1223				if (host->sg != NULL)
1224					dw_mci_read_data_pio(host);
1225			}
1226			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1227			tasklet_schedule(&host->tasklet);
1228		}
1229
1230		if (pending & SDMMC_INT_RXDR) {
1231			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1232			if (host->sg)
1233				dw_mci_read_data_pio(host);
1234		}
1235
1236		if (pending & SDMMC_INT_TXDR) {
1237			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1238			if (host->sg)
1239				dw_mci_write_data_pio(host);
1240		}
1241
1242		if (pending & SDMMC_INT_CMD_DONE) {
1243			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1244			dw_mci_cmd_interrupt(host, status);
1245		}
1246
		/* Card insert/remove: handled by the card-detect tasklet. */
1247		if (pending & SDMMC_INT_CD) {
1248			mci_writel(host, RINTSTS, SDMMC_INT_CD);
1249			tasklet_schedule(&host->card_tasklet);
1250		}
1251
1252	} while (pass_count++ < 5);
1253
1254#ifdef CONFIG_MMC_DW_IDMAC
1255	/* Handle DMA interrupts */
1256	pending = mci_readl(host, IDSTS);
1257	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1258		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1259		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1260		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1261		host->dma_ops->complete(host);
1262	}
1263#endif
1264
	/*
	 * NOTE(review): IRQ_HANDLED is returned unconditionally, even when
	 * nothing was pending - harmless unless the IRQ line is shared.
	 */
1265	return IRQ_HANDLED;
1266}
1267
/*
 * Card-detect tasklet.  For every slot, compare the current card-detect
 * state with the last one recorded and, on each change: power the slot
 * up or down via the platform setpower hook, fail any in-flight request
 * with -ENOMEDIUM (according to how far the state machine had advanced),
 * reset the FIFO/IDMAC on removal, and finally notify the mmc core via
 * mmc_detect_change().  The inner loop re-samples the detect state under
 * host->lock until it stops changing.
 */
1268static void dw_mci_tasklet_card(unsigned long data)
1269{
1270	struct dw_mci *host = (struct dw_mci *)data;
1271	int i;
1272
1273	for (i = 0; i < host->num_slots; i++) {
1274		struct dw_mci_slot *slot = host->slot[i];
1275		struct mmc_host *mmc = slot->mmc;
1276		struct mmc_request *mrq;
1277		int present;
1278		u32 ctrl;
1279
1280		present = dw_mci_get_cd(mmc);
1281		while (present != slot->last_detect_state) {
1282			spin_lock(&host->lock);
1283
1284			dev_dbg(&slot->mmc->class_dev, "card %s\n",
1285				present ? "inserted" : "removed");
1286
1287			/* Card change detected */
1288			slot->last_detect_state = present;
1289
1290			/* Power up slot */
1291			if (present != 0) {
1292				if (host->pdata->setpower)
1293					host->pdata->setpower(slot->id,
1294							      mmc->ocr_avail);
1295
1296				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1297			}
1298
1299			/* Clean up queue if present */
1300			mrq = slot->mrq;
1301			if (mrq) {
				/*
				 * Active request: fail whichever phases are
				 * still outstanding based on host->state.
				 */
1302				if (mrq == host->mrq) {
1303					host->data = NULL;
1304					host->cmd = NULL;
1305
1306					switch (host->state) {
1307					case STATE_IDLE:
1308						break;
1309					case STATE_SENDING_CMD:
1310						mrq->cmd->error = -ENOMEDIUM;
1311						if (!mrq->data)
1312							break;
1313						/* fall through */
1314					case STATE_SENDING_DATA:
1315						mrq->data->error = -ENOMEDIUM;
1316						dw_mci_stop_dma(host);
1317						break;
					/*
					 * NOTE(review): these states assume
					 * mrq->data is non-NULL - confirm the
					 * state machine guarantees that.
					 */
1318					case STATE_DATA_BUSY:
1319					case STATE_DATA_ERROR:
1320						if (mrq->data->error == -EINPROGRESS)
1321							mrq->data->error = -ENOMEDIUM;
1322						if (!mrq->stop)
1323							break;
1324						/* fall through */
1325					case STATE_SENDING_STOP:
1326						mrq->stop->error = -ENOMEDIUM;
1327						break;
1328					}
1329
1330					dw_mci_request_end(host, mrq);
1331				} else {
					/* Still queued: fail it and complete
					 * outside the host lock. */
1332					list_del(&slot->queue_node);
1333					mrq->cmd->error = -ENOMEDIUM;
1334					if (mrq->data)
1335						mrq->data->error = -ENOMEDIUM;
1336					if (mrq->stop)
1337						mrq->stop->error = -ENOMEDIUM;
1338
1339					spin_unlock(&host->lock);
1340					mmc_request_done(slot->mmc, mrq);
1341					spin_lock(&host->lock);
1342				}
1343			}
1344
1345			/* Power down slot */
1346			if (present == 0) {
1347				if (host->pdata->setpower)
1348					host->pdata->setpower(slot->id, 0);
1349				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1350
1351				/*
1352				 * Clear down the FIFO - doing so generates a
1353				 * block interrupt, hence setting the
1354				 * scatter-gather pointer to NULL.
1355				 */
1356				host->sg = NULL;
1357
1358				ctrl = mci_readl(host, CTRL);
1359				ctrl |= SDMMC_CTRL_FIFO_RESET;
1360				mci_writel(host, CTRL, ctrl);
1361
1362#ifdef CONFIG_MMC_DW_IDMAC
1363				ctrl = mci_readl(host, BMOD);
1364				ctrl |= 0x01; /* Software reset of DMA */
1365				mci_writel(host, BMOD, ctrl);
1366#endif
1367
1368			}
1369
1370			spin_unlock(&host->lock);
1371			present = dw_mci_get_cd(mmc);
1372		}
1373
1374		mmc_detect_change(slot->mmc,
1375			msecs_to_jiffies(host->pdata->detect_delay_ms));
1376	}
1377}
1378
/*
 * Allocate and register one mmc_host for slot @id: derive f_min/f_max
 * from bus_hz, take the OCR mask / caps / bus width from platform data,
 * size block-transfer limits (IDMAC descriptor ring or platform block
 * settings), grab an optional "vmmc" regulator, record the initial
 * card-present state and kick the card-detect tasklet.
 *
 * Returns 0 on success or -ENOMEM if the mmc_host cannot be allocated.
 */
1379static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1380{
1381	struct mmc_host *mmc;
1382	struct dw_mci_slot *slot;
1383
1384	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev);
1385	if (!mmc)
1386		return -ENOMEM;
1387
1388	slot = mmc_priv(mmc);
1389	slot->id = id;
1390	slot->mmc = mmc;
1391	slot->host = host;
1392
1393	mmc->ops = &dw_mci_ops;
	/* Slowest rate the divider can produce: bus_hz / 510. */
1394	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
1395	mmc->f_max = host->bus_hz;
1396
1397	if (host->pdata->get_ocr)
1398		mmc->ocr_avail = host->pdata->get_ocr(id);
1399	else
1400		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1401
1402	/*
1403	 * Start with slot power disabled, it will be enabled when a card
1404	 * is detected.
1405	 */
1406	if (host->pdata->setpower)
1407		host->pdata->setpower(id, 0);
1408
1409	if (host->pdata->caps)
1410		mmc->caps = host->pdata->caps;
1411	else
1412		mmc->caps = 0;
1413
1414	if (host->pdata->get_bus_wd)
1415		if (host->pdata->get_bus_wd(slot->id) >= 4)
1416			mmc->caps |= MMC_CAP_4_BIT_DATA;
1417
1418	if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
1419		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1420
1421#ifdef CONFIG_MMC_DW_IDMAC
	/* Transfer limits bounded by the IDMAC descriptor ring size. */
1422	mmc->max_segs = host->ring_size;
1423	mmc->max_blk_size = 65536;
1424	mmc->max_blk_count = host->ring_size;
1425	mmc->max_seg_size = 0x1000;
1426	mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
1427#else
1428	if (host->pdata->blk_settings) {
1429		mmc->max_segs = host->pdata->blk_settings->max_segs;
1430		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
1431		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
1432		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
1433		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
1434	} else {
1435		/* Useful defaults if platform data is unset. */
1436		mmc->max_segs = 64;
1437		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
1438		mmc->max_blk_count = 512;
1439		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1440		mmc->max_seg_size = mmc->max_req_size;
1441	}
1442#endif /* CONFIG_MMC_DW_IDMAC */
1443
	/* The "vmmc" regulator is optional; fall back to NULL if absent. */
1444	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
1445	if (IS_ERR(host->vmmc)) {
1446		printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
1447		host->vmmc = NULL;
1448	} else
		/* NOTE(review): regulator_enable() return value is ignored. */
1449		regulator_enable(host->vmmc);
1450
1451	if (dw_mci_get_cd(mmc))
1452		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1453	else
1454		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1455
	/* Publish the slot before registering with the mmc core. */
1456	host->slot[id] = slot;
1457	mmc_add_host(mmc);
1458
1459#if defined(CONFIG_DEBUG_FS)
1460	dw_mci_init_debugfs(slot);
1461#endif
1462
1463	/* Card initially undetected */
1464	slot->last_detect_state = 0;
1465
1466	/*
1467	 * Card may have been plugged in prior to boot so we
1468	 * need to run the detect tasklet
1469	 */
1470	tasklet_schedule(&host->card_tasklet);
1471
1472	return 0;
1473}
1474
1475static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
1476{
1477 /* Shutdown detect IRQ */
1478 if (slot->host->pdata->exit)
1479 slot->host->pdata->exit(id);
1480
1481 /* Debugfs stuff is cleaned up by mmc core */
1482 mmc_remove_host(slot->mmc);
1483 slot->host->slot[id] = NULL;
1484 mmc_free_host(slot->mmc);
1485}
1486
1487static void dw_mci_init_dma(struct dw_mci *host)
1488{
1489 /* Alloc memory for sg translation */
1490 host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
1491 &host->sg_dma, GFP_KERNEL);
1492 if (!host->sg_cpu) {
1493 dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
1494 __func__);
1495 goto no_dma;
1496 }
1497
1498 /* Determine which DMA interface to use */
1499#ifdef CONFIG_MMC_DW_IDMAC
1500 host->dma_ops = &dw_mci_idmac_ops;
1501 dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
1502#endif
1503
1504 if (!host->dma_ops)
1505 goto no_dma;
1506
1507 if (host->dma_ops->init) {
1508 if (host->dma_ops->init(host)) {
1509 dev_err(&host->pdev->dev, "%s: Unable to initialize "
1510 "DMA Controller.\n", __func__);
1511 goto no_dma;
1512 }
1513 } else {
1514 dev_err(&host->pdev->dev, "DMA initialization not found.\n");
1515 goto no_dma;
1516 }
1517
1518 host->use_dma = 1;
1519 return;
1520
1521no_dma:
1522 dev_info(&host->pdev->dev, "Using PIO mode.\n");
1523 host->use_dma = 0;
1524 return;
1525}
1526
1527static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
1528{
1529 unsigned long timeout = jiffies + msecs_to_jiffies(500);
1530 unsigned int ctrl;
1531
1532 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1533 SDMMC_CTRL_DMA_RESET));
1534
1535 /* wait till resets clear */
1536 do {
1537 ctrl = mci_readl(host, CTRL);
1538 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1539 SDMMC_CTRL_DMA_RESET)))
1540 return true;
1541 } while (time_before(jiffies, timeout));
1542
1543 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
1544
1545 return false;
1546}
1547
1548static int dw_mci_probe(struct platform_device *pdev)
1549{
1550 struct dw_mci *host;
1551 struct resource *regs;
1552 struct dw_mci_board *pdata;
1553 int irq, ret, i, width;
1554 u32 fifo_size;
1555
1556 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1557 if (!regs)
1558 return -ENXIO;
1559
1560 irq = platform_get_irq(pdev, 0);
1561 if (irq < 0)
1562 return irq;
1563
1564 host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
1565 if (!host)
1566 return -ENOMEM;
1567
1568 host->pdev = pdev;
1569 host->pdata = pdata = pdev->dev.platform_data;
1570 if (!pdata || !pdata->init) {
1571 dev_err(&pdev->dev,
1572 "Platform data must supply init function\n");
1573 ret = -ENODEV;
1574 goto err_freehost;
1575 }
1576
1577 if (!pdata->select_slot && pdata->num_slots > 1) {
1578 dev_err(&pdev->dev,
1579 "Platform data must supply select_slot function\n");
1580 ret = -ENODEV;
1581 goto err_freehost;
1582 }
1583
1584 if (!pdata->bus_hz) {
1585 dev_err(&pdev->dev,
1586 "Platform data must supply bus speed\n");
1587 ret = -ENODEV;
1588 goto err_freehost;
1589 }
1590
1591 host->bus_hz = pdata->bus_hz;
1592 host->quirks = pdata->quirks;
1593
1594 spin_lock_init(&host->lock);
1595 INIT_LIST_HEAD(&host->queue);
1596
1597 ret = -ENOMEM;
1598 host->regs = ioremap(regs->start, regs->end - regs->start + 1);
1599 if (!host->regs)
1600 goto err_freehost;
1601
1602 host->dma_ops = pdata->dma_ops;
1603 dw_mci_init_dma(host);
1604
1605 /*
1606 * Get the host data width - this assumes that HCON has been set with
1607 * the correct values.
1608 */
1609 i = (mci_readl(host, HCON) >> 7) & 0x7;
1610 if (!i) {
1611 host->push_data = dw_mci_push_data16;
1612 host->pull_data = dw_mci_pull_data16;
1613 width = 16;
1614 host->data_shift = 1;
1615 } else if (i == 2) {
1616 host->push_data = dw_mci_push_data64;
1617 host->pull_data = dw_mci_pull_data64;
1618 width = 64;
1619 host->data_shift = 3;
1620 } else {
1621 /* Check for a reserved value, and warn if it is */
1622 WARN((i != 1),
1623 "HCON reports a reserved host data width!\n"
1624 "Defaulting to 32-bit access.\n");
1625 host->push_data = dw_mci_push_data32;
1626 host->pull_data = dw_mci_pull_data32;
1627 width = 32;
1628 host->data_shift = 2;
1629 }
1630
1631 /* Reset all blocks */
1632 if (!mci_wait_reset(&pdev->dev, host)) {
1633 ret = -ENODEV;
1634 goto err_dmaunmap;
1635 }
1636
1637 /* Clear the interrupts for the host controller */
1638 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1639 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
1640
1641 /* Put in max timeout */
1642 mci_writel(host, TMOUT, 0xFFFFFFFF);
1643
1644 /*
1645 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
1646 * Tx Mark = fifo_size / 2 DMA Size = 8
1647 */
1648 fifo_size = mci_readl(host, FIFOTH);
1649 fifo_size = (fifo_size >> 16) & 0x7ff;
1650 host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
1651 ((fifo_size/2) << 0));
1652 mci_writel(host, FIFOTH, host->fifoth_val);
1653
1654 /* disable clock to CIU */
1655 mci_writel(host, CLKENA, 0);
1656 mci_writel(host, CLKSRC, 0);
1657
1658 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
1659 tasklet_init(&host->card_tasklet,
1660 dw_mci_tasklet_card, (unsigned long)host);
1661
1662 ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
1663 if (ret)
1664 goto err_dmaunmap;
1665
1666 platform_set_drvdata(pdev, host);
1667
1668 if (host->pdata->num_slots)
1669 host->num_slots = host->pdata->num_slots;
1670 else
1671 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
1672
1673 /* We need at least one slot to succeed */
1674 for (i = 0; i < host->num_slots; i++) {
1675 ret = dw_mci_init_slot(host, i);
1676 if (ret) {
1677 ret = -ENODEV;
1678 goto err_init_slot;
1679 }
1680 }
1681
1682 /*
1683 * Enable interrupts for command done, data over, data empty, card det,
1684 * receive ready and error such as transmit, receive timeout, crc error
1685 */
1686 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1687 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
1688 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
1689 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
1690 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
1691
1692 dev_info(&pdev->dev, "DW MMC controller at irq %d, "
1693 "%d bit host data width\n", irq, width);
1694 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
1695 dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
1696
1697 return 0;
1698
1699err_init_slot:
1700 /* De-init any initialized slots */
1701 while (i > 0) {
1702 if (host->slot[i])
1703 dw_mci_cleanup_slot(host->slot[i], i);
1704 i--;
1705 }
1706 free_irq(irq, host);
1707
1708err_dmaunmap:
1709 if (host->use_dma && host->dma_ops->exit)
1710 host->dma_ops->exit(host);
1711 dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
1712 host->sg_cpu, host->sg_dma);
1713 iounmap(host->regs);
1714
1715 if (host->vmmc) {
1716 regulator_disable(host->vmmc);
1717 regulator_put(host->vmmc);
1718 }
1719
1720
1721err_freehost:
1722 kfree(host);
1723 return ret;
1724}
1725
/*
 * Driver removal: mask and clear all controller interrupts, tear down
 * every registered slot, gate the CIU clock, then release the IRQ, DMA
 * resources, regulator reference and register mapping before freeing
 * the host structure.
 */
1726static int __exit dw_mci_remove(struct platform_device *pdev)
1727{
1728	struct dw_mci *host = platform_get_drvdata(pdev);
1729	int i;
1730
	/* Quiesce the controller before dismantling anything. */
1731	mci_writel(host, RINTSTS, 0xFFFFFFFF);
1732	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
1733
1734	platform_set_drvdata(pdev, NULL);
1735
1736	for (i = 0; i < host->num_slots; i++) {
1737		dev_dbg(&pdev->dev, "remove slot %d\n", i);
1738		if (host->slot[i])
1739			dw_mci_cleanup_slot(host->slot[i], i);
1740	}
1741
1742	/* disable clock to CIU */
1743	mci_writel(host, CLKENA, 0);
1744	mci_writel(host, CLKSRC, 0);
1745
1746	free_irq(platform_get_irq(pdev, 0), host);
	/*
	 * NOTE(review): the sg buffer is freed before dma_ops->exit() runs -
	 * confirm the exit hook does not touch sg_cpu/sg_dma.
	 */
1747	dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
1748
1749	if (host->use_dma && host->dma_ops->exit)
1750		host->dma_ops->exit(host);
1751
1752	if (host->vmmc) {
1753		regulator_disable(host->vmmc);
1754		regulator_put(host->vmmc);
1755	}
1756
1757	iounmap(host->regs);
1758
1759	kfree(host);
1760	return 0;
1761}
1762
1763#ifdef CONFIG_PM
1764/*
1765 * TODO: we should probably disable the clock to the card in the suspend path.
1766 */
1767static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
1768{
1769 int i, ret;
1770 struct dw_mci *host = platform_get_drvdata(pdev);
1771
1772 for (i = 0; i < host->num_slots; i++) {
1773 struct dw_mci_slot *slot = host->slot[i];
1774 if (!slot)
1775 continue;
1776 ret = mmc_suspend_host(slot->mmc);
1777 if (ret < 0) {
1778 while (--i >= 0) {
1779 slot = host->slot[i];
1780 if (slot)
1781 mmc_resume_host(host->slot[i]->mmc);
1782 }
1783 return ret;
1784 }
1785 }
1786
1787 if (host->vmmc)
1788 regulator_disable(host->vmmc);
1789
1790 return 0;
1791}
1792
1793static int dw_mci_resume(struct platform_device *pdev)
1794{
1795 int i, ret;
1796 struct dw_mci *host = platform_get_drvdata(pdev);
1797
1798 if (host->vmmc)
1799 regulator_enable(host->vmmc);
1800
1801 if (host->dma_ops->init)
1802 host->dma_ops->init(host);
1803
1804 if (!mci_wait_reset(&pdev->dev, host)) {
1805 ret = -ENODEV;
1806 return ret;
1807 }
1808
1809 /* Restore the old value at FIFOTH register */
1810 mci_writel(host, FIFOTH, host->fifoth_val);
1811
1812 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1813 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
1814 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
1815 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
1816 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
1817
1818 for (i = 0; i < host->num_slots; i++) {
1819 struct dw_mci_slot *slot = host->slot[i];
1820 if (!slot)
1821 continue;
1822 ret = mmc_resume_host(host->slot[i]->mmc);
1823 if (ret < 0)
1824 return ret;
1825 }
1826
1827 return 0;
1828}
1829#else
1830#define dw_mci_suspend NULL
1831#define dw_mci_resume NULL
1832#endif /* CONFIG_PM */
1833
/*
 * Platform driver glue.  No .probe here: binding is done through
 * platform_driver_probe() in dw_mci_init() below.
 */
1834static struct platform_driver dw_mci_driver = {
1835	.remove		= __exit_p(dw_mci_remove),
1836	.suspend	= dw_mci_suspend,
1837	.resume		= dw_mci_resume,
1838	.driver		= {
1839		.name		= "dw_mmc",
1840	},
1841};
1842
/* Module init: bind dw_mci_probe to any matching registered device. */
1843static int __init dw_mci_init(void)
1844{
1845	return platform_driver_probe(&dw_mci_driver, dw_mci_probe);
1846}
1847
/* Module exit: unregister the platform driver. */
1848static void __exit dw_mci_exit(void)
1849{
1850	platform_driver_unregister(&dw_mci_driver);
1851}
1852
1853module_init(dw_mci_init);
1854module_exit(dw_mci_exit);
1855
1856MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
1857MODULE_AUTHOR("NXP Semiconductor VietNam");
1858MODULE_AUTHOR("Imagination Technologies Ltd");
1859MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
new file mode 100644
index 000000000000..23c662af5616
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.h
@@ -0,0 +1,168 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef _DW_MMC_H_
15#define _DW_MMC_H_
16
17#define SDMMC_CTRL 0x000
18#define SDMMC_PWREN 0x004
19#define SDMMC_CLKDIV 0x008
20#define SDMMC_CLKSRC 0x00c
21#define SDMMC_CLKENA 0x010
22#define SDMMC_TMOUT 0x014
23#define SDMMC_CTYPE 0x018
24#define SDMMC_BLKSIZ 0x01c
25#define SDMMC_BYTCNT 0x020
26#define SDMMC_INTMASK 0x024
27#define SDMMC_CMDARG 0x028
28#define SDMMC_CMD 0x02c
29#define SDMMC_RESP0 0x030
30#define SDMMC_RESP1 0x034
31#define SDMMC_RESP2 0x038
32#define SDMMC_RESP3 0x03c
33#define SDMMC_MINTSTS 0x040
34#define SDMMC_RINTSTS 0x044
35#define SDMMC_STATUS 0x048
36#define SDMMC_FIFOTH 0x04c
37#define SDMMC_CDETECT 0x050
38#define SDMMC_WRTPRT 0x054
39#define SDMMC_GPIO 0x058
40#define SDMMC_TCBCNT 0x05c
41#define SDMMC_TBBCNT 0x060
42#define SDMMC_DEBNCE 0x064
43#define SDMMC_USRID 0x068
44#define SDMMC_VERID 0x06c
45#define SDMMC_HCON 0x070
46#define SDMMC_UHS_REG 0x074
47#define SDMMC_BMOD 0x080
48#define SDMMC_PLDMND 0x084
49#define SDMMC_DBADDR 0x088
50#define SDMMC_IDSTS 0x08c
51#define SDMMC_IDINTEN 0x090
52#define SDMMC_DSCADDR 0x094
53#define SDMMC_BUFADDR 0x098
54#define SDMMC_DATA 0x100
55
56/* shift bit field */
57#define _SBF(f, v) ((v) << (f))
58
59/* Control register defines */
60#define SDMMC_CTRL_USE_IDMAC BIT(25)
61#define SDMMC_CTRL_CEATA_INT_EN BIT(11)
62#define SDMMC_CTRL_SEND_AS_CCSD BIT(10)
63#define SDMMC_CTRL_SEND_CCSD BIT(9)
64#define SDMMC_CTRL_ABRT_READ_DATA BIT(8)
65#define SDMMC_CTRL_SEND_IRQ_RESP BIT(7)
66#define SDMMC_CTRL_READ_WAIT BIT(6)
67#define SDMMC_CTRL_DMA_ENABLE BIT(5)
68#define SDMMC_CTRL_INT_ENABLE BIT(4)
69#define SDMMC_CTRL_DMA_RESET BIT(2)
70#define SDMMC_CTRL_FIFO_RESET BIT(1)
71#define SDMMC_CTRL_RESET BIT(0)
72/* Clock Enable register defines */
73#define SDMMC_CLKEN_LOW_PWR BIT(16)
74#define SDMMC_CLKEN_ENABLE BIT(0)
75/* time-out register defines */
76#define SDMMC_TMOUT_DATA(n) _SBF(8, (n))
77#define SDMMC_TMOUT_DATA_MSK 0xFFFFFF00
78#define SDMMC_TMOUT_RESP(n) ((n) & 0xFF)
79#define SDMMC_TMOUT_RESP_MSK 0xFF
80/* card-type register defines */
81#define SDMMC_CTYPE_8BIT BIT(16)
82#define SDMMC_CTYPE_4BIT BIT(0)
83#define SDMMC_CTYPE_1BIT 0
84/* Interrupt status & mask register defines */
85#define SDMMC_INT_SDIO BIT(16)
86#define SDMMC_INT_EBE BIT(15)
87#define SDMMC_INT_ACD BIT(14)
88#define SDMMC_INT_SBE BIT(13)
89#define SDMMC_INT_HLE BIT(12)
90#define SDMMC_INT_FRUN BIT(11)
91#define SDMMC_INT_HTO BIT(10)
92#define SDMMC_INT_DTO BIT(9)
93#define SDMMC_INT_RTO BIT(8)
94#define SDMMC_INT_DCRC BIT(7)
95#define SDMMC_INT_RCRC BIT(6)
96#define SDMMC_INT_RXDR BIT(5)
97#define SDMMC_INT_TXDR BIT(4)
98#define SDMMC_INT_DATA_OVER BIT(3)
99#define SDMMC_INT_CMD_DONE BIT(2)
100#define SDMMC_INT_RESP_ERR BIT(1)
101#define SDMMC_INT_CD BIT(0)
102#define SDMMC_INT_ERROR 0xbfc2
103/* Command register defines */
104#define SDMMC_CMD_START BIT(31)
105#define SDMMC_CMD_CCS_EXP BIT(23)
106#define SDMMC_CMD_CEATA_RD BIT(22)
107#define SDMMC_CMD_UPD_CLK BIT(21)
108#define SDMMC_CMD_INIT BIT(15)
109#define SDMMC_CMD_STOP BIT(14)
110#define SDMMC_CMD_PRV_DAT_WAIT BIT(13)
111#define SDMMC_CMD_SEND_STOP BIT(12)
112#define SDMMC_CMD_STRM_MODE BIT(11)
113#define SDMMC_CMD_DAT_WR BIT(10)
114#define SDMMC_CMD_DAT_EXP BIT(9)
115#define SDMMC_CMD_RESP_CRC BIT(8)
116#define SDMMC_CMD_RESP_LONG BIT(7)
117#define SDMMC_CMD_RESP_EXP BIT(6)
118#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
119/* Status register defines */
120#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
121#define SDMMC_FIFO_SZ 32
122/* Internal DMAC interrupt defines */
123#define SDMMC_IDMAC_INT_AI BIT(9)
124#define SDMMC_IDMAC_INT_NI BIT(8)
125#define SDMMC_IDMAC_INT_CES BIT(5)
126#define SDMMC_IDMAC_INT_DU BIT(4)
127#define SDMMC_IDMAC_INT_FBE BIT(2)
128#define SDMMC_IDMAC_INT_RI BIT(1)
129#define SDMMC_IDMAC_INT_TI BIT(0)
130/* Internal DMAC bus mode bits */
131#define SDMMC_IDMAC_ENABLE BIT(7)
132#define SDMMC_IDMAC_FB BIT(1)
133#define SDMMC_IDMAC_SWRESET BIT(0)
134
135/* Register access macros */
136#define mci_readl(dev, reg) \
137 __raw_readl(dev->regs + SDMMC_##reg)
138#define mci_writel(dev, reg, value) \
139 __raw_writel((value), dev->regs + SDMMC_##reg)
140
141/* 16-bit FIFO access macros */
142#define mci_readw(dev, reg) \
143 __raw_readw(dev->regs + SDMMC_##reg)
144#define mci_writew(dev, reg, value) \
145 __raw_writew((value), dev->regs + SDMMC_##reg)
146
147/* 64-bit FIFO access macros */
148#ifdef readq
149#define mci_readq(dev, reg) \
150 __raw_readq(dev->regs + SDMMC_##reg)
151#define mci_writeq(dev, reg, value) \
152 __raw_writeq((value), dev->regs + SDMMC_##reg)
153#else
154/*
155 * Dummy readq implementation for architectures that don't define it.
156 *
157 * We would assume that none of these architectures would configure
158 * the IP block with a 64bit FIFO width, so this code will never be
159 * executed on those machines. Defining these macros here keeps the
160 * rest of the code free from ifdefs.
161 */
162#define mci_readq(dev, reg) \
163 (*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
164#define mci_writeq(dev, reg, value) \
165 (*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
166#endif
167
168#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 5a950b16d9e6..881f7ba545ae 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -966,8 +966,7 @@ static int __init imxmci_probe(struct platform_device *pdev)
966 mmc->caps = MMC_CAP_4_BIT_DATA; 966 mmc->caps = MMC_CAP_4_BIT_DATA;
967 967
968 /* MMC core transfer sizes tunable parameters */ 968 /* MMC core transfer sizes tunable parameters */
969 mmc->max_hw_segs = 64; 969 mmc->max_segs = 64;
970 mmc->max_phys_segs = 64;
971 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */ 970 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
972 mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */ 971 mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
973 mmc->max_blk_size = 2048; 972 mmc->max_blk_size = 2048;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index ad4f9870e3ca..74218ad677e4 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/mmc/host.h> 16#include <linux/mmc/host.h>
17#include <linux/err.h>
17#include <linux/io.h> 18#include <linux/io.h>
18#include <linux/irq.h> 19#include <linux/irq.h>
19#include <linux/interrupt.h> 20#include <linux/interrupt.h>
@@ -827,8 +828,8 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)
827 } 828 }
828 829
829 host->clk = clk_get(&pdev->dev, "mmc"); 830 host->clk = clk_get(&pdev->dev, "mmc");
830 if (!host->clk) { 831 if (IS_ERR(host->clk)) {
831 ret = -ENOENT; 832 ret = PTR_ERR(host->clk);
832 dev_err(&pdev->dev, "Failed to get mmc clock\n"); 833 dev_err(&pdev->dev, "Failed to get mmc clock\n");
833 goto err_free_host; 834 goto err_free_host;
834 } 835 }
@@ -876,8 +877,7 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)
876 mmc->max_blk_count = (1 << 15) - 1; 877 mmc->max_blk_count = (1 << 15) - 1;
877 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 878 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
878 879
879 mmc->max_phys_segs = 128; 880 mmc->max_segs = 128;
880 mmc->max_hw_segs = 128;
881 mmc->max_seg_size = mmc->max_req_size; 881 mmc->max_seg_size = mmc->max_req_size;
882 882
883 host->mmc = mmc; 883 host->mmc = mmc;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 62a35822003e..7c1e16aaf17f 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -99,7 +99,7 @@
99#define r1b_timeout (HZ * 3) 99#define r1b_timeout (HZ * 3)
100 100
101/* One of the critical speed parameters is the amount of data which may 101/* One of the critical speed parameters is the amount of data which may
102 * be transfered in one command. If this value is too low, the SD card 102 * be transferred in one command. If this value is too low, the SD card
103 * controller has to do multiple partial block writes (argggh!). With 103 * controller has to do multiple partial block writes (argggh!). With
104 * today (2008) SD cards there is little speed gain if we transfer more 104 * today (2008) SD cards there is little speed gain if we transfer more
105 * than 64 KBytes at a time. So use this value until there is any indication 105 * than 64 KBytes at a time. So use this value until there is any indication
@@ -1055,6 +1055,8 @@ static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
1055{ 1055{
1056 struct mmc_spi_host *host = mmc_priv(mmc); 1056 struct mmc_spi_host *host = mmc_priv(mmc);
1057 int status = -EINVAL; 1057 int status = -EINVAL;
1058 int crc_retry = 5;
1059 struct mmc_command stop;
1058 1060
1059#ifdef DEBUG 1061#ifdef DEBUG
1060 /* MMC core and layered drivers *MUST* issue SPI-aware commands */ 1062 /* MMC core and layered drivers *MUST* issue SPI-aware commands */
@@ -1087,10 +1089,29 @@ static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
1087 /* request exclusive bus access */ 1089 /* request exclusive bus access */
1088 spi_bus_lock(host->spi->master); 1090 spi_bus_lock(host->spi->master);
1089 1091
1092crc_recover:
1090 /* issue command; then optionally data and stop */ 1093 /* issue command; then optionally data and stop */
1091 status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL); 1094 status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
1092 if (status == 0 && mrq->data) { 1095 if (status == 0 && mrq->data) {
1093 mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz); 1096 mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);
1097
1098 /*
1099 * The SPI bus is not always reliable for large data transfers.
1100 * If an occasional crc error is reported by the SD device with
1101 * data read/write over SPI, it may be recovered by repeating
1102 * the last SD command again. The retry count is set to 5 to
1103 * ensure the driver passes stress tests.
1104 */
1105 if (mrq->data->error == -EILSEQ && crc_retry) {
1106 stop.opcode = MMC_STOP_TRANSMISSION;
1107 stop.arg = 0;
1108 stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1109 status = mmc_spi_command_send(host, mrq, &stop, 0);
1110 crc_retry--;
1111 mrq->data->error = 0;
1112 goto crc_recover;
1113 }
1114
1094 if (mrq->stop) 1115 if (mrq->stop)
1095 status = mmc_spi_command_send(host, mrq, mrq->stop, 0); 1116 status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
1096 else 1117 else
@@ -1345,8 +1366,7 @@ static int mmc_spi_probe(struct spi_device *spi)
1345 1366
1346 mmc->ops = &mmc_spi_ops; 1367 mmc->ops = &mmc_spi_ops;
1347 mmc->max_blk_size = MMC_SPI_BLOCKSIZE; 1368 mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
1348 mmc->max_hw_segs = MMC_SPI_BLOCKSATONCE; 1369 mmc->max_segs = MMC_SPI_BLOCKSATONCE;
1349 mmc->max_phys_segs = MMC_SPI_BLOCKSATONCE;
1350 mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE; 1370 mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
1351 mmc->max_blk_count = MMC_SPI_BLOCKSATONCE; 1371 mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
1352 1372
@@ -1496,21 +1516,17 @@ static int __devexit mmc_spi_remove(struct spi_device *spi)
1496 return 0; 1516 return 0;
1497} 1517}
1498 1518
1499#if defined(CONFIG_OF)
1500static struct of_device_id mmc_spi_of_match_table[] __devinitdata = { 1519static struct of_device_id mmc_spi_of_match_table[] __devinitdata = {
1501 { .compatible = "mmc-spi-slot", }, 1520 { .compatible = "mmc-spi-slot", },
1502 {}, 1521 {},
1503}; 1522};
1504#endif
1505 1523
1506static struct spi_driver mmc_spi_driver = { 1524static struct spi_driver mmc_spi_driver = {
1507 .driver = { 1525 .driver = {
1508 .name = "mmc_spi", 1526 .name = "mmc_spi",
1509 .bus = &spi_bus_type, 1527 .bus = &spi_bus_type,
1510 .owner = THIS_MODULE, 1528 .owner = THIS_MODULE,
1511#if defined(CONFIG_OF)
1512 .of_match_table = mmc_spi_of_match_table, 1529 .of_match_table = mmc_spi_of_match_table,
1513#endif
1514 }, 1530 },
1515 .probe = mmc_spi_probe, 1531 .probe = mmc_spi_probe,
1516 .remove = __devexit_p(mmc_spi_remove), 1532 .remove = __devexit_p(mmc_spi_remove),
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 840b301b5671..fe140724a02e 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver 2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
3 * 3 *
4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. 4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
5 * Copyright (C) 2010 ST-Ericsson AB. 5 * Copyright (C) 2010 ST-Ericsson SA
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -14,17 +14,21 @@
14#include <linux/ioport.h> 14#include <linux/ioport.h>
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/kernel.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
18#include <linux/err.h> 19#include <linux/err.h>
19#include <linux/highmem.h> 20#include <linux/highmem.h>
20#include <linux/log2.h> 21#include <linux/log2.h>
21#include <linux/mmc/host.h> 22#include <linux/mmc/host.h>
23#include <linux/mmc/card.h>
22#include <linux/amba/bus.h> 24#include <linux/amba/bus.h>
23#include <linux/clk.h> 25#include <linux/clk.h>
24#include <linux/scatterlist.h> 26#include <linux/scatterlist.h>
25#include <linux/gpio.h> 27#include <linux/gpio.h>
26#include <linux/amba/mmci.h>
27#include <linux/regulator/consumer.h> 28#include <linux/regulator/consumer.h>
29#include <linux/dmaengine.h>
30#include <linux/dma-mapping.h>
31#include <linux/amba/mmci.h>
28 32
29#include <asm/div64.h> 33#include <asm/div64.h>
30#include <asm/io.h> 34#include <asm/io.h>
@@ -41,27 +45,66 @@ static unsigned int fmax = 515633;
41 * @clkreg: default value for MCICLOCK register 45 * @clkreg: default value for MCICLOCK register
42 * @clkreg_enable: enable value for MMCICLOCK register 46 * @clkreg_enable: enable value for MMCICLOCK register
43 * @datalength_bits: number of bits in the MMCIDATALENGTH register 47 * @datalength_bits: number of bits in the MMCIDATALENGTH register
48 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
49 * is asserted (likewise for RX)
50 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
51 * is asserted (likewise for RX)
52 * @sdio: variant supports SDIO
53 * @st_clkdiv: true if using a ST-specific clock divider algorithm
54 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
44 */ 55 */
45struct variant_data { 56struct variant_data {
46 unsigned int clkreg; 57 unsigned int clkreg;
47 unsigned int clkreg_enable; 58 unsigned int clkreg_enable;
48 unsigned int datalength_bits; 59 unsigned int datalength_bits;
60 unsigned int fifosize;
61 unsigned int fifohalfsize;
62 bool sdio;
63 bool st_clkdiv;
64 bool blksz_datactrl16;
49}; 65};
50 66
51static struct variant_data variant_arm = { 67static struct variant_data variant_arm = {
68 .fifosize = 16 * 4,
69 .fifohalfsize = 8 * 4,
70 .datalength_bits = 16,
71};
72
73static struct variant_data variant_arm_extended_fifo = {
74 .fifosize = 128 * 4,
75 .fifohalfsize = 64 * 4,
52 .datalength_bits = 16, 76 .datalength_bits = 16,
53}; 77};
54 78
55static struct variant_data variant_u300 = { 79static struct variant_data variant_u300 = {
56 .clkreg_enable = 1 << 13, /* HWFCEN */ 80 .fifosize = 16 * 4,
81 .fifohalfsize = 8 * 4,
82 .clkreg_enable = MCI_ST_U300_HWFCEN,
57 .datalength_bits = 16, 83 .datalength_bits = 16,
84 .sdio = true,
58}; 85};
59 86
60static struct variant_data variant_ux500 = { 87static struct variant_data variant_ux500 = {
88 .fifosize = 30 * 4,
89 .fifohalfsize = 8 * 4,
61 .clkreg = MCI_CLK_ENABLE, 90 .clkreg = MCI_CLK_ENABLE,
62 .clkreg_enable = 1 << 14, /* HWFCEN */ 91 .clkreg_enable = MCI_ST_UX500_HWFCEN,
63 .datalength_bits = 24, 92 .datalength_bits = 24,
93 .sdio = true,
94 .st_clkdiv = true,
64}; 95};
96
97static struct variant_data variant_ux500v2 = {
98 .fifosize = 30 * 4,
99 .fifohalfsize = 8 * 4,
100 .clkreg = MCI_CLK_ENABLE,
101 .clkreg_enable = MCI_ST_UX500_HWFCEN,
102 .datalength_bits = 24,
103 .sdio = true,
104 .st_clkdiv = true,
105 .blksz_datactrl16 = true,
106};
107
65/* 108/*
66 * This must be called with host->lock held 109 * This must be called with host->lock held
67 */ 110 */
@@ -73,8 +116,25 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
73 if (desired) { 116 if (desired) {
74 if (desired >= host->mclk) { 117 if (desired >= host->mclk) {
75 clk = MCI_CLK_BYPASS; 118 clk = MCI_CLK_BYPASS;
119 if (variant->st_clkdiv)
120 clk |= MCI_ST_UX500_NEG_EDGE;
76 host->cclk = host->mclk; 121 host->cclk = host->mclk;
122 } else if (variant->st_clkdiv) {
123 /*
124 * DB8500 TRM says f = mclk / (clkdiv + 2)
125 * => clkdiv = (mclk / f) - 2
126 * Round the divider up so we don't exceed the max
127 * frequency
128 */
129 clk = DIV_ROUND_UP(host->mclk, desired) - 2;
130 if (clk >= 256)
131 clk = 255;
132 host->cclk = host->mclk / (clk + 2);
77 } else { 133 } else {
134 /*
135 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
136 * => clkdiv = mclk / (2 * f) - 1
137 */
78 clk = host->mclk / (2 * desired) - 1; 138 clk = host->mclk / (2 * desired) - 1;
79 if (clk >= 256) 139 if (clk >= 256)
80 clk = 255; 140 clk = 255;
@@ -105,9 +165,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
105 host->mrq = NULL; 165 host->mrq = NULL;
106 host->cmd = NULL; 166 host->cmd = NULL;
107 167
108 if (mrq->data)
109 mrq->data->bytes_xfered = host->data_xfered;
110
111 /* 168 /*
112 * Need to drop the host lock here; mmc_request_done may call 169 * Need to drop the host lock here; mmc_request_done may call
113 * back into the driver... 170 * back into the driver...
@@ -117,10 +174,26 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
117 spin_lock(&host->lock); 174 spin_lock(&host->lock);
118} 175}
119 176
177static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
178{
179 void __iomem *base = host->base;
180
181 if (host->singleirq) {
182 unsigned int mask0 = readl(base + MMCIMASK0);
183
184 mask0 &= ~MCI_IRQ1MASK;
185 mask0 |= mask;
186
187 writel(mask0, base + MMCIMASK0);
188 }
189
190 writel(mask, base + MMCIMASK1);
191}
192
120static void mmci_stop_data(struct mmci_host *host) 193static void mmci_stop_data(struct mmci_host *host)
121{ 194{
122 writel(0, host->base + MMCIDATACTRL); 195 writel(0, host->base + MMCIDATACTRL);
123 writel(0, host->base + MMCIMASK1); 196 mmci_set_mask1(host, 0);
124 host->data = NULL; 197 host->data = NULL;
125} 198}
126 199
@@ -136,8 +209,251 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
136 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 209 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
137} 210}
138 211
212/*
213 * All the DMA operation mode stuff goes inside this ifdef.
214 * This assumes that you have a generic DMA device interface,
215 * no custom DMA interfaces are supported.
216 */
217#ifdef CONFIG_DMA_ENGINE
218static void __devinit mmci_dma_setup(struct mmci_host *host)
219{
220 struct mmci_platform_data *plat = host->plat;
221 const char *rxname, *txname;
222 dma_cap_mask_t mask;
223
224 if (!plat || !plat->dma_filter) {
225 dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
226 return;
227 }
228
229 /* Try to acquire a generic DMA engine slave channel */
230 dma_cap_zero(mask);
231 dma_cap_set(DMA_SLAVE, mask);
232
233 /*
234 * If only an RX channel is specified, the driver will
235 * attempt to use it bidirectionally, however if it is
236 * is specified but cannot be located, DMA will be disabled.
237 */
238 if (plat->dma_rx_param) {
239 host->dma_rx_channel = dma_request_channel(mask,
240 plat->dma_filter,
241 plat->dma_rx_param);
242 /* E.g if no DMA hardware is present */
243 if (!host->dma_rx_channel)
244 dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
245 }
246
247 if (plat->dma_tx_param) {
248 host->dma_tx_channel = dma_request_channel(mask,
249 plat->dma_filter,
250 plat->dma_tx_param);
251 if (!host->dma_tx_channel)
252 dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
253 } else {
254 host->dma_tx_channel = host->dma_rx_channel;
255 }
256
257 if (host->dma_rx_channel)
258 rxname = dma_chan_name(host->dma_rx_channel);
259 else
260 rxname = "none";
261
262 if (host->dma_tx_channel)
263 txname = dma_chan_name(host->dma_tx_channel);
264 else
265 txname = "none";
266
267 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
268 rxname, txname);
269
270 /*
271 * Limit the maximum segment size in any SG entry according to
272 * the parameters of the DMA engine device.
273 */
274 if (host->dma_tx_channel) {
275 struct device *dev = host->dma_tx_channel->device->dev;
276 unsigned int max_seg_size = dma_get_max_seg_size(dev);
277
278 if (max_seg_size < host->mmc->max_seg_size)
279 host->mmc->max_seg_size = max_seg_size;
280 }
281 if (host->dma_rx_channel) {
282 struct device *dev = host->dma_rx_channel->device->dev;
283 unsigned int max_seg_size = dma_get_max_seg_size(dev);
284
285 if (max_seg_size < host->mmc->max_seg_size)
286 host->mmc->max_seg_size = max_seg_size;
287 }
288}
289
290/*
291 * This is used in __devinit or __devexit so inline it
292 * so it can be discarded.
293 */
294static inline void mmci_dma_release(struct mmci_host *host)
295{
296 struct mmci_platform_data *plat = host->plat;
297
298 if (host->dma_rx_channel)
299 dma_release_channel(host->dma_rx_channel);
300 if (host->dma_tx_channel && plat->dma_tx_param)
301 dma_release_channel(host->dma_tx_channel);
302 host->dma_rx_channel = host->dma_tx_channel = NULL;
303}
304
305static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
306{
307 struct dma_chan *chan = host->dma_current;
308 enum dma_data_direction dir;
309 u32 status;
310 int i;
311
312 /* Wait up to 1ms for the DMA to complete */
313 for (i = 0; ; i++) {
314 status = readl(host->base + MMCISTATUS);
315 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
316 break;
317 udelay(10);
318 }
319
320 /*
321 * Check to see whether we still have some data left in the FIFO -
322 * this catches DMA controllers which are unable to monitor the
323 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
324 * contiguous buffers. On TX, we'll get a FIFO underrun error.
325 */
326 if (status & MCI_RXDATAAVLBLMASK) {
327 dmaengine_terminate_all(chan);
328 if (!data->error)
329 data->error = -EIO;
330 }
331
332 if (data->flags & MMC_DATA_WRITE) {
333 dir = DMA_TO_DEVICE;
334 } else {
335 dir = DMA_FROM_DEVICE;
336 }
337
338 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
339
340 /*
341 * Use of DMA with scatter-gather is impossible.
342 * Give up with DMA and switch back to PIO mode.
343 */
344 if (status & MCI_RXDATAAVLBLMASK) {
345 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
346 mmci_dma_release(host);
347 }
348}
349
350static void mmci_dma_data_error(struct mmci_host *host)
351{
352 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
353 dmaengine_terminate_all(host->dma_current);
354}
355
356static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
357{
358 struct variant_data *variant = host->variant;
359 struct dma_slave_config conf = {
360 .src_addr = host->phybase + MMCIFIFO,
361 .dst_addr = host->phybase + MMCIFIFO,
362 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
363 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
364 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
365 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
366 };
367 struct mmc_data *data = host->data;
368 struct dma_chan *chan;
369 struct dma_device *device;
370 struct dma_async_tx_descriptor *desc;
371 int nr_sg;
372
373 host->dma_current = NULL;
374
375 if (data->flags & MMC_DATA_READ) {
376 conf.direction = DMA_FROM_DEVICE;
377 chan = host->dma_rx_channel;
378 } else {
379 conf.direction = DMA_TO_DEVICE;
380 chan = host->dma_tx_channel;
381 }
382
383 /* If there's no DMA channel, fall back to PIO */
384 if (!chan)
385 return -EINVAL;
386
387 /* If less than or equal to the fifo size, don't bother with DMA */
388 if (host->size <= variant->fifosize)
389 return -EINVAL;
390
391 device = chan->device;
392 nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
393 if (nr_sg == 0)
394 return -EINVAL;
395
396 dmaengine_slave_config(chan, &conf);
397 desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
398 conf.direction, DMA_CTRL_ACK);
399 if (!desc)
400 goto unmap_exit;
401
402 /* Okay, go for it. */
403 host->dma_current = chan;
404
405 dev_vdbg(mmc_dev(host->mmc),
406 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
407 data->sg_len, data->blksz, data->blocks, data->flags);
408 dmaengine_submit(desc);
409 dma_async_issue_pending(chan);
410
411 datactrl |= MCI_DPSM_DMAENABLE;
412
413 /* Trigger the DMA transfer */
414 writel(datactrl, host->base + MMCIDATACTRL);
415
416 /*
417 * Let the MMCI say when the data is ended and it's time
418 * to fire next DMA request. When that happens, MMCI will
419 * call mmci_data_end()
420 */
421 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
422 host->base + MMCIMASK0);
423 return 0;
424
425unmap_exit:
426 dmaengine_terminate_all(chan);
427 dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
428 return -ENOMEM;
429}
430#else
431/* Blank functions if the DMA engine is not available */
432static inline void mmci_dma_setup(struct mmci_host *host)
433{
434}
435
436static inline void mmci_dma_release(struct mmci_host *host)
437{
438}
439
440static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
441{
442}
443
444static inline void mmci_dma_data_error(struct mmci_host *host)
445{
446}
447
448static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
449{
450 return -ENOSYS;
451}
452#endif
453
139static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) 454static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
140{ 455{
456 struct variant_data *variant = host->variant;
141 unsigned int datactrl, timeout, irqmask; 457 unsigned int datactrl, timeout, irqmask;
142 unsigned long long clks; 458 unsigned long long clks;
143 void __iomem *base; 459 void __iomem *base;
@@ -148,9 +464,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
148 464
149 host->data = data; 465 host->data = data;
150 host->size = data->blksz * data->blocks; 466 host->size = data->blksz * data->blocks;
151 host->data_xfered = 0; 467 data->bytes_xfered = 0;
152
153 mmci_init_sg(host, data);
154 468
155 clks = (unsigned long long)data->timeout_ns * host->cclk; 469 clks = (unsigned long long)data->timeout_ns * host->cclk;
156 do_div(clks, 1000000000UL); 470 do_div(clks, 1000000000UL);
@@ -164,16 +478,33 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
164 blksz_bits = ffs(data->blksz) - 1; 478 blksz_bits = ffs(data->blksz) - 1;
165 BUG_ON(1 << blksz_bits != data->blksz); 479 BUG_ON(1 << blksz_bits != data->blksz);
166 480
167 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 481 if (variant->blksz_datactrl16)
168 if (data->flags & MMC_DATA_READ) { 482 datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
483 else
484 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
485
486 if (data->flags & MMC_DATA_READ)
169 datactrl |= MCI_DPSM_DIRECTION; 487 datactrl |= MCI_DPSM_DIRECTION;
488
489 /*
490 * Attempt to use DMA operation mode, if this
491 * should fail, fall back to PIO mode
492 */
493 if (!mmci_dma_start_data(host, datactrl))
494 return;
495
496 /* IRQ mode, map the SG list for CPU reading/writing */
497 mmci_init_sg(host, data);
498
499 if (data->flags & MMC_DATA_READ) {
170 irqmask = MCI_RXFIFOHALFFULLMASK; 500 irqmask = MCI_RXFIFOHALFFULLMASK;
171 501
172 /* 502 /*
173 * If we have less than a FIFOSIZE of bytes to transfer, 503 * If we have less than the fifo 'half-full' threshold to
174 * trigger a PIO interrupt as soon as any data is available. 504 * transfer, trigger a PIO interrupt as soon as any data
505 * is available.
175 */ 506 */
176 if (host->size < MCI_FIFOSIZE) 507 if (host->size < variant->fifohalfsize)
177 irqmask |= MCI_RXDATAAVLBLMASK; 508 irqmask |= MCI_RXDATAAVLBLMASK;
178 } else { 509 } else {
179 /* 510 /*
@@ -183,9 +514,14 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
183 irqmask = MCI_TXFIFOHALFEMPTYMASK; 514 irqmask = MCI_TXFIFOHALFEMPTYMASK;
184 } 515 }
185 516
517 /* The ST Micro variants has a special bit to enable SDIO */
518 if (variant->sdio && host->mmc->card)
519 if (mmc_card_sdio(host->mmc->card))
520 datactrl |= MCI_ST_DPSM_SDIOEN;
521
186 writel(datactrl, base + MMCIDATACTRL); 522 writel(datactrl, base + MMCIDATACTRL);
187 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); 523 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
188 writel(irqmask, base + MMCIMASK1); 524 mmci_set_mask1(host, irqmask);
189} 525}
190 526
191static void 527static void
@@ -220,49 +556,58 @@ static void
220mmci_data_irq(struct mmci_host *host, struct mmc_data *data, 556mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
221 unsigned int status) 557 unsigned int status)
222{ 558{
223 if (status & MCI_DATABLOCKEND) { 559 /* First check for errors */
224 host->data_xfered += data->blksz; 560 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
225#ifdef CONFIG_ARCH_U300 561 u32 remain, success;
562
563 /* Terminate the DMA transfer */
564 if (dma_inprogress(host))
565 mmci_dma_data_error(host);
566
226 /* 567 /*
227 * On the U300 some signal or other is 568 * Calculate how far we are into the transfer. Note that
228 * badly routed so that a data write does 569 * the data counter gives the number of bytes transferred
229 * not properly terminate with a MCI_DATAEND 570 * on the MMC bus, not on the host side. On reads, this
230 * status flag. This quirk will make writes 571 * can be as much as a FIFO-worth of data ahead. This
231 * work again. 572 * matters for FIFO overruns only.
232 */ 573 */
233 if (data->flags & MMC_DATA_WRITE) 574 remain = readl(host->base + MMCIDATACNT);
234 status |= MCI_DATAEND; 575 success = data->blksz * data->blocks - remain;
235#endif 576
236 } 577 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
237 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 578 status, success);
238 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status); 579 if (status & MCI_DATACRCFAIL) {
239 if (status & MCI_DATACRCFAIL) 580 /* Last block was not successful */
581 success -= 1;
240 data->error = -EILSEQ; 582 data->error = -EILSEQ;
241 else if (status & MCI_DATATIMEOUT) 583 } else if (status & MCI_DATATIMEOUT) {
242 data->error = -ETIMEDOUT; 584 data->error = -ETIMEDOUT;
243 else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) 585 } else if (status & MCI_STARTBITERR) {
586 data->error = -ECOMM;
587 } else if (status & MCI_TXUNDERRUN) {
588 data->error = -EIO;
589 } else if (status & MCI_RXOVERRUN) {
590 if (success > host->variant->fifosize)
591 success -= host->variant->fifosize;
592 else
593 success = 0;
244 data->error = -EIO; 594 data->error = -EIO;
245 status |= MCI_DATAEND;
246
247 /*
248 * We hit an error condition. Ensure that any data
249 * partially written to a page is properly coherent.
250 */
251 if (data->flags & MMC_DATA_READ) {
252 struct sg_mapping_iter *sg_miter = &host->sg_miter;
253 unsigned long flags;
254
255 local_irq_save(flags);
256 if (sg_miter_next(sg_miter)) {
257 flush_dcache_page(sg_miter->page);
258 sg_miter_stop(sg_miter);
259 }
260 local_irq_restore(flags);
261 } 595 }
596 data->bytes_xfered = round_down(success, data->blksz);
262 } 597 }
263 if (status & MCI_DATAEND) { 598
599 if (status & MCI_DATABLOCKEND)
600 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
601
602 if (status & MCI_DATAEND || data->error) {
603 if (dma_inprogress(host))
604 mmci_dma_unmap(host, data);
264 mmci_stop_data(host); 605 mmci_stop_data(host);
265 606
607 if (!data->error)
608 /* The error clause is handled above, success! */
609 data->bytes_xfered = data->blksz * data->blocks;
610
266 if (!data->stop) { 611 if (!data->stop) {
267 mmci_request_end(host, data->mrq); 612 mmci_request_end(host, data->mrq);
268 } else { 613 } else {
@@ -279,15 +624,15 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
279 624
280 host->cmd = NULL; 625 host->cmd = NULL;
281 626
282 cmd->resp[0] = readl(base + MMCIRESPONSE0);
283 cmd->resp[1] = readl(base + MMCIRESPONSE1);
284 cmd->resp[2] = readl(base + MMCIRESPONSE2);
285 cmd->resp[3] = readl(base + MMCIRESPONSE3);
286
287 if (status & MCI_CMDTIMEOUT) { 627 if (status & MCI_CMDTIMEOUT) {
288 cmd->error = -ETIMEDOUT; 628 cmd->error = -ETIMEDOUT;
289 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 629 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
290 cmd->error = -EILSEQ; 630 cmd->error = -EILSEQ;
631 } else {
632 cmd->resp[0] = readl(base + MMCIRESPONSE0);
633 cmd->resp[1] = readl(base + MMCIRESPONSE1);
634 cmd->resp[2] = readl(base + MMCIRESPONSE2);
635 cmd->resp[3] = readl(base + MMCIRESPONSE3);
291 } 636 }
292 637
293 if (!cmd->data || cmd->error) { 638 if (!cmd->data || cmd->error) {
@@ -332,16 +677,43 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
332 677
333static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) 678static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
334{ 679{
680 struct variant_data *variant = host->variant;
335 void __iomem *base = host->base; 681 void __iomem *base = host->base;
336 char *ptr = buffer; 682 char *ptr = buffer;
337 683
338 do { 684 do {
339 unsigned int count, maxcnt; 685 unsigned int count, maxcnt;
340 686
341 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE; 687 maxcnt = status & MCI_TXFIFOEMPTY ?
688 variant->fifosize : variant->fifohalfsize;
342 count = min(remain, maxcnt); 689 count = min(remain, maxcnt);
343 690
344 writesl(base + MMCIFIFO, ptr, count >> 2); 691 /*
692 * The ST Micro variant for SDIO transfer sizes
693 * less then 8 bytes should have clock H/W flow
694 * control disabled.
695 */
696 if (variant->sdio &&
697 mmc_card_sdio(host->mmc->card)) {
698 if (count < 8)
699 writel(readl(host->base + MMCICLOCK) &
700 ~variant->clkreg_enable,
701 host->base + MMCICLOCK);
702 else
703 writel(readl(host->base + MMCICLOCK) |
704 variant->clkreg_enable,
705 host->base + MMCICLOCK);
706 }
707
708 /*
709 * SDIO especially may want to send something that is
710 * not divisible by 4 (as opposed to card sectors
711 * etc), and the FIFO only accept full 32-bit writes.
712 * So compensate by adding +3 on the count, a single
713 * byte become a 32bit write, 7 bytes will be two
714 * 32bit writes etc.
715 */
716 writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
345 717
346 ptr += count; 718 ptr += count;
347 remain -= count; 719 remain -= count;
@@ -362,6 +734,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
362{ 734{
363 struct mmci_host *host = dev_id; 735 struct mmci_host *host = dev_id;
364 struct sg_mapping_iter *sg_miter = &host->sg_miter; 736 struct sg_mapping_iter *sg_miter = &host->sg_miter;
737 struct variant_data *variant = host->variant;
365 void __iomem *base = host->base; 738 void __iomem *base = host->base;
366 unsigned long flags; 739 unsigned long flags;
367 u32 status; 740 u32 status;
@@ -406,9 +779,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
406 if (remain) 779 if (remain)
407 break; 780 break;
408 781
409 if (status & MCI_RXACTIVE)
410 flush_dcache_page(sg_miter->page);
411
412 status = readl(base + MMCISTATUS); 782 status = readl(base + MMCISTATUS);
413 } while (1); 783 } while (1);
414 784
@@ -417,11 +787,11 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
417 local_irq_restore(flags); 787 local_irq_restore(flags);
418 788
419 /* 789 /*
420 * If we're nearing the end of the read, switch to 790 * If we have less than the fifo 'half-full' threshold to transfer,
421 * "any data available" mode. 791 * trigger a PIO interrupt as soon as any data is available.
422 */ 792 */
423 if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE) 793 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
424 writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1); 794 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
425 795
426 /* 796 /*
427 * If we run out of data, disable the data IRQs; this 797 * If we run out of data, disable the data IRQs; this
@@ -430,7 +800,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
430 * stops us racing with our data end IRQ. 800 * stops us racing with our data end IRQ.
431 */ 801 */
432 if (host->size == 0) { 802 if (host->size == 0) {
433 writel(0, base + MMCIMASK1); 803 mmci_set_mask1(host, 0);
434 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); 804 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
435 } 805 }
436 806
@@ -453,6 +823,14 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
453 struct mmc_data *data; 823 struct mmc_data *data;
454 824
455 status = readl(host->base + MMCISTATUS); 825 status = readl(host->base + MMCISTATUS);
826
827 if (host->singleirq) {
828 if (status & readl(host->base + MMCIMASK1))
829 mmci_pio_irq(irq, dev_id);
830
831 status &= ~MCI_IRQ1MASK;
832 }
833
456 status &= readl(host->base + MMCIMASK0); 834 status &= readl(host->base + MMCIMASK0);
457 writel(status, host->base + MMCICLEAR); 835 writel(status, host->base + MMCICLEAR);
458 836
@@ -507,19 +885,27 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
507 struct mmci_host *host = mmc_priv(mmc); 885 struct mmci_host *host = mmc_priv(mmc);
508 u32 pwr = 0; 886 u32 pwr = 0;
509 unsigned long flags; 887 unsigned long flags;
888 int ret;
510 889
511 switch (ios->power_mode) { 890 switch (ios->power_mode) {
512 case MMC_POWER_OFF: 891 case MMC_POWER_OFF:
513 if(host->vcc && 892 if (host->vcc)
514 regulator_is_enabled(host->vcc)) 893 ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
515 regulator_disable(host->vcc);
516 break; 894 break;
517 case MMC_POWER_UP: 895 case MMC_POWER_UP:
518#ifdef CONFIG_REGULATOR 896 if (host->vcc) {
519 if (host->vcc) 897 ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
520 /* This implicitly enables the regulator */ 898 if (ret) {
521 mmc_regulator_set_ocr(host->vcc, ios->vdd); 899 dev_err(mmc_dev(mmc), "unable to set OCR\n");
522#endif 900 /*
901 * The .set_ios() function in the mmc_host_ops
902 * struct return void, and failing to set the
903 * power should be rare so we print an error
904 * and return here.
905 */
906 return;
907 }
908 }
523 if (host->plat->vdd_handler) 909 if (host->plat->vdd_handler)
524 pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd, 910 pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
525 ios->power_mode); 911 ios->power_mode);
@@ -564,18 +950,23 @@ static int mmci_get_ro(struct mmc_host *mmc)
564 if (host->gpio_wp == -ENOSYS) 950 if (host->gpio_wp == -ENOSYS)
565 return -ENOSYS; 951 return -ENOSYS;
566 952
567 return gpio_get_value(host->gpio_wp); 953 return gpio_get_value_cansleep(host->gpio_wp);
568} 954}
569 955
570static int mmci_get_cd(struct mmc_host *mmc) 956static int mmci_get_cd(struct mmc_host *mmc)
571{ 957{
572 struct mmci_host *host = mmc_priv(mmc); 958 struct mmci_host *host = mmc_priv(mmc);
959 struct mmci_platform_data *plat = host->plat;
573 unsigned int status; 960 unsigned int status;
574 961
575 if (host->gpio_cd == -ENOSYS) 962 if (host->gpio_cd == -ENOSYS) {
576 status = host->plat->status(mmc_dev(host->mmc)); 963 if (!plat->status)
577 else 964 return 1; /* Assume always present */
578 status = !gpio_get_value(host->gpio_cd); 965
966 status = plat->status(mmc_dev(host->mmc));
967 } else
968 status = !!gpio_get_value_cansleep(host->gpio_cd)
969 ^ plat->cd_invert;
579 970
580 /* 971 /*
581 * Use positive logic throughout - status is zero for no card, 972 * Use positive logic throughout - status is zero for no card,
@@ -584,6 +975,15 @@ static int mmci_get_cd(struct mmc_host *mmc)
584 return status; 975 return status;
585} 976}
586 977
978static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
979{
980 struct mmci_host *host = dev_id;
981
982 mmc_detect_change(host->mmc, msecs_to_jiffies(500));
983
984 return IRQ_HANDLED;
985}
986
587static const struct mmc_host_ops mmci_ops = { 987static const struct mmc_host_ops mmci_ops = {
588 .request = mmci_request, 988 .request = mmci_request,
589 .set_ios = mmci_set_ios, 989 .set_ios = mmci_set_ios,
@@ -591,7 +991,8 @@ static const struct mmc_host_ops mmci_ops = {
591 .get_cd = mmci_get_cd, 991 .get_cd = mmci_get_cd,
592}; 992};
593 993
594static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) 994static int __devinit mmci_probe(struct amba_device *dev,
995 const struct amba_id *id)
595{ 996{
596 struct mmci_platform_data *plat = dev->dev.platform_data; 997 struct mmci_platform_data *plat = dev->dev.platform_data;
597 struct variant_data *variant = id->data; 998 struct variant_data *variant = id->data;
@@ -620,6 +1021,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
620 1021
621 host->gpio_wp = -ENOSYS; 1022 host->gpio_wp = -ENOSYS;
622 host->gpio_cd = -ENOSYS; 1023 host->gpio_cd = -ENOSYS;
1024 host->gpio_cd_irq = -1;
623 1025
624 host->hw_designer = amba_manf(dev); 1026 host->hw_designer = amba_manf(dev);
625 host->hw_revision = amba_rev(dev); 1027 host->hw_revision = amba_rev(dev);
@@ -653,6 +1055,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
653 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", 1055 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
654 host->mclk); 1056 host->mclk);
655 } 1057 }
1058 host->phybase = dev->res.start;
656 host->base = ioremap(dev->res.start, resource_size(&dev->res)); 1059 host->base = ioremap(dev->res.start, resource_size(&dev->res));
657 if (!host->base) { 1060 if (!host->base) {
658 ret = -ENOMEM; 1061 ret = -ENOMEM;
@@ -699,13 +1102,11 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
699 if (host->vcc == NULL) 1102 if (host->vcc == NULL)
700 mmc->ocr_avail = plat->ocr_mask; 1103 mmc->ocr_avail = plat->ocr_mask;
701 mmc->caps = plat->capabilities; 1104 mmc->caps = plat->capabilities;
702 mmc->caps |= MMC_CAP_NEEDS_POLL;
703 1105
704 /* 1106 /*
705 * We can do SGIO 1107 * We can do SGIO
706 */ 1108 */
707 mmc->max_hw_segs = 16; 1109 mmc->max_segs = NR_SG;
708 mmc->max_phys_segs = NR_SG;
709 1110
710 /* 1111 /*
711 * Since only a certain number of bits are valid in the data length 1112 * Since only a certain number of bits are valid in the data length
@@ -744,6 +1145,20 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
744 host->gpio_cd = plat->gpio_cd; 1145 host->gpio_cd = plat->gpio_cd;
745 else if (ret != -ENOSYS) 1146 else if (ret != -ENOSYS)
746 goto err_gpio_cd; 1147 goto err_gpio_cd;
1148
1149 /*
1150 * A gpio pin that will detect cards when inserted and removed
1151 * will most likely want to trigger on the edges if it is
1152 * 0 when ejected and 1 when inserted (or mutatis mutandis
1153 * for the inverted case) so we request triggers on both
1154 * edges.
1155 */
1156 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
1157 mmci_cd_irq,
1158 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
1159 DRIVER_NAME " (cd)", host);
1160 if (ret >= 0)
1161 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
747 } 1162 }
748 if (gpio_is_valid(plat->gpio_wp)) { 1163 if (gpio_is_valid(plat->gpio_wp)) {
749 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); 1164 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
@@ -755,23 +1170,35 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
755 goto err_gpio_wp; 1170 goto err_gpio_wp;
756 } 1171 }
757 1172
1173 if ((host->plat->status || host->gpio_cd != -ENOSYS)
1174 && host->gpio_cd_irq < 0)
1175 mmc->caps |= MMC_CAP_NEEDS_POLL;
1176
758 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); 1177 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
759 if (ret) 1178 if (ret)
760 goto unmap; 1179 goto unmap;
761 1180
762 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host); 1181 if (dev->irq[1] == NO_IRQ)
763 if (ret) 1182 host->singleirq = true;
764 goto irq0_free; 1183 else {
1184 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
1185 DRIVER_NAME " (pio)", host);
1186 if (ret)
1187 goto irq0_free;
1188 }
765 1189
766 writel(MCI_IRQENABLE, host->base + MMCIMASK0); 1190 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
767 1191
768 amba_set_drvdata(dev, mmc); 1192 amba_set_drvdata(dev, mmc);
769 1193
770 mmc_add_host(mmc); 1194 dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
1195 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
1196 amba_rev(dev), (unsigned long long)dev->res.start,
1197 dev->irq[0], dev->irq[1]);
771 1198
772 dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n", 1199 mmci_dma_setup(host);
773 mmc_hostname(mmc), amba_rev(dev), amba_config(dev), 1200
774 (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]); 1201 mmc_add_host(mmc);
775 1202
776 return 0; 1203 return 0;
777 1204
@@ -781,6 +1208,8 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
781 if (host->gpio_wp != -ENOSYS) 1208 if (host->gpio_wp != -ENOSYS)
782 gpio_free(host->gpio_wp); 1209 gpio_free(host->gpio_wp);
783 err_gpio_wp: 1210 err_gpio_wp:
1211 if (host->gpio_cd_irq >= 0)
1212 free_irq(host->gpio_cd_irq, host);
784 if (host->gpio_cd != -ENOSYS) 1213 if (host->gpio_cd != -ENOSYS)
785 gpio_free(host->gpio_cd); 1214 gpio_free(host->gpio_cd);
786 err_gpio_cd: 1215 err_gpio_cd:
@@ -814,11 +1243,15 @@ static int __devexit mmci_remove(struct amba_device *dev)
814 writel(0, host->base + MMCICOMMAND); 1243 writel(0, host->base + MMCICOMMAND);
815 writel(0, host->base + MMCIDATACTRL); 1244 writel(0, host->base + MMCIDATACTRL);
816 1245
1246 mmci_dma_release(host);
817 free_irq(dev->irq[0], host); 1247 free_irq(dev->irq[0], host);
818 free_irq(dev->irq[1], host); 1248 if (!host->singleirq)
1249 free_irq(dev->irq[1], host);
819 1250
820 if (host->gpio_wp != -ENOSYS) 1251 if (host->gpio_wp != -ENOSYS)
821 gpio_free(host->gpio_wp); 1252 gpio_free(host->gpio_wp);
1253 if (host->gpio_cd_irq >= 0)
1254 free_irq(host->gpio_cd_irq, host);
822 if (host->gpio_cd != -ENOSYS) 1255 if (host->gpio_cd != -ENOSYS)
823 gpio_free(host->gpio_cd); 1256 gpio_free(host->gpio_cd);
824 1257
@@ -826,8 +1259,8 @@ static int __devexit mmci_remove(struct amba_device *dev)
826 clk_disable(host->clk); 1259 clk_disable(host->clk);
827 clk_put(host->clk); 1260 clk_put(host->clk);
828 1261
829 if (regulator_is_enabled(host->vcc)) 1262 if (host->vcc)
830 regulator_disable(host->vcc); 1263 mmc_regulator_set_ocr(mmc, host->vcc, 0);
831 regulator_put(host->vcc); 1264 regulator_put(host->vcc);
832 1265
833 mmc_free_host(mmc); 1266 mmc_free_host(mmc);
@@ -878,10 +1311,15 @@ static int mmci_resume(struct amba_device *dev)
878static struct amba_id mmci_ids[] = { 1311static struct amba_id mmci_ids[] = {
879 { 1312 {
880 .id = 0x00041180, 1313 .id = 0x00041180,
881 .mask = 0x000fffff, 1314 .mask = 0xff0fffff,
882 .data = &variant_arm, 1315 .data = &variant_arm,
883 }, 1316 },
884 { 1317 {
1318 .id = 0x01041180,
1319 .mask = 0xff0fffff,
1320 .data = &variant_arm_extended_fifo,
1321 },
1322 {
885 .id = 0x00041181, 1323 .id = 0x00041181,
886 .mask = 0x000fffff, 1324 .mask = 0x000fffff,
887 .data = &variant_arm, 1325 .data = &variant_arm,
@@ -899,9 +1337,14 @@ static struct amba_id mmci_ids[] = {
899 }, 1337 },
900 { 1338 {
901 .id = 0x00480180, 1339 .id = 0x00480180,
902 .mask = 0x00ffffff, 1340 .mask = 0xf0ffffff,
903 .data = &variant_ux500, 1341 .data = &variant_ux500,
904 }, 1342 },
1343 {
1344 .id = 0x10480180,
1345 .mask = 0xf0ffffff,
1346 .data = &variant_ux500v2,
1347 },
905 { 0, 0 }, 1348 { 0, 0 },
906}; 1349};
907 1350
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 68970cfb81e1..2164e8c6476c 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -11,23 +11,33 @@
11#define MCI_PWR_OFF 0x00 11#define MCI_PWR_OFF 0x00
12#define MCI_PWR_UP 0x02 12#define MCI_PWR_UP 0x02
13#define MCI_PWR_ON 0x03 13#define MCI_PWR_ON 0x03
14#define MCI_DATA2DIREN (1 << 2)
15#define MCI_CMDDIREN (1 << 3)
16#define MCI_DATA0DIREN (1 << 4)
17#define MCI_DATA31DIREN (1 << 5)
18#define MCI_OD (1 << 6) 14#define MCI_OD (1 << 6)
19#define MCI_ROD (1 << 7) 15#define MCI_ROD (1 << 7)
20/* The ST Micro version does not have ROD */ 16/*
21#define MCI_FBCLKEN (1 << 7) 17 * The ST Micro version does not have ROD and reuse the voltage registers
22#define MCI_DATA74DIREN (1 << 8) 18 * for direction settings
19 */
20#define MCI_ST_DATA2DIREN (1 << 2)
21#define MCI_ST_CMDDIREN (1 << 3)
22#define MCI_ST_DATA0DIREN (1 << 4)
23#define MCI_ST_DATA31DIREN (1 << 5)
24#define MCI_ST_FBCLKEN (1 << 7)
25#define MCI_ST_DATA74DIREN (1 << 8)
23 26
24#define MMCICLOCK 0x004 27#define MMCICLOCK 0x004
25#define MCI_CLK_ENABLE (1 << 8) 28#define MCI_CLK_ENABLE (1 << 8)
26#define MCI_CLK_PWRSAVE (1 << 9) 29#define MCI_CLK_PWRSAVE (1 << 9)
27#define MCI_CLK_BYPASS (1 << 10) 30#define MCI_CLK_BYPASS (1 << 10)
28#define MCI_4BIT_BUS (1 << 11) 31#define MCI_4BIT_BUS (1 << 11)
29/* 8bit wide buses supported in ST Micro versions */ 32/*
33 * 8bit wide buses, hardware flow contronl, negative edges and clock inversion
34 * supported in ST Micro U300 and Ux500 versions
35 */
30#define MCI_ST_8BIT_BUS (1 << 12) 36#define MCI_ST_8BIT_BUS (1 << 12)
37#define MCI_ST_U300_HWFCEN (1 << 13)
38#define MCI_ST_UX500_NEG_EDGE (1 << 13)
39#define MCI_ST_UX500_HWFCEN (1 << 14)
40#define MCI_ST_UX500_CLK_INV (1 << 15)
31 41
32#define MMCIARGUMENT 0x008 42#define MMCIARGUMENT 0x008
33#define MMCICOMMAND 0x00c 43#define MMCICOMMAND 0x00c
@@ -54,10 +64,16 @@
54#define MCI_DPSM_MODE (1 << 2) 64#define MCI_DPSM_MODE (1 << 2)
55#define MCI_DPSM_DMAENABLE (1 << 3) 65#define MCI_DPSM_DMAENABLE (1 << 3)
56#define MCI_DPSM_BLOCKSIZE (1 << 4) 66#define MCI_DPSM_BLOCKSIZE (1 << 4)
57#define MCI_DPSM_RWSTART (1 << 8) 67/* Control register extensions in the ST Micro U300 and Ux500 versions */
58#define MCI_DPSM_RWSTOP (1 << 9) 68#define MCI_ST_DPSM_RWSTART (1 << 8)
59#define MCI_DPSM_RWMOD (1 << 10) 69#define MCI_ST_DPSM_RWSTOP (1 << 9)
60#define MCI_DPSM_SDIOEN (1 << 11) 70#define MCI_ST_DPSM_RWMOD (1 << 10)
71#define MCI_ST_DPSM_SDIOEN (1 << 11)
72/* Control register extensions in the ST Micro Ux500 versions */
73#define MCI_ST_DPSM_DMAREQCTL (1 << 12)
74#define MCI_ST_DPSM_DBOOTMODEEN (1 << 13)
75#define MCI_ST_DPSM_BUSYMODE (1 << 14)
76#define MCI_ST_DPSM_DDRMODE (1 << 15)
61 77
62#define MMCIDATACNT 0x030 78#define MMCIDATACNT 0x030
63#define MMCISTATUS 0x034 79#define MMCISTATUS 0x034
@@ -70,6 +86,7 @@
70#define MCI_CMDRESPEND (1 << 6) 86#define MCI_CMDRESPEND (1 << 6)
71#define MCI_CMDSENT (1 << 7) 87#define MCI_CMDSENT (1 << 7)
72#define MCI_DATAEND (1 << 8) 88#define MCI_DATAEND (1 << 8)
89#define MCI_STARTBITERR (1 << 9)
73#define MCI_DATABLOCKEND (1 << 10) 90#define MCI_DATABLOCKEND (1 << 10)
74#define MCI_CMDACTIVE (1 << 11) 91#define MCI_CMDACTIVE (1 << 11)
75#define MCI_TXACTIVE (1 << 12) 92#define MCI_TXACTIVE (1 << 12)
@@ -82,8 +99,9 @@
82#define MCI_RXFIFOEMPTY (1 << 19) 99#define MCI_RXFIFOEMPTY (1 << 19)
83#define MCI_TXDATAAVLBL (1 << 20) 100#define MCI_TXDATAAVLBL (1 << 20)
84#define MCI_RXDATAAVLBL (1 << 21) 101#define MCI_RXDATAAVLBL (1 << 21)
85#define MCI_SDIOIT (1 << 22) 102/* Extended status bits for the ST Micro variants */
86#define MCI_CEATAEND (1 << 23) 103#define MCI_ST_SDIOIT (1 << 22)
104#define MCI_ST_CEATAEND (1 << 23)
87 105
88#define MMCICLEAR 0x038 106#define MMCICLEAR 0x038
89#define MCI_CMDCRCFAILCLR (1 << 0) 107#define MCI_CMDCRCFAILCLR (1 << 0)
@@ -95,9 +113,11 @@
95#define MCI_CMDRESPENDCLR (1 << 6) 113#define MCI_CMDRESPENDCLR (1 << 6)
96#define MCI_CMDSENTCLR (1 << 7) 114#define MCI_CMDSENTCLR (1 << 7)
97#define MCI_DATAENDCLR (1 << 8) 115#define MCI_DATAENDCLR (1 << 8)
116#define MCI_STARTBITERRCLR (1 << 9)
98#define MCI_DATABLOCKENDCLR (1 << 10) 117#define MCI_DATABLOCKENDCLR (1 << 10)
99#define MCI_SDIOITC (1 << 22) 118/* Extended status bits for the ST Micro variants */
100#define MCI_CEATAENDC (1 << 23) 119#define MCI_ST_SDIOITC (1 << 22)
120#define MCI_ST_CEATAENDC (1 << 23)
101 121
102#define MMCIMASK0 0x03c 122#define MMCIMASK0 0x03c
103#define MCI_CMDCRCFAILMASK (1 << 0) 123#define MCI_CMDCRCFAILMASK (1 << 0)
@@ -109,6 +129,7 @@
109#define MCI_CMDRESPENDMASK (1 << 6) 129#define MCI_CMDRESPENDMASK (1 << 6)
110#define MCI_CMDSENTMASK (1 << 7) 130#define MCI_CMDSENTMASK (1 << 7)
111#define MCI_DATAENDMASK (1 << 8) 131#define MCI_DATAENDMASK (1 << 8)
132#define MCI_STARTBITERRMASK (1 << 9)
112#define MCI_DATABLOCKENDMASK (1 << 10) 133#define MCI_DATABLOCKENDMASK (1 << 10)
113#define MCI_CMDACTIVEMASK (1 << 11) 134#define MCI_CMDACTIVEMASK (1 << 11)
114#define MCI_TXACTIVEMASK (1 << 12) 135#define MCI_TXACTIVEMASK (1 << 12)
@@ -121,8 +142,9 @@
121#define MCI_RXFIFOEMPTYMASK (1 << 19) 142#define MCI_RXFIFOEMPTYMASK (1 << 19)
122#define MCI_TXDATAAVLBLMASK (1 << 20) 143#define MCI_TXDATAAVLBLMASK (1 << 20)
123#define MCI_RXDATAAVLBLMASK (1 << 21) 144#define MCI_RXDATAAVLBLMASK (1 << 21)
124#define MCI_SDIOITMASK (1 << 22) 145/* Extended status bits for the ST Micro variants */
125#define MCI_CEATAENDMASK (1 << 23) 146#define MCI_ST_SDIOITMASK (1 << 22)
147#define MCI_ST_CEATAENDMASK (1 << 23)
126 148
127#define MMCIMASK1 0x040 149#define MMCIMASK1 0x040
128#define MMCIFIFOCNT 0x048 150#define MMCIFIFOCNT 0x048
@@ -131,21 +153,21 @@
131#define MCI_IRQENABLE \ 153#define MCI_IRQENABLE \
132 (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \ 154 (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
133 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ 155 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
134 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK) 156 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_STARTBITERRMASK)
135 157
136/* 158/* These interrupts are directed to IRQ1 when two IRQ lines are available */
137 * The size of the FIFO in bytes. 159#define MCI_IRQ1MASK \
138 */ 160 (MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \
139#define MCI_FIFOSIZE (16*4) 161 MCI_TXFIFOHALFEMPTYMASK)
140
141#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)
142 162
143#define NR_SG 16 163#define NR_SG 16
144 164
145struct clk; 165struct clk;
146struct variant_data; 166struct variant_data;
167struct dma_chan;
147 168
148struct mmci_host { 169struct mmci_host {
170 phys_addr_t phybase;
149 void __iomem *base; 171 void __iomem *base;
150 struct mmc_request *mrq; 172 struct mmc_request *mrq;
151 struct mmc_command *cmd; 173 struct mmc_command *cmd;
@@ -154,8 +176,8 @@ struct mmci_host {
154 struct clk *clk; 176 struct clk *clk;
155 int gpio_cd; 177 int gpio_cd;
156 int gpio_wp; 178 int gpio_wp;
157 179 int gpio_cd_irq;
158 unsigned int data_xfered; 180 bool singleirq;
159 181
160 spinlock_t lock; 182 spinlock_t lock;
161 183
@@ -175,5 +197,16 @@ struct mmci_host {
175 struct sg_mapping_iter sg_miter; 197 struct sg_mapping_iter sg_miter;
176 unsigned int size; 198 unsigned int size;
177 struct regulator *vcc; 199 struct regulator *vcc;
200
201#ifdef CONFIG_DMA_ENGINE
202 /* DMA stuff */
203 struct dma_chan *dma_current;
204 struct dma_chan *dma_rx_channel;
205 struct dma_chan *dma_tx_channel;
206
207#define dma_inprogress(host) ((host)->dma_current)
208#else
209#define dma_inprogress(host) (0)
210#endif
178}; 211};
179 212
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index ff7752348b11..a4c865a5286b 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -36,6 +36,7 @@
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/memory.h> 37#include <linux/memory.h>
38#include <linux/gfp.h> 38#include <linux/gfp.h>
39#include <linux/gpio.h>
39 40
40#include <asm/cacheflush.h> 41#include <asm/cacheflush.h>
41#include <asm/div64.h> 42#include <asm/div64.h>
@@ -44,6 +45,7 @@
44#include <mach/mmc.h> 45#include <mach/mmc.h>
45#include <mach/msm_iomap.h> 46#include <mach/msm_iomap.h>
46#include <mach/dma.h> 47#include <mach/dma.h>
48#include <mach/clk.h>
47 49
48#include "msm_sdcc.h" 50#include "msm_sdcc.h"
49 51
@@ -126,6 +128,40 @@ static void
126msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, 128msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
127 u32 c); 129 u32 c);
128 130
131static void msmsdcc_reset_and_restore(struct msmsdcc_host *host)
132{
133 u32 mci_clk = 0;
134 u32 mci_mask0 = 0;
135 int ret = 0;
136
137 /* Save the controller state */
138 mci_clk = readl(host->base + MMCICLOCK);
139 mci_mask0 = readl(host->base + MMCIMASK0);
140
141 /* Reset the controller */
142 ret = clk_reset(host->clk, CLK_RESET_ASSERT);
143 if (ret)
144 pr_err("%s: Clock assert failed at %u Hz with err %d\n",
145 mmc_hostname(host->mmc), host->clk_rate, ret);
146
147 ret = clk_reset(host->clk, CLK_RESET_DEASSERT);
148 if (ret)
149 pr_err("%s: Clock deassert failed at %u Hz with err %d\n",
150 mmc_hostname(host->mmc), host->clk_rate, ret);
151
152 pr_info("%s: Controller has been re-initialiazed\n",
153 mmc_hostname(host->mmc));
154
155 /* Restore the contoller state */
156 writel(host->pwr, host->base + MMCIPOWER);
157 writel(mci_clk, host->base + MMCICLOCK);
158 writel(mci_mask0, host->base + MMCIMASK0);
159 ret = clk_set_rate(host->clk, host->clk_rate);
160 if (ret)
161 pr_err("%s: Failed to set clk rate %u Hz (%d)\n",
162 mmc_hostname(host->mmc), host->clk_rate, ret);
163}
164
129static void 165static void
130msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq) 166msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
131{ 167{
@@ -155,7 +191,7 @@ static void
155msmsdcc_stop_data(struct msmsdcc_host *host) 191msmsdcc_stop_data(struct msmsdcc_host *host)
156{ 192{
157 host->curr.data = NULL; 193 host->curr.data = NULL;
158 host->curr.got_dataend = host->curr.got_datablkend = 0; 194 host->curr.got_dataend = 0;
159} 195}
160 196
161uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host) 197uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
@@ -189,61 +225,52 @@ msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
189} 225}
190 226
191static void 227static void
192msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd, 228msmsdcc_dma_complete_tlet(unsigned long data)
193 unsigned int result,
194 struct msm_dmov_errdata *err)
195{ 229{
196 struct msmsdcc_dma_data *dma_data = 230 struct msmsdcc_host *host = (struct msmsdcc_host *)data;
197 container_of(cmd, struct msmsdcc_dma_data, hdr);
198 struct msmsdcc_host *host = dma_data->host;
199 unsigned long flags; 231 unsigned long flags;
200 struct mmc_request *mrq; 232 struct mmc_request *mrq;
233 struct msm_dmov_errdata err;
201 234
202 spin_lock_irqsave(&host->lock, flags); 235 spin_lock_irqsave(&host->lock, flags);
203 host->dma.active = 0; 236 host->dma.active = 0;
204 237
238 err = host->dma.err;
205 mrq = host->curr.mrq; 239 mrq = host->curr.mrq;
206 BUG_ON(!mrq); 240 BUG_ON(!mrq);
207 WARN_ON(!mrq->data); 241 WARN_ON(!mrq->data);
208 242
209 if (!(result & DMOV_RSLT_VALID)) { 243 if (!(host->dma.result & DMOV_RSLT_VALID)) {
210 pr_err("msmsdcc: Invalid DataMover result\n"); 244 pr_err("msmsdcc: Invalid DataMover result\n");
211 goto out; 245 goto out;
212 } 246 }
213 247
214 if (result & DMOV_RSLT_DONE) { 248 if (host->dma.result & DMOV_RSLT_DONE) {
215 host->curr.data_xfered = host->curr.xfer_size; 249 host->curr.data_xfered = host->curr.xfer_size;
216 } else { 250 } else {
217 /* Error or flush */ 251 /* Error or flush */
218 if (result & DMOV_RSLT_ERROR) 252 if (host->dma.result & DMOV_RSLT_ERROR)
219 pr_err("%s: DMA error (0x%.8x)\n", 253 pr_err("%s: DMA error (0x%.8x)\n",
220 mmc_hostname(host->mmc), result); 254 mmc_hostname(host->mmc), host->dma.result);
221 if (result & DMOV_RSLT_FLUSH) 255 if (host->dma.result & DMOV_RSLT_FLUSH)
222 pr_err("%s: DMA channel flushed (0x%.8x)\n", 256 pr_err("%s: DMA channel flushed (0x%.8x)\n",
223 mmc_hostname(host->mmc), result); 257 mmc_hostname(host->mmc), host->dma.result);
224 if (err) 258
225 pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n", 259 pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
226 err->flush[0], err->flush[1], err->flush[2], 260 err.flush[0], err.flush[1], err.flush[2],
227 err->flush[3], err->flush[4], err->flush[5]); 261 err.flush[3], err.flush[4], err.flush[5]);
262
263 msmsdcc_reset_and_restore(host);
228 if (!mrq->data->error) 264 if (!mrq->data->error)
229 mrq->data->error = -EIO; 265 mrq->data->error = -EIO;
230 } 266 }
231 dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents, 267 dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
232 host->dma.dir); 268 host->dma.dir);
233 269
234 if (host->curr.user_pages) {
235 struct scatterlist *sg = host->dma.sg;
236 int i;
237
238 for (i = 0; i < host->dma.num_ents; i++)
239 flush_dcache_page(sg_page(sg++));
240 }
241
242 host->dma.sg = NULL; 270 host->dma.sg = NULL;
243 host->dma.busy = 0; 271 host->dma.busy = 0;
244 272
245 if ((host->curr.got_dataend && host->curr.got_datablkend) 273 if (host->curr.got_dataend || mrq->data->error) {
246 || mrq->data->error) {
247 274
248 /* 275 /*
249 * If we've already gotten our DATAEND / DATABLKEND 276 * If we've already gotten our DATAEND / DATABLKEND
@@ -273,6 +300,22 @@ out:
273 return; 300 return;
274} 301}
275 302
303static void
304msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
305 unsigned int result,
306 struct msm_dmov_errdata *err)
307{
308 struct msmsdcc_dma_data *dma_data =
309 container_of(cmd, struct msmsdcc_dma_data, hdr);
310 struct msmsdcc_host *host = dma_data->host;
311
312 dma_data->result = result;
313 if (err)
314 memcpy(&dma_data->err, err, sizeof(struct msm_dmov_errdata));
315
316 tasklet_schedule(&host->dma_tlet);
317}
318
276static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data) 319static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
277{ 320{
278 if (host->dma.channel == -1) 321 if (host->dma.channel == -1)
@@ -333,14 +376,30 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
333 host->curr.user_pages = 0; 376 host->curr.user_pages = 0;
334 377
335 box = &nc->cmd[0]; 378 box = &nc->cmd[0];
336 for (i = 0; i < host->dma.num_ents; i++) {
337 box->cmd = CMD_MODE_BOX;
338 379
339 /* Initialize sg dma address */ 380 /* location of command block must be 64 bit aligned */
340 sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg)) 381 BUG_ON(host->dma.cmd_busaddr & 0x07);
341 + sg->offset;
342 382
343 if (i == (host->dma.num_ents - 1)) 383 nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
384 host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
385 DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
386 host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
387
388 n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
389 host->dma.num_ents, host->dma.dir);
390 if (n == 0) {
391 printk(KERN_ERR "%s: Unable to map in all sg elements\n",
392 mmc_hostname(host->mmc));
393 host->dma.sg = NULL;
394 host->dma.num_ents = 0;
395 return -ENOMEM;
396 }
397
398 for_each_sg(host->dma.sg, sg, n, i) {
399
400 box->cmd = CMD_MODE_BOX;
401
402 if (i == n - 1)
344 box->cmd |= CMD_LC; 403 box->cmd |= CMD_LC;
345 rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ? 404 rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
346 (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 : 405 (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -368,27 +427,6 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
368 box->cmd |= CMD_DST_CRCI(crci); 427 box->cmd |= CMD_DST_CRCI(crci);
369 } 428 }
370 box++; 429 box++;
371 sg++;
372 }
373
374 /* location of command block must be 64 bit aligned */
375 BUG_ON(host->dma.cmd_busaddr & 0x07);
376
377 nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
378 host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
379 DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
380 host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
381
382 n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
383 host->dma.num_ents, host->dma.dir);
384/* dsb inside dma_map_sg will write nc out to mem as well */
385
386 if (n != host->dma.num_ents) {
387 printk(KERN_ERR "%s: Unable to map in all sg elements\n",
388 mmc_hostname(host->mmc));
389 host->dma.sg = NULL;
390 host->dma.num_ents = 0;
391 return -ENOMEM;
392 } 430 }
393 431
394 return 0; 432 return 0;
@@ -424,6 +462,11 @@ msmsdcc_start_command_deferred(struct msmsdcc_host *host,
424 (cmd->opcode == 53)) 462 (cmd->opcode == 53))
425 *c |= MCI_CSPM_DATCMD; 463 *c |= MCI_CSPM_DATCMD;
426 464
465 if (host->prog_scan && (cmd->opcode == 12)) {
466 *c |= MCI_CPSM_PROGENA;
467 host->prog_enable = true;
468 }
469
427 if (cmd == cmd->mrq->stop) 470 if (cmd == cmd->mrq->stop)
428 *c |= MCI_CSPM_MCIABORT; 471 *c |= MCI_CSPM_MCIABORT;
429 472
@@ -450,7 +493,6 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
450 host->curr.xfer_remain = host->curr.xfer_size; 493 host->curr.xfer_remain = host->curr.xfer_size;
451 host->curr.data_xfered = 0; 494 host->curr.data_xfered = 0;
452 host->curr.got_dataend = 0; 495 host->curr.got_dataend = 0;
453 host->curr.got_datablkend = 0;
454 496
455 memset(&host->pio, 0, sizeof(host->pio)); 497 memset(&host->pio, 0, sizeof(host->pio));
456 498
@@ -494,6 +536,8 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
494 host->cmd_c = c; 536 host->cmd_c = c;
495 } 537 }
496 msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr); 538 msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
539 if (data->flags & MMC_DATA_WRITE)
540 host->prog_scan = true;
497 } else { 541 } else {
498 msmsdcc_writel(host, timeout, MMCIDATATIMER); 542 msmsdcc_writel(host, timeout, MMCIDATATIMER);
499 543
@@ -555,6 +599,9 @@ msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
555 uint32_t *ptr = (uint32_t *) buffer; 599 uint32_t *ptr = (uint32_t *) buffer;
556 int count = 0; 600 int count = 0;
557 601
602 if (remain % 4)
603 remain = ((remain >> 2) + 1) << 2;
604
558 while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) { 605 while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
559 *ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE)); 606 *ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
560 ptr++; 607 ptr++;
@@ -575,13 +622,14 @@ msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
575 char *ptr = buffer; 622 char *ptr = buffer;
576 623
577 do { 624 do {
578 unsigned int count, maxcnt; 625 unsigned int count, maxcnt, sz;
579 626
580 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : 627 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
581 MCI_FIFOHALFSIZE; 628 MCI_FIFOHALFSIZE;
582 count = min(remain, maxcnt); 629 count = min(remain, maxcnt);
583 630
584 writesl(base + MMCIFIFO, ptr, count >> 2); 631 sz = count % 4 ? (count >> 2) + 1 : (count >> 2);
632 writesl(base + MMCIFIFO, ptr, sz);
585 ptr += count; 633 ptr += count;
586 remain -= count; 634 remain -= count;
587 635
@@ -702,10 +750,26 @@ static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
702 msm_dmov_stop_cmd(host->dma.channel, 750 msm_dmov_stop_cmd(host->dma.channel,
703 &host->dma.hdr, 0); 751 &host->dma.hdr, 0);
704 else if (host->curr.data) { /* Non DMA */ 752 else if (host->curr.data) { /* Non DMA */
753 msmsdcc_reset_and_restore(host);
705 msmsdcc_stop_data(host); 754 msmsdcc_stop_data(host);
706 msmsdcc_request_end(host, cmd->mrq); 755 msmsdcc_request_end(host, cmd->mrq);
707 } else /* host->data == NULL */ 756 } else { /* host->data == NULL */
708 msmsdcc_request_end(host, cmd->mrq); 757 if (!cmd->error && host->prog_enable) {
758 if (status & MCI_PROGDONE) {
759 host->prog_scan = false;
760 host->prog_enable = false;
761 msmsdcc_request_end(host, cmd->mrq);
762 } else {
763 host->curr.cmd = cmd;
764 }
765 } else {
766 if (host->prog_enable) {
767 host->prog_scan = false;
768 host->prog_enable = false;
769 }
770 msmsdcc_request_end(host, cmd->mrq);
771 }
772 }
709 } else if (cmd->data) 773 } else if (cmd->data)
710 if (!(cmd->data->flags & MMC_DATA_READ)) 774 if (!(cmd->data->flags & MMC_DATA_READ))
711 msmsdcc_start_data(host, cmd->data, 775 msmsdcc_start_data(host, cmd->data,
@@ -719,7 +783,7 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
719 struct mmc_data *data = host->curr.data; 783 struct mmc_data *data = host->curr.data;
720 784
721 if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL | 785 if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
722 MCI_CMDTIMEOUT) && host->curr.cmd) { 786 MCI_CMDTIMEOUT | MCI_PROGDONE) && host->curr.cmd) {
723 msmsdcc_do_cmdirq(host, status); 787 msmsdcc_do_cmdirq(host, status);
724 } 788 }
725 789
@@ -735,6 +799,7 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
735 msm_dmov_stop_cmd(host->dma.channel, 799 msm_dmov_stop_cmd(host->dma.channel,
736 &host->dma.hdr, 0); 800 &host->dma.hdr, 0);
737 else { 801 else {
802 msmsdcc_reset_and_restore(host);
738 if (host->curr.data) 803 if (host->curr.data)
739 msmsdcc_stop_data(host); 804 msmsdcc_stop_data(host);
740 if (!data->stop) 805 if (!data->stop)
@@ -748,14 +813,10 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
748 if (!host->curr.got_dataend && (status & MCI_DATAEND)) 813 if (!host->curr.got_dataend && (status & MCI_DATAEND))
749 host->curr.got_dataend = 1; 814 host->curr.got_dataend = 1;
750 815
751 if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND))
752 host->curr.got_datablkend = 1;
753
754 /* 816 /*
755 * If DMA is still in progress, we complete via the completion handler 817 * If DMA is still in progress, we complete via the completion handler
756 */ 818 */
757 if (host->curr.got_dataend && host->curr.got_datablkend && 819 if (host->curr.got_dataend && !host->dma.busy) {
758 !host->dma.busy) {
759 /* 820 /*
760 * There appears to be an issue in the controller where 821 * There appears to be an issue in the controller where
761 * if you request a small block transfer (< fifo size), 822 * if you request a small block transfer (< fifo size),
@@ -792,8 +853,7 @@ msmsdcc_irq(int irq, void *dev_id)
792 853
793 do { 854 do {
794 status = msmsdcc_readl(host, MMCISTATUS); 855 status = msmsdcc_readl(host, MMCISTATUS);
795 status &= (msmsdcc_readl(host, MMCIMASK0) | 856 status &= msmsdcc_readl(host, MMCIMASK0);
796 MCI_DATABLOCKENDMASK);
797 msmsdcc_writel(host, status, MMCICLEAR); 857 msmsdcc_writel(host, status, MMCICLEAR);
798 858
799 if (status & MCI_SDIOINTR) 859 if (status & MCI_SDIOINTR)
@@ -874,6 +934,38 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
874 spin_unlock_irqrestore(&host->lock, flags); 934 spin_unlock_irqrestore(&host->lock, flags);
875} 935}
876 936
937static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable)
938{
939 struct msm_mmc_gpio_data *curr;
940 int i, rc = 0;
941
942 if (!host->plat->gpio_data && host->gpio_config_status == enable)
943 return;
944
945 curr = host->plat->gpio_data;
946 for (i = 0; i < curr->size; i++) {
947 if (enable) {
948 rc = gpio_request(curr->gpio[i].no,
949 curr->gpio[i].name);
950 if (rc) {
951 pr_err("%s: gpio_request(%d, %s) failed %d\n",
952 mmc_hostname(host->mmc),
953 curr->gpio[i].no,
954 curr->gpio[i].name, rc);
955 goto free_gpios;
956 }
957 } else {
958 gpio_free(curr->gpio[i].no);
959 }
960 }
961 host->gpio_config_status = enable;
962 return;
963
964free_gpios:
965 for (; i >= 0; i--)
966 gpio_free(curr->gpio[i].no);
967}
968
877static void 969static void
878msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 970msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
879{ 971{
@@ -886,6 +978,8 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
886 978
887 msmsdcc_enable_clocks(host); 979 msmsdcc_enable_clocks(host);
888 980
981 spin_unlock_irqrestore(&host->lock, flags);
982
889 if (ios->clock) { 983 if (ios->clock) {
890 if (ios->clock != host->clk_rate) { 984 if (ios->clock != host->clk_rate) {
891 rc = clk_set_rate(host->clk, ios->clock); 985 rc = clk_set_rate(host->clk, ios->clock);
@@ -912,9 +1006,11 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
912 1006
913 switch (ios->power_mode) { 1007 switch (ios->power_mode) {
914 case MMC_POWER_OFF: 1008 case MMC_POWER_OFF:
1009 msmsdcc_setup_gpio(host, false);
915 break; 1010 break;
916 case MMC_POWER_UP: 1011 case MMC_POWER_UP:
917 pwr |= MCI_PWR_UP; 1012 pwr |= MCI_PWR_UP;
1013 msmsdcc_setup_gpio(host, true);
918 break; 1014 break;
919 case MMC_POWER_ON: 1015 case MMC_POWER_ON:
920 pwr |= MCI_PWR_ON; 1016 pwr |= MCI_PWR_ON;
@@ -931,9 +1027,10 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
931 msmsdcc_writel(host, pwr, MMCIPOWER); 1027 msmsdcc_writel(host, pwr, MMCIPOWER);
932 } 1028 }
933#if BUSCLK_PWRSAVE 1029#if BUSCLK_PWRSAVE
1030 spin_lock_irqsave(&host->lock, flags);
934 msmsdcc_disable_clocks(host, 1); 1031 msmsdcc_disable_clocks(host, 1);
935#endif
936 spin_unlock_irqrestore(&host->lock, flags); 1032 spin_unlock_irqrestore(&host->lock, flags);
1033#endif
937} 1034}
938 1035
939static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable) 1036static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1118,6 +1215,9 @@ msmsdcc_probe(struct platform_device *pdev)
1118 host->dmares = dmares; 1215 host->dmares = dmares;
1119 spin_lock_init(&host->lock); 1216 spin_lock_init(&host->lock);
1120 1217
1218 tasklet_init(&host->dma_tlet, msmsdcc_dma_complete_tlet,
1219 (unsigned long)host);
1220
1121 /* 1221 /*
1122 * Setup DMA 1222 * Setup DMA
1123 */ 1223 */
@@ -1164,8 +1264,7 @@ msmsdcc_probe(struct platform_device *pdev)
1164 mmc->caps |= MMC_CAP_SDIO_IRQ; 1264 mmc->caps |= MMC_CAP_SDIO_IRQ;
1165 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; 1265 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
1166 1266
1167 mmc->max_phys_segs = NR_SG; 1267 mmc->max_segs = NR_SG;
1168 mmc->max_hw_segs = NR_SG;
1169 mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */ 1268 mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */
1170 mmc->max_blk_count = 65536; 1269 mmc->max_blk_count = 65536;
1171 1270
@@ -1257,9 +1356,6 @@ msmsdcc_probe(struct platform_device *pdev)
1257 if (host->timer.function) 1356 if (host->timer.function)
1258 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc)); 1357 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
1259 1358
1260#if BUSCLK_PWRSAVE
1261 msmsdcc_disable_clocks(host, 1);
1262#endif
1263 return 0; 1359 return 0;
1264 cmd_irq_free: 1360 cmd_irq_free:
1265 free_irq(cmd_irqres->start, host); 1361 free_irq(cmd_irqres->start, host);
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index ff2b0f74f6f4..42d7bbc977c5 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -138,7 +138,7 @@
138#define MCI_IRQENABLE \ 138#define MCI_IRQENABLE \
139 (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \ 139 (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
140 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ 140 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
141 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK) 141 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK|MCI_PROGDONEMASK)
142 142
143/* 143/*
144 * The size of the FIFO in bytes. 144 * The size of the FIFO in bytes.
@@ -172,6 +172,8 @@ struct msmsdcc_dma_data {
172 struct msmsdcc_host *host; 172 struct msmsdcc_host *host;
173 int busy; /* Set if DM is busy */ 173 int busy; /* Set if DM is busy */
174 int active; 174 int active;
175 unsigned int result;
176 struct msm_dmov_errdata err;
175}; 177};
176 178
177struct msmsdcc_pio_data { 179struct msmsdcc_pio_data {
@@ -188,7 +190,6 @@ struct msmsdcc_curr_req {
188 unsigned int xfer_remain; /* Bytes remaining to send */ 190 unsigned int xfer_remain; /* Bytes remaining to send */
189 unsigned int data_xfered; /* Bytes acked by BLKEND irq */ 191 unsigned int data_xfered; /* Bytes acked by BLKEND irq */
190 int got_dataend; 192 int got_dataend;
191 int got_datablkend;
192 int user_pages; 193 int user_pages;
193}; 194};
194 195
@@ -235,13 +236,17 @@ struct msmsdcc_host {
235 int cmdpoll; 236 int cmdpoll;
236 struct msmsdcc_stats stats; 237 struct msmsdcc_stats stats;
237 238
239 struct tasklet_struct dma_tlet;
238 /* Command parameters */ 240 /* Command parameters */
239 unsigned int cmd_timeout; 241 unsigned int cmd_timeout;
240 unsigned int cmd_pio_irqmask; 242 unsigned int cmd_pio_irqmask;
241 unsigned int cmd_datactrl; 243 unsigned int cmd_datactrl;
242 struct mmc_command *cmd_cmd; 244 struct mmc_command *cmd_cmd;
243 u32 cmd_c; 245 u32 cmd_c;
246 bool gpio_config_status;
244 247
248 bool prog_scan;
249 bool prog_enable;
245}; 250};
246 251
247#endif 252#endif
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 366eefa77c5a..a5bf60e01af4 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -742,8 +742,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
742 mmc->max_blk_size = 2048; 742 mmc->max_blk_size = 2048;
743 mmc->max_blk_count = 65535; 743 mmc->max_blk_count = 65535;
744 744
745 mmc->max_hw_segs = 1; 745 mmc->max_segs = 1;
746 mmc->max_phys_segs = 1;
747 mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count; 746 mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
748 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 747 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
749 748
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 350f78e86245..cc20e0259325 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,16 +31,15 @@
31#include <linux/clk.h> 31#include <linux/clk.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/gpio.h> 33#include <linux/gpio.h>
34#include <linux/regulator/consumer.h>
35#include <linux/dmaengine.h>
34 36
35#include <asm/dma.h> 37#include <asm/dma.h>
36#include <asm/irq.h> 38#include <asm/irq.h>
37#include <asm/sizes.h> 39#include <asm/sizes.h>
38#include <mach/mmc.h> 40#include <mach/mmc.h>
39 41
40#ifdef CONFIG_ARCH_MX2 42#include <mach/dma.h>
41#include <mach/dma-mx1-mx2.h>
42#define HAS_DMA
43#endif
44 43
45#define DRIVER_NAME "mxc-mmc" 44#define DRIVER_NAME "mxc-mmc"
46 45
@@ -117,7 +116,8 @@ struct mxcmci_host {
117 void __iomem *base; 116 void __iomem *base;
118 int irq; 117 int irq;
119 int detect_irq; 118 int detect_irq;
120 int dma; 119 struct dma_chan *dma;
120 struct dma_async_tx_descriptor *desc;
121 int do_dma; 121 int do_dma;
122 int default_irq_mask; 122 int default_irq_mask;
123 int use_sdio; 123 int use_sdio;
@@ -128,7 +128,6 @@ struct mxcmci_host {
128 struct mmc_command *cmd; 128 struct mmc_command *cmd;
129 struct mmc_data *data; 129 struct mmc_data *data;
130 130
131 unsigned int dma_nents;
132 unsigned int datasize; 131 unsigned int datasize;
133 unsigned int dma_dir; 132 unsigned int dma_dir;
134 133
@@ -141,10 +140,54 @@ struct mxcmci_host {
141 140
142 struct work_struct datawork; 141 struct work_struct datawork;
143 spinlock_t lock; 142 spinlock_t lock;
143
144 struct regulator *vcc;
145
146 int burstlen;
147 int dmareq;
148 struct dma_slave_config dma_slave_config;
149 struct imx_dma_data dma_data;
144}; 150};
145 151
146static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); 152static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
147 153
154static inline void mxcmci_init_ocr(struct mxcmci_host *host)
155{
156 host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
157
158 if (IS_ERR(host->vcc)) {
159 host->vcc = NULL;
160 } else {
161 host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
162 if (host->pdata && host->pdata->ocr_avail)
163 dev_warn(mmc_dev(host->mmc),
164 "pdata->ocr_avail will not be used\n");
165 }
166
167 if (host->vcc == NULL) {
168 /* fall-back to platform data */
169 if (host->pdata && host->pdata->ocr_avail)
170 host->mmc->ocr_avail = host->pdata->ocr_avail;
171 else
172 host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
173 }
174}
175
176static inline void mxcmci_set_power(struct mxcmci_host *host,
177 unsigned char power_mode,
178 unsigned int vdd)
179{
180 if (host->vcc) {
181 if (power_mode == MMC_POWER_UP)
182 mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
183 else if (power_mode == MMC_POWER_OFF)
184 mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
185 }
186
187 if (host->pdata && host->pdata->setpower)
188 host->pdata->setpower(mmc_dev(host->mmc), vdd);
189}
190
148static inline int mxcmci_use_dma(struct mxcmci_host *host) 191static inline int mxcmci_use_dma(struct mxcmci_host *host)
149{ 192{
150 return host->do_dma; 193 return host->do_dma;
@@ -166,17 +209,16 @@ static void mxcmci_softreset(struct mxcmci_host *host)
166 209
167 writew(0xff, host->base + MMC_REG_RES_TO); 210 writew(0xff, host->base + MMC_REG_RES_TO);
168} 211}
212static int mxcmci_setup_dma(struct mmc_host *mmc);
169 213
170static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) 214static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
171{ 215{
172 unsigned int nob = data->blocks; 216 unsigned int nob = data->blocks;
173 unsigned int blksz = data->blksz; 217 unsigned int blksz = data->blksz;
174 unsigned int datasize = nob * blksz; 218 unsigned int datasize = nob * blksz;
175#ifdef HAS_DMA
176 struct scatterlist *sg; 219 struct scatterlist *sg;
177 int i; 220 int i, nents;
178 int ret; 221
179#endif
180 if (data->flags & MMC_DATA_STREAM) 222 if (data->flags & MMC_DATA_STREAM)
181 nob = 0xffff; 223 nob = 0xffff;
182 224
@@ -187,7 +229,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
187 writew(blksz, host->base + MMC_REG_BLK_LEN); 229 writew(blksz, host->base + MMC_REG_BLK_LEN);
188 host->datasize = datasize; 230 host->datasize = datasize;
189 231
190#ifdef HAS_DMA 232 if (!mxcmci_use_dma(host))
233 return 0;
234
191 for_each_sg(data->sg, sg, data->sg_len, i) { 235 for_each_sg(data->sg, sg, data->sg_len, i) {
192 if (sg->offset & 3 || sg->length & 3) { 236 if (sg->offset & 3 || sg->length & 3) {
193 host->do_dma = 0; 237 host->do_dma = 0;
@@ -195,34 +239,30 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
195 } 239 }
196 } 240 }
197 241
198 if (data->flags & MMC_DATA_READ) { 242 if (data->flags & MMC_DATA_READ)
199 host->dma_dir = DMA_FROM_DEVICE; 243 host->dma_dir = DMA_FROM_DEVICE;
200 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, 244 else
201 data->sg_len, host->dma_dir);
202
203 ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
204 datasize,
205 host->res->start + MMC_REG_BUFFER_ACCESS,
206 DMA_MODE_READ);
207 } else {
208 host->dma_dir = DMA_TO_DEVICE; 245 host->dma_dir = DMA_TO_DEVICE;
209 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
210 data->sg_len, host->dma_dir);
211 246
212 ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, 247 nents = dma_map_sg(host->dma->device->dev, data->sg,
213 datasize, 248 data->sg_len, host->dma_dir);
214 host->res->start + MMC_REG_BUFFER_ACCESS, 249 if (nents != data->sg_len)
215 DMA_MODE_WRITE); 250 return -EINVAL;
216 } 251
252 host->desc = host->dma->device->device_prep_slave_sg(host->dma,
253 data->sg, data->sg_len, host->dma_dir,
254 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
217 255
218 if (ret) { 256 if (!host->desc) {
219 dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret); 257 dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
220 return ret; 258 host->dma_dir);
259 host->do_dma = 0;
260 return 0; /* Fall back to PIO */
221 } 261 }
222 wmb(); 262 wmb();
223 263
224 imx_dma_enable(host->dma); 264 dmaengine_submit(host->desc);
225#endif /* HAS_DMA */ 265
226 return 0; 266 return 0;
227} 267}
228 268
@@ -297,13 +337,11 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
297 struct mmc_data *data = host->data; 337 struct mmc_data *data = host->data;
298 int data_error; 338 int data_error;
299 339
300#ifdef HAS_DMA
301 if (mxcmci_use_dma(host)) { 340 if (mxcmci_use_dma(host)) {
302 imx_dma_disable(host->dma); 341 dmaengine_terminate_all(host->dma);
303 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents, 342 dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
304 host->dma_dir); 343 host->dma_dir);
305 } 344 }
306#endif
307 345
308 if (stat & STATUS_ERR_MASK) { 346 if (stat & STATUS_ERR_MASK) {
309 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", 347 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
@@ -505,7 +543,6 @@ static void mxcmci_datawork(struct work_struct *work)
505 } 543 }
506} 544}
507 545
508#ifdef HAS_DMA
509static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat) 546static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
510{ 547{
511 struct mmc_data *data = host->data; 548 struct mmc_data *data = host->data;
@@ -528,7 +565,6 @@ static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
528 mxcmci_finish_request(host, host->req); 565 mxcmci_finish_request(host, host->req);
529 } 566 }
530} 567}
531#endif /* HAS_DMA */
532 568
533static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat) 569static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
534{ 570{
@@ -566,12 +602,10 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
566 sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio; 602 sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
567 spin_unlock_irqrestore(&host->lock, flags); 603 spin_unlock_irqrestore(&host->lock, flags);
568 604
569#ifdef HAS_DMA
570 if (mxcmci_use_dma(host) && 605 if (mxcmci_use_dma(host) &&
571 (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE))) 606 (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
572 writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, 607 writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
573 host->base + MMC_REG_STATUS); 608 host->base + MMC_REG_STATUS);
574#endif
575 609
576 if (sdio_irq) { 610 if (sdio_irq) {
577 writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS); 611 writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
@@ -581,14 +615,14 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
581 if (stat & STATUS_END_CMD_RESP) 615 if (stat & STATUS_END_CMD_RESP)
582 mxcmci_cmd_done(host, stat); 616 mxcmci_cmd_done(host, stat);
583 617
584#ifdef HAS_DMA
585 if (mxcmci_use_dma(host) && 618 if (mxcmci_use_dma(host) &&
586 (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) 619 (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
587 mxcmci_data_done(host, stat); 620 mxcmci_data_done(host, stat);
588#endif 621
589 if (host->default_irq_mask && 622 if (host->default_irq_mask &&
590 (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL))) 623 (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
591 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 624 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
625
592 return IRQ_HANDLED; 626 return IRQ_HANDLED;
593} 627}
594 628
@@ -602,9 +636,10 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
602 636
603 host->req = req; 637 host->req = req;
604 host->cmdat &= ~CMD_DAT_CONT_INIT; 638 host->cmdat &= ~CMD_DAT_CONT_INIT;
605#ifdef HAS_DMA 639
606 host->do_dma = 1; 640 if (host->dma)
607#endif 641 host->do_dma = 1;
642
608 if (req->data) { 643 if (req->data) {
609 error = mxcmci_setup_data(host, req->data); 644 error = mxcmci_setup_data(host, req->data);
610 if (error) { 645 if (error) {
@@ -620,6 +655,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
620 } 655 }
621 656
622 error = mxcmci_start_cmd(host, req->cmd, cmdat); 657 error = mxcmci_start_cmd(host, req->cmd, cmdat);
658
623out: 659out:
624 if (error) 660 if (error)
625 mxcmci_finish_request(host, req); 661 mxcmci_finish_request(host, req);
@@ -658,31 +694,55 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
658 prescaler, divider, clk_in, clk_ios); 694 prescaler, divider, clk_in, clk_ios);
659} 695}
660 696
697static int mxcmci_setup_dma(struct mmc_host *mmc)
698{
699 struct mxcmci_host *host = mmc_priv(mmc);
700 struct dma_slave_config *config = &host->dma_slave_config;
701
702 config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
703 config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
704 config->dst_addr_width = 4;
705 config->src_addr_width = 4;
706 config->dst_maxburst = host->burstlen;
707 config->src_maxburst = host->burstlen;
708
709 return dmaengine_slave_config(host->dma, config);
710}
711
661static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 712static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
662{ 713{
663 struct mxcmci_host *host = mmc_priv(mmc); 714 struct mxcmci_host *host = mmc_priv(mmc);
664#ifdef HAS_DMA 715 int burstlen, ret;
665 unsigned int blen; 716
666 /* 717 /*
667 * use burstlen of 64 in 4 bit mode (--> reg value 0) 718 * use burstlen of 64 in 4 bit mode (--> reg value 0)
668 * use burstlen of 16 in 1 bit mode (--> reg value 16) 719 * use burstlen of 16 in 1 bit mode (--> reg value 16)
669 */ 720 */
670 if (ios->bus_width == MMC_BUS_WIDTH_4) 721 if (ios->bus_width == MMC_BUS_WIDTH_4)
671 blen = 0; 722 burstlen = 64;
672 else 723 else
673 blen = 16; 724 burstlen = 16;
725
726 if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
727 host->burstlen = burstlen;
728 ret = mxcmci_setup_dma(mmc);
729 if (ret) {
730 dev_err(mmc_dev(host->mmc),
731 "failed to config DMA channel. Falling back to PIO\n");
732 dma_release_channel(host->dma);
733 host->do_dma = 0;
734 }
735 }
674 736
675 imx_dma_config_burstlen(host->dma, blen);
676#endif
677 if (ios->bus_width == MMC_BUS_WIDTH_4) 737 if (ios->bus_width == MMC_BUS_WIDTH_4)
678 host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; 738 host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
679 else 739 else
680 host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4; 740 host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
681 741
682 if (host->power_mode != ios->power_mode) { 742 if (host->power_mode != ios->power_mode) {
683 if (host->pdata && host->pdata->setpower) 743 mxcmci_set_power(host, ios->power_mode, ios->vdd);
684 host->pdata->setpower(mmc_dev(mmc), ios->vdd);
685 host->power_mode = ios->power_mode; 744 host->power_mode = ios->power_mode;
745
686 if (ios->power_mode == MMC_POWER_ON) 746 if (ios->power_mode == MMC_POWER_ON)
687 host->cmdat |= CMD_DAT_CONT_INIT; 747 host->cmdat |= CMD_DAT_CONT_INIT;
688 } 748 }
@@ -754,6 +814,18 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
754 host->caps |= MMC_CAP_4_BIT_DATA; 814 host->caps |= MMC_CAP_4_BIT_DATA;
755} 815}
756 816
817static bool filter(struct dma_chan *chan, void *param)
818{
819 struct mxcmci_host *host = param;
820
821 if (!imx_dma_is_general_purpose(chan))
822 return false;
823
824 chan->private = &host->dma_data;
825
826 return true;
827}
828
757static const struct mmc_host_ops mxcmci_ops = { 829static const struct mmc_host_ops mxcmci_ops = {
758 .request = mxcmci_request, 830 .request = mxcmci_request,
759 .set_ios = mxcmci_set_ios, 831 .set_ios = mxcmci_set_ios,
@@ -768,6 +840,7 @@ static int mxcmci_probe(struct platform_device *pdev)
768 struct mxcmci_host *host = NULL; 840 struct mxcmci_host *host = NULL;
769 struct resource *iores, *r; 841 struct resource *iores, *r;
770 int ret = 0, irq; 842 int ret = 0, irq;
843 dma_cap_mask_t mask;
771 844
772 printk(KERN_INFO "i.MX SDHC driver\n"); 845 printk(KERN_INFO "i.MX SDHC driver\n");
773 846
@@ -790,8 +863,7 @@ static int mxcmci_probe(struct platform_device *pdev)
790 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; 863 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
791 864
792 /* MMC core transfer sizes tunable parameters */ 865 /* MMC core transfer sizes tunable parameters */
793 mmc->max_hw_segs = 64; 866 mmc->max_segs = 64;
794 mmc->max_phys_segs = 64;
795 mmc->max_blk_size = 2048; 867 mmc->max_blk_size = 2048;
796 mmc->max_blk_count = 65535; 868 mmc->max_blk_count = 65535;
797 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 869 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
@@ -808,10 +880,7 @@ static int mxcmci_probe(struct platform_device *pdev)
808 host->pdata = pdev->dev.platform_data; 880 host->pdata = pdev->dev.platform_data;
809 spin_lock_init(&host->lock); 881 spin_lock_init(&host->lock);
810 882
811 if (host->pdata && host->pdata->ocr_avail) 883 mxcmci_init_ocr(host);
812 mmc->ocr_avail = host->pdata->ocr_avail;
813 else
814 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
815 884
816 if (host->pdata && host->pdata->dat3_card_detect) 885 if (host->pdata && host->pdata->dat3_card_detect)
817 host->default_irq_mask = 886 host->default_irq_mask =
@@ -847,29 +916,23 @@ static int mxcmci_probe(struct platform_device *pdev)
847 916
848 writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR); 917 writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR);
849 918
850#ifdef HAS_DMA
851 host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
852 if (host->dma < 0) {
853 dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
854 ret = -EBUSY;
855 goto out_clk_put;
856 }
857
858 r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 919 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
859 if (!r) { 920 if (r) {
860 ret = -EINVAL; 921 host->dmareq = r->start;
861 goto out_free_dma; 922 host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
923 host->dma_data.priority = DMA_PRIO_LOW;
924 host->dma_data.dma_request = host->dmareq;
925 dma_cap_zero(mask);
926 dma_cap_set(DMA_SLAVE, mask);
927 host->dma = dma_request_channel(mask, filter, host);
928 if (host->dma)
929 mmc->max_seg_size = dma_get_max_seg_size(
930 host->dma->device->dev);
862 } 931 }
863 932
864 ret = imx_dma_config_channel(host->dma, 933 if (!host->dma)
865 IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_FIFO, 934 dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
866 IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, 935
867 r->start, 0);
868 if (ret) {
869 dev_err(mmc_dev(host->mmc), "failed to config DMA channel\n");
870 goto out_free_dma;
871 }
872#endif
873 INIT_WORK(&host->datawork, mxcmci_datawork); 936 INIT_WORK(&host->datawork, mxcmci_datawork);
874 937
875 ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host); 938 ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
@@ -892,9 +955,8 @@ static int mxcmci_probe(struct platform_device *pdev)
892out_free_irq: 955out_free_irq:
893 free_irq(host->irq, host); 956 free_irq(host->irq, host);
894out_free_dma: 957out_free_dma:
895#ifdef HAS_DMA 958 if (host->dma)
896 imx_dma_free(host->dma); 959 dma_release_channel(host->dma);
897#endif
898out_clk_put: 960out_clk_put:
899 clk_disable(host->clk); 961 clk_disable(host->clk);
900 clk_put(host->clk); 962 clk_put(host->clk);
@@ -916,19 +978,22 @@ static int mxcmci_remove(struct platform_device *pdev)
916 978
917 mmc_remove_host(mmc); 979 mmc_remove_host(mmc);
918 980
981 if (host->vcc)
982 regulator_put(host->vcc);
983
919 if (host->pdata && host->pdata->exit) 984 if (host->pdata && host->pdata->exit)
920 host->pdata->exit(&pdev->dev, mmc); 985 host->pdata->exit(&pdev->dev, mmc);
921 986
922 free_irq(host->irq, host); 987 free_irq(host->irq, host);
923 iounmap(host->base); 988 iounmap(host->base);
924#ifdef HAS_DMA 989
925 imx_dma_free(host->dma); 990 if (host->dma)
926#endif 991 dma_release_channel(host->dma);
992
927 clk_disable(host->clk); 993 clk_disable(host->clk);
928 clk_put(host->clk); 994 clk_put(host->clk);
929 995
930 release_mem_region(host->res->start, resource_size(host->res)); 996 release_mem_region(host->res->start, resource_size(host->res));
931 release_resource(host->res);
932 997
933 mmc_free_host(mmc); 998 mmc_free_host(mmc);
934 999
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
new file mode 100644
index 000000000000..99d39a6a1032
--- /dev/null
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -0,0 +1,874 @@
1/*
2 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
3 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
4 *
5 * Copyright 2008 Embedded Alley Solutions, Inc.
6 * Copyright 2009-2011 Freescale Semiconductor, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/platform_device.h>
27#include <linux/delay.h>
28#include <linux/interrupt.h>
29#include <linux/dma-mapping.h>
30#include <linux/dmaengine.h>
31#include <linux/highmem.h>
32#include <linux/clk.h>
33#include <linux/err.h>
34#include <linux/completion.h>
35#include <linux/mmc/host.h>
36#include <linux/mmc/mmc.h>
37#include <linux/mmc/sdio.h>
38#include <linux/gpio.h>
39#include <linux/regulator/consumer.h>
40
41#include <mach/mxs.h>
42#include <mach/common.h>
43#include <mach/dma.h>
44#include <mach/mmc.h>
45
46#define DRIVER_NAME "mxs-mmc"
47
48/* card detect polling timeout */
49#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
50
51#define SSP_VERSION_LATEST 4
52#define ssp_is_old() (host->version < SSP_VERSION_LATEST)
53
54/* SSP registers */
55#define HW_SSP_CTRL0 0x000
56#define BM_SSP_CTRL0_RUN (1 << 29)
57#define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28)
58#define BM_SSP_CTRL0_IGNORE_CRC (1 << 26)
59#define BM_SSP_CTRL0_READ (1 << 25)
60#define BM_SSP_CTRL0_DATA_XFER (1 << 24)
61#define BP_SSP_CTRL0_BUS_WIDTH (22)
62#define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22)
63#define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21)
64#define BM_SSP_CTRL0_LONG_RESP (1 << 19)
65#define BM_SSP_CTRL0_GET_RESP (1 << 17)
66#define BM_SSP_CTRL0_ENABLE (1 << 16)
67#define BP_SSP_CTRL0_XFER_COUNT (0)
68#define BM_SSP_CTRL0_XFER_COUNT (0xffff)
69#define HW_SSP_CMD0 0x010
70#define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25)
71#define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22)
72#define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21)
73#define BM_SSP_CMD0_APPEND_8CYC (1 << 20)
74#define BP_SSP_CMD0_BLOCK_SIZE (16)
75#define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16)
76#define BP_SSP_CMD0_BLOCK_COUNT (8)
77#define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8)
78#define BP_SSP_CMD0_CMD (0)
79#define BM_SSP_CMD0_CMD (0xff)
80#define HW_SSP_CMD1 0x020
81#define HW_SSP_XFER_SIZE 0x030
82#define HW_SSP_BLOCK_SIZE 0x040
83#define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4)
84#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
85#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0)
86#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf)
87#define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070)
88#define BP_SSP_TIMING_TIMEOUT (16)
89#define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
90#define BP_SSP_TIMING_CLOCK_DIVIDE (8)
91#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
92#define BP_SSP_TIMING_CLOCK_RATE (0)
93#define BM_SSP_TIMING_CLOCK_RATE (0xff)
94#define HW_SSP_CTRL1 (ssp_is_old() ? 0x060 : 0x080)
95#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
96#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
97#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
98#define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28)
99#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27)
100#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26)
101#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25)
102#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24)
103#define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23)
104#define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22)
105#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21)
106#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20)
107#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17)
108#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16)
109#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15)
110#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14)
111#define BM_SSP_CTRL1_DMA_ENABLE (1 << 13)
112#define BM_SSP_CTRL1_POLARITY (1 << 9)
113#define BP_SSP_CTRL1_WORD_LENGTH (4)
114#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
115#define BP_SSP_CTRL1_SSP_MODE (0)
116#define BM_SSP_CTRL1_SSP_MODE (0xf)
117#define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0)
118#define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0)
119#define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0)
120#define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0)
121#define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100)
122#define BM_SSP_STATUS_CARD_DETECT (1 << 28)
123#define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
124#define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130)
125#define BP_SSP_VERSION_MAJOR (24)
126
127#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
128
129#define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \
130 BM_SSP_CTRL1_RESP_ERR_IRQ | \
131 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
132 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
133 BM_SSP_CTRL1_DATA_CRC_IRQ | \
134 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
135 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
136 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
137
138#define SSP_PIO_NUM 3
139
140struct mxs_mmc_host {
141 struct mmc_host *mmc;
142 struct mmc_request *mrq;
143 struct mmc_command *cmd;
144 struct mmc_data *data;
145
146 void __iomem *base;
147 int irq;
148 struct resource *res;
149 struct resource *dma_res;
150 struct clk *clk;
151 unsigned int clk_rate;
152
153 struct dma_chan *dmach;
154 struct mxs_dma_data dma_data;
155 unsigned int dma_dir;
156 u32 ssp_pio_words[SSP_PIO_NUM];
157
158 unsigned int version;
159 unsigned char bus_width;
160 spinlock_t lock;
161 int sdio_irq_en;
162};
163
164static int mxs_mmc_get_ro(struct mmc_host *mmc)
165{
166 struct mxs_mmc_host *host = mmc_priv(mmc);
167 struct mxs_mmc_platform_data *pdata =
168 mmc_dev(host->mmc)->platform_data;
169
170 if (!pdata)
171 return -EFAULT;
172
173 if (!gpio_is_valid(pdata->wp_gpio))
174 return -EINVAL;
175
176 return gpio_get_value(pdata->wp_gpio);
177}
178
179static int mxs_mmc_get_cd(struct mmc_host *mmc)
180{
181 struct mxs_mmc_host *host = mmc_priv(mmc);
182
183 return !(readl(host->base + HW_SSP_STATUS) &
184 BM_SSP_STATUS_CARD_DETECT);
185}
186
187static void mxs_mmc_reset(struct mxs_mmc_host *host)
188{
189 u32 ctrl0, ctrl1;
190
191 mxs_reset_block(host->base);
192
193 ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
194 ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
195 BF_SSP(0x7, CTRL1_WORD_LENGTH) |
196 BM_SSP_CTRL1_DMA_ENABLE |
197 BM_SSP_CTRL1_POLARITY |
198 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
199 BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
200 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
201 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
202 BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
203
204 writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
205 BF_SSP(2, TIMING_CLOCK_DIVIDE) |
206 BF_SSP(0, TIMING_CLOCK_RATE),
207 host->base + HW_SSP_TIMING);
208
209 if (host->sdio_irq_en) {
210 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
211 ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
212 }
213
214 writel(ctrl0, host->base + HW_SSP_CTRL0);
215 writel(ctrl1, host->base + HW_SSP_CTRL1);
216}
217
218static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
219 struct mmc_command *cmd);
220
221static void mxs_mmc_request_done(struct mxs_mmc_host *host)
222{
223 struct mmc_command *cmd = host->cmd;
224 struct mmc_data *data = host->data;
225 struct mmc_request *mrq = host->mrq;
226
227 if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
228 if (mmc_resp_type(cmd) & MMC_RSP_136) {
229 cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0);
230 cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1);
231 cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2);
232 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3);
233 } else {
234 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0);
235 }
236 }
237
238 if (data) {
239 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
240 data->sg_len, host->dma_dir);
241 /*
242 * If there was an error on any block, we mark all
243 * data blocks as being in error.
244 */
245 if (!data->error)
246 data->bytes_xfered = data->blocks * data->blksz;
247 else
248 data->bytes_xfered = 0;
249
250 host->data = NULL;
251 if (mrq->stop) {
252 mxs_mmc_start_cmd(host, mrq->stop);
253 return;
254 }
255 }
256
257 host->mrq = NULL;
258 mmc_request_done(host->mmc, mrq);
259}
260
261static void mxs_mmc_dma_irq_callback(void *param)
262{
263 struct mxs_mmc_host *host = param;
264
265 mxs_mmc_request_done(host);
266}
267
268static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
269{
270 struct mxs_mmc_host *host = dev_id;
271 struct mmc_command *cmd = host->cmd;
272 struct mmc_data *data = host->data;
273 u32 stat;
274
275 spin_lock(&host->lock);
276
277 stat = readl(host->base + HW_SSP_CTRL1);
278 writel(stat & MXS_MMC_IRQ_BITS,
279 host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
280
281 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
282 mmc_signal_sdio_irq(host->mmc);
283
284 spin_unlock(&host->lock);
285
286 if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
287 cmd->error = -ETIMEDOUT;
288 else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
289 cmd->error = -EIO;
290
291 if (data) {
292 if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
293 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
294 data->error = -ETIMEDOUT;
295 else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
296 data->error = -EILSEQ;
297 else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
298 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
299 data->error = -EIO;
300 }
301
302 return IRQ_HANDLED;
303}
304
305static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
306 struct mxs_mmc_host *host, unsigned int append)
307{
308 struct dma_async_tx_descriptor *desc;
309 struct mmc_data *data = host->data;
310 struct scatterlist * sgl;
311 unsigned int sg_len;
312
313 if (data) {
314 /* data */
315 dma_map_sg(mmc_dev(host->mmc), data->sg,
316 data->sg_len, host->dma_dir);
317 sgl = data->sg;
318 sg_len = data->sg_len;
319 } else {
320 /* pio */
321 sgl = (struct scatterlist *) host->ssp_pio_words;
322 sg_len = SSP_PIO_NUM;
323 }
324
325 desc = host->dmach->device->device_prep_slave_sg(host->dmach,
326 sgl, sg_len, host->dma_dir, append);
327 if (desc) {
328 desc->callback = mxs_mmc_dma_irq_callback;
329 desc->callback_param = host;
330 } else {
331 if (data)
332 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
333 data->sg_len, host->dma_dir);
334 }
335
336 return desc;
337}
338
339static void mxs_mmc_bc(struct mxs_mmc_host *host)
340{
341 struct mmc_command *cmd = host->cmd;
342 struct dma_async_tx_descriptor *desc;
343 u32 ctrl0, cmd0, cmd1;
344
345 ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
346 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
347 cmd1 = cmd->arg;
348
349 if (host->sdio_irq_en) {
350 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
351 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
352 }
353
354 host->ssp_pio_words[0] = ctrl0;
355 host->ssp_pio_words[1] = cmd0;
356 host->ssp_pio_words[2] = cmd1;
357 host->dma_dir = DMA_NONE;
358 desc = mxs_mmc_prep_dma(host, 0);
359 if (!desc)
360 goto out;
361
362 dmaengine_submit(desc);
363 return;
364
365out:
366 dev_warn(mmc_dev(host->mmc),
367 "%s: failed to prep dma\n", __func__);
368}
369
370static void mxs_mmc_ac(struct mxs_mmc_host *host)
371{
372 struct mmc_command *cmd = host->cmd;
373 struct dma_async_tx_descriptor *desc;
374 u32 ignore_crc, get_resp, long_resp;
375 u32 ctrl0, cmd0, cmd1;
376
377 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
378 0 : BM_SSP_CTRL0_IGNORE_CRC;
379 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
380 BM_SSP_CTRL0_GET_RESP : 0;
381 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
382 BM_SSP_CTRL0_LONG_RESP : 0;
383
384 ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
385 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
386 cmd1 = cmd->arg;
387
388 if (host->sdio_irq_en) {
389 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
390 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
391 }
392
393 host->ssp_pio_words[0] = ctrl0;
394 host->ssp_pio_words[1] = cmd0;
395 host->ssp_pio_words[2] = cmd1;
396 host->dma_dir = DMA_NONE;
397 desc = mxs_mmc_prep_dma(host, 0);
398 if (!desc)
399 goto out;
400
401 dmaengine_submit(desc);
402 return;
403
404out:
405 dev_warn(mmc_dev(host->mmc),
406 "%s: failed to prep dma\n", __func__);
407}
408
/*
 * Convert a timeout given in nanoseconds into SSP timeout ticks, where
 * one tick corresponds to 4096 SSP clock cycles.  Both factors are
 * pre-scaled by 1000 so the intermediate product stays within 32 bits.
 * NOTE(review): a zero result only triggers a warning; the caller still
 * programs it — presumably acceptable for this hardware, verify.
 */
static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
{
	const unsigned int ssp_timeout_mul = 4096;
	const unsigned int clock_per_ms = clock_rate / 1000;
	const unsigned int ms = ns / 1000;
	const unsigned int ssp_ticks = (ms * clock_per_ms) / ssp_timeout_mul;

	WARN_ON(ssp_ticks == 0);
	return ssp_ticks;
}
424
425static void mxs_mmc_adtc(struct mxs_mmc_host *host)
426{
427 struct mmc_command *cmd = host->cmd;
428 struct mmc_data *data = cmd->data;
429 struct dma_async_tx_descriptor *desc;
430 struct scatterlist *sgl = data->sg, *sg;
431 unsigned int sg_len = data->sg_len;
432 int i;
433
434 unsigned short dma_data_dir, timeout;
435 unsigned int data_size = 0, log2_blksz;
436 unsigned int blocks = data->blocks;
437
438 u32 ignore_crc, get_resp, long_resp, read;
439 u32 ctrl0, cmd0, cmd1, val;
440
441 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
442 0 : BM_SSP_CTRL0_IGNORE_CRC;
443 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
444 BM_SSP_CTRL0_GET_RESP : 0;
445 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
446 BM_SSP_CTRL0_LONG_RESP : 0;
447
448 if (data->flags & MMC_DATA_WRITE) {
449 dma_data_dir = DMA_TO_DEVICE;
450 read = 0;
451 } else {
452 dma_data_dir = DMA_FROM_DEVICE;
453 read = BM_SSP_CTRL0_READ;
454 }
455
456 ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
457 ignore_crc | get_resp | long_resp |
458 BM_SSP_CTRL0_DATA_XFER | read |
459 BM_SSP_CTRL0_WAIT_FOR_IRQ |
460 BM_SSP_CTRL0_ENABLE;
461
462 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
463
464 /* get logarithm to base 2 of block size for setting register */
465 log2_blksz = ilog2(data->blksz);
466
467 /*
468 * take special care of the case that data size from data->sg
469 * is not equal to blocks x blksz
470 */
471 for_each_sg(sgl, sg, sg_len, i)
472 data_size += sg->length;
473
474 if (data_size != data->blocks * data->blksz)
475 blocks = 1;
476
477 /* xfer count, block size and count need to be set differently */
478 if (ssp_is_old()) {
479 ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
480 cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
481 BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
482 } else {
483 writel(data_size, host->base + HW_SSP_XFER_SIZE);
484 writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
485 BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
486 host->base + HW_SSP_BLOCK_SIZE);
487 }
488
489 if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
490 (cmd->opcode == SD_IO_RW_EXTENDED))
491 cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
492
493 cmd1 = cmd->arg;
494
495 if (host->sdio_irq_en) {
496 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
497 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
498 }
499
500 /* set the timeout count */
501 timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
502 val = readl(host->base + HW_SSP_TIMING);
503 val &= ~(BM_SSP_TIMING_TIMEOUT);
504 val |= BF_SSP(timeout, TIMING_TIMEOUT);
505 writel(val, host->base + HW_SSP_TIMING);
506
507 /* pio */
508 host->ssp_pio_words[0] = ctrl0;
509 host->ssp_pio_words[1] = cmd0;
510 host->ssp_pio_words[2] = cmd1;
511 host->dma_dir = DMA_NONE;
512 desc = mxs_mmc_prep_dma(host, 0);
513 if (!desc)
514 goto out;
515
516 /* append data sg */
517 WARN_ON(host->data != NULL);
518 host->data = data;
519 host->dma_dir = dma_data_dir;
520 desc = mxs_mmc_prep_dma(host, 1);
521 if (!desc)
522 goto out;
523
524 dmaengine_submit(desc);
525 return;
526out:
527 dev_warn(mmc_dev(host->mmc),
528 "%s: failed to prep dma\n", __func__);
529}
530
531static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
532 struct mmc_command *cmd)
533{
534 host->cmd = cmd;
535
536 switch (mmc_cmd_type(cmd)) {
537 case MMC_CMD_BC:
538 mxs_mmc_bc(host);
539 break;
540 case MMC_CMD_BCR:
541 mxs_mmc_ac(host);
542 break;
543 case MMC_CMD_AC:
544 mxs_mmc_ac(host);
545 break;
546 case MMC_CMD_ADTC:
547 mxs_mmc_adtc(host);
548 break;
549 default:
550 dev_warn(mmc_dev(host->mmc),
551 "%s: unknown MMC command\n", __func__);
552 break;
553 }
554}
555
556static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
557{
558 struct mxs_mmc_host *host = mmc_priv(mmc);
559
560 WARN_ON(host->mrq != NULL);
561 host->mrq = mrq;
562 mxs_mmc_start_cmd(host, mrq->cmd);
563}
564
565static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
566{
567 unsigned int ssp_rate, bit_rate;
568 u32 div1, div2;
569 u32 val;
570
571 ssp_rate = clk_get_rate(host->clk);
572
573 for (div1 = 2; div1 < 254; div1 += 2) {
574 div2 = ssp_rate / rate / div1;
575 if (div2 < 0x100)
576 break;
577 }
578
579 if (div1 >= 254) {
580 dev_err(mmc_dev(host->mmc),
581 "%s: cannot set clock to %d\n", __func__, rate);
582 return;
583 }
584
585 if (div2 == 0)
586 bit_rate = ssp_rate / div1;
587 else
588 bit_rate = ssp_rate / div1 / div2;
589
590 val = readl(host->base + HW_SSP_TIMING);
591 val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
592 val |= BF_SSP(div1, TIMING_CLOCK_DIVIDE);
593 val |= BF_SSP(div2 - 1, TIMING_CLOCK_RATE);
594 writel(val, host->base + HW_SSP_TIMING);
595
596 host->clk_rate = bit_rate;
597
598 dev_dbg(mmc_dev(host->mmc),
599 "%s: div1 %d, div2 %d, ssp %d, bit %d, rate %d\n",
600 __func__, div1, div2, ssp_rate, bit_rate, rate);
601}
602
603static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
604{
605 struct mxs_mmc_host *host = mmc_priv(mmc);
606
607 if (ios->bus_width == MMC_BUS_WIDTH_8)
608 host->bus_width = 2;
609 else if (ios->bus_width == MMC_BUS_WIDTH_4)
610 host->bus_width = 1;
611 else
612 host->bus_width = 0;
613
614 if (ios->clock)
615 mxs_mmc_set_clk_rate(host, ios->clock);
616}
617
618static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
619{
620 struct mxs_mmc_host *host = mmc_priv(mmc);
621 unsigned long flags;
622
623 spin_lock_irqsave(&host->lock, flags);
624
625 host->sdio_irq_en = enable;
626
627 if (enable) {
628 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
629 host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
630 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
631 host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
632
633 if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
634 mmc_signal_sdio_irq(host->mmc);
635
636 } else {
637 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
638 host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
639 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
640 host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
641 }
642
643 spin_unlock_irqrestore(&host->lock, flags);
644}
645
646static const struct mmc_host_ops mxs_mmc_ops = {
647 .request = mxs_mmc_request,
648 .get_ro = mxs_mmc_get_ro,
649 .get_cd = mxs_mmc_get_cd,
650 .set_ios = mxs_mmc_set_ios,
651 .enable_sdio_irq = mxs_mmc_enable_sdio_irq,
652};
653
654static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
655{
656 struct mxs_mmc_host *host = param;
657
658 if (!mxs_dma_is_apbh(chan))
659 return false;
660
661 if (chan->chan_id != host->dma_res->start)
662 return false;
663
664 chan->private = &host->dma_data;
665
666 return true;
667}
668
669static int mxs_mmc_probe(struct platform_device *pdev)
670{
671 struct mxs_mmc_host *host;
672 struct mmc_host *mmc;
673 struct resource *iores, *dmares, *r;
674 struct mxs_mmc_platform_data *pdata;
675 int ret = 0, irq_err, irq_dma;
676 dma_cap_mask_t mask;
677
678 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
679 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
680 irq_err = platform_get_irq(pdev, 0);
681 irq_dma = platform_get_irq(pdev, 1);
682 if (!iores || !dmares || irq_err < 0 || irq_dma < 0)
683 return -EINVAL;
684
685 r = request_mem_region(iores->start, resource_size(iores), pdev->name);
686 if (!r)
687 return -EBUSY;
688
689 mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
690 if (!mmc) {
691 ret = -ENOMEM;
692 goto out_release_mem;
693 }
694
695 host = mmc_priv(mmc);
696 host->base = ioremap(r->start, resource_size(r));
697 if (!host->base) {
698 ret = -ENOMEM;
699 goto out_mmc_free;
700 }
701
702 /* only major verion does matter */
703 host->version = readl(host->base + HW_SSP_VERSION) >>
704 BP_SSP_VERSION_MAJOR;
705
706 host->mmc = mmc;
707 host->res = r;
708 host->dma_res = dmares;
709 host->irq = irq_err;
710 host->sdio_irq_en = 0;
711
712 host->clk = clk_get(&pdev->dev, NULL);
713 if (IS_ERR(host->clk)) {
714 ret = PTR_ERR(host->clk);
715 goto out_iounmap;
716 }
717 clk_enable(host->clk);
718
719 mxs_mmc_reset(host);
720
721 dma_cap_zero(mask);
722 dma_cap_set(DMA_SLAVE, mask);
723 host->dma_data.chan_irq = irq_dma;
724 host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
725 if (!host->dmach) {
726 dev_err(mmc_dev(host->mmc),
727 "%s: failed to request dma\n", __func__);
728 goto out_clk_put;
729 }
730
731 /* set mmc core parameters */
732 mmc->ops = &mxs_mmc_ops;
733 mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
734 MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
735
736 pdata = mmc_dev(host->mmc)->platform_data;
737 if (pdata) {
738 if (pdata->flags & SLOTF_8_BIT_CAPABLE)
739 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
740 if (pdata->flags & SLOTF_4_BIT_CAPABLE)
741 mmc->caps |= MMC_CAP_4_BIT_DATA;
742 }
743
744 mmc->f_min = 400000;
745 mmc->f_max = 288000000;
746 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
747
748 mmc->max_segs = 52;
749 mmc->max_blk_size = 1 << 0xf;
750 mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff;
751 mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff;
752 mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
753
754 platform_set_drvdata(pdev, mmc);
755
756 ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
757 if (ret)
758 goto out_free_dma;
759
760 spin_lock_init(&host->lock);
761
762 ret = mmc_add_host(mmc);
763 if (ret)
764 goto out_free_irq;
765
766 dev_info(mmc_dev(host->mmc), "initialized\n");
767
768 return 0;
769
770out_free_irq:
771 free_irq(host->irq, host);
772out_free_dma:
773 if (host->dmach)
774 dma_release_channel(host->dmach);
775out_clk_put:
776 clk_disable(host->clk);
777 clk_put(host->clk);
778out_iounmap:
779 iounmap(host->base);
780out_mmc_free:
781 mmc_free_host(mmc);
782out_release_mem:
783 release_mem_region(iores->start, resource_size(iores));
784 return ret;
785}
786
787static int mxs_mmc_remove(struct platform_device *pdev)
788{
789 struct mmc_host *mmc = platform_get_drvdata(pdev);
790 struct mxs_mmc_host *host = mmc_priv(mmc);
791 struct resource *res = host->res;
792
793 mmc_remove_host(mmc);
794
795 free_irq(host->irq, host);
796
797 platform_set_drvdata(pdev, NULL);
798
799 if (host->dmach)
800 dma_release_channel(host->dmach);
801
802 clk_disable(host->clk);
803 clk_put(host->clk);
804
805 iounmap(host->base);
806
807 mmc_free_host(mmc);
808
809 release_mem_region(res->start, resource_size(res));
810
811 return 0;
812}
813
814#ifdef CONFIG_PM
815static int mxs_mmc_suspend(struct device *dev)
816{
817 struct mmc_host *mmc = dev_get_drvdata(dev);
818 struct mxs_mmc_host *host = mmc_priv(mmc);
819 int ret = 0;
820
821 ret = mmc_suspend_host(mmc);
822
823 clk_disable(host->clk);
824
825 return ret;
826}
827
828static int mxs_mmc_resume(struct device *dev)
829{
830 struct mmc_host *mmc = dev_get_drvdata(dev);
831 struct mxs_mmc_host *host = mmc_priv(mmc);
832 int ret = 0;
833
834 clk_enable(host->clk);
835
836 ret = mmc_resume_host(mmc);
837
838 return ret;
839}
840
841static const struct dev_pm_ops mxs_mmc_pm_ops = {
842 .suspend = mxs_mmc_suspend,
843 .resume = mxs_mmc_resume,
844};
845#endif
846
847static struct platform_driver mxs_mmc_driver = {
848 .probe = mxs_mmc_probe,
849 .remove = mxs_mmc_remove,
850 .driver = {
851 .name = DRIVER_NAME,
852 .owner = THIS_MODULE,
853#ifdef CONFIG_PM
854 .pm = &mxs_mmc_pm_ops,
855#endif
856 },
857};
858
859static int __init mxs_mmc_init(void)
860{
861 return platform_driver_register(&mxs_mmc_driver);
862}
863
864static void __exit mxs_mmc_exit(void)
865{
866 platform_driver_unregister(&mxs_mmc_driver);
867}
868
869module_init(mxs_mmc_init);
870module_exit(mxs_mmc_exit);
871
872MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
873MODULE_AUTHOR("Freescale Semiconductor");
874MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 1247e5de9faa..ab66f2454dc4 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -15,14 +15,21 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/irq.h>
18#include <linux/gpio.h> 19#include <linux/gpio.h>
19#include <linux/of.h> 20#include <linux/of.h>
20#include <linux/of_gpio.h> 21#include <linux/of_gpio.h>
22#include <linux/of_irq.h>
21#include <linux/spi/spi.h> 23#include <linux/spi/spi.h>
22#include <linux/spi/mmc_spi.h> 24#include <linux/spi/mmc_spi.h>
23#include <linux/mmc/core.h> 25#include <linux/mmc/core.h>
24#include <linux/mmc/host.h> 26#include <linux/mmc/host.h>
25 27
28/* For archs that don't support NO_IRQ (such as mips), provide a dummy value */
29#ifndef NO_IRQ
30#define NO_IRQ 0
31#endif
32
26MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
27 34
28enum { 35enum {
@@ -34,6 +41,7 @@ enum {
34struct of_mmc_spi { 41struct of_mmc_spi {
35 int gpios[NUM_GPIOS]; 42 int gpios[NUM_GPIOS];
36 bool alow_gpios[NUM_GPIOS]; 43 bool alow_gpios[NUM_GPIOS];
44 int detect_irq;
37 struct mmc_spi_platform_data pdata; 45 struct mmc_spi_platform_data pdata;
38}; 46};
39 47
@@ -61,6 +69,22 @@ static int of_mmc_spi_get_ro(struct device *dev)
61 return of_mmc_spi_read_gpio(dev, WP_GPIO); 69 return of_mmc_spi_read_gpio(dev, WP_GPIO);
62} 70}
63 71
72static int of_mmc_spi_init(struct device *dev,
73 irqreturn_t (*irqhandler)(int, void *), void *mmc)
74{
75 struct of_mmc_spi *oms = to_of_mmc_spi(dev);
76
77 return request_threaded_irq(oms->detect_irq, NULL, irqhandler, 0,
78 dev_name(dev), mmc);
79}
80
81static void of_mmc_spi_exit(struct device *dev, void *mmc)
82{
83 struct of_mmc_spi *oms = to_of_mmc_spi(dev);
84
85 free_irq(oms->detect_irq, mmc);
86}
87
64struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) 88struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
65{ 89{
66 struct device *dev = &spi->dev; 90 struct device *dev = &spi->dev;
@@ -121,8 +145,13 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
121 if (gpio_is_valid(oms->gpios[WP_GPIO])) 145 if (gpio_is_valid(oms->gpios[WP_GPIO]))
122 oms->pdata.get_ro = of_mmc_spi_get_ro; 146 oms->pdata.get_ro = of_mmc_spi_get_ro;
123 147
124 /* We don't support interrupts yet, let's poll. */ 148 oms->detect_irq = irq_of_parse_and_map(np, 0);
125 oms->pdata.caps |= MMC_CAP_NEEDS_POLL; 149 if (oms->detect_irq != NO_IRQ) {
150 oms->pdata.init = of_mmc_spi_init;
151 oms->pdata.exit = of_mmc_spi_exit;
152 } else {
153 oms->pdata.caps |= MMC_CAP_NEEDS_POLL;
154 }
126 155
127 dev->platform_data = &oms->pdata; 156 dev->platform_data = &oms->pdata;
128 return dev->platform_data; 157 return dev->platform_data;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index d98ddcfac5e5..a6c329040140 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -173,6 +173,8 @@ struct mmc_omap_host {
173 struct omap_mmc_platform_data *pdata; 173 struct omap_mmc_platform_data *pdata;
174}; 174};
175 175
176static struct workqueue_struct *mmc_omap_wq;
177
176static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot) 178static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
177{ 179{
178 unsigned long tick_ns; 180 unsigned long tick_ns;
@@ -289,7 +291,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
289 host->next_slot = new_slot; 291 host->next_slot = new_slot;
290 host->mmc = new_slot->mmc; 292 host->mmc = new_slot->mmc;
291 spin_unlock_irqrestore(&host->slot_lock, flags); 293 spin_unlock_irqrestore(&host->slot_lock, flags);
292 schedule_work(&host->slot_release_work); 294 queue_work(mmc_omap_wq, &host->slot_release_work);
293 return; 295 return;
294 } 296 }
295 297
@@ -457,7 +459,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
457 } 459 }
458 460
459 host->stop_data = data; 461 host->stop_data = data;
460 schedule_work(&host->send_stop_work); 462 queue_work(mmc_omap_wq, &host->send_stop_work);
461} 463}
462 464
463static void 465static void
@@ -637,7 +639,7 @@ mmc_omap_cmd_timer(unsigned long data)
637 OMAP_MMC_WRITE(host, IE, 0); 639 OMAP_MMC_WRITE(host, IE, 0);
638 disable_irq(host->irq); 640 disable_irq(host->irq);
639 host->abort = 1; 641 host->abort = 1;
640 schedule_work(&host->cmd_abort_work); 642 queue_work(mmc_omap_wq, &host->cmd_abort_work);
641 } 643 }
642 spin_unlock_irqrestore(&host->slot_lock, flags); 644 spin_unlock_irqrestore(&host->slot_lock, flags);
643} 645}
@@ -826,11 +828,11 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
826 host->abort = 1; 828 host->abort = 1;
827 OMAP_MMC_WRITE(host, IE, 0); 829 OMAP_MMC_WRITE(host, IE, 0);
828 disable_irq_nosync(host->irq); 830 disable_irq_nosync(host->irq);
829 schedule_work(&host->cmd_abort_work); 831 queue_work(mmc_omap_wq, &host->cmd_abort_work);
830 return IRQ_HANDLED; 832 return IRQ_HANDLED;
831 } 833 }
832 834
833 if (end_command) 835 if (end_command && host->cmd)
834 mmc_omap_cmd_done(host, host->cmd); 836 mmc_omap_cmd_done(host, host->cmd);
835 if (host->data != NULL) { 837 if (host->data != NULL) {
836 if (transfer_error) 838 if (transfer_error)
@@ -1335,8 +1337,7 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
1335 * NOTE max_seg_size assumption that small blocks aren't 1337 * NOTE max_seg_size assumption that small blocks aren't
1336 * normally used (except e.g. for reading SD registers). 1338 * normally used (except e.g. for reading SD registers).
1337 */ 1339 */
1338 mmc->max_phys_segs = 32; 1340 mmc->max_segs = 32;
1339 mmc->max_hw_segs = 32;
1340 mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */ 1341 mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
1341 mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */ 1342 mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
1342 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1343 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
@@ -1388,7 +1389,7 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
1388 1389
1389 tasklet_kill(&slot->cover_tasklet); 1390 tasklet_kill(&slot->cover_tasklet);
1390 del_timer_sync(&slot->cover_timer); 1391 del_timer_sync(&slot->cover_timer);
1391 flush_scheduled_work(); 1392 flush_workqueue(mmc_omap_wq);
1392 1393
1393 mmc_remove_host(mmc); 1394 mmc_remove_host(mmc);
1394 mmc_free_host(mmc); 1395 mmc_free_host(mmc);
@@ -1416,7 +1417,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1416 if (res == NULL || irq < 0) 1417 if (res == NULL || irq < 0)
1417 return -ENXIO; 1418 return -ENXIO;
1418 1419
1419 res = request_mem_region(res->start, res->end - res->start + 1, 1420 res = request_mem_region(res->start, resource_size(res),
1420 pdev->name); 1421 pdev->name);
1421 if (res == NULL) 1422 if (res == NULL)
1422 return -EBUSY; 1423 return -EBUSY;
@@ -1456,7 +1457,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1456 1457
1457 host->irq = irq; 1458 host->irq = irq;
1458 host->phys_base = host->mem_res->start; 1459 host->phys_base = host->mem_res->start;
1459 host->virt_base = ioremap(res->start, res->end - res->start + 1); 1460 host->virt_base = ioremap(res->start, resource_size(res));
1460 if (!host->virt_base) 1461 if (!host->virt_base)
1461 goto err_ioremap; 1462 goto err_ioremap;
1462 1463
@@ -1513,7 +1514,7 @@ err_free_mmc_host:
1513err_ioremap: 1514err_ioremap:
1514 kfree(host); 1515 kfree(host);
1515err_free_mem_region: 1516err_free_mem_region:
1516 release_mem_region(res->start, res->end - res->start + 1); 1517 release_mem_region(res->start, resource_size(res));
1517 return ret; 1518 return ret;
1518} 1519}
1519 1520
@@ -1609,12 +1610,22 @@ static struct platform_driver mmc_omap_driver = {
1609 1610
1610static int __init mmc_omap_init(void) 1611static int __init mmc_omap_init(void)
1611{ 1612{
1612 return platform_driver_probe(&mmc_omap_driver, mmc_omap_probe); 1613 int ret;
1614
1615 mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1616 if (!mmc_omap_wq)
1617 return -ENOMEM;
1618
1619 ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
1620 if (ret)
1621 destroy_workqueue(mmc_omap_wq);
1622 return ret;
1613} 1623}
1614 1624
1615static void __exit mmc_omap_exit(void) 1625static void __exit mmc_omap_exit(void)
1616{ 1626{
1617 platform_driver_unregister(&mmc_omap_driver); 1627 platform_driver_unregister(&mmc_omap_driver);
1628 destroy_workqueue(mmc_omap_wq);
1618} 1629}
1619 1630
1620module_init(mmc_omap_init); 1631module_init(mmc_omap_init);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4526d2791f29..dedf3dab8a3b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -118,7 +118,7 @@
118 118
119#define MMC_TIMEOUT_MS 20 119#define MMC_TIMEOUT_MS 20
120#define OMAP_MMC_MASTER_CLOCK 96000000 120#define OMAP_MMC_MASTER_CLOCK 96000000
121#define DRIVER_NAME "mmci-omap-hs" 121#define DRIVER_NAME "omap_hsmmc"
122 122
123/* Timeouts for entering power saving states on inactivity, msec */ 123/* Timeouts for entering power saving states on inactivity, msec */
124#define OMAP_MMC_DISABLED_TIMEOUT 100 124#define OMAP_MMC_DISABLED_TIMEOUT 100
@@ -250,9 +250,9 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on,
250 mmc_slot(host).before_set_reg(dev, slot, power_on, vdd); 250 mmc_slot(host).before_set_reg(dev, slot, power_on, vdd);
251 251
252 if (power_on) 252 if (power_on)
253 ret = mmc_regulator_set_ocr(host->vcc, vdd); 253 ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
254 else 254 else
255 ret = mmc_regulator_set_ocr(host->vcc, 0); 255 ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
256 256
257 if (mmc_slot(host).after_set_reg) 257 if (mmc_slot(host).after_set_reg)
258 mmc_slot(host).after_set_reg(dev, slot, power_on, vdd); 258 mmc_slot(host).after_set_reg(dev, slot, power_on, vdd);
@@ -260,7 +260,7 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on,
260 return ret; 260 return ret;
261} 261}
262 262
263static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on, 263static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on,
264 int vdd) 264 int vdd)
265{ 265{
266 struct omap_hsmmc_host *host = 266 struct omap_hsmmc_host *host =
@@ -291,18 +291,23 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
291 * chips/cards need an interface voltage rail too. 291 * chips/cards need an interface voltage rail too.
292 */ 292 */
293 if (power_on) { 293 if (power_on) {
294 ret = mmc_regulator_set_ocr(host->vcc, vdd); 294 ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
295 /* Enable interface voltage rail, if needed */ 295 /* Enable interface voltage rail, if needed */
296 if (ret == 0 && host->vcc_aux) { 296 if (ret == 0 && host->vcc_aux) {
297 ret = regulator_enable(host->vcc_aux); 297 ret = regulator_enable(host->vcc_aux);
298 if (ret < 0) 298 if (ret < 0)
299 ret = mmc_regulator_set_ocr(host->vcc, 0); 299 ret = mmc_regulator_set_ocr(host->mmc,
300 host->vcc, 0);
300 } 301 }
301 } else { 302 } else {
303 /* Shut down the rail */
302 if (host->vcc_aux) 304 if (host->vcc_aux)
303 ret = regulator_disable(host->vcc_aux); 305 ret = regulator_disable(host->vcc_aux);
304 if (ret == 0) 306 if (!ret) {
305 ret = mmc_regulator_set_ocr(host->vcc, 0); 307 /* Then proceed to shut down the local regulator */
308 ret = mmc_regulator_set_ocr(host->mmc,
309 host->vcc, 0);
310 }
306 } 311 }
307 312
308 if (mmc_slot(host).after_set_reg) 313 if (mmc_slot(host).after_set_reg)
@@ -311,6 +316,12 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
311 return ret; 316 return ret;
312} 317}
313 318
319static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on,
320 int vdd)
321{
322 return 0;
323}
324
314static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, 325static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
315 int vdd, int cardsleep) 326 int vdd, int cardsleep)
316{ 327{
@@ -321,7 +332,7 @@ static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
321 return regulator_set_mode(host->vcc, mode); 332 return regulator_set_mode(host->vcc, mode);
322} 333}
323 334
324static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep, 335static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep,
325 int vdd, int cardsleep) 336 int vdd, int cardsleep)
326{ 337{
327 struct omap_hsmmc_host *host = 338 struct omap_hsmmc_host *host =
@@ -343,9 +354,9 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
343 if (cardsleep) { 354 if (cardsleep) {
344 /* VCC can be turned off if card is asleep */ 355 /* VCC can be turned off if card is asleep */
345 if (sleep) 356 if (sleep)
346 err = mmc_regulator_set_ocr(host->vcc, 0); 357 err = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
347 else 358 else
348 err = mmc_regulator_set_ocr(host->vcc, vdd); 359 err = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
349 } else 360 } else
350 err = regulator_set_mode(host->vcc, mode); 361 err = regulator_set_mode(host->vcc, mode);
351 if (err) 362 if (err)
@@ -360,10 +371,17 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
360 return regulator_enable(host->vcc_aux); 371 return regulator_enable(host->vcc_aux);
361} 372}
362 373
374static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep,
375 int vdd, int cardsleep)
376{
377 return 0;
378}
379
363static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) 380static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
364{ 381{
365 struct regulator *reg; 382 struct regulator *reg;
366 int ret = 0; 383 int ret = 0;
384 int ocr_value = 0;
367 385
368 switch (host->id) { 386 switch (host->id) {
369 case OMAP_MMC1_DEVID: 387 case OMAP_MMC1_DEVID:
@@ -373,10 +391,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
373 break; 391 break;
374 case OMAP_MMC2_DEVID: 392 case OMAP_MMC2_DEVID:
375 case OMAP_MMC3_DEVID: 393 case OMAP_MMC3_DEVID:
394 case OMAP_MMC5_DEVID:
376 /* Off-chip level shifting, or none */ 395 /* Off-chip level shifting, or none */
377 mmc_slot(host).set_power = omap_hsmmc_23_set_power; 396 mmc_slot(host).set_power = omap_hsmmc_235_set_power;
378 mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep; 397 mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep;
379 break; 398 break;
399 case OMAP_MMC4_DEVID:
400 mmc_slot(host).set_power = omap_hsmmc_4_set_power;
401 mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep;
380 default: 402 default:
381 pr_err("MMC%d configuration not supported!\n", host->id); 403 pr_err("MMC%d configuration not supported!\n", host->id);
382 return -EINVAL; 404 return -EINVAL;
@@ -396,12 +418,25 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
396 } 418 }
397 } else { 419 } else {
398 host->vcc = reg; 420 host->vcc = reg;
399 mmc_slot(host).ocr_mask = mmc_regulator_get_ocrmask(reg); 421 ocr_value = mmc_regulator_get_ocrmask(reg);
422 if (!mmc_slot(host).ocr_mask) {
423 mmc_slot(host).ocr_mask = ocr_value;
424 } else {
425 if (!(mmc_slot(host).ocr_mask & ocr_value)) {
426 pr_err("MMC%d ocrmask %x is not supported\n",
427 host->id, mmc_slot(host).ocr_mask);
428 mmc_slot(host).ocr_mask = 0;
429 return -EINVAL;
430 }
431 }
400 432
401 /* Allow an aux regulator */ 433 /* Allow an aux regulator */
402 reg = regulator_get(host->dev, "vmmc_aux"); 434 reg = regulator_get(host->dev, "vmmc_aux");
403 host->vcc_aux = IS_ERR(reg) ? NULL : reg; 435 host->vcc_aux = IS_ERR(reg) ? NULL : reg;
404 436
437 /* For eMMC do not power off when not in sleep state */
438 if (mmc_slot(host).no_regulator_off_init)
439 return 0;
405 /* 440 /*
406 * UGLY HACK: workaround regulator framework bugs. 441 * UGLY HACK: workaround regulator framework bugs.
407 * When the bootloader leaves a supply active, it's 442 * When the bootloader leaves a supply active, it's
@@ -466,8 +501,6 @@ static int omap_hsmmc_gpio_init(struct omap_mmc_platform_data *pdata)
466 int ret; 501 int ret;
467 502
468 if (gpio_is_valid(pdata->slots[0].switch_pin)) { 503 if (gpio_is_valid(pdata->slots[0].switch_pin)) {
469 pdata->suspend = omap_hsmmc_suspend_cdirq;
470 pdata->resume = omap_hsmmc_resume_cdirq;
471 if (pdata->slots[0].cover) 504 if (pdata->slots[0].cover)
472 pdata->slots[0].get_cover_state = 505 pdata->slots[0].get_cover_state =
473 omap_hsmmc_get_cover_state; 506 omap_hsmmc_get_cover_state;
@@ -928,7 +961,8 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
928 spin_unlock(&host->irq_lock); 961 spin_unlock(&host->irq_lock);
929 962
930 if (host->use_dma && dma_ch != -1) { 963 if (host->use_dma && dma_ch != -1) {
931 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, 964 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
965 host->data->sg_len,
932 omap_hsmmc_get_dma_dir(host, host->data)); 966 omap_hsmmc_get_dma_dir(host, host->data));
933 omap_free_dma(dma_ch); 967 omap_free_dma(dma_ch);
934 } 968 }
@@ -982,6 +1016,17 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
982 OMAP_HSMMC_WRITE(host->base, SYSCTL, 1016 OMAP_HSMMC_WRITE(host->base, SYSCTL,
983 OMAP_HSMMC_READ(host->base, SYSCTL) | bit); 1017 OMAP_HSMMC_READ(host->base, SYSCTL) | bit);
984 1018
1019 /*
1020 * OMAP4 ES2 and greater has an updated reset logic.
1021 * Monitor a 0->1 transition first
1022 */
1023 if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) {
1024 while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
1025 && (i++ < limit))
1026 cpu_relax();
1027 }
1028 i = 0;
1029
985 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) && 1030 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
986 (i++ < limit)) 1031 (i++ < limit))
987 cpu_relax(); 1032 cpu_relax();
@@ -1301,7 +1346,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1301 return; 1346 return;
1302 } 1347 }
1303 1348
1304 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len, 1349 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
1305 omap_hsmmc_get_dma_dir(host, data)); 1350 omap_hsmmc_get_dma_dir(host, data));
1306 1351
1307 req_in_progress = host->req_in_progress; 1352 req_in_progress = host->req_in_progress;
@@ -1529,7 +1574,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1529 break; 1574 break;
1530 } 1575 }
1531 1576
1532 if (host->id == OMAP_MMC1_DEVID) { 1577 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1533 /* Only MMC1 can interface at 3V without some flavor 1578 /* Only MMC1 can interface at 3V without some flavor
1534 * of external transceiver; but they all handle 1.8V. 1579 * of external transceiver; but they all handle 1.8V.
1535 */ 1580 */
@@ -1621,7 +1666,7 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
1621 u32 hctl, capa, value; 1666 u32 hctl, capa, value;
1622 1667
1623 /* Only MMC1 supports 3.0V */ 1668 /* Only MMC1 supports 3.0V */
1624 if (host->id == OMAP_MMC1_DEVID) { 1669 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1625 hctl = SDVS30; 1670 hctl = SDVS30;
1626 capa = VS30 | VS18; 1671 capa = VS30 | VS18;
1627 } else { 1672 } else {
@@ -2003,8 +2048,9 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2003 if (res == NULL || irq < 0) 2048 if (res == NULL || irq < 0)
2004 return -ENXIO; 2049 return -ENXIO;
2005 2050
2006 res = request_mem_region(res->start, res->end - res->start + 1, 2051 res->start += pdata->reg_offset;
2007 pdev->name); 2052 res->end += pdata->reg_offset;
2053 res = request_mem_region(res->start, resource_size(res), pdev->name);
2008 if (res == NULL) 2054 if (res == NULL)
2009 return -EBUSY; 2055 return -EBUSY;
2010 2056
@@ -2073,14 +2119,14 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2073 /* we start off in DISABLED state */ 2119 /* we start off in DISABLED state */
2074 host->dpm_state = DISABLED; 2120 host->dpm_state = DISABLED;
2075 2121
2076 if (mmc_host_enable(host->mmc) != 0) { 2122 if (clk_enable(host->iclk) != 0) {
2077 clk_put(host->iclk); 2123 clk_put(host->iclk);
2078 clk_put(host->fclk); 2124 clk_put(host->fclk);
2079 goto err1; 2125 goto err1;
2080 } 2126 }
2081 2127
2082 if (clk_enable(host->iclk) != 0) { 2128 if (mmc_host_enable(host->mmc) != 0) {
2083 mmc_host_disable(host->mmc); 2129 clk_disable(host->iclk);
2084 clk_put(host->iclk); 2130 clk_put(host->iclk);
2085 clk_put(host->fclk); 2131 clk_put(host->fclk);
2086 goto err1; 2132 goto err1;
@@ -2105,8 +2151,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2105 2151
2106 /* Since we do only SG emulation, we can have as many segs 2152 /* Since we do only SG emulation, we can have as many segs
2107 * as we want. */ 2153 * as we want. */
2108 mmc->max_phys_segs = 1024; 2154 mmc->max_segs = 1024;
2109 mmc->max_hw_segs = 1024;
2110 2155
2111 mmc->max_blk_size = 512; /* Block Length at max can be 1024 */ 2156 mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
2112 mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */ 2157 mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
@@ -2116,23 +2161,9 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2116 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 2161 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
2117 MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE; 2162 MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
2118 2163
2119 switch (mmc_slot(host).wires) { 2164 mmc->caps |= mmc_slot(host).caps;
2120 case 8: 2165 if (mmc->caps & MMC_CAP_8_BIT_DATA)
2121 mmc->caps |= MMC_CAP_8_BIT_DATA;
2122 /* Fall through */
2123 case 4:
2124 mmc->caps |= MMC_CAP_4_BIT_DATA; 2166 mmc->caps |= MMC_CAP_4_BIT_DATA;
2125 break;
2126 case 1:
2127 /* Nothing to crib here */
2128 case 0:
2129 /* Assuming nothing was given by board, Core use's 1-Bit */
2130 break;
2131 default:
2132 /* Completely unexpected.. Core goes with 1-Bit Width */
2133 dev_crit(mmc_dev(host->mmc), "Invalid width %d\n used!"
2134 "using 1 instead\n", mmc_slot(host).wires);
2135 }
2136 2167
2137 if (mmc_slot(host).nonremovable) 2168 if (mmc_slot(host).nonremovable)
2138 mmc->caps |= MMC_CAP_NONREMOVABLE; 2169 mmc->caps |= MMC_CAP_NONREMOVABLE;
@@ -2203,6 +2234,8 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2203 "Unable to grab MMC CD IRQ\n"); 2234 "Unable to grab MMC CD IRQ\n");
2204 goto err_irq_cd; 2235 goto err_irq_cd;
2205 } 2236 }
2237 pdata->suspend = omap_hsmmc_suspend_cdirq;
2238 pdata->resume = omap_hsmmc_resume_cdirq;
2206 } 2239 }
2207 2240
2208 omap_hsmmc_disable_irq(host); 2241 omap_hsmmc_disable_irq(host);
@@ -2256,7 +2289,7 @@ err1:
2256err_alloc: 2289err_alloc:
2257 omap_hsmmc_gpio_free(pdata); 2290 omap_hsmmc_gpio_free(pdata);
2258err: 2291err:
2259 release_mem_region(res->start, res->end - res->start + 1); 2292 release_mem_region(res->start, resource_size(res));
2260 return ret; 2293 return ret;
2261} 2294}
2262 2295
@@ -2275,7 +2308,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2275 free_irq(host->irq, host); 2308 free_irq(host->irq, host);
2276 if (mmc_slot(host).card_detect_irq) 2309 if (mmc_slot(host).card_detect_irq)
2277 free_irq(mmc_slot(host).card_detect_irq, host); 2310 free_irq(mmc_slot(host).card_detect_irq, host);
2278 flush_scheduled_work(); 2311 flush_work_sync(&host->mmc_carddetect_work);
2279 2312
2280 mmc_host_disable(host->mmc); 2313 mmc_host_disable(host->mmc);
2281 clk_disable(host->iclk); 2314 clk_disable(host->iclk);
@@ -2293,7 +2326,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2293 2326
2294 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2327 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2295 if (res) 2328 if (res)
2296 release_mem_region(res->start, res->end - res->start + 1); 2329 release_mem_region(res->start, resource_size(res));
2297 platform_set_drvdata(pdev, NULL); 2330 platform_set_drvdata(pdev, NULL);
2298 2331
2299 return 0; 2332 return 0;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0a4e43f37140..7257738fd7da 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -99,14 +99,25 @@ static inline void pxamci_init_ocr(struct pxamci_host *host)
99 } 99 }
100} 100}
101 101
102static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd) 102static inline int pxamci_set_power(struct pxamci_host *host,
103 unsigned char power_mode,
104 unsigned int vdd)
103{ 105{
104 int on; 106 int on;
105 107
106#ifdef CONFIG_REGULATOR 108 if (host->vcc) {
107 if (host->vcc) 109 int ret;
108 mmc_regulator_set_ocr(host->vcc, vdd); 110
109#endif 111 if (power_mode == MMC_POWER_UP) {
112 ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
113 if (ret)
114 return ret;
115 } else if (power_mode == MMC_POWER_OFF) {
116 ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
117 if (ret)
118 return ret;
119 }
120 }
110 if (!host->vcc && host->pdata && 121 if (!host->vcc && host->pdata &&
111 gpio_is_valid(host->pdata->gpio_power)) { 122 gpio_is_valid(host->pdata->gpio_power)) {
112 on = ((1 << vdd) & host->pdata->ocr_mask); 123 on = ((1 << vdd) & host->pdata->ocr_mask);
@@ -115,6 +126,8 @@ static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
115 } 126 }
116 if (!host->vcc && host->pdata && host->pdata->setpower) 127 if (!host->vcc && host->pdata && host->pdata->setpower)
117 host->pdata->setpower(mmc_dev(host->mmc), vdd); 128 host->pdata->setpower(mmc_dev(host->mmc), vdd);
129
130 return 0;
118} 131}
119 132
120static void pxamci_stop_clock(struct pxamci_host *host) 133static void pxamci_stop_clock(struct pxamci_host *host)
@@ -490,9 +503,21 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
490 } 503 }
491 504
492 if (host->power_mode != ios->power_mode) { 505 if (host->power_mode != ios->power_mode) {
506 int ret;
507
493 host->power_mode = ios->power_mode; 508 host->power_mode = ios->power_mode;
494 509
495 pxamci_set_power(host, ios->vdd); 510 ret = pxamci_set_power(host, ios->power_mode, ios->vdd);
511 if (ret) {
512 dev_err(mmc_dev(mmc), "unable to set power\n");
513 /*
514 * The .set_ios() function in the mmc_host_ops
515 * struct return void, and failing to set the
516 * power should be rare so we print an error and
517 * return here.
518 */
519 return;
520 }
496 521
497 if (ios->power_mode == MMC_POWER_ON) 522 if (ios->power_mode == MMC_POWER_ON)
498 host->cmdat |= CMDAT_INIT; 523 host->cmdat |= CMDAT_INIT;
@@ -503,8 +528,8 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
503 else 528 else
504 host->cmdat &= ~CMDAT_SD_4DAT; 529 host->cmdat &= ~CMDAT_SD_4DAT;
505 530
506 pr_debug("PXAMCI: clkrt = %x cmdat = %x\n", 531 dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n",
507 host->clkrt, host->cmdat); 532 host->clkrt, host->cmdat);
508} 533}
509 534
510static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable) 535static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
@@ -576,7 +601,7 @@ static int pxamci_probe(struct platform_device *pdev)
576 * We can do SG-DMA, but we don't because we never know how much 601 * We can do SG-DMA, but we don't because we never know how much
577 * data we successfully wrote to the card. 602 * data we successfully wrote to the card.
578 */ 603 */
579 mmc->max_phys_segs = NR_SG; 604 mmc->max_segs = NR_SG;
580 605
581 /* 606 /*
582 * Our hardware DMA can handle a maximum of one page per SG entry. 607 * Our hardware DMA can handle a maximum of one page per SG entry.
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 976330de379e..a04f87d7ee3d 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -874,7 +874,7 @@ static void finalize_request(struct s3cmci_host *host)
874 if (!mrq->data) 874 if (!mrq->data)
875 goto request_done; 875 goto request_done;
876 876
877 /* Calulate the amout of bytes transfer if there was no error */ 877 /* Calculate the amout of bytes transfer if there was no error */
878 if (mrq->data->error == 0) { 878 if (mrq->data->error == 0) {
879 mrq->data->bytes_xfered = 879 mrq->data->bytes_xfered =
880 (mrq->data->blocks * mrq->data->blksz); 880 (mrq->data->blocks * mrq->data->blksz);
@@ -882,7 +882,7 @@ static void finalize_request(struct s3cmci_host *host)
882 mrq->data->bytes_xfered = 0; 882 mrq->data->bytes_xfered = 0;
883 } 883 }
884 884
885 /* If we had an error while transfering data we flush the 885 /* If we had an error while transferring data we flush the
886 * DMA channel and the fifo to clear out any garbage. */ 886 * DMA channel and the fifo to clear out any garbage. */
887 if (mrq->data->error != 0) { 887 if (mrq->data->error != 0) {
888 if (s3cmci_host_usedma(host)) 888 if (s3cmci_host_usedma(host))
@@ -980,7 +980,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
980 980
981 if ((data->blksz & 3) != 0) { 981 if ((data->blksz & 3) != 0) {
982 /* We cannot deal with unaligned blocks with more than 982 /* We cannot deal with unaligned blocks with more than
983 * one block being transfered. */ 983 * one block being transferred. */
984 984
985 if (data->blocks > 1) { 985 if (data->blocks > 1) {
986 pr_warning("%s: can't do non-word sized block transfers (blksz %d)\n", __func__, data->blksz); 986 pr_warning("%s: can't do non-word sized block transfers (blksz %d)\n", __func__, data->blksz);
@@ -1736,8 +1736,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
1736 mmc->max_req_size = 4095 * 512; 1736 mmc->max_req_size = 4095 * 512;
1737 mmc->max_seg_size = mmc->max_req_size; 1737 mmc->max_seg_size = mmc->max_req_size;
1738 1738
1739 mmc->max_phys_segs = 128; 1739 mmc->max_segs = 128;
1740 mmc->max_hw_segs = 128;
1741 1740
1742 dbg(host, dbg_debug, 1741 dbg(host, dbg_debug,
1743 "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%u.\n", 1742 "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%u.\n",
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index b7050b380d5f..9ebd1d7759dc 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,7 +15,7 @@
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/mmc/host.h> 17#include <linux/mmc/host.h>
18#include <linux/sdhci-pltfm.h> 18#include <linux/mmc/sdhci-pltfm.h>
19#include <mach/cns3xxx.h> 19#include <mach/cns3xxx.h>
20#include "sdhci.h" 20#include "sdhci.h"
21#include "sdhci-pltfm.h" 21#include "sdhci-pltfm.h"
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
new file mode 100644
index 000000000000..2aeef4ffed8c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -0,0 +1,70 @@
1/*
2 * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
3 *
4 * Author: Saeed Bishara <saeed@marvell.com>
5 * Mike Rapoport <mike@compulab.co.il>
6 * Based on sdhci-cns3xxx.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/io.h>
23#include <linux/mmc/host.h>
24
25#include "sdhci.h"
26#include "sdhci-pltfm.h"
27
28static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
29{
30 u16 ret;
31
32 switch (reg) {
33 case SDHCI_HOST_VERSION:
34 case SDHCI_SLOT_INT_STATUS:
35 /* those registers don't exist */
36 return 0;
37 default:
38 ret = readw(host->ioaddr + reg);
39 }
40 return ret;
41}
42
43static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
44{
45 u32 ret;
46
47 switch (reg) {
48 case SDHCI_CAPABILITIES:
49 ret = readl(host->ioaddr + reg);
50 /* Mask the support for 3.0V */
51 ret &= ~SDHCI_CAN_VDD_300;
52 break;
53 default:
54 ret = readl(host->ioaddr + reg);
55 }
56 return ret;
57}
58
59static struct sdhci_ops sdhci_dove_ops = {
60 .read_w = sdhci_dove_readw,
61 .read_l = sdhci_dove_readl,
62};
63
64struct sdhci_pltfm_data sdhci_dove_pdata = {
65 .ops = &sdhci_dove_ops,
66 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
67 SDHCI_QUIRK_NO_BUSY_IRQ |
68 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
69 SDHCI_QUIRK_FORCE_DMA,
70};
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
new file mode 100644
index 000000000000..a19967d0bfc4
--- /dev/null
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -0,0 +1,331 @@
1/*
2 * Freescale eSDHC i.MX controller driver for the platform bus.
3 *
4 * derived from the OF-version.
5 *
6 * Copyright (c) 2010 Pengutronix e.K.
7 * Author: Wolfram Sang <w.sang@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 */
13
14#include <linux/io.h>
15#include <linux/delay.h>
16#include <linux/err.h>
17#include <linux/clk.h>
18#include <linux/gpio.h>
19#include <linux/slab.h>
20#include <linux/mmc/host.h>
21#include <linux/mmc/sdhci-pltfm.h>
22#include <linux/mmc/mmc.h>
23#include <linux/mmc/sdio.h>
24#include <mach/hardware.h>
25#include <mach/esdhc.h>
26#include "sdhci.h"
27#include "sdhci-pltfm.h"
28#include "sdhci-esdhc.h"
29
30/* VENDOR SPEC register */
31#define SDHCI_VENDOR_SPEC 0xC0
32#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
33
34#define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0)
35/*
36 * The CMDTYPE of the CMD register (offset 0xE) should be set to
37 * "11" when the STOP CMD12 is issued on imx53 to abort one
38 * open ended multi-blk IO. Otherwise the TC INT wouldn't
39 * be generated.
40 * In exact block transfer, the controller doesn't complete the
41 * operations automatically as required at the end of the
42 * transfer and remains on hold if the abort command is not sent.
43 * As a result, the TC flag is not asserted and SW received timeout
44 * exeception. Bit1 of Vendor Spec registor is used to fix it.
45 */
46#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1)
47
48struct pltfm_imx_data {
49 int flags;
50 u32 scratchpad;
51};
52
53static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
54{
55 void __iomem *base = host->ioaddr + (reg & ~0x3);
56 u32 shift = (reg & 0x3) * 8;
57
58 writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
59}
60
61static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
62{
63 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
64 struct pltfm_imx_data *imx_data = pltfm_host->priv;
65
66 /* fake CARD_PRESENT flag on mx25/35 */
67 u32 val = readl(host->ioaddr + reg);
68
69 if (unlikely((reg == SDHCI_PRESENT_STATE)
70 && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) {
71 struct esdhc_platform_data *boarddata =
72 host->mmc->parent->platform_data;
73
74 if (boarddata && gpio_is_valid(boarddata->cd_gpio)
75 && gpio_get_value(boarddata->cd_gpio))
76 /* no card, if a valid gpio says so... */
77 val &= SDHCI_CARD_PRESENT;
78 else
79 /* ... in all other cases assume card is present */
80 val |= SDHCI_CARD_PRESENT;
81 }
82
83 return val;
84}
85
86static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
87{
88 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
89 struct pltfm_imx_data *imx_data = pltfm_host->priv;
90
91 if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)
92 && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP)))
93 /*
94 * these interrupts won't work with a custom card_detect gpio
95 * (only applied to mx25/35)
96 */
97 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
98
99 if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
100 && (reg == SDHCI_INT_STATUS)
101 && (val & SDHCI_INT_DATA_END))) {
102 u32 v;
103 v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
104 v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK;
105 writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
106 }
107
108 writel(val, host->ioaddr + reg);
109}
110
111static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
112{
113 if (unlikely(reg == SDHCI_HOST_VERSION))
114 reg ^= 2;
115
116 return readw(host->ioaddr + reg);
117}
118
119static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
120{
121 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
122 struct pltfm_imx_data *imx_data = pltfm_host->priv;
123
124 switch (reg) {
125 case SDHCI_TRANSFER_MODE:
126 /*
127 * Postpone this write, we must do it together with a
128 * command write that is down below.
129 */
130 if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
131 && (host->cmd->opcode == SD_IO_RW_EXTENDED)
132 && (host->cmd->data->blocks > 1)
133 && (host->cmd->data->flags & MMC_DATA_READ)) {
134 u32 v;
135 v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
136 v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK;
137 writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
138 }
139 imx_data->scratchpad = val;
140 return;
141 case SDHCI_COMMAND:
142 if ((host->cmd->opcode == MMC_STOP_TRANSMISSION)
143 && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
144 val |= SDHCI_CMD_ABORTCMD;
145 writel(val << 16 | imx_data->scratchpad,
146 host->ioaddr + SDHCI_TRANSFER_MODE);
147 return;
148 case SDHCI_BLOCK_SIZE:
149 val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
150 break;
151 }
152 esdhc_clrset_le(host, 0xffff, val, reg);
153}
154
155static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
156{
157 u32 new_val;
158
159 switch (reg) {
160 case SDHCI_POWER_CONTROL:
161 /*
162 * FSL put some DMA bits here
163 * If your board has a regulator, code should be here
164 */
165 return;
166 case SDHCI_HOST_CONTROL:
167 /* FSL messed up here, so we can just keep those two */
168 new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS);
169 /* ensure the endianess */
170 new_val |= ESDHC_HOST_CONTROL_LE;
171 /* DMA mode bits are shifted */
172 new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5;
173
174 esdhc_clrset_le(host, 0xffff, new_val, reg);
175 return;
176 }
177 esdhc_clrset_le(host, 0xff, val, reg);
178}
179
180static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
181{
182 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
183
184 return clk_get_rate(pltfm_host->clk);
185}
186
187static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
188{
189 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
190
191 return clk_get_rate(pltfm_host->clk) / 256 / 16;
192}
193
194static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
195{
196 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
197
198 if (boarddata && gpio_is_valid(boarddata->wp_gpio))
199 return gpio_get_value(boarddata->wp_gpio);
200 else
201 return -ENOSYS;
202}
203
204static struct sdhci_ops sdhci_esdhc_ops = {
205 .read_l = esdhc_readl_le,
206 .read_w = esdhc_readw_le,
207 .write_l = esdhc_writel_le,
208 .write_w = esdhc_writew_le,
209 .write_b = esdhc_writeb_le,
210 .set_clock = esdhc_set_clock,
211 .get_max_clock = esdhc_pltfm_get_max_clock,
212 .get_min_clock = esdhc_pltfm_get_min_clock,
213};
214
215static irqreturn_t cd_irq(int irq, void *data)
216{
217 struct sdhci_host *sdhost = (struct sdhci_host *)data;
218
219 tasklet_schedule(&sdhost->card_tasklet);
220 return IRQ_HANDLED;
221};
222
223static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
224{
225 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
226 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
227 struct clk *clk;
228 int err;
229 struct pltfm_imx_data *imx_data;
230
231 clk = clk_get(mmc_dev(host->mmc), NULL);
232 if (IS_ERR(clk)) {
233 dev_err(mmc_dev(host->mmc), "clk err\n");
234 return PTR_ERR(clk);
235 }
236 clk_enable(clk);
237 pltfm_host->clk = clk;
238
239 imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL);
240 if (!imx_data) {
241 clk_disable(pltfm_host->clk);
242 clk_put(pltfm_host->clk);
243 return -ENOMEM;
244 }
245 pltfm_host->priv = imx_data;
246
247 if (!cpu_is_mx25())
248 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
249
250 if (cpu_is_mx25() || cpu_is_mx35()) {
251 /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
252 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
253 /* write_protect can't be routed to controller, use gpio */
254 sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
255 }
256
257 if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51()))
258 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
259
260 if (boarddata) {
261 err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
262 if (err) {
263 dev_warn(mmc_dev(host->mmc),
264 "no write-protect pin available!\n");
265 boarddata->wp_gpio = err;
266 }
267
268 err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD");
269 if (err) {
270 dev_warn(mmc_dev(host->mmc),
271 "no card-detect pin available!\n");
272 goto no_card_detect_pin;
273 }
274
275 /* i.MX5x has issues to be researched */
276 if (!cpu_is_mx25() && !cpu_is_mx35())
277 goto not_supported;
278
279 err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq,
280 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
281 mmc_hostname(host->mmc), host);
282 if (err) {
283 dev_warn(mmc_dev(host->mmc), "request irq error\n");
284 goto no_card_detect_irq;
285 }
286
287 imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP;
288 /* Now we have a working card_detect again */
289 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
290 }
291
292 return 0;
293
294 no_card_detect_irq:
295 gpio_free(boarddata->cd_gpio);
296 no_card_detect_pin:
297 boarddata->cd_gpio = err;
298 not_supported:
299 kfree(imx_data);
300 return 0;
301}
302
303static void esdhc_pltfm_exit(struct sdhci_host *host)
304{
305 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
306 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
307 struct pltfm_imx_data *imx_data = pltfm_host->priv;
308
309 if (boarddata && gpio_is_valid(boarddata->wp_gpio))
310 gpio_free(boarddata->wp_gpio);
311
312 if (boarddata && gpio_is_valid(boarddata->cd_gpio)) {
313 gpio_free(boarddata->cd_gpio);
314
315 if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION))
316 free_irq(gpio_to_irq(boarddata->cd_gpio), host);
317 }
318
319 clk_disable(pltfm_host->clk);
320 clk_put(pltfm_host->clk);
321 kfree(imx_data);
322}
323
324struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
325 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA
326 | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
327 /* ADMA has issues. Might be fixable */
328 .ops = &sdhci_esdhc_ops,
329 .init = esdhc_pltfm_init,
330 .exit = esdhc_pltfm_exit,
331};
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
new file mode 100644
index 000000000000..c3b08f111942
--- /dev/null
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -0,0 +1,81 @@
1/*
2 * Freescale eSDHC controller driver generics for OF and pltfm.
3 *
4 * Copyright (c) 2007 Freescale Semiconductor, Inc.
5 * Copyright (c) 2009 MontaVista Software, Inc.
6 * Copyright (c) 2010 Pengutronix e.K.
7 * Author: Wolfram Sang <w.sang@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 */
13
14#ifndef _DRIVERS_MMC_SDHCI_ESDHC_H
15#define _DRIVERS_MMC_SDHCI_ESDHC_H
16
17/*
18 * Ops and quirks for the Freescale eSDHC controller.
19 */
20
21#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
22 SDHCI_QUIRK_NO_BUSY_IRQ | \
23 SDHCI_QUIRK_NONSTANDARD_CLOCK | \
24 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
25 SDHCI_QUIRK_PIO_NEEDS_DELAY | \
26 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
27
28#define ESDHC_SYSTEM_CONTROL 0x2c
29#define ESDHC_CLOCK_MASK 0x0000fff0
30#define ESDHC_PREDIV_SHIFT 8
31#define ESDHC_DIVIDER_SHIFT 4
32#define ESDHC_CLOCK_PEREN 0x00000004
33#define ESDHC_CLOCK_HCKEN 0x00000002
34#define ESDHC_CLOCK_IPGEN 0x00000001
35
36/* pltfm-specific */
37#define ESDHC_HOST_CONTROL_LE 0x20
38
39/* OF-specific */
40#define ESDHC_DMA_SYSCTL 0x40c
41#define ESDHC_DMA_SNOOP 0x00000040
42
43#define ESDHC_HOST_CONTROL_RES 0x05
44
45static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
46{
47 int pre_div = 2;
48 int div = 1;
49 u32 temp;
50
51 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
52 temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
53 | ESDHC_CLOCK_MASK);
54 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
55
56 if (clock == 0)
57 goto out;
58
59 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
60 pre_div *= 2;
61
62 while (host->max_clk / pre_div / div > clock && div < 16)
63 div++;
64
65 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
66 clock, host->max_clk / pre_div / div);
67
68 pre_div >>= 1;
69 div--;
70
71 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
72 temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
73 | (div << ESDHC_DIVIDER_SHIFT)
74 | (pre_div << ESDHC_PREDIV_SHIFT));
75 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
76 mdelay(100);
77out:
78 host->clock = clock;
79}
80
81#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index c51b71174c1d..60e4186a4345 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -13,6 +13,7 @@
13 * your option) any later version. 13 * your option) any later version.
14 */ 14 */
15 15
16#include <linux/err.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/io.h> 19#include <linux/io.h>
@@ -20,8 +21,12 @@
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/of.h> 22#include <linux/of.h>
22#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#include <linux/of_address.h>
25#include <linux/of_irq.h>
23#include <linux/mmc/host.h> 26#include <linux/mmc/host.h>
27#ifdef CONFIG_PPC
24#include <asm/machdep.h> 28#include <asm/machdep.h>
29#endif
25#include "sdhci-of.h" 30#include "sdhci-of.h"
26#include "sdhci.h" 31#include "sdhci.h"
27 32
@@ -112,20 +117,30 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
112 return true; 117 return true;
113 118
114 /* Old device trees don't have the wp-inverted property. */ 119 /* Old device trees don't have the wp-inverted property. */
120#ifdef CONFIG_PPC
115 return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds); 121 return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
122#else
123 return false;
124#endif
116} 125}
117 126
118static int __devinit sdhci_of_probe(struct platform_device *ofdev, 127static const struct of_device_id sdhci_of_match[];
119 const struct of_device_id *match) 128static int __devinit sdhci_of_probe(struct platform_device *ofdev)
120{ 129{
130 const struct of_device_id *match;
121 struct device_node *np = ofdev->dev.of_node; 131 struct device_node *np = ofdev->dev.of_node;
122 struct sdhci_of_data *sdhci_of_data = match->data; 132 struct sdhci_of_data *sdhci_of_data;
123 struct sdhci_host *host; 133 struct sdhci_host *host;
124 struct sdhci_of_host *of_host; 134 struct sdhci_of_host *of_host;
125 const u32 *clk; 135 const __be32 *clk;
126 int size; 136 int size;
127 int ret; 137 int ret;
128 138
139 match = of_match_device(sdhci_of_match, &ofdev->dev);
140 if (!match)
141 return -EINVAL;
142 sdhci_of_data = match->data;
143
129 if (!of_device_is_available(np)) 144 if (!of_device_is_available(np))
130 return -ENODEV; 145 return -ENODEV;
131 146
@@ -166,7 +181,7 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev,
166 181
167 clk = of_get_property(np, "clock-frequency", &size); 182 clk = of_get_property(np, "clock-frequency", &size);
168 if (clk && size == sizeof(*clk) && *clk) 183 if (clk && size == sizeof(*clk) && *clk)
169 of_host->clock = *clk; 184 of_host->clock = be32_to_cpup(clk);
170 185
171 ret = sdhci_add_host(host); 186 ret = sdhci_add_host(host);
172 if (ret) 187 if (ret)
@@ -208,7 +223,7 @@ static const struct of_device_id sdhci_of_match[] = {
208}; 223};
209MODULE_DEVICE_TABLE(of, sdhci_of_match); 224MODULE_DEVICE_TABLE(of, sdhci_of_match);
210 225
211static struct of_platform_driver sdhci_of_driver = { 226static struct platform_driver sdhci_of_driver = {
212 .driver = { 227 .driver = {
213 .name = "sdhci-of", 228 .name = "sdhci-of",
214 .owner = THIS_MODULE, 229 .owner = THIS_MODULE,
@@ -222,13 +237,13 @@ static struct of_platform_driver sdhci_of_driver = {
222 237
223static int __init sdhci_of_init(void) 238static int __init sdhci_of_init(void)
224{ 239{
225 return of_register_platform_driver(&sdhci_of_driver); 240 return platform_driver_register(&sdhci_of_driver);
226} 241}
227module_init(sdhci_of_init); 242module_init(sdhci_of_init);
228 243
229static void __exit sdhci_of_exit(void) 244static void __exit sdhci_of_exit(void)
230{ 245{
231 of_unregister_platform_driver(&sdhci_of_driver); 246 platform_driver_unregister(&sdhci_of_driver);
232} 247}
233module_exit(sdhci_of_exit); 248module_exit(sdhci_of_exit);
234 249
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index c8623de13af3..ba40d6d035c7 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -18,23 +18,7 @@
18#include <linux/mmc/host.h> 18#include <linux/mmc/host.h>
19#include "sdhci-of.h" 19#include "sdhci-of.h"
20#include "sdhci.h" 20#include "sdhci.h"
21 21#include "sdhci-esdhc.h"
22/*
23 * Ops and quirks for the Freescale eSDHC controller.
24 */
25
26#define ESDHC_DMA_SYSCTL 0x40c
27#define ESDHC_DMA_SNOOP 0x00000040
28
29#define ESDHC_SYSTEM_CONTROL 0x2c
30#define ESDHC_CLOCK_MASK 0x0000fff0
31#define ESDHC_PREDIV_SHIFT 8
32#define ESDHC_DIVIDER_SHIFT 4
33#define ESDHC_CLOCK_PEREN 0x00000004
34#define ESDHC_CLOCK_HCKEN 0x00000002
35#define ESDHC_CLOCK_IPGEN 0x00000001
36
37#define ESDHC_HOST_CONTROL_RES 0x05
38 22
39static u16 esdhc_readw(struct sdhci_host *host, int reg) 23static u16 esdhc_readw(struct sdhci_host *host, int reg)
40{ 24{
@@ -68,51 +52,20 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
68 sdhci_be32bs_writeb(host, val, reg); 52 sdhci_be32bs_writeb(host, val, reg);
69} 53}
70 54
71static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) 55static int esdhc_of_enable_dma(struct sdhci_host *host)
72{
73 int pre_div = 2;
74 int div = 1;
75
76 clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
77 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
78
79 if (clock == 0)
80 goto out;
81
82 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
83 pre_div *= 2;
84
85 while (host->max_clk / pre_div / div > clock && div < 16)
86 div++;
87
88 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
89 clock, host->max_clk / pre_div / div);
90
91 pre_div >>= 1;
92 div--;
93
94 setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
95 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
96 div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
97 mdelay(100);
98out:
99 host->clock = clock;
100}
101
102static int esdhc_enable_dma(struct sdhci_host *host)
103{ 56{
104 setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP); 57 setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
105 return 0; 58 return 0;
106} 59}
107 60
108static unsigned int esdhc_get_max_clock(struct sdhci_host *host) 61static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
109{ 62{
110 struct sdhci_of_host *of_host = sdhci_priv(host); 63 struct sdhci_of_host *of_host = sdhci_priv(host);
111 64
112 return of_host->clock; 65 return of_host->clock;
113} 66}
114 67
115static unsigned int esdhc_get_min_clock(struct sdhci_host *host) 68static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
116{ 69{
117 struct sdhci_of_host *of_host = sdhci_priv(host); 70 struct sdhci_of_host *of_host = sdhci_priv(host);
118 71
@@ -120,14 +73,9 @@ static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
120} 73}
121 74
122struct sdhci_of_data sdhci_esdhc = { 75struct sdhci_of_data sdhci_esdhc = {
123 .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 | 76 /* card detection could be handled via GPIO */
124 SDHCI_QUIRK_BROKEN_CARD_DETECTION | 77 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
125 SDHCI_QUIRK_NO_BUSY_IRQ | 78 | SDHCI_QUIRK_NO_CARD_NO_RESET,
126 SDHCI_QUIRK_NONSTANDARD_CLOCK |
127 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
128 SDHCI_QUIRK_PIO_NEEDS_DELAY |
129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
130 SDHCI_QUIRK_NO_CARD_NO_RESET,
131 .ops = { 79 .ops = {
132 .read_l = sdhci_be32bs_readl, 80 .read_l = sdhci_be32bs_readl,
133 .read_w = esdhc_readw, 81 .read_w = esdhc_readw,
@@ -136,8 +84,8 @@ struct sdhci_of_data sdhci_esdhc = {
136 .write_w = esdhc_writew, 84 .write_w = esdhc_writew,
137 .write_b = esdhc_writeb, 85 .write_b = esdhc_writeb,
138 .set_clock = esdhc_set_clock, 86 .set_clock = esdhc_set_clock,
139 .enable_dma = esdhc_enable_dma, 87 .enable_dma = esdhc_of_enable_dma,
140 .get_max_clock = esdhc_get_max_clock, 88 .get_max_clock = esdhc_of_get_max_clock,
141 .get_min_clock = esdhc_get_min_clock, 89 .get_min_clock = esdhc_of_get_min_clock,
142 }, 90 },
143}; 91};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index e8aa99deae9a..936bbca19c0a 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -18,11 +18,9 @@
18#include <linux/dma-mapping.h> 18#include <linux/dma-mapping.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/device.h> 20#include <linux/device.h>
21
22#include <linux/mmc/host.h> 21#include <linux/mmc/host.h>
23 22#include <linux/scatterlist.h>
24#include <asm/scatterlist.h> 23#include <linux/io.h>
25#include <asm/io.h>
26 24
27#include "sdhci.h" 25#include "sdhci.h"
28 26
@@ -46,14 +44,14 @@ struct sdhci_pci_slot;
46struct sdhci_pci_fixes { 44struct sdhci_pci_fixes {
47 unsigned int quirks; 45 unsigned int quirks;
48 46
49 int (*probe)(struct sdhci_pci_chip*); 47 int (*probe) (struct sdhci_pci_chip *);
50 48
51 int (*probe_slot)(struct sdhci_pci_slot*); 49 int (*probe_slot) (struct sdhci_pci_slot *);
52 void (*remove_slot)(struct sdhci_pci_slot*, int); 50 void (*remove_slot) (struct sdhci_pci_slot *, int);
53 51
54 int (*suspend)(struct sdhci_pci_chip*, 52 int (*suspend) (struct sdhci_pci_chip *,
55 pm_message_t); 53 pm_message_t);
56 int (*resume)(struct sdhci_pci_chip*); 54 int (*resume) (struct sdhci_pci_chip *);
57}; 55};
58 56
59struct sdhci_pci_slot { 57struct sdhci_pci_slot {
@@ -145,6 +143,105 @@ static const struct sdhci_pci_fixes sdhci_cafe = {
145 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, 143 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
146}; 144};
147 145
146/*
147 * ADMA operation is disabled for Moorestown platform due to
148 * hardware bugs.
149 */
150static int mrst_hc_probe(struct sdhci_pci_chip *chip)
151{
152 /*
153 * slots number is fixed here for MRST as SDIO3/5 are never used and
154 * have hardware bugs.
155 */
156 chip->num_slots = 1;
157 return 0;
158}
159
160static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
161 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
162};
163
164static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
165 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
166 .probe = mrst_hc_probe,
167};
168
169static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
170 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
171};
172
173static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
174 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
175};
176
177/* O2Micro extra registers */
178#define O2_SD_LOCK_WP 0xD3
179#define O2_SD_MULTI_VCC3V 0xEE
180#define O2_SD_CLKREQ 0xEC
181#define O2_SD_CAPS 0xE0
182#define O2_SD_ADMA1 0xE2
183#define O2_SD_ADMA2 0xE7
184#define O2_SD_INF_MOD 0xF1
185
186static int o2_probe(struct sdhci_pci_chip *chip)
187{
188 int ret;
189 u8 scratch;
190
191 switch (chip->pdev->device) {
192 case PCI_DEVICE_ID_O2_8220:
193 case PCI_DEVICE_ID_O2_8221:
194 case PCI_DEVICE_ID_O2_8320:
195 case PCI_DEVICE_ID_O2_8321:
196 /* This extra setup is required due to broken ADMA. */
197 ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
198 if (ret)
199 return ret;
200 scratch &= 0x7f;
201 pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
202
203 /* Set Multi 3 to VCC3V# */
204 pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
205
206 /* Disable CLK_REQ# support after media DET */
207 ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
208 if (ret)
209 return ret;
210 scratch |= 0x20;
211 pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
212
213 /* Choose capabilities, enable SDMA. We have to write 0x01
214 * to the capabilities register first to unlock it.
215 */
216 ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
217 if (ret)
218 return ret;
219 scratch |= 0x01;
220 pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
221 pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
222
223 /* Disable ADMA1/2 */
224 pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
225 pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
226
227 /* Disable the infinite transfer mode */
228 ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
229 if (ret)
230 return ret;
231 scratch |= 0x08;
232 pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
233
234 /* Lock WP */
235 ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
236 if (ret)
237 return ret;
238 scratch |= 0x80;
239 pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
240 }
241
242 return 0;
243}
244
148static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) 245static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
149{ 246{
150 u8 scratch; 247 u8 scratch;
@@ -173,6 +270,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
173static int jmicron_probe(struct sdhci_pci_chip *chip) 270static int jmicron_probe(struct sdhci_pci_chip *chip)
174{ 271{
175 int ret; 272 int ret;
273 u16 mmcdev = 0;
176 274
177 if (chip->pdev->revision == 0) { 275 if (chip->pdev->revision == 0) {
178 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR | 276 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
@@ -194,12 +292,17 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
194 * 2. The MMC interface has a lower subfunction number 292 * 2. The MMC interface has a lower subfunction number
195 * than the SD interface. 293 * than the SD interface.
196 */ 294 */
197 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) { 295 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
296 mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
297 else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
298 mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
299
300 if (mmcdev) {
198 struct pci_dev *sd_dev; 301 struct pci_dev *sd_dev;
199 302
200 sd_dev = NULL; 303 sd_dev = NULL;
201 while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON, 304 while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
202 PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) { 305 mmcdev, sd_dev)) != NULL) {
203 if ((PCI_SLOT(chip->pdev->devfn) == 306 if ((PCI_SLOT(chip->pdev->devfn) ==
204 PCI_SLOT(sd_dev->devfn)) && 307 PCI_SLOT(sd_dev->devfn)) &&
205 (chip->pdev->bus == sd_dev->bus)) 308 (chip->pdev->bus == sd_dev->bus))
@@ -224,6 +327,11 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
224 return ret; 327 return ret;
225 } 328 }
226 329
330 /* quirk for unsable RO-detection on JM388 chips */
331 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
332 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
333 chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
334
227 return 0; 335 return 0;
228} 336}
229 337
@@ -259,13 +367,25 @@ static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
259 slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; 367 slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
260 } 368 }
261 369
370 /* JM388 MMC doesn't support 1.8V while SD supports it */
371 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
372 slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
373 MMC_VDD_29_30 | MMC_VDD_30_31 |
374 MMC_VDD_165_195; /* allow 1.8V */
375 slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
376 MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
377 }
378
262 /* 379 /*
263 * The secondary interface requires a bit set to get the 380 * The secondary interface requires a bit set to get the
264 * interrupts. 381 * interrupts.
265 */ 382 */
266 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) 383 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
384 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
267 jmicron_enable_mmc(slot->host, 1); 385 jmicron_enable_mmc(slot->host, 1);
268 386
387 slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
388
269 return 0; 389 return 0;
270} 390}
271 391
@@ -274,7 +394,8 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
274 if (dead) 394 if (dead)
275 return; 395 return;
276 396
277 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) 397 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
398 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
278 jmicron_enable_mmc(slot->host, 0); 399 jmicron_enable_mmc(slot->host, 0);
279} 400}
280 401
@@ -282,8 +403,9 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
282{ 403{
283 int i; 404 int i;
284 405
285 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { 406 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
286 for (i = 0;i < chip->num_slots;i++) 407 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
408 for (i = 0; i < chip->num_slots; i++)
287 jmicron_enable_mmc(chip->slots[i]->host, 0); 409 jmicron_enable_mmc(chip->slots[i]->host, 0);
288 } 410 }
289 411
@@ -294,8 +416,9 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
294{ 416{
295 int ret, i; 417 int ret, i;
296 418
297 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { 419 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
298 for (i = 0;i < chip->num_slots;i++) 420 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
421 for (i = 0; i < chip->num_slots; i++)
299 jmicron_enable_mmc(chip->slots[i]->host, 1); 422 jmicron_enable_mmc(chip->slots[i]->host, 1);
300 } 423 }
301 424
@@ -308,6 +431,10 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
308 return 0; 431 return 0;
309} 432}
310 433
434static const struct sdhci_pci_fixes sdhci_o2 = {
435 .probe = o2_probe,
436};
437
311static const struct sdhci_pci_fixes sdhci_jmicron = { 438static const struct sdhci_pci_fixes sdhci_jmicron = {
312 .probe = jmicron_probe, 439 .probe = jmicron_probe,
313 440
@@ -423,6 +550,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
423 }, 550 },
424 551
425 { 552 {
553 .vendor = PCI_VENDOR_ID_RICOH,
554 .device = 0xe823,
555 .subvendor = PCI_ANY_ID,
556 .subdevice = PCI_ANY_ID,
557 .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
558 },
559
560 {
426 .vendor = PCI_VENDOR_ID_ENE, 561 .vendor = PCI_VENDOR_ID_ENE,
427 .device = PCI_DEVICE_ID_ENE_CB712_SD, 562 .device = PCI_DEVICE_ID_ENE_CB712_SD,
428 .subvendor = PCI_ANY_ID, 563 .subvendor = PCI_ANY_ID,
@@ -479,6 +614,22 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
479 }, 614 },
480 615
481 { 616 {
617 .vendor = PCI_VENDOR_ID_JMICRON,
618 .device = PCI_DEVICE_ID_JMICRON_JMB388_SD,
619 .subvendor = PCI_ANY_ID,
620 .subdevice = PCI_ANY_ID,
621 .driver_data = (kernel_ulong_t)&sdhci_jmicron,
622 },
623
624 {
625 .vendor = PCI_VENDOR_ID_JMICRON,
626 .device = PCI_DEVICE_ID_JMICRON_JMB388_ESD,
627 .subvendor = PCI_ANY_ID,
628 .subdevice = PCI_ANY_ID,
629 .driver_data = (kernel_ulong_t)&sdhci_jmicron,
630 },
631
632 {
482 .vendor = PCI_VENDOR_ID_SYSKONNECT, 633 .vendor = PCI_VENDOR_ID_SYSKONNECT,
483 .device = 0x8000, 634 .device = 0x8000,
484 .subvendor = PCI_ANY_ID, 635 .subvendor = PCI_ANY_ID,
@@ -494,6 +645,110 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
494 .driver_data = (kernel_ulong_t)&sdhci_via, 645 .driver_data = (kernel_ulong_t)&sdhci_via,
495 }, 646 },
496 647
648 {
649 .vendor = PCI_VENDOR_ID_INTEL,
650 .device = PCI_DEVICE_ID_INTEL_MRST_SD0,
651 .subvendor = PCI_ANY_ID,
652 .subdevice = PCI_ANY_ID,
653 .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc0,
654 },
655
656 {
657 .vendor = PCI_VENDOR_ID_INTEL,
658 .device = PCI_DEVICE_ID_INTEL_MRST_SD1,
659 .subvendor = PCI_ANY_ID,
660 .subdevice = PCI_ANY_ID,
661 .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
662 },
663
664 {
665 .vendor = PCI_VENDOR_ID_INTEL,
666 .device = PCI_DEVICE_ID_INTEL_MRST_SD2,
667 .subvendor = PCI_ANY_ID,
668 .subdevice = PCI_ANY_ID,
669 .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
670 },
671
672 {
673 .vendor = PCI_VENDOR_ID_INTEL,
674 .device = PCI_DEVICE_ID_INTEL_MFD_SD,
675 .subvendor = PCI_ANY_ID,
676 .subdevice = PCI_ANY_ID,
677 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd,
678 },
679
680 {
681 .vendor = PCI_VENDOR_ID_INTEL,
682 .device = PCI_DEVICE_ID_INTEL_MFD_SDIO1,
683 .subvendor = PCI_ANY_ID,
684 .subdevice = PCI_ANY_ID,
685 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
686 },
687
688 {
689 .vendor = PCI_VENDOR_ID_INTEL,
690 .device = PCI_DEVICE_ID_INTEL_MFD_SDIO2,
691 .subvendor = PCI_ANY_ID,
692 .subdevice = PCI_ANY_ID,
693 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
694 },
695
696 {
697 .vendor = PCI_VENDOR_ID_INTEL,
698 .device = PCI_DEVICE_ID_INTEL_MFD_EMMC0,
699 .subvendor = PCI_ANY_ID,
700 .subdevice = PCI_ANY_ID,
701 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
702 },
703
704 {
705 .vendor = PCI_VENDOR_ID_INTEL,
706 .device = PCI_DEVICE_ID_INTEL_MFD_EMMC1,
707 .subvendor = PCI_ANY_ID,
708 .subdevice = PCI_ANY_ID,
709 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
710 },
711
712 {
713 .vendor = PCI_VENDOR_ID_O2,
714 .device = PCI_DEVICE_ID_O2_8120,
715 .subvendor = PCI_ANY_ID,
716 .subdevice = PCI_ANY_ID,
717 .driver_data = (kernel_ulong_t)&sdhci_o2,
718 },
719
720 {
721 .vendor = PCI_VENDOR_ID_O2,
722 .device = PCI_DEVICE_ID_O2_8220,
723 .subvendor = PCI_ANY_ID,
724 .subdevice = PCI_ANY_ID,
725 .driver_data = (kernel_ulong_t)&sdhci_o2,
726 },
727
728 {
729 .vendor = PCI_VENDOR_ID_O2,
730 .device = PCI_DEVICE_ID_O2_8221,
731 .subvendor = PCI_ANY_ID,
732 .subdevice = PCI_ANY_ID,
733 .driver_data = (kernel_ulong_t)&sdhci_o2,
734 },
735
736 {
737 .vendor = PCI_VENDOR_ID_O2,
738 .device = PCI_DEVICE_ID_O2_8320,
739 .subvendor = PCI_ANY_ID,
740 .subdevice = PCI_ANY_ID,
741 .driver_data = (kernel_ulong_t)&sdhci_o2,
742 },
743
744 {
745 .vendor = PCI_VENDOR_ID_O2,
746 .device = PCI_DEVICE_ID_O2_8321,
747 .subvendor = PCI_ANY_ID,
748 .subdevice = PCI_ANY_ID,
749 .driver_data = (kernel_ulong_t)&sdhci_o2,
750 },
751
497 { /* Generic SD host controller */ 752 { /* Generic SD host controller */
498 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) 753 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
499 }, 754 },
@@ -546,10 +801,11 @@ static struct sdhci_ops sdhci_pci_ops = {
546 801
547#ifdef CONFIG_PM 802#ifdef CONFIG_PM
548 803
549static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) 804static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
550{ 805{
551 struct sdhci_pci_chip *chip; 806 struct sdhci_pci_chip *chip;
552 struct sdhci_pci_slot *slot; 807 struct sdhci_pci_slot *slot;
808 mmc_pm_flag_t slot_pm_flags;
553 mmc_pm_flag_t pm_flags = 0; 809 mmc_pm_flag_t pm_flags = 0;
554 int i, ret; 810 int i, ret;
555 811
@@ -557,7 +813,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
557 if (!chip) 813 if (!chip)
558 return 0; 814 return 0;
559 815
560 for (i = 0;i < chip->num_slots;i++) { 816 for (i = 0; i < chip->num_slots; i++) {
561 slot = chip->slots[i]; 817 slot = chip->slots[i];
562 if (!slot) 818 if (!slot)
563 continue; 819 continue;
@@ -565,18 +821,22 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
565 ret = sdhci_suspend_host(slot->host, state); 821 ret = sdhci_suspend_host(slot->host, state);
566 822
567 if (ret) { 823 if (ret) {
568 for (i--;i >= 0;i--) 824 for (i--; i >= 0; i--)
569 sdhci_resume_host(chip->slots[i]->host); 825 sdhci_resume_host(chip->slots[i]->host);
570 return ret; 826 return ret;
571 } 827 }
572 828
573 pm_flags |= slot->host->mmc->pm_flags; 829 slot_pm_flags = slot->host->mmc->pm_flags;
830 if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ)
831 sdhci_enable_irq_wakeups(slot->host);
832
833 pm_flags |= slot_pm_flags;
574 } 834 }
575 835
576 if (chip->fixes && chip->fixes->suspend) { 836 if (chip->fixes && chip->fixes->suspend) {
577 ret = chip->fixes->suspend(chip, state); 837 ret = chip->fixes->suspend(chip, state);
578 if (ret) { 838 if (ret) {
579 for (i = chip->num_slots - 1;i >= 0;i--) 839 for (i = chip->num_slots - 1; i >= 0; i--)
580 sdhci_resume_host(chip->slots[i]->host); 840 sdhci_resume_host(chip->slots[i]->host);
581 return ret; 841 return ret;
582 } 842 }
@@ -584,8 +844,10 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
584 844
585 pci_save_state(pdev); 845 pci_save_state(pdev);
586 if (pm_flags & MMC_PM_KEEP_POWER) { 846 if (pm_flags & MMC_PM_KEEP_POWER) {
587 if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) 847 if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) {
848 pci_pme_active(pdev, true);
588 pci_enable_wake(pdev, PCI_D3hot, 1); 849 pci_enable_wake(pdev, PCI_D3hot, 1);
850 }
589 pci_set_power_state(pdev, PCI_D3hot); 851 pci_set_power_state(pdev, PCI_D3hot);
590 } else { 852 } else {
591 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); 853 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
@@ -596,7 +858,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
596 return 0; 858 return 0;
597} 859}
598 860
599static int sdhci_pci_resume (struct pci_dev *pdev) 861static int sdhci_pci_resume(struct pci_dev *pdev)
600{ 862{
601 struct sdhci_pci_chip *chip; 863 struct sdhci_pci_chip *chip;
602 struct sdhci_pci_slot *slot; 864 struct sdhci_pci_slot *slot;
@@ -618,7 +880,7 @@ static int sdhci_pci_resume (struct pci_dev *pdev)
618 return ret; 880 return ret;
619 } 881 }
620 882
621 for (i = 0;i < chip->num_slots;i++) { 883 for (i = 0; i < chip->num_slots; i++) {
622 slot = chip->slots[i]; 884 slot = chip->slots[i];
623 if (!slot) 885 if (!slot)
624 continue; 886 continue;
@@ -649,9 +911,6 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
649{ 911{
650 struct sdhci_pci_slot *slot; 912 struct sdhci_pci_slot *slot;
651 struct sdhci_host *host; 913 struct sdhci_host *host;
652
653 resource_size_t addr;
654
655 int ret; 914 int ret;
656 915
657 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 916 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
@@ -698,10 +957,10 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
698 goto free; 957 goto free;
699 } 958 }
700 959
701 addr = pci_resource_start(pdev, bar);
702 host->ioaddr = pci_ioremap_bar(pdev, bar); 960 host->ioaddr = pci_ioremap_bar(pdev, bar);
703 if (!host->ioaddr) { 961 if (!host->ioaddr) {
704 dev_err(&pdev->dev, "failed to remap registers\n"); 962 dev_err(&pdev->dev, "failed to remap registers\n");
963 ret = -ENOMEM;
705 goto release; 964 goto release;
706 } 965 }
707 966
@@ -761,16 +1020,14 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
761 struct sdhci_pci_chip *chip; 1020 struct sdhci_pci_chip *chip;
762 struct sdhci_pci_slot *slot; 1021 struct sdhci_pci_slot *slot;
763 1022
764 u8 slots, rev, first_bar; 1023 u8 slots, first_bar;
765 int ret, i; 1024 int ret, i;
766 1025
767 BUG_ON(pdev == NULL); 1026 BUG_ON(pdev == NULL);
768 BUG_ON(ent == NULL); 1027 BUG_ON(ent == NULL);
769 1028
770 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);
771
772 dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", 1029 dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
773 (int)pdev->vendor, (int)pdev->device, (int)rev); 1030 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
774 1031
775 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); 1032 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
776 if (ret) 1033 if (ret)
@@ -805,7 +1062,7 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
805 } 1062 }
806 1063
807 chip->pdev = pdev; 1064 chip->pdev = pdev;
808 chip->fixes = (const struct sdhci_pci_fixes*)ent->driver_data; 1065 chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
809 if (chip->fixes) 1066 if (chip->fixes)
810 chip->quirks = chip->fixes->quirks; 1067 chip->quirks = chip->fixes->quirks;
811 chip->num_slots = slots; 1068 chip->num_slots = slots;
@@ -818,10 +1075,12 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
818 goto free; 1075 goto free;
819 } 1076 }
820 1077
821 for (i = 0;i < slots;i++) { 1078 slots = chip->num_slots; /* Quirk may have changed this */
1079
1080 for (i = 0; i < slots; i++) {
822 slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i); 1081 slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i);
823 if (IS_ERR(slot)) { 1082 if (IS_ERR(slot)) {
824 for (i--;i >= 0;i--) 1083 for (i--; i >= 0; i--)
825 sdhci_pci_remove_slot(chip->slots[i]); 1084 sdhci_pci_remove_slot(chip->slots[i]);
826 ret = PTR_ERR(slot); 1085 ret = PTR_ERR(slot);
827 goto free; 1086 goto free;
@@ -849,7 +1108,7 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
849 chip = pci_get_drvdata(pdev); 1108 chip = pci_get_drvdata(pdev);
850 1109
851 if (chip) { 1110 if (chip) {
852 for (i = 0;i < chip->num_slots; i++) 1111 for (i = 0; i < chip->num_slots; i++)
853 sdhci_pci_remove_slot(chip->slots[i]); 1112 sdhci_pci_remove_slot(chip->slots[i]);
854 1113
855 pci_set_drvdata(pdev, NULL); 1114 pci_set_drvdata(pdev, NULL);
@@ -860,9 +1119,9 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
860} 1119}
861 1120
862static struct pci_driver sdhci_driver = { 1121static struct pci_driver sdhci_driver = {
863 .name = "sdhci-pci", 1122 .name = "sdhci-pci",
864 .id_table = pci_ids, 1123 .id_table = pci_ids,
865 .probe = sdhci_pci_probe, 1124 .probe = sdhci_pci_probe,
866 .remove = __devexit_p(sdhci_pci_remove), 1125 .remove = __devexit_p(sdhci_pci_remove),
867 .suspend = sdhci_pci_suspend, 1126 .suspend = sdhci_pci_suspend,
868 .resume = sdhci_pci_resume, 1127 .resume = sdhci_pci_resume,
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index e045e3c61dde..dbab0407f4b6 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -30,7 +30,7 @@
30#include <linux/mmc/host.h> 30#include <linux/mmc/host.h>
31 31
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/sdhci-pltfm.h> 33#include <linux/mmc/sdhci-pltfm.h>
34 34
35#include "sdhci.h" 35#include "sdhci.h"
36#include "sdhci-pltfm.h" 36#include "sdhci-pltfm.h"
@@ -52,14 +52,17 @@ static struct sdhci_ops sdhci_pltfm_ops = {
52 52
53static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) 53static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
54{ 54{
55 struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
56 const struct platform_device_id *platid = platform_get_device_id(pdev); 55 const struct platform_device_id *platid = platform_get_device_id(pdev);
56 struct sdhci_pltfm_data *pdata;
57 struct sdhci_host *host; 57 struct sdhci_host *host;
58 struct sdhci_pltfm_host *pltfm_host;
58 struct resource *iomem; 59 struct resource *iomem;
59 int ret; 60 int ret;
60 61
61 if (!pdata && platid && platid->driver_data) 62 if (platid && platid->driver_data)
62 pdata = (void *)platid->driver_data; 63 pdata = (void *)platid->driver_data;
64 else
65 pdata = pdev->dev.platform_data;
63 66
64 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 67 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
65 if (!iomem) { 68 if (!iomem) {
@@ -71,16 +74,19 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
71 dev_err(&pdev->dev, "Invalid iomem size. You may " 74 dev_err(&pdev->dev, "Invalid iomem size. You may "
72 "experience problems.\n"); 75 "experience problems.\n");
73 76
74 if (pdev->dev.parent) 77 /* Some PCI-based MFD need the parent here */
75 host = sdhci_alloc_host(pdev->dev.parent, 0); 78 if (pdev->dev.parent != &platform_bus)
79 host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host));
76 else 80 else
77 host = sdhci_alloc_host(&pdev->dev, 0); 81 host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host));
78 82
79 if (IS_ERR(host)) { 83 if (IS_ERR(host)) {
80 ret = PTR_ERR(host); 84 ret = PTR_ERR(host);
81 goto err; 85 goto err;
82 } 86 }
83 87
88 pltfm_host = sdhci_priv(host);
89
84 host->hw_name = "platform"; 90 host->hw_name = "platform";
85 if (pdata && pdata->ops) 91 if (pdata && pdata->ops)
86 host->ops = pdata->ops; 92 host->ops = pdata->ops;
@@ -105,7 +111,7 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
105 } 111 }
106 112
107 if (pdata && pdata->init) { 113 if (pdata && pdata->init) {
108 ret = pdata->init(host); 114 ret = pdata->init(host, pdata);
109 if (ret) 115 if (ret)
110 goto err_plat_init; 116 goto err_plat_init;
111 } 117 }
@@ -161,10 +167,38 @@ static const struct platform_device_id sdhci_pltfm_ids[] = {
161#ifdef CONFIG_MMC_SDHCI_CNS3XXX 167#ifdef CONFIG_MMC_SDHCI_CNS3XXX
162 { "sdhci-cns3xxx", (kernel_ulong_t)&sdhci_cns3xxx_pdata }, 168 { "sdhci-cns3xxx", (kernel_ulong_t)&sdhci_cns3xxx_pdata },
163#endif 169#endif
170#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
171 { "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
172#endif
173#ifdef CONFIG_MMC_SDHCI_DOVE
174 { "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata },
175#endif
176#ifdef CONFIG_MMC_SDHCI_TEGRA
177 { "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
178#endif
164 { }, 179 { },
165}; 180};
166MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids); 181MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
167 182
183#ifdef CONFIG_PM
184static int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
185{
186 struct sdhci_host *host = platform_get_drvdata(dev);
187
188 return sdhci_suspend_host(host, state);
189}
190
191static int sdhci_pltfm_resume(struct platform_device *dev)
192{
193 struct sdhci_host *host = platform_get_drvdata(dev);
194
195 return sdhci_resume_host(host);
196}
197#else
198#define sdhci_pltfm_suspend NULL
199#define sdhci_pltfm_resume NULL
200#endif /* CONFIG_PM */
201
168static struct platform_driver sdhci_pltfm_driver = { 202static struct platform_driver sdhci_pltfm_driver = {
169 .driver = { 203 .driver = {
170 .name = "sdhci", 204 .name = "sdhci",
@@ -173,6 +207,8 @@ static struct platform_driver sdhci_pltfm_driver = {
173 .probe = sdhci_pltfm_probe, 207 .probe = sdhci_pltfm_probe,
174 .remove = __devexit_p(sdhci_pltfm_remove), 208 .remove = __devexit_p(sdhci_pltfm_remove),
175 .id_table = sdhci_pltfm_ids, 209 .id_table = sdhci_pltfm_ids,
210 .suspend = sdhci_pltfm_suspend,
211 .resume = sdhci_pltfm_resume,
176}; 212};
177 213
178/*****************************************************************************\ 214/*****************************************************************************\
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 900f32902f73..2b37016ad0ac 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -11,8 +11,18 @@
11#ifndef _DRIVERS_MMC_SDHCI_PLTFM_H 11#ifndef _DRIVERS_MMC_SDHCI_PLTFM_H
12#define _DRIVERS_MMC_SDHCI_PLTFM_H 12#define _DRIVERS_MMC_SDHCI_PLTFM_H
13 13
14#include <linux/sdhci-pltfm.h> 14#include <linux/clk.h>
15#include <linux/types.h>
16#include <linux/mmc/sdhci-pltfm.h>
17
18struct sdhci_pltfm_host {
19 struct clk *clk;
20 void *priv; /* to handle quirks across io-accessor calls */
21};
15 22
16extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; 23extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
24extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
25extern struct sdhci_pltfm_data sdhci_dove_pdata;
26extern struct sdhci_pltfm_data sdhci_tegra_pdata;
17 27
18#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ 28#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c
new file mode 100644
index 000000000000..089c9a68b7b1
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pxa.c
@@ -0,0 +1,303 @@
1/* linux/drivers/mmc/host/sdhci-pxa.c
2 *
3 * Copyright (C) 2010 Marvell International Ltd.
4 * Zhangfei Gao <zhangfei.gao@marvell.com>
5 * Kevin Wang <dwang4@marvell.com>
6 * Mingwei Wang <mwwang@marvell.com>
7 * Philip Rakity <prakity@marvell.com>
8 * Mark Brown <markb@marvell.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15/* Supports:
16 * SDHCI support for MMP2/PXA910/PXA168
17 *
18 * Refer to sdhci-s3c.c.
19 */
20
21#include <linux/delay.h>
22#include <linux/platform_device.h>
23#include <linux/mmc/host.h>
24#include <linux/clk.h>
25#include <linux/io.h>
26#include <linux/err.h>
27#include <plat/sdhci.h>
28#include "sdhci.h"
29
30#define DRIVER_NAME "sdhci-pxa"
31
32#define SD_FIFO_PARAM 0x104
33#define DIS_PAD_SD_CLK_GATE 0x400
34
35struct sdhci_pxa {
36 struct sdhci_host *host;
37 struct sdhci_pxa_platdata *pdata;
38 struct clk *clk;
39 struct resource *res;
40
41 u8 clk_enable;
42};
43
44/*****************************************************************************\
45 * *
46 * SDHCI core callbacks *
47 * *
48\*****************************************************************************/
49static void set_clock(struct sdhci_host *host, unsigned int clock)
50{
51 struct sdhci_pxa *pxa = sdhci_priv(host);
52 u32 tmp = 0;
53
54 if (clock == 0) {
55 if (pxa->clk_enable) {
56 clk_disable(pxa->clk);
57 pxa->clk_enable = 0;
58 }
59 } else {
60 if (0 == pxa->clk_enable) {
61 if (pxa->pdata->flags & PXA_FLAG_DISABLE_CLOCK_GATING) {
62 tmp = readl(host->ioaddr + SD_FIFO_PARAM);
63 tmp |= DIS_PAD_SD_CLK_GATE;
64 writel(tmp, host->ioaddr + SD_FIFO_PARAM);
65 }
66 clk_enable(pxa->clk);
67 pxa->clk_enable = 1;
68 }
69 }
70}
71
72static int set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
73{
74 u16 ctrl_2;
75
76 /*
77 * Set V18_EN -- UHS modes do not work without this.
78 * does not change signaling voltage
79 */
80 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
81
82 /* Select Bus Speed Mode for host */
83 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
84 switch (uhs) {
85 case MMC_TIMING_UHS_SDR12:
86 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
87 break;
88 case MMC_TIMING_UHS_SDR25:
89 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
90 break;
91 case MMC_TIMING_UHS_SDR50:
92 ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
93 break;
94 case MMC_TIMING_UHS_SDR104:
95 ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
96 break;
97 case MMC_TIMING_UHS_DDR50:
98 ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
99 break;
100 }
101
102 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
103 pr_debug("%s:%s uhs = %d, ctrl_2 = %04X\n",
104 __func__, mmc_hostname(host->mmc), uhs, ctrl_2);
105
106 return 0;
107}
108
109static struct sdhci_ops sdhci_pxa_ops = {
110 .set_uhs_signaling = set_uhs_signaling,
111 .set_clock = set_clock,
112};
113
114/*****************************************************************************\
115 * *
116 * Device probing/removal *
117 * *
118\*****************************************************************************/
119
120static int __devinit sdhci_pxa_probe(struct platform_device *pdev)
121{
122 struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
123 struct device *dev = &pdev->dev;
124 struct sdhci_host *host = NULL;
125 struct resource *iomem = NULL;
126 struct sdhci_pxa *pxa = NULL;
127 int ret, irq;
128
129 irq = platform_get_irq(pdev, 0);
130 if (irq < 0) {
131 dev_err(dev, "no irq specified\n");
132 return irq;
133 }
134
135 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
136 if (!iomem) {
137 dev_err(dev, "no memory specified\n");
138 return -ENOENT;
139 }
140
141 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pxa));
142 if (IS_ERR(host)) {
143 dev_err(dev, "failed to alloc host\n");
144 return PTR_ERR(host);
145 }
146
147 pxa = sdhci_priv(host);
148 pxa->host = host;
149 pxa->pdata = pdata;
150 pxa->clk_enable = 0;
151
152 pxa->clk = clk_get(dev, "PXA-SDHCLK");
153 if (IS_ERR(pxa->clk)) {
154 dev_err(dev, "failed to get io clock\n");
155 ret = PTR_ERR(pxa->clk);
156 goto out;
157 }
158
159 pxa->res = request_mem_region(iomem->start, resource_size(iomem),
160 mmc_hostname(host->mmc));
161 if (!pxa->res) {
162 dev_err(&pdev->dev, "cannot request region\n");
163 ret = -EBUSY;
164 goto out;
165 }
166
167 host->ioaddr = ioremap(iomem->start, resource_size(iomem));
168 if (!host->ioaddr) {
169 dev_err(&pdev->dev, "failed to remap registers\n");
170 ret = -ENOMEM;
171 goto out;
172 }
173
174 host->hw_name = "MMC";
175 host->ops = &sdhci_pxa_ops;
176 host->irq = irq;
177 host->quirks = SDHCI_QUIRK_BROKEN_ADMA
178 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
179 | SDHCI_QUIRK_32BIT_DMA_ADDR
180 | SDHCI_QUIRK_32BIT_DMA_SIZE
181 | SDHCI_QUIRK_32BIT_ADMA_SIZE
182 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
183
184 if (pdata->quirks)
185 host->quirks |= pdata->quirks;
186
187 /* enable 1/8V DDR capable */
188 host->mmc->caps |= MMC_CAP_1_8V_DDR;
189
190 /* If slot design supports 8 bit data, indicate this to MMC. */
191 if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
192 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
193
194 ret = sdhci_add_host(host);
195 if (ret) {
196 dev_err(&pdev->dev, "failed to add host\n");
197 goto out;
198 }
199
200 if (pxa->pdata->max_speed)
201 host->mmc->f_max = pxa->pdata->max_speed;
202
203 platform_set_drvdata(pdev, host);
204
205 return 0;
206out:
207 if (host) {
208 clk_put(pxa->clk);
209 if (host->ioaddr)
210 iounmap(host->ioaddr);
211 if (pxa->res)
212 release_mem_region(pxa->res->start,
213 resource_size(pxa->res));
214 sdhci_free_host(host);
215 }
216
217 return ret;
218}
219
220static int __devexit sdhci_pxa_remove(struct platform_device *pdev)
221{
222 struct sdhci_host *host = platform_get_drvdata(pdev);
223 struct sdhci_pxa *pxa = sdhci_priv(host);
224 int dead = 0;
225 u32 scratch;
226
227 if (host) {
228 scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
229 if (scratch == (u32)-1)
230 dead = 1;
231
232 sdhci_remove_host(host, dead);
233
234 if (host->ioaddr)
235 iounmap(host->ioaddr);
236 if (pxa->res)
237 release_mem_region(pxa->res->start,
238 resource_size(pxa->res));
239 if (pxa->clk_enable) {
240 clk_disable(pxa->clk);
241 pxa->clk_enable = 0;
242 }
243 clk_put(pxa->clk);
244
245 sdhci_free_host(host);
246 platform_set_drvdata(pdev, NULL);
247 }
248
249 return 0;
250}
251
252#ifdef CONFIG_PM
253static int sdhci_pxa_suspend(struct platform_device *dev, pm_message_t state)
254{
255 struct sdhci_host *host = platform_get_drvdata(dev);
256
257 return sdhci_suspend_host(host, state);
258}
259
260static int sdhci_pxa_resume(struct platform_device *dev)
261{
262 struct sdhci_host *host = platform_get_drvdata(dev);
263
264 return sdhci_resume_host(host);
265}
266#else
267#define sdhci_pxa_suspend NULL
268#define sdhci_pxa_resume NULL
269#endif
270
271static struct platform_driver sdhci_pxa_driver = {
272 .probe = sdhci_pxa_probe,
273 .remove = __devexit_p(sdhci_pxa_remove),
274 .suspend = sdhci_pxa_suspend,
275 .resume = sdhci_pxa_resume,
276 .driver = {
277 .name = DRIVER_NAME,
278 .owner = THIS_MODULE,
279 },
280};
281
282/*****************************************************************************\
283 * *
284 * Driver init/exit *
285 * *
286\*****************************************************************************/
287
288static int __init sdhci_pxa_init(void)
289{
290 return platform_driver_register(&sdhci_pxa_driver);
291}
292
293static void __exit sdhci_pxa_exit(void)
294{
295 platform_driver_unregister(&sdhci_pxa_driver);
296}
297
298module_init(sdhci_pxa_init);
299module_exit(sdhci_pxa_exit);
300
301MODULE_DESCRIPTION("SDH controller driver for PXA168/PXA910/MMP2");
302MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
303MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index aacb862ecc8a..69e3ee321eb5 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -130,6 +130,15 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
130 if (!clksrc) 130 if (!clksrc)
131 return UINT_MAX; 131 return UINT_MAX;
132 132
133 /*
134 * Clock divider's step is different as 1 from that of host controller
135 * when 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL.
136 */
137 if (ourhost->pdata->clk_type) {
138 rate = clk_round_rate(clksrc, wanted);
139 return wanted - rate;
140 }
141
133 rate = clk_get_rate(clksrc); 142 rate = clk_get_rate(clksrc);
134 143
135 for (div = 1; div < 256; div *= 2) { 144 for (div = 1; div < 256; div *= 2) {
@@ -232,10 +241,79 @@ static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
232 return min; 241 return min;
233} 242}
234 243
244/* sdhci_cmu_get_max_clk - callback to get maximum clock frequency.*/
245static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
246{
247 struct sdhci_s3c *ourhost = to_s3c(host);
248
249 return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX);
250}
251
252/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
253static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
254{
255 struct sdhci_s3c *ourhost = to_s3c(host);
256
257 /*
258 * initial clock can be in the frequency range of
259 * 100KHz-400KHz, so we set it as max value.
260 */
261 return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], 400000);
262}
263
264/* sdhci_cmu_set_clock - callback on clock change.*/
265static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
266{
267 struct sdhci_s3c *ourhost = to_s3c(host);
268
269 /* don't bother if the clock is going off */
270 if (clock == 0)
271 return;
272
273 sdhci_s3c_set_clock(host, clock);
274
275 clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
276
277 host->clock = clock;
278}
279
280/**
281 * sdhci_s3c_platform_8bit_width - support 8bit buswidth
282 * @host: The SDHCI host being queried
283 * @width: MMC_BUS_WIDTH_ macro for the bus width being requested
284 *
285 * We have 8-bit width support but is not a v3 controller.
286 * So we add platform_8bit_width() and support 8bit width.
287 */
288static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
289{
290 u8 ctrl;
291
292 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
293
294 switch (width) {
295 case MMC_BUS_WIDTH_8:
296 ctrl |= SDHCI_CTRL_8BITBUS;
297 ctrl &= ~SDHCI_CTRL_4BITBUS;
298 break;
299 case MMC_BUS_WIDTH_4:
300 ctrl |= SDHCI_CTRL_4BITBUS;
301 ctrl &= ~SDHCI_CTRL_8BITBUS;
302 break;
303 default:
304 break;
305 }
306
307 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
308
309 return 0;
310}
311
235static struct sdhci_ops sdhci_s3c_ops = { 312static struct sdhci_ops sdhci_s3c_ops = {
236 .get_max_clock = sdhci_s3c_get_max_clk, 313 .get_max_clock = sdhci_s3c_get_max_clk,
237 .set_clock = sdhci_s3c_set_clock, 314 .set_clock = sdhci_s3c_set_clock,
238 .get_min_clock = sdhci_s3c_get_min_clock, 315 .get_min_clock = sdhci_s3c_get_min_clock,
316 .platform_8bit_width = sdhci_s3c_platform_8bit_width,
239}; 317};
240 318
241static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 319static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
@@ -361,6 +439,13 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
361 439
362 clks++; 440 clks++;
363 sc->clk_bus[ptr] = clk; 441 sc->clk_bus[ptr] = clk;
442
443 /*
444 * save current clock index to know which clock bus
445 * is used later in overriding functions.
446 */
447 sc->cur_clk = ptr;
448
364 clk_enable(clk); 449 clk_enable(clk);
365 450
366 dev_info(dev, "clock source %d: %s (%ld Hz)\n", 451 dev_info(dev, "clock source %d: %s (%ld Hz)\n",
@@ -414,6 +499,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
414 * SDHCI block, or a missing configuration that needs to be set. */ 499 * SDHCI block, or a missing configuration that needs to be set. */
415 host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ; 500 host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
416 501
502 /* This host supports the Auto CMD12 */
503 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
504
417 if (pdata->cd_type == S3C_SDHCI_CD_NONE || 505 if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
418 pdata->cd_type == S3C_SDHCI_CD_PERMANENT) 506 pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
419 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; 507 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
@@ -421,12 +509,29 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
421 if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT) 509 if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
422 host->mmc->caps = MMC_CAP_NONREMOVABLE; 510 host->mmc->caps = MMC_CAP_NONREMOVABLE;
423 511
512 if (pdata->host_caps)
513 host->mmc->caps |= pdata->host_caps;
514
424 host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR | 515 host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
425 SDHCI_QUIRK_32BIT_DMA_SIZE); 516 SDHCI_QUIRK_32BIT_DMA_SIZE);
426 517
427 /* HSMMC on Samsung SoCs uses SDCLK as timeout clock */ 518 /* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
428 host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; 519 host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
429 520
521 /*
522 * If controller does not have internal clock divider,
523 * we can use overriding functions instead of default.
524 */
525 if (pdata->clk_type) {
526 sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
527 sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
528 sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
529 }
530
531 /* It supports additional host capabilities if needed */
532 if (pdata->host_caps)
533 host->mmc->caps |= pdata->host_caps;
534
430 ret = sdhci_add_host(host); 535 ret = sdhci_add_host(host);
431 if (ret) { 536 if (ret) {
432 dev_err(dev, "sdhci_add_host() failed\n"); 537 dev_err(dev, "sdhci_add_host() failed\n");
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index d70c54c7b70a..60a4c97d3d18 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -50,7 +50,7 @@ static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
50 /* val == 1 -> card removed, val == 0 -> card inserted */ 50 /* val == 1 -> card removed, val == 0 -> card inserted */
51 /* if card removed - set irq for low level, else vice versa */ 51 /* if card removed - set irq for low level, else vice versa */
52 gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH; 52 gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
53 set_irq_type(irq, gpio_irq_type); 53 irq_set_irq_type(irq, gpio_irq_type);
54 54
55 if (sdhci->data->card_power_gpio >= 0) { 55 if (sdhci->data->card_power_gpio >= 0) {
56 if (!sdhci->data->power_always_enb) { 56 if (!sdhci->data->power_always_enb) {
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
new file mode 100644
index 000000000000..343c97edba32
--- /dev/null
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -0,0 +1,263 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/io.h>
20#include <linux/gpio.h>
21#include <linux/mmc/card.h>
22#include <linux/mmc/host.h>
23
24#include <mach/gpio.h>
25#include <mach/sdhci.h>
26
27#include "sdhci.h"
28#include "sdhci-pltfm.h"
29
30static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
31{
32 u32 val;
33
34 if (unlikely(reg == SDHCI_PRESENT_STATE)) {
35 /* Use wp_gpio here instead? */
36 val = readl(host->ioaddr + reg);
37 return val | SDHCI_WRITE_PROTECT;
38 }
39
40 return readl(host->ioaddr + reg);
41}
42
43static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
44{
45 if (unlikely(reg == SDHCI_HOST_VERSION)) {
46 /* Erratum: Version register is invalid in HW. */
47 return SDHCI_SPEC_200;
48 }
49
50 return readw(host->ioaddr + reg);
51}
52
53static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
54{
55 /* Seems like we're getting spurious timeout and crc errors, so
56 * disable signalling of them. In case of real errors software
57 * timers should take care of eventually detecting them.
58 */
59 if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
60 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
61
62 writel(val, host->ioaddr + reg);
63
64 if (unlikely(reg == SDHCI_INT_ENABLE)) {
65 /* Erratum: Must enable block gap interrupt detection */
66 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
67 if (val & SDHCI_INT_CARD_INT)
68 gap_ctrl |= 0x8;
69 else
70 gap_ctrl &= ~0x8;
71 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
72 }
73}
74
75static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
76{
77 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
78 struct tegra_sdhci_platform_data *plat;
79
80 plat = pdev->dev.platform_data;
81
82 if (!gpio_is_valid(plat->wp_gpio))
83 return -1;
84
85 return gpio_get_value(plat->wp_gpio);
86}
87
88static irqreturn_t carddetect_irq(int irq, void *data)
89{
90 struct sdhci_host *sdhost = (struct sdhci_host *)data;
91
92 tasklet_schedule(&sdhost->card_tasklet);
93 return IRQ_HANDLED;
94};
95
96static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
97{
98 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
99 struct tegra_sdhci_platform_data *plat;
100 u32 ctrl;
101
102 plat = pdev->dev.platform_data;
103
104 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
105 if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
106 ctrl &= ~SDHCI_CTRL_4BITBUS;
107 ctrl |= SDHCI_CTRL_8BITBUS;
108 } else {
109 ctrl &= ~SDHCI_CTRL_8BITBUS;
110 if (bus_width == MMC_BUS_WIDTH_4)
111 ctrl |= SDHCI_CTRL_4BITBUS;
112 else
113 ctrl &= ~SDHCI_CTRL_4BITBUS;
114 }
115 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
116 return 0;
117}
118
119
120static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
121 struct sdhci_pltfm_data *pdata)
122{
123 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
124 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
125 struct tegra_sdhci_platform_data *plat;
126 struct clk *clk;
127 int rc;
128
129 plat = pdev->dev.platform_data;
130 if (plat == NULL) {
131 dev_err(mmc_dev(host->mmc), "missing platform data\n");
132 return -ENXIO;
133 }
134
135 if (gpio_is_valid(plat->power_gpio)) {
136 rc = gpio_request(plat->power_gpio, "sdhci_power");
137 if (rc) {
138 dev_err(mmc_dev(host->mmc),
139 "failed to allocate power gpio\n");
140 goto out;
141 }
142 tegra_gpio_enable(plat->power_gpio);
143 gpio_direction_output(plat->power_gpio, 1);
144 }
145
146 if (gpio_is_valid(plat->cd_gpio)) {
147 rc = gpio_request(plat->cd_gpio, "sdhci_cd");
148 if (rc) {
149 dev_err(mmc_dev(host->mmc),
150 "failed to allocate cd gpio\n");
151 goto out_power;
152 }
153 tegra_gpio_enable(plat->cd_gpio);
154 gpio_direction_input(plat->cd_gpio);
155
156 rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
157 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
158 mmc_hostname(host->mmc), host);
159
160 if (rc) {
161 dev_err(mmc_dev(host->mmc), "request irq error\n");
162 goto out_cd;
163 }
164
165 }
166
167 if (gpio_is_valid(plat->wp_gpio)) {
168 rc = gpio_request(plat->wp_gpio, "sdhci_wp");
169 if (rc) {
170 dev_err(mmc_dev(host->mmc),
171 "failed to allocate wp gpio\n");
172 goto out_irq;
173 }
174 tegra_gpio_enable(plat->wp_gpio);
175 gpio_direction_input(plat->wp_gpio);
176 }
177
178 clk = clk_get(mmc_dev(host->mmc), NULL);
179 if (IS_ERR(clk)) {
180 dev_err(mmc_dev(host->mmc), "clk err\n");
181 rc = PTR_ERR(clk);
182 goto out_wp;
183 }
184 clk_enable(clk);
185 pltfm_host->clk = clk;
186
187 host->mmc->pm_caps = plat->pm_flags;
188
189 if (plat->is_8bit)
190 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
191
192 return 0;
193
194out_wp:
195 if (gpio_is_valid(plat->wp_gpio)) {
196 tegra_gpio_disable(plat->wp_gpio);
197 gpio_free(plat->wp_gpio);
198 }
199
200out_irq:
201 if (gpio_is_valid(plat->cd_gpio))
202 free_irq(gpio_to_irq(plat->cd_gpio), host);
203out_cd:
204 if (gpio_is_valid(plat->cd_gpio)) {
205 tegra_gpio_disable(plat->cd_gpio);
206 gpio_free(plat->cd_gpio);
207 }
208
209out_power:
210 if (gpio_is_valid(plat->power_gpio)) {
211 tegra_gpio_disable(plat->power_gpio);
212 gpio_free(plat->power_gpio);
213 }
214
215out:
216 return rc;
217}
218
219static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
220{
221 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
222 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
223 struct tegra_sdhci_platform_data *plat;
224
225 plat = pdev->dev.platform_data;
226
227 if (gpio_is_valid(plat->wp_gpio)) {
228 tegra_gpio_disable(plat->wp_gpio);
229 gpio_free(plat->wp_gpio);
230 }
231
232 if (gpio_is_valid(plat->cd_gpio)) {
233 free_irq(gpio_to_irq(plat->cd_gpio), host);
234 tegra_gpio_disable(plat->cd_gpio);
235 gpio_free(plat->cd_gpio);
236 }
237
238 if (gpio_is_valid(plat->power_gpio)) {
239 tegra_gpio_disable(plat->power_gpio);
240 gpio_free(plat->power_gpio);
241 }
242
243 clk_disable(pltfm_host->clk);
244 clk_put(pltfm_host->clk);
245}
246
247static struct sdhci_ops tegra_sdhci_ops = {
248 .get_ro = tegra_sdhci_get_ro,
249 .read_l = tegra_sdhci_readl,
250 .read_w = tegra_sdhci_readw,
251 .write_l = tegra_sdhci_writel,
252 .platform_8bit_width = tegra_sdhci_8bit,
253};
254
255struct sdhci_pltfm_data sdhci_tegra_pdata = {
256 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
257 SDHCI_QUIRK_SINGLE_POWER_WRITE |
258 SDHCI_QUIRK_NO_HISPD_BIT |
259 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
260 .ops = &tegra_sdhci_ops,
261 .init = tegra_sdhci_pltfm_init,
262 .exit = tegra_sdhci_pltfm_exit,
263};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 401527d273b5..58d5436ff649 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/leds.h> 24#include <linux/leds.h>
25 25
26#include <linux/mmc/mmc.h>
26#include <linux/mmc/host.h> 27#include <linux/mmc/host.h>
27 28
28#include "sdhci.h" 29#include "sdhci.h"
@@ -37,17 +38,21 @@
37#define SDHCI_USE_LEDS_CLASS 38#define SDHCI_USE_LEDS_CLASS
38#endif 39#endif
39 40
41#define MAX_TUNING_LOOP 40
42
40static unsigned int debug_quirks = 0; 43static unsigned int debug_quirks = 0;
41 44
42static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
43static void sdhci_finish_data(struct sdhci_host *); 45static void sdhci_finish_data(struct sdhci_host *);
44 46
45static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); 47static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
46static void sdhci_finish_command(struct sdhci_host *); 48static void sdhci_finish_command(struct sdhci_host *);
49static int sdhci_execute_tuning(struct mmc_host *mmc);
50static void sdhci_tuning_timer(unsigned long data);
47 51
48static void sdhci_dumpregs(struct sdhci_host *host) 52static void sdhci_dumpregs(struct sdhci_host *host)
49{ 53{
50 printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n"); 54 printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
55 mmc_hostname(host->mmc));
51 56
52 printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n", 57 printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
53 sdhci_readl(host, SDHCI_DMA_ADDRESS), 58 sdhci_readl(host, SDHCI_DMA_ADDRESS),
@@ -76,9 +81,14 @@ static void sdhci_dumpregs(struct sdhci_host *host)
76 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", 81 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
77 sdhci_readw(host, SDHCI_ACMD12_ERR), 82 sdhci_readw(host, SDHCI_ACMD12_ERR),
78 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 83 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
79 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n", 84 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
80 sdhci_readl(host, SDHCI_CAPABILITIES), 85 sdhci_readl(host, SDHCI_CAPABILITIES),
86 sdhci_readl(host, SDHCI_CAPABILITIES_1));
87 printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
88 sdhci_readw(host, SDHCI_COMMAND),
81 sdhci_readl(host, SDHCI_MAX_CURRENT)); 89 sdhci_readl(host, SDHCI_MAX_CURRENT));
90 printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n",
91 sdhci_readw(host, SDHCI_HOST_CONTROL2));
82 92
83 if (host->flags & SDHCI_USE_ADMA) 93 if (host->flags & SDHCI_USE_ADMA)
84 printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 94 printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
@@ -152,6 +162,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
152 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 162 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
153 ier = sdhci_readl(host, SDHCI_INT_ENABLE); 163 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
154 164
165 if (host->ops->platform_reset_enter)
166 host->ops->platform_reset_enter(host, mask);
167
155 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 168 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
156 169
157 if (mask & SDHCI_RESET_ALL) 170 if (mask & SDHCI_RESET_ALL)
@@ -172,6 +185,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
172 mdelay(1); 185 mdelay(1);
173 } 186 }
174 187
188 if (host->ops->platform_reset_exit)
189 host->ops->platform_reset_exit(host, mask);
190
175 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 191 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
176 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); 192 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
177} 193}
@@ -586,9 +602,10 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
586 data->sg_len, direction); 602 data->sg_len, direction);
587} 603}
588 604
589static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) 605static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
590{ 606{
591 u8 count; 607 u8 count;
608 struct mmc_data *data = cmd->data;
592 unsigned target_timeout, current_timeout; 609 unsigned target_timeout, current_timeout;
593 610
594 /* 611 /*
@@ -600,9 +617,16 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
600 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 617 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
601 return 0xE; 618 return 0xE;
602 619
620 /* Unspecified timeout, assume max */
621 if (!data && !cmd->cmd_timeout_ms)
622 return 0xE;
623
603 /* timeout in us */ 624 /* timeout in us */
604 target_timeout = data->timeout_ns / 1000 + 625 if (!data)
605 data->timeout_clks / host->clock; 626 target_timeout = cmd->cmd_timeout_ms * 1000;
627 else
628 target_timeout = data->timeout_ns / 1000 +
629 data->timeout_clks / host->clock;
606 630
607 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 631 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
608 host->timeout_clk = host->clock / 1000; 632 host->timeout_clk = host->clock / 1000;
@@ -617,6 +641,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
617 * => 641 * =>
618 * (1) / (2) > 2^6 642 * (1) / (2) > 2^6
619 */ 643 */
644 BUG_ON(!host->timeout_clk);
620 count = 0; 645 count = 0;
621 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 646 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
622 while (current_timeout < target_timeout) { 647 while (current_timeout < target_timeout) {
@@ -627,8 +652,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
627 } 652 }
628 653
629 if (count >= 0xF) { 654 if (count >= 0xF) {
630 printk(KERN_WARNING "%s: Too large timeout requested!\n", 655 printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n",
631 mmc_hostname(host->mmc)); 656 mmc_hostname(host->mmc), cmd->opcode);
632 count = 0xE; 657 count = 0xE;
633 } 658 }
634 659
@@ -646,15 +671,21 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
646 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs); 671 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
647} 672}
648 673
649static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) 674static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
650{ 675{
651 u8 count; 676 u8 count;
652 u8 ctrl; 677 u8 ctrl;
678 struct mmc_data *data = cmd->data;
653 int ret; 679 int ret;
654 680
655 WARN_ON(host->data); 681 WARN_ON(host->data);
656 682
657 if (data == NULL) 683 if (data || (cmd->flags & MMC_RSP_BUSY)) {
684 count = sdhci_calc_timeout(host, cmd);
685 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
686 }
687
688 if (!data)
658 return; 689 return;
659 690
660 /* Sanity checks */ 691 /* Sanity checks */
@@ -664,9 +695,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
664 695
665 host->data = data; 696 host->data = data;
666 host->data_early = 0; 697 host->data_early = 0;
667 698 host->data->bytes_xfered = 0;
668 count = sdhci_calc_timeout(host, data);
669 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
670 699
671 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) 700 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
672 host->flags |= SDHCI_REQ_USE_DMA; 701 host->flags |= SDHCI_REQ_USE_DMA;
@@ -802,15 +831,17 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
802 831
803 sdhci_set_transfer_irqs(host); 832 sdhci_set_transfer_irqs(host);
804 833
805 /* We do not handle DMA boundaries, so set it to max (512 KiB) */ 834 /* Set the DMA boundary value and block size */
806 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE); 835 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
836 data->blksz), SDHCI_BLOCK_SIZE);
807 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 837 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
808} 838}
809 839
810static void sdhci_set_transfer_mode(struct sdhci_host *host, 840static void sdhci_set_transfer_mode(struct sdhci_host *host,
811 struct mmc_data *data) 841 struct mmc_command *cmd)
812{ 842{
813 u16 mode; 843 u16 mode;
844 struct mmc_data *data = cmd->data;
814 845
815 if (data == NULL) 846 if (data == NULL)
816 return; 847 return;
@@ -818,12 +849,20 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
818 WARN_ON(!host->data); 849 WARN_ON(!host->data);
819 850
820 mode = SDHCI_TRNS_BLK_CNT_EN; 851 mode = SDHCI_TRNS_BLK_CNT_EN;
821 if (data->blocks > 1) { 852 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
822 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 853 mode |= SDHCI_TRNS_MULTI;
823 mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12; 854 /*
824 else 855 * If we are sending CMD23, CMD12 never gets sent
825 mode |= SDHCI_TRNS_MULTI; 856 * on successful completion (so no Auto-CMD12).
857 */
858 if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
859 mode |= SDHCI_TRNS_AUTO_CMD12;
860 else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
861 mode |= SDHCI_TRNS_AUTO_CMD23;
862 sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
863 }
826 } 864 }
865
827 if (data->flags & MMC_DATA_READ) 866 if (data->flags & MMC_DATA_READ)
828 mode |= SDHCI_TRNS_READ; 867 mode |= SDHCI_TRNS_READ;
829 if (host->flags & SDHCI_REQ_USE_DMA) 868 if (host->flags & SDHCI_REQ_USE_DMA)
@@ -863,7 +902,15 @@ static void sdhci_finish_data(struct sdhci_host *host)
863 else 902 else
864 data->bytes_xfered = data->blksz * data->blocks; 903 data->bytes_xfered = data->blksz * data->blocks;
865 904
866 if (data->stop) { 905 /*
906 * Need to send CMD12 if -
907 * a) open-ended multiblock transfer (no CMD23)
908 * b) error in multiblock transfer
909 */
910 if (data->stop &&
911 (data->error ||
912 !host->mrq->sbc)) {
913
867 /* 914 /*
868 * The controller needs a reset of internal state machines 915 * The controller needs a reset of internal state machines
869 * upon error conditions. 916 * upon error conditions.
@@ -915,11 +962,11 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
915 962
916 host->cmd = cmd; 963 host->cmd = cmd;
917 964
918 sdhci_prepare_data(host, cmd->data); 965 sdhci_prepare_data(host, cmd);
919 966
920 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); 967 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
921 968
922 sdhci_set_transfer_mode(host, cmd->data); 969 sdhci_set_transfer_mode(host, cmd);
923 970
924 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 971 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
925 printk(KERN_ERR "%s: Unsupported response type!\n", 972 printk(KERN_ERR "%s: Unsupported response type!\n",
@@ -942,7 +989,9 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
942 flags |= SDHCI_CMD_CRC; 989 flags |= SDHCI_CMD_CRC;
943 if (cmd->flags & MMC_RSP_OPCODE) 990 if (cmd->flags & MMC_RSP_OPCODE)
944 flags |= SDHCI_CMD_INDEX; 991 flags |= SDHCI_CMD_INDEX;
945 if (cmd->data) 992
993 /* CMD19 is special in that the Data Present Select should be set */
994 if (cmd->data || (cmd->opcode == MMC_SEND_TUNING_BLOCK))
946 flags |= SDHCI_CMD_DATA; 995 flags |= SDHCI_CMD_DATA;
947 996
948 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); 997 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
@@ -972,19 +1021,27 @@ static void sdhci_finish_command(struct sdhci_host *host)
972 1021
973 host->cmd->error = 0; 1022 host->cmd->error = 0;
974 1023
975 if (host->data && host->data_early) 1024 /* Finished CMD23, now send actual command. */
976 sdhci_finish_data(host); 1025 if (host->cmd == host->mrq->sbc) {
1026 host->cmd = NULL;
1027 sdhci_send_command(host, host->mrq->cmd);
1028 } else {
977 1029
978 if (!host->cmd->data) 1030 /* Processed actual command. */
979 tasklet_schedule(&host->finish_tasklet); 1031 if (host->data && host->data_early)
1032 sdhci_finish_data(host);
980 1033
981 host->cmd = NULL; 1034 if (!host->cmd->data)
1035 tasklet_schedule(&host->finish_tasklet);
1036
1037 host->cmd = NULL;
1038 }
982} 1039}
983 1040
984static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 1041static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
985{ 1042{
986 int div; 1043 int div = 0; /* Initialized for compiler warning */
987 u16 clk; 1044 u16 clk = 0;
988 unsigned long timeout; 1045 unsigned long timeout;
989 1046
990 if (clock == host->clock) 1047 if (clock == host->clock)
@@ -1001,13 +1058,59 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1001 if (clock == 0) 1058 if (clock == 0)
1002 goto out; 1059 goto out;
1003 1060
1004 for (div = 1;div < 256;div *= 2) { 1061 if (host->version >= SDHCI_SPEC_300) {
1005 if ((host->max_clk / div) <= clock) 1062 /*
1006 break; 1063 * Check if the Host Controller supports Programmable Clock
1064 * Mode.
1065 */
1066 if (host->clk_mul) {
1067 u16 ctrl;
1068
1069 /*
1070 * We need to figure out whether the Host Driver needs
1071 * to select Programmable Clock Mode, or the value can
1072 * be set automatically by the Host Controller based on
1073 * the Preset Value registers.
1074 */
1075 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1076 if (!(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1077 for (div = 1; div <= 1024; div++) {
1078 if (((host->max_clk * host->clk_mul) /
1079 div) <= clock)
1080 break;
1081 }
1082 /*
1083 * Set Programmable Clock Mode in the Clock
1084 * Control register.
1085 */
1086 clk = SDHCI_PROG_CLOCK_MODE;
1087 div--;
1088 }
1089 } else {
1090 /* Version 3.00 divisors must be a multiple of 2. */
1091 if (host->max_clk <= clock)
1092 div = 1;
1093 else {
1094 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1095 div += 2) {
1096 if ((host->max_clk / div) <= clock)
1097 break;
1098 }
1099 }
1100 div >>= 1;
1101 }
1102 } else {
1103 /* Version 2.00 divisors must be a power of 2. */
1104 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1105 if ((host->max_clk / div) <= clock)
1106 break;
1107 }
1108 div >>= 1;
1007 } 1109 }
1008 div >>= 1;
1009 1110
1010 clk = div << SDHCI_DIVIDER_SHIFT; 1111 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1112 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1113 << SDHCI_DIVIDER_HI_SHIFT;
1011 clk |= SDHCI_CLOCK_INT_EN; 1114 clk |= SDHCI_CLOCK_INT_EN;
1012 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1115 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1013 1116
@@ -1034,11 +1137,9 @@ out:
1034 1137
1035static void sdhci_set_power(struct sdhci_host *host, unsigned short power) 1138static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
1036{ 1139{
1037 u8 pwr; 1140 u8 pwr = 0;
1038 1141
1039 if (power == (unsigned short)-1) 1142 if (power != (unsigned short)-1) {
1040 pwr = 0;
1041 else {
1042 switch (1 << power) { 1143 switch (1 << power) {
1043 case MMC_VDD_165_195: 1144 case MMC_VDD_165_195:
1044 pwr = SDHCI_POWER_180; 1145 pwr = SDHCI_POWER_180;
@@ -1113,7 +1214,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1113#ifndef SDHCI_USE_LEDS_CLASS 1214#ifndef SDHCI_USE_LEDS_CLASS
1114 sdhci_activate_led(host); 1215 sdhci_activate_led(host);
1115#endif 1216#endif
1116 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) { 1217
1218 /*
1219 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1220 * requests if Auto-CMD12 is enabled.
1221 */
1222 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
1117 if (mrq->stop) { 1223 if (mrq->stop) {
1118 mrq->data->stop = NULL; 1224 mrq->data->stop = NULL;
1119 mrq->stop = NULL; 1225 mrq->stop = NULL;
@@ -1132,8 +1238,30 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1132 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1238 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1133 host->mrq->cmd->error = -ENOMEDIUM; 1239 host->mrq->cmd->error = -ENOMEDIUM;
1134 tasklet_schedule(&host->finish_tasklet); 1240 tasklet_schedule(&host->finish_tasklet);
1135 } else 1241 } else {
1136 sdhci_send_command(host, mrq->cmd); 1242 u32 present_state;
1243
1244 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1245 /*
1246 * Check if the re-tuning timer has already expired and there
1247 * is no on-going data transfer. If so, we need to execute
1248 * tuning procedure before sending command.
1249 */
1250 if ((host->flags & SDHCI_NEEDS_RETUNING) &&
1251 !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
1252 spin_unlock_irqrestore(&host->lock, flags);
1253 sdhci_execute_tuning(mmc);
1254 spin_lock_irqsave(&host->lock, flags);
1255
1256 /* Restore original mmc_request structure */
1257 host->mrq = mrq;
1258 }
1259
1260 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1261 sdhci_send_command(host, mrq->sbc);
1262 else
1263 sdhci_send_command(host, mrq->cmd);
1264 }
1137 1265
1138 mmiowb(); 1266 mmiowb();
1139 spin_unlock_irqrestore(&host->lock, flags); 1267 spin_unlock_irqrestore(&host->lock, flags);
@@ -1168,25 +1296,120 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1168 else 1296 else
1169 sdhci_set_power(host, ios->vdd); 1297 sdhci_set_power(host, ios->vdd);
1170 1298
1171 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 1299 if (host->ops->platform_send_init_74_clocks)
1300 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1172 1301
1173 if (ios->bus_width == MMC_BUS_WIDTH_8) 1302 /*
1174 ctrl |= SDHCI_CTRL_8BITBUS; 1303 * If your platform has 8-bit width support but is not a v3 controller,
1175 else 1304 * or if it requires special setup code, you should implement that in
1176 ctrl &= ~SDHCI_CTRL_8BITBUS; 1305 * platform_8bit_width().
1306 */
1307 if (host->ops->platform_8bit_width)
1308 host->ops->platform_8bit_width(host, ios->bus_width);
1309 else {
1310 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1311 if (ios->bus_width == MMC_BUS_WIDTH_8) {
1312 ctrl &= ~SDHCI_CTRL_4BITBUS;
1313 if (host->version >= SDHCI_SPEC_300)
1314 ctrl |= SDHCI_CTRL_8BITBUS;
1315 } else {
1316 if (host->version >= SDHCI_SPEC_300)
1317 ctrl &= ~SDHCI_CTRL_8BITBUS;
1318 if (ios->bus_width == MMC_BUS_WIDTH_4)
1319 ctrl |= SDHCI_CTRL_4BITBUS;
1320 else
1321 ctrl &= ~SDHCI_CTRL_4BITBUS;
1322 }
1323 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1324 }
1177 1325
1178 if (ios->bus_width == MMC_BUS_WIDTH_4) 1326 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1179 ctrl |= SDHCI_CTRL_4BITBUS;
1180 else
1181 ctrl &= ~SDHCI_CTRL_4BITBUS;
1182 1327
1183 if (ios->timing == MMC_TIMING_SD_HS && 1328 if ((ios->timing == MMC_TIMING_SD_HS ||
1184 !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) 1329 ios->timing == MMC_TIMING_MMC_HS)
1330 && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1185 ctrl |= SDHCI_CTRL_HISPD; 1331 ctrl |= SDHCI_CTRL_HISPD;
1186 else 1332 else
1187 ctrl &= ~SDHCI_CTRL_HISPD; 1333 ctrl &= ~SDHCI_CTRL_HISPD;
1188 1334
1189 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1335 if (host->version >= SDHCI_SPEC_300) {
1336 u16 clk, ctrl_2;
1337 unsigned int clock;
1338
1339 /* In case of UHS-I modes, set High Speed Enable */
1340 if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
1341 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1342 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1343 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1344 (ios->timing == MMC_TIMING_UHS_SDR12))
1345 ctrl |= SDHCI_CTRL_HISPD;
1346
1347 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1348 if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1349 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1350 /*
1351 * We only need to set Driver Strength if the
1352 * preset value enable is not set.
1353 */
1354 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1355 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1356 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1357 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1358 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1359
1360 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1361 } else {
1362 /*
1363 * According to SDHC Spec v3.00, if the Preset Value
1364 * Enable in the Host Control 2 register is set, we
1365 * need to reset SD Clock Enable before changing High
1366 * Speed Enable to avoid generating clock gliches.
1367 */
1368
1369 /* Reset SD Clock Enable */
1370 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1371 clk &= ~SDHCI_CLOCK_CARD_EN;
1372 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1373
1374 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1375
1376 /* Re-enable SD Clock */
1377 clock = host->clock;
1378 host->clock = 0;
1379 sdhci_set_clock(host, clock);
1380 }
1381
1382
1383 /* Reset SD Clock Enable */
1384 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1385 clk &= ~SDHCI_CLOCK_CARD_EN;
1386 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1387
1388 if (host->ops->set_uhs_signaling)
1389 host->ops->set_uhs_signaling(host, ios->timing);
1390 else {
1391 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1392 /* Select Bus Speed Mode for host */
1393 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1394 if (ios->timing == MMC_TIMING_UHS_SDR12)
1395 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1396 else if (ios->timing == MMC_TIMING_UHS_SDR25)
1397 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1398 else if (ios->timing == MMC_TIMING_UHS_SDR50)
1399 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1400 else if (ios->timing == MMC_TIMING_UHS_SDR104)
1401 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1402 else if (ios->timing == MMC_TIMING_UHS_DDR50)
1403 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1404 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1405 }
1406
1407 /* Re-enable SD Clock */
1408 clock = host->clock;
1409 host->clock = 0;
1410 sdhci_set_clock(host, clock);
1411 } else
1412 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1190 1413
1191 /* 1414 /*
1192 * Some (ENE) controllers go apeshit on some ios operation, 1415 * Some (ENE) controllers go apeshit on some ios operation,
@@ -1201,26 +1424,49 @@ out:
1201 spin_unlock_irqrestore(&host->lock, flags); 1424 spin_unlock_irqrestore(&host->lock, flags);
1202} 1425}
1203 1426
1204static int sdhci_get_ro(struct mmc_host *mmc) 1427static int check_ro(struct sdhci_host *host)
1205{ 1428{
1206 struct sdhci_host *host;
1207 unsigned long flags; 1429 unsigned long flags;
1208 int present; 1430 int is_readonly;
1209
1210 host = mmc_priv(mmc);
1211 1431
1212 spin_lock_irqsave(&host->lock, flags); 1432 spin_lock_irqsave(&host->lock, flags);
1213 1433
1214 if (host->flags & SDHCI_DEVICE_DEAD) 1434 if (host->flags & SDHCI_DEVICE_DEAD)
1215 present = 0; 1435 is_readonly = 0;
1436 else if (host->ops->get_ro)
1437 is_readonly = host->ops->get_ro(host);
1216 else 1438 else
1217 present = sdhci_readl(host, SDHCI_PRESENT_STATE); 1439 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1440 & SDHCI_WRITE_PROTECT);
1218 1441
1219 spin_unlock_irqrestore(&host->lock, flags); 1442 spin_unlock_irqrestore(&host->lock, flags);
1220 1443
1221 if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT) 1444 /* This quirk needs to be replaced by a callback-function later */
1222 return !!(present & SDHCI_WRITE_PROTECT); 1445 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1223 return !(present & SDHCI_WRITE_PROTECT); 1446 !is_readonly : is_readonly;
1447}
1448
1449#define SAMPLE_COUNT 5
1450
1451static int sdhci_get_ro(struct mmc_host *mmc)
1452{
1453 struct sdhci_host *host;
1454 int i, ro_count;
1455
1456 host = mmc_priv(mmc);
1457
1458 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1459 return check_ro(host);
1460
1461 ro_count = 0;
1462 for (i = 0; i < SAMPLE_COUNT; i++) {
1463 if (check_ro(host)) {
1464 if (++ro_count > SAMPLE_COUNT / 2)
1465 return 1;
1466 }
1467 msleep(30);
1468 }
1469 return 0;
1224} 1470}
1225 1471
1226static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1472static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1245,11 +1491,322 @@ out:
1245 spin_unlock_irqrestore(&host->lock, flags); 1491 spin_unlock_irqrestore(&host->lock, flags);
1246} 1492}
1247 1493
1494static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1495 struct mmc_ios *ios)
1496{
1497 struct sdhci_host *host;
1498 u8 pwr;
1499 u16 clk, ctrl;
1500 u32 present_state;
1501
1502 host = mmc_priv(mmc);
1503
1504 /*
1505 * Signal Voltage Switching is only applicable for Host Controllers
1506 * v3.00 and above.
1507 */
1508 if (host->version < SDHCI_SPEC_300)
1509 return 0;
1510
1511 /*
1512 * We first check whether the request is to set signalling voltage
1513 * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
1514 */
1515 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1516 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1517 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1518 ctrl &= ~SDHCI_CTRL_VDD_180;
1519 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1520
1521 /* Wait for 5ms */
1522 usleep_range(5000, 5500);
1523
1524 /* 3.3V regulator output should be stable within 5 ms */
1525 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1526 if (!(ctrl & SDHCI_CTRL_VDD_180))
1527 return 0;
1528 else {
1529 printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V "
1530 "signalling voltage failed\n");
1531 return -EIO;
1532 }
1533 } else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
1534 (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
1535 /* Stop SDCLK */
1536 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1537 clk &= ~SDHCI_CLOCK_CARD_EN;
1538 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1539
1540 /* Check whether DAT[3:0] is 0000 */
1541 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1542 if (!((present_state & SDHCI_DATA_LVL_MASK) >>
1543 SDHCI_DATA_LVL_SHIFT)) {
1544 /*
1545 * Enable 1.8V Signal Enable in the Host Control2
1546 * register
1547 */
1548 ctrl |= SDHCI_CTRL_VDD_180;
1549 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1550
1551 /* Wait for 5ms */
1552 usleep_range(5000, 5500);
1553
1554 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1555 if (ctrl & SDHCI_CTRL_VDD_180) {
1556 /* Provide SDCLK again and wait for 1ms*/
1557 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1558 clk |= SDHCI_CLOCK_CARD_EN;
1559 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1560 usleep_range(1000, 1500);
1561
1562 /*
1563 * If DAT[3:0] level is 1111b, then the card
1564 * was successfully switched to 1.8V signaling.
1565 */
1566 present_state = sdhci_readl(host,
1567 SDHCI_PRESENT_STATE);
1568 if ((present_state & SDHCI_DATA_LVL_MASK) ==
1569 SDHCI_DATA_LVL_MASK)
1570 return 0;
1571 }
1572 }
1573
1574 /*
1575 * If we are here, that means the switch to 1.8V signaling
1576 * failed. We power cycle the card, and retry initialization
1577 * sequence by setting S18R to 0.
1578 */
1579 pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
1580 pwr &= ~SDHCI_POWER_ON;
1581 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1582
1583 /* Wait for 1ms as per the spec */
1584 usleep_range(1000, 1500);
1585 pwr |= SDHCI_POWER_ON;
1586 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1587
1588 printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling "
1589 "voltage failed, retrying with S18R set to 0\n");
1590 return -EAGAIN;
1591 } else
1592 /* No signal voltage switch required */
1593 return 0;
1594}
1595
1596static int sdhci_execute_tuning(struct mmc_host *mmc)
1597{
1598 struct sdhci_host *host;
1599 u16 ctrl;
1600 u32 ier;
1601 int tuning_loop_counter = MAX_TUNING_LOOP;
1602 unsigned long timeout;
1603 int err = 0;
1604
1605 host = mmc_priv(mmc);
1606
1607 disable_irq(host->irq);
1608 spin_lock(&host->lock);
1609
1610 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1611
1612 /*
1613 * Host Controller needs tuning only in case of SDR104 mode
1614 * and for SDR50 mode when Use Tuning for SDR50 is set in
1615 * Capabilities register.
1616 */
1617 if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
1618 (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
1619 (host->flags & SDHCI_SDR50_NEEDS_TUNING)))
1620 ctrl |= SDHCI_CTRL_EXEC_TUNING;
1621 else {
1622 spin_unlock(&host->lock);
1623 enable_irq(host->irq);
1624 return 0;
1625 }
1626
1627 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1628
1629 /*
1630 * As per the Host Controller spec v3.00, tuning command
1631 * generates Buffer Read Ready interrupt, so enable that.
1632 *
1633 * Note: The spec clearly says that when tuning sequence
1634 * is being performed, the controller does not generate
1635 * interrupts other than Buffer Read Ready interrupt. But
1636 * to make sure we don't hit a controller bug, we _only_
1637 * enable Buffer Read Ready interrupt here.
1638 */
1639 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
1640 sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
1641
1642 /*
1643 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
1644 * of loops reaches 40 times or a timeout of 150ms occurs.
1645 */
1646 timeout = 150;
1647 do {
1648 struct mmc_command cmd = {0};
1649 struct mmc_request mrq = {0};
1650
1651 if (!tuning_loop_counter && !timeout)
1652 break;
1653
1654 cmd.opcode = MMC_SEND_TUNING_BLOCK;
1655 cmd.arg = 0;
1656 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1657 cmd.retries = 0;
1658 cmd.data = NULL;
1659 cmd.error = 0;
1660
1661 mrq.cmd = &cmd;
1662 host->mrq = &mrq;
1663
1664 /*
1665 * In response to CMD19, the card sends 64 bytes of tuning
1666 * block to the Host Controller. So we set the block size
1667 * to 64 here.
1668 */
1669 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
1670
1671 /*
1672 * The tuning block is sent by the card to the host controller.
1673 * So we set the TRNS_READ bit in the Transfer Mode register.
1674 * This also takes care of setting DMA Enable and Multi Block
1675 * Select in the same register to 0.
1676 */
1677 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
1678
1679 sdhci_send_command(host, &cmd);
1680
1681 host->cmd = NULL;
1682 host->mrq = NULL;
1683
1684 spin_unlock(&host->lock);
1685 enable_irq(host->irq);
1686
1687 /* Wait for Buffer Read Ready interrupt */
1688 wait_event_interruptible_timeout(host->buf_ready_int,
1689 (host->tuning_done == 1),
1690 msecs_to_jiffies(50));
1691 disable_irq(host->irq);
1692 spin_lock(&host->lock);
1693
1694 if (!host->tuning_done) {
1695 printk(KERN_INFO DRIVER_NAME ": Timeout waiting for "
1696 "Buffer Read Ready interrupt during tuning "
1697 "procedure, falling back to fixed sampling "
1698 "clock\n");
1699 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1700 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1701 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
1702 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1703
1704 err = -EIO;
1705 goto out;
1706 }
1707
1708 host->tuning_done = 0;
1709
1710 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1711 tuning_loop_counter--;
1712 timeout--;
1713 mdelay(1);
1714 } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
1715
1716 /*
1717 * The Host Driver has exhausted the maximum number of loops allowed,
1718 * so use fixed sampling frequency.
1719 */
1720 if (!tuning_loop_counter || !timeout) {
1721 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1722 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1723 } else {
1724 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
1725 printk(KERN_INFO DRIVER_NAME ": Tuning procedure"
1726 " failed, falling back to fixed sampling"
1727 " clock\n");
1728 err = -EIO;
1729 }
1730 }
1731
1732out:
1733 /*
1734 * If this is the very first time we are here, we start the retuning
1735 * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
1736 * flag won't be set, we check this condition before actually starting
1737 * the timer.
1738 */
1739 if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
1740 (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
1741 mod_timer(&host->tuning_timer, jiffies +
1742 host->tuning_count * HZ);
1743 /* Tuning mode 1 limits the maximum data length to 4MB */
1744 mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
1745 } else {
1746 host->flags &= ~SDHCI_NEEDS_RETUNING;
1747 /* Reload the new initial value for timer */
1748 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
1749 mod_timer(&host->tuning_timer, jiffies +
1750 host->tuning_count * HZ);
1751 }
1752
1753 /*
1754 * In case tuning fails, host controllers which support re-tuning can
1755 * try tuning again at a later time, when the re-tuning timer expires.
1756 * So for these controllers, we return 0. Since there might be other
1757 * controllers who do not have this capability, we return error for
1758 * them.
1759 */
1760 if (err && host->tuning_count &&
1761 host->tuning_mode == SDHCI_TUNING_MODE_1)
1762 err = 0;
1763
1764 sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
1765 spin_unlock(&host->lock);
1766 enable_irq(host->irq);
1767
1768 return err;
1769}
1770
1771static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
1772{
1773 struct sdhci_host *host;
1774 u16 ctrl;
1775 unsigned long flags;
1776
1777 host = mmc_priv(mmc);
1778
1779 /* Host Controller v3.00 defines preset value registers */
1780 if (host->version < SDHCI_SPEC_300)
1781 return;
1782
1783 spin_lock_irqsave(&host->lock, flags);
1784
1785 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1786
1787 /*
1788 * We only enable or disable Preset Value if they are not already
1789 * enabled or disabled respectively. Otherwise, we bail out.
1790 */
1791 if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1792 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
1793 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1794 } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1795 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
1796 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1797 }
1798
1799 spin_unlock_irqrestore(&host->lock, flags);
1800}
1801
1248static const struct mmc_host_ops sdhci_ops = { 1802static const struct mmc_host_ops sdhci_ops = {
1249 .request = sdhci_request, 1803 .request = sdhci_request,
1250 .set_ios = sdhci_set_ios, 1804 .set_ios = sdhci_set_ios,
1251 .get_ro = sdhci_get_ro, 1805 .get_ro = sdhci_get_ro,
1252 .enable_sdio_irq = sdhci_enable_sdio_irq, 1806 .enable_sdio_irq = sdhci_enable_sdio_irq,
1807 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
1808 .execute_tuning = sdhci_execute_tuning,
1809 .enable_preset_value = sdhci_enable_preset_value,
1253}; 1810};
1254 1811
1255/*****************************************************************************\ 1812/*****************************************************************************\
@@ -1295,10 +1852,20 @@ static void sdhci_tasklet_finish(unsigned long param)
1295 1852
1296 host = (struct sdhci_host*)param; 1853 host = (struct sdhci_host*)param;
1297 1854
1855 /*
1856 * If this tasklet gets rescheduled while running, it will
1857 * be run again afterwards but without any active request.
1858 */
1859 if (!host->mrq)
1860 return;
1861
1298 spin_lock_irqsave(&host->lock, flags); 1862 spin_lock_irqsave(&host->lock, flags);
1299 1863
1300 del_timer(&host->timer); 1864 del_timer(&host->timer);
1301 1865
1866 if (host->version >= SDHCI_SPEC_300)
1867 del_timer(&host->tuning_timer);
1868
1302 mrq = host->mrq; 1869 mrq = host->mrq;
1303 1870
1304 /* 1871 /*
@@ -1306,7 +1873,7 @@ static void sdhci_tasklet_finish(unsigned long param)
1306 * upon error conditions. 1873 * upon error conditions.
1307 */ 1874 */
1308 if (!(host->flags & SDHCI_DEVICE_DEAD) && 1875 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
1309 (mrq->cmd->error || 1876 ((mrq->cmd && mrq->cmd->error) ||
1310 (mrq->data && (mrq->data->error || 1877 (mrq->data && (mrq->data->error ||
1311 (mrq->data->stop && mrq->data->stop->error))) || 1878 (mrq->data->stop && mrq->data->stop->error))) ||
1312 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { 1879 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
@@ -1372,6 +1939,20 @@ static void sdhci_timeout_timer(unsigned long data)
1372 spin_unlock_irqrestore(&host->lock, flags); 1939 spin_unlock_irqrestore(&host->lock, flags);
1373} 1940}
1374 1941
1942static void sdhci_tuning_timer(unsigned long data)
1943{
1944 struct sdhci_host *host;
1945 unsigned long flags;
1946
1947 host = (struct sdhci_host *)data;
1948
1949 spin_lock_irqsave(&host->lock, flags);
1950
1951 host->flags |= SDHCI_NEEDS_RETUNING;
1952
1953 spin_unlock_irqrestore(&host->lock, flags);
1954}
1955
1375/*****************************************************************************\ 1956/*****************************************************************************\
1376 * * 1957 * *
1377 * Interrupt handling * 1958 * Interrupt handling *
@@ -1427,7 +2008,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1427 sdhci_finish_command(host); 2008 sdhci_finish_command(host);
1428} 2009}
1429 2010
1430#ifdef DEBUG 2011#ifdef CONFIG_MMC_DEBUG
1431static void sdhci_show_adma_error(struct sdhci_host *host) 2012static void sdhci_show_adma_error(struct sdhci_host *host)
1432{ 2013{
1433 const char *name = mmc_hostname(host->mmc); 2014 const char *name = mmc_hostname(host->mmc);
@@ -1460,6 +2041,16 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1460{ 2041{
1461 BUG_ON(intmask == 0); 2042 BUG_ON(intmask == 0);
1462 2043
2044 /* CMD19 generates _only_ Buffer Read Ready interrupt */
2045 if (intmask & SDHCI_INT_DATA_AVAIL) {
2046 if (SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) ==
2047 MMC_SEND_TUNING_BLOCK) {
2048 host->tuning_done = 1;
2049 wake_up(&host->buf_ready_int);
2050 return;
2051 }
2052 }
2053
1463 if (!host->data) { 2054 if (!host->data) {
1464 /* 2055 /*
1465 * The "data complete" interrupt is also used to 2056 * The "data complete" interrupt is also used to
@@ -1483,7 +2074,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1483 2074
1484 if (intmask & SDHCI_INT_DATA_TIMEOUT) 2075 if (intmask & SDHCI_INT_DATA_TIMEOUT)
1485 host->data->error = -ETIMEDOUT; 2076 host->data->error = -ETIMEDOUT;
1486 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) 2077 else if (intmask & SDHCI_INT_DATA_END_BIT)
2078 host->data->error = -EILSEQ;
2079 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2080 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2081 != MMC_BUS_TEST_R)
1487 host->data->error = -EILSEQ; 2082 host->data->error = -EILSEQ;
1488 else if (intmask & SDHCI_INT_ADMA_ERROR) { 2083 else if (intmask & SDHCI_INT_ADMA_ERROR) {
1489 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc)); 2084 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
@@ -1501,10 +2096,28 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1501 * We currently don't do anything fancy with DMA 2096 * We currently don't do anything fancy with DMA
1502 * boundaries, but as we can't disable the feature 2097 * boundaries, but as we can't disable the feature
1503 * we need to at least restart the transfer. 2098 * we need to at least restart the transfer.
2099 *
2100 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2101 * should return a valid address to continue from, but as
2102 * some controllers are faulty, don't trust them.
1504 */ 2103 */
1505 if (intmask & SDHCI_INT_DMA_END) 2104 if (intmask & SDHCI_INT_DMA_END) {
1506 sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS), 2105 u32 dmastart, dmanow;
1507 SDHCI_DMA_ADDRESS); 2106 dmastart = sg_dma_address(host->data->sg);
2107 dmanow = dmastart + host->data->bytes_xfered;
2108 /*
2109 * Force update to the next DMA block boundary.
2110 */
2111 dmanow = (dmanow &
2112 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2113 SDHCI_DEFAULT_BOUNDARY_SIZE;
2114 host->data->bytes_xfered = dmanow - dmastart;
2115 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
2116 " next 0x%08x\n",
2117 mmc_hostname(host->mmc), dmastart,
2118 host->data->bytes_xfered, dmanow);
2119 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2120 }
1508 2121
1509 if (intmask & SDHCI_INT_DATA_END) { 2122 if (intmask & SDHCI_INT_DATA_END) {
1510 if (host->cmd) { 2123 if (host->cmd) {
@@ -1614,6 +2227,14 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1614 2227
1615 sdhci_disable_card_detection(host); 2228 sdhci_disable_card_detection(host);
1616 2229
2230 /* Disable tuning since we are suspending */
2231 if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
2232 host->tuning_mode == SDHCI_TUNING_MODE_1) {
2233 host->flags &= ~SDHCI_NEEDS_RETUNING;
2234 mod_timer(&host->tuning_timer, jiffies +
2235 host->tuning_count * HZ);
2236 }
2237
1617 ret = mmc_suspend_host(host->mmc); 2238 ret = mmc_suspend_host(host->mmc);
1618 if (ret) 2239 if (ret)
1619 return ret; 2240 return ret;
@@ -1655,11 +2276,26 @@ int sdhci_resume_host(struct sdhci_host *host)
1655 ret = mmc_resume_host(host->mmc); 2276 ret = mmc_resume_host(host->mmc);
1656 sdhci_enable_card_detection(host); 2277 sdhci_enable_card_detection(host);
1657 2278
2279 /* Set the re-tuning expiration flag */
2280 if ((host->version >= SDHCI_SPEC_300) && host->tuning_count &&
2281 (host->tuning_mode == SDHCI_TUNING_MODE_1))
2282 host->flags |= SDHCI_NEEDS_RETUNING;
2283
1658 return ret; 2284 return ret;
1659} 2285}
1660 2286
1661EXPORT_SYMBOL_GPL(sdhci_resume_host); 2287EXPORT_SYMBOL_GPL(sdhci_resume_host);
1662 2288
2289void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2290{
2291 u8 val;
2292 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2293 val |= SDHCI_WAKE_ON_INT;
2294 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2295}
2296
2297EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2298
1663#endif /* CONFIG_PM */ 2299#endif /* CONFIG_PM */
1664 2300
1665/*****************************************************************************\ 2301/*****************************************************************************\
@@ -1691,7 +2327,9 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1691int sdhci_add_host(struct sdhci_host *host) 2327int sdhci_add_host(struct sdhci_host *host)
1692{ 2328{
1693 struct mmc_host *mmc; 2329 struct mmc_host *mmc;
1694 unsigned int caps; 2330 u32 caps[2];
2331 u32 max_current_caps;
2332 unsigned int ocr_avail;
1695 int ret; 2333 int ret;
1696 2334
1697 WARN_ON(host == NULL); 2335 WARN_ON(host == NULL);
@@ -1708,18 +2346,21 @@ int sdhci_add_host(struct sdhci_host *host)
1708 host->version = sdhci_readw(host, SDHCI_HOST_VERSION); 2346 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
1709 host->version = (host->version & SDHCI_SPEC_VER_MASK) 2347 host->version = (host->version & SDHCI_SPEC_VER_MASK)
1710 >> SDHCI_SPEC_VER_SHIFT; 2348 >> SDHCI_SPEC_VER_SHIFT;
1711 if (host->version > SDHCI_SPEC_200) { 2349 if (host->version > SDHCI_SPEC_300) {
1712 printk(KERN_ERR "%s: Unknown controller version (%d). " 2350 printk(KERN_ERR "%s: Unknown controller version (%d). "
1713 "You may experience problems.\n", mmc_hostname(mmc), 2351 "You may experience problems.\n", mmc_hostname(mmc),
1714 host->version); 2352 host->version);
1715 } 2353 }
1716 2354
1717 caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : 2355 caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
1718 sdhci_readl(host, SDHCI_CAPABILITIES); 2356 sdhci_readl(host, SDHCI_CAPABILITIES);
1719 2357
2358 caps[1] = (host->version >= SDHCI_SPEC_300) ?
2359 sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0;
2360
1720 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 2361 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
1721 host->flags |= SDHCI_USE_SDMA; 2362 host->flags |= SDHCI_USE_SDMA;
1722 else if (!(caps & SDHCI_CAN_DO_SDMA)) 2363 else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
1723 DBG("Controller doesn't have SDMA capability\n"); 2364 DBG("Controller doesn't have SDMA capability\n");
1724 else 2365 else
1725 host->flags |= SDHCI_USE_SDMA; 2366 host->flags |= SDHCI_USE_SDMA;
@@ -1730,7 +2371,8 @@ int sdhci_add_host(struct sdhci_host *host)
1730 host->flags &= ~SDHCI_USE_SDMA; 2371 host->flags &= ~SDHCI_USE_SDMA;
1731 } 2372 }
1732 2373
1733 if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2)) 2374 if ((host->version >= SDHCI_SPEC_200) &&
2375 (caps[0] & SDHCI_CAN_DO_ADMA2))
1734 host->flags |= SDHCI_USE_ADMA; 2376 host->flags |= SDHCI_USE_ADMA;
1735 2377
1736 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 2378 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
@@ -1779,8 +2421,13 @@ int sdhci_add_host(struct sdhci_host *host)
1779 mmc_dev(host->mmc)->dma_mask = &host->dma_mask; 2421 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
1780 } 2422 }
1781 2423
1782 host->max_clk = 2424 if (host->version >= SDHCI_SPEC_300)
1783 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 2425 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
2426 >> SDHCI_CLOCK_BASE_SHIFT;
2427 else
2428 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
2429 >> SDHCI_CLOCK_BASE_SHIFT;
2430
1784 host->max_clk *= 1000000; 2431 host->max_clk *= 1000000;
1785 if (host->max_clk == 0 || host->quirks & 2432 if (host->max_clk == 0 || host->quirks &
1786 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 2433 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
@@ -1794,7 +2441,7 @@ int sdhci_add_host(struct sdhci_host *host)
1794 } 2441 }
1795 2442
1796 host->timeout_clk = 2443 host->timeout_clk =
1797 (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; 2444 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
1798 if (host->timeout_clk == 0) { 2445 if (host->timeout_clk == 0) {
1799 if (host->ops->get_timeout_clock) { 2446 if (host->ops->get_timeout_clock) {
1800 host->timeout_clk = host->ops->get_timeout_clock(host); 2447 host->timeout_clk = host->ops->get_timeout_clock(host);
@@ -1806,36 +2453,185 @@ int sdhci_add_host(struct sdhci_host *host)
1806 return -ENODEV; 2453 return -ENODEV;
1807 } 2454 }
1808 } 2455 }
1809 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 2456 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
1810 host->timeout_clk *= 1000; 2457 host->timeout_clk *= 1000;
1811 2458
1812 /* 2459 /*
2460 * In case of Host Controller v3.00, find out whether clock
2461 * multiplier is supported.
2462 */
2463 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
2464 SDHCI_CLOCK_MUL_SHIFT;
2465
2466 /*
2467 * In case the value in Clock Multiplier is 0, then programmable
2468 * clock mode is not supported, otherwise the actual clock
2469 * multiplier is one more than the value of Clock Multiplier
2470 * in the Capabilities Register.
2471 */
2472 if (host->clk_mul)
2473 host->clk_mul += 1;
2474
2475 /*
1813 * Set host parameters. 2476 * Set host parameters.
1814 */ 2477 */
1815 mmc->ops = &sdhci_ops; 2478 mmc->ops = &sdhci_ops;
2479 mmc->f_max = host->max_clk;
1816 if (host->ops->get_min_clock) 2480 if (host->ops->get_min_clock)
1817 mmc->f_min = host->ops->get_min_clock(host); 2481 mmc->f_min = host->ops->get_min_clock(host);
1818 else 2482 else if (host->version >= SDHCI_SPEC_300) {
1819 mmc->f_min = host->max_clk / 256; 2483 if (host->clk_mul) {
1820 mmc->f_max = host->max_clk; 2484 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
1821 mmc->caps |= MMC_CAP_SDIO_IRQ; 2485 mmc->f_max = host->max_clk * host->clk_mul;
2486 } else
2487 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
2488 } else
2489 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
2490
2491 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
2492
2493 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
2494 host->flags |= SDHCI_AUTO_CMD12;
2495
2496 /* Auto-CMD23 stuff only works in ADMA or PIO. */
2497 if ((host->version >= SDHCI_SPEC_300) &&
2498 ((host->flags & SDHCI_USE_ADMA) ||
2499 !(host->flags & SDHCI_USE_SDMA))) {
2500 host->flags |= SDHCI_AUTO_CMD23;
2501 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
2502 } else {
2503 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
2504 }
1822 2505
2506 /*
2507 * A controller may support 8-bit width, but the board itself
2508 * might not have the pins brought out. Boards that support
2509 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
2510 * their platform code before calling sdhci_add_host(), and we
2511 * won't assume 8-bit width for hosts without that CAP.
2512 */
1823 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 2513 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
1824 mmc->caps |= MMC_CAP_4_BIT_DATA; 2514 mmc->caps |= MMC_CAP_4_BIT_DATA;
1825 2515
1826 if (caps & SDHCI_CAN_DO_HISPD) 2516 if (caps[0] & SDHCI_CAN_DO_HISPD)
1827 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 2517 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1828 2518
1829 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2519 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
2520 mmc_card_is_removable(mmc))
1830 mmc->caps |= MMC_CAP_NEEDS_POLL; 2521 mmc->caps |= MMC_CAP_NEEDS_POLL;
1831 2522
1832 mmc->ocr_avail = 0; 2523 /* UHS-I mode(s) supported by the host controller. */
1833 if (caps & SDHCI_CAN_VDD_330) 2524 if (host->version >= SDHCI_SPEC_300)
1834 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; 2525 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
1835 if (caps & SDHCI_CAN_VDD_300) 2526
1836 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; 2527 /* SDR104 supports also implies SDR50 support */
1837 if (caps & SDHCI_CAN_VDD_180) 2528 if (caps[1] & SDHCI_SUPPORT_SDR104)
1838 mmc->ocr_avail |= MMC_VDD_165_195; 2529 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
2530 else if (caps[1] & SDHCI_SUPPORT_SDR50)
2531 mmc->caps |= MMC_CAP_UHS_SDR50;
2532
2533 if (caps[1] & SDHCI_SUPPORT_DDR50)
2534 mmc->caps |= MMC_CAP_UHS_DDR50;
2535
2536 /* Does the host needs tuning for SDR50? */
2537 if (caps[1] & SDHCI_USE_SDR50_TUNING)
2538 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
2539
2540 /* Driver Type(s) (A, C, D) supported by the host */
2541 if (caps[1] & SDHCI_DRIVER_TYPE_A)
2542 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
2543 if (caps[1] & SDHCI_DRIVER_TYPE_C)
2544 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
2545 if (caps[1] & SDHCI_DRIVER_TYPE_D)
2546 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
2547
2548 /* Initial value for re-tuning timer count */
2549 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
2550 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
2551
2552 /*
2553 * In case Re-tuning Timer is not disabled, the actual value of
2554 * re-tuning timer will be 2 ^ (n - 1).
2555 */
2556 if (host->tuning_count)
2557 host->tuning_count = 1 << (host->tuning_count - 1);
2558
2559 /* Re-tuning mode supported by the Host Controller */
2560 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
2561 SDHCI_RETUNING_MODE_SHIFT;
2562
2563 ocr_avail = 0;
2564 /*
2565 * According to SD Host Controller spec v3.00, if the Host System
2566 * can afford more than 150mA, Host Driver should set XPC to 1. Also
2567 * the value is meaningful only if Voltage Support in the Capabilities
2568 * register is set. The actual current value is 4 times the register
2569 * value.
2570 */
2571 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
2572
2573 if (caps[0] & SDHCI_CAN_VDD_330) {
2574 int max_current_330;
2575
2576 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
2577
2578 max_current_330 = ((max_current_caps &
2579 SDHCI_MAX_CURRENT_330_MASK) >>
2580 SDHCI_MAX_CURRENT_330_SHIFT) *
2581 SDHCI_MAX_CURRENT_MULTIPLIER;
2582
2583 if (max_current_330 > 150)
2584 mmc->caps |= MMC_CAP_SET_XPC_330;
2585 }
2586 if (caps[0] & SDHCI_CAN_VDD_300) {
2587 int max_current_300;
2588
2589 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
2590
2591 max_current_300 = ((max_current_caps &
2592 SDHCI_MAX_CURRENT_300_MASK) >>
2593 SDHCI_MAX_CURRENT_300_SHIFT) *
2594 SDHCI_MAX_CURRENT_MULTIPLIER;
2595
2596 if (max_current_300 > 150)
2597 mmc->caps |= MMC_CAP_SET_XPC_300;
2598 }
2599 if (caps[0] & SDHCI_CAN_VDD_180) {
2600 int max_current_180;
2601
2602 ocr_avail |= MMC_VDD_165_195;
2603
2604 max_current_180 = ((max_current_caps &
2605 SDHCI_MAX_CURRENT_180_MASK) >>
2606 SDHCI_MAX_CURRENT_180_SHIFT) *
2607 SDHCI_MAX_CURRENT_MULTIPLIER;
2608
2609 if (max_current_180 > 150)
2610 mmc->caps |= MMC_CAP_SET_XPC_180;
2611
2612 /* Maximum current capabilities of the host at 1.8V */
2613 if (max_current_180 >= 800)
2614 mmc->caps |= MMC_CAP_MAX_CURRENT_800;
2615 else if (max_current_180 >= 600)
2616 mmc->caps |= MMC_CAP_MAX_CURRENT_600;
2617 else if (max_current_180 >= 400)
2618 mmc->caps |= MMC_CAP_MAX_CURRENT_400;
2619 else
2620 mmc->caps |= MMC_CAP_MAX_CURRENT_200;
2621 }
2622
2623 mmc->ocr_avail = ocr_avail;
2624 mmc->ocr_avail_sdio = ocr_avail;
2625 if (host->ocr_avail_sdio)
2626 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
2627 mmc->ocr_avail_sd = ocr_avail;
2628 if (host->ocr_avail_sd)
2629 mmc->ocr_avail_sd &= host->ocr_avail_sd;
2630 else /* normal SD controllers don't support 1.8V */
2631 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
2632 mmc->ocr_avail_mmc = ocr_avail;
2633 if (host->ocr_avail_mmc)
2634 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
1839 2635
1840 if (mmc->ocr_avail == 0) { 2636 if (mmc->ocr_avail == 0) {
1841 printk(KERN_ERR "%s: Hardware doesn't report any " 2637 printk(KERN_ERR "%s: Hardware doesn't report any "
@@ -1850,12 +2646,11 @@ int sdhci_add_host(struct sdhci_host *host)
1850 * can do scatter/gather or not. 2646 * can do scatter/gather or not.
1851 */ 2647 */
1852 if (host->flags & SDHCI_USE_ADMA) 2648 if (host->flags & SDHCI_USE_ADMA)
1853 mmc->max_hw_segs = 128; 2649 mmc->max_segs = 128;
1854 else if (host->flags & SDHCI_USE_SDMA) 2650 else if (host->flags & SDHCI_USE_SDMA)
1855 mmc->max_hw_segs = 1; 2651 mmc->max_segs = 1;
1856 else /* PIO */ 2652 else /* PIO */
1857 mmc->max_hw_segs = 128; 2653 mmc->max_segs = 128;
1858 mmc->max_phys_segs = 128;
1859 2654
1860 /* 2655 /*
1861 * Maximum number of sectors in one transfer. Limited by DMA boundary 2656 * Maximum number of sectors in one transfer. Limited by DMA boundary
@@ -1868,10 +2663,14 @@ int sdhci_add_host(struct sdhci_host *host)
1868 * of bytes. When doing hardware scatter/gather, each entry cannot 2663 * of bytes. When doing hardware scatter/gather, each entry cannot
1869 * be larger than 64 KiB though. 2664 * be larger than 64 KiB though.
1870 */ 2665 */
1871 if (host->flags & SDHCI_USE_ADMA) 2666 if (host->flags & SDHCI_USE_ADMA) {
1872 mmc->max_seg_size = 65536; 2667 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
1873 else 2668 mmc->max_seg_size = 65535;
2669 else
2670 mmc->max_seg_size = 65536;
2671 } else {
1874 mmc->max_seg_size = mmc->max_req_size; 2672 mmc->max_seg_size = mmc->max_req_size;
2673 }
1875 2674
1876 /* 2675 /*
1877 * Maximum block size. This varies from controller to controller and 2676 * Maximum block size. This varies from controller to controller and
@@ -1880,7 +2679,7 @@ int sdhci_add_host(struct sdhci_host *host)
1880 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 2679 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
1881 mmc->max_blk_size = 2; 2680 mmc->max_blk_size = 2;
1882 } else { 2681 } else {
1883 mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> 2682 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
1884 SDHCI_MAX_BLOCK_SHIFT; 2683 SDHCI_MAX_BLOCK_SHIFT;
1885 if (mmc->max_blk_size >= 3) { 2684 if (mmc->max_blk_size >= 3) {
1886 printk(KERN_WARNING "%s: Invalid maximum block size, " 2685 printk(KERN_WARNING "%s: Invalid maximum block size, "
@@ -1906,6 +2705,15 @@ int sdhci_add_host(struct sdhci_host *host)
1906 2705
1907 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); 2706 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
1908 2707
2708 if (host->version >= SDHCI_SPEC_300) {
2709 init_waitqueue_head(&host->buf_ready_int);
2710
2711 /* Initialize re-tuning timer */
2712 init_timer(&host->tuning_timer);
2713 host->tuning_timer.data = (unsigned long)host;
2714 host->tuning_timer.function = sdhci_tuning_timer;
2715 }
2716
1909 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, 2717 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1910 mmc_hostname(mmc), host); 2718 mmc_hostname(mmc), host);
1911 if (ret) 2719 if (ret)
@@ -1999,6 +2807,8 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
1999 free_irq(host->irq, host); 2807 free_irq(host->irq, host);
2000 2808
2001 del_timer_sync(&host->timer); 2809 del_timer_sync(&host->timer);
2810 if (host->version >= SDHCI_SPEC_300)
2811 del_timer_sync(&host->tuning_timer);
2002 2812
2003 tasklet_kill(&host->card_tasklet); 2813 tasklet_kill(&host->card_tasklet);
2004 tasklet_kill(&host->finish_tasklet); 2814 tasklet_kill(&host->finish_tasklet);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index d316bc79b636..745c42fa41ed 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -1,6 +1,8 @@
1/* 1/*
2 * linux/drivers/mmc/host/sdhci.h - Secure Digital Host Controller Interface driver 2 * linux/drivers/mmc/host/sdhci.h - Secure Digital Host Controller Interface driver
3 * 3 *
4 * Header file for Host Controller registers and I/O accessors.
5 *
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 * 7 *
6 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
@@ -8,19 +10,22 @@
8 * the Free Software Foundation; either version 2 of the License, or (at 10 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version. 11 * your option) any later version.
10 */ 12 */
11#ifndef __SDHCI_H 13#ifndef __SDHCI_HW_H
12#define __SDHCI_H 14#define __SDHCI_HW_H
13 15
14#include <linux/scatterlist.h> 16#include <linux/scatterlist.h>
15#include <linux/compiler.h> 17#include <linux/compiler.h>
16#include <linux/types.h> 18#include <linux/types.h>
17#include <linux/io.h> 19#include <linux/io.h>
18 20
21#include <linux/mmc/sdhci.h>
22
19/* 23/*
20 * Controller registers 24 * Controller registers
21 */ 25 */
22 26
23#define SDHCI_DMA_ADDRESS 0x00 27#define SDHCI_DMA_ADDRESS 0x00
28#define SDHCI_ARGUMENT2 SDHCI_DMA_ADDRESS
24 29
25#define SDHCI_BLOCK_SIZE 0x04 30#define SDHCI_BLOCK_SIZE 0x04
26#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF)) 31#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
@@ -32,7 +37,8 @@
32#define SDHCI_TRANSFER_MODE 0x0C 37#define SDHCI_TRANSFER_MODE 0x0C
33#define SDHCI_TRNS_DMA 0x01 38#define SDHCI_TRNS_DMA 0x01
34#define SDHCI_TRNS_BLK_CNT_EN 0x02 39#define SDHCI_TRNS_BLK_CNT_EN 0x02
35#define SDHCI_TRNS_ACMD12 0x04 40#define SDHCI_TRNS_AUTO_CMD12 0x04
41#define SDHCI_TRNS_AUTO_CMD23 0x08
36#define SDHCI_TRNS_READ 0x10 42#define SDHCI_TRNS_READ 0x10
37#define SDHCI_TRNS_MULTI 0x20 43#define SDHCI_TRNS_MULTI 0x20
38 44
@@ -41,6 +47,7 @@
41#define SDHCI_CMD_CRC 0x08 47#define SDHCI_CMD_CRC 0x08
42#define SDHCI_CMD_INDEX 0x10 48#define SDHCI_CMD_INDEX 0x10
43#define SDHCI_CMD_DATA 0x20 49#define SDHCI_CMD_DATA 0x20
50#define SDHCI_CMD_ABORTCMD 0xC0
44 51
45#define SDHCI_CMD_RESP_NONE 0x00 52#define SDHCI_CMD_RESP_NONE 0x00
46#define SDHCI_CMD_RESP_LONG 0x01 53#define SDHCI_CMD_RESP_LONG 0x01
@@ -48,6 +55,7 @@
48#define SDHCI_CMD_RESP_SHORT_BUSY 0x03 55#define SDHCI_CMD_RESP_SHORT_BUSY 0x03
49 56
50#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff)) 57#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
58#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f)
51 59
52#define SDHCI_RESPONSE 0x10 60#define SDHCI_RESPONSE 0x10
53 61
@@ -62,8 +70,10 @@
62#define SDHCI_DATA_AVAILABLE 0x00000800 70#define SDHCI_DATA_AVAILABLE 0x00000800
63#define SDHCI_CARD_PRESENT 0x00010000 71#define SDHCI_CARD_PRESENT 0x00010000
64#define SDHCI_WRITE_PROTECT 0x00080000 72#define SDHCI_WRITE_PROTECT 0x00080000
73#define SDHCI_DATA_LVL_MASK 0x00F00000
74#define SDHCI_DATA_LVL_SHIFT 20
65 75
66#define SDHCI_HOST_CONTROL 0x28 76#define SDHCI_HOST_CONTROL 0x28
67#define SDHCI_CTRL_LED 0x01 77#define SDHCI_CTRL_LED 0x01
68#define SDHCI_CTRL_4BITBUS 0x02 78#define SDHCI_CTRL_4BITBUS 0x02
69#define SDHCI_CTRL_HISPD 0x04 79#define SDHCI_CTRL_HISPD 0x04
@@ -72,7 +82,7 @@
72#define SDHCI_CTRL_ADMA1 0x08 82#define SDHCI_CTRL_ADMA1 0x08
73#define SDHCI_CTRL_ADMA32 0x10 83#define SDHCI_CTRL_ADMA32 0x10
74#define SDHCI_CTRL_ADMA64 0x18 84#define SDHCI_CTRL_ADMA64 0x18
75#define SDHCI_CTRL_8BITBUS 0x20 85#define SDHCI_CTRL_8BITBUS 0x20
76 86
77#define SDHCI_POWER_CONTROL 0x29 87#define SDHCI_POWER_CONTROL 0x29
78#define SDHCI_POWER_ON 0x01 88#define SDHCI_POWER_ON 0x01
@@ -83,9 +93,17 @@
83#define SDHCI_BLOCK_GAP_CONTROL 0x2A 93#define SDHCI_BLOCK_GAP_CONTROL 0x2A
84 94
85#define SDHCI_WAKE_UP_CONTROL 0x2B 95#define SDHCI_WAKE_UP_CONTROL 0x2B
96#define SDHCI_WAKE_ON_INT 0x01
97#define SDHCI_WAKE_ON_INSERT 0x02
98#define SDHCI_WAKE_ON_REMOVE 0x04
86 99
87#define SDHCI_CLOCK_CONTROL 0x2C 100#define SDHCI_CLOCK_CONTROL 0x2C
88#define SDHCI_DIVIDER_SHIFT 8 101#define SDHCI_DIVIDER_SHIFT 8
102#define SDHCI_DIVIDER_HI_SHIFT 6
103#define SDHCI_DIV_MASK 0xFF
104#define SDHCI_DIV_MASK_LEN 8
105#define SDHCI_DIV_HI_MASK 0x300
106#define SDHCI_PROG_CLOCK_MODE 0x0020
89#define SDHCI_CLOCK_CARD_EN 0x0004 107#define SDHCI_CLOCK_CARD_EN 0x0004
90#define SDHCI_CLOCK_INT_STABLE 0x0002 108#define SDHCI_CLOCK_INT_STABLE 0x0002
91#define SDHCI_CLOCK_INT_EN 0x0001 109#define SDHCI_CLOCK_INT_EN 0x0001
@@ -133,16 +151,33 @@
133 151
134#define SDHCI_ACMD12_ERR 0x3C 152#define SDHCI_ACMD12_ERR 0x3C
135 153
136/* 3E-3F reserved */ 154#define SDHCI_HOST_CONTROL2 0x3E
155#define SDHCI_CTRL_UHS_MASK 0x0007
156#define SDHCI_CTRL_UHS_SDR12 0x0000
157#define SDHCI_CTRL_UHS_SDR25 0x0001
158#define SDHCI_CTRL_UHS_SDR50 0x0002
159#define SDHCI_CTRL_UHS_SDR104 0x0003
160#define SDHCI_CTRL_UHS_DDR50 0x0004
161#define SDHCI_CTRL_VDD_180 0x0008
162#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030
163#define SDHCI_CTRL_DRV_TYPE_B 0x0000
164#define SDHCI_CTRL_DRV_TYPE_A 0x0010
165#define SDHCI_CTRL_DRV_TYPE_C 0x0020
166#define SDHCI_CTRL_DRV_TYPE_D 0x0030
167#define SDHCI_CTRL_EXEC_TUNING 0x0040
168#define SDHCI_CTRL_TUNED_CLK 0x0080
169#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
137 170
138#define SDHCI_CAPABILITIES 0x40 171#define SDHCI_CAPABILITIES 0x40
139#define SDHCI_TIMEOUT_CLK_MASK 0x0000003F 172#define SDHCI_TIMEOUT_CLK_MASK 0x0000003F
140#define SDHCI_TIMEOUT_CLK_SHIFT 0 173#define SDHCI_TIMEOUT_CLK_SHIFT 0
141#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080 174#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080
142#define SDHCI_CLOCK_BASE_MASK 0x00003F00 175#define SDHCI_CLOCK_BASE_MASK 0x00003F00
176#define SDHCI_CLOCK_V3_BASE_MASK 0x0000FF00
143#define SDHCI_CLOCK_BASE_SHIFT 8 177#define SDHCI_CLOCK_BASE_SHIFT 8
144#define SDHCI_MAX_BLOCK_MASK 0x00030000 178#define SDHCI_MAX_BLOCK_MASK 0x00030000
145#define SDHCI_MAX_BLOCK_SHIFT 16 179#define SDHCI_MAX_BLOCK_SHIFT 16
180#define SDHCI_CAN_DO_8BIT 0x00040000
146#define SDHCI_CAN_DO_ADMA2 0x00080000 181#define SDHCI_CAN_DO_ADMA2 0x00080000
147#define SDHCI_CAN_DO_ADMA1 0x00100000 182#define SDHCI_CAN_DO_ADMA1 0x00100000
148#define SDHCI_CAN_DO_HISPD 0x00200000 183#define SDHCI_CAN_DO_HISPD 0x00200000
@@ -152,9 +187,30 @@
152#define SDHCI_CAN_VDD_180 0x04000000 187#define SDHCI_CAN_VDD_180 0x04000000
153#define SDHCI_CAN_64BIT 0x10000000 188#define SDHCI_CAN_64BIT 0x10000000
154 189
155/* 44-47 reserved for more caps */ 190#define SDHCI_SUPPORT_SDR50 0x00000001
156 191#define SDHCI_SUPPORT_SDR104 0x00000002
157#define SDHCI_MAX_CURRENT 0x48 192#define SDHCI_SUPPORT_DDR50 0x00000004
193#define SDHCI_DRIVER_TYPE_A 0x00000010
194#define SDHCI_DRIVER_TYPE_C 0x00000020
195#define SDHCI_DRIVER_TYPE_D 0x00000040
196#define SDHCI_RETUNING_TIMER_COUNT_MASK 0x00000F00
197#define SDHCI_RETUNING_TIMER_COUNT_SHIFT 8
198#define SDHCI_USE_SDR50_TUNING 0x00002000
199#define SDHCI_RETUNING_MODE_MASK 0x0000C000
200#define SDHCI_RETUNING_MODE_SHIFT 14
201#define SDHCI_CLOCK_MUL_MASK 0x00FF0000
202#define SDHCI_CLOCK_MUL_SHIFT 16
203
204#define SDHCI_CAPABILITIES_1 0x44
205
206#define SDHCI_MAX_CURRENT 0x48
207#define SDHCI_MAX_CURRENT_330_MASK 0x0000FF
208#define SDHCI_MAX_CURRENT_330_SHIFT 0
209#define SDHCI_MAX_CURRENT_300_MASK 0x00FF00
210#define SDHCI_MAX_CURRENT_300_SHIFT 8
211#define SDHCI_MAX_CURRENT_180_MASK 0xFF0000
212#define SDHCI_MAX_CURRENT_180_SHIFT 16
213#define SDHCI_MAX_CURRENT_MULTIPLIER 4
158 214
159/* 4C-4F reserved for more max current */ 215/* 4C-4F reserved for more max current */
160 216
@@ -178,134 +234,20 @@
178#define SDHCI_SPEC_VER_SHIFT 0 234#define SDHCI_SPEC_VER_SHIFT 0
179#define SDHCI_SPEC_100 0 235#define SDHCI_SPEC_100 0
180#define SDHCI_SPEC_200 1 236#define SDHCI_SPEC_200 1
237#define SDHCI_SPEC_300 2
181 238
182struct sdhci_ops; 239/*
183 240 * End of controller registers.
184struct sdhci_host { 241 */
185 /* Data set by hardware interface driver */
186 const char *hw_name; /* Hardware bus name */
187
188 unsigned int quirks; /* Deviations from spec. */
189
190/* Controller doesn't honor resets unless we touch the clock register */
191#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
192/* Controller has bad caps bits, but really supports DMA */
193#define SDHCI_QUIRK_FORCE_DMA (1<<1)
194/* Controller doesn't like to be reset when there is no card inserted. */
195#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
196/* Controller doesn't like clearing the power reg before a change */
197#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
198/* Controller has flaky internal state so reset it on each ios change */
199#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
200/* Controller has an unusable DMA engine */
201#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
202/* Controller has an unusable ADMA engine */
203#define SDHCI_QUIRK_BROKEN_ADMA (1<<6)
204/* Controller can only DMA from 32-bit aligned addresses */
205#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7)
206/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
207#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8)
208/* Controller can only ADMA chunks that are a multiple of 32 bits */
209#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9)
210/* Controller needs to be reset after each request to stay stable */
211#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10)
212/* Controller needs voltage and power writes to happen separately */
213#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11)
214/* Controller provides an incorrect timeout value for transfers */
215#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
216/* Controller has an issue with buffer bits for small transfers */
217#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
218/* Controller does not provide transfer-complete interrupt when not busy */
219#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14)
220/* Controller has unreliable card detection */
221#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
222/* Controller reports inverted write-protect state */
223#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
224/* Controller has nonstandard clock management */
225#define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17)
226/* Controller does not like fast PIO transfers */
227#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
228/* Controller losing signal/interrupt enable states after reset */
229#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
230/* Controller has to be forced to use block size of 2048 bytes */
231#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
232/* Controller cannot do multi-block transfers */
233#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21)
234/* Controller can only handle 1-bit data transfers */
235#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
236/* Controller needs 10ms delay between applying power and clock */
237#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
238/* Controller uses SDCLK instead of TMCLK for data timeouts */
239#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
240/* Controller reports wrong base clock capability */
241#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
242/* Controller cannot support End Attribute in NOP ADMA descriptor */
243#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
244/* Controller is missing device caps. Use caps provided by host */
245#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
246/* Controller uses Auto CMD12 command to stop the transfer */
247#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
248/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
249#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
250
251 int irq; /* Device IRQ */
252 void __iomem * ioaddr; /* Mapped address */
253
254 const struct sdhci_ops *ops; /* Low level hw interface */
255
256 struct regulator *vmmc; /* Power regulator */
257
258 /* Internal data */
259 struct mmc_host *mmc; /* MMC structure */
260 u64 dma_mask; /* custom DMA mask */
261
262#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
263 struct led_classdev led; /* LED control */
264 char led_name[32];
265#endif
266
267 spinlock_t lock; /* Mutex */
268
269 int flags; /* Host attributes */
270#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */
271#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
272#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
273#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
274
275 unsigned int version; /* SDHCI spec. version */
276
277 unsigned int max_clk; /* Max possible freq (MHz) */
278 unsigned int timeout_clk; /* Timeout freq (KHz) */
279
280 unsigned int clock; /* Current clock (MHz) */
281 u8 pwr; /* Current voltage */
282
283 struct mmc_request *mrq; /* Current request */
284 struct mmc_command *cmd; /* Current command */
285 struct mmc_data *data; /* Current data request */
286 unsigned int data_early:1; /* Data finished before cmd */
287
288 struct sg_mapping_iter sg_miter; /* SG state for PIO */
289 unsigned int blocks; /* remaining PIO blocks */
290
291 int sg_count; /* Mapped sg entries */
292
293 u8 *adma_desc; /* ADMA descriptor table */
294 u8 *align_buffer; /* Bounce buffer */
295
296 dma_addr_t adma_addr; /* Mapped ADMA descr. table */
297 dma_addr_t align_addr; /* Mapped bounce buffer */
298
299 struct tasklet_struct card_tasklet; /* Tasklet structures */
300 struct tasklet_struct finish_tasklet;
301
302 struct timer_list timer; /* Timer for timeouts */
303
304 unsigned int caps; /* Alternative capabilities */
305 242
306 unsigned long private[0] ____cacheline_aligned; 243#define SDHCI_MAX_DIV_SPEC_200 256
307}; 244#define SDHCI_MAX_DIV_SPEC_300 2046
308 245
246/*
247 * Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2.
248 */
249#define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024)
250#define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12)
309 251
310struct sdhci_ops { 252struct sdhci_ops {
311#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 253#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -323,6 +265,15 @@ struct sdhci_ops {
323 unsigned int (*get_max_clock)(struct sdhci_host *host); 265 unsigned int (*get_max_clock)(struct sdhci_host *host);
324 unsigned int (*get_min_clock)(struct sdhci_host *host); 266 unsigned int (*get_min_clock)(struct sdhci_host *host);
325 unsigned int (*get_timeout_clock)(struct sdhci_host *host); 267 unsigned int (*get_timeout_clock)(struct sdhci_host *host);
268 int (*platform_8bit_width)(struct sdhci_host *host,
269 int width);
270 void (*platform_send_init_74_clocks)(struct sdhci_host *host,
271 u8 power_mode);
272 unsigned int (*get_ro)(struct sdhci_host *host);
273 void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
274 void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
275 int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
276
326}; 277};
327 278
328#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 279#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -425,6 +376,7 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead);
425#ifdef CONFIG_PM 376#ifdef CONFIG_PM
426extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); 377extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
427extern int sdhci_resume_host(struct sdhci_host *host); 378extern int sdhci_resume_host(struct sdhci_host *host);
379extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
428#endif 380#endif
429 381
430#endif /* __SDHCI_H */ 382#endif /* __SDHCI_HW_H */
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index 7aa65bb2af4a..496b7efbc6b0 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -30,7 +30,6 @@
30#include <linux/ioport.h> 30#include <linux/ioport.h>
31#include <linux/scatterlist.h> 31#include <linux/scatterlist.h>
32 32
33#include <pcmcia/cs.h>
34#include <pcmcia/cistpl.h> 33#include <pcmcia/cistpl.h>
35#include <pcmcia/ds.h> 34#include <pcmcia/ds.h>
36#include <linux/io.h> 35#include <linux/io.h>
@@ -77,7 +76,7 @@ static unsigned int switchlocked;
77#define BUSY_TIMEOUT 32767 76#define BUSY_TIMEOUT 32767
78 77
79/* list of supported pcmcia devices */ 78/* list of supported pcmcia devices */
80static struct pcmcia_device_id pcmcia_ids[] = { 79static const struct pcmcia_device_id pcmcia_ids[] = {
81 /* vendor and device strings followed by their crc32 hashes */ 80 /* vendor and device strings followed by their crc32 hashes */
82 PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed, 81 PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed,
83 0xc3901202), 82 0xc3901202),
@@ -447,7 +446,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
447 mmc->max_seg_size = 1024 * 512; 446 mmc->max_seg_size = 1024 * 512;
448 mmc->max_blk_size = 512; 447 mmc->max_blk_size = 512;
449 448
450 /* reset the controler */ 449 /* reset the controller */
451 if (sdricoh_reset(host)) { 450 if (sdricoh_reset(host)) {
452 dev_dbg(dev, "could not reset\n"); 451 dev_dbg(dev, "could not reset\n");
453 result = -EIO; 452 result = -EIO;
@@ -479,7 +478,7 @@ static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev)
479 dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device" 478 dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device"
480 " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]); 479 " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]);
481 480
482 /* search pci cardbus bridge that contains the mmc controler */ 481 /* search pci cardbus bridge that contains the mmc controller */
483 /* the io region is already claimed by yenta_socket... */ 482 /* the io region is already claimed by yenta_socket... */
484 while ((pci_dev = 483 while ((pci_dev =
485 pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, 484 pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
@@ -536,9 +535,7 @@ static int sdricoh_pcmcia_resume(struct pcmcia_device *link)
536#endif 535#endif
537 536
538static struct pcmcia_driver sdricoh_driver = { 537static struct pcmcia_driver sdricoh_driver = {
539 .drv = { 538 .name = DRIVER_NAME,
540 .name = DRIVER_NAME,
541 },
542 .probe = sdricoh_pcmcia_probe, 539 .probe = sdricoh_pcmcia_probe,
543 .remove = sdricoh_pcmcia_detach, 540 .remove = sdricoh_pcmcia_detach,
544 .id_table = pcmcia_ids, 541 .id_table = pcmcia_ids,
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 5d3f824bb5a3..14f8edbaa195 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -16,16 +16,21 @@
16 * 16 *
17 */ 17 */
18 18
19#include <linux/clk.h>
20#include <linux/completion.h>
21#include <linux/delay.h>
19#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
20#include <linux/mmc/host.h> 23#include <linux/dmaengine.h>
21#include <linux/mmc/card.h> 24#include <linux/mmc/card.h>
22#include <linux/mmc/core.h> 25#include <linux/mmc/core.h>
26#include <linux/mmc/host.h>
23#include <linux/mmc/mmc.h> 27#include <linux/mmc/mmc.h>
24#include <linux/mmc/sdio.h> 28#include <linux/mmc/sdio.h>
25#include <linux/delay.h>
26#include <linux/platform_device.h>
27#include <linux/clk.h>
28#include <linux/mmc/sh_mmcif.h> 29#include <linux/mmc/sh_mmcif.h>
30#include <linux/pagemap.h>
31#include <linux/platform_device.h>
32#include <linux/pm_runtime.h>
33#include <linux/spinlock.h>
29 34
30#define DRIVER_NAME "sh_mmcif" 35#define DRIVER_NAME "sh_mmcif"
31#define DRIVER_VERSION "2010-04-28" 36#define DRIVER_VERSION "2010-04-28"
@@ -62,25 +67,6 @@
62/* CE_BLOCK_SET */ 67/* CE_BLOCK_SET */
63#define BLOCK_SIZE_MASK 0x0000ffff 68#define BLOCK_SIZE_MASK 0x0000ffff
64 69
65/* CE_CLK_CTRL */
66#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */
67#define CLK_CLEAR ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
68#define CLK_SUP_PCLK ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
69#define SRSPTO_256 ((1 << 13) | (0 << 12)) /* resp timeout */
70#define SRBSYTO_29 ((1 << 11) | (1 << 10) | \
71 (1 << 9) | (1 << 8)) /* resp busy timeout */
72#define SRWDTO_29 ((1 << 7) | (1 << 6) | \
73 (1 << 5) | (1 << 4)) /* read/write timeout */
74#define SCCSTO_29 ((1 << 3) | (1 << 2) | \
75 (1 << 1) | (1 << 0)) /* ccs timeout */
76
77/* CE_BUF_ACC */
78#define BUF_ACC_DMAWEN (1 << 25)
79#define BUF_ACC_DMAREN (1 << 24)
80#define BUF_ACC_BUSW_32 (0 << 17)
81#define BUF_ACC_BUSW_16 (1 << 17)
82#define BUF_ACC_ATYP (1 << 16)
83
84/* CE_INT */ 70/* CE_INT */
85#define INT_CCSDE (1 << 29) 71#define INT_CCSDE (1 << 29)
86#define INT_CMD12DRE (1 << 26) 72#define INT_CMD12DRE (1 << 26)
@@ -165,30 +151,38 @@
165 STS2_AC12BSYTO | STS2_RSPBSYTO | \ 151 STS2_AC12BSYTO | STS2_RSPBSYTO | \
166 STS2_AC12RSPTO | STS2_RSPTO) 152 STS2_AC12RSPTO | STS2_RSPTO)
167 153
168/* CE_VERSION */
169#define SOFT_RST_ON (1 << 31)
170#define SOFT_RST_OFF (0 << 31)
171
172#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */ 154#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */
173#define CLKDEV_MMC_DATA 20000000 /* 20MHz */ 155#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
174#define CLKDEV_INIT 400000 /* 400 KHz */ 156#define CLKDEV_INIT 400000 /* 400 KHz */
175 157
158enum mmcif_state {
159 STATE_IDLE,
160 STATE_REQUEST,
161 STATE_IOS,
162};
163
176struct sh_mmcif_host { 164struct sh_mmcif_host {
177 struct mmc_host *mmc; 165 struct mmc_host *mmc;
178 struct mmc_data *data; 166 struct mmc_data *data;
179 struct mmc_command *cmd;
180 struct platform_device *pd; 167 struct platform_device *pd;
181 struct clk *hclk; 168 struct clk *hclk;
182 unsigned int clk; 169 unsigned int clk;
183 int bus_width; 170 int bus_width;
184 u16 wait_int; 171 bool sd_error;
185 u16 sd_error;
186 long timeout; 172 long timeout;
187 void __iomem *addr; 173 void __iomem *addr;
188 wait_queue_head_t intr_wait; 174 struct completion intr_wait;
175 enum mmcif_state state;
176 spinlock_t lock;
177 bool power;
178
179 /* DMA support */
180 struct dma_chan *chan_rx;
181 struct dma_chan *chan_tx;
182 struct completion dma_complete;
183 bool dma_active;
189}; 184};
190 185
191
192static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, 186static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
193 unsigned int reg, u32 val) 187 unsigned int reg, u32 val)
194{ 188{
@@ -201,6 +195,182 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
201 writel(~val & readl(host->addr + reg), host->addr + reg); 195 writel(~val & readl(host->addr + reg), host->addr + reg);
202} 196}
203 197
198static void mmcif_dma_complete(void *arg)
199{
200 struct sh_mmcif_host *host = arg;
201 dev_dbg(&host->pd->dev, "Command completed\n");
202
203 if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
204 dev_name(&host->pd->dev)))
205 return;
206
207 if (host->data->flags & MMC_DATA_READ)
208 dma_unmap_sg(host->chan_rx->device->dev,
209 host->data->sg, host->data->sg_len,
210 DMA_FROM_DEVICE);
211 else
212 dma_unmap_sg(host->chan_tx->device->dev,
213 host->data->sg, host->data->sg_len,
214 DMA_TO_DEVICE);
215
216 complete(&host->dma_complete);
217}
218
219static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
220{
221 struct scatterlist *sg = host->data->sg;
222 struct dma_async_tx_descriptor *desc = NULL;
223 struct dma_chan *chan = host->chan_rx;
224 dma_cookie_t cookie = -EINVAL;
225 int ret;
226
227 ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
228 DMA_FROM_DEVICE);
229 if (ret > 0) {
230 host->dma_active = true;
231 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
232 DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
233 }
234
235 if (desc) {
236 desc->callback = mmcif_dma_complete;
237 desc->callback_param = host;
238 cookie = dmaengine_submit(desc);
239 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
240 dma_async_issue_pending(chan);
241 }
242 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
243 __func__, host->data->sg_len, ret, cookie);
244
245 if (!desc) {
246 /* DMA failed, fall back to PIO */
247 if (ret >= 0)
248 ret = -EIO;
249 host->chan_rx = NULL;
250 host->dma_active = false;
251 dma_release_channel(chan);
252 /* Free the Tx channel too */
253 chan = host->chan_tx;
254 if (chan) {
255 host->chan_tx = NULL;
256 dma_release_channel(chan);
257 }
258 dev_warn(&host->pd->dev,
259 "DMA failed: %d, falling back to PIO\n", ret);
260 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
261 }
262
263 dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
264 desc, cookie, host->data->sg_len);
265}
266
267static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
268{
269 struct scatterlist *sg = host->data->sg;
270 struct dma_async_tx_descriptor *desc = NULL;
271 struct dma_chan *chan = host->chan_tx;
272 dma_cookie_t cookie = -EINVAL;
273 int ret;
274
275 ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
276 DMA_TO_DEVICE);
277 if (ret > 0) {
278 host->dma_active = true;
279 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
280 DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
281 }
282
283 if (desc) {
284 desc->callback = mmcif_dma_complete;
285 desc->callback_param = host;
286 cookie = dmaengine_submit(desc);
287 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
288 dma_async_issue_pending(chan);
289 }
290 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
291 __func__, host->data->sg_len, ret, cookie);
292
293 if (!desc) {
294 /* DMA failed, fall back to PIO */
295 if (ret >= 0)
296 ret = -EIO;
297 host->chan_tx = NULL;
298 host->dma_active = false;
299 dma_release_channel(chan);
300 /* Free the Rx channel too */
301 chan = host->chan_rx;
302 if (chan) {
303 host->chan_rx = NULL;
304 dma_release_channel(chan);
305 }
306 dev_warn(&host->pd->dev,
307 "DMA failed: %d, falling back to PIO\n", ret);
308 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
309 }
310
311 dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
312 desc, cookie);
313}
314
315static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
316{
317 dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
318 chan->private = arg;
319 return true;
320}
321
322static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
323 struct sh_mmcif_plat_data *pdata)
324{
325 host->dma_active = false;
326
327 /* We can only either use DMA for both Tx and Rx or not use it at all */
328 if (pdata->dma) {
329 dma_cap_mask_t mask;
330
331 dma_cap_zero(mask);
332 dma_cap_set(DMA_SLAVE, mask);
333
334 host->chan_tx = dma_request_channel(mask, sh_mmcif_filter,
335 &pdata->dma->chan_priv_tx);
336 dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
337 host->chan_tx);
338
339 if (!host->chan_tx)
340 return;
341
342 host->chan_rx = dma_request_channel(mask, sh_mmcif_filter,
343 &pdata->dma->chan_priv_rx);
344 dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
345 host->chan_rx);
346
347 if (!host->chan_rx) {
348 dma_release_channel(host->chan_tx);
349 host->chan_tx = NULL;
350 return;
351 }
352
353 init_completion(&host->dma_complete);
354 }
355}
356
357static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
358{
359 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
360 /* Descriptors are freed automatically */
361 if (host->chan_tx) {
362 struct dma_chan *chan = host->chan_tx;
363 host->chan_tx = NULL;
364 dma_release_channel(chan);
365 }
366 if (host->chan_rx) {
367 struct dma_chan *chan = host->chan_rx;
368 host->chan_rx = NULL;
369 dma_release_channel(chan);
370 }
371
372 host->dma_active = false;
373}
204 374
205static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) 375static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
206{ 376{
@@ -239,13 +409,12 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
239 u32 state1, state2; 409 u32 state1, state2;
240 int ret, timeout = 10000000; 410 int ret, timeout = 10000000;
241 411
242 host->sd_error = 0; 412 host->sd_error = false;
243 host->wait_int = 0;
244 413
245 state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); 414 state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
246 state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); 415 state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
247 pr_debug("%s: ERR HOST_STS1 = %08x\n", DRIVER_NAME, state1); 416 dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
248 pr_debug("%s: ERR HOST_STS2 = %08x\n", DRIVER_NAME, state2); 417 dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);
249 418
250 if (state1 & STS1_CMDSEQ) { 419 if (state1 & STS1_CMDSEQ) {
251 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); 420 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
@@ -253,8 +422,8 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
253 while (1) { 422 while (1) {
254 timeout--; 423 timeout--;
255 if (timeout < 0) { 424 if (timeout < 0) {
256 pr_err(DRIVER_NAME": Forceed end of " \ 425 dev_err(&host->pd->dev,
257 "command sequence timeout err\n"); 426 "Forceed end of command sequence timeout err\n");
258 return -EIO; 427 return -EIO;
259 } 428 }
260 if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) 429 if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
@@ -263,18 +432,18 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
263 mdelay(1); 432 mdelay(1);
264 } 433 }
265 sh_mmcif_sync_reset(host); 434 sh_mmcif_sync_reset(host);
266 pr_debug(DRIVER_NAME": Forced end of command sequence\n"); 435 dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
267 return -EIO; 436 return -EIO;
268 } 437 }
269 438
270 if (state2 & STS2_CRC_ERR) { 439 if (state2 & STS2_CRC_ERR) {
271 pr_debug(DRIVER_NAME": Happened CRC error\n"); 440 dev_dbg(&host->pd->dev, ": Happened CRC error\n");
272 ret = -EIO; 441 ret = -EIO;
273 } else if (state2 & STS2_TIMEOUT_ERR) { 442 } else if (state2 & STS2_TIMEOUT_ERR) {
274 pr_debug(DRIVER_NAME": Happened Timeout error\n"); 443 dev_dbg(&host->pd->dev, ": Happened Timeout error\n");
275 ret = -ETIMEDOUT; 444 ret = -ETIMEDOUT;
276 } else { 445 } else {
277 pr_debug(DRIVER_NAME": Happened End/Index error\n"); 446 dev_dbg(&host->pd->dev, ": Happened End/Index error\n");
278 ret = -EIO; 447 ret = -EIO;
279 } 448 }
280 return ret; 449 return ret;
@@ -287,17 +456,13 @@ static int sh_mmcif_single_read(struct sh_mmcif_host *host,
287 long time; 456 long time;
288 u32 blocksize, i, *p = sg_virt(data->sg); 457 u32 blocksize, i, *p = sg_virt(data->sg);
289 458
290 host->wait_int = 0;
291
292 /* buf read enable */ 459 /* buf read enable */
293 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 460 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
294 time = wait_event_interruptible_timeout(host->intr_wait, 461 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
295 host->wait_int == 1 || 462 host->timeout);
296 host->sd_error == 1, host->timeout); 463 if (time <= 0 || host->sd_error)
297 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
298 return sh_mmcif_error_manage(host); 464 return sh_mmcif_error_manage(host);
299 465
300 host->wait_int = 0;
301 blocksize = (BLOCK_SIZE_MASK & 466 blocksize = (BLOCK_SIZE_MASK &
302 sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3; 467 sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
303 for (i = 0; i < blocksize / 4; i++) 468 for (i = 0; i < blocksize / 4; i++)
@@ -305,13 +470,11 @@ static int sh_mmcif_single_read(struct sh_mmcif_host *host,
305 470
306 /* buffer read end */ 471 /* buffer read end */
307 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); 472 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
308 time = wait_event_interruptible_timeout(host->intr_wait, 473 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
309 host->wait_int == 1 || 474 host->timeout);
310 host->sd_error == 1, host->timeout); 475 if (time <= 0 || host->sd_error)
311 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
312 return sh_mmcif_error_manage(host); 476 return sh_mmcif_error_manage(host);
313 477
314 host->wait_int = 0;
315 return 0; 478 return 0;
316} 479}
317 480
@@ -326,19 +489,15 @@ static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
326 MMCIF_CE_BLOCK_SET); 489 MMCIF_CE_BLOCK_SET);
327 for (j = 0; j < data->sg_len; j++) { 490 for (j = 0; j < data->sg_len; j++) {
328 p = sg_virt(data->sg); 491 p = sg_virt(data->sg);
329 host->wait_int = 0;
330 for (sec = 0; sec < data->sg->length / blocksize; sec++) { 492 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
331 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 493 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
332 /* buf read enable */ 494 /* buf read enable */
333 time = wait_event_interruptible_timeout(host->intr_wait, 495 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
334 host->wait_int == 1 || 496 host->timeout);
335 host->sd_error == 1, host->timeout);
336 497
337 if (host->wait_int != 1 && 498 if (time <= 0 || host->sd_error)
338 (time == 0 || host->sd_error != 0))
339 return sh_mmcif_error_manage(host); 499 return sh_mmcif_error_manage(host);
340 500
341 host->wait_int = 0;
342 for (i = 0; i < blocksize / 4; i++) 501 for (i = 0; i < blocksize / 4; i++)
343 *p++ = sh_mmcif_readl(host->addr, 502 *p++ = sh_mmcif_readl(host->addr,
344 MMCIF_CE_DATA); 503 MMCIF_CE_DATA);
@@ -356,17 +515,14 @@ static int sh_mmcif_single_write(struct sh_mmcif_host *host,
356 long time; 515 long time;
357 u32 blocksize, i, *p = sg_virt(data->sg); 516 u32 blocksize, i, *p = sg_virt(data->sg);
358 517
359 host->wait_int = 0;
360 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 518 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
361 519
362 /* buf write enable */ 520 /* buf write enable */
363 time = wait_event_interruptible_timeout(host->intr_wait, 521 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
364 host->wait_int == 1 || 522 host->timeout);
365 host->sd_error == 1, host->timeout); 523 if (time <= 0 || host->sd_error)
366 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
367 return sh_mmcif_error_manage(host); 524 return sh_mmcif_error_manage(host);
368 525
369 host->wait_int = 0;
370 blocksize = (BLOCK_SIZE_MASK & 526 blocksize = (BLOCK_SIZE_MASK &
371 sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3; 527 sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
372 for (i = 0; i < blocksize / 4; i++) 528 for (i = 0; i < blocksize / 4; i++)
@@ -375,13 +531,11 @@ static int sh_mmcif_single_write(struct sh_mmcif_host *host,
375 /* buffer write end */ 531 /* buffer write end */
376 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); 532 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
377 533
378 time = wait_event_interruptible_timeout(host->intr_wait, 534 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
379 host->wait_int == 1 || 535 host->timeout);
380 host->sd_error == 1, host->timeout); 536 if (time <= 0 || host->sd_error)
381 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
382 return sh_mmcif_error_manage(host); 537 return sh_mmcif_error_manage(host);
383 538
384 host->wait_int = 0;
385 return 0; 539 return 0;
386} 540}
387 541
@@ -397,19 +551,15 @@ static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
397 551
398 for (j = 0; j < data->sg_len; j++) { 552 for (j = 0; j < data->sg_len; j++) {
399 p = sg_virt(data->sg); 553 p = sg_virt(data->sg);
400 host->wait_int = 0;
401 for (sec = 0; sec < data->sg->length / blocksize; sec++) { 554 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
402 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 555 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
403 /* buf write enable*/ 556 /* buf write enable*/
404 time = wait_event_interruptible_timeout(host->intr_wait, 557 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
405 host->wait_int == 1 || 558 host->timeout);
406 host->sd_error == 1, host->timeout);
407 559
408 if (host->wait_int != 1 && 560 if (time <= 0 || host->sd_error)
409 (time == 0 || host->sd_error != 0))
410 return sh_mmcif_error_manage(host); 561 return sh_mmcif_error_manage(host);
411 562
412 host->wait_int = 0;
413 for (i = 0; i < blocksize / 4; i++) 563 for (i = 0; i < blocksize / 4; i++)
414 sh_mmcif_writel(host->addr, 564 sh_mmcif_writel(host->addr,
415 MMCIF_CE_DATA, *p++); 565 MMCIF_CE_DATA, *p++);
@@ -457,7 +607,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
457 tmp |= CMD_SET_RTYP_17B; 607 tmp |= CMD_SET_RTYP_17B;
458 break; 608 break;
459 default: 609 default:
460 pr_err(DRIVER_NAME": Not support type response.\n"); 610 dev_err(&host->pd->dev, "Unsupported response type.\n");
461 break; 611 break;
462 } 612 }
463 switch (opc) { 613 switch (opc) {
@@ -485,7 +635,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
485 tmp |= CMD_SET_DATW_8; 635 tmp |= CMD_SET_DATW_8;
486 break; 636 break;
487 default: 637 default:
488 pr_err(DRIVER_NAME": Not support bus width.\n"); 638 dev_err(&host->pd->dev, "Unsupported bus width.\n");
489 break; 639 break;
490 } 640 }
491 } 641 }
@@ -513,10 +663,10 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
513 return opc = ((opc << 24) | tmp); 663 return opc = ((opc << 24) | tmp);
514} 664}
515 665
516static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host, 666static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
517 struct mmc_request *mrq, u32 opc) 667 struct mmc_request *mrq, u32 opc)
518{ 668{
519 u32 ret; 669 int ret;
520 670
521 switch (opc) { 671 switch (opc) {
522 case MMC_READ_MULTIPLE_BLOCK: 672 case MMC_READ_MULTIPLE_BLOCK:
@@ -533,7 +683,7 @@ static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host,
533 ret = sh_mmcif_single_read(host, mrq); 683 ret = sh_mmcif_single_read(host, mrq);
534 break; 684 break;
535 default: 685 default:
536 pr_err(DRIVER_NAME": NOT SUPPORT CMD = d'%08d\n", opc); 686 dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
537 ret = -EINVAL; 687 ret = -EINVAL;
538 break; 688 break;
539 } 689 }
@@ -547,8 +697,6 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
547 int ret = 0, mask = 0; 697 int ret = 0, mask = 0;
548 u32 opc = cmd->opcode; 698 u32 opc = cmd->opcode;
549 699
550 host->cmd = cmd;
551
552 switch (opc) { 700 switch (opc) {
553 /* respons busy check */ 701 /* respons busy check */
554 case MMC_SWITCH: 702 case MMC_SWITCH:
@@ -579,13 +727,12 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
579 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask); 727 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
580 /* set arg */ 728 /* set arg */
581 sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg); 729 sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
582 host->wait_int = 0;
583 /* set cmd */ 730 /* set cmd */
584 sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc); 731 sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
585 732
586 time = wait_event_interruptible_timeout(host->intr_wait, 733 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
587 host->wait_int == 1 || host->sd_error == 1, host->timeout); 734 host->timeout);
588 if (host->wait_int != 1 && time == 0) { 735 if (time <= 0) {
589 cmd->error = sh_mmcif_error_manage(host); 736 cmd->error = sh_mmcif_error_manage(host);
590 return; 737 return;
591 } 738 }
@@ -597,26 +744,34 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
597 cmd->error = -ETIMEDOUT; 744 cmd->error = -ETIMEDOUT;
598 break; 745 break;
599 default: 746 default:
600 pr_debug("%s: Cmd(d'%d) err\n", 747 dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
601 DRIVER_NAME, cmd->opcode); 748 cmd->opcode);
602 cmd->error = sh_mmcif_error_manage(host); 749 cmd->error = sh_mmcif_error_manage(host);
603 break; 750 break;
604 } 751 }
605 host->sd_error = 0; 752 host->sd_error = false;
606 host->wait_int = 0;
607 return; 753 return;
608 } 754 }
609 if (!(cmd->flags & MMC_RSP_PRESENT)) { 755 if (!(cmd->flags & MMC_RSP_PRESENT)) {
610 cmd->error = ret; 756 cmd->error = 0;
611 host->wait_int = 0;
612 return; 757 return;
613 } 758 }
614 if (host->wait_int == 1) { 759 sh_mmcif_get_response(host, cmd);
615 sh_mmcif_get_response(host, cmd);
616 host->wait_int = 0;
617 }
618 if (host->data) { 760 if (host->data) {
619 ret = sh_mmcif_data_trans(host, mrq, cmd->opcode); 761 if (!host->dma_active) {
762 ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
763 } else {
764 long time =
765 wait_for_completion_interruptible_timeout(&host->dma_complete,
766 host->timeout);
767 if (!time)
768 ret = -ETIMEDOUT;
769 else if (time < 0)
770 ret = time;
771 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
772 BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
773 host->dma_active = false;
774 }
620 if (ret < 0) 775 if (ret < 0)
621 mrq->data->bytes_xfered = 0; 776 mrq->data->bytes_xfered = 0;
622 else 777 else
@@ -636,37 +791,49 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
636 else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) 791 else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
637 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); 792 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
638 else { 793 else {
639 pr_err(DRIVER_NAME": not support stop cmd\n"); 794 dev_err(&host->pd->dev, "unsupported stop cmd\n");
640 cmd->error = sh_mmcif_error_manage(host); 795 cmd->error = sh_mmcif_error_manage(host);
641 return; 796 return;
642 } 797 }
643 798
644 time = wait_event_interruptible_timeout(host->intr_wait, 799 time = wait_for_completion_interruptible_timeout(&host->intr_wait,
645 host->wait_int == 1 || 800 host->timeout);
646 host->sd_error == 1, host->timeout); 801 if (time <= 0 || host->sd_error) {
647 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) {
648 cmd->error = sh_mmcif_error_manage(host); 802 cmd->error = sh_mmcif_error_manage(host);
649 return; 803 return;
650 } 804 }
651 sh_mmcif_get_cmd12response(host, cmd); 805 sh_mmcif_get_cmd12response(host, cmd);
652 host->wait_int = 0;
653 cmd->error = 0; 806 cmd->error = 0;
654} 807}
655 808
656static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) 809static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
657{ 810{
658 struct sh_mmcif_host *host = mmc_priv(mmc); 811 struct sh_mmcif_host *host = mmc_priv(mmc);
812 unsigned long flags;
813
814 spin_lock_irqsave(&host->lock, flags);
815 if (host->state != STATE_IDLE) {
816 spin_unlock_irqrestore(&host->lock, flags);
817 mrq->cmd->error = -EAGAIN;
818 mmc_request_done(mmc, mrq);
819 return;
820 }
821
822 host->state = STATE_REQUEST;
823 spin_unlock_irqrestore(&host->lock, flags);
659 824
660 switch (mrq->cmd->opcode) { 825 switch (mrq->cmd->opcode) {
661 /* MMCIF does not support SD/SDIO command */ 826 /* MMCIF does not support SD/SDIO command */
662 case SD_IO_SEND_OP_COND: 827 case SD_IO_SEND_OP_COND:
663 case MMC_APP_CMD: 828 case MMC_APP_CMD:
829 host->state = STATE_IDLE;
664 mrq->cmd->error = -ETIMEDOUT; 830 mrq->cmd->error = -ETIMEDOUT;
665 mmc_request_done(mmc, mrq); 831 mmc_request_done(mmc, mrq);
666 return; 832 return;
667 case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ 833 case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
668 if (!mrq->data) { 834 if (!mrq->data) {
669 /* send_if_cond cmd (not support) */ 835 /* send_if_cond cmd (not support) */
836 host->state = STATE_IDLE;
670 mrq->cmd->error = -ETIMEDOUT; 837 mrq->cmd->error = -ETIMEDOUT;
671 mmc_request_done(mmc, mrq); 838 mmc_request_done(mmc, mrq);
672 return; 839 return;
@@ -676,15 +843,21 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
676 break; 843 break;
677 } 844 }
678 host->data = mrq->data; 845 host->data = mrq->data;
846 if (mrq->data) {
847 if (mrq->data->flags & MMC_DATA_READ) {
848 if (host->chan_rx)
849 sh_mmcif_start_dma_rx(host);
850 } else {
851 if (host->chan_tx)
852 sh_mmcif_start_dma_tx(host);
853 }
854 }
679 sh_mmcif_start_cmd(host, mrq, mrq->cmd); 855 sh_mmcif_start_cmd(host, mrq, mrq->cmd);
680 host->data = NULL; 856 host->data = NULL;
681 857
682 if (mrq->cmd->error != 0) { 858 if (!mrq->cmd->error && mrq->stop)
683 mmc_request_done(mmc, mrq);
684 return;
685 }
686 if (mrq->stop)
687 sh_mmcif_stop_cmd(host, mrq, mrq->stop); 859 sh_mmcif_stop_cmd(host, mrq, mrq->stop);
860 host->state = STATE_IDLE;
688 mmc_request_done(mmc, mrq); 861 mmc_request_done(mmc, mrq);
689} 862}
690 863
@@ -692,27 +865,64 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
692{ 865{
693 struct sh_mmcif_host *host = mmc_priv(mmc); 866 struct sh_mmcif_host *host = mmc_priv(mmc);
694 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; 867 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
868 unsigned long flags;
695 869
696 if (ios->power_mode == MMC_POWER_OFF) { 870 spin_lock_irqsave(&host->lock, flags);
697 /* clock stop */ 871 if (host->state != STATE_IDLE) {
698 sh_mmcif_clock_control(host, 0); 872 spin_unlock_irqrestore(&host->lock, flags);
699 if (p->down_pwr)
700 p->down_pwr(host->pd);
701 return; 873 return;
702 } else if (ios->power_mode == MMC_POWER_UP) { 874 }
875
876 host->state = STATE_IOS;
877 spin_unlock_irqrestore(&host->lock, flags);
878
879 if (ios->power_mode == MMC_POWER_UP) {
703 if (p->set_pwr) 880 if (p->set_pwr)
704 p->set_pwr(host->pd, ios->power_mode); 881 p->set_pwr(host->pd, ios->power_mode);
882 if (!host->power) {
883 /* See if we also get DMA */
884 sh_mmcif_request_dma(host, host->pd->dev.platform_data);
885 pm_runtime_get_sync(&host->pd->dev);
886 host->power = true;
887 }
888 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
889 /* clock stop */
890 sh_mmcif_clock_control(host, 0);
891 if (ios->power_mode == MMC_POWER_OFF) {
892 if (host->power) {
893 pm_runtime_put(&host->pd->dev);
894 sh_mmcif_release_dma(host);
895 host->power = false;
896 }
897 if (p->down_pwr)
898 p->down_pwr(host->pd);
899 }
900 host->state = STATE_IDLE;
901 return;
705 } 902 }
706 903
707 if (ios->clock) 904 if (ios->clock)
708 sh_mmcif_clock_control(host, ios->clock); 905 sh_mmcif_clock_control(host, ios->clock);
709 906
710 host->bus_width = ios->bus_width; 907 host->bus_width = ios->bus_width;
908 host->state = STATE_IDLE;
909}
910
911static int sh_mmcif_get_cd(struct mmc_host *mmc)
912{
913 struct sh_mmcif_host *host = mmc_priv(mmc);
914 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
915
916 if (!p->get_cd)
917 return -ENOSYS;
918 else
919 return p->get_cd(host->pd);
711} 920}
712 921
713static struct mmc_host_ops sh_mmcif_ops = { 922static struct mmc_host_ops sh_mmcif_ops = {
714 .request = sh_mmcif_request, 923 .request = sh_mmcif_request,
715 .set_ios = sh_mmcif_set_ios, 924 .set_ios = sh_mmcif_set_ios,
925 .get_cd = sh_mmcif_get_cd,
716}; 926};
717 927
718static void sh_mmcif_detect(struct mmc_host *mmc) 928static void sh_mmcif_detect(struct mmc_host *mmc)
@@ -723,7 +933,7 @@ static void sh_mmcif_detect(struct mmc_host *mmc)
723static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) 933static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
724{ 934{
725 struct sh_mmcif_host *host = dev_id; 935 struct sh_mmcif_host *host = dev_id;
726 u32 state = 0; 936 u32 state;
727 int err = 0; 937 int err = 0;
728 938
729 state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); 939 state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
@@ -762,17 +972,19 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
762 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); 972 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
763 err = 1; 973 err = 1;
764 } else { 974 } else {
765 pr_debug("%s: Not support int\n", DRIVER_NAME); 975 dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
766 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); 976 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
767 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); 977 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
768 err = 1; 978 err = 1;
769 } 979 }
770 if (err) { 980 if (err) {
771 host->sd_error = 1; 981 host->sd_error = true;
772 pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state); 982 dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
773 } 983 }
774 host->wait_int = 1; 984 if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
775 wake_up(&host->intr_wait); 985 complete(&host->intr_wait);
986 else
987 dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
776 988
777 return IRQ_HANDLED; 989 return IRQ_HANDLED;
778} 990}
@@ -781,8 +993,8 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
781{ 993{
782 int ret = 0, irq[2]; 994 int ret = 0, irq[2];
783 struct mmc_host *mmc; 995 struct mmc_host *mmc;
784 struct sh_mmcif_host *host = NULL; 996 struct sh_mmcif_host *host;
785 struct sh_mmcif_plat_data *pd = NULL; 997 struct sh_mmcif_plat_data *pd;
786 struct resource *res; 998 struct resource *res;
787 void __iomem *reg; 999 void __iomem *reg;
788 char clk_name[8]; 1000 char clk_name[8];
@@ -790,7 +1002,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
790 irq[0] = platform_get_irq(pdev, 0); 1002 irq[0] = platform_get_irq(pdev, 0);
791 irq[1] = platform_get_irq(pdev, 1); 1003 irq[1] = platform_get_irq(pdev, 1);
792 if (irq[0] < 0 || irq[1] < 0) { 1004 if (irq[0] < 0 || irq[1] < 0) {
793 pr_err(DRIVER_NAME": Get irq error\n"); 1005 dev_err(&pdev->dev, "Get irq error\n");
794 return -ENXIO; 1006 return -ENXIO;
795 } 1007 }
796 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1008 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -803,7 +1015,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
803 dev_err(&pdev->dev, "ioremap error.\n"); 1015 dev_err(&pdev->dev, "ioremap error.\n");
804 return -ENOMEM; 1016 return -ENOMEM;
805 } 1017 }
806 pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data); 1018 pd = pdev->dev.platform_data;
807 if (!pd) { 1019 if (!pd) {
808 dev_err(&pdev->dev, "sh_mmcif plat data error.\n"); 1020 dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
809 ret = -ENXIO; 1021 ret = -ENXIO;
@@ -830,7 +1042,8 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
830 host->clk = clk_get_rate(host->hclk); 1042 host->clk = clk_get_rate(host->hclk);
831 host->pd = pdev; 1043 host->pd = pdev;
832 1044
833 init_waitqueue_head(&host->intr_wait); 1045 init_completion(&host->intr_wait);
1046 spin_lock_init(&host->lock);
834 1047
835 mmc->ops = &sh_mmcif_ops; 1048 mmc->ops = &sh_mmcif_ops;
836 mmc->f_max = host->clk; 1049 mmc->f_max = host->clk;
@@ -846,38 +1059,50 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
846 mmc->caps = MMC_CAP_MMC_HIGHSPEED; 1059 mmc->caps = MMC_CAP_MMC_HIGHSPEED;
847 if (pd->caps) 1060 if (pd->caps)
848 mmc->caps |= pd->caps; 1061 mmc->caps |= pd->caps;
849 mmc->max_phys_segs = 128; 1062 mmc->max_segs = 32;
850 mmc->max_hw_segs = 128;
851 mmc->max_blk_size = 512; 1063 mmc->max_blk_size = 512;
852 mmc->max_blk_count = 65535; 1064 mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
853 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1065 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
854 mmc->max_seg_size = mmc->max_req_size; 1066 mmc->max_seg_size = mmc->max_req_size;
855 1067
856 sh_mmcif_sync_reset(host); 1068 sh_mmcif_sync_reset(host);
857 platform_set_drvdata(pdev, host); 1069 platform_set_drvdata(pdev, host);
1070
1071 pm_runtime_enable(&pdev->dev);
1072 host->power = false;
1073
1074 ret = pm_runtime_resume(&pdev->dev);
1075 if (ret < 0)
1076 goto clean_up2;
1077
858 mmc_add_host(mmc); 1078 mmc_add_host(mmc);
859 1079
1080 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1081
860 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host); 1082 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
861 if (ret) { 1083 if (ret) {
862 pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n"); 1084 dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
863 goto clean_up2; 1085 goto clean_up3;
864 } 1086 }
865 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host); 1087 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
866 if (ret) { 1088 if (ret) {
867 free_irq(irq[0], host); 1089 free_irq(irq[0], host);
868 pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n"); 1090 dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
869 goto clean_up2; 1091 goto clean_up3;
870 } 1092 }
871 1093
872 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
873 sh_mmcif_detect(host->mmc); 1094 sh_mmcif_detect(host->mmc);
874 1095
875 pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION); 1096 dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
876 pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME, 1097 dev_dbg(&pdev->dev, "chip ver H'%04x\n",
877 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); 1098 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
878 return ret; 1099 return ret;
879 1100
1101clean_up3:
1102 mmc_remove_host(mmc);
1103 pm_runtime_suspend(&pdev->dev);
880clean_up2: 1104clean_up2:
1105 pm_runtime_disable(&pdev->dev);
881 clk_disable(host->hclk); 1106 clk_disable(host->hclk);
882clean_up1: 1107clean_up1:
883 mmc_free_host(mmc); 1108 mmc_free_host(mmc);
@@ -892,31 +1117,70 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
892 struct sh_mmcif_host *host = platform_get_drvdata(pdev); 1117 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
893 int irq[2]; 1118 int irq[2];
894 1119
895 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); 1120 pm_runtime_get_sync(&pdev->dev);
896 1121
897 irq[0] = platform_get_irq(pdev, 0); 1122 mmc_remove_host(host->mmc);
898 irq[1] = platform_get_irq(pdev, 1); 1123 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
899 1124
900 if (host->addr) 1125 if (host->addr)
901 iounmap(host->addr); 1126 iounmap(host->addr);
902 1127
903 platform_set_drvdata(pdev, NULL); 1128 irq[0] = platform_get_irq(pdev, 0);
904 mmc_remove_host(host->mmc); 1129 irq[1] = platform_get_irq(pdev, 1);
905 1130
906 free_irq(irq[0], host); 1131 free_irq(irq[0], host);
907 free_irq(irq[1], host); 1132 free_irq(irq[1], host);
908 1133
1134 platform_set_drvdata(pdev, NULL);
1135
909 clk_disable(host->hclk); 1136 clk_disable(host->hclk);
910 mmc_free_host(host->mmc); 1137 mmc_free_host(host->mmc);
1138 pm_runtime_put_sync(&pdev->dev);
1139 pm_runtime_disable(&pdev->dev);
911 1140
912 return 0; 1141 return 0;
913} 1142}
914 1143
1144#ifdef CONFIG_PM
1145static int sh_mmcif_suspend(struct device *dev)
1146{
1147 struct platform_device *pdev = to_platform_device(dev);
1148 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1149 int ret = mmc_suspend_host(host->mmc);
1150
1151 if (!ret) {
1152 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1153 clk_disable(host->hclk);
1154 }
1155
1156 return ret;
1157}
1158
1159static int sh_mmcif_resume(struct device *dev)
1160{
1161 struct platform_device *pdev = to_platform_device(dev);
1162 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1163
1164 clk_enable(host->hclk);
1165
1166 return mmc_resume_host(host->mmc);
1167}
1168#else
1169#define sh_mmcif_suspend NULL
1170#define sh_mmcif_resume NULL
1171#endif /* CONFIG_PM */
1172
1173static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
1174 .suspend = sh_mmcif_suspend,
1175 .resume = sh_mmcif_resume,
1176};
1177
915static struct platform_driver sh_mmcif_driver = { 1178static struct platform_driver sh_mmcif_driver = {
916 .probe = sh_mmcif_probe, 1179 .probe = sh_mmcif_probe,
917 .remove = sh_mmcif_remove, 1180 .remove = sh_mmcif_remove,
918 .driver = { 1181 .driver = {
919 .name = DRIVER_NAME, 1182 .name = DRIVER_NAME,
1183 .pm = &sh_mmcif_dev_pm_ops,
920 }, 1184 },
921}; 1185};
922 1186
@@ -936,5 +1200,5 @@ module_exit(sh_mmcif_exit);
936 1200
937MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver"); 1201MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
938MODULE_LICENSE("GPL"); 1202MODULE_LICENSE("GPL");
939MODULE_ALIAS(DRIVER_NAME); 1203MODULE_ALIAS("platform:" DRIVER_NAME);
940MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>"); 1204MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
new file mode 100644
index 000000000000..ce500f03df85
--- /dev/null
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -0,0 +1,216 @@
1/*
2 * SuperH Mobile SDHI
3 *
4 * Copyright (C) 2009 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Based on "Compaq ASIC3 support":
11 *
12 * Copyright 2001 Compaq Computer Corporation.
13 * Copyright 2004-2005 Phil Blundell
14 * Copyright 2007-2008 OpenedHand Ltd.
15 *
16 * Authors: Phil Blundell <pb@handhelds.org>,
17 * Samuel Ortiz <sameo@openedhand.com>
18 *
19 */
20
21#include <linux/kernel.h>
22#include <linux/clk.h>
23#include <linux/slab.h>
24#include <linux/platform_device.h>
25#include <linux/mmc/host.h>
26#include <linux/mmc/sh_mobile_sdhi.h>
27#include <linux/mfd/tmio.h>
28#include <linux/sh_dma.h>
29
30#include "tmio_mmc.h"
31
32struct sh_mobile_sdhi {
33 struct clk *clk;
34 struct tmio_mmc_data mmc_data;
35 struct sh_dmae_slave param_tx;
36 struct sh_dmae_slave param_rx;
37 struct tmio_mmc_dma dma_priv;
38};
39
40static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state)
41{
42 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
43
44 if (p && p->set_pwr)
45 p->set_pwr(pdev, state);
46}
47
48static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
49{
50 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
51
52 if (p && p->get_cd)
53 return p->get_cd(pdev);
54 else
55 return -ENOSYS;
56}
57
58static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
59{
60 struct sh_mobile_sdhi *priv;
61 struct tmio_mmc_data *mmc_data;
62 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
63 struct tmio_mmc_host *host;
64 char clk_name[8];
65 int i, irq, ret;
66
67 priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
68 if (priv == NULL) {
69 dev_err(&pdev->dev, "kzalloc failed\n");
70 return -ENOMEM;
71 }
72
73 mmc_data = &priv->mmc_data;
74 p->pdata = mmc_data;
75
76 snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
77 priv->clk = clk_get(&pdev->dev, clk_name);
78 if (IS_ERR(priv->clk)) {
79 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
80 ret = PTR_ERR(priv->clk);
81 goto eclkget;
82 }
83
84 clk_enable(priv->clk);
85
86 mmc_data->hclk = clk_get_rate(priv->clk);
87 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
88 mmc_data->get_cd = sh_mobile_sdhi_get_cd;
89 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
90 if (p) {
91 mmc_data->flags = p->tmio_flags;
92 mmc_data->ocr_mask = p->tmio_ocr_mask;
93 mmc_data->capabilities |= p->tmio_caps;
94
95 if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
96 priv->param_tx.slave_id = p->dma_slave_tx;
97 priv->param_rx.slave_id = p->dma_slave_rx;
98 priv->dma_priv.chan_priv_tx = &priv->param_tx;
99 priv->dma_priv.chan_priv_rx = &priv->param_rx;
100 priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
101 mmc_data->dma = &priv->dma_priv;
102 }
103 }
104
105 /*
106 * All SDHI blocks support 2-byte and larger block sizes in 4-bit
107 * bus width mode.
108 */
109 mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
110
111 /*
112 * All SDHI blocks support SDIO IRQ signalling.
113 */
114 mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
115
116 ret = tmio_mmc_host_probe(&host, pdev, mmc_data);
117 if (ret < 0)
118 goto eprobe;
119
120 for (i = 0; i < 3; i++) {
121 irq = platform_get_irq(pdev, i);
122 if (irq < 0) {
123 if (i) {
124 continue;
125 } else {
126 ret = irq;
127 goto eirq;
128 }
129 }
130 ret = request_irq(irq, tmio_mmc_irq, 0,
131 dev_name(&pdev->dev), host);
132 if (ret) {
133 while (i--) {
134 irq = platform_get_irq(pdev, i);
135 if (irq >= 0)
136 free_irq(irq, host);
137 }
138 goto eirq;
139 }
140 }
141 dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n",
142 mmc_hostname(host->mmc), (unsigned long)
143 (platform_get_resource(pdev,IORESOURCE_MEM, 0)->start),
144 mmc_data->hclk / 1000000);
145
146 return ret;
147
148eirq:
149 tmio_mmc_host_remove(host);
150eprobe:
151 clk_disable(priv->clk);
152 clk_put(priv->clk);
153eclkget:
154 kfree(priv);
155 return ret;
156}
157
158static int sh_mobile_sdhi_remove(struct platform_device *pdev)
159{
160 struct mmc_host *mmc = platform_get_drvdata(pdev);
161 struct tmio_mmc_host *host = mmc_priv(mmc);
162 struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
163 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
164 int i, irq;
165
166 p->pdata = NULL;
167
168 tmio_mmc_host_remove(host);
169
170 for (i = 0; i < 3; i++) {
171 irq = platform_get_irq(pdev, i);
172 if (irq >= 0)
173 free_irq(irq, host);
174 }
175
176 clk_disable(priv->clk);
177 clk_put(priv->clk);
178 kfree(priv);
179
180 return 0;
181}
182
183static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
184 .suspend = tmio_mmc_host_suspend,
185 .resume = tmio_mmc_host_resume,
186 .runtime_suspend = tmio_mmc_host_runtime_suspend,
187 .runtime_resume = tmio_mmc_host_runtime_resume,
188};
189
190static struct platform_driver sh_mobile_sdhi_driver = {
191 .driver = {
192 .name = "sh_mobile_sdhi",
193 .owner = THIS_MODULE,
194 .pm = &tmio_mmc_dev_pm_ops,
195 },
196 .probe = sh_mobile_sdhi_probe,
197 .remove = __devexit_p(sh_mobile_sdhi_remove),
198};
199
200static int __init sh_mobile_sdhi_init(void)
201{
202 return platform_driver_register(&sh_mobile_sdhi_driver);
203}
204
205static void __exit sh_mobile_sdhi_exit(void)
206{
207 platform_driver_unregister(&sh_mobile_sdhi_driver);
208}
209
210module_init(sh_mobile_sdhi_init);
211module_exit(sh_mobile_sdhi_exit);
212
213MODULE_DESCRIPTION("SuperH Mobile SDHI driver");
214MODULE_AUTHOR("Magnus Damm");
215MODULE_LICENSE("GPL v2");
216MODULE_ALIAS("platform:sh_mobile_sdhi");
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index cec99958b652..457c26ea09de 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -978,11 +978,10 @@ static int tifm_sd_probe(struct tifm_dev *sock)
978 mmc->f_max = 24000000; 978 mmc->f_max = 24000000;
979 979
980 mmc->max_blk_count = 2048; 980 mmc->max_blk_count = 2048;
981 mmc->max_hw_segs = mmc->max_blk_count; 981 mmc->max_segs = mmc->max_blk_count;
982 mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE); 982 mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE);
983 mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size; 983 mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size;
984 mmc->max_req_size = mmc->max_seg_size; 984 mmc->max_req_size = mmc->max_seg_size;
985 mmc->max_phys_segs = mmc->max_hw_segs;
986 985
987 sock->card_event = tifm_sd_card_event; 986 sock->card_event = tifm_sd_card_event;
988 sock->data_event = tifm_sd_data_event; 987 sock->data_event = tifm_sd_data_event;
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 69d98e3bf6ab..8d185de90d20 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * linux/drivers/mmc/tmio_mmc.c 2 * linux/drivers/mmc/host/tmio_mmc.c
3 * 3 *
4 * Copyright (C) 2004 Ian Molton 4 * Copyright (C) 2007 Ian Molton
5 * Copyright (C) 2007 Ian Molton 5 * Copyright (C) 2004 Ian Molton
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -11,765 +11,26 @@
11 * Driver for the MMC / SD / SDIO cell found in: 11 * Driver for the MMC / SD / SDIO cell found in:
12 * 12 *
13 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 13 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
14 *
15 * This driver draws mainly on scattered spec sheets, Reverse engineering
16 * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
17 * support). (Further 4 bit support from a later datasheet).
18 *
19 * TODO:
20 * Investigate using a workqueue for PIO transfers
21 * Eliminate FIXMEs
22 * SDIO support
23 * Better Power management
24 * Handle MMC errors better
25 * double buffer support
26 *
27 */ 14 */
28#include <linux/module.h> 15
29#include <linux/irq.h>
30#include <linux/device.h> 16#include <linux/device.h>
31#include <linux/delay.h>
32#include <linux/dmaengine.h>
33#include <linux/mmc/host.h>
34#include <linux/mfd/core.h> 17#include <linux/mfd/core.h>
35#include <linux/mfd/tmio.h> 18#include <linux/mfd/tmio.h>
19#include <linux/mmc/host.h>
20#include <linux/module.h>
21#include <linux/pagemap.h>
22#include <linux/scatterlist.h>
36 23
37#include "tmio_mmc.h" 24#include "tmio_mmc.h"
38 25
39static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
40{
41 u32 clk = 0, clock;
42
43 if (new_clock) {
44 for (clock = host->mmc->f_min, clk = 0x80000080;
45 new_clock >= (clock<<1); clk >>= 1)
46 clock <<= 1;
47 clk |= 0x100;
48 }
49
50 if (host->set_clk_div)
51 host->set_clk_div(host->pdev, (clk>>22) & 1);
52
53 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
54}
55
56static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
57{
58 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
59 msleep(10);
60 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
61 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
62 msleep(10);
63}
64
65static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
66{
67 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
68 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
69 msleep(10);
70 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
71 msleep(10);
72}
73
74static void reset(struct tmio_mmc_host *host)
75{
76 /* FIXME - should we set stop clock reg here */
77 sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
78 sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
79 msleep(10);
80 sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
81 sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
82 msleep(10);
83}
84
85static void
86tmio_mmc_finish_request(struct tmio_mmc_host *host)
87{
88 struct mmc_request *mrq = host->mrq;
89
90 host->mrq = NULL;
91 host->cmd = NULL;
92 host->data = NULL;
93
94 mmc_request_done(host->mmc, mrq);
95}
96
97/* These are the bitmasks the tmio chip requires to implement the MMC response
98 * types. Note that R1 and R6 are the same in this scheme. */
99#define APP_CMD 0x0040
100#define RESP_NONE 0x0300
101#define RESP_R1 0x0400
102#define RESP_R1B 0x0500
103#define RESP_R2 0x0600
104#define RESP_R3 0x0700
105#define DATA_PRESENT 0x0800
106#define TRANSFER_READ 0x1000
107#define TRANSFER_MULTI 0x2000
108#define SECURITY_CMD 0x4000
109
110static int
111tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
112{
113 struct mmc_data *data = host->data;
114 int c = cmd->opcode;
115
116 /* Command 12 is handled by hardware */
117 if (cmd->opcode == 12 && !cmd->arg) {
118 sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
119 return 0;
120 }
121
122 switch (mmc_resp_type(cmd)) {
123 case MMC_RSP_NONE: c |= RESP_NONE; break;
124 case MMC_RSP_R1: c |= RESP_R1; break;
125 case MMC_RSP_R1B: c |= RESP_R1B; break;
126 case MMC_RSP_R2: c |= RESP_R2; break;
127 case MMC_RSP_R3: c |= RESP_R3; break;
128 default:
129 pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
130 return -EINVAL;
131 }
132
133 host->cmd = cmd;
134
135/* FIXME - this seems to be ok commented out but the spec suggest this bit
136 * should be set when issuing app commands.
137 * if(cmd->flags & MMC_FLAG_ACMD)
138 * c |= APP_CMD;
139 */
140 if (data) {
141 c |= DATA_PRESENT;
142 if (data->blocks > 1) {
143 sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
144 c |= TRANSFER_MULTI;
145 }
146 if (data->flags & MMC_DATA_READ)
147 c |= TRANSFER_READ;
148 }
149
150 enable_mmc_irqs(host, TMIO_MASK_CMD);
151
152 /* Fire off the command */
153 sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
154 sd_ctrl_write16(host, CTL_SD_CMD, c);
155
156 return 0;
157}
158
159/*
160 * This chip always returns (at least?) as much data as you ask for.
161 * I'm unsure what happens if you ask for less than a block. This should be
162 * looked into to ensure that a funny length read doesnt hose the controller.
163 */
164static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
165{
166 struct mmc_data *data = host->data;
167 void *sg_virt;
168 unsigned short *buf;
169 unsigned int count;
170 unsigned long flags;
171
172 if (!data) {
173 pr_debug("Spurious PIO IRQ\n");
174 return;
175 }
176
177 sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
178 buf = (unsigned short *)(sg_virt + host->sg_off);
179
180 count = host->sg_ptr->length - host->sg_off;
181 if (count > data->blksz)
182 count = data->blksz;
183
184 pr_debug("count: %08x offset: %08x flags %08x\n",
185 count, host->sg_off, data->flags);
186
187 /* Transfer the data */
188 if (data->flags & MMC_DATA_READ)
189 sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
190 else
191 sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
192
193 host->sg_off += count;
194
195 tmio_mmc_kunmap_atomic(sg_virt, &flags);
196
197 if (host->sg_off == host->sg_ptr->length)
198 tmio_mmc_next_sg(host);
199
200 return;
201}
202
203static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
204{
205 struct mmc_data *data = host->data;
206 struct mmc_command *stop;
207
208 host->data = NULL;
209
210 if (!data) {
211 dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
212 return;
213 }
214 stop = data->stop;
215
216 /* FIXME - return correct transfer count on errors */
217 if (!data->error)
218 data->bytes_xfered = data->blocks * data->blksz;
219 else
220 data->bytes_xfered = 0;
221
222 pr_debug("Completed data request\n");
223
224 /*
225 * FIXME: other drivers allow an optional stop command of any given type
226 * which we dont do, as the chip can auto generate them.
227 * Perhaps we can be smarter about when to use auto CMD12 and
228 * only issue the auto request when we know this is the desired
229 * stop command, allowing fallback to the stop command the
230 * upper layers expect. For now, we do what works.
231 */
232
233 if (data->flags & MMC_DATA_READ) {
234 if (!host->chan_rx)
235 disable_mmc_irqs(host, TMIO_MASK_READOP);
236 dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
237 host->mrq);
238 } else {
239 if (!host->chan_tx)
240 disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
241 dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
242 host->mrq);
243 }
244
245 if (stop) {
246 if (stop->opcode == 12 && !stop->arg)
247 sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
248 else
249 BUG();
250 }
251
252 tmio_mmc_finish_request(host);
253}
254
255static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
256{
257 struct mmc_data *data = host->data;
258
259 if (!data)
260 return;
261
262 if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
263 /*
264 * Has all data been written out yet? Testing on SuperH showed,
265 * that in most cases the first interrupt comes already with the
266 * BUSY status bit clear, but on some operations, like mount or
267 * in the beginning of a write / sync / umount, there is one
268 * DATAEND interrupt with the BUSY bit set, in this cases
269 * waiting for one more interrupt fixes the problem.
270 */
271 if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
272 disable_mmc_irqs(host, TMIO_STAT_DATAEND);
273 tasklet_schedule(&host->dma_complete);
274 }
275 } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
276 disable_mmc_irqs(host, TMIO_STAT_DATAEND);
277 tasklet_schedule(&host->dma_complete);
278 } else {
279 tmio_mmc_do_data_irq(host);
280 }
281}
282
283static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
284 unsigned int stat)
285{
286 struct mmc_command *cmd = host->cmd;
287 int i, addr;
288
289 if (!host->cmd) {
290 pr_debug("Spurious CMD irq\n");
291 return;
292 }
293
294 host->cmd = NULL;
295
296 /* This controller is sicker than the PXA one. Not only do we need to
297 * drop the top 8 bits of the first response word, we also need to
298 * modify the order of the response for short response command types.
299 */
300
301 for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
302 cmd->resp[i] = sd_ctrl_read32(host, addr);
303
304 if (cmd->flags & MMC_RSP_136) {
305 cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
306 cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
307 cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
308 cmd->resp[3] <<= 8;
309 } else if (cmd->flags & MMC_RSP_R3) {
310 cmd->resp[0] = cmd->resp[3];
311 }
312
313 if (stat & TMIO_STAT_CMDTIMEOUT)
314 cmd->error = -ETIMEDOUT;
315 else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
316 cmd->error = -EILSEQ;
317
318 /* If there is data to handle we enable data IRQs here, and
319 * we will ultimatley finish the request in the data_end handler.
320 * If theres no data or we encountered an error, finish now.
321 */
322 if (host->data && !cmd->error) {
323 if (host->data->flags & MMC_DATA_READ) {
324 if (!host->chan_rx)
325 enable_mmc_irqs(host, TMIO_MASK_READOP);
326 } else {
327 struct dma_chan *chan = host->chan_tx;
328 if (!chan)
329 enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
330 else
331 tasklet_schedule(&host->dma_issue);
332 }
333 } else {
334 tmio_mmc_finish_request(host);
335 }
336
337 return;
338}
339
340static irqreturn_t tmio_mmc_irq(int irq, void *devid)
341{
342 struct tmio_mmc_host *host = devid;
343 unsigned int ireg, irq_mask, status;
344
345 pr_debug("MMC IRQ begin\n");
346
347 status = sd_ctrl_read32(host, CTL_STATUS);
348 irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
349 ireg = status & TMIO_MASK_IRQ & ~irq_mask;
350
351 pr_debug_status(status);
352 pr_debug_status(ireg);
353
354 if (!ireg) {
355 disable_mmc_irqs(host, status & ~irq_mask);
356
357 pr_warning("tmio_mmc: Spurious irq, disabling! "
358 "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
359 pr_debug_status(status);
360
361 goto out;
362 }
363
364 while (ireg) {
365 /* Card insert / remove attempts */
366 if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
367 ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
368 TMIO_STAT_CARD_REMOVE);
369 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
370 }
371
372 /* CRC and other errors */
373/* if (ireg & TMIO_STAT_ERR_IRQ)
374 * handled |= tmio_error_irq(host, irq, stat);
375 */
376
377 /* Command completion */
378 if (ireg & TMIO_MASK_CMD) {
379 ack_mmc_irqs(host, TMIO_MASK_CMD);
380 tmio_mmc_cmd_irq(host, status);
381 }
382
383 /* Data transfer */
384 if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
385 ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
386 tmio_mmc_pio_irq(host);
387 }
388
389 /* Data transfer completion */
390 if (ireg & TMIO_STAT_DATAEND) {
391 ack_mmc_irqs(host, TMIO_STAT_DATAEND);
392 tmio_mmc_data_irq(host);
393 }
394
395 /* Check status - keep going until we've handled it all */
396 status = sd_ctrl_read32(host, CTL_STATUS);
397 irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
398 ireg = status & TMIO_MASK_IRQ & ~irq_mask;
399
400 pr_debug("Status at end of loop: %08x\n", status);
401 pr_debug_status(status);
402 }
403 pr_debug("MMC IRQ end\n");
404
405out:
406 return IRQ_HANDLED;
407}
408
409#ifdef CONFIG_TMIO_MMC_DMA
410static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
411{
412#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
413 /* Switch DMA mode on or off - SuperH specific? */
414 sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
415#endif
416}
417
418static void tmio_dma_complete(void *arg)
419{
420 struct tmio_mmc_host *host = arg;
421
422 dev_dbg(&host->pdev->dev, "Command completed\n");
423
424 if (!host->data)
425 dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
426 else
427 enable_mmc_irqs(host, TMIO_STAT_DATAEND);
428}
429
430static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
431{
432 struct scatterlist *sg = host->sg_ptr;
433 struct dma_async_tx_descriptor *desc = NULL;
434 struct dma_chan *chan = host->chan_rx;
435 int ret;
436
437 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
438 if (ret > 0) {
439 host->dma_sglen = ret;
440 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
441 DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
442 }
443
444 if (desc) {
445 host->desc = desc;
446 desc->callback = tmio_dma_complete;
447 desc->callback_param = host;
448 host->cookie = desc->tx_submit(desc);
449 if (host->cookie < 0) {
450 host->desc = NULL;
451 ret = host->cookie;
452 } else {
453 chan->device->device_issue_pending(chan);
454 }
455 }
456 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
457 __func__, host->sg_len, ret, host->cookie, host->mrq);
458
459 if (!host->desc) {
460 /* DMA failed, fall back to PIO */
461 if (ret >= 0)
462 ret = -EIO;
463 host->chan_rx = NULL;
464 dma_release_channel(chan);
465 /* Free the Tx channel too */
466 chan = host->chan_tx;
467 if (chan) {
468 host->chan_tx = NULL;
469 dma_release_channel(chan);
470 }
471 dev_warn(&host->pdev->dev,
472 "DMA failed: %d, falling back to PIO\n", ret);
473 tmio_mmc_enable_dma(host, false);
474 reset(host);
475 /* Fail this request, let above layers recover */
476 host->mrq->cmd->error = ret;
477 tmio_mmc_finish_request(host);
478 }
479
480 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
481 desc, host->cookie, host->sg_len);
482
483 return ret > 0 ? 0 : ret;
484}
485
486static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
487{
488 struct scatterlist *sg = host->sg_ptr;
489 struct dma_async_tx_descriptor *desc = NULL;
490 struct dma_chan *chan = host->chan_tx;
491 int ret;
492
493 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
494 if (ret > 0) {
495 host->dma_sglen = ret;
496 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
497 DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
498 }
499
500 if (desc) {
501 host->desc = desc;
502 desc->callback = tmio_dma_complete;
503 desc->callback_param = host;
504 host->cookie = desc->tx_submit(desc);
505 if (host->cookie < 0) {
506 host->desc = NULL;
507 ret = host->cookie;
508 }
509 }
510 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
511 __func__, host->sg_len, ret, host->cookie, host->mrq);
512
513 if (!host->desc) {
514 /* DMA failed, fall back to PIO */
515 if (ret >= 0)
516 ret = -EIO;
517 host->chan_tx = NULL;
518 dma_release_channel(chan);
519 /* Free the Rx channel too */
520 chan = host->chan_rx;
521 if (chan) {
522 host->chan_rx = NULL;
523 dma_release_channel(chan);
524 }
525 dev_warn(&host->pdev->dev,
526 "DMA failed: %d, falling back to PIO\n", ret);
527 tmio_mmc_enable_dma(host, false);
528 reset(host);
529 /* Fail this request, let above layers recover */
530 host->mrq->cmd->error = ret;
531 tmio_mmc_finish_request(host);
532 }
533
534 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
535 desc, host->cookie);
536
537 return ret > 0 ? 0 : ret;
538}
539
540static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
541 struct mmc_data *data)
542{
543 if (data->flags & MMC_DATA_READ) {
544 if (host->chan_rx)
545 return tmio_mmc_start_dma_rx(host);
546 } else {
547 if (host->chan_tx)
548 return tmio_mmc_start_dma_tx(host);
549 }
550
551 return 0;
552}
553
554static void tmio_issue_tasklet_fn(unsigned long priv)
555{
556 struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
557 struct dma_chan *chan = host->chan_tx;
558
559 chan->device->device_issue_pending(chan);
560}
561
562static void tmio_tasklet_fn(unsigned long arg)
563{
564 struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
565
566 if (host->data->flags & MMC_DATA_READ)
567 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
568 DMA_FROM_DEVICE);
569 else
570 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
571 DMA_TO_DEVICE);
572
573 tmio_mmc_do_data_irq(host);
574}
575
576/* It might be necessary to make filter MFD specific */
577static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
578{
579 dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
580 chan->private = arg;
581 return true;
582}
583
584static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
585 struct tmio_mmc_data *pdata)
586{
587 host->cookie = -EINVAL;
588 host->desc = NULL;
589
590 /* We can only either use DMA for both Tx and Rx or not use it at all */
591 if (pdata->dma) {
592 dma_cap_mask_t mask;
593
594 dma_cap_zero(mask);
595 dma_cap_set(DMA_SLAVE, mask);
596
597 host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
598 pdata->dma->chan_priv_tx);
599 dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
600 host->chan_tx);
601
602 if (!host->chan_tx)
603 return;
604
605 host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
606 pdata->dma->chan_priv_rx);
607 dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
608 host->chan_rx);
609
610 if (!host->chan_rx) {
611 dma_release_channel(host->chan_tx);
612 host->chan_tx = NULL;
613 return;
614 }
615
616 tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
617 tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);
618
619 tmio_mmc_enable_dma(host, true);
620 }
621}
622
623static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
624{
625 if (host->chan_tx) {
626 struct dma_chan *chan = host->chan_tx;
627 host->chan_tx = NULL;
628 dma_release_channel(chan);
629 }
630 if (host->chan_rx) {
631 struct dma_chan *chan = host->chan_rx;
632 host->chan_rx = NULL;
633 dma_release_channel(chan);
634 }
635
636 host->cookie = -EINVAL;
637 host->desc = NULL;
638}
639#else
/* No DMA support compiled in: all transfers are done by PIO */
static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
			      struct mmc_data *data)
{
	return 0;
}
645
646static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
647 struct tmio_mmc_data *pdata)
648{
649 host->chan_tx = NULL;
650 host->chan_rx = NULL;
651}
652
static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	/* Nothing to release without DMA support */
}
656#endif
657
658static int tmio_mmc_start_data(struct tmio_mmc_host *host,
659 struct mmc_data *data)
660{
661 pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
662 data->blksz, data->blocks);
663
664 /* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
665 if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
666 pr_err("%s: %d byte block unsupported in 4 bit mode\n",
667 mmc_hostname(host->mmc), data->blksz);
668 return -EINVAL;
669 }
670
671 tmio_mmc_init_sg(host, data);
672 host->data = data;
673
674 /* Set transfer length / blocksize */
675 sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
676 sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
677
678 return tmio_mmc_start_dma(host, data);
679}
680
681/* Process requests from the MMC layer */
682static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
683{
684 struct tmio_mmc_host *host = mmc_priv(mmc);
685 int ret;
686
687 if (host->mrq)
688 pr_debug("request not null\n");
689
690 host->mrq = mrq;
691
692 if (mrq->data) {
693 ret = tmio_mmc_start_data(host, mrq->data);
694 if (ret)
695 goto fail;
696 }
697
698 ret = tmio_mmc_start_command(host, mrq->cmd);
699 if (!ret)
700 return;
701
702fail:
703 mrq->cmd->error = ret;
704 mmc_request_done(mmc, mrq);
705}
706
707/* Set MMC clock / power.
708 * Note: This controller uses a simple divider scheme therefore it cannot
709 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
710 * MMC wont run that fast, it has to be clocked at 12MHz which is the next
711 * slowest setting.
712 */
713static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
714{
715 struct tmio_mmc_host *host = mmc_priv(mmc);
716
717 if (ios->clock)
718 tmio_mmc_set_clock(host, ios->clock);
719
720 /* Power sequence - OFF -> ON -> UP */
721 switch (ios->power_mode) {
722 case MMC_POWER_OFF: /* power down SD bus */
723 if (host->set_pwr)
724 host->set_pwr(host->pdev, 0);
725 tmio_mmc_clk_stop(host);
726 break;
727 case MMC_POWER_ON: /* power up SD bus */
728 if (host->set_pwr)
729 host->set_pwr(host->pdev, 1);
730 break;
731 case MMC_POWER_UP: /* start bus clock */
732 tmio_mmc_clk_start(host);
733 break;
734 }
735
736 switch (ios->bus_width) {
737 case MMC_BUS_WIDTH_1:
738 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
739 break;
740 case MMC_BUS_WIDTH_4:
741 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
742 break;
743 }
744
745 /* Let things settle. delay taken from winCE driver */
746 udelay(140);
747}
748
749static int tmio_mmc_get_ro(struct mmc_host *mmc)
750{
751 struct tmio_mmc_host *host = mmc_priv(mmc);
752 struct mfd_cell *cell = host->pdev->dev.platform_data;
753 struct tmio_mmc_data *pdata = cell->driver_data;
754
755 return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
756 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
757}
758
759static const struct mmc_host_ops tmio_mmc_ops = {
760 .request = tmio_mmc_request,
761 .set_ios = tmio_mmc_set_ios,
762 .get_ro = tmio_mmc_get_ro,
763};
764
765#ifdef CONFIG_PM 26#ifdef CONFIG_PM
766static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) 27static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
767{ 28{
768 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 29 const struct mfd_cell *cell = mfd_get_cell(dev);
769 struct mmc_host *mmc = platform_get_drvdata(dev); 30 struct mmc_host *mmc = platform_get_drvdata(dev);
770 int ret; 31 int ret;
771 32
772 ret = mmc_suspend_host(mmc); 33 ret = tmio_mmc_host_suspend(&dev->dev);
773 34
774 /* Tell MFD core it can disable us now.*/ 35 /* Tell MFD core it can disable us now.*/
775 if (!ret && cell->disable) 36 if (!ret && cell->disable)
@@ -780,20 +41,17 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
780 41
781static int tmio_mmc_resume(struct platform_device *dev) 42static int tmio_mmc_resume(struct platform_device *dev)
782{ 43{
783 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 44 const struct mfd_cell *cell = mfd_get_cell(dev);
784 struct mmc_host *mmc = platform_get_drvdata(dev); 45 struct mmc_host *mmc = platform_get_drvdata(dev);
785 int ret = 0; 46 int ret = 0;
786 47
787 /* Tell the MFD core we are ready to be enabled */ 48 /* Tell the MFD core we are ready to be enabled */
788 if (cell->resume) { 49 if (cell->resume)
789 ret = cell->resume(dev); 50 ret = cell->resume(dev);
790 if (ret)
791 goto out;
792 }
793 51
794 mmc_resume_host(mmc); 52 if (!ret)
53 ret = tmio_mmc_host_resume(&dev->dev);
795 54
796out:
797 return ret; 55 return ret;
798} 56}
799#else 57#else
@@ -801,125 +59,69 @@ out:
801#define tmio_mmc_resume NULL 59#define tmio_mmc_resume NULL
802#endif 60#endif
803 61
804static int __devinit tmio_mmc_probe(struct platform_device *dev) 62static int __devinit tmio_mmc_probe(struct platform_device *pdev)
805{ 63{
806 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 64 const struct mfd_cell *cell = mfd_get_cell(pdev);
807 struct tmio_mmc_data *pdata; 65 struct tmio_mmc_data *pdata;
808 struct resource *res_ctl;
809 struct tmio_mmc_host *host; 66 struct tmio_mmc_host *host;
810 struct mmc_host *mmc; 67 int ret = -EINVAL, irq;
811 int ret = -EINVAL;
812 u32 irq_mask = TMIO_MASK_CMD;
813
814 if (dev->num_resources != 2)
815 goto out;
816 68
817 res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); 69 if (pdev->num_resources != 2)
818 if (!res_ctl)
819 goto out; 70 goto out;
820 71
821 pdata = cell->driver_data; 72 pdata = pdev->dev.platform_data;
822 if (!pdata || !pdata->hclk) 73 if (!pdata || !pdata->hclk)
823 goto out; 74 goto out;
824 75
825 ret = -ENOMEM; 76 irq = platform_get_irq(pdev, 0);
826 77 if (irq < 0) {
827 mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev); 78 ret = irq;
828 if (!mmc)
829 goto out; 79 goto out;
830 80 }
831 host = mmc_priv(mmc);
832 host->mmc = mmc;
833 host->pdev = dev;
834 platform_set_drvdata(dev, mmc);
835
836 host->set_pwr = pdata->set_pwr;
837 host->set_clk_div = pdata->set_clk_div;
838
839 /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
840 host->bus_shift = resource_size(res_ctl) >> 10;
841
842 host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
843 if (!host->ctl)
844 goto host_free;
845
846 mmc->ops = &tmio_mmc_ops;
847 mmc->caps = MMC_CAP_4_BIT_DATA;
848 mmc->caps |= pdata->capabilities;
849 mmc->f_max = pdata->hclk;
850 mmc->f_min = mmc->f_max / 512;
851 if (pdata->ocr_mask)
852 mmc->ocr_avail = pdata->ocr_mask;
853 else
854 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
855 81
856 /* Tell the MFD core we are ready to be enabled */ 82 /* Tell the MFD core we are ready to be enabled */
857 if (cell->enable) { 83 if (cell->enable) {
858 ret = cell->enable(dev); 84 ret = cell->enable(pdev);
859 if (ret) 85 if (ret)
860 goto unmap_ctl; 86 goto out;
861 } 87 }
862 88
863 tmio_mmc_clk_stop(host); 89 ret = tmio_mmc_host_probe(&host, pdev, pdata);
864 reset(host);
865
866 ret = platform_get_irq(dev, 0);
867 if (ret >= 0)
868 host->irq = ret;
869 else
870 goto cell_disable;
871
872 disable_mmc_irqs(host, TMIO_MASK_ALL);
873
874 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
875 IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
876 if (ret) 90 if (ret)
877 goto cell_disable; 91 goto cell_disable;
878 92
879 /* See if we also get DMA */ 93 ret = request_irq(irq, tmio_mmc_irq, IRQF_DISABLED |
880 tmio_mmc_request_dma(host, pdata); 94 IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), host);
881 95 if (ret)
882 mmc_add_host(mmc); 96 goto host_remove;
883 97
884 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), 98 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
885 (unsigned long)host->ctl, host->irq); 99 (unsigned long)host->ctl, irq);
886
887 /* Unmask the IRQs we want to know about */
888 if (!host->chan_rx)
889 irq_mask |= TMIO_MASK_READOP;
890 if (!host->chan_tx)
891 irq_mask |= TMIO_MASK_WRITEOP;
892 enable_mmc_irqs(host, irq_mask);
893 100
894 return 0; 101 return 0;
895 102
103host_remove:
104 tmio_mmc_host_remove(host);
896cell_disable: 105cell_disable:
897 if (cell->disable) 106 if (cell->disable)
898 cell->disable(dev); 107 cell->disable(pdev);
899unmap_ctl:
900 iounmap(host->ctl);
901host_free:
902 mmc_free_host(mmc);
903out: 108out:
904 return ret; 109 return ret;
905} 110}
906 111
907static int __devexit tmio_mmc_remove(struct platform_device *dev) 112static int __devexit tmio_mmc_remove(struct platform_device *pdev)
908{ 113{
909 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 114 const struct mfd_cell *cell = mfd_get_cell(pdev);
910 struct mmc_host *mmc = platform_get_drvdata(dev); 115 struct mmc_host *mmc = platform_get_drvdata(pdev);
911 116
912 platform_set_drvdata(dev, NULL); 117 platform_set_drvdata(pdev, NULL);
913 118
914 if (mmc) { 119 if (mmc) {
915 struct tmio_mmc_host *host = mmc_priv(mmc); 120 struct tmio_mmc_host *host = mmc_priv(mmc);
916 mmc_remove_host(mmc); 121 free_irq(platform_get_irq(pdev, 0), host);
917 tmio_mmc_release_dma(host); 122 tmio_mmc_host_remove(host);
918 free_irq(host->irq, host);
919 if (cell->disable) 123 if (cell->disable)
920 cell->disable(dev); 124 cell->disable(pdev);
921 iounmap(host->ctl);
922 mmc_free_host(mmc);
923 } 125 }
924 126
925 return 0; 127 return 0;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 0fedc78e3ea5..8260bc2c34e3 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -1,58 +1,31 @@
1/* Definitons for use with the tmio_mmc.c 1/*
2 * linux/drivers/mmc/host/tmio_mmc.h
2 * 3 *
3 * (c) 2004 Ian Molton <spyro@f2s.com> 4 * Copyright (C) 2007 Ian Molton
4 * (c) 2007 Ian Molton <spyro@f2s.com> 5 * Copyright (C) 2004 Ian Molton
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
9 * 10 *
11 * Driver for the MMC / SD / SDIO cell found in:
12 *
13 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
10 */ 14 */
11 15
16#ifndef TMIO_MMC_H
17#define TMIO_MMC_H
18
12#include <linux/highmem.h> 19#include <linux/highmem.h>
13#include <linux/interrupt.h> 20#include <linux/mmc/tmio.h>
14#include <linux/dmaengine.h> 21#include <linux/pagemap.h>
15 22#include <linux/spinlock.h>
16#define CTL_SD_CMD 0x00 23
17#define CTL_ARG_REG 0x04 24/* Definitions for values the CTRL_SDIO_STATUS register can take. */
18#define CTL_STOP_INTERNAL_ACTION 0x08 25#define TMIO_SDIO_STAT_IOIRQ 0x0001
19#define CTL_XFER_BLK_COUNT 0xa 26#define TMIO_SDIO_STAT_EXPUB52 0x4000
20#define CTL_RESPONSE 0x0c 27#define TMIO_SDIO_STAT_EXWT 0x8000
21#define CTL_STATUS 0x1c 28#define TMIO_SDIO_MASK_ALL 0xc007
22#define CTL_IRQ_MASK 0x20
23#define CTL_SD_CARD_CLK_CTL 0x24
24#define CTL_SD_XFER_LEN 0x26
25#define CTL_SD_MEM_CARD_OPT 0x28
26#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
27#define CTL_SD_DATA_PORT 0x30
28#define CTL_TRANSACTION_CTL 0x34
29#define CTL_RESET_SD 0xe0
30#define CTL_SDIO_REGS 0x100
31#define CTL_CLK_AND_WAIT_CTL 0x138
32#define CTL_RESET_SDIO 0x1e0
33
34/* Definitions for values the CTRL_STATUS register can take. */
35#define TMIO_STAT_CMDRESPEND 0x00000001
36#define TMIO_STAT_DATAEND 0x00000004
37#define TMIO_STAT_CARD_REMOVE 0x00000008
38#define TMIO_STAT_CARD_INSERT 0x00000010
39#define TMIO_STAT_SIGSTATE 0x00000020
40#define TMIO_STAT_WRPROTECT 0x00000080
41#define TMIO_STAT_CARD_REMOVE_A 0x00000100
42#define TMIO_STAT_CARD_INSERT_A 0x00000200
43#define TMIO_STAT_SIGSTATE_A 0x00000400
44#define TMIO_STAT_CMD_IDX_ERR 0x00010000
45#define TMIO_STAT_CRCFAIL 0x00020000
46#define TMIO_STAT_STOPBIT_ERR 0x00040000
47#define TMIO_STAT_DATATIMEOUT 0x00080000
48#define TMIO_STAT_RXOVERFLOW 0x00100000
49#define TMIO_STAT_TXUNDERRUN 0x00200000
50#define TMIO_STAT_CMDTIMEOUT 0x00400000
51#define TMIO_STAT_RXRDY 0x01000000
52#define TMIO_STAT_TXRQ 0x02000000
53#define TMIO_STAT_ILL_FUNC 0x20000000
54#define TMIO_STAT_CMD_BUSY 0x40000000
55#define TMIO_STAT_ILL_ACCESS 0x80000000
56 29
57/* Define some IRQ masks */ 30/* Define some IRQ masks */
58/* This is the mask used at reset by the chip */ 31/* This is the mask used at reset by the chip */
@@ -63,28 +36,7 @@
63 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) 36 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
64#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) 37#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
65 38
66 39struct tmio_mmc_data;
67#define enable_mmc_irqs(host, i) \
68 do { \
69 u32 mask;\
70 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
71 mask &= ~((i) & TMIO_MASK_IRQ); \
72 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
73 } while (0)
74
75#define disable_mmc_irqs(host, i) \
76 do { \
77 u32 mask;\
78 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
79 mask |= ((i) & TMIO_MASK_IRQ); \
80 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
81 } while (0)
82
83#define ack_mmc_irqs(host, i) \
84 do { \
85 sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
86 } while (0)
87
88 40
89struct tmio_mmc_host { 41struct tmio_mmc_host {
90 void __iomem *ctl; 42 void __iomem *ctl;
@@ -93,136 +45,93 @@ struct tmio_mmc_host {
93 struct mmc_request *mrq; 45 struct mmc_request *mrq;
94 struct mmc_data *data; 46 struct mmc_data *data;
95 struct mmc_host *mmc; 47 struct mmc_host *mmc;
96 int irq; 48 unsigned int sdio_irq_enabled;
97 49
98 /* Callbacks for clock / power control */ 50 /* Callbacks for clock / power control */
99 void (*set_pwr)(struct platform_device *host, int state); 51 void (*set_pwr)(struct platform_device *host, int state);
100 void (*set_clk_div)(struct platform_device *host, int state); 52 void (*set_clk_div)(struct platform_device *host, int state);
101 53
54 int pm_error;
55
102 /* pio related stuff */ 56 /* pio related stuff */
103 struct scatterlist *sg_ptr; 57 struct scatterlist *sg_ptr;
58 struct scatterlist *sg_orig;
104 unsigned int sg_len; 59 unsigned int sg_len;
105 unsigned int sg_off; 60 unsigned int sg_off;
106 61
107 struct platform_device *pdev; 62 struct platform_device *pdev;
63 struct tmio_mmc_data *pdata;
108 64
109 /* DMA support */ 65 /* DMA support */
66 bool force_pio;
110 struct dma_chan *chan_rx; 67 struct dma_chan *chan_rx;
111 struct dma_chan *chan_tx; 68 struct dma_chan *chan_tx;
112 struct tasklet_struct dma_complete; 69 struct tasklet_struct dma_complete;
113 struct tasklet_struct dma_issue; 70 struct tasklet_struct dma_issue;
114#ifdef CONFIG_TMIO_MMC_DMA 71 struct scatterlist bounce_sg;
115 struct dma_async_tx_descriptor *desc; 72 u8 *bounce_buf;
116 unsigned int dma_sglen;
117 dma_cookie_t cookie;
118#endif
119};
120
121#include <linux/io.h>
122 73
123static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) 74 /* Track lost interrupts */
124{ 75 struct delayed_work delayed_reset_work;
125 return readw(host->ctl + (addr << host->bus_shift)); 76 spinlock_t lock;
126} 77 unsigned long last_req_ts;
127 78};
128static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
129 u16 *buf, int count)
130{
131 readsw(host->ctl + (addr << host->bus_shift), buf, count);
132}
133 79
134static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) 80int tmio_mmc_host_probe(struct tmio_mmc_host **host,
135{ 81 struct platform_device *pdev,
136 return readw(host->ctl + (addr << host->bus_shift)) | 82 struct tmio_mmc_data *pdata);
137 readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; 83void tmio_mmc_host_remove(struct tmio_mmc_host *host);
138} 84void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
139 85
140static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, 86void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
141 u16 val) 87void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
142{ 88irqreturn_t tmio_mmc_irq(int irq, void *devid);
143 writew(val, host->ctl + (addr << host->bus_shift));
144}
145 89
146static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, 90static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
147 u16 *buf, int count) 91 unsigned long *flags)
148{
149 writesw(host->ctl + (addr << host->bus_shift), buf, count);
150}
151
152static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
153 u32 val)
154{ 92{
155 writew(val, host->ctl + (addr << host->bus_shift)); 93 local_irq_save(*flags);
156 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); 94 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
157} 95}
158 96
159#include <linux/scatterlist.h> 97static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
160#include <linux/blkdev.h> 98 unsigned long *flags, void *virt)
161
162static inline void tmio_mmc_init_sg(struct tmio_mmc_host *host,
163 struct mmc_data *data)
164{ 99{
165 host->sg_len = data->sg_len; 100 kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
166 host->sg_ptr = data->sg; 101 local_irq_restore(*flags);
167 host->sg_off = 0;
168} 102}
169 103
170static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host) 104#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
105void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
106void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
107void tmio_mmc_release_dma(struct tmio_mmc_host *host);
108#else
109static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
110 struct mmc_data *data)
171{ 111{
172 host->sg_ptr = sg_next(host->sg_ptr);
173 host->sg_off = 0;
174 return --host->sg_len;
175} 112}
176 113
177static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, 114static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
178 unsigned long *flags) 115 struct tmio_mmc_data *pdata)
179{ 116{
180 local_irq_save(*flags); 117 host->chan_tx = NULL;
181 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 118 host->chan_rx = NULL;
182} 119}
183 120
184static inline void tmio_mmc_kunmap_atomic(void *virt, 121static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
185 unsigned long *flags)
186{ 122{
187 kunmap_atomic(virt, KM_BIO_SRC_IRQ);
188 local_irq_restore(*flags);
189} 123}
124#endif
190 125
191#ifdef CONFIG_MMC_DEBUG 126#ifdef CONFIG_PM
192 127int tmio_mmc_host_suspend(struct device *dev);
193#define STATUS_TO_TEXT(a) \ 128int tmio_mmc_host_resume(struct device *dev);
194 do { \ 129#else
195 if (status & TMIO_STAT_##a) \ 130#define tmio_mmc_host_suspend NULL
196 printk(#a); \ 131#define tmio_mmc_host_resume NULL
197 } while (0) 132#endif
198 133
199void pr_debug_status(u32 status) 134int tmio_mmc_host_runtime_suspend(struct device *dev);
200{ 135int tmio_mmc_host_runtime_resume(struct device *dev);
201 printk(KERN_DEBUG "status: %08x = ", status);
202 STATUS_TO_TEXT(CARD_REMOVE);
203 STATUS_TO_TEXT(CARD_INSERT);
204 STATUS_TO_TEXT(SIGSTATE);
205 STATUS_TO_TEXT(WRPROTECT);
206 STATUS_TO_TEXT(CARD_REMOVE_A);
207 STATUS_TO_TEXT(CARD_INSERT_A);
208 STATUS_TO_TEXT(SIGSTATE_A);
209 STATUS_TO_TEXT(CMD_IDX_ERR);
210 STATUS_TO_TEXT(STOPBIT_ERR);
211 STATUS_TO_TEXT(ILL_FUNC);
212 STATUS_TO_TEXT(CMD_BUSY);
213 STATUS_TO_TEXT(CMDRESPEND);
214 STATUS_TO_TEXT(DATAEND);
215 STATUS_TO_TEXT(CRCFAIL);
216 STATUS_TO_TEXT(DATATIMEOUT);
217 STATUS_TO_TEXT(CMDTIMEOUT);
218 STATUS_TO_TEXT(RXOVERFLOW);
219 STATUS_TO_TEXT(TXUNDERRUN);
220 STATUS_TO_TEXT(RXRDY);
221 STATUS_TO_TEXT(TXRQ);
222 STATUS_TO_TEXT(ILL_ACCESS);
223 printk("\n");
224}
225 136
226#else
227#define pr_debug_status(s) do { } while (0)
228#endif 137#endif
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
new file mode 100644
index 000000000000..25f1ad6cbe09
--- /dev/null
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -0,0 +1,320 @@
1/*
2 * linux/drivers/mmc/tmio_mmc_dma.c
3 *
4 * Copyright (C) 2010-2011 Guennadi Liakhovetski
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * DMA function for TMIO MMC implementations
11 */
12
13#include <linux/device.h>
14#include <linux/dmaengine.h>
15#include <linux/mfd/tmio.h>
16#include <linux/mmc/host.h>
17#include <linux/mmc/tmio.h>
18#include <linux/pagemap.h>
19#include <linux/scatterlist.h>
20
21#include "tmio_mmc.h"
22
23#define TMIO_MMC_MIN_DMA_LEN 8
24
25static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
26{
27#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
28 /* Switch DMA mode on or off - SuperH specific? */
29 writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
30#endif
31}
32
33static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
34{
35 struct scatterlist *sg = host->sg_ptr, *sg_tmp;
36 struct dma_async_tx_descriptor *desc = NULL;
37 struct dma_chan *chan = host->chan_rx;
38 struct tmio_mmc_data *pdata = host->pdata;
39 dma_cookie_t cookie;
40 int ret, i;
41 bool aligned = true, multiple = true;
42 unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
43
44 for_each_sg(sg, sg_tmp, host->sg_len, i) {
45 if (sg_tmp->offset & align)
46 aligned = false;
47 if (sg_tmp->length & align) {
48 multiple = false;
49 break;
50 }
51 }
52
53 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
54 (align & PAGE_MASK))) || !multiple) {
55 ret = -EINVAL;
56 goto pio;
57 }
58
59 if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
60 host->force_pio = true;
61 return;
62 }
63
64 tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);
65
66 /* The only sg element can be unaligned, use our bounce buffer then */
67 if (!aligned) {
68 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
69 host->sg_ptr = &host->bounce_sg;
70 sg = host->sg_ptr;
71 }
72
73 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
74 if (ret > 0)
75 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
76 DMA_FROM_DEVICE, DMA_CTRL_ACK);
77
78 if (desc) {
79 cookie = dmaengine_submit(desc);
80 if (cookie < 0) {
81 desc = NULL;
82 ret = cookie;
83 }
84 }
85 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
86 __func__, host->sg_len, ret, cookie, host->mrq);
87
88pio:
89 if (!desc) {
90 /* DMA failed, fall back to PIO */
91 if (ret >= 0)
92 ret = -EIO;
93 host->chan_rx = NULL;
94 dma_release_channel(chan);
95 /* Free the Tx channel too */
96 chan = host->chan_tx;
97 if (chan) {
98 host->chan_tx = NULL;
99 dma_release_channel(chan);
100 }
101 dev_warn(&host->pdev->dev,
102 "DMA failed: %d, falling back to PIO\n", ret);
103 tmio_mmc_enable_dma(host, false);
104 }
105
106 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
107 desc, cookie, host->sg_len);
108}
109
110static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
111{
112 struct scatterlist *sg = host->sg_ptr, *sg_tmp;
113 struct dma_async_tx_descriptor *desc = NULL;
114 struct dma_chan *chan = host->chan_tx;
115 struct tmio_mmc_data *pdata = host->pdata;
116 dma_cookie_t cookie;
117 int ret, i;
118 bool aligned = true, multiple = true;
119 unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
120
121 for_each_sg(sg, sg_tmp, host->sg_len, i) {
122 if (sg_tmp->offset & align)
123 aligned = false;
124 if (sg_tmp->length & align) {
125 multiple = false;
126 break;
127 }
128 }
129
130 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
131 (align & PAGE_MASK))) || !multiple) {
132 ret = -EINVAL;
133 goto pio;
134 }
135
136 if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
137 host->force_pio = true;
138 return;
139 }
140
141 tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);
142
143 /* The only sg element can be unaligned, use our bounce buffer then */
144 if (!aligned) {
145 unsigned long flags;
146 void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
147 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
148 memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
149 tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
150 host->sg_ptr = &host->bounce_sg;
151 sg = host->sg_ptr;
152 }
153
154 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
155 if (ret > 0)
156 desc = chan->device->device_prep_slave_sg(chan, sg, ret,
157 DMA_TO_DEVICE, DMA_CTRL_ACK);
158
159 if (desc) {
160 cookie = dmaengine_submit(desc);
161 if (cookie < 0) {
162 desc = NULL;
163 ret = cookie;
164 }
165 }
166 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
167 __func__, host->sg_len, ret, cookie, host->mrq);
168
169pio:
170 if (!desc) {
171 /* DMA failed, fall back to PIO */
172 if (ret >= 0)
173 ret = -EIO;
174 host->chan_tx = NULL;
175 dma_release_channel(chan);
176 /* Free the Rx channel too */
177 chan = host->chan_rx;
178 if (chan) {
179 host->chan_rx = NULL;
180 dma_release_channel(chan);
181 }
182 dev_warn(&host->pdev->dev,
183 "DMA failed: %d, falling back to PIO\n", ret);
184 tmio_mmc_enable_dma(host, false);
185 }
186
187 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
188 desc, cookie);
189}
190
191void tmio_mmc_start_dma(struct tmio_mmc_host *host,
192 struct mmc_data *data)
193{
194 if (data->flags & MMC_DATA_READ) {
195 if (host->chan_rx)
196 tmio_mmc_start_dma_rx(host);
197 } else {
198 if (host->chan_tx)
199 tmio_mmc_start_dma_tx(host);
200 }
201}
202
203static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
204{
205 struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
206 struct dma_chan *chan = NULL;
207
208 spin_lock_irq(&host->lock);
209
210 if (host && host->data) {
211 if (host->data->flags & MMC_DATA_READ)
212 chan = host->chan_rx;
213 else
214 chan = host->chan_tx;
215 }
216
217 spin_unlock_irq(&host->lock);
218
219 tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
220
221 if (chan)
222 dma_async_issue_pending(chan);
223}
224
225static void tmio_mmc_tasklet_fn(unsigned long arg)
226{
227 struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
228
229 spin_lock_irq(&host->lock);
230
231 if (!host->data)
232 goto out;
233
234 if (host->data->flags & MMC_DATA_READ)
235 dma_unmap_sg(host->chan_rx->device->dev,
236 host->sg_ptr, host->sg_len,
237 DMA_FROM_DEVICE);
238 else
239 dma_unmap_sg(host->chan_tx->device->dev,
240 host->sg_ptr, host->sg_len,
241 DMA_TO_DEVICE);
242
243 tmio_mmc_do_data_irq(host);
244out:
245 spin_unlock_irq(&host->lock);
246}
247
248/* It might be necessary to make filter MFD specific */
249static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
250{
251 dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
252 chan->private = arg;
253 return true;
254}
255
256void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
257{
258 /* We can only either use DMA for both Tx and Rx or not use it at all */
259 if (!pdata->dma)
260 return;
261
262 if (!host->chan_tx && !host->chan_rx) {
263 dma_cap_mask_t mask;
264
265 dma_cap_zero(mask);
266 dma_cap_set(DMA_SLAVE, mask);
267
268 host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
269 pdata->dma->chan_priv_tx);
270 dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
271 host->chan_tx);
272
273 if (!host->chan_tx)
274 return;
275
276 host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
277 pdata->dma->chan_priv_rx);
278 dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
279 host->chan_rx);
280
281 if (!host->chan_rx)
282 goto ereqrx;
283
284 host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
285 if (!host->bounce_buf)
286 goto ebouncebuf;
287
288 tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
289 tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
290 }
291
292 tmio_mmc_enable_dma(host, true);
293
294 return;
295
296ebouncebuf:
297 dma_release_channel(host->chan_rx);
298 host->chan_rx = NULL;
299ereqrx:
300 dma_release_channel(host->chan_tx);
301 host->chan_tx = NULL;
302}
303
304void tmio_mmc_release_dma(struct tmio_mmc_host *host)
305{
306 if (host->chan_tx) {
307 struct dma_chan *chan = host->chan_tx;
308 host->chan_tx = NULL;
309 dma_release_channel(chan);
310 }
311 if (host->chan_rx) {
312 struct dma_chan *chan = host->chan_rx;
313 host->chan_rx = NULL;
314 dma_release_channel(chan);
315 }
316 if (host->bounce_buf) {
317 free_pages((unsigned long)host->bounce_buf, 0);
318 host->bounce_buf = NULL;
319 }
320}
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
new file mode 100644
index 000000000000..0b09e8239aa0
--- /dev/null
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -0,0 +1,1037 @@
1/*
2 * linux/drivers/mmc/host/tmio_mmc_pio.c
3 *
4 * Copyright (C) 2011 Guennadi Liakhovetski
5 * Copyright (C) 2007 Ian Molton
6 * Copyright (C) 2004 Ian Molton
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Driver for the MMC / SD / SDIO IP found in:
13 *
14 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
15 *
16 * This driver draws mainly on scattered spec sheets, Reverse engineering
17 * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
18 * support). (Further 4 bit support from a later datasheet).
19 *
20 * TODO:
21 * Investigate using a workqueue for PIO transfers
22 * Eliminate FIXMEs
23 * SDIO support
24 * Better Power management
25 * Handle MMC errors better
26 * double buffer support
27 *
28 */
29
30#include <linux/delay.h>
31#include <linux/device.h>
32#include <linux/highmem.h>
33#include <linux/interrupt.h>
34#include <linux/io.h>
35#include <linux/irq.h>
36#include <linux/mfd/tmio.h>
37#include <linux/mmc/host.h>
38#include <linux/mmc/tmio.h>
39#include <linux/module.h>
40#include <linux/pagemap.h>
41#include <linux/platform_device.h>
42#include <linux/pm_runtime.h>
43#include <linux/scatterlist.h>
44#include <linux/workqueue.h>
45#include <linux/spinlock.h>
46
47#include "tmio_mmc.h"
48
/* Read a 16-bit SD control register; bus_shift scales the register stride. */
static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift));
}
53
/* Repeatedly read 'count' 16-bit words from one register (data FIFO). */
static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	readsw(host->ctl + (addr << host->bus_shift), buf, count);
}
59
/* Read a 32-bit register as two 16-bit halves, low word first. */
static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift)) |
	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
65
/* Write a 16-bit SD control register. */
static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
}
70
/* Repeatedly write 'count' 16-bit words to one register (data FIFO). */
static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	writesw(host->ctl + (addr << host->bus_shift), buf, count);
}
76
/* Write a 32-bit register as two 16-bit halves, low word first. */
static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
82
/* Unmask the interrupts in 'i' (CTL_IRQ_MASK: a set bit masks the IRQ). */
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}
88
/* Mask (disable) the interrupts in 'i'. */
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}
94
/* Acknowledge the status bits in 'i' by writing their complement to CTL_STATUS. */
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}
99
100static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
101{
102 host->sg_len = data->sg_len;
103 host->sg_ptr = data->sg;
104 host->sg_orig = data->sg;
105 host->sg_off = 0;
106}
107
108static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
109{
110 host->sg_ptr = sg_next(host->sg_ptr);
111 host->sg_off = 0;
112 return --host->sg_len;
113}
114
#ifdef CONFIG_MMC_DEBUG

/*
 * Append the textual name of status bit 'a' to the debug line; 'i' counts
 * the bits already printed so " | " separators land between names only.
 */
#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

/* Decode a CTL_STATUS word into human-readable flag names (debug builds only). */
static void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
157
/*
 * Enable/disable SDIO card interrupt delivery.  The write order is
 * deliberate: on enable, transactions are switched on before unmasking
 * the IOIRQ bit; on disable, everything is masked before transactions
 * are switched off and the software flag is cleared last.
 */
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
	} else {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
		host->sdio_irq_enabled = 0;
	}
}
173
/*
 * Program the card-clock divider for (approximately) new_clock Hz.
 * The loop walks the power-of-two divider chain: 'clock' doubles from
 * f_min while the divider mask in 'clk' shifts right, stopping at the
 * largest frequency not exceeding new_clock.  Bit 8 (0x100) enables
 * the clock; new_clock == 0 leaves the clock disabled (clk == 0).
 */
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
			new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	/* bit 22 selects the external clock divider, if the platform has one */
	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
190
/*
 * Gate the card clock.  A register window larger than 0x100 identifies
 * controller variants with the extra CLK_AND_WAIT_CTL register, which is
 * cleared first; the clock-enable bit (0x0100) is then dropped.
 */
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}
205
/*
 * Ungate the card clock: set the clock-enable bit (0x0100), then, on
 * variants with the extra register window, enable CLK_AND_WAIT_CTL too.
 * The msleep()s give the clock time to settle, mirroring clk_stop().
 */
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}
220
/*
 * Hardware-reset the SD core (and the SDIO core on larger variants):
 * assert reset (write 0), wait, deassert (write 1), wait.
 */
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
236
/*
 * Delayed-work handler: fires if a request has seen no completion
 * interrupt for ~2 s.  Marks the request timed out, resets the
 * controller and completes the request back to the MMC core.
 */
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * is request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen, that a .set_ios() call preempts
	 * us, so, have to check for IS_ERR(host->mrq)
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	/* flag the error on whichever stage the request stalled in */
	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	mmc_request_done(host->mmc, mrq);
}
283
/* called with host->lock held, interrupts disabled */
/*
 * Tear down per-request state, cancel the watchdog work and hand the
 * finished request back to the MMC core.  No-op if nothing is pending.
 */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	if (!mrq)
		return;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	/* non-blocking cancel: the handler re-checks host->mrq itself */
	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;

	/* FIXME: mmc_request_done() can schedule! */
	mmc_request_done(host->mmc, mrq);
}
303
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000

/*
 * Translate an MMC command into the controller's command-register
 * encoding and fire it off.  CMD12 (stop) with arg 0 is delegated to the
 * auto-CMD12 hardware.  Returns 0 on success, -EINVAL for response
 * types the hardware cannot encode.
 */
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggest this bit
 * should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			/* arm auto-CMD12 for multi-block transfers */
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
364
365/*
366 * This chip always returns (at least?) as much data as you ask for.
367 * I'm unsure what happens if you ask for less than a block. This should be
368 * looked into to ensure that a funny length read doesn't hose the controller.
369 */
370static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
371{
372 struct mmc_data *data = host->data;
373 void *sg_virt;
374 unsigned short *buf;
375 unsigned int count;
376 unsigned long flags;
377
378 if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
379 pr_err("PIO IRQ in DMA mode!\n");
380 return;
381 } else if (!data) {
382 pr_debug("Spurious PIO IRQ\n");
383 return;
384 }
385
386 sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
387 buf = (unsigned short *)(sg_virt + host->sg_off);
388
389 count = host->sg_ptr->length - host->sg_off;
390 if (count > data->blksz)
391 count = data->blksz;
392
393 pr_debug("count: %08x offset: %08x flags %08x\n",
394 count, host->sg_off, data->flags);
395
396 /* Transfer the data */
397 if (data->flags & MMC_DATA_READ)
398 sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
399 else
400 sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
401
402 host->sg_off += count;
403
404 tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
405
406 if (host->sg_off == host->sg_ptr->length)
407 tmio_mmc_next_sg(host);
408
409 return;
410}
411
/*
 * If the last Rx DMA went through the bounce buffer, copy the received
 * data back into the caller's original scatterlist entry.
 */
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
421
/* needs to be called with host->lock held */
/*
 * Data-end completion: account transferred bytes, unwind the bounce
 * buffer on DMA reads, acknowledge the (auto-)stop command and finish
 * the request.
 */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	/* only the hardware-generated CMD12 stop is supported (see FIXME above) */
	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}
472
/*
 * DATAEND interrupt: in DMA mode defer completion to the dma_complete
 * tasklet (waiting out a still-set BUSY bit on writes); in PIO mode
 * complete the data stage directly.
 */
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed,
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set, in this cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
505
/*
 * Command-phase interrupt: collect the response registers, reorder them
 * into the layout the MMC core expects, record timeout/CRC errors, then
 * either arm the data phase or finish the request.
 */
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		tmio_mmc_finish_request(host);
	}

out:
	spin_unlock(&host->lock);
}
566
/*
 * Top-level interrupt handler.  First checks for SDIO card interrupts
 * (if none of the MMC status bits fired), then loops over the MMC
 * status word dispatching card-detect, command-complete, PIO-data and
 * data-end events until no unmasked bits remain.
 */
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, irq_mask, status;
	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	sdio_ireg = 0;
	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

		/* ack the SDIO status bits we just sampled */
		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

		if (sdio_ireg && !host->sdio_irq_enabled) {
			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
				   sdio_status, sdio_irq_mask, sdio_ireg);
			tmio_mmc_enable_sdio_irq(host->mmc, 0);
			goto out;
		}

		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
			mmc_signal_sdio_irq(host->mmc);

		if (sdio_ireg)
			goto out;
	}

	pr_debug_status(status);
	pr_debug_status(ireg);

	/* nothing we expected fired: mask whatever did, to stop an IRQ storm */
	if (!ireg) {
		tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);

		pr_warning("tmio_mmc: Spurious irq, disabling! "
			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		}

		/* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

		/* Command completion */
		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
			tmio_mmc_ack_mmc_irqs(host,
				     TMIO_STAT_CMDRESPEND |
				     TMIO_STAT_CMDTIMEOUT);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);
663
/*
 * Prepare the controller for a data transfer: validate the block size
 * against 4-bit-mode hardware limits, set up the sg cursor, program
 * block size/count and kick off DMA setup.  Returns 0 or -EINVAL.
 */
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
694
/* Process requests from the MMC layer */
/*
 * .request host op.  host->mrq doubles as a busy flag: a live pointer
 * means a request is in flight, an ERR_PTR means .set_ios() owns the
 * controller (new requests are bounced with -EAGAIN).  On success the
 * 2 s watchdog (tmio_mmc_reset_work) is armed.
 */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();	/* publish the timestamp before the request pointer */
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}
739
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC wont run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
/*
 * While .set_ios() runs, host->mrq is parked at ERR_PTR(-EBUSY) so that
 * concurrent .request() calls bounce with -EAGAIN; a nested .set_ios()
 * flips it to ERR_PTR(-EINTR) so this invocation can log the overlap.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(&host->pdev->dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(&host->pdev->dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> UP -> ON */
	if (ios->power_mode == MMC_POWER_UP) {
		/* keep the controller resumed while powered, for cold card detect */
		if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && !pdata->power) {
			pm_runtime_get_sync(&host->pdev->dev);
			pdata->power = true;
		}
		/* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* power down SD bus */
		if (ios->power_mode == MMC_POWER_OFF) {
			if (host->set_pwr)
				host->set_pwr(host->pdev, 0);
			if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
			    pdata->power) {
				pdata->power = false;
				pm_runtime_put(&host->pdev->dev);
			}
		}
		tmio_mmc_clk_stop(host);
	} else {
		/* start bus clock */
		tmio_mmc_clk_start(host);
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;
}
821
822static int tmio_mmc_get_ro(struct mmc_host *mmc)
823{
824 struct tmio_mmc_host *host = mmc_priv(mmc);
825 struct tmio_mmc_data *pdata = host->pdata;
826
827 return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
828 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
829}
830
831static int tmio_mmc_get_cd(struct mmc_host *mmc)
832{
833 struct tmio_mmc_host *host = mmc_priv(mmc);
834 struct tmio_mmc_data *pdata = host->pdata;
835
836 if (!pdata->get_cd)
837 return -ENOSYS;
838 else
839 return pdata->get_cd(host->pdev);
840}
841
/* Host operations handed to the MMC core at probe time. */
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
849
/*
 * Common probe for all tmio-mmc platform glue drivers: allocate the
 * mmc_host, map the control registers, set host capabilities/limits,
 * bring the device up via runtime PM, reset the controller, request
 * DMA and register with the MMC core.  On success *host is filled in.
 */
int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
				  struct platform_device *pdev,
				  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	pdata->dev = &pdev->dev;
	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	_host->bus_shift = resource_size(res_ctl) >> 10;

	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	pdata->power = false;
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto pm_disable;

	/* quiesce the controller before exposing it to the core */
	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	spin_lock_init(&_host->lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	/* We have to keep the device powered for its card detection to work */
	if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD))
		pm_runtime_get_noresume(&pdev->dev);

	mmc_add_host(mmc);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;

	tmio_mmc_enable_mmc_irqs(_host, irq_mask);

	*host = _host;

	return 0;

pm_disable:
	pm_runtime_disable(&pdev->dev);
	iounmap(_host->ctl);
host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
950
/*
 * Common remove: unregister from the MMC core, stop the watchdog work,
 * release DMA resources, drop the runtime-PM reference taken at probe
 * and free the host.
 */
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;

	/*
	 * We don't have to manipulate pdata->power here: if there is a card in
	 * the slot, the runtime PM is active and our .runtime_resume() will not
	 * be run. If there is no card in the slot and the platform can suspend
	 * the controller, the runtime PM is suspended and pdata->power == false,
	 * so, our .runtime_resume() will not try to detect a card in the slot.
	 */
	if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD)
		pm_runtime_get_sync(&pdev->dev);

	mmc_remove_host(host->mmc);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	iounmap(host->ctl);
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
976
977#ifdef CONFIG_PM
/*
 * System-suspend helper for glue drivers: suspend the MMC core, mask
 * all controller IRQs and drop the runtime-PM reference.  The put's
 * result is stashed in pm_error so resume can balance the reference.
 */
int tmio_mmc_host_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret = mmc_suspend_host(mmc);

	if (!ret)
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	host->pm_error = pm_runtime_put_sync(dev);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_suspend);
992
/*
 * System-resume helper: re-take the runtime-PM reference (unless the
 * suspend-time put already failed), reset the controller, re-enable
 * DMA and let the MMC core redo the card setup.
 */
int tmio_mmc_host_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	/* The MMC core will perform the complete set up */
	host->pdata->power = false;

	if (!host->pm_error)
		pm_runtime_get_sync(dev);

	tmio_mmc_reset(mmc_priv(mmc));
	tmio_mmc_request_dma(host, host->pdata);

	return mmc_resume_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_resume);
1010
1011#endif /* CONFIG_PM */
1012
/* Runtime suspend: nothing to save — the controller is reset on resume. */
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
1018
/*
 * Runtime resume: reset the controller and, when the slot was powered
 * (pdata->power set by a card-insert path), restore the I/O settings
 * and trigger card re-detection.
 */
int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	tmio_mmc_reset(host);

	if (pdata->power) {
		/* Only entered after a card-insert interrupt */
		tmio_mmc_set_ios(mmc, &mmc->ios);
		mmc_detect_change(mmc, msecs_to_jiffies(100));
	}

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
1036
1037MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
new file mode 100644
index 000000000000..f08f944ac53c
--- /dev/null
+++ b/drivers/mmc/host/ushc.c
@@ -0,0 +1,579 @@
1/*
2 * USB SD Host Controller (USHC) controller driver.
3 *
4 * Copyright (C) 2010 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * Notes:
12 * - Only version 2 devices are supported.
13 * - Version 2 devices only support SDIO cards/devices (R2 response is
14 * unsupported).
15 *
16 * References:
17 * [USHC] USB SD Host Controller specification (CS-118793-SP)
18 */
19#include <linux/module.h>
20#include <linux/usb.h>
21#include <linux/kernel.h>
22#include <linux/slab.h>
23#include <linux/dma-mapping.h>
24#include <linux/mmc/host.h>
25
/* Vendor control-request codes understood by the USHC firmware. */
enum ushc_request {
	USHC_GET_CAPS  = 0x00,
	USHC_HOST_CTRL = 0x01,
	USHC_PWR_CTRL  = 0x02,
	USHC_CLK_FREQ  = 0x03,
	USHC_EXEC_CMD  = 0x04,
	USHC_READ_RESP = 0x05,
	USHC_RESET     = 0x06,
};

/* bmRequestType for each of the requests above. */
enum ushc_request_type {
	USHC_GET_CAPS_TYPE  = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
	USHC_HOST_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
	USHC_PWR_CTRL_TYPE  = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
	USHC_CLK_FREQ_TYPE  = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
	USHC_EXEC_CMD_TYPE  = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
	USHC_READ_RESP_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
	USHC_RESET_TYPE     = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
};

/* Bits in the USHC_GET_CAPS response word (cached in ushc->caps). */
#define USHC_GET_CAPS_VERSION_MASK 0xff
#define USHC_GET_CAPS_3V3      (1 << 8)
#define USHC_GET_CAPS_3V0      (1 << 9)
#define USHC_GET_CAPS_1V8      (1 << 10)
#define USHC_GET_CAPS_HIGH_SPD (1 << 16)

/* Bits in the USHC_HOST_CTRL value (cached in ushc->host_ctrl). */
#define USHC_HOST_CTRL_4BIT     (1 << 1)
#define USHC_HOST_CTRL_HIGH_SPD (1 << 0)

/* USHC_PWR_CTRL values: card power off or signalling voltage. */
#define USHC_PWR_CTRL_OFF 0x00
#define USHC_PWR_CTRL_3V3 0x01
#define USHC_PWR_CTRL_3V0 0x02
#define USHC_PWR_CTRL_1V8 0x03

/* Bits in the CSW status byte (decoded in csw_callback()). */
#define USHC_READ_RESP_BUSY        (1 << 4)
#define USHC_READ_RESP_ERR_TIMEOUT (1 << 3)
#define USHC_READ_RESP_ERR_CRC     (1 << 2)
#define USHC_READ_RESP_ERR_DAT     (1 << 1)
#define USHC_READ_RESP_ERR_CMD     (1 << 0)
#define USHC_READ_RESP_ERR_MASK    0x0f

/* Command Block Wrapper: sent on the bulk-out pipe to start a command. */
struct ushc_cbw {
	__u8 signature;		/* USHC_CBW_SIGNATURE */
	__u8 cmd_idx;		/* SD/MMC command opcode */
	__le16 block_size;	/* data block size; 0 for non-data commands */
	__le32 arg;		/* SD/MMC command argument */
} __attribute__((packed));

#define USHC_CBW_SIGNATURE 'C'

/* Command Status Wrapper: read back once the command has completed. */
struct ushc_csw {
	__u8 signature;		/* USHC_CSW_SIGNATURE */
	__u8 status;		/* USHC_READ_RESP_* bits */
	__le32 response;	/* 32-bit command response word */
} __attribute__((packed));

#define USHC_CSW_SIGNATURE 'S'

/* Payload of the periodic interrupt-endpoint transfer. */
struct ushc_int_data {
	u8 status;
	u8 reserved[3];
};

/* Bits in ushc_int_data.status. */
#define USHC_INT_STATUS_SDIO_INT     (1 << 1)
#define USHC_INT_STATUS_CARD_PRESENT (1 << 0)


/* Per-device driver state, allocated as mmc_priv() of the mmc_host. */
struct ushc_data {
	struct usb_device *usb_dev;
	struct mmc_host *mmc;

	struct urb *int_urb;		/* interrupt endpoint polling */
	struct ushc_int_data *int_data;

	struct urb *cbw_urb;		/* command block (bulk out) */
	struct ushc_cbw *cbw;

	struct urb *data_urb;		/* optional data phase */

	struct urb *csw_urb;		/* command status (bulk in) */
	struct ushc_csw *csw;

	spinlock_t lock;		/* serialises request submission */
	struct mmc_request *current_req;
	u32 caps;			/* USHC_GET_CAPS response */
	u16 host_ctrl;			/* last value sent via USHC_HOST_CTRL */
	unsigned long flags;		/* DISCONNECTED / INT_EN / IGNORE_NEXT_INT bits */
	u8 last_status;			/* previous interrupt status byte */
	int clock_freq;
};

/* Bit numbers within ushc_data.flags. */
#define DISCONNECTED 0
#define INT_EN       1
#define IGNORE_NEXT_INT 2

static void data_callback(struct urb *urb);
122
123static int ushc_hw_reset(struct ushc_data *ushc)
124{
125 return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
126 USHC_RESET, USHC_RESET_TYPE,
127 0, 0, NULL, 0, 100);
128}
129
130static int ushc_hw_get_caps(struct ushc_data *ushc)
131{
132 int ret;
133 int version;
134
135 ret = usb_control_msg(ushc->usb_dev, usb_rcvctrlpipe(ushc->usb_dev, 0),
136 USHC_GET_CAPS, USHC_GET_CAPS_TYPE,
137 0, 0, &ushc->caps, sizeof(ushc->caps), 100);
138 if (ret < 0)
139 return ret;
140
141 ushc->caps = le32_to_cpu(ushc->caps);
142
143 version = ushc->caps & USHC_GET_CAPS_VERSION_MASK;
144 if (version != 0x02) {
145 dev_err(&ushc->usb_dev->dev, "controller version %d is not supported\n", version);
146 return -EINVAL;
147 }
148
149 return 0;
150}
151
152static int ushc_hw_set_host_ctrl(struct ushc_data *ushc, u16 mask, u16 val)
153{
154 u16 host_ctrl;
155 int ret;
156
157 host_ctrl = (ushc->host_ctrl & ~mask) | val;
158 ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
159 USHC_HOST_CTRL, USHC_HOST_CTRL_TYPE,
160 host_ctrl, 0, NULL, 0, 100);
161 if (ret < 0)
162 return ret;
163 ushc->host_ctrl = host_ctrl;
164 return 0;
165}
166
/*
 * Interrupt endpoint completion handler.
 *
 * The device reports card-present and SDIO-interrupt state in a single
 * status byte.  Card changes are edge-detected against the previous
 * value; SDIO interrupts are forwarded to the MMC core; then the URB
 * is resubmitted so polling continues.
 */
static void int_callback(struct urb *urb)
{
	struct ushc_data *ushc = urb->context;
	u8 status, last_status;

	/* URB failed (e.g. device gone): stop resubmitting. */
	if (urb->status < 0)
		return;

	status = ushc->int_data->status;
	last_status = ushc->last_status;
	ushc->last_status = status;

	/*
	 * Ignore the card interrupt status on interrupt transfers that
	 * were submitted while card interrupts were disabled.
	 *
	 * This avoids occasional spurious interrupts when enabling
	 * interrupts immediately after clearing the source on the card.
	 */

	if (!test_and_clear_bit(IGNORE_NEXT_INT, &ushc->flags)
	    && test_bit(INT_EN, &ushc->flags)
	    && status & USHC_INT_STATUS_SDIO_INT) {
		mmc_signal_sdio_irq(ushc->mmc);
	}

	/* Card-present bit toggled: schedule a debounced rescan. */
	if ((status ^ last_status) & USHC_INT_STATUS_CARD_PRESENT)
		mmc_detect_change(ushc->mmc, msecs_to_jiffies(100));

	if (!test_bit(INT_EN, &ushc->flags))
		set_bit(IGNORE_NEXT_INT, &ushc->flags);
	/*
	 * NOTE(review): the resubmit return value is ignored; a failure
	 * here silently stops card-detect polling - confirm intended.
	 */
	usb_submit_urb(ushc->int_urb, GFP_ATOMIC);
}
200
201static void cbw_callback(struct urb *urb)
202{
203 struct ushc_data *ushc = urb->context;
204
205 if (urb->status != 0) {
206 usb_unlink_urb(ushc->data_urb);
207 usb_unlink_urb(ushc->csw_urb);
208 }
209}
210
211static void data_callback(struct urb *urb)
212{
213 struct ushc_data *ushc = urb->context;
214
215 if (urb->status != 0)
216 usb_unlink_urb(ushc->csw_urb);
217}
218
/*
 * CSW (status) URB completion: decode the command/data status reported
 * by the device, fill in the response word, and complete the request.
 */
static void csw_callback(struct urb *urb)
{
	struct ushc_data *ushc = urb->context;
	struct mmc_request *req = ushc->current_req;
	int status;

	/*
	 * NOTE(review): the CSW status byte is read even when
	 * urb->status != 0 (buffer contents may be stale in that case);
	 * the data-error branch below still consults it - confirm.
	 */
	status = ushc->csw->status;

	if (urb->status != 0) {
		/* Transport failure: report it on the command. */
		req->cmd->error = urb->status;
	} else if (status & USHC_READ_RESP_ERR_CMD) {
		/* CRC error maps to -EIO, otherwise a command timeout. */
		if (status & USHC_READ_RESP_ERR_CRC)
			req->cmd->error = -EIO;
		else
			req->cmd->error = -ETIMEDOUT;
	}
	if (req->data) {
		if (status & USHC_READ_RESP_ERR_DAT) {
			if (status & USHC_READ_RESP_ERR_CRC)
				req->data->error = -EIO;
			else
				req->data->error = -ETIMEDOUT;
			req->data->bytes_xfered = 0;
		} else {
			/* No data error reported: count the full transfer. */
			req->data->bytes_xfered = req->data->blksz * req->data->blocks;
		}
	}

	req->cmd->resp[0] = le32_to_cpu(ushc->csw->response);

	mmc_request_done(ushc->mmc, req);
}
251
252static void ushc_request(struct mmc_host *mmc, struct mmc_request *req)
253{
254 struct ushc_data *ushc = mmc_priv(mmc);
255 int ret;
256 unsigned long flags;
257
258 spin_lock_irqsave(&ushc->lock, flags);
259
260 if (test_bit(DISCONNECTED, &ushc->flags)) {
261 ret = -ENODEV;
262 goto out;
263 }
264
265 /* Version 2 firmware doesn't support the R2 response format. */
266 if (req->cmd->flags & MMC_RSP_136) {
267 ret = -EINVAL;
268 goto out;
269 }
270
271 /* The Astoria's data FIFOs don't work with clock speeds < 5MHz so
272 limit commands with data to 6MHz or more. */
273 if (req->data && ushc->clock_freq < 6000000) {
274 ret = -EINVAL;
275 goto out;
276 }
277
278 ushc->current_req = req;
279
280 /* Start cmd with CBW. */
281 ushc->cbw->cmd_idx = cpu_to_le16(req->cmd->opcode);
282 if (req->data)
283 ushc->cbw->block_size = cpu_to_le16(req->data->blksz);
284 else
285 ushc->cbw->block_size = 0;
286 ushc->cbw->arg = cpu_to_le32(req->cmd->arg);
287
288 ret = usb_submit_urb(ushc->cbw_urb, GFP_ATOMIC);
289 if (ret < 0)
290 goto out;
291
292 /* Submit data (if any). */
293 if (req->data) {
294 struct mmc_data *data = req->data;
295 int pipe;
296
297 if (data->flags & MMC_DATA_READ)
298 pipe = usb_rcvbulkpipe(ushc->usb_dev, 6);
299 else
300 pipe = usb_sndbulkpipe(ushc->usb_dev, 2);
301
302 usb_fill_bulk_urb(ushc->data_urb, ushc->usb_dev, pipe,
303 sg_virt(data->sg), data->sg->length,
304 data_callback, ushc);
305 ret = usb_submit_urb(ushc->data_urb, GFP_ATOMIC);
306 if (ret < 0)
307 goto out;
308 }
309
310 /* Submit CSW. */
311 ret = usb_submit_urb(ushc->csw_urb, GFP_ATOMIC);
312 if (ret < 0)
313 goto out;
314
315out:
316 spin_unlock_irqrestore(&ushc->lock, flags);
317 if (ret < 0) {
318 usb_unlink_urb(ushc->cbw_urb);
319 usb_unlink_urb(ushc->data_urb);
320 req->cmd->error = ret;
321 mmc_request_done(mmc, req);
322 }
323}
324
325static int ushc_set_power(struct ushc_data *ushc, unsigned char power_mode)
326{
327 u16 voltage;
328
329 switch (power_mode) {
330 case MMC_POWER_OFF:
331 voltage = USHC_PWR_CTRL_OFF;
332 break;
333 case MMC_POWER_UP:
334 case MMC_POWER_ON:
335 voltage = USHC_PWR_CTRL_3V3;
336 break;
337 default:
338 return -EINVAL;
339 }
340
341 return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
342 USHC_PWR_CTRL, USHC_PWR_CTRL_TYPE,
343 voltage, 0, NULL, 0, 100);
344}
345
346static int ushc_set_bus_width(struct ushc_data *ushc, int bus_width)
347{
348 return ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_4BIT,
349 bus_width == 4 ? USHC_HOST_CTRL_4BIT : 0);
350}
351
352static int ushc_set_bus_freq(struct ushc_data *ushc, int clk, bool enable_hs)
353{
354 int ret;
355
356 /* Hardware can't detect interrupts while the clock is off. */
357 if (clk == 0)
358 clk = 400000;
359
360 ret = ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_HIGH_SPD,
361 enable_hs ? USHC_HOST_CTRL_HIGH_SPD : 0);
362 if (ret < 0)
363 return ret;
364
365 ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
366 USHC_CLK_FREQ, USHC_CLK_FREQ_TYPE,
367 clk & 0xffff, (clk >> 16) & 0xffff, NULL, 0, 100);
368 if (ret < 0)
369 return ret;
370
371 ushc->clock_freq = clk;
372 return 0;
373}
374
375static void ushc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
376{
377 struct ushc_data *ushc = mmc_priv(mmc);
378
379 ushc_set_power(ushc, ios->power_mode);
380 ushc_set_bus_width(ushc, 1 << ios->bus_width);
381 ushc_set_bus_freq(ushc, ios->clock, ios->timing == MMC_TIMING_SD_HS);
382}
383
384static int ushc_get_cd(struct mmc_host *mmc)
385{
386 struct ushc_data *ushc = mmc_priv(mmc);
387
388 return !!(ushc->last_status & USHC_INT_STATUS_CARD_PRESENT);
389}
390
391static void ushc_enable_sdio_irq(struct mmc_host *mmc, int enable)
392{
393 struct ushc_data *ushc = mmc_priv(mmc);
394
395 if (enable)
396 set_bit(INT_EN, &ushc->flags);
397 else
398 clear_bit(INT_EN, &ushc->flags);
399}
400
401static void ushc_clean_up(struct ushc_data *ushc)
402{
403 usb_free_urb(ushc->int_urb);
404 usb_free_urb(ushc->csw_urb);
405 usb_free_urb(ushc->data_urb);
406 usb_free_urb(ushc->cbw_urb);
407
408 kfree(ushc->int_data);
409 kfree(ushc->cbw);
410 kfree(ushc->csw);
411
412 mmc_free_host(ushc->mmc);
413}
414
/* MMC host operations implemented by this driver. */
static const struct mmc_host_ops ushc_ops = {
	.request = ushc_request,
	.set_ios = ushc_set_ios,
	.get_cd = ushc_get_cd,
	.enable_sdio_irq = ushc_enable_sdio_irq,
};
421
422static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id)
423{
424 struct usb_device *usb_dev = interface_to_usbdev(intf);
425 struct mmc_host *mmc;
426 struct ushc_data *ushc;
427 int ret;
428
429 mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
430 if (mmc == NULL)
431 return -ENOMEM;
432 ushc = mmc_priv(mmc);
433 usb_set_intfdata(intf, ushc);
434
435 ushc->usb_dev = usb_dev;
436 ushc->mmc = mmc;
437
438 spin_lock_init(&ushc->lock);
439
440 ret = ushc_hw_reset(ushc);
441 if (ret < 0)
442 goto err;
443
444 /* Read capabilities. */
445 ret = ushc_hw_get_caps(ushc);
446 if (ret < 0)
447 goto err;
448
449 mmc->ops = &ushc_ops;
450
451 mmc->f_min = 400000;
452 mmc->f_max = 50000000;
453 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
454 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
455 mmc->caps |= (ushc->caps & USHC_GET_CAPS_HIGH_SPD) ? MMC_CAP_SD_HIGHSPEED : 0;
456
457 mmc->max_seg_size = 512*511;
458 mmc->max_segs = 1;
459 mmc->max_req_size = 512*511;
460 mmc->max_blk_size = 512;
461 mmc->max_blk_count = 511;
462
463 ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL);
464 if (ushc->int_urb == NULL) {
465 ret = -ENOMEM;
466 goto err;
467 }
468 ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL);
469 if (ushc->int_data == NULL) {
470 ret = -ENOMEM;
471 goto err;
472 }
473 usb_fill_int_urb(ushc->int_urb, ushc->usb_dev,
474 usb_rcvintpipe(usb_dev,
475 intf->cur_altsetting->endpoint[0].desc.bEndpointAddress),
476 ushc->int_data, sizeof(struct ushc_int_data),
477 int_callback, ushc,
478 intf->cur_altsetting->endpoint[0].desc.bInterval);
479
480 ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL);
481 if (ushc->cbw_urb == NULL) {
482 ret = -ENOMEM;
483 goto err;
484 }
485 ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL);
486 if (ushc->cbw == NULL) {
487 ret = -ENOMEM;
488 goto err;
489 }
490 ushc->cbw->signature = USHC_CBW_SIGNATURE;
491
492 usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2),
493 ushc->cbw, sizeof(struct ushc_cbw),
494 cbw_callback, ushc);
495
496 ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL);
497 if (ushc->data_urb == NULL) {
498 ret = -ENOMEM;
499 goto err;
500 }
501
502 ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL);
503 if (ushc->csw_urb == NULL) {
504 ret = -ENOMEM;
505 goto err;
506 }
507 ushc->csw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL);
508 if (ushc->csw == NULL) {
509 ret = -ENOMEM;
510 goto err;
511 }
512 usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6),
513 ushc->csw, sizeof(struct ushc_csw),
514 csw_callback, ushc);
515
516 ret = mmc_add_host(ushc->mmc);
517 if (ret)
518 goto err;
519
520 ret = usb_submit_urb(ushc->int_urb, GFP_KERNEL);
521 if (ret < 0) {
522 mmc_remove_host(ushc->mmc);
523 goto err;
524 }
525
526 return 0;
527
528err:
529 ushc_clean_up(ushc);
530 return ret;
531}
532
/*
 * USB disconnect handler.  Mark the device gone first so new requests
 * fail fast, then kill all in-flight URBs before unregistering and
 * freeing the host.
 */
static void ushc_disconnect(struct usb_interface *intf)
{
	struct ushc_data *ushc = usb_get_intfdata(intf);

	/* Under the lock so ushc_request() sees the flag consistently. */
	spin_lock_irq(&ushc->lock);
	set_bit(DISCONNECTED, &ushc->flags);
	spin_unlock_irq(&ushc->lock);

	usb_kill_urb(ushc->int_urb);
	usb_kill_urb(ushc->cbw_urb);
	usb_kill_urb(ushc->data_urb);
	usb_kill_urb(ushc->csw_urb);

	mmc_remove_host(ushc->mmc);

	ushc_clean_up(ushc);
}
550
/* USB IDs this driver binds to. */
static struct usb_device_id ushc_id_table[] = {
	/* CSR USB SD Host Controller */
	{ USB_DEVICE(0x0a12, 0x5d10) },
	{ },
};
MODULE_DEVICE_TABLE(usb, ushc_id_table);

static struct usb_driver ushc_driver = {
	.name = "ushc",
	.id_table = ushc_id_table,
	.probe = ushc_probe,
	.disconnect = ushc_disconnect,
};
564
/* Module init: register the USB driver with the USB core. */
static int __init ushc_init(void)
{
	return usb_register(&ushc_driver);
}
module_init(ushc_init);

/* Module exit: unregister the USB driver. */
static void __exit ushc_exit(void)
{
	usb_deregister(&ushc_driver);
}
module_exit(ushc_exit);

MODULE_DESCRIPTION("USB SD Host Controller driver");
MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 19f2d72dbca5..4dfe2c02ea91 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -802,12 +802,9 @@ static const struct mmc_host_ops via_sdc_ops = {
802 802
803static void via_reset_pcictrl(struct via_crdr_mmc_host *host) 803static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
804{ 804{
805 void __iomem *addrbase;
806 unsigned long flags; 805 unsigned long flags;
807 u8 gatt; 806 u8 gatt;
808 807
809 addrbase = host->pcictrl_mmiobase;
810
811 spin_lock_irqsave(&host->lock, flags); 808 spin_lock_irqsave(&host->lock, flags);
812 809
813 via_save_pcictrlreg(host); 810 via_save_pcictrlreg(host);
@@ -1050,8 +1047,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)
1050 mmc->ops = &via_sdc_ops; 1047 mmc->ops = &via_sdc_ops;
1051 1048
1052 /*Hardware cannot do scatter lists*/ 1049 /*Hardware cannot do scatter lists*/
1053 mmc->max_hw_segs = 1; 1050 mmc->max_segs = 1;
1054 mmc->max_phys_segs = 1;
1055 1051
1056 mmc->max_blk_size = VIA_CRDR_MAX_BLOCK_LENGTH; 1052 mmc->max_blk_size = VIA_CRDR_MAX_BLOCK_LENGTH;
1057 mmc->max_blk_count = VIA_CRDR_MAX_BLOCK_COUNT; 1053 mmc->max_blk_count = VIA_CRDR_MAX_BLOCK_COUNT;
@@ -1091,14 +1087,13 @@ static int __devinit via_sd_probe(struct pci_dev *pcidev,
1091 struct mmc_host *mmc; 1087 struct mmc_host *mmc;
1092 struct via_crdr_mmc_host *sdhost; 1088 struct via_crdr_mmc_host *sdhost;
1093 u32 base, len; 1089 u32 base, len;
1094 u8 rev, gatt; 1090 u8 gatt;
1095 int ret; 1091 int ret;
1096 1092
1097 pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev);
1098 pr_info(DRV_NAME 1093 pr_info(DRV_NAME
1099 ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", 1094 ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n",
1100 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, 1095 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1101 (int)rev); 1096 (int)pcidev->revision);
1102 1097
1103 ret = pci_enable_device(pcidev); 1098 ret = pci_enable_device(pcidev);
1104 if (ret) 1099 if (ret)
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
new file mode 100644
index 000000000000..d4455ffbefd8
--- /dev/null
+++ b/drivers/mmc/host/vub300.c
@@ -0,0 +1,2503 @@
1/*
2 * Remote VUB300 SDIO/SDmem Host Controller Driver
3 *
4 * Copyright (C) 2010 Elan Digital Systems Limited
5 *
6 * based on USB Skeleton driver - 2.2
7 *
8 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation, version 2
13 *
14 * VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot
15 * Any SDIO/SDmem/MMC device plugged into the VUB300 will appear,
16 * by virtue of this driver, to have been plugged into a local
17 * SDIO host controller, similar to, say, a PCI Ricoh controller
18 * This is because this kernel device driver is both a USB 2.0
19 * client device driver AND an MMC host controller driver. Thus
20 * if there is an existing driver for the inserted SDIO/SDmem/MMC
21 * device then that driver will be used by the kernel to manage
22 * the device in exactly the same fashion as if it had been
23 * directly plugged into, say, a local pci bus Ricoh controller
24 *
25 * RANT: this driver was written using a display 128x48 - converting it
26 * to a line width of 80 makes it very difficult to support. In
27 * particular functions have been broken down into sub functions
28 * and the original meaningful names have been shortened into
29 * cryptic ones.
30 * The problem is that executing a fragment of code subject to
31 * two conditions means an indentation of 24, thus leaving only
32 * 56 characters for a C statement. And that is quite ridiculous!
33 *
34 * Data types: data passed to/from the VUB300 is fixed to a number of
35 * bits and driver data fields reflect that limit by using
36 * u8, u16, u32
37 */
38#include <linux/kernel.h>
39#include <linux/errno.h>
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/kref.h>
44#include <linux/uaccess.h>
45#include <linux/usb.h>
46#include <linux/mutex.h>
47#include <linux/mmc/host.h>
48#include <linux/mmc/card.h>
49#include <linux/mmc/sdio_func.h>
50#include <linux/mmc/sdio_ids.h>
51#include <linux/workqueue.h>
52#include <linux/ctype.h>
53#include <linux/firmware.h>
54#include <linux/scatterlist.h>
55
/*
 * On-the-wire message formats exchanged with the VUB300 firmware.
 * All structures are packed since they describe USB payloads.
 */
struct host_controller_info {
	u8 info_size;
	u16 firmware_version;
	u8 number_of_ports;
} __packed;

#define FIRMWARE_BLOCK_BOUNDARY 1024
/* Header sent to start an SD command (padded to 64 bytes). */
struct sd_command_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	u8 command_type; /* Bit7 - Rd/Wr */
	u8 command_index;
	u8 transfer_size[4]; /* ReadSize + ReadSize */
	/* NOTE(review): the comment above may mean Read + Write size - confirm */
	u8 response_type;
	u8 arguments[4];
	u8 block_count[2];
	u8 block_size[2];
	u8 block_boundary[2];
	u8 reserved[44]; /* to pad out to 64 bytes */
} __packed;

/* Header sent to poll the firmware for SDIO interrupts. */
struct sd_irqpoll_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	u8 command_type; /* Bit7 - Rd/Wr */
	u8 padding[16]; /* don't ask why !! */
	u8 poll_timeout_msb;
	u8 poll_timeout_lsb;
	u8 reserved[42]; /* to pad out to 64 bytes */
} __packed;

/* Common prefix of every response from the device. */
struct sd_common_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
} __packed;

struct sd_response_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	u8 command_type;
	u8 command_index;
	u8 command_response[0]; /* variable-length response payload */
} __packed;

struct sd_status_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	u16 port_flags;
	u32 sdio_clock;
	u16 host_header_size;
	u16 func_header_size;
	u16 ctrl_header_size;
} __packed;

struct sd_error_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	u8 error_code; /* one of the SD_ERROR_* codes below */
} __packed;

struct sd_interrupt_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
} __packed;

/* One offloaded register access: 4 command bytes, 4 response bytes. */
struct offload_registers_access {
	u8 command_byte[4];
	u8 Respond_Byte[4];
} __packed;

#define INTERRUPT_REGISTER_ACCESSES 15
struct sd_offloaded_interrupt {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES];
} __packed;

struct sd_register_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	u8 command_type;
	u8 command_index;
	u8 command_response[6];
} __packed;

#define PIGGYBACK_REGISTER_ACCESSES 14
struct sd_offloaded_piggyback {
	struct sd_register_header sdio;
	struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES];
} __packed;

/* All possible layouts of a response buffer, overlaid. */
union sd_response {
	struct sd_common_header common;
	struct sd_status_header status;
	struct sd_error_header error;
	struct sd_interrupt_header interrupt;
	struct sd_response_header response;
	struct sd_offloaded_interrupt irq;
	struct sd_offloaded_piggyback pig;
} __packed;

/* All possible layouts of a command buffer, overlaid. */
union sd_command {
	struct sd_command_header head;
	struct sd_irqpoll_header poll;
} __packed;

/* Response-format selector sent in sd_command_header.response_type. */
enum SD_RESPONSE_TYPE {
	SDRT_UNSPECIFIED = 0,
	SDRT_NONE,
	SDRT_1,
	SDRT_1B,
	SDRT_2,
	SDRT_3,
	SDRT_4,
	SDRT_5,
	SDRT_5B,
	SDRT_6,
	SDRT_7,
};

/* header_type values of responses from the device. */
#define RESPONSE_INTERRUPT		0x01
#define RESPONSE_ERROR			0x02
#define RESPONSE_STATUS			0x03
#define RESPONSE_IRQ_DISABLED		0x05
#define RESPONSE_IRQ_ENABLED		0x06
#define RESPONSE_PIGGYBACKED		0x07
#define RESPONSE_NO_INTERRUPT		0x08
#define RESPONSE_PIG_DISABLED		0x09
#define RESPONSE_PIG_ENABLED		0x0A
/* sd_error_header.error_code values. */
#define SD_ERROR_1BIT_TIMEOUT		0x01
#define SD_ERROR_4BIT_TIMEOUT		0x02
#define SD_ERROR_1BIT_CRC_WRONG		0x03
#define SD_ERROR_4BIT_CRC_WRONG		0x04
#define SD_ERROR_1BIT_CRC_ERROR		0x05
#define SD_ERROR_4BIT_CRC_ERROR		0x06
#define SD_ERROR_NO_CMD_ENDBIT		0x07
#define SD_ERROR_NO_1BIT_DATEND		0x08
#define SD_ERROR_NO_4BIT_DATEND		0x09
#define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT 0x0A
#define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT 0x0B
#define SD_ERROR_ILLEGAL_COMMAND	0x0C
#define SD_ERROR_NO_DEVICE		0x0D
#define SD_ERROR_TRANSFER_LENGTH	0x0E
#define SD_ERROR_1BIT_DATA_TIMEOUT	0x0F
#define SD_ERROR_4BIT_DATA_TIMEOUT	0x10
#define SD_ERROR_ILLEGAL_STATE		0x11
#define SD_ERROR_UNKNOWN_ERROR		0x12
#define SD_ERROR_RESERVED_ERROR		0x13
#define SD_ERROR_INVALID_FUNCTION	0x14
#define SD_ERROR_OUT_OF_RANGE		0x15
#define SD_ERROR_STAT_CMD		0x16
#define SD_ERROR_STAT_DATA		0x17
#define SD_ERROR_STAT_CMD_TIMEOUT	0x18
#define SD_ERROR_SDCRDY_STUCK		0x19
#define SD_ERROR_UNHANDLED		0x1A
#define SD_ERROR_OVERRUN		0x1B
#define SD_ERROR_PIO_TIMEOUT		0x1C

/* Extract fields of an SDIO command argument: function number
 * (bits 30:28) and register address (bits 25:9). */
#define FUN(c) (0x000007 & (c->arg>>28))
#define REG(c) (0x01FFFF & (c->arg>>9))
225
226static int limit_speed_to_24_MHz;
227module_param(limit_speed_to_24_MHz, bool, 0644);
228MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz");
229
230static int pad_input_to_usb_pkt;
231module_param(pad_input_to_usb_pkt, bool, 0644);
232MODULE_PARM_DESC(pad_input_to_usb_pkt,
233 "Pad USB data input transfers to whole USB Packet");
234
235static int disable_offload_processing;
236module_param(disable_offload_processing, bool, 0644);
237MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing");
238
239static int force_1_bit_data_xfers;
240module_param(force_1_bit_data_xfers, bool, 0644);
241MODULE_PARM_DESC(force_1_bit_data_xfers,
242 "Force SDIO Data Transfers to 1-bit Mode");
243
244static int force_polling_for_irqs;
245module_param(force_polling_for_irqs, bool, 0644);
246MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts");
247
248static int firmware_irqpoll_timeout = 1024;
249module_param(firmware_irqpoll_timeout, int, 0644);
250MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout");
251
252static int force_max_req_size = 128;
253module_param(force_max_req_size, int, 0644);
254MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes");
255
256#ifdef SMSC_DEVELOPMENT_BOARD
257static int firmware_rom_wait_states = 0x04;
258#else
259static int firmware_rom_wait_states = 0x1C;
260#endif
261
262module_param(firmware_rom_wait_states, bool, 0644);
263MODULE_PARM_DESC(firmware_rom_wait_states,
264 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
265
#define ELAN_VENDOR_ID 0x2201
#define VUB300_VENDOR_ID 0x0424
#define VUB300_PRODUCT_ID 0x012C
/* USB IDs this driver binds to. */
static struct usb_device_id vub300_table[] = {
	{USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)},
	{USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)},
	{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, vub300_table);

/* Dedicated workqueues for command processing, irq polling and teardown. */
static struct workqueue_struct *cmndworkqueue;
static struct workqueue_struct *pollworkqueue;
static struct workqueue_struct *deadworkqueue;
279
280static inline int interface_to_InterfaceNumber(struct usb_interface *interface)
281{
282 if (!interface)
283 return -1;
284 if (!interface->cur_altsetting)
285 return -1;
286 return interface->cur_altsetting->desc.bInterfaceNumber;
287}
288
/* Book-keeping for one dynamically offloaded SDIO register. */
struct sdio_register {
	unsigned func_num:3;
	unsigned sdio_reg:17;
	unsigned activate:1;
	unsigned prepared:1;
	unsigned regvalue:8;
	unsigned response:8;
	unsigned sparebit:26;
};

/* Per-device driver state, stored in the mmc_host's private area. */
struct vub300_mmc_host {
	struct usb_device *udev;
	struct usb_interface *interface;
	struct kref kref;
	struct mutex cmd_mutex;
	struct mutex irq_mutex;
	char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */
	u8 cmnd_out_ep; /* EndPoint for commands */
	u8 cmnd_res_ep; /* EndPoint for responses */
	u8 data_out_ep; /* EndPoint for out data */
	u8 data_inp_ep; /* EndPoint for inp data */
	bool card_powered;
	bool card_present;
	bool read_only;
	bool large_usb_packets;
	bool app_spec; /* ApplicationSpecific */
	bool irq_enabled; /* by the MMC CORE */
	bool irq_disabled; /* in the firmware */
	unsigned bus_width:4;
	u8 total_offload_count;
	u8 dynamic_register_count;
	u8 resp_len;
	u32 datasize;
	int errors;
	int usb_transport_fail;
	int usb_timed_out;
	int irqs_queued;
	struct sdio_register sdio_register[16];
	/* Per-SDIO-function offload register ring. */
	struct offload_interrupt_function_register {
#define MAXREGBITS 4
#define MAXREGS (1<<MAXREGBITS)
#define MAXREGMASK (MAXREGS-1)
		u8 offload_count;
		u32 offload_point;
		struct offload_registers_access reg[MAXREGS];
	} fn[8];
	u16 fbs[8]; /* Function Block Size */
	struct mmc_command *cmd;
	struct mmc_request *req;
	struct mmc_data *data;
	struct mmc_host *mmc;
	struct urb *urb;
	struct urb *command_out_urb;
	struct urb *command_res_urb;
	struct completion command_complete;
	struct completion irqpoll_complete;
	union sd_command cmnd;
	union sd_response resp;
	struct timer_list sg_transfer_timer;
	struct usb_sg_request sg_request;
	struct timer_list inactivity_timer;
	struct work_struct deadwork;
	struct work_struct cmndwork;
	struct delayed_work pollwork;
	struct host_controller_info hc_info;
	struct sd_status_header system_port_status;
	u8 padded_buffer[64];
};

#define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref)
/* wValue opcodes of the vendor control requests sent to the firmware. */
#define SET_TRANSFER_PSEUDOCODE		21
#define SET_INTERRUPT_PSEUDOCODE	20
#define SET_FAILURE_MODE		18
#define SET_ROM_WAIT_STATES		16
#define SET_IRQ_ENABLE			13
#define SET_CLOCK_SPEED			11
#define SET_FUNCTION_BLOCK_SIZE		9
#define SET_SD_DATA_MODE		6
#define SET_SD_POWER			4
#define ENTER_DFU_MODE			3
#define GET_HC_INF0			1
#define GET_SYSTEM_PORT_STATUS		0
371
/*
 * Final kref release - may run in softirq context.
 *
 * Frees the command URBs, drops the usb_device reference and releases
 * the mmc_host; the vub300_mmc_host itself lives in the mmc_host's
 * private area, so mmc_free_host() frees it too.
 */
static void vub300_delete(struct kref *kref)
{				/* kref callback - softirq */
	struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref);
	struct mmc_host *mmc = vub300->mmc;
	usb_free_urb(vub300->command_out_urb);
	vub300->command_out_urb = NULL;
	usb_free_urb(vub300->command_res_urb);
	vub300->command_res_urb = NULL;
	usb_put_dev(vub300->udev);
	mmc_free_host(mmc);
	/*
	 * and hence also frees vub300
	 * which is contained at the end of struct mmc
	 */
}
387
388static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300)
389{
390 kref_get(&vub300->kref);
391 if (queue_work(cmndworkqueue, &vub300->cmndwork)) {
392 /*
393 * then the cmndworkqueue was not previously
394 * running and the above get ref is obvious
395 * required and will be put when the thread
396 * terminates by a specific call
397 */
398 } else {
399 /*
400 * the cmndworkqueue was already running from
401 * a previous invocation and thus to keep the
402 * kref counts correct we must undo the get
403 */
404 kref_put(&vub300->kref, vub300_delete);
405 }
406}
407
408static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay)
409{
410 kref_get(&vub300->kref);
411 if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) {
412 /*
413 * then the pollworkqueue was not previously
414 * running and the above get ref is obvious
415 * required and will be put when the thread
416 * terminates by a specific call
417 */
418 } else {
419 /*
420 * the pollworkqueue was already running from
421 * a previous invocation and thus to keep the
422 * kref counts correct we must undo the get
423 */
424 kref_put(&vub300->kref, vub300_delete);
425 }
426}
427
428static void vub300_queue_dead_work(struct vub300_mmc_host *vub300)
429{
430 kref_get(&vub300->kref);
431 if (queue_work(deadworkqueue, &vub300->deadwork)) {
432 /*
433 * then the deadworkqueue was not previously
434 * running and the above get ref is obvious
435 * required and will be put when the thread
436 * terminates by a specific call
437 */
438 } else {
439 /*
440 * the deadworkqueue was already running from
441 * a previous invocation and thus to keep the
442 * kref counts correct we must undo the get
443 */
444 kref_put(&vub300->kref, vub300_delete);
445 }
446}
447
448static void irqpoll_res_completed(struct urb *urb)
449{ /* urb completion handler - hardirq */
450 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
451 if (urb->status)
452 vub300->usb_transport_fail = urb->status;
453 complete(&vub300->irqpoll_complete);
454}
455
456static void irqpoll_out_completed(struct urb *urb)
457{ /* urb completion handler - hardirq */
458 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
459 if (urb->status) {
460 vub300->usb_transport_fail = urb->status;
461 complete(&vub300->irqpoll_complete);
462 return;
463 } else {
464 int ret;
465 unsigned int pipe =
466 usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
467 usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
468 &vub300->resp, sizeof(vub300->resp),
469 irqpoll_res_completed, vub300);
470 vub300->command_res_urb->actual_length = 0;
471 ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
472 if (ret) {
473 vub300->usb_transport_fail = ret;
474 complete(&vub300->irqpoll_complete);
475 }
476 return;
477 }
478}
479
480static void send_irqpoll(struct vub300_mmc_host *vub300)
481{
482 /* cmd_mutex is held by vub300_pollwork_thread */
483 int retval;
484 int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout);
485 vub300->cmnd.poll.header_size = 22;
486 vub300->cmnd.poll.header_type = 1;
487 vub300->cmnd.poll.port_number = 0;
488 vub300->cmnd.poll.command_type = 2;
489 vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout;
490 vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8);
491 usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
492 usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep)
493 , &vub300->cmnd, sizeof(vub300->cmnd)
494 , irqpoll_out_completed, vub300);
495 retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
496 if (0 > retval) {
497 vub300->usb_transport_fail = retval;
498 vub300_queue_poll_work(vub300, 1);
499 complete(&vub300->irqpoll_complete);
500 return;
501 } else {
502 return;
503 }
504}
505
506static void new_system_port_status(struct vub300_mmc_host *vub300)
507{
508 int old_card_present = vub300->card_present;
509 int new_card_present =
510 (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
511 vub300->read_only =
512 (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
513 if (new_card_present && !old_card_present) {
514 dev_info(&vub300->udev->dev, "card just inserted\n");
515 vub300->card_present = 1;
516 vub300->bus_width = 0;
517 if (disable_offload_processing)
518 strncpy(vub300->vub_name, "EMPTY Processing Disabled",
519 sizeof(vub300->vub_name));
520 else
521 vub300->vub_name[0] = 0;
522 mmc_detect_change(vub300->mmc, 1);
523 } else if (!new_card_present && old_card_present) {
524 dev_info(&vub300->udev->dev, "card just ejected\n");
525 vub300->card_present = 0;
526 mmc_detect_change(vub300->mmc, 0);
527 } else {
528 /* no change */
529 }
530}
531
532static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300,
533 struct offload_registers_access
534 *register_access, u8 func)
535{
536 u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count;
537 memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access,
538 sizeof(struct offload_registers_access));
539 vub300->fn[func].offload_count += 1;
540 vub300->total_offload_count += 1;
541}
542
543static void add_offloaded_reg(struct vub300_mmc_host *vub300,
544 struct offload_registers_access *register_access)
545{
546 u32 Register = ((0x03 & register_access->command_byte[0]) << 15)
547 | ((0xFF & register_access->command_byte[1]) << 7)
548 | ((0xFE & register_access->command_byte[2]) >> 1);
549 u8 func = ((0x70 & register_access->command_byte[0]) >> 4);
550 u8 regs = vub300->dynamic_register_count;
551 u8 i = 0;
552 while (0 < regs-- && 1 == vub300->sdio_register[i].activate) {
553 if (vub300->sdio_register[i].func_num == func &&
554 vub300->sdio_register[i].sdio_reg == Register) {
555 if (vub300->sdio_register[i].prepared == 0)
556 vub300->sdio_register[i].prepared = 1;
557 vub300->sdio_register[i].response =
558 register_access->Respond_Byte[2];
559 vub300->sdio_register[i].regvalue =
560 register_access->Respond_Byte[3];
561 return;
562 } else {
563 i += 1;
564 continue;
565 }
566 };
567 __add_offloaded_reg_to_fifo(vub300, register_access, func);
568}
569
570static void check_vub300_port_status(struct vub300_mmc_host *vub300)
571{
572 /*
573 * cmd_mutex is held by vub300_pollwork_thread,
574 * vub300_deadwork_thread or vub300_cmndwork_thread
575 */
576 int retval;
577 retval =
578 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
579 GET_SYSTEM_PORT_STATUS,
580 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
581 0x0000, 0x0000, &vub300->system_port_status,
582 sizeof(vub300->system_port_status), HZ);
583 if (sizeof(vub300->system_port_status) == retval)
584 new_system_port_status(vub300);
585}
586
587static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300)
588{
589 /* cmd_mutex is held by vub300_pollwork_thread */
590 if (vub300->command_res_urb->actual_length == 0)
591 return;
592
593 switch (vub300->resp.common.header_type) {
594 case RESPONSE_INTERRUPT:
595 mutex_lock(&vub300->irq_mutex);
596 if (vub300->irq_enabled)
597 mmc_signal_sdio_irq(vub300->mmc);
598 else
599 vub300->irqs_queued += 1;
600 vub300->irq_disabled = 1;
601 mutex_unlock(&vub300->irq_mutex);
602 break;
603 case RESPONSE_ERROR:
604 if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE)
605 check_vub300_port_status(vub300);
606 break;
607 case RESPONSE_STATUS:
608 vub300->system_port_status = vub300->resp.status;
609 new_system_port_status(vub300);
610 if (!vub300->card_present)
611 vub300_queue_poll_work(vub300, HZ / 5);
612 break;
613 case RESPONSE_IRQ_DISABLED:
614 {
615 int offloaded_data_length = vub300->resp.common.header_size - 3;
616 int register_count = offloaded_data_length >> 3;
617 int ri = 0;
618 while (register_count--) {
619 add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
620 ri += 1;
621 }
622 mutex_lock(&vub300->irq_mutex);
623 if (vub300->irq_enabled)
624 mmc_signal_sdio_irq(vub300->mmc);
625 else
626 vub300->irqs_queued += 1;
627 vub300->irq_disabled = 1;
628 mutex_unlock(&vub300->irq_mutex);
629 break;
630 }
631 case RESPONSE_IRQ_ENABLED:
632 {
633 int offloaded_data_length = vub300->resp.common.header_size - 3;
634 int register_count = offloaded_data_length >> 3;
635 int ri = 0;
636 while (register_count--) {
637 add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
638 ri += 1;
639 }
640 mutex_lock(&vub300->irq_mutex);
641 if (vub300->irq_enabled)
642 mmc_signal_sdio_irq(vub300->mmc);
643 else if (vub300->irqs_queued)
644 vub300->irqs_queued += 1;
645 else
646 vub300->irqs_queued += 1;
647 vub300->irq_disabled = 0;
648 mutex_unlock(&vub300->irq_mutex);
649 break;
650 }
651 case RESPONSE_NO_INTERRUPT:
652 vub300_queue_poll_work(vub300, 1);
653 break;
654 default:
655 break;
656 }
657}
658
659static void __do_poll(struct vub300_mmc_host *vub300)
660{
661 /* cmd_mutex is held by vub300_pollwork_thread */
662 long commretval;
663 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
664 init_completion(&vub300->irqpoll_complete);
665 send_irqpoll(vub300);
666 commretval = wait_for_completion_timeout(&vub300->irqpoll_complete,
667 msecs_to_jiffies(500));
668 if (vub300->usb_transport_fail) {
669 /* no need to do anything */
670 } else if (commretval == 0) {
671 vub300->usb_timed_out = 1;
672 usb_kill_urb(vub300->command_out_urb);
673 usb_kill_urb(vub300->command_res_urb);
674 } else if (commretval < 0) {
675 vub300_queue_poll_work(vub300, 1);
676 } else { /* commretval > 0 */
677 __vub300_irqpoll_response(vub300);
678 }
679}
680
681/* this thread runs only when the driver
682 * is trying to poll the device for an IRQ
683 */
static void vub300_pollwork_thread(struct work_struct *work)
{ /* NOT irq */
	struct vub300_mmc_host *vub300 = container_of(work,
			      struct vub300_mmc_host, pollwork.work);
	/*
	 * vub300_queue_poll_work() took a kref for this work item;
	 * every exit path below drops it exactly once.
	 */
	if (!vub300->interface) {
		/* the USB interface is gone - just release our reference */
		kref_put(&vub300->kref, vub300_delete);
		return;
	}
	mutex_lock(&vub300->cmd_mutex);
	if (vub300->cmd) {
		/* a command is in flight - retry the poll shortly */
		vub300_queue_poll_work(vub300, 1);
	} else if (!vub300->card_present) {
		/* no need to do anything */
	} else { /* vub300->card_present */
		mutex_lock(&vub300->irq_mutex);
		if (!vub300->irq_enabled) {
			mutex_unlock(&vub300->irq_mutex);
		} else if (vub300->irqs_queued) {
			/* deliver one previously queued offloaded interrupt */
			vub300->irqs_queued -= 1;
			mmc_signal_sdio_irq(vub300->mmc);
			mod_timer(&vub300->inactivity_timer, jiffies + HZ);
			mutex_unlock(&vub300->irq_mutex);
		} else { /* NOT vub300->irqs_queued */
			/* irq_mutex is dropped first: __do_poll() blocks */
			mutex_unlock(&vub300->irq_mutex);
			__do_poll(vub300);
		}
	}
	mutex_unlock(&vub300->cmd_mutex);
	kref_put(&vub300->kref, vub300_delete);
}
714
static void vub300_deadwork_thread(struct work_struct *work)
{ /* NOT irq */
	struct vub300_mmc_host *vub300 =
		container_of(work, struct vub300_mmc_host, deadwork);
	/*
	 * Periodic "is the device still there" check queued by the
	 * inactivity timer; vub300_queue_dead_work() took a kref for
	 * this work item, dropped on every exit path.
	 */
	if (!vub300->interface) {
		kref_put(&vub300->kref, vub300_delete);
		return;
	}
	mutex_lock(&vub300->cmd_mutex);
	if (vub300->cmd) {
		/*
		 * a command got in as the inactivity
		 * timer expired - so we just let the
		 * processing of the command show if
		 * the device is dead
		 */
	} else if (vub300->card_present) {
		check_vub300_port_status(vub300);
	} else if (vub300->mmc && vub300->mmc->card &&
		   mmc_card_present(vub300->mmc->card)) {
		/*
		 * the MMC core must not have responded
		 * to the previous indication - lets
		 * hope that it eventually does so we
		 * will just ignore this for now
		 */
	} else {
		check_vub300_port_status(vub300);
	}
	mod_timer(&vub300->inactivity_timer, jiffies + HZ);
	mutex_unlock(&vub300->cmd_mutex);
	kref_put(&vub300->kref, vub300_delete);
}
748
749static void vub300_inactivity_timer_expired(unsigned long data)
750{ /* softirq */
751 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
752 if (!vub300->interface) {
753 kref_put(&vub300->kref, vub300_delete);
754 } else if (vub300->cmd) {
755 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
756 } else {
757 vub300_queue_dead_work(vub300);
758 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
759 }
760}
761
762static int vub300_response_error(u8 error_code)
763{
764 switch (error_code) {
765 case SD_ERROR_PIO_TIMEOUT:
766 case SD_ERROR_1BIT_TIMEOUT:
767 case SD_ERROR_4BIT_TIMEOUT:
768 return -ETIMEDOUT;
769 case SD_ERROR_STAT_DATA:
770 case SD_ERROR_OVERRUN:
771 case SD_ERROR_STAT_CMD:
772 case SD_ERROR_STAT_CMD_TIMEOUT:
773 case SD_ERROR_SDCRDY_STUCK:
774 case SD_ERROR_UNHANDLED:
775 case SD_ERROR_1BIT_CRC_WRONG:
776 case SD_ERROR_4BIT_CRC_WRONG:
777 case SD_ERROR_1BIT_CRC_ERROR:
778 case SD_ERROR_4BIT_CRC_ERROR:
779 case SD_ERROR_NO_CMD_ENDBIT:
780 case SD_ERROR_NO_1BIT_DATEND:
781 case SD_ERROR_NO_4BIT_DATEND:
782 case SD_ERROR_1BIT_DATA_TIMEOUT:
783 case SD_ERROR_4BIT_DATA_TIMEOUT:
784 case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT:
785 case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT:
786 return -EILSEQ;
787 case 33:
788 return -EILSEQ;
789 case SD_ERROR_ILLEGAL_COMMAND:
790 return -EINVAL;
791 case SD_ERROR_NO_DEVICE:
792 return -ENOMEDIUM;
793 default:
794 return -ENODEV;
795 }
796}
797
static void command_res_completed(struct urb *urb)
{ /* urb completion handler - hardirq */
	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
	if (urb->status) {
		/* we have to let the initiator handle the error */
	} else if (vub300->command_res_urb->actual_length == 0) {
		/*
		 * we have seen this happen once or twice and
		 * we suspect a buggy USB host controller
		 */
	} else if (!vub300->data) {
		/* this means that the command (typically CMD52) succeeded */
	} else if (vub300->resp.common.header_type != 0x02) {
		/*
		 * this is an error response from the VUB300 chip
		 * and we let the initiator handle it
		 */
	} else if (vub300->urb) {
		/* error response while a single-urb bulk transfer is up */
		vub300->cmd->error =
			vub300_response_error(vub300->resp.error.error_code);
		usb_unlink_urb(vub300->urb);
	} else {
		/* error response while a scatter-gather transfer is up */
		vub300->cmd->error =
			vub300_response_error(vub300->resp.error.error_code);
		usb_sg_cancel(&vub300->sg_request);
	}
	complete(&vub300->command_complete); /* got_response_in */
}
826
827static void command_out_completed(struct urb *urb)
828{ /* urb completion handler - hardirq */
829 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
830 if (urb->status) {
831 complete(&vub300->command_complete);
832 } else {
833 int ret;
834 unsigned int pipe =
835 usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
836 usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
837 &vub300->resp, sizeof(vub300->resp),
838 command_res_completed, vub300);
839 vub300->command_res_urb->actual_length = 0;
840 ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
841 if (ret == 0) {
842 /*
843 * the urb completion handler will call
844 * our completion handler
845 */
846 } else {
847 /*
848 * and thus we only call it directly
849 * when it will not be called
850 */
851 complete(&vub300->command_complete);
852 }
853 }
854}
855
856/*
857 * the STUFF bits are masked out for the comparisons
858 */
859static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300,
860 u32 cmd_arg)
861{
862 if ((0xFBFFFE00 & cmd_arg) == 0x80022200)
863 vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]);
864 else if ((0xFBFFFE00 & cmd_arg) == 0x80022000)
865 vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]);
866 else if ((0xFBFFFE00 & cmd_arg) == 0x80042200)
867 vub300->fbs[2] = (cmd_arg << 8) | (0x00FF & vub300->fbs[2]);
868 else if ((0xFBFFFE00 & cmd_arg) == 0x80042000)
869 vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]);
870 else if ((0xFBFFFE00 & cmd_arg) == 0x80062200)
871 vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]);
872 else if ((0xFBFFFE00 & cmd_arg) == 0x80062000)
873 vub300->fbs[3] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]);
874 else if ((0xFBFFFE00 & cmd_arg) == 0x80082200)
875 vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]);
876 else if ((0xFBFFFE00 & cmd_arg) == 0x80082000)
877 vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]);
878 else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200)
879 vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]);
880 else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000)
881 vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]);
882 else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200)
883 vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]);
884 else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000)
885 vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]);
886 else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200)
887 vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]);
888 else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000)
889 vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]);
890 else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00)
891 vub300->bus_width = 1;
892 else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02)
893 vub300->bus_width = 4;
894}
895
static void send_command(struct vub300_mmc_host *vub300)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	/*
	 * Translate the mmc_command in vub300->cmd into the 64-byte
	 * padded packet in vub300->cmnd and submit it on the bulk-out
	 * endpoint.  On any failure cmd->error is set and
	 * command_complete is signalled immediately.
	 */
	struct mmc_command *cmd = vub300->cmd;
	struct mmc_data *data = vub300->data;
	int retval;
	int i;
	u8 response_type;
	if (vub300->app_spec) {
		/* previous command was CMD55 - decode as an ACMD */
		switch (cmd->opcode) {
		case 6:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			/* ACMD6 sets the bus width - mirror it locally */
			if (0x00000000 == (0x00000003 & cmd->arg))
				vub300->bus_width = 1;
			else if (0x00000002 == (0x00000003 & cmd->arg))
				vub300->bus_width = 4;
			else
				dev_err(&vub300->udev->dev,
					"unexpected ACMD6 bus_width=%d\n",
					0x00000003 & cmd->arg);
			break;
		case 13:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 22:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 23:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 41:
			response_type = SDRT_3;
			vub300->resp_len = 6;
			break;
		case 42:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 51:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 55:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		default:
			vub300->resp_len = 0;
			cmd->error = -EINVAL;
			complete(&vub300->command_complete);
			return;
		}
		/* the APP_CMD prefix applies to this one command only */
		vub300->app_spec = 0;
	} else {
		switch (cmd->opcode) {
		case 0:
			response_type = SDRT_NONE;
			vub300->resp_len = 0;
			break;
		case 1:
			response_type = SDRT_3;
			vub300->resp_len = 6;
			break;
		case 2:
			response_type = SDRT_2;
			vub300->resp_len = 17;
			break;
		case 3:
			response_type = SDRT_6;
			vub300->resp_len = 6;
			break;
		case 4:
			response_type = SDRT_NONE;
			vub300->resp_len = 0;
			break;
		case 5:
			response_type = SDRT_4;
			vub300->resp_len = 6;
			break;
		case 6:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 7:
			response_type = SDRT_1B;
			vub300->resp_len = 6;
			break;
		case 8:
			response_type = SDRT_7;
			vub300->resp_len = 6;
			break;
		case 9:
			response_type = SDRT_2;
			vub300->resp_len = 17;
			break;
		case 10:
			response_type = SDRT_2;
			vub300->resp_len = 17;
			break;
		case 12:
			response_type = SDRT_1B;
			vub300->resp_len = 6;
			break;
		case 13:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 15:
			response_type = SDRT_NONE;
			vub300->resp_len = 0;
			break;
		case 16:
			/* CMD16 sets the block length for every function */
			for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
				vub300->fbs[i] = 0xFFFF & cmd->arg;
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 17:
		case 18:
		case 24:
		case 25:
		case 27:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 28:
		case 29:
			response_type = SDRT_1B;
			vub300->resp_len = 6;
			break;
		case 30:
		case 32:
		case 33:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 38:
			response_type = SDRT_1B;
			vub300->resp_len = 6;
			break;
		case 42:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 52:
			response_type = SDRT_5;
			vub300->resp_len = 6;
			/* watch for block-size/bus-width register writes */
			snoop_block_size_and_bus_width(vub300, cmd->arg);
			break;
		case 53:
			response_type = SDRT_5;
			vub300->resp_len = 6;
			break;
		case 55:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			/* next command will be an application command */
			vub300->app_spec = 1;
			break;
		case 56:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		default:
			vub300->resp_len = 0;
			cmd->error = -EINVAL;
			complete(&vub300->command_complete);
			return;
		}
	}
	/*
	 * it is a shame that we can not use "sizeof(struct sd_command_header)"
	 * this is because the packet _must_ be padded to 64 bytes
	 */
	vub300->cmnd.head.header_size = 20;
	vub300->cmnd.head.header_type = 0x00;
	vub300->cmnd.head.port_number = 0; /* "0" means port 1 */
	vub300->cmnd.head.command_type = 0x00; /* standard read command */
	vub300->cmnd.head.response_type = response_type;
	vub300->cmnd.head.command_index = cmd->opcode;
	/* the 32-bit argument goes out big-endian, one byte at a time */
	vub300->cmnd.head.arguments[0] = cmd->arg >> 24;
	vub300->cmnd.head.arguments[1] = cmd->arg >> 16;
	vub300->cmnd.head.arguments[2] = cmd->arg >> 8;
	vub300->cmnd.head.arguments[3] = cmd->arg >> 0;
	if (cmd->opcode == 52) {
		/* CMD52: single-register access, no data phase */
		int fn = 0x7 & (cmd->arg >> 28);
		vub300->cmnd.head.block_count[0] = 0;
		vub300->cmnd.head.block_count[1] = 0;
		vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF;
		vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF;
		vub300->cmnd.head.command_type = 0x00;
		vub300->cmnd.head.transfer_size[0] = 0;
		vub300->cmnd.head.transfer_size[1] = 0;
		vub300->cmnd.head.transfer_size[2] = 0;
		vub300->cmnd.head.transfer_size[3] = 0;
	} else if (!data) {
		/* non-data command: function 0 block size, zero transfer */
		vub300->cmnd.head.block_count[0] = 0;
		vub300->cmnd.head.block_count[1] = 0;
		vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF;
		vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF;
		vub300->cmnd.head.command_type = 0x00;
		vub300->cmnd.head.transfer_size[0] = 0;
		vub300->cmnd.head.transfer_size[1] = 0;
		vub300->cmnd.head.transfer_size[2] = 0;
		vub300->cmnd.head.transfer_size[3] = 0;
	} else if (cmd->opcode == 53) {
		/* CMD53: SDIO extended data transfer, block or byte mode */
		int fn = 0x7 & (cmd->arg >> 28);
		if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK MODE */
			vub300->cmnd.head.block_count[0] =
				(data->blocks >> 8) & 0xFF;
			vub300->cmnd.head.block_count[1] =
				(data->blocks >> 0) & 0xFF;
			vub300->cmnd.head.block_size[0] =
				(data->blksz >> 8) & 0xFF;
			vub300->cmnd.head.block_size[1] =
				(data->blksz >> 0) & 0xFF;
		} else { /* BYTE MODE */
			vub300->cmnd.head.block_count[0] = 0;
			vub300->cmnd.head.block_count[1] = 0;
			vub300->cmnd.head.block_size[0] =
				(vub300->datasize >> 8) & 0xFF;
			vub300->cmnd.head.block_size[1] =
				(vub300->datasize >> 0) & 0xFF;
		}
		/* 0x00 = read from card, 0x80 = write to card */
		vub300->cmnd.head.command_type =
			(MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
		vub300->cmnd.head.transfer_size[0] =
			(vub300->datasize >> 24) & 0xFF;
		vub300->cmnd.head.transfer_size[1] =
			(vub300->datasize >> 16) & 0xFF;
		vub300->cmnd.head.transfer_size[2] =
			(vub300->datasize >> 8) & 0xFF;
		vub300->cmnd.head.transfer_size[3] =
			(vub300->datasize >> 0) & 0xFF;
		if (vub300->datasize < vub300->fbs[fn]) {
			/* transfer smaller than one block - no block count */
			vub300->cmnd.head.block_count[0] = 0;
			vub300->cmnd.head.block_count[1] = 0;
		}
	} else {
		/* generic data command (e.g. CMD17/18/24/25) */
		vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF;
		vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF;
		vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF;
		vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF;
		vub300->cmnd.head.command_type =
			(MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
		vub300->cmnd.head.transfer_size[0] =
			(vub300->datasize >> 24) & 0xFF;
		vub300->cmnd.head.transfer_size[1] =
			(vub300->datasize >> 16) & 0xFF;
		vub300->cmnd.head.transfer_size[2] =
			(vub300->datasize >> 8) & 0xFF;
		vub300->cmnd.head.transfer_size[3] =
			(vub300->datasize >> 0) & 0xFF;
		if (vub300->datasize < vub300->fbs[0]) {
			vub300->cmnd.head.block_count[0] = 0;
			vub300->cmnd.head.block_count[1] = 0;
		}
	}
	if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) {
		/* largest multiple of the block size below the firmware limit */
		u16 block_size = vub300->cmnd.head.block_size[1] |
			(vub300->cmnd.head.block_size[0] << 8);
		u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY -
			(FIRMWARE_BLOCK_BOUNDARY % block_size);
		vub300->cmnd.head.block_boundary[0] =
			(block_boundary >> 8) & 0xFF;
		vub300->cmnd.head.block_boundary[1] =
			(block_boundary >> 0) & 0xFF;
	} else {
		vub300->cmnd.head.block_boundary[0] = 0;
		vub300->cmnd.head.block_boundary[1] = 0;
	}
	usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
			  usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep),
			  &vub300->cmnd, sizeof(vub300->cmnd),
			  command_out_completed, vub300);
	retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
	if (retval < 0) {
		cmd->error = retval;
		complete(&vub300->command_complete);
		return;
	} else {
		return;
	}
}
1183
1184/*
1185 * timer callback runs in atomic mode
1186 * so it cannot call usb_kill_urb()
1187 */
static void vub300_sg_timed_out(unsigned long data)
{
	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
	/* flag the timeout, then asynchronously abort the scatter-gather
	 * transfer and both command urbs (unlink, not kill - see above) */
	vub300->usb_timed_out = 1;
	usb_sg_cancel(&vub300->sg_request);
	usb_unlink_urb(vub300->command_out_urb);
	usb_unlink_urb(vub300->command_res_urb);
}
1196
/*
 * Round @number up to the next multiple of 64 (USB bulk padding).
 */
static u16 roundup_to_multiple_of_64(u16 number)
{
	return (number + 0x3F) & 0xFFC0;
}
1201
1202/*
1203 * this is a separate function to solve the 80 column width restriction
1204 */
1205static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
1206 const struct firmware *fw)
1207{
1208 u8 register_count = 0;
1209 u16 ts = 0;
1210 u16 interrupt_size = 0;
1211 const u8 *data = fw->data;
1212 int size = fw->size;
1213 u8 c;
1214 dev_info(&vub300->udev->dev, "using %s for SDIO offload processing\n",
1215 vub300->vub_name);
1216 do {
1217 c = *data++;
1218 } while (size-- && c); /* skip comment */
1219 dev_info(&vub300->udev->dev, "using offload firmware %s %s\n", fw->data,
1220 vub300->vub_name);
1221 if (size < 4) {
1222 dev_err(&vub300->udev->dev,
1223 "corrupt offload pseudocode in firmware %s\n",
1224 vub300->vub_name);
1225 strncpy(vub300->vub_name, "corrupt offload pseudocode",
1226 sizeof(vub300->vub_name));
1227 return;
1228 }
1229 interrupt_size += *data++;
1230 size -= 1;
1231 interrupt_size <<= 8;
1232 interrupt_size += *data++;
1233 size -= 1;
1234 if (interrupt_size < size) {
1235 u16 xfer_length = roundup_to_multiple_of_64(interrupt_size);
1236 u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
1237 if (xfer_buffer) {
1238 int retval;
1239 memcpy(xfer_buffer, data, interrupt_size);
1240 memset(xfer_buffer + interrupt_size, 0,
1241 xfer_length - interrupt_size);
1242 size -= interrupt_size;
1243 data += interrupt_size;
1244 retval =
1245 usb_control_msg(vub300->udev,
1246 usb_sndctrlpipe(vub300->udev, 0),
1247 SET_INTERRUPT_PSEUDOCODE,
1248 USB_DIR_OUT | USB_TYPE_VENDOR |
1249 USB_RECIP_DEVICE, 0x0000, 0x0000,
1250 xfer_buffer, xfer_length, HZ);
1251 kfree(xfer_buffer);
1252 if (retval < 0) {
1253 strncpy(vub300->vub_name,
1254 "SDIO pseudocode download failed",
1255 sizeof(vub300->vub_name));
1256 return;
1257 }
1258 } else {
1259 dev_err(&vub300->udev->dev,
1260 "not enough memory for xfer buffer to send"
1261 " INTERRUPT_PSEUDOCODE for %s %s\n", fw->data,
1262 vub300->vub_name);
1263 strncpy(vub300->vub_name,
1264 "SDIO interrupt pseudocode download failed",
1265 sizeof(vub300->vub_name));
1266 return;
1267 }
1268 } else {
1269 dev_err(&vub300->udev->dev,
1270 "corrupt interrupt pseudocode in firmware %s %s\n",
1271 fw->data, vub300->vub_name);
1272 strncpy(vub300->vub_name, "corrupt interrupt pseudocode",
1273 sizeof(vub300->vub_name));
1274 return;
1275 }
1276 ts += *data++;
1277 size -= 1;
1278 ts <<= 8;
1279 ts += *data++;
1280 size -= 1;
1281 if (ts < size) {
1282 u16 xfer_length = roundup_to_multiple_of_64(ts);
1283 u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
1284 if (xfer_buffer) {
1285 int retval;
1286 memcpy(xfer_buffer, data, ts);
1287 memset(xfer_buffer + ts, 0,
1288 xfer_length - ts);
1289 size -= ts;
1290 data += ts;
1291 retval =
1292 usb_control_msg(vub300->udev,
1293 usb_sndctrlpipe(vub300->udev, 0),
1294 SET_TRANSFER_PSEUDOCODE,
1295 USB_DIR_OUT | USB_TYPE_VENDOR |
1296 USB_RECIP_DEVICE, 0x0000, 0x0000,
1297 xfer_buffer, xfer_length, HZ);
1298 kfree(xfer_buffer);
1299 if (retval < 0) {
1300 strncpy(vub300->vub_name,
1301 "SDIO pseudocode download failed",
1302 sizeof(vub300->vub_name));
1303 return;
1304 }
1305 } else {
1306 dev_err(&vub300->udev->dev,
1307 "not enough memory for xfer buffer to send"
1308 " TRANSFER_PSEUDOCODE for %s %s\n", fw->data,
1309 vub300->vub_name);
1310 strncpy(vub300->vub_name,
1311 "SDIO transfer pseudocode download failed",
1312 sizeof(vub300->vub_name));
1313 return;
1314 }
1315 } else {
1316 dev_err(&vub300->udev->dev,
1317 "corrupt transfer pseudocode in firmware %s %s\n",
1318 fw->data, vub300->vub_name);
1319 strncpy(vub300->vub_name, "corrupt transfer pseudocode",
1320 sizeof(vub300->vub_name));
1321 return;
1322 }
1323 register_count += *data++;
1324 size -= 1;
1325 if (register_count * 4 == size) {
1326 int I = vub300->dynamic_register_count = register_count;
1327 int i = 0;
1328 while (I--) {
1329 unsigned int func_num = 0;
1330 vub300->sdio_register[i].func_num = *data++;
1331 size -= 1;
1332 func_num += *data++;
1333 size -= 1;
1334 func_num <<= 8;
1335 func_num += *data++;
1336 size -= 1;
1337 func_num <<= 8;
1338 func_num += *data++;
1339 size -= 1;
1340 vub300->sdio_register[i].sdio_reg = func_num;
1341 vub300->sdio_register[i].activate = 1;
1342 vub300->sdio_register[i].prepared = 0;
1343 i += 1;
1344 }
1345 dev_info(&vub300->udev->dev,
1346 "initialized %d dynamic pseudocode registers\n",
1347 vub300->dynamic_register_count);
1348 return;
1349 } else {
1350 dev_err(&vub300->udev->dev,
1351 "corrupt dynamic registers in firmware %s\n",
1352 vub300->vub_name);
1353 strncpy(vub300->vub_name, "corrupt dynamic registers",
1354 sizeof(vub300->vub_name));
1355 return;
1356 }
1357}
1358
1359/*
1360 * if the binary containing the EMPTY PseudoCode can not be found
1361 * vub300->vub_name is set anyway in order to prevent an automatic retry
1362 */
1363static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
1364{
1365 struct mmc_card *card = vub300->mmc->card;
1366 int sdio_funcs = card->sdio_funcs;
1367 const struct firmware *fw = NULL;
1368 int l = snprintf(vub300->vub_name, sizeof(vub300->vub_name),
1369 "vub_%04X%04X", card->cis.vendor, card->cis.device);
1370 int n = 0;
1371 int retval;
1372 for (n = 0; n < sdio_funcs; n++) {
1373 struct sdio_func *sf = card->sdio_func[n];
1374 l += snprintf(vub300->vub_name + l,
1375 sizeof(vub300->vub_name) - l, "_%04X%04X",
1376 sf->vendor, sf->device);
1377 };
1378 snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin");
1379 dev_info(&vub300->udev->dev, "requesting offload firmware %s\n",
1380 vub300->vub_name);
1381 retval = request_firmware(&fw, vub300->vub_name, &card->dev);
1382 if (retval < 0) {
1383 strncpy(vub300->vub_name, "vub_default.bin",
1384 sizeof(vub300->vub_name));
1385 retval = request_firmware(&fw, vub300->vub_name, &card->dev);
1386 if (retval < 0) {
1387 strncpy(vub300->vub_name,
1388 "no SDIO offload firmware found",
1389 sizeof(vub300->vub_name));
1390 } else {
1391 __download_offload_pseudocode(vub300, fw);
1392 release_firmware(fw);
1393 }
1394 } else {
1395 __download_offload_pseudocode(vub300, fw);
1396 release_firmware(fw);
1397 }
1398}
1399
static void vub300_usb_bulk_msg_completion(struct urb *urb)
{ /* urb completion handler - hardirq */
	/* wake the vub300_usb_bulk_msg() waiter sleeping on this urb */
	complete((struct completion *)urb->context);
}
1404
static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300,
			       unsigned int pipe, void *data, int len,
			       int *actual_length, int timeout_msecs)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	/*
	 * Synchronous bulk transfer built on a private urb kept in
	 * vub300->urb so that command_res_completed() can unlink it
	 * if the VUB300 reports an error mid-transfer.  Returns 0 or
	 * a negative errno; *actual_length is always written.
	 */
	struct usb_device *usb_dev = vub300->udev;
	struct completion done;
	int retval;
	vub300->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!vub300->urb)
		return -ENOMEM;
	usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len,
			  vub300_usb_bulk_msg_completion, NULL);
	init_completion(&done);
	vub300->urb->context = &done;
	vub300->urb->actual_length = 0;
	retval = usb_submit_urb(vub300->urb, GFP_KERNEL);
	if (unlikely(retval))
		goto out;
	if (!wait_for_completion_timeout
	    (&done, msecs_to_jiffies(timeout_msecs))) {
		/* timed out - reap the urb before touching its fields */
		retval = -ETIMEDOUT;
		usb_kill_urb(vub300->urb);
	} else {
		retval = vub300->urb->status;
	}
out:
	*actual_length = vub300->urb->actual_length;
	usb_free_urb(vub300->urb);
	vub300->urb = NULL;
	return retval;
}
1437
static int __command_read_data(struct vub300_mmc_host *vub300,
			       struct mmc_command *cmd, struct mmc_data *data)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	/*
	 * Read the data phase of the current command from the bulk-in
	 * endpoint.  When the transfer is already a multiple of the
	 * USB packet size (or padding is disabled) the scatter-gather
	 * list is read directly; otherwise a padded bounce buffer is
	 * used and copied back into the sg list.  Returns the number
	 * of payload bytes on success, 0 with cmd->error set otherwise.
	 */
	int linear_length = vub300->datasize;
	int padded_length = vub300->large_usb_packets ?
		((511 + linear_length) >> 9) << 9 :
		((63 + linear_length) >> 6) << 6;
	if ((padded_length == linear_length) || !pad_input_to_usb_pkt) {
		int result;
		unsigned pipe;
		pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep);
		result = usb_sg_init(&vub300->sg_request, vub300->udev,
				     pipe, 0, data->sg,
				     data->sg_len, 0, GFP_KERNEL);
		if (result < 0) {
			/* sg setup failed - abort the command urbs too */
			usb_unlink_urb(vub300->command_out_urb);
			usb_unlink_urb(vub300->command_res_urb);
			cmd->error = result;
			data->bytes_xfered = 0;
			return 0;
		} else {
			/* guard the blocking sg wait with a watchdog timer */
			vub300->sg_transfer_timer.expires =
				jiffies + msecs_to_jiffies(2000 +
						  (linear_length / 16384));
			add_timer(&vub300->sg_transfer_timer);
			usb_sg_wait(&vub300->sg_request);
			del_timer(&vub300->sg_transfer_timer);
			if (vub300->sg_request.status < 0) {
				cmd->error = vub300->sg_request.status;
				data->bytes_xfered = 0;
				return 0;
			} else {
				data->bytes_xfered = vub300->datasize;
				return linear_length;
			}
		}
	} else {
		/* padded read into a bounce buffer, then copy to the sg list */
		u8 *buf = kmalloc(padded_length, GFP_KERNEL);
		if (buf) {
			int result;
			unsigned pipe = usb_rcvbulkpipe(vub300->udev,
							vub300->data_inp_ep);
			int actual_length = 0;
			result = vub300_usb_bulk_msg(vub300, pipe, buf,
					     padded_length, &actual_length,
					     2000 + (padded_length / 16384));
			if (result < 0) {
				cmd->error = result;
				data->bytes_xfered = 0;
				kfree(buf);
				return 0;
			} else if (actual_length < linear_length) {
				/* short read - the payload is incomplete */
				cmd->error = -EREMOTEIO;
				data->bytes_xfered = 0;
				kfree(buf);
				return 0;
			} else {
				sg_copy_from_buffer(data->sg, data->sg_len, buf,
						    linear_length);
				kfree(buf);
				data->bytes_xfered = vub300->datasize;
				return linear_length;
			}
		} else {
			cmd->error = -ENOMEM;
			data->bytes_xfered = 0;
			return 0;
		}
	}
}
1509
1510static int __command_write_data(struct vub300_mmc_host *vub300,
1511 struct mmc_command *cmd, struct mmc_data *data)
1512{
1513 /* cmd_mutex is held by vub300_cmndwork_thread */
1514 unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep);
1515 int linear_length = vub300->datasize;
1516 int modulo_64_length = linear_length & 0x003F;
1517 int modulo_512_length = linear_length & 0x01FF;
1518 if (linear_length < 64) {
1519 int result;
1520 int actual_length;
1521 sg_copy_to_buffer(data->sg, data->sg_len,
1522 vub300->padded_buffer,
1523 sizeof(vub300->padded_buffer));
1524 memset(vub300->padded_buffer + linear_length, 0,
1525 sizeof(vub300->padded_buffer) - linear_length);
1526 result = vub300_usb_bulk_msg(vub300, pipe, vub300->padded_buffer,
1527 sizeof(vub300->padded_buffer),
1528 &actual_length, 2000 +
1529 (sizeof(vub300->padded_buffer) /
1530 16384));
1531 if (result < 0) {
1532 cmd->error = result;
1533 data->bytes_xfered = 0;
1534 } else {
1535 data->bytes_xfered = vub300->datasize;
1536 }
1537 } else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) ||
1538 (vub300->large_usb_packets && (64 > modulo_512_length))
1539 ) { /* don't you just love these work-rounds */
1540 int padded_length = ((63 + linear_length) >> 6) << 6;
1541 u8 *buf = kmalloc(padded_length, GFP_KERNEL);
1542 if (buf) {
1543 int result;
1544 int actual_length;
1545 sg_copy_to_buffer(data->sg, data->sg_len, buf,
1546 padded_length);
1547 memset(buf + linear_length, 0,
1548 padded_length - linear_length);
1549 result =
1550 vub300_usb_bulk_msg(vub300, pipe, buf,
1551 padded_length, &actual_length,
1552 2000 + padded_length / 16384);
1553 kfree(buf);
1554 if (result < 0) {
1555 cmd->error = result;
1556 data->bytes_xfered = 0;
1557 } else {
1558 data->bytes_xfered = vub300->datasize;
1559 }
1560 } else {
1561 cmd->error = -ENOMEM;
1562 data->bytes_xfered = 0;
1563 }
1564 } else { /* no data padding required */
1565 int result;
1566 unsigned char buf[64 * 4];
1567 sg_copy_to_buffer(data->sg, data->sg_len, buf, sizeof(buf));
1568 result = usb_sg_init(&vub300->sg_request, vub300->udev,
1569 pipe, 0, data->sg,
1570 data->sg_len, 0, GFP_KERNEL);
1571 if (result < 0) {
1572 usb_unlink_urb(vub300->command_out_urb);
1573 usb_unlink_urb(vub300->command_res_urb);
1574 cmd->error = result;
1575 data->bytes_xfered = 0;
1576 } else {
1577 vub300->sg_transfer_timer.expires =
1578 jiffies + msecs_to_jiffies(2000 +
1579 linear_length / 16384);
1580 add_timer(&vub300->sg_transfer_timer);
1581 usb_sg_wait(&vub300->sg_request);
1582 if (cmd->error) {
1583 data->bytes_xfered = 0;
1584 } else {
1585 del_timer(&vub300->sg_transfer_timer);
1586 if (vub300->sg_request.status < 0) {
1587 cmd->error = vub300->sg_request.status;
1588 data->bytes_xfered = 0;
1589 } else {
1590 data->bytes_xfered = vub300->datasize;
1591 }
1592 }
1593 }
1594 }
1595 return linear_length;
1596}
1597
/*
 * Wait for the command/response exchange started by send_command() to
 * complete, then classify the outcome into cmd->error and consume any
 * piggybacked offloaded-register data carried in the response header.
 *
 * A USB-level timeout is treated as fatal: both command URBs are killed
 * and the whole device is reset.  Piggybacked responses additionally
 * adjust the SDIO interrupt bookkeeping under irq_mutex.
 */
static void __vub300_command_response(struct vub300_mmc_host *vub300,
				      struct mmc_command *cmd,
				      struct mmc_data *data, int data_length)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	long respretval;
	/* scale the wait with the amount of data moved */
	int msec_timeout = 1000 + data_length / 4;
	respretval =
		wait_for_completion_timeout(&vub300->command_complete,
					    msecs_to_jiffies(msec_timeout));
	if (respretval == 0) { /* TIMED OUT */
		/* we don't know which of "out" and "res" if any failed */
		int result;
		vub300->usb_timed_out = 1;
		usb_kill_urb(vub300->command_out_urb);
		usb_kill_urb(vub300->command_res_urb);
		cmd->error = -ETIMEDOUT;
		result = usb_lock_device_for_reset(vub300->udev,
						   vub300->interface);
		if (result == 0) {
			result = usb_reset_device(vub300->udev);
			usb_unlock_device(vub300->udev);
		}
	} else if (respretval < 0) {
		/* we don't know which of "out" and "res" if any failed */
		usb_kill_urb(vub300->command_out_urb);
		usb_kill_urb(vub300->command_res_urb);
		cmd->error = respretval;
	} else if (cmd->error) {
		/*
		 * the error occurred sending the command
		 * or receiving the response
		 */
	} else if (vub300->command_out_urb->status) {
		/* -EPROTO is mapped to -ESHUTDOWN to stop further I/O */
		vub300->usb_transport_fail = vub300->command_out_urb->status;
		cmd->error = -EPROTO == vub300->command_out_urb->status ?
			-ESHUTDOWN : vub300->command_out_urb->status;
	} else if (vub300->command_res_urb->status) {
		vub300->usb_transport_fail = vub300->command_res_urb->status;
		cmd->error = -EPROTO == vub300->command_res_urb->status ?
			-ESHUTDOWN : vub300->command_res_urb->status;
	} else if (vub300->resp.common.header_type == 0x00) {
		/*
		 * the command completed successfully
		 * and there was no piggybacked data
		 */
	} else if (vub300->resp.common.header_type == RESPONSE_ERROR) {
		cmd->error =
			vub300_response_error(vub300->resp.error.error_code);
		if (vub300->data)
			usb_sg_cancel(&vub300->sg_request);
	} else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) {
		/* response carries offloaded register reads: stash them */
		int offloaded_data_length =
			vub300->resp.common.header_size -
			sizeof(struct sd_register_header);
		int register_count = offloaded_data_length >> 3;
		int ri = 0;
		while (register_count--) {
			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
			ri += 1;
		}
		/* shrink the header back so the response parses normally */
		vub300->resp.common.header_size =
			sizeof(struct sd_register_header);
		vub300->resp.common.header_type = 0x00;
		cmd->error = 0;
	} else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) {
		/* as above, plus the device disabled interrupt offloading */
		int offloaded_data_length =
			vub300->resp.common.header_size -
			sizeof(struct sd_register_header);
		int register_count = offloaded_data_length >> 3;
		int ri = 0;
		while (register_count--) {
			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
			ri += 1;
		}
		mutex_lock(&vub300->irq_mutex);
		if (vub300->irqs_queued) {
			vub300->irqs_queued += 1;
		} else if (vub300->irq_enabled) {
			vub300->irqs_queued += 1;
			vub300_queue_poll_work(vub300, 0);
		} else {
			vub300->irqs_queued += 1;
		}
		vub300->irq_disabled = 1;
		mutex_unlock(&vub300->irq_mutex);
		vub300->resp.common.header_size =
			sizeof(struct sd_register_header);
		vub300->resp.common.header_type = 0x00;
		cmd->error = 0;
	} else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) {
		/* as above, plus the device re-enabled interrupt offloading */
		int offloaded_data_length =
			vub300->resp.common.header_size -
			sizeof(struct sd_register_header);
		int register_count = offloaded_data_length >> 3;
		int ri = 0;
		while (register_count--) {
			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
			ri += 1;
		}
		mutex_lock(&vub300->irq_mutex);
		if (vub300->irqs_queued) {
			vub300->irqs_queued += 1;
		} else if (vub300->irq_enabled) {
			vub300->irqs_queued += 1;
			vub300_queue_poll_work(vub300, 0);
		} else {
			vub300->irqs_queued += 1;
		}
		vub300->irq_disabled = 0;
		mutex_unlock(&vub300->irq_mutex);
		vub300->resp.common.header_size =
			sizeof(struct sd_register_header);
		vub300->resp.common.header_type = 0x00;
		cmd->error = 0;
	} else {
		/* unknown header type from the device */
		cmd->error = -EINVAL;
	}
}
1717
1718static void construct_request_response(struct vub300_mmc_host *vub300,
1719 struct mmc_command *cmd)
1720{
1721 int resp_len = vub300->resp_len;
1722 int less_cmd = (17 == resp_len) ? resp_len : resp_len - 1;
1723 int bytes = 3 & less_cmd;
1724 int words = less_cmd >> 2;
1725 u8 *r = vub300->resp.response.command_response;
1726 if (bytes == 3) {
1727 cmd->resp[words] = (r[1 + (words << 2)] << 24)
1728 | (r[2 + (words << 2)] << 16)
1729 | (r[3 + (words << 2)] << 8);
1730 } else if (bytes == 2) {
1731 cmd->resp[words] = (r[1 + (words << 2)] << 24)
1732 | (r[2 + (words << 2)] << 16);
1733 } else if (bytes == 1) {
1734 cmd->resp[words] = (r[1 + (words << 2)] << 24);
1735 }
1736 while (words-- > 0) {
1737 cmd->resp[words] = (r[1 + (words << 2)] << 24)
1738 | (r[2 + (words << 2)] << 16)
1739 | (r[3 + (words << 2)] << 8)
1740 | (r[4 + (words << 2)] << 0);
1741 }
1742 if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0]))
1743 cmd->resp[0] &= 0xFFFFFF00;
1744}
1745
/* this thread runs only when there is an upper level command req outstanding */
/*
 * Workqueue handler that executes one queued mmc request end-to-end:
 * optional offload-firmware download, command send, data phase, and
 * response parsing.  Runs with cmd_mutex held; the kref taken in
 * vub300_mmc_request() (via vub300_queue_cmnd_work) is dropped here.
 */
static void vub300_cmndwork_thread(struct work_struct *work)
{
	struct vub300_mmc_host *vub300 =
		container_of(work, struct vub300_mmc_host, cmndwork);
	if (!vub300->interface) {
		/* device already disconnected: just drop our reference */
		kref_put(&vub300->kref, vub300_delete);
		return;
	} else {
		struct mmc_request *req = vub300->req;
		struct mmc_command *cmd = vub300->cmd;
		struct mmc_data *data = vub300->data;
		int data_length;
		mutex_lock(&vub300->cmd_mutex);
		init_completion(&vub300->command_complete);
		if (likely(vub300->vub_name[0]) || !vub300->mmc->card ||
		    !mmc_card_present(vub300->mmc->card)) {
			/*
			 * the name of the EMPTY Pseudo firmware file
			 * is used as a flag to indicate that the file
			 * has been already downloaded to the VUB300 chip
			 */
		} else if (0 == vub300->mmc->card->sdio_funcs) {
			/* plain SD memory card: no SDIO offload firmware */
			/* NOTE(review): strncpy may leave vub_name without a
			 * NUL if the literal fills the buffer - verify size */
			strncpy(vub300->vub_name, "SD memory device",
				sizeof(vub300->vub_name));
		} else {
			download_offload_pseudocode(vub300);
		}
		send_command(vub300);
		if (!data)
			data_length = 0;
		else if (MMC_DATA_READ & data->flags)
			data_length = __command_read_data(vub300, cmd, data);
		else
			data_length = __command_write_data(vub300, cmd, data);
		__vub300_command_response(vub300, cmd, data, data_length);
		/* clear the in-flight request before completing it */
		vub300->req = NULL;
		vub300->cmd = NULL;
		vub300->data = NULL;
		if (cmd->error) {
			if (cmd->error == -ENOMEDIUM)
				check_vub300_port_status(vub300);
			mutex_unlock(&vub300->cmd_mutex);
			mmc_request_done(vub300->mmc, req);
			kref_put(&vub300->kref, vub300_delete);
			return;
		} else {
			construct_request_response(vub300, cmd);
			vub300->resp_len = 0;
			mutex_unlock(&vub300->cmd_mutex);
			kref_put(&vub300->kref, vub300_delete);
			mmc_request_done(vub300->mmc, req);
			return;
		}
	}
}
1802
/*
 * Try to satisfy a CMD52 from the cyclic buffer of register accesses
 * that the VUB300 offloaded ahead of time for the given SDIO function.
 *
 * Matching is by the four command-argument bytes.  A hit consumes the
 * matched entry and everything queued before it (the offload stream is
 * only valid in order).  Returns 1 on a hit with cmd->resp[] filled in,
 * 0 when no entry matches.
 *
 * cmd_mutex is held by vub300_mmc_request.
 */
static int examine_cyclic_buffer(struct vub300_mmc_host *vub300,
				 struct mmc_command *cmd, u8 Function)
{
	/* cmd_mutex is held by vub300_mmc_request */
	u8 cmd0 = 0xFF & (cmd->arg >> 24);
	u8 cmd1 = 0xFF & (cmd->arg >> 16);
	u8 cmd2 = 0xFF & (cmd->arg >> 8);
	u8 cmd3 = 0xFF & (cmd->arg >> 0);
	/* fast path: check the oldest queued entry first */
	int first = MAXREGMASK & vub300->fn[Function].offload_point;
	struct offload_registers_access *rf = &vub300->fn[Function].reg[first];
	if (cmd0 == rf->command_byte[0] &&
	    cmd1 == rf->command_byte[1] &&
	    cmd2 == rf->command_byte[2] &&
	    cmd3 == rf->command_byte[3]) {
		u8 checksum = 0x00;
		cmd->resp[1] = checksum << 24;
		cmd->resp[0] = (rf->Respond_Byte[0] << 24)
			| (rf->Respond_Byte[1] << 16)
			| (rf->Respond_Byte[2] << 8)
			| (rf->Respond_Byte[3] << 0);
		vub300->fn[Function].offload_point += 1;
		vub300->fn[Function].offload_count -= 1;
		vub300->total_offload_count -= 1;
		return 1;
	} else {
		/* scan forward; a hit discards all entries skipped over */
		int delta = 1;	/* because it does not match the first one */
		u8 register_count = vub300->fn[Function].offload_count - 1;
		u32 register_point = vub300->fn[Function].offload_point + 1;
		while (0 < register_count) {
			int point = MAXREGMASK & register_point;
			struct offload_registers_access *r =
				&vub300->fn[Function].reg[point];
			if (cmd0 == r->command_byte[0] &&
			    cmd1 == r->command_byte[1] &&
			    cmd2 == r->command_byte[2] &&
			    cmd3 == r->command_byte[3]) {
				u8 checksum = 0x00;
				cmd->resp[1] = checksum << 24;
				cmd->resp[0] = (r->Respond_Byte[0] << 24)
					| (r->Respond_Byte[1] << 16)
					| (r->Respond_Byte[2] << 8)
					| (r->Respond_Byte[3] << 0);
				vub300->fn[Function].offload_point += delta;
				vub300->fn[Function].offload_count -= delta;
				vub300->total_offload_count -= delta;
				return 1;
			} else {
				register_point += 1;
				register_count -= 1;
				delta += 1;
				continue;
			}
		}
		return 0;
	}
}
1859
/*
 * Attempt to answer a CMD52 locally from previously offloaded data,
 * avoiding a USB round-trip.
 *
 * First the table of "dynamic" registers is consulted: a prepared read
 * is answered once and then invalidated; a write invalidates the cached
 * value and falls through to the normal path.  Otherwise the cyclic
 * offload buffer for the addressed function is searched.
 *
 * Returns 1 if cmd->resp[] was filled in locally, 0 if the command must
 * go to the device.  cmd_mutex is held by vub300_mmc_request.
 */
static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300,
					       struct mmc_command *cmd)
{
	/* cmd_mutex is held by vub300_mmc_request */
	u8 regs = vub300->dynamic_register_count;
	u8 i = 0;
	u8 func = FUN(cmd);
	u32 reg = REG(cmd);
	while (0 < regs--) {
		if ((vub300->sdio_register[i].func_num == func) &&
		    (vub300->sdio_register[i].sdio_reg == reg)) {
			if (!vub300->sdio_register[i].prepared) {
				/* no cached value yet: ask the device */
				return 0;
			} else if ((0x80000000 & cmd->arg) == 0x80000000) {
				/*
				 * a write to a dynamic register
				 * nullifies our offloaded value
				 */
				vub300->sdio_register[i].prepared = 0;
				return 0;
			} else {
				u8 checksum = 0x00;
				u8 rsp0 = 0x00;
				u8 rsp1 = 0x00;
				u8 rsp2 = vub300->sdio_register[i].response;
				u8 rsp3 = vub300->sdio_register[i].regvalue;
				/* single-shot: consume the cached value */
				vub300->sdio_register[i].prepared = 0;
				cmd->resp[1] = checksum << 24;
				cmd->resp[0] = (rsp0 << 24)
					| (rsp1 << 16)
					| (rsp2 << 8)
					| (rsp3 << 0);
				return 1;
			}
		} else {
			i += 1;
			continue;
		}
	};	/* NOTE(review): stray semicolon - harmless null statement */
	if (vub300->total_offload_count == 0)
		return 0;
	else if (vub300->fn[func].offload_count == 0)
		return 0;
	else
		return examine_cyclic_buffer(vub300, cmd, func);
}
1906
1907static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
1908{ /* NOT irq */
1909 struct mmc_command *cmd = req->cmd;
1910 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
1911 if (!vub300->interface) {
1912 cmd->error = -ESHUTDOWN;
1913 mmc_request_done(mmc, req);
1914 return;
1915 } else {
1916 struct mmc_data *data = req->data;
1917 if (!vub300->card_powered) {
1918 cmd->error = -ENOMEDIUM;
1919 mmc_request_done(mmc, req);
1920 return;
1921 }
1922 if (!vub300->card_present) {
1923 cmd->error = -ENOMEDIUM;
1924 mmc_request_done(mmc, req);
1925 return;
1926 }
1927 if (vub300->usb_transport_fail) {
1928 cmd->error = vub300->usb_transport_fail;
1929 mmc_request_done(mmc, req);
1930 return;
1931 }
1932 if (!vub300->interface) {
1933 cmd->error = -ENODEV;
1934 mmc_request_done(mmc, req);
1935 return;
1936 }
1937 kref_get(&vub300->kref);
1938 mutex_lock(&vub300->cmd_mutex);
1939 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
1940 /*
1941 * for performance we have to return immediately
1942 * if the requested data has been offloaded
1943 */
1944 if (cmd->opcode == 52 &&
1945 satisfy_request_from_offloaded_data(vub300, cmd)) {
1946 cmd->error = 0;
1947 mutex_unlock(&vub300->cmd_mutex);
1948 kref_put(&vub300->kref, vub300_delete);
1949 mmc_request_done(mmc, req);
1950 return;
1951 } else {
1952 vub300->cmd = cmd;
1953 vub300->req = req;
1954 vub300->data = data;
1955 if (data)
1956 vub300->datasize = data->blksz * data->blocks;
1957 else
1958 vub300->datasize = 0;
1959 vub300_queue_cmnd_work(vub300);
1960 mutex_unlock(&vub300->cmd_mutex);
1961 kref_put(&vub300->kref, vub300_delete);
1962 /*
1963 * the kernel lock diagnostics complain
1964 * if the cmd_mutex * is "passed on"
1965 * to the cmndwork thread,
1966 * so we must release it now
1967 * and re-acquire it in the cmndwork thread
1968 */
1969 }
1970 }
1971}
1972
1973static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
1974 struct mmc_ios *ios)
1975{
1976 int buf_array_size = 8; /* ARRAY_SIZE(buf) does not work !!! */
1977 int retval;
1978 u32 kHzClock;
1979 if (ios->clock >= 48000000)
1980 kHzClock = 48000;
1981 else if (ios->clock >= 24000000)
1982 kHzClock = 24000;
1983 else if (ios->clock >= 20000000)
1984 kHzClock = 20000;
1985 else if (ios->clock >= 15000000)
1986 kHzClock = 15000;
1987 else if (ios->clock >= 200000)
1988 kHzClock = 200;
1989 else
1990 kHzClock = 0;
1991 {
1992 int i;
1993 u64 c = kHzClock;
1994 for (i = 0; i < buf_array_size; i++) {
1995 buf[i] = c;
1996 c >>= 8;
1997 }
1998 }
1999 retval =
2000 usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
2001 SET_CLOCK_SPEED,
2002 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2003 0x00, 0x00, buf, buf_array_size, HZ);
2004 if (retval != 8) {
2005 dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
2006 " %dkHz failed with retval=%d\n", kHzClock, retval);
2007 } else {
2008 dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED"
2009 " %dkHz\n", kHzClock);
2010 }
2011}
2012
/*
 * mmc_host_ops.set_ios handler: switches SD power rails on/off via
 * vendor control messages and programs the clock while powered on.
 * The 600 ms sleeps let the VUB300 microprocessor reboot after a power
 * transition.  Serialized against command processing by cmd_mutex.
 */
static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{				/* NOT irq */
	struct vub300_mmc_host *vub300 = mmc_priv(mmc);
	if (!vub300->interface)
		return;
	kref_get(&vub300->kref);
	mutex_lock(&vub300->cmd_mutex);
	if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) {
		vub300->card_powered = 0;
		usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
				SET_SD_POWER,
				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				0x0000, 0x0000, NULL, 0, HZ);
		/* must wait for the VUB300 u-proc to boot up */
		msleep(600);
	} else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
		usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
				SET_SD_POWER,
				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				0x0001, 0x0000, NULL, 0, HZ);
		msleep(600);
		vub300->card_powered = 1;
	} else if (ios->power_mode == MMC_POWER_ON) {
		/*
		 * heap buffer on purpose: usb_control_msg() needs a
		 * DMA-able transfer buffer, so no stack array here
		 */
		u8 *buf = kmalloc(8, GFP_KERNEL);
		if (buf) {
			__set_clock_speed(vub300, buf, ios);
			kfree(buf);
		}
	} else {
		/* this should mean no change of state */
	}
	mutex_unlock(&vub300->cmd_mutex);
	kref_put(&vub300->kref, vub300_delete);
}
2047
2048static int vub300_mmc_get_ro(struct mmc_host *mmc)
2049{
2050 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
2051 return vub300->read_only;
2052}
2053
/*
 * mmc_host_ops.enable_sdio_irq handler.  On enable, either deliver an
 * already-queued interrupt to the core immediately or (re)start the
 * interrupt poll worker; on disable, just drop the enabled flag.
 */
static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
{				/* NOT irq */
	struct vub300_mmc_host *vub300 = mmc_priv(mmc);
	if (!vub300->interface)
		return;
	kref_get(&vub300->kref);
	if (enable) {
		mutex_lock(&vub300->irq_mutex);
		if (vub300->irqs_queued) {
			/* an interrupt arrived while disabled: deliver it */
			vub300->irqs_queued -= 1;
			mmc_signal_sdio_irq(vub300->mmc);
		} else if (vub300->irq_disabled) {
			vub300->irq_disabled = 0;
			vub300->irq_enabled = 1;
			vub300_queue_poll_work(vub300, 0);
		} else if (vub300->irq_enabled) {
			/* this should not happen, so we will just ignore it */
		} else {
			vub300->irq_enabled = 1;
			vub300_queue_poll_work(vub300, 0);
		}
		mutex_unlock(&vub300->irq_mutex);
	} else {
		/* NOTE(review): written without irq_mutex, unlike the
		 * enable path - confirm this is a benign race */
		vub300->irq_enabled = 0;
	}
	kref_put(&vub300->kref, vub300_delete);
}
2081
2082void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
2083{ /* NOT irq */
2084 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
2085 dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
2086}
2087
/* host-controller operations handed to the mmc core via mmc->ops */
static struct mmc_host_ops vub300_mmc_ops = {
	.request = vub300_mmc_request,
	.set_ios = vub300_mmc_set_ios,
	.get_ro = vub300_mmc_get_ro,
	.enable_sdio_irq = vub300_enable_sdio_irq,
	.init_card = vub300_init_card,
};
2095
2096static int vub300_probe(struct usb_interface *interface,
2097 const struct usb_device_id *id)
2098{ /* NOT irq */
2099 struct vub300_mmc_host *vub300;
2100 struct usb_host_interface *iface_desc;
2101 struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
2102 int i;
2103 int retval = -ENOMEM;
2104 struct urb *command_out_urb;
2105 struct urb *command_res_urb;
2106 struct mmc_host *mmc;
2107 char manufacturer[48];
2108 char product[32];
2109 char serial_number[32];
2110 usb_string(udev, udev->descriptor.iManufacturer, manufacturer,
2111 sizeof(manufacturer));
2112 usb_string(udev, udev->descriptor.iProduct, product, sizeof(product));
2113 usb_string(udev, udev->descriptor.iSerialNumber, serial_number,
2114 sizeof(serial_number));
2115 dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n",
2116 udev->descriptor.idVendor, udev->descriptor.idProduct,
2117 manufacturer, product, serial_number);
2118 command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
2119 if (!command_out_urb) {
2120 retval = -ENOMEM;
2121 dev_err(&udev->dev, "not enough memory for command_out_urb\n");
2122 goto error0;
2123 }
2124 command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
2125 if (!command_res_urb) {
2126 retval = -ENOMEM;
2127 dev_err(&udev->dev, "not enough memory for command_res_urb\n");
2128 goto error1;
2129 }
2130 /* this also allocates memory for our VUB300 mmc host device */
2131 mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
2132 if (!mmc) {
2133 retval = -ENOMEM;
2134 dev_err(&udev->dev, "not enough memory for the mmc_host\n");
2135 goto error4;
2136 }
2137 /* MMC core transfer sizes tunable parameters */
2138 mmc->caps = 0;
2139 if (!force_1_bit_data_xfers)
2140 mmc->caps |= MMC_CAP_4_BIT_DATA;
2141 if (!force_polling_for_irqs)
2142 mmc->caps |= MMC_CAP_SDIO_IRQ;
2143 mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2144 /*
2145 * MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll
2146 * for devices which results in spurious CMD7's being
2147 * issued which stops some SDIO cards from working
2148 */
2149 if (limit_speed_to_24_MHz) {
2150 mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
2151 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2152 mmc->f_max = 24000000;
2153 dev_info(&udev->dev, "limiting SDIO speed to 24_MHz\n");
2154 } else {
2155 mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
2156 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2157 mmc->f_max = 48000000;
2158 }
2159 mmc->f_min = 200000;
2160 mmc->max_blk_count = 511;
2161 mmc->max_blk_size = 512;
2162 mmc->max_segs = 128;
2163 if (force_max_req_size)
2164 mmc->max_req_size = force_max_req_size * 1024;
2165 else
2166 mmc->max_req_size = 64 * 1024;
2167 mmc->max_seg_size = mmc->max_req_size;
2168 mmc->ocr_avail = 0;
2169 mmc->ocr_avail |= MMC_VDD_165_195;
2170 mmc->ocr_avail |= MMC_VDD_20_21;
2171 mmc->ocr_avail |= MMC_VDD_21_22;
2172 mmc->ocr_avail |= MMC_VDD_22_23;
2173 mmc->ocr_avail |= MMC_VDD_23_24;
2174 mmc->ocr_avail |= MMC_VDD_24_25;
2175 mmc->ocr_avail |= MMC_VDD_25_26;
2176 mmc->ocr_avail |= MMC_VDD_26_27;
2177 mmc->ocr_avail |= MMC_VDD_27_28;
2178 mmc->ocr_avail |= MMC_VDD_28_29;
2179 mmc->ocr_avail |= MMC_VDD_29_30;
2180 mmc->ocr_avail |= MMC_VDD_30_31;
2181 mmc->ocr_avail |= MMC_VDD_31_32;
2182 mmc->ocr_avail |= MMC_VDD_32_33;
2183 mmc->ocr_avail |= MMC_VDD_33_34;
2184 mmc->ocr_avail |= MMC_VDD_34_35;
2185 mmc->ocr_avail |= MMC_VDD_35_36;
2186 mmc->ops = &vub300_mmc_ops;
2187 vub300 = mmc_priv(mmc);
2188 vub300->mmc = mmc;
2189 vub300->card_powered = 0;
2190 vub300->bus_width = 0;
2191 vub300->cmnd.head.block_size[0] = 0x00;
2192 vub300->cmnd.head.block_size[1] = 0x00;
2193 vub300->app_spec = 0;
2194 mutex_init(&vub300->cmd_mutex);
2195 mutex_init(&vub300->irq_mutex);
2196 vub300->command_out_urb = command_out_urb;
2197 vub300->command_res_urb = command_res_urb;
2198 vub300->usb_timed_out = 0;
2199 vub300->dynamic_register_count = 0;
2200
2201 for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) {
2202 vub300->fn[i].offload_point = 0;
2203 vub300->fn[i].offload_count = 0;
2204 }
2205
2206 vub300->total_offload_count = 0;
2207 vub300->irq_enabled = 0;
2208 vub300->irq_disabled = 0;
2209 vub300->irqs_queued = 0;
2210
2211 for (i = 0; i < ARRAY_SIZE(vub300->sdio_register); i++)
2212 vub300->sdio_register[i++].activate = 0;
2213
2214 vub300->udev = udev;
2215 vub300->interface = interface;
2216 vub300->cmnd_res_ep = 0;
2217 vub300->cmnd_out_ep = 0;
2218 vub300->data_inp_ep = 0;
2219 vub300->data_out_ep = 0;
2220
2221 for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
2222 vub300->fbs[i] = 512;
2223
2224 /*
2225 * set up the endpoint information
2226 *
2227 * use the first pair of bulk-in and bulk-out
2228 * endpoints for Command/Response+Interrupt
2229 *
2230 * use the second pair of bulk-in and bulk-out
2231 * endpoints for Data In/Out
2232 */
2233 vub300->large_usb_packets = 0;
2234 iface_desc = interface->cur_altsetting;
2235 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
2236 struct usb_endpoint_descriptor *endpoint =
2237 &iface_desc->endpoint[i].desc;
2238 dev_info(&vub300->udev->dev,
2239 "vub300 testing %s EndPoint(%d) %02X\n",
2240 usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" :
2241 usb_endpoint_is_bulk_out(endpoint) ? "BULK OUT" :
2242 "UNKNOWN", i, endpoint->bEndpointAddress);
2243 if (endpoint->wMaxPacketSize > 64)
2244 vub300->large_usb_packets = 1;
2245 if (usb_endpoint_is_bulk_in(endpoint)) {
2246 if (!vub300->cmnd_res_ep) {
2247 vub300->cmnd_res_ep =
2248 endpoint->bEndpointAddress;
2249 } else if (!vub300->data_inp_ep) {
2250 vub300->data_inp_ep =
2251 endpoint->bEndpointAddress;
2252 } else {
2253 dev_warn(&vub300->udev->dev,
2254 "ignoring"
2255 " unexpected bulk_in endpoint");
2256 }
2257 } else if (usb_endpoint_is_bulk_out(endpoint)) {
2258 if (!vub300->cmnd_out_ep) {
2259 vub300->cmnd_out_ep =
2260 endpoint->bEndpointAddress;
2261 } else if (!vub300->data_out_ep) {
2262 vub300->data_out_ep =
2263 endpoint->bEndpointAddress;
2264 } else {
2265 dev_warn(&vub300->udev->dev,
2266 "ignoring"
2267 " unexpected bulk_out endpoint");
2268 }
2269 } else {
2270 dev_warn(&vub300->udev->dev,
2271 "vub300 ignoring EndPoint(%d) %02X", i,
2272 endpoint->bEndpointAddress);
2273 }
2274 }
2275 if (vub300->cmnd_res_ep && vub300->cmnd_out_ep &&
2276 vub300->data_inp_ep && vub300->data_out_ep) {
2277 dev_info(&vub300->udev->dev,
2278 "vub300 %s packets"
2279 " using EndPoints %02X %02X %02X %02X\n",
2280 vub300->large_usb_packets ? "LARGE" : "SMALL",
2281 vub300->cmnd_out_ep, vub300->cmnd_res_ep,
2282 vub300->data_out_ep, vub300->data_inp_ep);
2283 /* we have the expected EndPoints */
2284 } else {
2285 dev_err(&vub300->udev->dev,
2286 "Could not find two sets of bulk-in/out endpoint pairs\n");
2287 retval = -EINVAL;
2288 goto error5;
2289 }
2290 retval =
2291 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
2292 GET_HC_INF0,
2293 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2294 0x0000, 0x0000, &vub300->hc_info,
2295 sizeof(vub300->hc_info), HZ);
2296 if (retval < 0)
2297 goto error5;
2298 retval =
2299 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
2300 SET_ROM_WAIT_STATES,
2301 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2302 firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
2303 if (retval < 0)
2304 goto error5;
2305 dev_info(&vub300->udev->dev,
2306 "operating_mode = %s %s %d MHz %s %d byte USB packets\n",
2307 (mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL",
2308 (mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit",
2309 mmc->f_max / 1000000,
2310 pad_input_to_usb_pkt ? "padding input data to" : "with",
2311 vub300->large_usb_packets ? 512 : 64);
2312 retval =
2313 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
2314 GET_SYSTEM_PORT_STATUS,
2315 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2316 0x0000, 0x0000, &vub300->system_port_status,
2317 sizeof(vub300->system_port_status), HZ);
2318 if (retval < 0) {
2319 goto error4;
2320 } else if (sizeof(vub300->system_port_status) == retval) {
2321 vub300->card_present =
2322 (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
2323 vub300->read_only =
2324 (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
2325 } else {
2326 goto error4;
2327 }
2328 usb_set_intfdata(interface, vub300);
2329 INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread);
2330 INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread);
2331 INIT_WORK(&vub300->deadwork, vub300_deadwork_thread);
2332 kref_init(&vub300->kref);
2333 init_timer(&vub300->sg_transfer_timer);
2334 vub300->sg_transfer_timer.data = (unsigned long)vub300;
2335 vub300->sg_transfer_timer.function = vub300_sg_timed_out;
2336 kref_get(&vub300->kref);
2337 init_timer(&vub300->inactivity_timer);
2338 vub300->inactivity_timer.data = (unsigned long)vub300;
2339 vub300->inactivity_timer.function = vub300_inactivity_timer_expired;
2340 vub300->inactivity_timer.expires = jiffies + HZ;
2341 add_timer(&vub300->inactivity_timer);
2342 if (vub300->card_present)
2343 dev_info(&vub300->udev->dev,
2344 "USB vub300 remote SDIO host controller[%d]"
2345 "connected with SD/SDIO card inserted\n",
2346 interface_to_InterfaceNumber(interface));
2347 else
2348 dev_info(&vub300->udev->dev,
2349 "USB vub300 remote SDIO host controller[%d]"
2350 "connected with no SD/SDIO card inserted\n",
2351 interface_to_InterfaceNumber(interface));
2352 mmc_add_host(mmc);
2353 return 0;
2354error5:
2355 mmc_free_host(mmc);
2356 /*
2357 * and hence also frees vub300
2358 * which is contained at the end of struct mmc
2359 */
2360error4:
2361 usb_free_urb(command_out_urb);
2362error1:
2363 usb_free_urb(command_res_urb);
2364error0:
2365 return retval;
2366}
2367
2368static void vub300_disconnect(struct usb_interface *interface)
2369{ /* NOT irq */
2370 struct vub300_mmc_host *vub300 = usb_get_intfdata(interface);
2371 if (!vub300 || !vub300->mmc) {
2372 return;
2373 } else {
2374 struct mmc_host *mmc = vub300->mmc;
2375 if (!vub300->mmc) {
2376 return;
2377 } else {
2378 int ifnum = interface_to_InterfaceNumber(interface);
2379 usb_set_intfdata(interface, NULL);
2380 /* prevent more I/O from starting */
2381 vub300->interface = NULL;
2382 kref_put(&vub300->kref, vub300_delete);
2383 mmc_remove_host(mmc);
2384 pr_info("USB vub300 remote SDIO host controller[%d]"
2385 " now disconnected", ifnum);
2386 return;
2387 }
2388 }
2389}
2390
2391#ifdef CONFIG_PM
2392static int vub300_suspend(struct usb_interface *intf, pm_message_t message)
2393{
2394 struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
2395 if (!vub300 || !vub300->mmc) {
2396 return 0;
2397 } else {
2398 struct mmc_host *mmc = vub300->mmc;
2399 mmc_suspend_host(mmc);
2400 return 0;
2401 }
2402}
2403
2404static int vub300_resume(struct usb_interface *intf)
2405{
2406 struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
2407 if (!vub300 || !vub300->mmc) {
2408 return 0;
2409 } else {
2410 struct mmc_host *mmc = vub300->mmc;
2411 mmc_resume_host(mmc);
2412 return 0;
2413 }
2414}
2415#else
2416#define vub300_suspend NULL
2417#define vub300_resume NULL
2418#endif
/*
 * USB pre_reset hook: take cmd_mutex to quiesce command traffic for the
 * duration of the reset.  Intentionally left locked here; the matching
 * unlock is in vub300_post_reset().
 */
static int vub300_pre_reset(struct usb_interface *intf)
{				/* NOT irq */
	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
	mutex_lock(&vub300->cmd_mutex);
	return 0;
}
2425
/*
 * USB post_reset hook: mark outstanding state as errored and release
 * the cmd_mutex taken in vub300_pre_reset().
 */
static int vub300_post_reset(struct usb_interface *intf)
{				/* NOT irq */
	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
	/* we are sure no URBs are active - no locking needed */
	vub300->errors = -EPIPE;
	mutex_unlock(&vub300->cmd_mutex);
	return 0;
}
2434
/* USB driver registration: binds to IDs in vub300_table */
static struct usb_driver vub300_driver = {
	.name = "vub300",
	.probe = vub300_probe,
	.disconnect = vub300_disconnect,
	.suspend = vub300_suspend,
	.resume = vub300_resume,
	.pre_reset = vub300_pre_reset,
	.post_reset = vub300_post_reset,
	.id_table = vub300_table,
	.supports_autosuspend = 1,
};
2446
/*
 * Module init: create the three single-threaded workqueues (command,
 * interrupt poll, device teardown) before registering the USB driver,
 * so probe can queue work immediately.  Unwinds in reverse on failure.
 */
static int __init vub300_init(void)
{				/* NOT irq */
	int result;

	pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X",
		firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout);
	cmndworkqueue = create_singlethread_workqueue("kvub300c");
	if (!cmndworkqueue) {
		pr_err("not enough memory for the REQUEST workqueue");
		result = -ENOMEM;
		goto out1;
	}
	pollworkqueue = create_singlethread_workqueue("kvub300p");
	if (!pollworkqueue) {
		pr_err("not enough memory for the IRQPOLL workqueue");
		result = -ENOMEM;
		goto out2;
	}
	deadworkqueue = create_singlethread_workqueue("kvub300d");
	if (!deadworkqueue) {
		pr_err("not enough memory for the EXPIRED workqueue");
		result = -ENOMEM;
		goto out3;
	}
	result = usb_register(&vub300_driver);
	if (result) {
		pr_err("usb_register failed. Error number %d", result);
		goto out4;
	}
	return 0;
out4:
	destroy_workqueue(deadworkqueue);
out3:
	destroy_workqueue(pollworkqueue);
out2:
	destroy_workqueue(cmndworkqueue);
out1:
	return result;
}
2486
/*
 * Module exit: deregister the USB driver first so no new work is
 * queued, then drain and destroy the workqueues.
 */
static void __exit vub300_exit(void)
{
	usb_deregister(&vub300_driver);
	flush_workqueue(cmndworkqueue);
	flush_workqueue(pollworkqueue);
	flush_workqueue(deadworkqueue);
	destroy_workqueue(cmndworkqueue);
	destroy_workqueue(pollworkqueue);
	destroy_workqueue(deadworkqueue);
}
2497
/* module entry/exit points and metadata */
module_init(vub300_init);
module_exit(vub300_exit);

MODULE_AUTHOR("Tony Olech <tony.olech@elandigitalsystems.com>");
MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 0012f5d13d28..62e5a4d171e1 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -484,7 +484,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
484 484
485 /* 485 /*
486 * Check that we aren't being called after the 486 * Check that we aren't being called after the
487 * entire buffer has been transfered. 487 * entire buffer has been transferred.
488 */ 488 */
489 if (host->num_sg == 0) 489 if (host->num_sg == 0)
490 return; 490 return;
@@ -828,7 +828,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
828 /* 828 /*
829 * If this is a data transfer the request 829 * If this is a data transfer the request
830 * will be finished after the data has 830 * will be finished after the data has
831 * transfered. 831 * transferred.
832 */ 832 */
833 if (cmd->data && !cmd->error) { 833 if (cmd->data && !cmd->error) {
834 /* 834 /*
@@ -904,7 +904,7 @@ static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
904 setup &= ~WBSD_DAT3_H; 904 setup &= ~WBSD_DAT3_H;
905 905
906 /* 906 /*
907 * We cannot resume card detection immediatly 907 * We cannot resume card detection immediately
908 * because of capacitance and delays in the chip. 908 * because of capacitance and delays in the chip.
909 */ 909 */
910 mod_timer(&host->ignore_timer, jiffies + HZ / 100); 910 mod_timer(&host->ignore_timer, jiffies + HZ / 100);
@@ -1235,8 +1235,7 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
1235 * Maximum number of segments. Worst case is one sector per segment 1235 * Maximum number of segments. Worst case is one sector per segment
1236 * so this will be 64kB/512. 1236 * so this will be 64kB/512.
1237 */ 1237 */
1238 mmc->max_hw_segs = 128; 1238 mmc->max_segs = 128;
1239 mmc->max_phys_segs = 128;
1240 1239
1241 /* 1240 /*
1242 * Maximum request size. Also limited by 64KiB buffer. 1241 * Maximum request size. Also limited by 64KiB buffer.