author     Linus Torvalds <torvalds@linux-foundation.org>  2011-05-25 19:55:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-05-25 19:55:55 -0400
commit     8c1c77ff9be27137fa7cbbf51efedef1a2ae915b (patch)
tree       cdbd09cac5f5d1c6eb5ec4257dc478c6acca70c5
parent     f3ae1c75203535f65448517e46c8dd70a56b6c71 (diff)
parent     08ee80cc397ac1a306ca689a22ede954d92d0db1 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (75 commits)
  mmc: core: eMMC bus width may not work on all platforms
  mmc: sdhci: Auto-CMD23 fixes.
  mmc: sdhci: Auto-CMD23 support.
  mmc: core: Block CMD23 support for UHS104/SDXC cards.
  mmc: sdhci: Implement MMC_CAP_CMD23 for SDHCI.
  mmc: core: Use CMD23 for multiblock transfers when we can.
  mmc: quirks: Add/remove quirks conditional support.
  mmc: Add new VUB300 USB-to-SD/SDIO/MMC driver
  mmc: sdhci-pxa: Add quirks for DMA/ADMA to match h/w
  mmc: core: duplicated trial with same freq in mmc_rescan_try_freq()
  mmc: core: add support for eMMC Dual Data Rate
  mmc: core: eMMC signal voltage does not use CMD11
  mmc: sdhci-pxa: add platform code for UHS signaling
  mmc: sdhci: add hooks for setting UHS in platform specific code
  mmc: core: clear MMC_PM_KEEP_POWER flag on resume
  mmc: dw_mmc: fixed wrong regulator_enable in suspend/resume
  mmc: sdhi: allow powering down controller with no card inserted
  mmc: tmio: runtime suspend the controller, where possible
  mmc: sdhi: support up to 3 interrupt sources
  mmc: sdhi: print physical base address and clock rate
  ...
-rw-r--r--  Documentation/ioctl/ioctl-number.txt        |    1
-rw-r--r--  Documentation/mmc/00-INDEX                  |    2
-rw-r--r--  Documentation/mmc/mmc-dev-attrs.txt         |   10
-rw-r--r--  Documentation/mmc/mmc-dev-parts.txt         |   27
-rw-r--r--  MAINTAINERS                                 |    7
-rw-r--r--  arch/arm/mach-tegra/include/mach/sdhci.h    |    1
-rw-r--r--  drivers/mmc/card/block.c                    |  712
-rw-r--r--  drivers/mmc/card/mmc_test.c                 |  116
-rw-r--r--  drivers/mmc/card/queue.c                    |    8
-rw-r--r--  drivers/mmc/core/bus.c                      |   11
-rw-r--r--  drivers/mmc/core/core.c                     |  111
-rw-r--r--  drivers/mmc/core/core.h                     |    7
-rw-r--r--  drivers/mmc/core/host.c                     |    4
-rw-r--r--  drivers/mmc/core/mmc.c                      |  186
-rw-r--r--  drivers/mmc/core/mmc_ops.c                  |   80
-rw-r--r--  drivers/mmc/core/mmc_ops.h                  |    1
-rw-r--r--  drivers/mmc/core/quirks.c                   |   89
-rw-r--r--  drivers/mmc/core/sd.c                       |  405
-rw-r--r--  drivers/mmc/core/sd.h                       |    2
-rw-r--r--  drivers/mmc/core/sd_ops.c                   |   51
-rw-r--r--  drivers/mmc/core/sdio.c                     |   24
-rw-r--r--  drivers/mmc/core/sdio_irq.c                 |   33
-rw-r--r--  drivers/mmc/core/sdio_ops.c                 |   18
-rw-r--r--  drivers/mmc/host/Kconfig                    |   33
-rw-r--r--  drivers/mmc/host/Makefile                   |    1
-rw-r--r--  drivers/mmc/host/dw_mmc.c                   |    6
-rw-r--r--  drivers/mmc/host/sdhci-pci.c                |   49
-rw-r--r--  drivers/mmc/host/sdhci-pxa.c                |   48
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c              |    2
-rw-r--r--  drivers/mmc/host/sdhci.c                    |  854
-rw-r--r--  drivers/mmc/host/sdhci.h                    |   59
-rw-r--r--  drivers/mmc/host/sh_mmcif.c                 |  126
-rw-r--r--  drivers/mmc/host/sh_mobile_sdhi.c           |   50
-rw-r--r--  drivers/mmc/host/tmio_mmc.c                 |   32
-rw-r--r--  drivers/mmc/host/tmio_mmc.h                 |   16
-rw-r--r--  drivers/mmc/host/tmio_mmc_dma.c             |   21
-rw-r--r--  drivers/mmc/host/tmio_mmc_pio.c             |  184
-rw-r--r--  drivers/mmc/host/vub300.c                   | 2506
-rw-r--r--  include/linux/Kbuild                        |    1
-rw-r--r--  include/linux/mfd/tmio.h                    |   17
-rw-r--r--  include/linux/mmc/Kbuild                    |    1
-rw-r--r--  include/linux/mmc/card.h                    |  189
-rw-r--r--  include/linux/mmc/core.h                    |    5
-rw-r--r--  include/linux/mmc/host.h                    |   49
-rw-r--r--  include/linux/mmc/ioctl.h                   |   54
-rw-r--r--  include/linux/mmc/mmc.h                     |   18
-rw-r--r--  include/linux/mmc/sd.h                      |    9
-rw-r--r--  include/linux/mmc/sdhci.h                   |   15
-rw-r--r--  include/linux/mmc/sh_mobile_sdhi.h          |    4
49 files changed, 5699 insertions, 556 deletions
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 2d1ad12e2b3e..3a46e360496d 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -304,6 +304,7 @@ Code Seq#(hex) Include File Comments
 0xB0	all	RATIO devices			in development:
 						<mailto:vgo@ratio.de>
 0xB1	00-1F	PPPoX				<mailto:mostrows@styx.uwaterloo.ca>
+0xB3	00	linux/mmc/ioctl.h
 0xC0	00-0F	linux/usb/iowarrior.h
 0xCB	00-1F	CBM serial IEC bus		in development:
 						<mailto:michael.klein@puffin.lb.shuttle.de>
diff --git a/Documentation/mmc/00-INDEX b/Documentation/mmc/00-INDEX
index fca586f5b853..93dd7a714075 100644
--- a/Documentation/mmc/00-INDEX
+++ b/Documentation/mmc/00-INDEX
@@ -2,3 +2,5 @@
  - this file
 mmc-dev-attrs.txt
  - info on SD and MMC device attributes
+mmc-dev-parts.txt
+ - info on SD and MMC device partitions
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index ff2bd685bced..8898a95b41e5 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -1,3 +1,13 @@
+SD and MMC Block Device Attributes
+==================================
+
+These attributes are defined for the block devices associated with the
+SD or MMC device.
+
+The following attributes are read/write.
+
+	force_ro	Enforce read-only access even if write protect switch is off.
+
 SD and MMC Device Attributes
 ============================
 
diff --git a/Documentation/mmc/mmc-dev-parts.txt b/Documentation/mmc/mmc-dev-parts.txt
new file mode 100644
index 000000000000..2db28b8e662f
--- /dev/null
+++ b/Documentation/mmc/mmc-dev-parts.txt
@@ -0,0 +1,27 @@
+SD and MMC Device Partitions
+============================
+
+Device partitions are additional logical block devices present on the
+SD/MMC device.
+
+As of this writing, MMC boot partitions are supported and exposed as
+/dev/mmcblkXboot0 and /dev/mmcblkXboot1, where X is the index of the
+parent /dev/mmcblkX.
+
+MMC Boot Partitions
+===================
+
+Read and write access is provided to the two MMC boot partitions. Due to
+the sensitive nature of the boot partition contents, which often store
+a bootloader or bootloader configuration tables crucial to booting the
+platform, write access is disabled by default to reduce the chance of
+accidental bricking.
+
+To enable write access to /dev/mmcblkXbootY, disable the forced read-only
+access with:
+
+echo 0 > /sys/block/mmcblkXbootY/force_ro
+
+To re-enable read-only access:
+
+echo 1 > /sys/block/mmcblkXbootY/force_ro
diff --git a/MAINTAINERS b/MAINTAINERS
index 43494463b57d..1ab17de642e5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6800,6 +6800,13 @@ L: lm-sensors@lm-sensors.org
 S:	Maintained
 F:	drivers/hwmon/vt8231.c
 
+VUB300 USB to SDIO/SD/MMC bridge chip
+M:	Tony Olech <tony.olech@elandigitalsystems.com>
+L:	linux-mmc@vger.kernel.org
+L:	linux-usb@vger.kernel.org
+S:	Supported
+F:	drivers/mmc/host/vub300.c
+
 W1 DALLAS'S 1-WIRE BUS
 M:	Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 S:	Maintained
diff --git a/arch/arm/mach-tegra/include/mach/sdhci.h b/arch/arm/mach-tegra/include/mach/sdhci.h
index 3ad086e859c3..4231bc7b8652 100644
--- a/arch/arm/mach-tegra/include/mach/sdhci.h
+++ b/arch/arm/mach-tegra/include/mach/sdhci.h
@@ -24,6 +24,7 @@ struct tegra_sdhci_platform_data {
 	int wp_gpio;
 	int power_gpio;
 	int is_8bit;
+	int pm_flags;
 };
 
 #endif
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 61d233a7c118..71da5641e258 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -31,7 +31,11 @@
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/scatterlist.h> 32#include <linux/scatterlist.h>
33#include <linux/string_helpers.h> 33#include <linux/string_helpers.h>
34#include <linux/delay.h>
35#include <linux/capability.h>
36#include <linux/compat.h>
34 37
38#include <linux/mmc/ioctl.h>
35#include <linux/mmc/card.h> 39#include <linux/mmc/card.h>
36#include <linux/mmc/host.h> 40#include <linux/mmc/host.h>
37#include <linux/mmc/mmc.h> 41#include <linux/mmc/mmc.h>
@@ -48,6 +52,13 @@ MODULE_ALIAS("mmc:block");
48#endif 52#endif
49#define MODULE_PARAM_PREFIX "mmcblk." 53#define MODULE_PARAM_PREFIX "mmcblk."
50 54
55#define INAND_CMD38_ARG_EXT_CSD 113
56#define INAND_CMD38_ARG_ERASE 0x00
57#define INAND_CMD38_ARG_TRIM 0x01
58#define INAND_CMD38_ARG_SECERASE 0x80
59#define INAND_CMD38_ARG_SECTRIM1 0x81
60#define INAND_CMD38_ARG_SECTRIM2 0x88
61
51static DEFINE_MUTEX(block_mutex); 62static DEFINE_MUTEX(block_mutex);
52 63
53/* 64/*
@@ -64,6 +75,7 @@ static int max_devices;
64 75
65/* 256 minors, so at most 256 separate devices */ 76/* 256 minors, so at most 256 separate devices */
66static DECLARE_BITMAP(dev_use, 256); 77static DECLARE_BITMAP(dev_use, 256);
78static DECLARE_BITMAP(name_use, 256);
67 79
68/* 80/*
69 * There is one mmc_blk_data per slot. 81 * There is one mmc_blk_data per slot.
@@ -72,9 +84,24 @@ struct mmc_blk_data {
72 spinlock_t lock; 84 spinlock_t lock;
73 struct gendisk *disk; 85 struct gendisk *disk;
74 struct mmc_queue queue; 86 struct mmc_queue queue;
87 struct list_head part;
88
89 unsigned int flags;
90#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
91#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
75 92
76 unsigned int usage; 93 unsigned int usage;
77 unsigned int read_only; 94 unsigned int read_only;
95 unsigned int part_type;
96 unsigned int name_idx;
97
98 /*
99 * Only set in main mmc_blk_data associated
100 * with mmc_card with mmc_set_drvdata, and keeps
101 * track of the current selected device partition.
102 */
103 unsigned int part_curr;
104 struct device_attribute force_ro;
78}; 105};
79 106
80static DEFINE_MUTEX(open_lock); 107static DEFINE_MUTEX(open_lock);
@@ -97,17 +124,22 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
97 return md; 124 return md;
98} 125}
99 126
127static inline int mmc_get_devidx(struct gendisk *disk)
128{
129 int devmaj = MAJOR(disk_devt(disk));
130 int devidx = MINOR(disk_devt(disk)) / perdev_minors;
131
132 if (!devmaj)
133 devidx = disk->first_minor / perdev_minors;
134 return devidx;
135}
136
100static void mmc_blk_put(struct mmc_blk_data *md) 137static void mmc_blk_put(struct mmc_blk_data *md)
101{ 138{
102 mutex_lock(&open_lock); 139 mutex_lock(&open_lock);
103 md->usage--; 140 md->usage--;
104 if (md->usage == 0) { 141 if (md->usage == 0) {
105 int devmaj = MAJOR(disk_devt(md->disk)); 142 int devidx = mmc_get_devidx(md->disk);
106 int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;
107
108 if (!devmaj)
109 devidx = md->disk->first_minor / perdev_minors;
110
111 blk_cleanup_queue(md->queue.queue); 143 blk_cleanup_queue(md->queue.queue);
112 144
113 __clear_bit(devidx, dev_use); 145 __clear_bit(devidx, dev_use);
@@ -118,6 +150,38 @@ static void mmc_blk_put(struct mmc_blk_data *md)
118 mutex_unlock(&open_lock); 150 mutex_unlock(&open_lock);
119} 151}
120 152
153static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
154 char *buf)
155{
156 int ret;
157 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
158
159 ret = snprintf(buf, PAGE_SIZE, "%d",
160 get_disk_ro(dev_to_disk(dev)) ^
161 md->read_only);
162 mmc_blk_put(md);
163 return ret;
164}
165
166static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
167 const char *buf, size_t count)
168{
169 int ret;
170 char *end;
171 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
172 unsigned long set = simple_strtoul(buf, &end, 0);
173 if (end == buf) {
174 ret = -EINVAL;
175 goto out;
176 }
177
178 set_disk_ro(dev_to_disk(dev), set || md->read_only);
179 ret = count;
180out:
181 mmc_blk_put(md);
182 return ret;
183}
184
121static int mmc_blk_open(struct block_device *bdev, fmode_t mode) 185static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
122{ 186{
123 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); 187 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -158,35 +222,255 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
158 return 0; 222 return 0;
159} 223}
160 224
225struct mmc_blk_ioc_data {
226 struct mmc_ioc_cmd ic;
227 unsigned char *buf;
228 u64 buf_bytes;
229};
230
231static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
232 struct mmc_ioc_cmd __user *user)
233{
234 struct mmc_blk_ioc_data *idata;
235 int err;
236
237 idata = kzalloc(sizeof(*idata), GFP_KERNEL);
238 if (!idata) {
239 err = -ENOMEM;
240 goto out;
241 }
242
243 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
244 err = -EFAULT;
245 goto idata_err;
246 }
247
248 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
249 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
250 err = -EOVERFLOW;
251 goto idata_err;
252 }
253
254 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
255 if (!idata->buf) {
256 err = -ENOMEM;
257 goto idata_err;
258 }
259
260 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
261 idata->ic.data_ptr, idata->buf_bytes)) {
262 err = -EFAULT;
263 goto copy_err;
264 }
265
266 return idata;
267
268copy_err:
269 kfree(idata->buf);
270idata_err:
271 kfree(idata);
272out:
273 return ERR_PTR(err);
274}
275
276static int mmc_blk_ioctl_cmd(struct block_device *bdev,
277 struct mmc_ioc_cmd __user *ic_ptr)
278{
279 struct mmc_blk_ioc_data *idata;
280 struct mmc_blk_data *md;
281 struct mmc_card *card;
282 struct mmc_command cmd = {0};
283 struct mmc_data data = {0};
284 struct mmc_request mrq = {0};
285 struct scatterlist sg;
286 int err;
287
288 /*
289 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
290 * whole block device, not on a partition. This prevents overspray
291 * between sibling partitions.
292 */
293 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
294 return -EPERM;
295
296 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
297 if (IS_ERR(idata))
298 return PTR_ERR(idata);
299
300 cmd.opcode = idata->ic.opcode;
301 cmd.arg = idata->ic.arg;
302 cmd.flags = idata->ic.flags;
303
304 data.sg = &sg;
305 data.sg_len = 1;
306 data.blksz = idata->ic.blksz;
307 data.blocks = idata->ic.blocks;
308
309 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
310
311 if (idata->ic.write_flag)
312 data.flags = MMC_DATA_WRITE;
313 else
314 data.flags = MMC_DATA_READ;
315
316 mrq.cmd = &cmd;
317 mrq.data = &data;
318
319 md = mmc_blk_get(bdev->bd_disk);
320 if (!md) {
321 err = -EINVAL;
322 goto cmd_done;
323 }
324
325 card = md->queue.card;
326 if (IS_ERR(card)) {
327 err = PTR_ERR(card);
328 goto cmd_done;
329 }
330
331 mmc_claim_host(card->host);
332
333 if (idata->ic.is_acmd) {
334 err = mmc_app_cmd(card->host, card);
335 if (err)
336 goto cmd_rel_host;
337 }
338
339 /* data.flags must already be set before doing this. */
340 mmc_set_data_timeout(&data, card);
341 /* Allow overriding the timeout_ns for empirical tuning. */
342 if (idata->ic.data_timeout_ns)
343 data.timeout_ns = idata->ic.data_timeout_ns;
344
345 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
346 /*
347 * Pretend this is a data transfer and rely on the host driver
348 * to compute timeout. When all host drivers support
349 * cmd.cmd_timeout for R1B, this can be changed to:
350 *
351 * mrq.data = NULL;
352 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
353 */
354 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
355 }
356
357 mmc_wait_for_req(card->host, &mrq);
358
359 if (cmd.error) {
360 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
361 __func__, cmd.error);
362 err = cmd.error;
363 goto cmd_rel_host;
364 }
365 if (data.error) {
366 dev_err(mmc_dev(card->host), "%s: data error %d\n",
367 __func__, data.error);
368 err = data.error;
369 goto cmd_rel_host;
370 }
371
372 /*
373 * According to the SD specs, some commands require a delay after
374 * issuing the command.
375 */
376 if (idata->ic.postsleep_min_us)
377 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
378
379 if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
380 err = -EFAULT;
381 goto cmd_rel_host;
382 }
383
384 if (!idata->ic.write_flag) {
385 if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
386 idata->buf, idata->buf_bytes)) {
387 err = -EFAULT;
388 goto cmd_rel_host;
389 }
390 }
391
392cmd_rel_host:
393 mmc_release_host(card->host);
394
395cmd_done:
396 mmc_blk_put(md);
397 kfree(idata->buf);
398 kfree(idata);
399 return err;
400}
401
402static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
403 unsigned int cmd, unsigned long arg)
404{
405 int ret = -EINVAL;
406 if (cmd == MMC_IOC_CMD)
407 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
408 return ret;
409}
410
411#ifdef CONFIG_COMPAT
412static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
413 unsigned int cmd, unsigned long arg)
414{
415 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
416}
417#endif
418
161static const struct block_device_operations mmc_bdops = { 419static const struct block_device_operations mmc_bdops = {
162 .open = mmc_blk_open, 420 .open = mmc_blk_open,
163 .release = mmc_blk_release, 421 .release = mmc_blk_release,
164 .getgeo = mmc_blk_getgeo, 422 .getgeo = mmc_blk_getgeo,
165 .owner = THIS_MODULE, 423 .owner = THIS_MODULE,
424 .ioctl = mmc_blk_ioctl,
425#ifdef CONFIG_COMPAT
426 .compat_ioctl = mmc_blk_compat_ioctl,
427#endif
166}; 428};
167 429
168struct mmc_blk_request { 430struct mmc_blk_request {
169 struct mmc_request mrq; 431 struct mmc_request mrq;
432 struct mmc_command sbc;
170 struct mmc_command cmd; 433 struct mmc_command cmd;
171 struct mmc_command stop; 434 struct mmc_command stop;
172 struct mmc_data data; 435 struct mmc_data data;
173}; 436};
174 437
438static inline int mmc_blk_part_switch(struct mmc_card *card,
439 struct mmc_blk_data *md)
440{
441 int ret;
442 struct mmc_blk_data *main_md = mmc_get_drvdata(card);
443 if (main_md->part_curr == md->part_type)
444 return 0;
445
446 if (mmc_card_mmc(card)) {
447 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
448 card->ext_csd.part_config |= md->part_type;
449
450 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
451 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
452 card->ext_csd.part_time);
453 if (ret)
454 return ret;
455}
456
457 main_md->part_curr = md->part_type;
458 return 0;
459}
460
175static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) 461static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
176{ 462{
177 int err; 463 int err;
178 u32 result; 464 u32 result;
179 __be32 *blocks; 465 __be32 *blocks;
180 466
181 struct mmc_request mrq; 467 struct mmc_request mrq = {0};
182 struct mmc_command cmd; 468 struct mmc_command cmd = {0};
183 struct mmc_data data; 469 struct mmc_data data = {0};
184 unsigned int timeout_us; 470 unsigned int timeout_us;
185 471
186 struct scatterlist sg; 472 struct scatterlist sg;
187 473
188 memset(&cmd, 0, sizeof(struct mmc_command));
189
190 cmd.opcode = MMC_APP_CMD; 474 cmd.opcode = MMC_APP_CMD;
191 cmd.arg = card->rca << 16; 475 cmd.arg = card->rca << 16;
192 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 476 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
@@ -203,8 +487,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
203 cmd.arg = 0; 487 cmd.arg = 0;
204 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 488 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
205 489
206 memset(&data, 0, sizeof(struct mmc_data));
207
208 data.timeout_ns = card->csd.tacc_ns * 100; 490 data.timeout_ns = card->csd.tacc_ns * 100;
209 data.timeout_clks = card->csd.tacc_clks * 100; 491 data.timeout_clks = card->csd.tacc_clks * 100;
210 492
@@ -223,8 +505,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
223 data.sg = &sg; 505 data.sg = &sg;
224 data.sg_len = 1; 506 data.sg_len = 1;
225 507
226 memset(&mrq, 0, sizeof(struct mmc_request));
227
228 mrq.cmd = &cmd; 508 mrq.cmd = &cmd;
229 mrq.data = &data; 509 mrq.data = &data;
230 510
@@ -247,10 +527,9 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
247 527
248static u32 get_card_status(struct mmc_card *card, struct request *req) 528static u32 get_card_status(struct mmc_card *card, struct request *req)
249{ 529{
250 struct mmc_command cmd; 530 struct mmc_command cmd = {0};
251 int err; 531 int err;
252 532
253 memset(&cmd, 0, sizeof(struct mmc_command));
254 cmd.opcode = MMC_SEND_STATUS; 533 cmd.opcode = MMC_SEND_STATUS;
255 if (!mmc_host_is_spi(card->host)) 534 if (!mmc_host_is_spi(card->host))
256 cmd.arg = card->rca << 16; 535 cmd.arg = card->rca << 16;
@@ -269,8 +548,6 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
269 unsigned int from, nr, arg; 548 unsigned int from, nr, arg;
270 int err = 0; 549 int err = 0;
271 550
272 mmc_claim_host(card->host);
273
274 if (!mmc_can_erase(card)) { 551 if (!mmc_can_erase(card)) {
275 err = -EOPNOTSUPP; 552 err = -EOPNOTSUPP;
276 goto out; 553 goto out;
@@ -284,14 +561,22 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
284 else 561 else
285 arg = MMC_ERASE_ARG; 562 arg = MMC_ERASE_ARG;
286 563
564 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
565 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
566 INAND_CMD38_ARG_EXT_CSD,
567 arg == MMC_TRIM_ARG ?
568 INAND_CMD38_ARG_TRIM :
569 INAND_CMD38_ARG_ERASE,
570 0);
571 if (err)
572 goto out;
573 }
287 err = mmc_erase(card, from, nr, arg); 574 err = mmc_erase(card, from, nr, arg);
288out: 575out:
289 spin_lock_irq(&md->lock); 576 spin_lock_irq(&md->lock);
290 __blk_end_request(req, err, blk_rq_bytes(req)); 577 __blk_end_request(req, err, blk_rq_bytes(req));
291 spin_unlock_irq(&md->lock); 578 spin_unlock_irq(&md->lock);
292 579
293 mmc_release_host(card->host);
294
295 return err ? 0 : 1; 580 return err ? 0 : 1;
296} 581}
297 582
@@ -303,8 +588,6 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
303 unsigned int from, nr, arg; 588 unsigned int from, nr, arg;
304 int err = 0; 589 int err = 0;
305 590
306 mmc_claim_host(card->host);
307
308 if (!mmc_can_secure_erase_trim(card)) { 591 if (!mmc_can_secure_erase_trim(card)) {
309 err = -EOPNOTSUPP; 592 err = -EOPNOTSUPP;
310 goto out; 593 goto out;
@@ -318,19 +601,74 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
318 else 601 else
319 arg = MMC_SECURE_ERASE_ARG; 602 arg = MMC_SECURE_ERASE_ARG;
320 603
604 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
605 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
606 INAND_CMD38_ARG_EXT_CSD,
607 arg == MMC_SECURE_TRIM1_ARG ?
608 INAND_CMD38_ARG_SECTRIM1 :
609 INAND_CMD38_ARG_SECERASE,
610 0);
611 if (err)
612 goto out;
613 }
321 err = mmc_erase(card, from, nr, arg); 614 err = mmc_erase(card, from, nr, arg);
322 if (!err && arg == MMC_SECURE_TRIM1_ARG) 615 if (!err && arg == MMC_SECURE_TRIM1_ARG) {
616 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
617 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
618 INAND_CMD38_ARG_EXT_CSD,
619 INAND_CMD38_ARG_SECTRIM2,
620 0);
621 if (err)
622 goto out;
623 }
323 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); 624 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
625 }
324out: 626out:
325 spin_lock_irq(&md->lock); 627 spin_lock_irq(&md->lock);
326 __blk_end_request(req, err, blk_rq_bytes(req)); 628 __blk_end_request(req, err, blk_rq_bytes(req));
327 spin_unlock_irq(&md->lock); 629 spin_unlock_irq(&md->lock);
328 630
329 mmc_release_host(card->host);
330
331 return err ? 0 : 1; 631 return err ? 0 : 1;
332} 632}
333 633
634static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
635{
636 struct mmc_blk_data *md = mq->data;
637
638 /*
639 * No-op, only service this because we need REQ_FUA for reliable
640 * writes.
641 */
642 spin_lock_irq(&md->lock);
643 __blk_end_request_all(req, 0);
644 spin_unlock_irq(&md->lock);
645
646 return 1;
647}
648
649/*
650 * Reformat current write as a reliable write, supporting
651 * both legacy and the enhanced reliable write MMC cards.
652 * In each transfer we'll handle only as much as a single
653 * reliable write can handle, thus finish the request in
654 * partial completions.
655 */
656static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
657 struct mmc_card *card,
658 struct request *req)
659{
660 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
661 /* Legacy mode imposes restrictions on transfers. */
662 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
663 brq->data.blocks = 1;
664
665 if (brq->data.blocks > card->ext_csd.rel_sectors)
666 brq->data.blocks = card->ext_csd.rel_sectors;
667 else if (brq->data.blocks < card->ext_csd.rel_sectors)
668 brq->data.blocks = 1;
669 }
670}
671
334static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) 672static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
335{ 673{
336 struct mmc_blk_data *md = mq->data; 674 struct mmc_blk_data *md = mq->data;
@@ -338,10 +676,17 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
338 struct mmc_blk_request brq; 676 struct mmc_blk_request brq;
339 int ret = 1, disable_multi = 0; 677 int ret = 1, disable_multi = 0;
340 678
341 mmc_claim_host(card->host); 679 /*
680 * Reliable writes are used to implement Forced Unit Access and
681 * REQ_META accesses, and are supported only on MMCs.
682 */
683 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
684 (req->cmd_flags & REQ_META)) &&
685 (rq_data_dir(req) == WRITE) &&
686 (md->flags & MMC_BLK_REL_WR);
342 687
343 do { 688 do {
344 struct mmc_command cmd; 689 struct mmc_command cmd = {0};
345 u32 readcmd, writecmd, status = 0; 690 u32 readcmd, writecmd, status = 0;
346 691
347 memset(&brq, 0, sizeof(struct mmc_blk_request)); 692 memset(&brq, 0, sizeof(struct mmc_blk_request));
@@ -374,12 +719,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
374 if (disable_multi && brq.data.blocks > 1) 719 if (disable_multi && brq.data.blocks > 1)
375 brq.data.blocks = 1; 720 brq.data.blocks = 1;
376 721
377 if (brq.data.blocks > 1) { 722 if (brq.data.blocks > 1 || do_rel_wr) {
378 /* SPI multiblock writes terminate using a special 723 /* SPI multiblock writes terminate using a special
379 * token, not a STOP_TRANSMISSION request. 724 * token, not a STOP_TRANSMISSION request.
380 */ 725 */
381 if (!mmc_host_is_spi(card->host) 726 if (!mmc_host_is_spi(card->host) ||
382 || rq_data_dir(req) == READ) 727 rq_data_dir(req) == READ)
383 brq.mrq.stop = &brq.stop; 728 brq.mrq.stop = &brq.stop;
384 readcmd = MMC_READ_MULTIPLE_BLOCK; 729 readcmd = MMC_READ_MULTIPLE_BLOCK;
385 writecmd = MMC_WRITE_MULTIPLE_BLOCK; 730 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
@@ -396,6 +741,38 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
396 brq.data.flags |= MMC_DATA_WRITE; 741 brq.data.flags |= MMC_DATA_WRITE;
397 } 742 }
398 743
744 if (do_rel_wr)
745 mmc_apply_rel_rw(&brq, card, req);
746
747 /*
748 * Pre-defined multi-block transfers are preferable to
749 * open ended-ones (and necessary for reliable writes).
750 * However, it is not sufficient to just send CMD23,
751 * and avoid the final CMD12, as on an error condition
752 * CMD12 (stop) needs to be sent anyway. This, coupled
753 * with Auto-CMD23 enhancements provided by some
754 * hosts, means that the complexity of dealing
755 * with this is best left to the host. If CMD23 is
756 * supported by card and host, we'll fill sbc in and let
757 * the host deal with handling it correctly. This means
758 * that for hosts that don't expose MMC_CAP_CMD23, no
759 * change of behavior will be observed.
760 *
761 * N.B: Some MMC cards experience perf degradation.
762 * We'll avoid using CMD23-bounded multiblock writes for
763 * these, while retaining features like reliable writes.
764 */
765
766 if ((md->flags & MMC_BLK_CMD23) &&
767 mmc_op_multi(brq.cmd.opcode) &&
768 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
769 brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
770 brq.sbc.arg = brq.data.blocks |
771 (do_rel_wr ? (1 << 31) : 0);
772 brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
773 brq.mrq.sbc = &brq.sbc;
774 }
775
399 mmc_set_data_timeout(&brq.data, card); 776 mmc_set_data_timeout(&brq.data, card);
400 777
401 brq.data.sg = mq->sg; 778 brq.data.sg = mq->sg;
@@ -431,7 +808,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
431 * until later as we need to wait for the card to leave 808 * until later as we need to wait for the card to leave
432 * programming mode even when things go wrong. 809 * programming mode even when things go wrong.
433 */ 810 */
434 if (brq.cmd.error || brq.data.error || brq.stop.error) { 811 if (brq.sbc.error || brq.cmd.error ||
812 brq.data.error || brq.stop.error) {
435 if (brq.data.blocks > 1 && rq_data_dir(req) == READ) { 813 if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
436 /* Redo read one sector at a time */ 814 /* Redo read one sector at a time */
437 printk(KERN_WARNING "%s: retrying using single " 815 printk(KERN_WARNING "%s: retrying using single "
@@ -442,6 +820,13 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
442 status = get_card_status(card, req); 820 status = get_card_status(card, req);
443 } 821 }
444 822
823 if (brq.sbc.error) {
824 printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
825 "command, response %#x, card status %#x\n",
826 req->rq_disk->disk_name, brq.sbc.error,
827 brq.sbc.resp[0], status);
828 }
829
445 if (brq.cmd.error) { 830 if (brq.cmd.error) {
446 printk(KERN_ERR "%s: error %d sending read/write " 831 printk(KERN_ERR "%s: error %d sending read/write "
447 "command, response %#x, card status %#x\n", 832 "command, response %#x, card status %#x\n",
@@ -520,8 +905,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
520 spin_unlock_irq(&md->lock); 905 spin_unlock_irq(&md->lock);
521 } while (ret); 906 } while (ret);
522 907
523 mmc_release_host(card->host);
524
525 return 1; 908 return 1;
526 909
527 cmd_err: 910 cmd_err:
@@ -548,8 +931,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
548 spin_unlock_irq(&md->lock); 931 spin_unlock_irq(&md->lock);
549 } 932 }
550 933
551 mmc_release_host(card->host);
552
553 spin_lock_irq(&md->lock); 934 spin_lock_irq(&md->lock);
554 while (ret) 935 while (ret)
555 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); 936 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
@@ -560,14 +941,31 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
560 941
561static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) 942static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
562{ 943{
944 int ret;
945 struct mmc_blk_data *md = mq->data;
946 struct mmc_card *card = md->queue.card;
947
948 mmc_claim_host(card->host);
949 ret = mmc_blk_part_switch(card, md);
950 if (ret) {
951 ret = 0;
952 goto out;
953 }
954
563 if (req->cmd_flags & REQ_DISCARD) { 955 if (req->cmd_flags & REQ_DISCARD) {
564 if (req->cmd_flags & REQ_SECURE) 956 if (req->cmd_flags & REQ_SECURE)
565 return mmc_blk_issue_secdiscard_rq(mq, req); 957 ret = mmc_blk_issue_secdiscard_rq(mq, req);
566 else 958 else
567 return mmc_blk_issue_discard_rq(mq, req); 959 ret = mmc_blk_issue_discard_rq(mq, req);
960 } else if (req->cmd_flags & REQ_FLUSH) {
961 ret = mmc_blk_issue_flush(mq, req);
568 } else { 962 } else {
569 return mmc_blk_issue_rw_rq(mq, req); 963 ret = mmc_blk_issue_rw_rq(mq, req);
570 } 964 }
965
966out:
967 mmc_release_host(card->host);
968 return ret;
571} 969}
572 970
573static inline int mmc_blk_readonly(struct mmc_card *card) 971static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -576,7 +974,11 @@ static inline int mmc_blk_readonly(struct mmc_card *card)
576 !(card->csd.cmdclass & CCC_BLOCK_WRITE); 974 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
577} 975}
578 976
579static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) 977static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
978 struct device *parent,
979 sector_t size,
980 bool default_ro,
981 const char *subname)
580{ 982{
581 struct mmc_blk_data *md; 983 struct mmc_blk_data *md;
582 int devidx, ret; 984 int devidx, ret;
@@ -592,6 +994,19 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
592 goto out; 994 goto out;
593 } 995 }
594 996
997 /*
998 * !subname implies we are creating main mmc_blk_data that will be
999 * associated with mmc_card with mmc_set_drvdata. Due to device
1000 * partitions, devidx will not coincide with a per-physical card
1001 * index anymore so we keep track of a name index.
1002 */
1003 if (!subname) {
1004 md->name_idx = find_first_zero_bit(name_use, max_devices);
1005 __set_bit(md->name_idx, name_use);
1006 }
1007 else
1008 md->name_idx = ((struct mmc_blk_data *)
1009 dev_to_disk(parent)->private_data)->name_idx;
595 1010
596 /* 1011 /*
597 * Set the read-only status based on the supported commands 1012 * Set the read-only status based on the supported commands
@@ -606,6 +1021,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
606 } 1021 }
607 1022
608 spin_lock_init(&md->lock); 1023 spin_lock_init(&md->lock);
1024 INIT_LIST_HEAD(&md->part);
609 md->usage = 1; 1025 md->usage = 1;
610 1026
611 ret = mmc_init_queue(&md->queue, card, &md->lock); 1027 ret = mmc_init_queue(&md->queue, card, &md->lock);
@@ -620,8 +1036,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
620 md->disk->fops = &mmc_bdops; 1036 md->disk->fops = &mmc_bdops;
621 md->disk->private_data = md; 1037 md->disk->private_data = md;
622 md->disk->queue = md->queue.queue; 1038 md->disk->queue = md->queue.queue;
623 md->disk->driverfs_dev = &card->dev; 1039 md->disk->driverfs_dev = parent;
624 set_disk_ro(md->disk, md->read_only); 1040 set_disk_ro(md->disk, md->read_only || default_ro);
625 1041
626 /* 1042 /*
627 * As discussed on lkml, GENHD_FL_REMOVABLE should: 1043 * As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -636,32 +1052,107 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
636 */ 1052 */
637 1053
638 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), 1054 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
639 "mmcblk%d", devidx); 1055 "mmcblk%d%s", md->name_idx, subname ? subname : "");
640 1056
641 blk_queue_logical_block_size(md->queue.queue, 512); 1057 blk_queue_logical_block_size(md->queue.queue, 512);
1058 set_capacity(md->disk, size);
1059
1060 if (mmc_host_cmd23(card->host)) {
1061 if (mmc_card_mmc(card) ||
1062 (mmc_card_sd(card) &&
1063 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1064 md->flags |= MMC_BLK_CMD23;
1065 }
1066
1067 if (mmc_card_mmc(card) &&
1068 md->flags & MMC_BLK_CMD23 &&
1069 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
1070 card->ext_csd.rel_sectors)) {
1071 md->flags |= MMC_BLK_REL_WR;
1072 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
1073 }
1074
1075 return md;
1076
1077 err_putdisk:
1078 put_disk(md->disk);
1079 err_kfree:
1080 kfree(md);
1081 out:
1082 return ERR_PTR(ret);
1083}
1084
1085static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
1086{
1087 sector_t size;
1088 struct mmc_blk_data *md;
642 1089
643 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 1090 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
644 /* 1091 /*
645 * The EXT_CSD sector count is in number or 512 byte 1092 * The EXT_CSD sector count is in number or 512 byte
646 * sectors. 1093 * sectors.
647 */ 1094 */
648 set_capacity(md->disk, card->ext_csd.sectors); 1095 size = card->ext_csd.sectors;
649 } else { 1096 } else {
650 /* 1097 /*
651 * The CSD capacity field is in units of read_blkbits. 1098 * The CSD capacity field is in units of read_blkbits.
652 * set_capacity takes units of 512 bytes. 1099 * set_capacity takes units of 512 bytes.
653 */ 1100 */
654 set_capacity(md->disk, 1101 size = card->csd.capacity << (card->csd.read_blkbits - 9);
655 card->csd.capacity << (card->csd.read_blkbits - 9));
656 } 1102 }
1103
1104 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
657 return md; 1105 return md;
1106}
658 1107
659 err_putdisk: 1108static int mmc_blk_alloc_part(struct mmc_card *card,
660 put_disk(md->disk); 1109 struct mmc_blk_data *md,
661 err_kfree: 1110 unsigned int part_type,
662 kfree(md); 1111 sector_t size,
663 out: 1112 bool default_ro,
664 return ERR_PTR(ret); 1113 const char *subname)
1114{
1115 char cap_str[10];
1116 struct mmc_blk_data *part_md;
1117
1118 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
1119 subname);
1120 if (IS_ERR(part_md))
1121 return PTR_ERR(part_md);
1122 part_md->part_type = part_type;
1123 list_add(&part_md->part, &md->part);
1124
1125 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
1126 cap_str, sizeof(cap_str));
1127 printk(KERN_INFO "%s: %s %s partition %u %s\n",
1128 part_md->disk->disk_name, mmc_card_id(card),
1129 mmc_card_name(card), part_md->part_type, cap_str);
1130 return 0;
1131}
1132
1133static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
1134{
1135 int ret = 0;
1136
1137 if (!mmc_card_mmc(card))
1138 return 0;
1139
1140 if (card->ext_csd.boot_size) {
1141 ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
1142 card->ext_csd.boot_size >> 9,
1143 true,
1144 "boot0");
1145 if (ret)
1146 return ret;
1147 ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
1148 card->ext_csd.boot_size >> 9,
1149 true,
1150 "boot1");
1151 if (ret)
1152 return ret;
1153 }
1154
1155 return ret;
665} 1156}
666 1157
667static int 1158static int
@@ -682,9 +1173,81 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
682 return 0; 1173 return 0;
683} 1174}
684 1175
1176static void mmc_blk_remove_req(struct mmc_blk_data *md)
1177{
1178 if (md) {
1179 if (md->disk->flags & GENHD_FL_UP) {
1180 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
1181
1182 /* Stop new requests from getting into the queue */
1183 del_gendisk(md->disk);
1184 }
1185
1186 /* Then flush out any already in there */
1187 mmc_cleanup_queue(&md->queue);
1188 mmc_blk_put(md);
1189 }
1190}
1191
1192static void mmc_blk_remove_parts(struct mmc_card *card,
1193 struct mmc_blk_data *md)
1194{
1195 struct list_head *pos, *q;
1196 struct mmc_blk_data *part_md;
1197
1198 __clear_bit(md->name_idx, name_use);
1199 list_for_each_safe(pos, q, &md->part) {
1200 part_md = list_entry(pos, struct mmc_blk_data, part);
1201 list_del(pos);
1202 mmc_blk_remove_req(part_md);
1203 }
1204}
1205
1206static int mmc_add_disk(struct mmc_blk_data *md)
1207{
1208 int ret;
1209
1210 add_disk(md->disk);
1211 md->force_ro.show = force_ro_show;
1212 md->force_ro.store = force_ro_store;
1213 sysfs_attr_init(&md->force_ro.attr);
1214 md->force_ro.attr.name = "force_ro";
1215 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
1216 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
1217 if (ret)
1218 del_gendisk(md->disk);
1219
1220 return ret;
1221}
1222
1223static const struct mmc_fixup blk_fixups[] =
1224{
1225 MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1226 MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1227 MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1228 MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1229 MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1230
1231 /*
1232 * Some MMC cards experience performance degradation with CMD23
1233 * instead of CMD12-bounded multiblock transfers. For now we'll
1234 * black list what's bad...
1235 * - Certain Toshiba cards.
1236 *
1237 * N.B. This doesn't affect SD cards.
1238 */
1239 MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1240 MMC_QUIRK_BLK_NO_CMD23),
1241 MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1242 MMC_QUIRK_BLK_NO_CMD23),
1243 MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1244 MMC_QUIRK_BLK_NO_CMD23),
1245 END_FIXUP
1246};
1247
685static int mmc_blk_probe(struct mmc_card *card) 1248static int mmc_blk_probe(struct mmc_card *card)
686{ 1249{
687 struct mmc_blk_data *md; 1250 struct mmc_blk_data *md, *part_md;
688 int err; 1251 int err;
689 char cap_str[10]; 1252 char cap_str[10];
690 1253
@@ -708,14 +1271,24 @@ static int mmc_blk_probe(struct mmc_card *card)
708 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 1271 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
709 cap_str, md->read_only ? "(ro)" : ""); 1272 cap_str, md->read_only ? "(ro)" : "");
710 1273
1274 if (mmc_blk_alloc_parts(card, md))
1275 goto out;
1276
711 mmc_set_drvdata(card, md); 1277 mmc_set_drvdata(card, md);
712 add_disk(md->disk); 1278 mmc_fixup_device(card, blk_fixups);
1279
1280 if (mmc_add_disk(md))
1281 goto out;
1282
1283 list_for_each_entry(part_md, &md->part, part) {
1284 if (mmc_add_disk(part_md))
1285 goto out;
1286 }
713 return 0; 1287 return 0;
714 1288
715 out: 1289 out:
716 mmc_cleanup_queue(&md->queue); 1290 mmc_blk_remove_parts(card, md);
717 mmc_blk_put(md); 1291 mmc_blk_remove_req(md);
718
719 return err; 1292 return err;
720} 1293}
721 1294
@@ -723,36 +1296,43 @@ static void mmc_blk_remove(struct mmc_card *card)
723{ 1296{
724 struct mmc_blk_data *md = mmc_get_drvdata(card); 1297 struct mmc_blk_data *md = mmc_get_drvdata(card);
725 1298
726 if (md) { 1299 mmc_blk_remove_parts(card, md);
727 /* Stop new requests from getting into the queue */ 1300 mmc_blk_remove_req(md);
728 del_gendisk(md->disk);
729
730 /* Then flush out any already in there */
731 mmc_cleanup_queue(&md->queue);
732
733 mmc_blk_put(md);
734 }
735 mmc_set_drvdata(card, NULL); 1301 mmc_set_drvdata(card, NULL);
736} 1302}
737 1303
738#ifdef CONFIG_PM 1304#ifdef CONFIG_PM
739static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) 1305static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
740{ 1306{
1307 struct mmc_blk_data *part_md;
741 struct mmc_blk_data *md = mmc_get_drvdata(card); 1308 struct mmc_blk_data *md = mmc_get_drvdata(card);
742 1309
743 if (md) { 1310 if (md) {
744 mmc_queue_suspend(&md->queue); 1311 mmc_queue_suspend(&md->queue);
1312 list_for_each_entry(part_md, &md->part, part) {
1313 mmc_queue_suspend(&part_md->queue);
1314 }
745 } 1315 }
746 return 0; 1316 return 0;
747} 1317}
748 1318
749static int mmc_blk_resume(struct mmc_card *card) 1319static int mmc_blk_resume(struct mmc_card *card)
750{ 1320{
1321 struct mmc_blk_data *part_md;
751 struct mmc_blk_data *md = mmc_get_drvdata(card); 1322 struct mmc_blk_data *md = mmc_get_drvdata(card);
752 1323
753 if (md) { 1324 if (md) {
754 mmc_blk_set_blksize(md, card); 1325 mmc_blk_set_blksize(md, card);
1326
1327 /*
1328 * Resume involves the card going into idle state,
1329 * so current partition is always the main one.
1330 */
1331 md->part_curr = md->part_type;
755 mmc_queue_resume(&md->queue); 1332 mmc_queue_resume(&md->queue);
1333 list_for_each_entry(part_md, &md->part, part) {
1334 mmc_queue_resume(&part_md->queue);
1335 }
756 } 1336 }
757 return 0; 1337 return 0;
758} 1338}
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index abc1a63bcc5e..233cdfae92f4 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -212,7 +212,7 @@ static int mmc_test_busy(struct mmc_command *cmd)
212static int mmc_test_wait_busy(struct mmc_test_card *test) 212static int mmc_test_wait_busy(struct mmc_test_card *test)
213{ 213{
214 int ret, busy; 214 int ret, busy;
215 struct mmc_command cmd; 215 struct mmc_command cmd = {0};
216 216
217 busy = 0; 217 busy = 0;
218 do { 218 do {
@@ -246,18 +246,13 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
246{ 246{
247 int ret; 247 int ret;
248 248
249 struct mmc_request mrq; 249 struct mmc_request mrq = {0};
250 struct mmc_command cmd; 250 struct mmc_command cmd = {0};
251 struct mmc_command stop; 251 struct mmc_command stop = {0};
252 struct mmc_data data; 252 struct mmc_data data = {0};
253 253
254 struct scatterlist sg; 254 struct scatterlist sg;
255 255
256 memset(&mrq, 0, sizeof(struct mmc_request));
257 memset(&cmd, 0, sizeof(struct mmc_command));
258 memset(&data, 0, sizeof(struct mmc_data));
259 memset(&stop, 0, sizeof(struct mmc_command));
260
261 mrq.cmd = &cmd; 256 mrq.cmd = &cmd;
262 mrq.data = &data; 257 mrq.data = &data;
263 mrq.stop = &stop; 258 mrq.stop = &stop;
@@ -731,15 +726,10 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
731 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 726 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
732 unsigned blocks, unsigned blksz, int write) 727 unsigned blocks, unsigned blksz, int write)
733{ 728{
734 struct mmc_request mrq; 729 struct mmc_request mrq = {0};
735 struct mmc_command cmd; 730 struct mmc_command cmd = {0};
736 struct mmc_command stop; 731 struct mmc_command stop = {0};
737 struct mmc_data data; 732 struct mmc_data data = {0};
738
739 memset(&mrq, 0, sizeof(struct mmc_request));
740 memset(&cmd, 0, sizeof(struct mmc_command));
741 memset(&data, 0, sizeof(struct mmc_data));
742 memset(&stop, 0, sizeof(struct mmc_command));
743 733
744 mrq.cmd = &cmd; 734 mrq.cmd = &cmd;
745 mrq.data = &data; 735 mrq.data = &data;
@@ -761,18 +751,13 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
761static int mmc_test_broken_transfer(struct mmc_test_card *test, 751static int mmc_test_broken_transfer(struct mmc_test_card *test,
762 unsigned blocks, unsigned blksz, int write) 752 unsigned blocks, unsigned blksz, int write)
763{ 753{
764 struct mmc_request mrq; 754 struct mmc_request mrq = {0};
765 struct mmc_command cmd; 755 struct mmc_command cmd = {0};
766 struct mmc_command stop; 756 struct mmc_command stop = {0};
767 struct mmc_data data; 757 struct mmc_data data = {0};
768 758
769 struct scatterlist sg; 759 struct scatterlist sg;
770 760
771 memset(&mrq, 0, sizeof(struct mmc_request));
772 memset(&cmd, 0, sizeof(struct mmc_command));
773 memset(&data, 0, sizeof(struct mmc_data));
774 memset(&stop, 0, sizeof(struct mmc_command));
775
776 mrq.cmd = &cmd; 761 mrq.cmd = &cmd;
777 mrq.data = &data; 762 mrq.data = &data;
778 mrq.stop = &stop; 763 mrq.stop = &stop;
@@ -1401,8 +1386,9 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1401 */ 1386 */
1402static int mmc_test_area_fill(struct mmc_test_card *test) 1387static int mmc_test_area_fill(struct mmc_test_card *test)
1403{ 1388{
1404 return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr, 1389 struct mmc_test_area *t = &test->area;
1405 1, 0, 0); 1390
1391 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1406} 1392}
1407 1393
1408/* 1394/*
@@ -1415,7 +1401,7 @@ static int mmc_test_area_erase(struct mmc_test_card *test)
1415 if (!mmc_can_erase(test->card)) 1401 if (!mmc_can_erase(test->card))
1416 return 0; 1402 return 0;
1417 1403
1418 return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9, 1404 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1419 MMC_ERASE_ARG); 1405 MMC_ERASE_ARG);
1420} 1406}
1421 1407
@@ -1542,8 +1528,10 @@ static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1542static int mmc_test_best_performance(struct mmc_test_card *test, int write, 1528static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1543 int max_scatter) 1529 int max_scatter)
1544{ 1530{
1545 return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr, 1531 struct mmc_test_area *t = &test->area;
1546 write, max_scatter, 1); 1532
1533 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1534 max_scatter, 1);
1547} 1535}
1548 1536
1549/* 1537/*
@@ -1583,18 +1571,19 @@ static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1583 */ 1571 */
1584static int mmc_test_profile_read_perf(struct mmc_test_card *test) 1572static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1585{ 1573{
1574 struct mmc_test_area *t = &test->area;
1586 unsigned long sz; 1575 unsigned long sz;
1587 unsigned int dev_addr; 1576 unsigned int dev_addr;
1588 int ret; 1577 int ret;
1589 1578
1590 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { 1579 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1591 dev_addr = test->area.dev_addr + (sz >> 9); 1580 dev_addr = t->dev_addr + (sz >> 9);
1592 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1581 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1593 if (ret) 1582 if (ret)
1594 return ret; 1583 return ret;
1595 } 1584 }
1596 sz = test->area.max_tfr; 1585 sz = t->max_tfr;
1597 dev_addr = test->area.dev_addr; 1586 dev_addr = t->dev_addr;
1598 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1587 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1599} 1588}
1600 1589
@@ -1603,6 +1592,7 @@ static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1603 */ 1592 */
1604static int mmc_test_profile_write_perf(struct mmc_test_card *test) 1593static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1605{ 1594{
1595 struct mmc_test_area *t = &test->area;
1606 unsigned long sz; 1596 unsigned long sz;
1607 unsigned int dev_addr; 1597 unsigned int dev_addr;
1608 int ret; 1598 int ret;
@@ -1610,8 +1600,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1610 ret = mmc_test_area_erase(test); 1600 ret = mmc_test_area_erase(test);
1611 if (ret) 1601 if (ret)
1612 return ret; 1602 return ret;
1613 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { 1603 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1614 dev_addr = test->area.dev_addr + (sz >> 9); 1604 dev_addr = t->dev_addr + (sz >> 9);
1615 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1605 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1616 if (ret) 1606 if (ret)
1617 return ret; 1607 return ret;
@@ -1619,8 +1609,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1619 ret = mmc_test_area_erase(test); 1609 ret = mmc_test_area_erase(test);
1620 if (ret) 1610 if (ret)
1621 return ret; 1611 return ret;
1622 sz = test->area.max_tfr; 1612 sz = t->max_tfr;
1623 dev_addr = test->area.dev_addr; 1613 dev_addr = t->dev_addr;
1624 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1614 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1625} 1615}
1626 1616
@@ -1629,6 +1619,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1629 */ 1619 */
1630static int mmc_test_profile_trim_perf(struct mmc_test_card *test) 1620static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1631{ 1621{
1622 struct mmc_test_area *t = &test->area;
1632 unsigned long sz; 1623 unsigned long sz;
1633 unsigned int dev_addr; 1624 unsigned int dev_addr;
1634 struct timespec ts1, ts2; 1625 struct timespec ts1, ts2;
@@ -1640,8 +1631,8 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1640 if (!mmc_can_erase(test->card)) 1631 if (!mmc_can_erase(test->card))
1641 return RESULT_UNSUP_HOST; 1632 return RESULT_UNSUP_HOST;
1642 1633
1643 for (sz = 512; sz < test->area.max_sz; sz <<= 1) { 1634 for (sz = 512; sz < t->max_sz; sz <<= 1) {
1644 dev_addr = test->area.dev_addr + (sz >> 9); 1635 dev_addr = t->dev_addr + (sz >> 9);
1645 getnstimeofday(&ts1); 1636 getnstimeofday(&ts1);
1646 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1637 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1647 if (ret) 1638 if (ret)
@@ -1649,7 +1640,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1649 getnstimeofday(&ts2); 1640 getnstimeofday(&ts2);
1650 mmc_test_print_rate(test, sz, &ts1, &ts2); 1641 mmc_test_print_rate(test, sz, &ts1, &ts2);
1651 } 1642 }
1652 dev_addr = test->area.dev_addr; 1643 dev_addr = t->dev_addr;
1653 getnstimeofday(&ts1); 1644 getnstimeofday(&ts1);
1654 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1645 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1655 if (ret) 1646 if (ret)
@@ -1661,12 +1652,13 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1661 1652
1662static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz) 1653static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1663{ 1654{
1655 struct mmc_test_area *t = &test->area;
1664 unsigned int dev_addr, i, cnt; 1656 unsigned int dev_addr, i, cnt;
1665 struct timespec ts1, ts2; 1657 struct timespec ts1, ts2;
1666 int ret; 1658 int ret;
1667 1659
1668 cnt = test->area.max_sz / sz; 1660 cnt = t->max_sz / sz;
1669 dev_addr = test->area.dev_addr; 1661 dev_addr = t->dev_addr;
1670 getnstimeofday(&ts1); 1662 getnstimeofday(&ts1);
1671 for (i = 0; i < cnt; i++) { 1663 for (i = 0; i < cnt; i++) {
1672 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); 1664 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
@@ -1684,20 +1676,22 @@ static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1684 */ 1676 */
1685static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) 1677static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1686{ 1678{
1679 struct mmc_test_area *t = &test->area;
1687 unsigned long sz; 1680 unsigned long sz;
1688 int ret; 1681 int ret;
1689 1682
1690 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { 1683 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1691 ret = mmc_test_seq_read_perf(test, sz); 1684 ret = mmc_test_seq_read_perf(test, sz);
1692 if (ret) 1685 if (ret)
1693 return ret; 1686 return ret;
1694 } 1687 }
1695 sz = test->area.max_tfr; 1688 sz = t->max_tfr;
1696 return mmc_test_seq_read_perf(test, sz); 1689 return mmc_test_seq_read_perf(test, sz);
1697} 1690}
1698 1691
1699static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) 1692static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1700{ 1693{
1694 struct mmc_test_area *t = &test->area;
1701 unsigned int dev_addr, i, cnt; 1695 unsigned int dev_addr, i, cnt;
1702 struct timespec ts1, ts2; 1696 struct timespec ts1, ts2;
1703 int ret; 1697 int ret;
@@ -1705,8 +1699,8 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1705 ret = mmc_test_area_erase(test); 1699 ret = mmc_test_area_erase(test);
1706 if (ret) 1700 if (ret)
1707 return ret; 1701 return ret;
1708 cnt = test->area.max_sz / sz; 1702 cnt = t->max_sz / sz;
1709 dev_addr = test->area.dev_addr; 1703 dev_addr = t->dev_addr;
1710 getnstimeofday(&ts1); 1704 getnstimeofday(&ts1);
1711 for (i = 0; i < cnt; i++) { 1705 for (i = 0; i < cnt; i++) {
1712 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0); 1706 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
@@ -1724,15 +1718,16 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1724 */ 1718 */
1725static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) 1719static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1726{ 1720{
1721 struct mmc_test_area *t = &test->area;
1727 unsigned long sz; 1722 unsigned long sz;
1728 int ret; 1723 int ret;
1729 1724
1730 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { 1725 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1731 ret = mmc_test_seq_write_perf(test, sz); 1726 ret = mmc_test_seq_write_perf(test, sz);
1732 if (ret) 1727 if (ret)
1733 return ret; 1728 return ret;
1734 } 1729 }
1735 sz = test->area.max_tfr; 1730 sz = t->max_tfr;
1736 return mmc_test_seq_write_perf(test, sz); 1731 return mmc_test_seq_write_perf(test, sz);
1737} 1732}
1738 1733
@@ -1741,6 +1736,7 @@ static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1741 */ 1736 */
1742static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) 1737static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1743{ 1738{
1739 struct mmc_test_area *t = &test->area;
1744 unsigned long sz; 1740 unsigned long sz;
1745 unsigned int dev_addr, i, cnt; 1741 unsigned int dev_addr, i, cnt;
1746 struct timespec ts1, ts2; 1742 struct timespec ts1, ts2;
@@ -1752,15 +1748,15 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1752 if (!mmc_can_erase(test->card)) 1748 if (!mmc_can_erase(test->card))
1753 return RESULT_UNSUP_HOST; 1749 return RESULT_UNSUP_HOST;
1754 1750
1755 for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { 1751 for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1756 ret = mmc_test_area_erase(test); 1752 ret = mmc_test_area_erase(test);
1757 if (ret) 1753 if (ret)
1758 return ret; 1754 return ret;
1759 ret = mmc_test_area_fill(test); 1755 ret = mmc_test_area_fill(test);
1760 if (ret) 1756 if (ret)
1761 return ret; 1757 return ret;
1762 cnt = test->area.max_sz / sz; 1758 cnt = t->max_sz / sz;
1763 dev_addr = test->area.dev_addr; 1759 dev_addr = t->dev_addr;
1764 getnstimeofday(&ts1); 1760 getnstimeofday(&ts1);
1765 for (i = 0; i < cnt; i++) { 1761 for (i = 0; i < cnt; i++) {
1766 ret = mmc_erase(test->card, dev_addr, sz >> 9, 1762 ret = mmc_erase(test->card, dev_addr, sz >> 9,
@@ -1823,11 +1819,12 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1823 1819
1824static int mmc_test_random_perf(struct mmc_test_card *test, int write) 1820static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1825{ 1821{
1822 struct mmc_test_area *t = &test->area;
1826 unsigned int next; 1823 unsigned int next;
1827 unsigned long sz; 1824 unsigned long sz;
1828 int ret; 1825 int ret;
1829 1826
1830 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { 1827 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1831 /* 1828 /*
1832 * When writing, try to get more consistent results by running 1829 * When writing, try to get more consistent results by running
1833 * the test twice with exactly the same I/O but outputting the 1830 * the test twice with exactly the same I/O but outputting the
@@ -1844,7 +1841,7 @@ static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1844 if (ret) 1841 if (ret)
1845 return ret; 1842 return ret;
1846 } 1843 }
1847 sz = test->area.max_tfr; 1844 sz = t->max_tfr;
1848 if (write) { 1845 if (write) {
1849 next = rnd_next; 1846 next = rnd_next;
1850 ret = mmc_test_rnd_perf(test, write, 0, sz); 1847 ret = mmc_test_rnd_perf(test, write, 0, sz);
@@ -1874,17 +1871,18 @@ static int mmc_test_random_write_perf(struct mmc_test_card *test)
1874static int mmc_test_seq_perf(struct mmc_test_card *test, int write, 1871static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1875 unsigned int tot_sz, int max_scatter) 1872 unsigned int tot_sz, int max_scatter)
1876{ 1873{
1874 struct mmc_test_area *t = &test->area;
1877 unsigned int dev_addr, i, cnt, sz, ssz; 1875 unsigned int dev_addr, i, cnt, sz, ssz;
1878 struct timespec ts1, ts2; 1876 struct timespec ts1, ts2;
1879 int ret; 1877 int ret;
1880 1878
1881 sz = test->area.max_tfr; 1879 sz = t->max_tfr;
1880
1882 /* 1881 /*
1883 * In the case of a maximally scattered transfer, the maximum transfer 1882 * In the case of a maximally scattered transfer, the maximum transfer
1884 * size is further limited by using PAGE_SIZE segments. 1883 * size is further limited by using PAGE_SIZE segments.
1885 */ 1884 */
1886 if (max_scatter) { 1885 if (max_scatter) {
1887 struct mmc_test_area *t = &test->area;
1888 unsigned long max_tfr; 1886 unsigned long max_tfr;
1889 1887
1890 if (t->max_seg_sz >= PAGE_SIZE) 1888 if (t->max_seg_sz >= PAGE_SIZE)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 2ae727568df9..c07322c2658c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -343,18 +343,14 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
343 */ 343 */
344void mmc_queue_bounce_pre(struct mmc_queue *mq) 344void mmc_queue_bounce_pre(struct mmc_queue *mq)
345{ 345{
346 unsigned long flags;
347
348 if (!mq->bounce_buf) 346 if (!mq->bounce_buf)
349 return; 347 return;
350 348
351 if (rq_data_dir(mq->req) != WRITE) 349 if (rq_data_dir(mq->req) != WRITE)
352 return; 350 return;
353 351
354 local_irq_save(flags);
355 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, 352 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
356 mq->bounce_buf, mq->sg[0].length); 353 mq->bounce_buf, mq->sg[0].length);
357 local_irq_restore(flags);
358} 354}
359 355
360/* 356/*
@@ -363,17 +359,13 @@ void mmc_queue_bounce_pre(struct mmc_queue *mq)
363 */ 359 */
364void mmc_queue_bounce_post(struct mmc_queue *mq) 360void mmc_queue_bounce_post(struct mmc_queue *mq)
365{ 361{
366 unsigned long flags;
367
368 if (!mq->bounce_buf) 362 if (!mq->bounce_buf)
369 return; 363 return;
370 364
371 if (rq_data_dir(mq->req) != READ) 365 if (rq_data_dir(mq->req) != READ)
372 return; 366 return;
373 367
374 local_irq_save(flags);
375 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, 368 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
376 mq->bounce_buf, mq->sg[0].length); 369 mq->bounce_buf, mq->sg[0].length);
377 local_irq_restore(flags);
378} 370}
379 371
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index d6d62fd07ee9..393d817ed040 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -274,8 +274,12 @@ int mmc_add_card(struct mmc_card *card)
274 break; 274 break;
275 case MMC_TYPE_SD: 275 case MMC_TYPE_SD:
276 type = "SD"; 276 type = "SD";
277 if (mmc_card_blockaddr(card)) 277 if (mmc_card_blockaddr(card)) {
278 type = "SDHC"; 278 if (mmc_card_ext_capacity(card))
279 type = "SDXC";
280 else
281 type = "SDHC";
282 }
279 break; 283 break;
280 case MMC_TYPE_SDIO: 284 case MMC_TYPE_SDIO:
281 type = "SDIO"; 285 type = "SDIO";
@@ -299,7 +303,8 @@ int mmc_add_card(struct mmc_card *card)
299 } else { 303 } else {
300 printk(KERN_INFO "%s: new %s%s%s card at address %04x\n", 304 printk(KERN_INFO "%s: new %s%s%s card at address %04x\n",
301 mmc_hostname(card->host), 305 mmc_hostname(card->host),
302 mmc_card_highspeed(card) ? "high speed " : "", 306 mmc_sd_card_uhs(card) ? "ultra high speed " :
307 (mmc_card_highspeed(card) ? "high speed " : ""),
303 mmc_card_ddr_mode(card) ? "DDR " : "", 308 mmc_card_ddr_mode(card) ? "DDR " : "",
304 type, card->rca); 309 type, card->rca);
305 } 310 }
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 1f453acc8682..68091dda3f31 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -236,12 +236,10 @@ EXPORT_SYMBOL(mmc_wait_for_req);
236 */ 236 */
237int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) 237int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
238{ 238{
239 struct mmc_request mrq; 239 struct mmc_request mrq = {0};
240 240
241 WARN_ON(!host->claimed); 241 WARN_ON(!host->claimed);
242 242
243 memset(&mrq, 0, sizeof(struct mmc_request));
244
245 memset(cmd->resp, 0, sizeof(cmd->resp)); 243 memset(cmd->resp, 0, sizeof(cmd->resp));
246 cmd->retries = retries; 244 cmd->retries = retries;
247 245
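The struct mmc_command cmd = {0}; conversions in this hunk (and repeated across the series) lean on C's rule that a brace initializer zero-fills every member not explicitly named, which is exactly what the deleted memset() calls did. A minimal standalone sketch of that equivalence, using a made-up cmd_like struct rather than the kernel's mmc_command:

#include <assert.h>

struct cmd_like {                 /* made-up stand-in, not struct mmc_command */
        unsigned int opcode;
        unsigned int arg;
        unsigned int flags;
        unsigned int resp[4];
};

int main(void)
{
        struct cmd_like cmd = {0};   /* every member zeroed, like the memset */

        assert(cmd.opcode == 0 && cmd.arg == 0 && cmd.flags == 0);
        assert(cmd.resp[0] == 0 && cmd.resp[3] == 0);
        return 0;
}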
@@ -720,22 +718,12 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
720} 718}
721 719
722/* 720/*
723 * Change data bus width and DDR mode of a host.
724 */
725void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
726 unsigned int ddr)
727{
728 host->ios.bus_width = width;
729 host->ios.ddr = ddr;
730 mmc_set_ios(host);
731}
732
733/*
734 * Change data bus width of a host. 721 * Change data bus width of a host.
735 */ 722 */
736void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 723void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
737{ 724{
738 mmc_set_bus_width_ddr(host, width, MMC_SDR_MODE); 725 host->ios.bus_width = width;
726 mmc_set_ios(host);
739} 727}
740 728
741/** 729/**
@@ -944,6 +932,38 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
944 return ocr; 932 return ocr;
945} 933}
946 934
935int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
936{
937 struct mmc_command cmd = {0};
938 int err = 0;
939
940 BUG_ON(!host);
941
942 /*
943 * Send CMD11 only if the request is to switch the card to
944 * 1.8V signalling.
945 */
946 if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
947 cmd.opcode = SD_SWITCH_VOLTAGE;
948 cmd.arg = 0;
949 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
950
951 err = mmc_wait_for_cmd(host, &cmd, 0);
952 if (err)
953 return err;
954
955 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
956 return -EIO;
957 }
958
959 host->ios.signal_voltage = signal_voltage;
960
961 if (host->ops->start_signal_voltage_switch)
962 err = host->ops->start_signal_voltage_switch(host, &host->ios);
963
964 return err;
965}
966
947/* 967/*
948 * Select timing parameters for host. 968 * Select timing parameters for host.
949 */ 969 */
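mmc_set_signal_voltage() above only issues the SD voltage-switch command when the caller both asks for it (cmd11) and targets something other than 3.3 V; otherwise it just updates ios and defers to the host's start_signal_voltage_switch hook. A standalone restatement of that decision, with the voltage names as plain stand-ins for the kernel's MMC_SIGNAL_VOLTAGE_* constants:

#include <stdbool.h>
#include <stdio.h>

enum signal_voltage { SIG_330, SIG_180, SIG_120 };  /* stand-in names */

/* CMD11 is sent only for a non-3.3V target and only when the caller asks. */
static bool needs_cmd11(enum signal_voltage target, bool cmd11)
{
        return target != SIG_330 && cmd11;
}

int main(void)
{
        printf("SD to 1.8V:        %d\n", needs_cmd11(SIG_180, true));   /* 1 */
        printf("back to 3.3V:      %d\n", needs_cmd11(SIG_330, true));   /* 0 */
        printf("eMMC vccq to 1.2V: %d\n", needs_cmd11(SIG_120, false));  /* 0 */
        return 0;
}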
@@ -954,6 +974,15 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
954} 974}
955 975
956/* 976/*
977 * Select appropriate driver type for host.
978 */
979void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
980{
981 host->ios.drv_type = drv_type;
982 mmc_set_ios(host);
983}
984
985/*
957 * Apply power to the MMC stack. This is a two-stage process. 986 * Apply power to the MMC stack. This is a two-stage process.
958 * First, we enable power to the card without the clock running. 987 * First, we enable power to the card without the clock running.
959 * We then wait a bit for the power to stabilise. Finally, 988 * We then wait a bit for the power to stabilise. Finally,
@@ -1187,9 +1216,8 @@ void mmc_init_erase(struct mmc_card *card)
1187 } 1216 }
1188} 1217}
1189 1218
1190static void mmc_set_mmc_erase_timeout(struct mmc_card *card, 1219static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1191 struct mmc_command *cmd, 1220 unsigned int arg, unsigned int qty)
1192 unsigned int arg, unsigned int qty)
1193{ 1221{
1194 unsigned int erase_timeout; 1222 unsigned int erase_timeout;
1195 1223
@@ -1246,44 +1274,48 @@ static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
1246 if (mmc_host_is_spi(card->host) && erase_timeout < 1000) 1274 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1247 erase_timeout = 1000; 1275 erase_timeout = 1000;
1248 1276
1249 cmd->erase_timeout = erase_timeout; 1277 return erase_timeout;
1250} 1278}
1251 1279
1252static void mmc_set_sd_erase_timeout(struct mmc_card *card, 1280static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1253 struct mmc_command *cmd, unsigned int arg, 1281 unsigned int arg,
1254 unsigned int qty) 1282 unsigned int qty)
1255{ 1283{
1284 unsigned int erase_timeout;
1285
1256 if (card->ssr.erase_timeout) { 1286 if (card->ssr.erase_timeout) {
1257 /* Erase timeout specified in SD Status Register (SSR) */ 1287 /* Erase timeout specified in SD Status Register (SSR) */
1258 cmd->erase_timeout = card->ssr.erase_timeout * qty + 1288 erase_timeout = card->ssr.erase_timeout * qty +
1259 card->ssr.erase_offset; 1289 card->ssr.erase_offset;
1260 } else { 1290 } else {
1261 /* 1291 /*
1262 * Erase timeout not specified in SD Status Register (SSR) so 1292 * Erase timeout not specified in SD Status Register (SSR) so
1263 * use 250ms per write block. 1293 * use 250ms per write block.
1264 */ 1294 */
1265 cmd->erase_timeout = 250 * qty; 1295 erase_timeout = 250 * qty;
1266 } 1296 }
1267 1297
1268 /* Must not be less than 1 second */ 1298 /* Must not be less than 1 second */
1269 if (cmd->erase_timeout < 1000) 1299 if (erase_timeout < 1000)
1270 cmd->erase_timeout = 1000; 1300 erase_timeout = 1000;
1301
1302 return erase_timeout;
1271} 1303}
1272 1304
1273static void mmc_set_erase_timeout(struct mmc_card *card, 1305static unsigned int mmc_erase_timeout(struct mmc_card *card,
1274 struct mmc_command *cmd, unsigned int arg, 1306 unsigned int arg,
1275 unsigned int qty) 1307 unsigned int qty)
1276{ 1308{
1277 if (mmc_card_sd(card)) 1309 if (mmc_card_sd(card))
1278 mmc_set_sd_erase_timeout(card, cmd, arg, qty); 1310 return mmc_sd_erase_timeout(card, arg, qty);
1279 else 1311 else
1280 mmc_set_mmc_erase_timeout(card, cmd, arg, qty); 1312 return mmc_mmc_erase_timeout(card, arg, qty);
1281} 1313}
1282 1314
1283static int mmc_do_erase(struct mmc_card *card, unsigned int from, 1315static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1284 unsigned int to, unsigned int arg) 1316 unsigned int to, unsigned int arg)
1285{ 1317{
1286 struct mmc_command cmd; 1318 struct mmc_command cmd = {0};
1287 unsigned int qty = 0; 1319 unsigned int qty = 0;
1288 int err; 1320 int err;
1289 1321
@@ -1317,7 +1349,6 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1317 to <<= 9; 1349 to <<= 9;
1318 } 1350 }
1319 1351
1320 memset(&cmd, 0, sizeof(struct mmc_command));
1321 if (mmc_card_sd(card)) 1352 if (mmc_card_sd(card))
1322 cmd.opcode = SD_ERASE_WR_BLK_START; 1353 cmd.opcode = SD_ERASE_WR_BLK_START;
1323 else 1354 else
@@ -1351,7 +1382,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1351 cmd.opcode = MMC_ERASE; 1382 cmd.opcode = MMC_ERASE;
1352 cmd.arg = arg; 1383 cmd.arg = arg;
1353 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 1384 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1354 mmc_set_erase_timeout(card, &cmd, arg, qty); 1385 cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
1355 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1386 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1356 if (err) { 1387 if (err) {
1357 printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n", 1388 printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
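With the refactor above the erase timeout is computed up front and carried in cmd.cmd_timeout_ms. The SD branch of that computation is small enough to restate on its own: use the SSR-derived per-block figure when the card provides one, otherwise assume 250 ms per write block, and never go below one second. A standalone sketch, with illustrative argument values rather than figures from a real card:

#include <stdio.h>

/* SD branch of the timeout rule; the inputs below are illustrative only. */
static unsigned int sd_erase_timeout_ms(unsigned int ssr_timeout,
                                        unsigned int ssr_offset,
                                        unsigned int qty)
{
        unsigned int ms;

        if (ssr_timeout)                   /* SSR gives a per-block figure */
                ms = ssr_timeout * qty + ssr_offset;
        else                               /* otherwise 250 ms per block */
                ms = 250 * qty;

        return ms < 1000 ? 1000 : ms;      /* never below one second */
}

int main(void)
{
        printf("%u\n", sd_erase_timeout_ms(0, 0, 2));     /* 1000: clamped */
        printf("%u\n", sd_erase_timeout_ms(0, 0, 16));    /* 4000 */
        printf("%u\n", sd_erase_timeout_ms(300, 50, 8));  /* 2450 */
        return 0;
}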
@@ -1487,12 +1518,11 @@ EXPORT_SYMBOL(mmc_erase_group_aligned);
1487 1518
1488int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) 1519int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1489{ 1520{
1490 struct mmc_command cmd; 1521 struct mmc_command cmd = {0};
1491 1522
1492 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card)) 1523 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
1493 return 0; 1524 return 0;
1494 1525
1495 memset(&cmd, 0, sizeof(struct mmc_command));
1496 cmd.opcode = MMC_SET_BLOCKLEN; 1526 cmd.opcode = MMC_SET_BLOCKLEN;
1497 cmd.arg = blocklen; 1527 cmd.arg = blocklen;
1498 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1528 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
@@ -1578,7 +1608,7 @@ void mmc_rescan(struct work_struct *work)
1578 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 1608 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
1579 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) 1609 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
1580 break; 1610 break;
1581 if (freqs[i] < host->f_min) 1611 if (freqs[i] <= host->f_min)
1582 break; 1612 break;
1583 } 1613 }
1584 mmc_release_host(host); 1614 mmc_release_host(host);
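The '<' to '<=' change above removes a duplicated probe: once the clamped frequency max(freqs[i], f_min) has reached f_min, trying the next, lower table entry would only repeat the same clamped value. A standalone model of the loop, with an assumed probe table and f_min rather than the driver's:

#include <stdio.h>

static unsigned int maxu(unsigned int a, unsigned int b)
{
        return a > b ? a : b;
}

int main(void)
{
        /* Probe table, highest first; values are assumed for illustration. */
        const unsigned int freqs[] = { 400000, 300000, 200000, 100000 };
        const unsigned int f_min = 200000;    /* host's minimum clock */
        unsigned int i;

        for (i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++) {
                printf("try %u Hz\n", maxu(freqs[i], f_min));
                if (freqs[i] <= f_min)  /* with '<', 200000 was tried twice */
                        break;
        }
        return 0;
}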
@@ -1746,7 +1776,7 @@ int mmc_suspend_host(struct mmc_host *host)
1746 } 1776 }
1747 mmc_bus_put(host); 1777 mmc_bus_put(host);
1748 1778
1749 if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER)) 1779 if (!err && !mmc_card_keep_power(host))
1750 mmc_power_off(host); 1780 mmc_power_off(host);
1751 1781
1752 return err; 1782 return err;
@@ -1764,7 +1794,7 @@ int mmc_resume_host(struct mmc_host *host)
1764 1794
1765 mmc_bus_get(host); 1795 mmc_bus_get(host);
1766 if (host->bus_ops && !host->bus_dead) { 1796 if (host->bus_ops && !host->bus_dead) {
1767 if (!(host->pm_flags & MMC_PM_KEEP_POWER)) { 1797 if (!mmc_card_keep_power(host)) {
1768 mmc_power_up(host); 1798 mmc_power_up(host);
1769 mmc_select_voltage(host, host->ocr); 1799 mmc_select_voltage(host, host->ocr);
1770 /* 1800 /*
@@ -1789,6 +1819,7 @@ int mmc_resume_host(struct mmc_host *host)
1789 err = 0; 1819 err = 0;
1790 } 1820 }
1791 } 1821 }
1822 host->pm_flags &= ~MMC_PM_KEEP_POWER;
1792 mmc_bus_put(host); 1823 mmc_bus_put(host);
1793 1824
1794 return err; 1825 return err;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 20b1c0831eac..d9411ed2a39b 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -38,10 +38,11 @@ void mmc_ungate_clock(struct mmc_host *host);
38void mmc_set_ungated(struct mmc_host *host); 38void mmc_set_ungated(struct mmc_host *host);
39void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); 39void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
40void mmc_set_bus_width(struct mmc_host *host, unsigned int width); 40void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
41void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
42 unsigned int ddr);
43u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); 41u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
42int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage,
43 bool cmd11);
44void mmc_set_timing(struct mmc_host *host, unsigned int timing); 44void mmc_set_timing(struct mmc_host *host, unsigned int timing);
45void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
45 46
46static inline void mmc_delay(unsigned int ms) 47static inline void mmc_delay(unsigned int ms)
47{ 48{
@@ -61,8 +62,6 @@ int mmc_attach_mmc(struct mmc_host *host);
61int mmc_attach_sd(struct mmc_host *host); 62int mmc_attach_sd(struct mmc_host *host);
62int mmc_attach_sdio(struct mmc_host *host); 63int mmc_attach_sdio(struct mmc_host *host);
63 64
64void mmc_fixup_device(struct mmc_card *card);
65
66/* Module parameters */ 65/* Module parameters */
67extern int use_spi_crc; 66extern int use_spi_crc;
68 67
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 461e6a17fb90..b29d3e8fd3a2 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -325,12 +325,12 @@ int mmc_add_host(struct mmc_host *host)
325 WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && 325 WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
326 !host->ops->enable_sdio_irq); 326 !host->ops->enable_sdio_irq);
327 327
328 led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
329
330 err = device_add(&host->class_dev); 328 err = device_add(&host->class_dev);
331 if (err) 329 if (err)
332 return err; 330 return err;
333 331
332 led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
333
334#ifdef CONFIG_DEBUG_FS 334#ifdef CONFIG_DEBUG_FS
335 mmc_add_host_debugfs(host); 335 mmc_add_host_debugfs(host);
336#endif 336#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 772d0d0a541b..2a7e43bc796d 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -20,6 +20,7 @@
20#include "core.h" 20#include "core.h"
21#include "bus.h" 21#include "bus.h"
22#include "mmc_ops.h" 22#include "mmc_ops.h"
23#include "sd_ops.h"
23 24
24static const unsigned int tran_exp[] = { 25static const unsigned int tran_exp[] = {
25 10000, 100000, 1000000, 10000000, 26 10000, 100000, 1000000, 10000000,
@@ -173,14 +174,17 @@ static int mmc_decode_csd(struct mmc_card *card)
173} 174}
174 175
175/* 176/*
176 * Read and decode extended CSD. 177 * Read extended CSD.
177 */ 178 */
178static int mmc_read_ext_csd(struct mmc_card *card) 179static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
179{ 180{
180 int err; 181 int err;
181 u8 *ext_csd; 182 u8 *ext_csd;
182 183
183 BUG_ON(!card); 184 BUG_ON(!card);
185 BUG_ON(!new_ext_csd);
186
187 *new_ext_csd = NULL;
184 188
185 if (card->csd.mmca_vsn < CSD_SPEC_VER_4) 189 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
186 return 0; 190 return 0;
@@ -198,12 +202,15 @@ static int mmc_read_ext_csd(struct mmc_card *card)
198 202
199 err = mmc_send_ext_csd(card, ext_csd); 203 err = mmc_send_ext_csd(card, ext_csd);
200 if (err) { 204 if (err) {
205 kfree(ext_csd);
206 *new_ext_csd = NULL;
207
201 /* If the host or the card can't do the switch, 208 /* If the host or the card can't do the switch,
202 * fail more gracefully. */ 209 * fail more gracefully. */
203 if ((err != -EINVAL) 210 if ((err != -EINVAL)
204 && (err != -ENOSYS) 211 && (err != -ENOSYS)
205 && (err != -EFAULT)) 212 && (err != -EFAULT))
206 goto out; 213 return err;
207 214
208 /* 215 /*
209 * High capacity cards should have this "magic" size 216 * High capacity cards should have this "magic" size
@@ -221,9 +228,23 @@ static int mmc_read_ext_csd(struct mmc_card *card)
221 mmc_hostname(card->host)); 228 mmc_hostname(card->host));
222 err = 0; 229 err = 0;
223 } 230 }
231 } else
232 *new_ext_csd = ext_csd;
224 233
225 goto out; 234 return err;
226 } 235}
236
237/*
238 * Decode extended CSD.
239 */
240static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
241{
242 int err = 0;
243
244 BUG_ON(!card);
245
246 if (!ext_csd)
247 return 0;
227 248
228 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ 249 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
229 if (card->csd.structure == 3) { 250 if (card->csd.structure == 3) {
@@ -288,6 +309,10 @@ static int mmc_read_ext_csd(struct mmc_card *card)
288 309
289 if (card->ext_csd.rev >= 3) { 310 if (card->ext_csd.rev >= 3) {
290 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; 311 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
312 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
313
314 /* EXT_CSD value is in units of 10ms, but we store in ms */
315 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
291 316
292 /* Sleep / awake timeout in 100ns units */ 317 /* Sleep / awake timeout in 100ns units */
293 if (sa_shift > 0 && sa_shift <= 0x17) 318 if (sa_shift > 0 && sa_shift <= 0x17)
@@ -299,6 +324,14 @@ static int mmc_read_ext_csd(struct mmc_card *card)
299 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; 324 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
300 card->ext_csd.hc_erase_size = 325 card->ext_csd.hc_erase_size =
301 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10; 326 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
327
328 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
329
330 /*
331 * There are two boot regions of equal size, defined in
332 * multiples of 128K.
333 */
334 card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
302 } 335 }
303 336
304 if (card->ext_csd.rev >= 4) { 337 if (card->ext_csd.rev >= 4) {
@@ -350,14 +383,78 @@ static int mmc_read_ext_csd(struct mmc_card *card)
350 ext_csd[EXT_CSD_TRIM_MULT]; 383 ext_csd[EXT_CSD_TRIM_MULT];
351 } 384 }
352 385
386 if (card->ext_csd.rev >= 5)
387 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
388
353 if (ext_csd[EXT_CSD_ERASED_MEM_CONT]) 389 if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
354 card->erased_byte = 0xFF; 390 card->erased_byte = 0xFF;
355 else 391 else
356 card->erased_byte = 0x0; 392 card->erased_byte = 0x0;
357 393
358out: 394out:
395 return err;
396}
397
398static inline void mmc_free_ext_csd(u8 *ext_csd)
399{
359 kfree(ext_csd); 400 kfree(ext_csd);
401}
402
403
404static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
405 unsigned bus_width)
406{
407 u8 *bw_ext_csd;
408 int err;
409
410 err = mmc_get_ext_csd(card, &bw_ext_csd);
411 if (err)
412 return err;
413
414 if ((ext_csd == NULL || bw_ext_csd == NULL)) {
415 if (bus_width != MMC_BUS_WIDTH_1)
416 err = -EINVAL;
417 goto out;
418 }
360 419
420 if (bus_width == MMC_BUS_WIDTH_1)
421 goto out;
422
423 /* only compare read only fields */
424 err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] ==
425 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
426 (ext_csd[EXT_CSD_ERASED_MEM_CONT] ==
427 bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
428 (ext_csd[EXT_CSD_REV] ==
429 bw_ext_csd[EXT_CSD_REV]) &&
430 (ext_csd[EXT_CSD_STRUCTURE] ==
431 bw_ext_csd[EXT_CSD_STRUCTURE]) &&
432 (ext_csd[EXT_CSD_CARD_TYPE] ==
433 bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
434 (ext_csd[EXT_CSD_S_A_TIMEOUT] ==
435 bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
436 (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
437 bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
438 (ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] ==
439 bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
440 (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
441 bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
442 (ext_csd[EXT_CSD_SEC_TRIM_MULT] ==
443 bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
444 (ext_csd[EXT_CSD_SEC_ERASE_MULT] ==
445 bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
446 (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] ==
447 bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
448 (ext_csd[EXT_CSD_TRIM_MULT] ==
449 bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
450 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
451 &bw_ext_csd[EXT_CSD_SEC_CNT],
452 4) != 0);
453 if (err)
454 err = -EINVAL;
455
456out:
457 mmc_free_ext_csd(bw_ext_csd);
361 return err; 458 return err;
362} 459}
363 460
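mmc_compare_ext_csds() above covers hosts without MMC_CAP_BUS_WIDTH_TEST: after switching the bus width, EXT_CSD is read again and a set of read-only bytes must match the copy captured in 1-bit mode, otherwise the wider bus is assumed broken. A reduced standalone model of that idea; the offsets below are an illustrative subset, not the full list the hunk compares:

#include <stdbool.h>
#include <stdio.h>

/* A few read-only EXT_CSD byte offsets (an illustrative subset only). */
static const unsigned int ro_fields[] = { 160, 181, 192, 194, 196, 246 };

/*
 * Bus-width sanity check: the 512-byte register re-read over the wide bus
 * must match the copy captured in 1-bit mode, field for field.
 */
static bool ext_csd_matches(const unsigned char *narrow,
                            const unsigned char *wide)
{
        unsigned int i;

        for (i = 0; i < sizeof(ro_fields) / sizeof(ro_fields[0]); i++)
                if (narrow[ro_fields[i]] != wide[ro_fields[i]])
                        return false;
        return true;
}

int main(void)
{
        unsigned char one_bit[512] = {0}, wide[512] = {0};

        one_bit[192] = wide[192] = 5;                    /* revision agrees */
        printf("%d\n", ext_csd_matches(one_bit, wide));  /* 1 */
        wide[192] = 4;                                   /* garbled wide read */
        printf("%d\n", ext_csd_matches(one_bit, wide));  /* 0 */
        return 0;
}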
@@ -422,6 +519,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
422 u32 cid[4]; 519 u32 cid[4];
423 unsigned int max_dtr; 520 unsigned int max_dtr;
424 u32 rocr; 521 u32 rocr;
522 u8 *ext_csd = NULL;
425 523
426 BUG_ON(!host); 524 BUG_ON(!host);
427 WARN_ON(!host->claimed); 525 WARN_ON(!host->claimed);
@@ -520,7 +618,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
520 /* 618 /*
521 * Fetch and process extended CSD. 619 * Fetch and process extended CSD.
522 */ 620 */
523 err = mmc_read_ext_csd(card); 621
622 err = mmc_get_ext_csd(card, &ext_csd);
623 if (err)
624 goto free_card;
625 err = mmc_read_ext_csd(card, ext_csd);
524 if (err) 626 if (err)
525 goto free_card; 627 goto free_card;
526 628
@@ -542,7 +644,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
542 */ 644 */
543 if (card->ext_csd.enhanced_area_en) { 645 if (card->ext_csd.enhanced_area_en) {
544 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 646 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
545 EXT_CSD_ERASE_GROUP_DEF, 1); 647 EXT_CSD_ERASE_GROUP_DEF, 1, 0);
546 648
547 if (err && err != -EBADMSG) 649 if (err && err != -EBADMSG)
548 goto free_card; 650 goto free_card;
@@ -568,12 +670,24 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
568 } 670 }
569 671
570 /* 672 /*
673 * Ensure eMMC user default partition is enabled
674 */
675 if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
676 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
677 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
678 card->ext_csd.part_config,
679 card->ext_csd.part_time);
680 if (err && err != -EBADMSG)
681 goto free_card;
682 }
683
684 /*
571 * Activate high speed (if supported) 685 * Activate high speed (if supported)
572 */ 686 */
573 if ((card->ext_csd.hs_max_dtr != 0) && 687 if ((card->ext_csd.hs_max_dtr != 0) &&
574 (host->caps & MMC_CAP_MMC_HIGHSPEED)) { 688 (host->caps & MMC_CAP_MMC_HIGHSPEED)) {
575 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 689 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
576 EXT_CSD_HS_TIMING, 1); 690 EXT_CSD_HS_TIMING, 1, 0);
577 if (err && err != -EBADMSG) 691 if (err && err != -EBADMSG)
578 goto free_card; 692 goto free_card;
579 693
@@ -606,10 +720,14 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
606 */ 720 */
607 if (mmc_card_highspeed(card)) { 721 if (mmc_card_highspeed(card)) {
608 if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) 722 if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
609 && (host->caps & (MMC_CAP_1_8V_DDR))) 723 && ((host->caps & (MMC_CAP_1_8V_DDR |
724 MMC_CAP_UHS_DDR50))
725 == (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
610 ddr = MMC_1_8V_DDR_MODE; 726 ddr = MMC_1_8V_DDR_MODE;
611 else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) 727 else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
612 && (host->caps & (MMC_CAP_1_2V_DDR))) 728 && ((host->caps & (MMC_CAP_1_2V_DDR |
729 MMC_CAP_UHS_DDR50))
730 == (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
613 ddr = MMC_1_2V_DDR_MODE; 731 ddr = MMC_1_2V_DDR_MODE;
614 } 732 }
615 733
@@ -640,18 +758,22 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
640 ddr = 0; /* no DDR for 1-bit width */ 758 ddr = 0; /* no DDR for 1-bit width */
641 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 759 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
642 EXT_CSD_BUS_WIDTH, 760 EXT_CSD_BUS_WIDTH,
643 ext_csd_bits[idx][0]); 761 ext_csd_bits[idx][0],
762 0);
644 if (!err) { 763 if (!err) {
645 mmc_set_bus_width_ddr(card->host, 764 mmc_set_bus_width(card->host, bus_width);
646 bus_width, MMC_SDR_MODE); 765
647 /* 766 /*
648 * If controller can't handle bus width test, 767 * If controller can't handle bus width test,
649 * use the highest bus width to maintain 768 * compare ext_csd previously read in 1 bit mode
650 * compatibility with previous MMC behavior. 769 * against ext_csd at new bus width
651 */ 770 */
652 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) 771 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
653 break; 772 err = mmc_compare_ext_csds(card,
654 err = mmc_bus_test(card, bus_width); 773 ext_csd,
774 bus_width);
775 else
776 err = mmc_bus_test(card, bus_width);
655 if (!err) 777 if (!err)
656 break; 778 break;
657 } 779 }
@@ -659,8 +781,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
659 781
660 if (!err && ddr) { 782 if (!err && ddr) {
661 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 783 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
662 EXT_CSD_BUS_WIDTH, 784 EXT_CSD_BUS_WIDTH,
663 ext_csd_bits[idx][1]); 785 ext_csd_bits[idx][1],
786 0);
664 } 787 }
665 if (err) { 788 if (err) {
666 printk(KERN_WARNING "%s: switch to bus width %d ddr %d " 789 printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
@@ -668,20 +791,43 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
668 1 << bus_width, ddr); 791 1 << bus_width, ddr);
669 goto free_card; 792 goto free_card;
670 } else if (ddr) { 793 } else if (ddr) {
794 /*
795 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
796 * signaling.
797 *
798 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
799 *
800 * 1.8V vccq at 3.3V core voltage (vcc) is not required
801 * in the JEDEC spec for DDR.
802 *
803 * Do not force change in vccq since we are obviously
804 * working and no change to vccq is needed.
805 *
806 * WARNING: eMMC rules are NOT the same as SD DDR
807 */
808 if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
809 err = mmc_set_signal_voltage(host,
810 MMC_SIGNAL_VOLTAGE_120, 0);
811 if (err)
812 goto err;
813 }
671 mmc_card_set_ddr_mode(card); 814 mmc_card_set_ddr_mode(card);
672 mmc_set_bus_width_ddr(card->host, bus_width, ddr); 815 mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
816 mmc_set_bus_width(card->host, bus_width);
673 } 817 }
674 } 818 }
675 819
676 if (!oldcard) 820 if (!oldcard)
677 host->card = card; 821 host->card = card;
678 822
823 mmc_free_ext_csd(ext_csd);
679 return 0; 824 return 0;
680 825
681free_card: 826free_card:
682 if (!oldcard) 827 if (!oldcard)
683 mmc_remove_card(card); 828 mmc_remove_card(card);
684err: 829err:
830 mmc_free_ext_csd(ext_csd);
685 831
686 return err; 832 return err;
687} 833}
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index f3b22bf89cc9..845ce7c533b9 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -23,12 +23,10 @@
23static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) 23static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
24{ 24{
25 int err; 25 int err;
26 struct mmc_command cmd; 26 struct mmc_command cmd = {0};
27 27
28 BUG_ON(!host); 28 BUG_ON(!host);
29 29
30 memset(&cmd, 0, sizeof(struct mmc_command));
31
32 cmd.opcode = MMC_SELECT_CARD; 30 cmd.opcode = MMC_SELECT_CARD;
33 31
34 if (card) { 32 if (card) {
@@ -60,15 +58,13 @@ int mmc_deselect_cards(struct mmc_host *host)
60 58
61int mmc_card_sleepawake(struct mmc_host *host, int sleep) 59int mmc_card_sleepawake(struct mmc_host *host, int sleep)
62{ 60{
63 struct mmc_command cmd; 61 struct mmc_command cmd = {0};
64 struct mmc_card *card = host->card; 62 struct mmc_card *card = host->card;
65 int err; 63 int err;
66 64
67 if (sleep) 65 if (sleep)
68 mmc_deselect_cards(host); 66 mmc_deselect_cards(host);
69 67
70 memset(&cmd, 0, sizeof(struct mmc_command));
71
72 cmd.opcode = MMC_SLEEP_AWAKE; 68 cmd.opcode = MMC_SLEEP_AWAKE;
73 cmd.arg = card->rca << 16; 69 cmd.arg = card->rca << 16;
74 if (sleep) 70 if (sleep)
@@ -97,7 +93,7 @@ int mmc_card_sleepawake(struct mmc_host *host, int sleep)
97int mmc_go_idle(struct mmc_host *host) 93int mmc_go_idle(struct mmc_host *host)
98{ 94{
99 int err; 95 int err;
100 struct mmc_command cmd; 96 struct mmc_command cmd = {0};
101 97
102 /* 98 /*
103 * Non-SPI hosts need to prevent chipselect going active during 99 * Non-SPI hosts need to prevent chipselect going active during
@@ -113,8 +109,6 @@ int mmc_go_idle(struct mmc_host *host)
113 mmc_delay(1); 109 mmc_delay(1);
114 } 110 }
115 111
116 memset(&cmd, 0, sizeof(struct mmc_command));
117
118 cmd.opcode = MMC_GO_IDLE_STATE; 112 cmd.opcode = MMC_GO_IDLE_STATE;
119 cmd.arg = 0; 113 cmd.arg = 0;
120 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC; 114 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
@@ -135,13 +129,11 @@ int mmc_go_idle(struct mmc_host *host)
135 129
136int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) 130int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
137{ 131{
138 struct mmc_command cmd; 132 struct mmc_command cmd = {0};
139 int i, err = 0; 133 int i, err = 0;
140 134
141 BUG_ON(!host); 135 BUG_ON(!host);
142 136
143 memset(&cmd, 0, sizeof(struct mmc_command));
144
145 cmd.opcode = MMC_SEND_OP_COND; 137 cmd.opcode = MMC_SEND_OP_COND;
146 cmd.arg = mmc_host_is_spi(host) ? 0 : ocr; 138 cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
147 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR; 139 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
@@ -178,13 +170,11 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
178int mmc_all_send_cid(struct mmc_host *host, u32 *cid) 170int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
179{ 171{
180 int err; 172 int err;
181 struct mmc_command cmd; 173 struct mmc_command cmd = {0};
182 174
183 BUG_ON(!host); 175 BUG_ON(!host);
184 BUG_ON(!cid); 176 BUG_ON(!cid);
185 177
186 memset(&cmd, 0, sizeof(struct mmc_command));
187
188 cmd.opcode = MMC_ALL_SEND_CID; 178 cmd.opcode = MMC_ALL_SEND_CID;
189 cmd.arg = 0; 179 cmd.arg = 0;
190 cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; 180 cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
@@ -201,13 +191,11 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
201int mmc_set_relative_addr(struct mmc_card *card) 191int mmc_set_relative_addr(struct mmc_card *card)
202{ 192{
203 int err; 193 int err;
204 struct mmc_command cmd; 194 struct mmc_command cmd = {0};
205 195
206 BUG_ON(!card); 196 BUG_ON(!card);
207 BUG_ON(!card->host); 197 BUG_ON(!card->host);
208 198
209 memset(&cmd, 0, sizeof(struct mmc_command));
210
211 cmd.opcode = MMC_SET_RELATIVE_ADDR; 199 cmd.opcode = MMC_SET_RELATIVE_ADDR;
212 cmd.arg = card->rca << 16; 200 cmd.arg = card->rca << 16;
213 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 201 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
@@ -223,13 +211,11 @@ static int
223mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode) 211mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
224{ 212{
225 int err; 213 int err;
226 struct mmc_command cmd; 214 struct mmc_command cmd = {0};
227 215
228 BUG_ON(!host); 216 BUG_ON(!host);
229 BUG_ON(!cxd); 217 BUG_ON(!cxd);
230 218
231 memset(&cmd, 0, sizeof(struct mmc_command));
232
233 cmd.opcode = opcode; 219 cmd.opcode = opcode;
234 cmd.arg = arg; 220 cmd.arg = arg;
235 cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; 221 cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
@@ -247,9 +233,9 @@ static int
247mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, 233mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
248 u32 opcode, void *buf, unsigned len) 234 u32 opcode, void *buf, unsigned len)
249{ 235{
250 struct mmc_request mrq; 236 struct mmc_request mrq = {0};
251 struct mmc_command cmd; 237 struct mmc_command cmd = {0};
252 struct mmc_data data; 238 struct mmc_data data = {0};
253 struct scatterlist sg; 239 struct scatterlist sg;
254 void *data_buf; 240 void *data_buf;
255 241
@@ -260,10 +246,6 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
260 if (data_buf == NULL) 246 if (data_buf == NULL)
261 return -ENOMEM; 247 return -ENOMEM;
262 248
263 memset(&mrq, 0, sizeof(struct mmc_request));
264 memset(&cmd, 0, sizeof(struct mmc_command));
265 memset(&data, 0, sizeof(struct mmc_data));
266
267 mrq.cmd = &cmd; 249 mrq.cmd = &cmd;
268 mrq.data = &data; 250 mrq.data = &data;
269 251
@@ -355,11 +337,9 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
355 337
356int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) 338int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
357{ 339{
358 struct mmc_command cmd; 340 struct mmc_command cmd = {0};
359 int err; 341 int err;
360 342
361 memset(&cmd, 0, sizeof(struct mmc_command));
362
363 cmd.opcode = MMC_SPI_READ_OCR; 343 cmd.opcode = MMC_SPI_READ_OCR;
364 cmd.arg = highcap ? (1 << 30) : 0; 344 cmd.arg = highcap ? (1 << 30) : 0;
365 cmd.flags = MMC_RSP_SPI_R3; 345 cmd.flags = MMC_RSP_SPI_R3;
@@ -372,11 +352,9 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
372 352
373int mmc_spi_set_crc(struct mmc_host *host, int use_crc) 353int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
374{ 354{
375 struct mmc_command cmd; 355 struct mmc_command cmd = {0};
376 int err; 356 int err;
377 357
378 memset(&cmd, 0, sizeof(struct mmc_command));
379
380 cmd.opcode = MMC_SPI_CRC_ON_OFF; 358 cmd.opcode = MMC_SPI_CRC_ON_OFF;
381 cmd.flags = MMC_RSP_SPI_R1; 359 cmd.flags = MMC_RSP_SPI_R1;
382 cmd.arg = use_crc; 360 cmd.arg = use_crc;
@@ -387,23 +365,34 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
387 return err; 365 return err;
388} 366}
389 367
390int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value) 368/**
369 * mmc_switch - modify EXT_CSD register
370 * @card: the MMC card associated with the data transfer
371 * @set: cmd set values
372 * @index: EXT_CSD register index
373 * @value: value to program into EXT_CSD register
374 * @timeout_ms: timeout (ms) for operation performed by register write,
375 * timeout of zero implies maximum possible timeout
376 *
377 * Modifies the EXT_CSD register for selected card.
378 */
379int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
380 unsigned int timeout_ms)
391{ 381{
392 int err; 382 int err;
393 struct mmc_command cmd; 383 struct mmc_command cmd = {0};
394 u32 status; 384 u32 status;
395 385
396 BUG_ON(!card); 386 BUG_ON(!card);
397 BUG_ON(!card->host); 387 BUG_ON(!card->host);
398 388
399 memset(&cmd, 0, sizeof(struct mmc_command));
400
401 cmd.opcode = MMC_SWITCH; 389 cmd.opcode = MMC_SWITCH;
402 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 390 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
403 (index << 16) | 391 (index << 16) |
404 (value << 8) | 392 (value << 8) |
405 set; 393 set;
406 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 394 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
395 cmd.cmd_timeout_ms = timeout_ms;
407 396
408 err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); 397 err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
409 if (err) 398 if (err)
@@ -433,17 +422,16 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
433 422
434 return 0; 423 return 0;
435} 424}
425EXPORT_SYMBOL_GPL(mmc_switch);
436 426
437int mmc_send_status(struct mmc_card *card, u32 *status) 427int mmc_send_status(struct mmc_card *card, u32 *status)
438{ 428{
439 int err; 429 int err;
440 struct mmc_command cmd; 430 struct mmc_command cmd = {0};
441 431
442 BUG_ON(!card); 432 BUG_ON(!card);
443 BUG_ON(!card->host); 433 BUG_ON(!card->host);
444 434
445 memset(&cmd, 0, sizeof(struct mmc_command));
446
447 cmd.opcode = MMC_SEND_STATUS; 435 cmd.opcode = MMC_SEND_STATUS;
448 if (!mmc_host_is_spi(card->host)) 436 if (!mmc_host_is_spi(card->host))
449 cmd.arg = card->rca << 16; 437 cmd.arg = card->rca << 16;
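mmc_switch() above packs the CMD6 argument as access mode, EXT_CSD index, value and command set in one 32-bit word, while the new timeout travels separately in cmd.cmd_timeout_ms. A standalone illustration of the packing; the 0x03 write-byte mode and the 185 HS_TIMING index are assumed values for the macros named in the hunk:

#include <stdint.h>
#include <stdio.h>

#define SWITCH_MODE_WRITE_BYTE 0x03  /* assumed MMC_SWITCH_MODE_WRITE_BYTE */

/* Pack a CMD6 (SWITCH) argument the same way the hunk above does. */
static uint32_t switch_arg(uint8_t set, uint8_t index, uint8_t value)
{
        return ((uint32_t)SWITCH_MODE_WRITE_BYTE << 24) |
               ((uint32_t)index << 16) |
               ((uint32_t)value << 8) |
               set;
}

int main(void)
{
        /* e.g. EXT_CSD_HS_TIMING (assumed index 185) := 1, normal cmd set */
        printf("0x%08x\n", switch_arg(0, 185, 1));   /* 0x03b90100 */
        return 0;
}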
@@ -466,9 +454,9 @@ static int
466mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, 454mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
467 u8 len) 455 u8 len)
468{ 456{
469 struct mmc_request mrq; 457 struct mmc_request mrq = {0};
470 struct mmc_command cmd; 458 struct mmc_command cmd = {0};
471 struct mmc_data data; 459 struct mmc_data data = {0};
472 struct scatterlist sg; 460 struct scatterlist sg;
473 u8 *data_buf; 461 u8 *data_buf;
474 u8 *test_buf; 462 u8 *test_buf;
@@ -497,10 +485,6 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
497 if (opcode == MMC_BUS_TEST_W) 485 if (opcode == MMC_BUS_TEST_W)
498 memcpy(data_buf, test_buf, len); 486 memcpy(data_buf, test_buf, len);
499 487
500 memset(&mrq, 0, sizeof(struct mmc_request));
501 memset(&cmd, 0, sizeof(struct mmc_command));
502 memset(&data, 0, sizeof(struct mmc_data));
503
504 mrq.cmd = &cmd; 488 mrq.cmd = &cmd;
505 mrq.data = &data; 489 mrq.data = &data;
506 cmd.opcode = opcode; 490 cmd.opcode = opcode;
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index e6d44b8a18db..9276946fa5b7 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -20,7 +20,6 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid);
20int mmc_set_relative_addr(struct mmc_card *card); 20int mmc_set_relative_addr(struct mmc_card *card);
21int mmc_send_csd(struct mmc_card *card, u32 *csd); 21int mmc_send_csd(struct mmc_card *card, u32 *csd);
22int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); 22int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
23int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value);
24int mmc_send_status(struct mmc_card *card, u32 *status); 23int mmc_send_status(struct mmc_card *card, u32 *status);
25int mmc_send_cid(struct mmc_host *host, u32 *cid); 24int mmc_send_cid(struct mmc_host *host, u32 *cid);
26int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); 25int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index 11118b74eb20..3a596217029e 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * This file contains work-arounds for many known sdio hardware 2 * This file contains work-arounds for many known SD/MMC
3 * bugs. 3 * and SDIO hardware bugs.
4 * 4 *
5 * Copyright (c) 2011 Andrei Warkentin <andreiw@motorola.com>
5 * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com> 6 * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
6 * Inspired from pci fixup code: 7 * Inspired from pci fixup code:
7 * Copyright (c) 1999 Martin Mares <mj@ucw.cz> 8 * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
@@ -11,34 +12,14 @@
11#include <linux/types.h> 12#include <linux/types.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/mmc/card.h> 14#include <linux/mmc/card.h>
14#include <linux/mod_devicetable.h>
15 15
16/* 16#ifndef SDIO_VENDOR_ID_TI
17 * The world is not perfect and supplies us with broken mmc/sdio devices. 17#define SDIO_VENDOR_ID_TI 0x0097
18 * For at least a part of these bugs we need a work-around 18#endif
19 */
20
21struct mmc_fixup {
22 u16 vendor, device; /* You can use SDIO_ANY_ID here of course */
23 void (*vendor_fixup)(struct mmc_card *card, int data);
24 int data;
25};
26
27/*
28 * This hook just adds a quirk unconditionnally
29 */
30static void __maybe_unused add_quirk(struct mmc_card *card, int data)
31{
32 card->quirks |= data;
33}
34 19
35/* 20#ifndef SDIO_DEVICE_ID_TI_WL1271
36 * This hook just removes a quirk unconditionnally 21#define SDIO_DEVICE_ID_TI_WL1271 0x4076
37 */ 22#endif
38static void __maybe_unused remove_quirk(struct mmc_card *card, int data)
39{
40 card->quirks &= ~data;
41}
42 23
43/* 24/*
44 * This hook just adds a quirk for all sdio devices 25 * This hook just adds a quirk for all sdio devices
@@ -49,33 +30,47 @@ static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
49 card->quirks |= data; 30 card->quirks |= data;
50} 31}
51 32
52#ifndef SDIO_VENDOR_ID_TI
53#define SDIO_VENDOR_ID_TI 0x0097
54#endif
55
56#ifndef SDIO_DEVICE_ID_TI_WL1271
57#define SDIO_DEVICE_ID_TI_WL1271 0x4076
58#endif
59
60static const struct mmc_fixup mmc_fixup_methods[] = { 33static const struct mmc_fixup mmc_fixup_methods[] = {
61 /* by default sdio devices are considered CLK_GATING broken */ 34 /* by default sdio devices are considered CLK_GATING broken */
62 /* good cards will be whitelisted as they are tested */ 35 /* good cards will be whitelisted as they are tested */
63 { SDIO_ANY_ID, SDIO_ANY_ID, 36 SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
64 add_quirk_for_sdio_devices, MMC_QUIRK_BROKEN_CLK_GATING }, 37 add_quirk_for_sdio_devices,
65 { SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, 38 MMC_QUIRK_BROKEN_CLK_GATING),
66 remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING }, 39
67 { 0 } 40 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
41 remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
42
43 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
44 add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
45
46 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
47 add_quirk, MMC_QUIRK_DISABLE_CD),
48
49 END_FIXUP
68}; 50};
69 51
70void mmc_fixup_device(struct mmc_card *card) 52void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table)
71{ 53{
72 const struct mmc_fixup *f; 54 const struct mmc_fixup *f;
55 u64 rev = cid_rev_card(card);
56
57 /* Non-core specific workarounds. */
58 if (!table)
59 table = mmc_fixup_methods;
73 60
74 for (f = mmc_fixup_methods; f->vendor_fixup; f++) { 61 for (f = table; f->vendor_fixup; f++) {
75 if ((f->vendor == card->cis.vendor 62 if ((f->manfid == CID_MANFID_ANY ||
76 || f->vendor == (u16) SDIO_ANY_ID) && 63 f->manfid == card->cid.manfid) &&
77 (f->device == card->cis.device 64 (f->oemid == CID_OEMID_ANY ||
78 || f->device == (u16) SDIO_ANY_ID)) { 65 f->oemid == card->cid.oemid) &&
66 (f->name == CID_NAME_ANY ||
67 !strncmp(f->name, card->cid.prod_name,
68 sizeof(card->cid.prod_name))) &&
69 (f->cis_vendor == card->cis.vendor ||
70 f->cis_vendor == (u16) SDIO_ANY_ID) &&
71 (f->cis_device == card->cis.device ||
72 f->cis_device == (u16) SDIO_ANY_ID) &&
73 rev >= f->rev_start && rev <= f->rev_end) {
79 dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup); 74 dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup);
80 f->vendor_fixup(card, f->data); 75 f->vendor_fixup(card, f->data);
81 } 76 }
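The reworked mmc_fixup_device() walks a table in which every key (CID manfid, oemid, product name, CIS ids, revision range) may be a wildcard, and an entry fires only when all of its populated keys match the card. A reduced standalone model of that matching; the struct fields, the ANY_ID sentinel and all ids below are simplified stand-ins, not the kernel's CID_*_ANY macros or real device ids:

#include <stdio.h>
#include <string.h>

#define ANY_ID 0xffff                 /* stand-in for the *_ANY wildcards */

struct fixup {                        /* reduced model of struct mmc_fixup */
        unsigned int manfid;
        unsigned int oemid;
        const char *name;             /* NULL means "any product name" */
        unsigned int quirk;
};

struct card {
        unsigned int manfid, oemid;
        char name[8];
        unsigned int quirks;
};

static void apply_fixups(struct card *c, const struct fixup *table)
{
        const struct fixup *f;

        for (f = table; f->quirk; f++) {          /* zero quirk = terminator */
                if ((f->manfid == ANY_ID || f->manfid == c->manfid) &&
                    (f->oemid == ANY_ID || f->oemid == c->oemid) &&
                    (!f->name || !strncmp(f->name, c->name, sizeof(c->name))))
                        c->quirks |= f->quirk;
        }
}

int main(void)
{
        static const struct fixup table[] = {
                { ANY_ID, ANY_ID, NULL,     0x1 },  /* applies to every card */
                { 0x15,   0x0100, "TOYCRD", 0x2 },  /* one specific model */
                { 0, 0, NULL, 0 },                  /* like END_FIXUP */
        };
        struct card c = { .manfid = 0x15, .oemid = 0x0100, .name = "TOYCRD" };

        apply_fixups(&c, table);
        printf("quirks=0x%x\n", c.quirks);          /* 0x3 */
        return 0;
}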
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 6dac89fe0535..ff2774128aa9 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -130,7 +130,7 @@ static int mmc_decode_csd(struct mmc_card *card)
130 break; 130 break;
131 case 1: 131 case 1:
132 /* 132 /*
133 * This is a block-addressed SDHC card. Most 133 * This is a block-addressed SDHC or SDXC card. Most
134 * interesting fields are unused and have fixed 134 * interesting fields are unused and have fixed
135 * values. To avoid getting tripped by buggy cards, 135 * values. To avoid getting tripped by buggy cards,
136 * we assume those fixed values ourselves. 136 * we assume those fixed values ourselves.
@@ -144,6 +144,11 @@ static int mmc_decode_csd(struct mmc_card *card)
144 e = UNSTUFF_BITS(resp, 96, 3); 144 e = UNSTUFF_BITS(resp, 96, 3);
145 csd->max_dtr = tran_exp[e] * tran_mant[m]; 145 csd->max_dtr = tran_exp[e] * tran_mant[m];
146 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); 146 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
147 csd->c_size = UNSTUFF_BITS(resp, 48, 22);
148
149 /* SDXC cards have a minimum C_SIZE of 0x00FFFF */
150 if (csd->c_size >= 0xFFFF)
151 mmc_card_set_ext_capacity(card);
147 152
148 m = UNSTUFF_BITS(resp, 48, 22); 153 m = UNSTUFF_BITS(resp, 48, 22);
149 csd->capacity = (1 + m) << 10; 154 csd->capacity = (1 + m) << 10;
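For the CSD version 2 layout decoded above, capacity is (C_SIZE + 1) units of 512 KiB, kept in 512-byte blocks, and a C_SIZE at or above 0x00FFFF marks the card as extended capacity (SDXC). A small standalone check of the arithmetic at that threshold:

#include <stdint.h>
#include <stdio.h>

/* CSD v2.0: capacity = (C_SIZE + 1) * 512 KiB, kept as 512-byte blocks. */
static uint64_t csd2_blocks(uint32_t c_size)
{
        return ((uint64_t)c_size + 1) << 10;
}

int main(void)
{
        uint32_t c_size = 0xFFFF;          /* the SDXC threshold in the hunk */
        uint64_t blocks = csd2_blocks(c_size);

        printf("%llu blocks, %llu GiB\n",
               (unsigned long long)blocks,
               (unsigned long long)((blocks * 512) >> 30));
        /* 67108864 blocks, 32 GiB: from here on the card counts as SDXC */
        return 0;
}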
@@ -189,12 +194,17 @@ static int mmc_decode_scr(struct mmc_card *card)
189 194
190 scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4); 195 scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
191 scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); 196 scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);
197 if (scr->sda_vsn == SCR_SPEC_VER_2)
198 /* Check if Physical Layer Spec v3.0 is supported */
199 scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1);
192 200
193 if (UNSTUFF_BITS(resp, 55, 1)) 201 if (UNSTUFF_BITS(resp, 55, 1))
194 card->erased_byte = 0xFF; 202 card->erased_byte = 0xFF;
195 else 203 else
196 card->erased_byte = 0x0; 204 card->erased_byte = 0x0;
197 205
206 if (scr->sda_spec3)
207 scr->cmds = UNSTUFF_BITS(resp, 32, 2);
198 return 0; 208 return 0;
199} 209}
200 210
@@ -274,29 +284,74 @@ static int mmc_read_switch(struct mmc_card *card)
274 status = kmalloc(64, GFP_KERNEL); 284 status = kmalloc(64, GFP_KERNEL);
275 if (!status) { 285 if (!status) {
276 printk(KERN_ERR "%s: could not allocate a buffer for " 286 printk(KERN_ERR "%s: could not allocate a buffer for "
277 "switch capabilities.\n", mmc_hostname(card->host)); 287 "switch capabilities.\n",
288 mmc_hostname(card->host));
278 return -ENOMEM; 289 return -ENOMEM;
279 } 290 }
280 291
292 /* Find out the supported Bus Speed Modes. */
281 err = mmc_sd_switch(card, 0, 0, 1, status); 293 err = mmc_sd_switch(card, 0, 0, 1, status);
282 if (err) { 294 if (err) {
283 /* If the host or the card can't do the switch, 295 /*
284 * fail more gracefully. */ 296 * If the host or the card can't do the switch,
285 if ((err != -EINVAL) 297 * fail more gracefully.
286 && (err != -ENOSYS) 298 */
287 && (err != -EFAULT)) 299 if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
288 goto out; 300 goto out;
289 301
290 printk(KERN_WARNING "%s: problem reading switch " 302 printk(KERN_WARNING "%s: problem reading Bus Speed modes.\n",
291 "capabilities, performance might suffer.\n",
292 mmc_hostname(card->host)); 303 mmc_hostname(card->host));
293 err = 0; 304 err = 0;
294 305
295 goto out; 306 goto out;
296 } 307 }
297 308
298 if (status[13] & 0x02) 309 if (card->scr.sda_spec3) {
299 card->sw_caps.hs_max_dtr = 50000000; 310 card->sw_caps.sd3_bus_mode = status[13];
311
312 /* Find out Driver Strengths supported by the card */
313 err = mmc_sd_switch(card, 0, 2, 1, status);
314 if (err) {
315 /*
316 * If the host or the card can't do the switch,
317 * fail more gracefully.
318 */
319 if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
320 goto out;
321
322 printk(KERN_WARNING "%s: problem reading "
323 "Driver Strength.\n",
324 mmc_hostname(card->host));
325 err = 0;
326
327 goto out;
328 }
329
330 card->sw_caps.sd3_drv_type = status[9];
331
332 /* Find out Current Limits supported by the card */
333 err = mmc_sd_switch(card, 0, 3, 1, status);
334 if (err) {
335 /*
336 * If the host or the card can't do the switch,
337 * fail more gracefully.
338 */
339 if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
340 goto out;
341
342 printk(KERN_WARNING "%s: problem reading "
343 "Current Limit.\n",
344 mmc_hostname(card->host));
345 err = 0;
346
347 goto out;
348 }
349
350 card->sw_caps.sd3_curr_limit = status[7];
351 } else {
352 if (status[13] & 0x02)
353 card->sw_caps.hs_max_dtr = 50000000;
354 }
300 355
301out: 356out:
302 kfree(status); 357 kfree(status);
@@ -352,6 +407,232 @@ out:
352 return err; 407 return err;
353} 408}
354 409
410static int sd_select_driver_type(struct mmc_card *card, u8 *status)
411{
412 int host_drv_type = 0, card_drv_type = 0;
413 int err;
414
415 /*
416 * If the host doesn't support any of the Driver Types A,C or D,
417 * default Driver Type B is used.
418 */
419 if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C
420 | MMC_CAP_DRIVER_TYPE_D)))
421 return 0;
422
423 if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) {
424 host_drv_type = MMC_SET_DRIVER_TYPE_A;
425 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
426 card_drv_type = MMC_SET_DRIVER_TYPE_A;
427 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
428 card_drv_type = MMC_SET_DRIVER_TYPE_B;
429 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
430 card_drv_type = MMC_SET_DRIVER_TYPE_C;
431 } else if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) {
432 host_drv_type = MMC_SET_DRIVER_TYPE_C;
433 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
434 card_drv_type = MMC_SET_DRIVER_TYPE_C;
435 } else if (!(card->host->caps & MMC_CAP_DRIVER_TYPE_D)) {
436 /*
437 * If we are here, that means only the default driver type
438 * B is supported by the host.
439 */
440 host_drv_type = MMC_SET_DRIVER_TYPE_B;
441 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
442 card_drv_type = MMC_SET_DRIVER_TYPE_B;
443 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
444 card_drv_type = MMC_SET_DRIVER_TYPE_C;
445 }
446
447 err = mmc_sd_switch(card, 1, 2, card_drv_type, status);
448 if (err)
449 return err;
450
451 if ((status[15] & 0xF) != card_drv_type) {
452 printk(KERN_WARNING "%s: Problem setting driver strength!\n",
453 mmc_hostname(card->host));
454 return 0;
455 }
456
457 mmc_set_driver_type(card->host, host_drv_type);
458
459 return 0;
460}
461
462static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
463{
464 unsigned int bus_speed = 0, timing = 0;
465 int err;
466
467 /*
468 * If the host doesn't support any of the UHS-I modes, fallback on
469 * default speed.
470 */
471 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
472 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
473 return 0;
474
475 if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
476 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
477 bus_speed = UHS_SDR104_BUS_SPEED;
478 timing = MMC_TIMING_UHS_SDR104;
479 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
480 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
481 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
482 bus_speed = UHS_DDR50_BUS_SPEED;
483 timing = MMC_TIMING_UHS_DDR50;
484 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
485 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
486 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
487 SD_MODE_UHS_SDR50)) {
488 bus_speed = UHS_SDR50_BUS_SPEED;
489 timing = MMC_TIMING_UHS_SDR50;
490 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
491 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
492 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
493 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
494 bus_speed = UHS_SDR25_BUS_SPEED;
495 timing = MMC_TIMING_UHS_SDR25;
496 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
497 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
498 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
499 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
500 SD_MODE_UHS_SDR12)) {
501 bus_speed = UHS_SDR12_BUS_SPEED;
502 timing = MMC_TIMING_UHS_SDR12;
503 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
504 }
505
506 card->sd_bus_speed = bus_speed;
507 err = mmc_sd_switch(card, 1, 0, bus_speed, status);
508 if (err)
509 return err;
510
511 if ((status[16] & 0xF) != bus_speed)
512 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
513 mmc_hostname(card->host));
514 else {
515 mmc_set_timing(card->host, timing);
516 mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
517 }
518
519 return 0;
520}
521
522static int sd_set_current_limit(struct mmc_card *card, u8 *status)
523{
524 int current_limit = 0;
525 int err;
526
527 /*
528 * Current limit switch is only defined for SDR50, SDR104, and DDR50
529 * bus speed modes. For other bus speed modes, we set the default
530 * current limit of 200mA.
531 */
532 if ((card->sd_bus_speed == UHS_SDR50_BUS_SPEED) ||
533 (card->sd_bus_speed == UHS_SDR104_BUS_SPEED) ||
534 (card->sd_bus_speed == UHS_DDR50_BUS_SPEED)) {
535 if (card->host->caps & MMC_CAP_MAX_CURRENT_800) {
536 if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800)
537 current_limit = SD_SET_CURRENT_LIMIT_800;
538 else if (card->sw_caps.sd3_curr_limit &
539 SD_MAX_CURRENT_600)
540 current_limit = SD_SET_CURRENT_LIMIT_600;
541 else if (card->sw_caps.sd3_curr_limit &
542 SD_MAX_CURRENT_400)
543 current_limit = SD_SET_CURRENT_LIMIT_400;
544 else if (card->sw_caps.sd3_curr_limit &
545 SD_MAX_CURRENT_200)
546 current_limit = SD_SET_CURRENT_LIMIT_200;
547 } else if (card->host->caps & MMC_CAP_MAX_CURRENT_600) {
548 if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600)
549 current_limit = SD_SET_CURRENT_LIMIT_600;
550 else if (card->sw_caps.sd3_curr_limit &
551 SD_MAX_CURRENT_400)
552 current_limit = SD_SET_CURRENT_LIMIT_400;
553 else if (card->sw_caps.sd3_curr_limit &
554 SD_MAX_CURRENT_200)
555 current_limit = SD_SET_CURRENT_LIMIT_200;
556 } else if (card->host->caps & MMC_CAP_MAX_CURRENT_400) {
557 if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400)
558 current_limit = SD_SET_CURRENT_LIMIT_400;
559 else if (card->sw_caps.sd3_curr_limit &
560 SD_MAX_CURRENT_200)
561 current_limit = SD_SET_CURRENT_LIMIT_200;
562 } else if (card->host->caps & MMC_CAP_MAX_CURRENT_200) {
563 if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200)
564 current_limit = SD_SET_CURRENT_LIMIT_200;
565 }
566 } else
567 current_limit = SD_SET_CURRENT_LIMIT_200;
568
569 err = mmc_sd_switch(card, 1, 3, current_limit, status);
570 if (err)
571 return err;
572
573 if (((status[15] >> 4) & 0x0F) != current_limit)
574 printk(KERN_WARNING "%s: Problem setting current limit!\n",
575 mmc_hostname(card->host));
576
577 return 0;
578}
579
580/*
581 * UHS-I specific initialization procedure
582 */
583static int mmc_sd_init_uhs_card(struct mmc_card *card)
584{
585 int err;
586 u8 *status;
587
588 if (!card->scr.sda_spec3)
589 return 0;
590
591 if (!(card->csd.cmdclass & CCC_SWITCH))
592 return 0;
593
594 status = kmalloc(64, GFP_KERNEL);
595 if (!status) {
596 printk(KERN_ERR "%s: could not allocate a buffer for "
597 "switch capabilities.\n", mmc_hostname(card->host));
598 return -ENOMEM;
599 }
600
601 /* Set 4-bit bus width */
602 if ((card->host->caps & MMC_CAP_4_BIT_DATA) &&
603 (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
604 err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
605 if (err)
606 goto out;
607
608 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
609 }
610
611 /* Set the driver strength for the card */
612 err = sd_select_driver_type(card, status);
613 if (err)
614 goto out;
615
616 /* Set bus speed mode of the card */
617 err = sd_set_bus_speed_mode(card, status);
618 if (err)
619 goto out;
620
621 /* Set current limit for the card */
622 err = sd_set_current_limit(card, status);
623 if (err)
624 goto out;
625
626 /* SPI mode doesn't define CMD19 */
627 if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
628 err = card->host->ops->execute_tuning(card->host);
629
630out:
631 kfree(status);
632
633 return err;
634}
635
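mmc_sd_init_uhs_card() above runs the UHS-I bring-up as a fixed sequence, bailing out on the first failure: 4-bit bus width (ACMD6), driver strength (CMD6 group 3), bus speed mode (group 1), current limit (group 4), and finally CMD19 tuning when the host provides execute_tuning. A trivial sketch of that early-return sequencing (step names only, no real SD commands are issued):

#include <stdio.h>

/* Illustrates the early-return ordering only. */
static int step(const char *name)
{
    printf("%s\n", name);
    return 0;            /* a non-zero return would abort the sequence */
}

int main(void)
{
    int err;

    if ((err = step("set 4-bit bus width (ACMD6)")))
        return err;
    if ((err = step("select driver strength (CMD6 group 3)")))
        return err;
    if ((err = step("select bus speed mode (CMD6 group 1)")))
        return err;
    if ((err = step("set current limit (CMD6 group 4)")))
        return err;
    if ((err = step("execute tuning (CMD19)")))
        return err;
    return 0;
}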
355MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1], 636MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
356 card->raw_cid[2], card->raw_cid[3]); 637 card->raw_cid[2], card->raw_cid[3]);
357MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], 638MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
@@ -400,7 +681,7 @@ struct device_type sd_type = {
400/* 681/*
401 * Fetch CID from card. 682 * Fetch CID from card.
402 */ 683 */
403int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid) 684int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
404{ 685{
405 int err; 686 int err;
406 687
@@ -420,12 +701,39 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid)
420 */ 701 */
421 err = mmc_send_if_cond(host, ocr); 702 err = mmc_send_if_cond(host, ocr);
422 if (!err) 703 if (!err)
423 ocr |= 1 << 30; 704 ocr |= SD_OCR_CCS;
705
706 /*
 707	 * If the host supports one of the UHS-I modes, request the card
708 * to switch to 1.8V signaling level.
709 */
710 if (host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
711 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))
712 ocr |= SD_OCR_S18R;
713
714 /* If the host can supply more than 150mA, XPC should be set to 1. */
715 if (host->caps & (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 |
716 MMC_CAP_SET_XPC_180))
717 ocr |= SD_OCR_XPC;
424 718
425 err = mmc_send_app_op_cond(host, ocr, NULL); 719try_again:
720 err = mmc_send_app_op_cond(host, ocr, rocr);
426 if (err) 721 if (err)
427 return err; 722 return err;
428 723
724 /*
 725	 * If both CCS and S18A in the response are set, start the Signal Voltage
 726	 * Switch procedure. SPI mode doesn't support CMD11.
727 */
728 if (!mmc_host_is_spi(host) && rocr &&
729 ((*rocr & 0x41000000) == 0x41000000)) {
730 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true);
731 if (err) {
732 ocr &= ~SD_OCR_S18R;
733 goto try_again;
734 }
735 }
736
429 if (mmc_host_is_spi(host)) 737 if (mmc_host_is_spi(host))
430 err = mmc_send_cid(host, cid); 738 err = mmc_send_cid(host, cid);
431 else 739 else
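The 0x41000000 test above checks two bits of the ACMD41 response at once: CCS (bit 30) and S18A (bit 24); only when both are set does the driver start the CMD11 voltage switch, retrying without S18R if that switch fails. A short sketch of the bit check (bit positions per the SD spec, macro names made up here):

#include <stdio.h>
#include <stdint.h>

/* Made-up macro names mirroring the 0x41000000 mask used above. */
#define OCR_CCS   (1u << 30)
#define OCR_S18A  (1u << 24)

static int wants_voltage_switch(uint32_t rocr)
{
    /* Both CCS and S18A must be set before CMD11 is attempted. */
    return (rocr & (OCR_CCS | OCR_S18A)) == (OCR_CCS | OCR_S18A);
}

int main(void)
{
    printf("%d\n", wants_voltage_switch(0x41000000)); /* 1: start CMD11      */
    printf("%d\n", wants_voltage_switch(0x40000000)); /* 0: SDHC, stay 3.3 V */
    return 0;
}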
@@ -553,11 +861,12 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
553 struct mmc_card *card; 861 struct mmc_card *card;
554 int err; 862 int err;
555 u32 cid[4]; 863 u32 cid[4];
864 u32 rocr = 0;
556 865
557 BUG_ON(!host); 866 BUG_ON(!host);
558 WARN_ON(!host->claimed); 867 WARN_ON(!host->claimed);
559 868
560 err = mmc_sd_get_cid(host, ocr, cid); 869 err = mmc_sd_get_cid(host, ocr, cid, &rocr);
561 if (err) 870 if (err)
562 return err; 871 return err;
563 872
@@ -610,30 +919,47 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
610 if (err) 919 if (err)
611 goto free_card; 920 goto free_card;
612 921
613 /* 922 /* Initialization sequence for UHS-I cards */
614 * Attempt to change to high-speed (if supported) 923 if (rocr & SD_ROCR_S18A) {
615 */ 924 err = mmc_sd_init_uhs_card(card);
616 err = mmc_sd_switch_hs(card); 925 if (err)
617 if (err > 0) 926 goto free_card;
618 mmc_sd_go_highspeed(card);
619 else if (err)
620 goto free_card;
621 927
622 /* 928 /* Card is an ultra-high-speed card */
623 * Set bus speed. 929 mmc_sd_card_set_uhs(card);
624 */
625 mmc_set_clock(host, mmc_sd_get_max_clock(card));
626 930
627 /* 931 /*
628 * Switch to wider bus (if supported). 932 * Since initialization is now complete, enable preset
629 */ 933 * value registers for UHS-I cards.
630 if ((host->caps & MMC_CAP_4_BIT_DATA) && 934 */
631 (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) { 935 if (host->ops->enable_preset_value)
632 err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4); 936 host->ops->enable_preset_value(host, true);
633 if (err) 937 } else {
938 /*
939 * Attempt to change to high-speed (if supported)
940 */
941 err = mmc_sd_switch_hs(card);
942 if (err > 0)
943 mmc_sd_go_highspeed(card);
944 else if (err)
634 goto free_card; 945 goto free_card;
635 946
636 mmc_set_bus_width(host, MMC_BUS_WIDTH_4); 947 /*
948 * Set bus speed.
949 */
950 mmc_set_clock(host, mmc_sd_get_max_clock(card));
951
952 /*
953 * Switch to wider bus (if supported).
954 */
955 if ((host->caps & MMC_CAP_4_BIT_DATA) &&
956 (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
957 err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
958 if (err)
959 goto free_card;
960
961 mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
962 }
637 } 963 }
638 964
639 host->card = card; 965 host->card = card;
@@ -773,6 +1099,15 @@ int mmc_attach_sd(struct mmc_host *host)
773 BUG_ON(!host); 1099 BUG_ON(!host);
774 WARN_ON(!host->claimed); 1100 WARN_ON(!host->claimed);
775 1101
1102 /* Make sure we are at 3.3V signalling voltage */
1103 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
1104 if (err)
1105 return err;
1106
1107 /* Disable preset value enable if already set since last time */
1108 if (host->ops->enable_preset_value)
1109 host->ops->enable_preset_value(host, false);
1110
776 err = mmc_send_app_op_cond(host, 0, &ocr); 1111 err = mmc_send_app_op_cond(host, 0, &ocr);
777 if (err) 1112 if (err)
778 return err; 1113 return err;
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index 3d8800fa7600..4b34b24f3f76 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -5,7 +5,7 @@
5 5
6extern struct device_type sd_type; 6extern struct device_type sd_type;
7 7
8int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid); 8int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);
9int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card); 9int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card);
10void mmc_decode_cid(struct mmc_card *card); 10void mmc_decode_cid(struct mmc_card *card);
11int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, 11int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 76af349c14b4..021fed153804 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -21,10 +21,10 @@
21#include "core.h" 21#include "core.h"
22#include "sd_ops.h" 22#include "sd_ops.h"
23 23
24static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) 24int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
25{ 25{
26 int err; 26 int err;
27 struct mmc_command cmd; 27 struct mmc_command cmd = {0};
28 28
29 BUG_ON(!host); 29 BUG_ON(!host);
30 BUG_ON(card && (card->host != host)); 30 BUG_ON(card && (card->host != host));
@@ -49,6 +49,7 @@ static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
49 49
50 return 0; 50 return 0;
51} 51}
52EXPORT_SYMBOL_GPL(mmc_app_cmd);
52 53
53/** 54/**
54 * mmc_wait_for_app_cmd - start an application command and wait for 55 * mmc_wait_for_app_cmd - start an application command and wait for
@@ -66,7 +67,7 @@ static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
66int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, 67int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
67 struct mmc_command *cmd, int retries) 68 struct mmc_command *cmd, int retries)
68{ 69{
69 struct mmc_request mrq; 70 struct mmc_request mrq = {0};
70 71
71 int i, err; 72 int i, err;
72 73
@@ -119,13 +120,11 @@ EXPORT_SYMBOL(mmc_wait_for_app_cmd);
119int mmc_app_set_bus_width(struct mmc_card *card, int width) 120int mmc_app_set_bus_width(struct mmc_card *card, int width)
120{ 121{
121 int err; 122 int err;
122 struct mmc_command cmd; 123 struct mmc_command cmd = {0};
123 124
124 BUG_ON(!card); 125 BUG_ON(!card);
125 BUG_ON(!card->host); 126 BUG_ON(!card->host);
126 127
127 memset(&cmd, 0, sizeof(struct mmc_command));
128
129 cmd.opcode = SD_APP_SET_BUS_WIDTH; 128 cmd.opcode = SD_APP_SET_BUS_WIDTH;
130 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 129 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
131 130
@@ -149,13 +148,11 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
149 148
150int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) 149int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
151{ 150{
152 struct mmc_command cmd; 151 struct mmc_command cmd = {0};
153 int i, err = 0; 152 int i, err = 0;
154 153
155 BUG_ON(!host); 154 BUG_ON(!host);
156 155
157 memset(&cmd, 0, sizeof(struct mmc_command));
158
159 cmd.opcode = SD_APP_OP_COND; 156 cmd.opcode = SD_APP_OP_COND;
160 if (mmc_host_is_spi(host)) 157 if (mmc_host_is_spi(host))
161 cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */ 158 cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
@@ -194,7 +191,7 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
194 191
195int mmc_send_if_cond(struct mmc_host *host, u32 ocr) 192int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
196{ 193{
197 struct mmc_command cmd; 194 struct mmc_command cmd = {0};
198 int err; 195 int err;
199 static const u8 test_pattern = 0xAA; 196 static const u8 test_pattern = 0xAA;
200 u8 result_pattern; 197 u8 result_pattern;
@@ -226,13 +223,11 @@ int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
226int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca) 223int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
227{ 224{
228 int err; 225 int err;
229 struct mmc_command cmd; 226 struct mmc_command cmd = {0};
230 227
231 BUG_ON(!host); 228 BUG_ON(!host);
232 BUG_ON(!rca); 229 BUG_ON(!rca);
233 230
234 memset(&cmd, 0, sizeof(struct mmc_command));
235
236 cmd.opcode = SD_SEND_RELATIVE_ADDR; 231 cmd.opcode = SD_SEND_RELATIVE_ADDR;
237 cmd.arg = 0; 232 cmd.arg = 0;
238 cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; 233 cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;
@@ -249,9 +244,9 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
249int mmc_app_send_scr(struct mmc_card *card, u32 *scr) 244int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
250{ 245{
251 int err; 246 int err;
252 struct mmc_request mrq; 247 struct mmc_request mrq = {0};
253 struct mmc_command cmd; 248 struct mmc_command cmd = {0};
254 struct mmc_data data; 249 struct mmc_data data = {0};
255 struct scatterlist sg; 250 struct scatterlist sg;
256 void *data_buf; 251 void *data_buf;
257 252
@@ -272,10 +267,6 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
272 if (data_buf == NULL) 267 if (data_buf == NULL)
273 return -ENOMEM; 268 return -ENOMEM;
274 269
275 memset(&mrq, 0, sizeof(struct mmc_request));
276 memset(&cmd, 0, sizeof(struct mmc_command));
277 memset(&data, 0, sizeof(struct mmc_data));
278
279 mrq.cmd = &cmd; 270 mrq.cmd = &cmd;
280 mrq.data = &data; 271 mrq.data = &data;
281 272
@@ -312,9 +303,9 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
312int mmc_sd_switch(struct mmc_card *card, int mode, int group, 303int mmc_sd_switch(struct mmc_card *card, int mode, int group,
313 u8 value, u8 *resp) 304 u8 value, u8 *resp)
314{ 305{
315 struct mmc_request mrq; 306 struct mmc_request mrq = {0};
316 struct mmc_command cmd; 307 struct mmc_command cmd = {0};
317 struct mmc_data data; 308 struct mmc_data data = {0};
318 struct scatterlist sg; 309 struct scatterlist sg;
319 310
320 BUG_ON(!card); 311 BUG_ON(!card);
@@ -325,10 +316,6 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
325 mode = !!mode; 316 mode = !!mode;
326 value &= 0xF; 317 value &= 0xF;
327 318
328 memset(&mrq, 0, sizeof(struct mmc_request));
329 memset(&cmd, 0, sizeof(struct mmc_command));
330 memset(&data, 0, sizeof(struct mmc_data));
331
332 mrq.cmd = &cmd; 319 mrq.cmd = &cmd;
333 mrq.data = &data; 320 mrq.data = &data;
334 321
@@ -361,9 +348,9 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
361int mmc_app_sd_status(struct mmc_card *card, void *ssr) 348int mmc_app_sd_status(struct mmc_card *card, void *ssr)
362{ 349{
363 int err; 350 int err;
364 struct mmc_request mrq; 351 struct mmc_request mrq = {0};
365 struct mmc_command cmd; 352 struct mmc_command cmd = {0};
366 struct mmc_data data; 353 struct mmc_data data = {0};
367 struct scatterlist sg; 354 struct scatterlist sg;
368 355
369 BUG_ON(!card); 356 BUG_ON(!card);
@@ -376,10 +363,6 @@ int mmc_app_sd_status(struct mmc_card *card, void *ssr)
376 if (err) 363 if (err)
377 return err; 364 return err;
378 365
379 memset(&mrq, 0, sizeof(struct mmc_request));
380 memset(&cmd, 0, sizeof(struct mmc_command));
381 memset(&data, 0, sizeof(struct mmc_data));
382
383 mrq.cmd = &cmd; 366 mrq.cmd = &cmd;
384 mrq.data = &data; 367 mrq.data = &data;
385 368
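Most of the sd_ops.c hunks above replace an explicit memset() of an on-stack mmc_command/mmc_request/mmc_data with a "= {0}" initializer, which zeroes every member at the point of declaration. A tiny standalone illustration with a stand-in struct (field names are not the kernel's):

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct mmc_command. */
struct cmd {
    unsigned opcode;
    unsigned arg;
    unsigned flags;
    void    *data;
};

int main(void)
{
    struct cmd a;          /* old style: declare, then memset to zero */
    struct cmd b = {0};    /* new style: zero-initialized at declaration */

    memset(&a, 0, sizeof(a));
    printf("a: opcode=%u arg=%u flags=%u data=%p\n", a.opcode, a.arg, a.flags, a.data);
    printf("b: opcode=%u arg=%u flags=%u data=%p\n", b.opcode, b.arg, b.flags, b.data);
    return 0;
}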
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index db0f0b44d684..4d0c15bfa514 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -16,6 +16,7 @@
16#include <linux/mmc/card.h> 16#include <linux/mmc/card.h>
17#include <linux/mmc/sdio.h> 17#include <linux/mmc/sdio.h>
18#include <linux/mmc/sdio_func.h> 18#include <linux/mmc/sdio_func.h>
19#include <linux/mmc/sdio_ids.h>
19 20
20#include "core.h" 21#include "core.h"
21#include "bus.h" 22#include "bus.h"
@@ -31,6 +32,11 @@ static int sdio_read_fbr(struct sdio_func *func)
31 int ret; 32 int ret;
32 unsigned char data; 33 unsigned char data;
33 34
35 if (mmc_card_nonstd_func_interface(func->card)) {
36 func->class = SDIO_CLASS_NONE;
37 return 0;
38 }
39
34 ret = mmc_io_rw_direct(func->card, 0, 0, 40 ret = mmc_io_rw_direct(func->card, 0, 0,
35 SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data); 41 SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data);
36 if (ret) 42 if (ret)
@@ -181,7 +187,7 @@ static int sdio_disable_cd(struct mmc_card *card)
181 int ret; 187 int ret;
182 u8 ctrl; 188 u8 ctrl;
183 189
184 if (!card->cccr.disable_cd) 190 if (!mmc_card_disable_cd(card))
185 return 0; 191 return 0;
186 192
187 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl); 193 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
@@ -363,8 +369,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
363 goto err; 369 goto err;
364 } 370 }
365 371
366 if (ocr & R4_MEMORY_PRESENT 372 if ((ocr & R4_MEMORY_PRESENT) &&
367 && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) { 373 mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid, NULL) == 0) {
368 card->type = MMC_TYPE_SD_COMBO; 374 card->type = MMC_TYPE_SD_COMBO;
369 375
370 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || 376 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
@@ -466,7 +472,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
466 472
467 card = oldcard; 473 card = oldcard;
468 } 474 }
469 mmc_fixup_device(card); 475 mmc_fixup_device(card, NULL);
470 476
471 if (card->type == MMC_TYPE_SD_COMBO) { 477 if (card->type == MMC_TYPE_SD_COMBO) {
472 err = mmc_sd_setup_card(host, card, oldcard != NULL); 478 err = mmc_sd_setup_card(host, card, oldcard != NULL);
@@ -625,7 +631,7 @@ static int mmc_sdio_suspend(struct mmc_host *host)
625 } 631 }
626 } 632 }
627 633
628 if (!err && host->pm_flags & MMC_PM_KEEP_POWER) { 634 if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
629 mmc_claim_host(host); 635 mmc_claim_host(host);
630 sdio_disable_wide(host->card); 636 sdio_disable_wide(host->card);
631 mmc_release_host(host); 637 mmc_release_host(host);
@@ -645,10 +651,10 @@ static int mmc_sdio_resume(struct mmc_host *host)
645 mmc_claim_host(host); 651 mmc_claim_host(host);
646 652
647 /* No need to reinitialize powered-resumed nonremovable cards */ 653 /* No need to reinitialize powered-resumed nonremovable cards */
648 if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host)) 654 if (mmc_card_is_removable(host) || !mmc_card_keep_power(host))
649 err = mmc_sdio_init_card(host, host->ocr, host->card, 655 err = mmc_sdio_init_card(host, host->ocr, host->card,
650 (host->pm_flags & MMC_PM_KEEP_POWER)); 656 mmc_card_keep_power(host));
651 else if (mmc_card_is_powered_resumed(host)) { 657 else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
652 /* We may have switched to 1-bit mode during suspend */ 658 /* We may have switched to 1-bit mode during suspend */
653 err = sdio_enable_4bit_bus(host->card); 659 err = sdio_enable_4bit_bus(host->card);
654 if (err > 0) { 660 if (err > 0) {
@@ -691,7 +697,7 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
691 697
692 mmc_claim_host(host); 698 mmc_claim_host(host);
693 ret = mmc_sdio_init_card(host, host->ocr, host->card, 699 ret = mmc_sdio_init_card(host, host->ocr, host->card,
694 (host->pm_flags & MMC_PM_KEEP_POWER)); 700 mmc_card_keep_power(host));
695 if (!ret && host->sdio_irqs) 701 if (!ret && host->sdio_irqs)
696 mmc_signal_sdio_irq(host); 702 mmc_signal_sdio_irq(host);
697 mmc_release_host(host); 703 mmc_release_host(host);
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index b3001617e67d..03ead028d2ce 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -31,6 +31,17 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
31{ 31{
32 int i, ret, count; 32 int i, ret, count;
33 unsigned char pending; 33 unsigned char pending;
34 struct sdio_func *func;
35
36 /*
 37	 * Optimization: if there is only one function interrupt registered,
 38	 * call the irq handler directly.
39 */
40 func = card->sdio_single_irq;
41 if (func) {
42 func->irq_handler(func);
43 return 1;
44 }
34 45
35 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending); 46 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
36 if (ret) { 47 if (ret) {
@@ -42,7 +53,7 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
42 count = 0; 53 count = 0;
43 for (i = 1; i <= 7; i++) { 54 for (i = 1; i <= 7; i++) {
44 if (pending & (1 << i)) { 55 if (pending & (1 << i)) {
45 struct sdio_func *func = card->sdio_func[i - 1]; 56 func = card->sdio_func[i - 1];
46 if (!func) { 57 if (!func) {
47 printk(KERN_WARNING "%s: pending IRQ for " 58 printk(KERN_WARNING "%s: pending IRQ for "
48 "non-existent function\n", 59 "non-existent function\n",
@@ -186,6 +197,24 @@ static int sdio_card_irq_put(struct mmc_card *card)
186 return 0; 197 return 0;
187} 198}
188 199
200/* If there is only 1 function registered set sdio_single_irq */
201static void sdio_single_irq_set(struct mmc_card *card)
202{
203 struct sdio_func *func;
204 int i;
205
206 card->sdio_single_irq = NULL;
207 if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
208 card->host->sdio_irqs == 1)
209 for (i = 0; i < card->sdio_funcs; i++) {
210 func = card->sdio_func[i];
211 if (func && func->irq_handler) {
212 card->sdio_single_irq = func;
213 break;
214 }
215 }
216}
217
189/** 218/**
190 * sdio_claim_irq - claim the IRQ for a SDIO function 219 * sdio_claim_irq - claim the IRQ for a SDIO function
191 * @func: SDIO function 220 * @func: SDIO function
@@ -227,6 +256,7 @@ int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
227 ret = sdio_card_irq_get(func->card); 256 ret = sdio_card_irq_get(func->card);
228 if (ret) 257 if (ret)
229 func->irq_handler = NULL; 258 func->irq_handler = NULL;
259 sdio_single_irq_set(func->card);
230 260
231 return ret; 261 return ret;
232} 262}
@@ -251,6 +281,7 @@ int sdio_release_irq(struct sdio_func *func)
251 if (func->irq_handler) { 281 if (func->irq_handler) {
252 func->irq_handler = NULL; 282 func->irq_handler = NULL;
253 sdio_card_irq_put(func->card); 283 sdio_card_irq_put(func->card);
284 sdio_single_irq_set(func->card);
254 } 285 }
255 286
256 ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg); 287 ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
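The single-IRQ path added above caches the only claimed function in card->sdio_single_irq whenever exactly one handler is registered, so process_sdio_pending_irqs() can dispatch to it directly instead of reading SDIO_CCCR_INTx first; the cache is recomputed on every sdio_claim_irq()/sdio_release_irq(). A userspace sketch of the same idea (types and names are illustrative):

#include <stdio.h>
#include <stddef.h>

/* When exactly one handler is registered, dispatch to it directly instead
 * of reading a pending-interrupt mask. */
typedef void (*irq_handler_t)(int fn);

static irq_handler_t handlers[7];
static irq_handler_t single_irq;   /* plays the role of card->sdio_single_irq */

static void update_single_irq(void)
{
    int count = 0, last = -1;

    for (int i = 0; i < 7; i++)
        if (handlers[i]) {
            count++;
            last = i;
        }
    single_irq = (count == 1) ? handlers[last] : NULL;
}

static void wlan_handler(int fn)
{
    printf("IRQ handled directly for function %d\n", fn);
}

int main(void)
{
    handlers[0] = wlan_handler;    /* claim an IRQ for function 1 */
    update_single_irq();

    if (single_irq)                /* fast path: skip the CCCR_INTx read */
        single_irq(1);
    return 0;
}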
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index dea36d9c22e6..f087d876c573 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -21,13 +21,11 @@
21 21
22int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) 22int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
23{ 23{
24 struct mmc_command cmd; 24 struct mmc_command cmd = {0};
25 int i, err = 0; 25 int i, err = 0;
26 26
27 BUG_ON(!host); 27 BUG_ON(!host);
28 28
29 memset(&cmd, 0, sizeof(struct mmc_command));
30
31 cmd.opcode = SD_IO_SEND_OP_COND; 29 cmd.opcode = SD_IO_SEND_OP_COND;
32 cmd.arg = ocr; 30 cmd.arg = ocr;
33 cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR; 31 cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR;
@@ -70,7 +68,7 @@ int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
70static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn, 68static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
71 unsigned addr, u8 in, u8 *out) 69 unsigned addr, u8 in, u8 *out)
72{ 70{
73 struct mmc_command cmd; 71 struct mmc_command cmd = {0};
74 int err; 72 int err;
75 73
76 BUG_ON(!host); 74 BUG_ON(!host);
@@ -80,8 +78,6 @@ static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
80 if (addr & ~0x1FFFF) 78 if (addr & ~0x1FFFF)
81 return -EINVAL; 79 return -EINVAL;
82 80
83 memset(&cmd, 0, sizeof(struct mmc_command));
84
85 cmd.opcode = SD_IO_RW_DIRECT; 81 cmd.opcode = SD_IO_RW_DIRECT;
86 cmd.arg = write ? 0x80000000 : 0x00000000; 82 cmd.arg = write ? 0x80000000 : 0x00000000;
87 cmd.arg |= fn << 28; 83 cmd.arg |= fn << 28;
@@ -125,9 +121,9 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
125int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, 121int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
126 unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz) 122 unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
127{ 123{
128 struct mmc_request mrq; 124 struct mmc_request mrq = {0};
129 struct mmc_command cmd; 125 struct mmc_command cmd = {0};
130 struct mmc_data data; 126 struct mmc_data data = {0};
131 struct scatterlist sg; 127 struct scatterlist sg;
132 128
133 BUG_ON(!card); 129 BUG_ON(!card);
@@ -140,10 +136,6 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
140 if (addr & ~0x1FFFF) 136 if (addr & ~0x1FFFF)
141 return -EINVAL; 137 return -EINVAL;
142 138
143 memset(&mrq, 0, sizeof(struct mmc_request));
144 memset(&cmd, 0, sizeof(struct mmc_command));
145 memset(&data, 0, sizeof(struct mmc_data));
146
147 mrq.cmd = &cmd; 139 mrq.cmd = &cmd;
148 mrq.data = &data; 140 mrq.data = &data;
149 141
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 94df40531c38..56dbf3f6ad08 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -154,7 +154,7 @@ config MMC_SDHCI_DOVE
154 If unsure, say N. 154 If unsure, say N.
155 155
156config MMC_SDHCI_TEGRA 156config MMC_SDHCI_TEGRA
157 tristate "SDHCI platform support for the Tegra SD/MMC Controller" 157 bool "SDHCI platform support for the Tegra SD/MMC Controller"
158 depends on MMC_SDHCI_PLTFM && ARCH_TEGRA 158 depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
159 select MMC_SDHCI_IO_ACCESSORS 159 select MMC_SDHCI_IO_ACCESSORS
160 help 160 help
@@ -535,6 +535,37 @@ config MMC_JZ4740
535 If you have a board based on such a SoC and with a SD/MMC slot, 535 If you have a board based on such a SoC and with a SD/MMC slot,
536 say Y or M here. 536 say Y or M here.
537 537
538config MMC_VUB300
539 tristate "VUB300 USB to SDIO/SD/MMC Host Controller support"
540 depends on USB
541 help
542 This selects support for Elan Digital Systems' VUB300 chip.
543
544 The VUB300 is a USB-SDIO Host Controller Interface chip
545 that enables the host computer to use SDIO/SD/MMC cards
546 via a USB 2.0 or USB 1.1 host.
547
548 The VUB300 chip will be found in both physically separate
549 USB to SDIO/SD/MMC adapters and embedded on some motherboards.
550
551 The VUB300 chip supports SD and MMC memory cards in addition
552 to single and multifunction SDIO cards.
553
554 Some SDIO cards will need a firmware file to be loaded and
 555	  sent to the VUB300 chip in order to achieve better data throughput.
 556	  Download these "Offload Pseudocode" files from Elan Digital Systems'
557 web-site http://www.elandigitalsystems.com/support/downloads.php
558 and put them in /lib/firmware. Note that without these additional
559 firmware files the VUB300 chip will still function, but not at
560 the best obtainable data rate.
561
562 To compile this mmc host controller driver as a module,
563 choose M here: the module will be called vub300.
564
565 If you have a computer with an embedded VUB300 chip
 566	  or if you intend to connect a USB adapter based on a
 567	  VUB300 chip, say Y or M here.
568
538config MMC_USHC 569config MMC_USHC
539 tristate "USB SD Host Controller (USHC) support" 570 tristate "USB SD Host Controller (USHC) support"
540 depends on USB 571 depends on USB
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 4f1df0aae574..58a5cf73d6e9 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
41obj-$(CONFIG_MMC_DW) += dw_mmc.o 41obj-$(CONFIG_MMC_DW) += dw_mmc.o
42obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o 42obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
43obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o 43obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
44obj-$(CONFIG_MMC_VUB300) += vub300.o
44obj-$(CONFIG_MMC_USHC) += ushc.o 45obj-$(CONFIG_MMC_USHC) += ushc.o
45 46
46obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o 47obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 87e1f57ec9ba..66dcddb9c205 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1769,9 +1769,6 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
1769 int i, ret; 1769 int i, ret;
1770 struct dw_mci *host = platform_get_drvdata(pdev); 1770 struct dw_mci *host = platform_get_drvdata(pdev);
1771 1771
1772 if (host->vmmc)
1773 regulator_enable(host->vmmc);
1774
1775 for (i = 0; i < host->num_slots; i++) { 1772 for (i = 0; i < host->num_slots; i++) {
1776 struct dw_mci_slot *slot = host->slot[i]; 1773 struct dw_mci_slot *slot = host->slot[i];
1777 if (!slot) 1774 if (!slot)
@@ -1798,6 +1795,9 @@ static int dw_mci_resume(struct platform_device *pdev)
1798 int i, ret; 1795 int i, ret;
1799 struct dw_mci *host = platform_get_drvdata(pdev); 1796 struct dw_mci *host = platform_get_drvdata(pdev);
1800 1797
1798 if (host->vmmc)
1799 regulator_enable(host->vmmc);
1800
1801 if (host->dma_ops->init) 1801 if (host->dma_ops->init)
1802 host->dma_ops->init(host); 1802 host->dma_ops->init(host);
1803 1803
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index f8b5f37007b2..936bbca19c0a 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -18,11 +18,9 @@
18#include <linux/dma-mapping.h> 18#include <linux/dma-mapping.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/device.h> 20#include <linux/device.h>
21
22#include <linux/mmc/host.h> 21#include <linux/mmc/host.h>
23 22#include <linux/scatterlist.h>
24#include <asm/scatterlist.h> 23#include <linux/io.h>
25#include <asm/io.h>
26 24
27#include "sdhci.h" 25#include "sdhci.h"
28 26
@@ -46,14 +44,14 @@ struct sdhci_pci_slot;
46struct sdhci_pci_fixes { 44struct sdhci_pci_fixes {
47 unsigned int quirks; 45 unsigned int quirks;
48 46
49 int (*probe)(struct sdhci_pci_chip*); 47 int (*probe) (struct sdhci_pci_chip *);
50 48
51 int (*probe_slot)(struct sdhci_pci_slot*); 49 int (*probe_slot) (struct sdhci_pci_slot *);
52 void (*remove_slot)(struct sdhci_pci_slot*, int); 50 void (*remove_slot) (struct sdhci_pci_slot *, int);
53 51
54 int (*suspend)(struct sdhci_pci_chip*, 52 int (*suspend) (struct sdhci_pci_chip *,
55 pm_message_t); 53 pm_message_t);
56 int (*resume)(struct sdhci_pci_chip*); 54 int (*resume) (struct sdhci_pci_chip *);
57}; 55};
58 56
59struct sdhci_pci_slot { 57struct sdhci_pci_slot {
@@ -329,6 +327,11 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
329 return ret; 327 return ret;
330 } 328 }
331 329
 330	/* quirk for unstable RO-detection on JM388 chips */
331 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
332 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
333 chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
334
332 return 0; 335 return 0;
333} 336}
334 337
@@ -402,7 +405,7 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
402 405
403 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 406 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
404 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { 407 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
405 for (i = 0;i < chip->num_slots;i++) 408 for (i = 0; i < chip->num_slots; i++)
406 jmicron_enable_mmc(chip->slots[i]->host, 0); 409 jmicron_enable_mmc(chip->slots[i]->host, 0);
407 } 410 }
408 411
@@ -415,7 +418,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
415 418
416 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 419 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
417 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { 420 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
418 for (i = 0;i < chip->num_slots;i++) 421 for (i = 0; i < chip->num_slots; i++)
419 jmicron_enable_mmc(chip->slots[i]->host, 1); 422 jmicron_enable_mmc(chip->slots[i]->host, 1);
420 } 423 }
421 424
@@ -798,7 +801,7 @@ static struct sdhci_ops sdhci_pci_ops = {
798 801
799#ifdef CONFIG_PM 802#ifdef CONFIG_PM
800 803
801static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) 804static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
802{ 805{
803 struct sdhci_pci_chip *chip; 806 struct sdhci_pci_chip *chip;
804 struct sdhci_pci_slot *slot; 807 struct sdhci_pci_slot *slot;
@@ -810,7 +813,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
810 if (!chip) 813 if (!chip)
811 return 0; 814 return 0;
812 815
813 for (i = 0;i < chip->num_slots;i++) { 816 for (i = 0; i < chip->num_slots; i++) {
814 slot = chip->slots[i]; 817 slot = chip->slots[i];
815 if (!slot) 818 if (!slot)
816 continue; 819 continue;
@@ -818,7 +821,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
818 ret = sdhci_suspend_host(slot->host, state); 821 ret = sdhci_suspend_host(slot->host, state);
819 822
820 if (ret) { 823 if (ret) {
821 for (i--;i >= 0;i--) 824 for (i--; i >= 0; i--)
822 sdhci_resume_host(chip->slots[i]->host); 825 sdhci_resume_host(chip->slots[i]->host);
823 return ret; 826 return ret;
824 } 827 }
@@ -833,7 +836,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
833 if (chip->fixes && chip->fixes->suspend) { 836 if (chip->fixes && chip->fixes->suspend) {
834 ret = chip->fixes->suspend(chip, state); 837 ret = chip->fixes->suspend(chip, state);
835 if (ret) { 838 if (ret) {
836 for (i = chip->num_slots - 1;i >= 0;i--) 839 for (i = chip->num_slots - 1; i >= 0; i--)
837 sdhci_resume_host(chip->slots[i]->host); 840 sdhci_resume_host(chip->slots[i]->host);
838 return ret; 841 return ret;
839 } 842 }
@@ -855,7 +858,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
855 return 0; 858 return 0;
856} 859}
857 860
858static int sdhci_pci_resume (struct pci_dev *pdev) 861static int sdhci_pci_resume(struct pci_dev *pdev)
859{ 862{
860 struct sdhci_pci_chip *chip; 863 struct sdhci_pci_chip *chip;
861 struct sdhci_pci_slot *slot; 864 struct sdhci_pci_slot *slot;
@@ -877,7 +880,7 @@ static int sdhci_pci_resume (struct pci_dev *pdev)
877 return ret; 880 return ret;
878 } 881 }
879 882
880 for (i = 0;i < chip->num_slots;i++) { 883 for (i = 0; i < chip->num_slots; i++) {
881 slot = chip->slots[i]; 884 slot = chip->slots[i];
882 if (!slot) 885 if (!slot)
883 continue; 886 continue;
@@ -1059,7 +1062,7 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
1059 } 1062 }
1060 1063
1061 chip->pdev = pdev; 1064 chip->pdev = pdev;
1062 chip->fixes = (const struct sdhci_pci_fixes*)ent->driver_data; 1065 chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
1063 if (chip->fixes) 1066 if (chip->fixes)
1064 chip->quirks = chip->fixes->quirks; 1067 chip->quirks = chip->fixes->quirks;
1065 chip->num_slots = slots; 1068 chip->num_slots = slots;
@@ -1074,10 +1077,10 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
1074 1077
1075 slots = chip->num_slots; /* Quirk may have changed this */ 1078 slots = chip->num_slots; /* Quirk may have changed this */
1076 1079
1077 for (i = 0;i < slots;i++) { 1080 for (i = 0; i < slots; i++) {
1078 slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i); 1081 slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i);
1079 if (IS_ERR(slot)) { 1082 if (IS_ERR(slot)) {
1080 for (i--;i >= 0;i--) 1083 for (i--; i >= 0; i--)
1081 sdhci_pci_remove_slot(chip->slots[i]); 1084 sdhci_pci_remove_slot(chip->slots[i]);
1082 ret = PTR_ERR(slot); 1085 ret = PTR_ERR(slot);
1083 goto free; 1086 goto free;
@@ -1105,7 +1108,7 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
1105 chip = pci_get_drvdata(pdev); 1108 chip = pci_get_drvdata(pdev);
1106 1109
1107 if (chip) { 1110 if (chip) {
1108 for (i = 0;i < chip->num_slots; i++) 1111 for (i = 0; i < chip->num_slots; i++)
1109 sdhci_pci_remove_slot(chip->slots[i]); 1112 sdhci_pci_remove_slot(chip->slots[i]);
1110 1113
1111 pci_set_drvdata(pdev, NULL); 1114 pci_set_drvdata(pdev, NULL);
@@ -1116,9 +1119,9 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
1116} 1119}
1117 1120
1118static struct pci_driver sdhci_driver = { 1121static struct pci_driver sdhci_driver = {
1119 .name = "sdhci-pci", 1122 .name = "sdhci-pci",
1120 .id_table = pci_ids, 1123 .id_table = pci_ids,
1121 .probe = sdhci_pci_probe, 1124 .probe = sdhci_pci_probe,
1122 .remove = __devexit_p(sdhci_pci_remove), 1125 .remove = __devexit_p(sdhci_pci_remove),
1123 .suspend = sdhci_pci_suspend, 1126 .suspend = sdhci_pci_suspend,
1124 .resume = sdhci_pci_resume, 1127 .resume = sdhci_pci_resume,
diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c
index 5a61208cbc66..089c9a68b7b1 100644
--- a/drivers/mmc/host/sdhci-pxa.c
+++ b/drivers/mmc/host/sdhci-pxa.c
@@ -69,7 +69,45 @@ static void set_clock(struct sdhci_host *host, unsigned int clock)
69 } 69 }
70} 70}
71 71
72static int set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
73{
74 u16 ctrl_2;
75
76 /*
 77	 * Set V18_EN -- UHS modes do not work without this.
 78	 * This does not change the signaling voltage.
79 */
80 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
81
82 /* Select Bus Speed Mode for host */
83 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
84 switch (uhs) {
85 case MMC_TIMING_UHS_SDR12:
86 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
87 break;
88 case MMC_TIMING_UHS_SDR25:
89 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
90 break;
91 case MMC_TIMING_UHS_SDR50:
92 ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
93 break;
94 case MMC_TIMING_UHS_SDR104:
95 ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
96 break;
97 case MMC_TIMING_UHS_DDR50:
98 ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
99 break;
100 }
101
102 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
103 pr_debug("%s:%s uhs = %d, ctrl_2 = %04X\n",
104 __func__, mmc_hostname(host->mmc), uhs, ctrl_2);
105
106 return 0;
107}
108
72static struct sdhci_ops sdhci_pxa_ops = { 109static struct sdhci_ops sdhci_pxa_ops = {
110 .set_uhs_signaling = set_uhs_signaling,
73 .set_clock = set_clock, 111 .set_clock = set_clock,
74}; 112};
75 113
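set_uhs_signaling() above programs the UHS mode field of SDHCI_HOST_CONTROL2 and, for SDR50/SDR104/DDR50, also keeps the 1.8 V enable bit set. A compact sketch of that timing-to-bits mapping (the constants below are placeholders, not the SDHCI register values):

#include <stdio.h>

/* Placeholder values mirroring the mapping in set_uhs_signaling(). */
enum uhs_timing { UHS_SDR12, UHS_SDR25, UHS_SDR50, UHS_SDR104, UHS_DDR50 };

#define CTRL_UHS_MASK  0x7
#define CTRL_VDD_180   0x8   /* kept set for SDR50/SDR104/DDR50 */

static unsigned ctrl2_bits(enum uhs_timing t)
{
    unsigned v = (unsigned)t & CTRL_UHS_MASK;

    if (t == UHS_SDR50 || t == UHS_SDR104 || t == UHS_DDR50)
        v |= CTRL_VDD_180;
    return v;
}

int main(void)
{
    printf("SDR104 -> 0x%x\n", ctrl2_bits(UHS_SDR104));  /* mode 3 + 1.8V bit */
    printf("SDR25  -> 0x%x\n", ctrl2_bits(UHS_SDR25));   /* mode 1, no 1.8V   */
    return 0;
}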
@@ -136,11 +174,19 @@ static int __devinit sdhci_pxa_probe(struct platform_device *pdev)
136 host->hw_name = "MMC"; 174 host->hw_name = "MMC";
137 host->ops = &sdhci_pxa_ops; 175 host->ops = &sdhci_pxa_ops;
138 host->irq = irq; 176 host->irq = irq;
139 host->quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 177 host->quirks = SDHCI_QUIRK_BROKEN_ADMA
178 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
179 | SDHCI_QUIRK_32BIT_DMA_ADDR
180 | SDHCI_QUIRK_32BIT_DMA_SIZE
181 | SDHCI_QUIRK_32BIT_ADMA_SIZE
182 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
140 183
141 if (pdata->quirks) 184 if (pdata->quirks)
142 host->quirks |= pdata->quirks; 185 host->quirks |= pdata->quirks;
143 186
187 /* enable 1/8V DDR capable */
188 host->mmc->caps |= MMC_CAP_1_8V_DDR;
189
144 /* If slot design supports 8 bit data, indicate this to MMC. */ 190 /* If slot design supports 8 bit data, indicate this to MMC. */
145 if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) 191 if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
146 host->mmc->caps |= MMC_CAP_8_BIT_DATA; 192 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f7e1f964395f..343c97edba32 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -184,6 +184,8 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
184 clk_enable(clk); 184 clk_enable(clk);
185 pltfm_host->clk = clk; 185 pltfm_host->clk = clk;
186 186
187 host->mmc->pm_caps = plat->pm_flags;
188
187 if (plat->is_8bit) 189 if (plat->is_8bit)
188 host->mmc->caps |= MMC_CAP_8_BIT_DATA; 190 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
189 191
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 5d20661bc357..58d5436ff649 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -38,13 +38,16 @@
38#define SDHCI_USE_LEDS_CLASS 38#define SDHCI_USE_LEDS_CLASS
39#endif 39#endif
40 40
41#define MAX_TUNING_LOOP 40
42
41static unsigned int debug_quirks = 0; 43static unsigned int debug_quirks = 0;
42 44
43static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
44static void sdhci_finish_data(struct sdhci_host *); 45static void sdhci_finish_data(struct sdhci_host *);
45 46
46static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); 47static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
47static void sdhci_finish_command(struct sdhci_host *); 48static void sdhci_finish_command(struct sdhci_host *);
49static int sdhci_execute_tuning(struct mmc_host *mmc);
50static void sdhci_tuning_timer(unsigned long data);
48 51
49static void sdhci_dumpregs(struct sdhci_host *host) 52static void sdhci_dumpregs(struct sdhci_host *host)
50{ 53{
@@ -84,6 +87,8 @@ static void sdhci_dumpregs(struct sdhci_host *host)
84 printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", 87 printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
85 sdhci_readw(host, SDHCI_COMMAND), 88 sdhci_readw(host, SDHCI_COMMAND),
86 sdhci_readl(host, SDHCI_MAX_CURRENT)); 89 sdhci_readl(host, SDHCI_MAX_CURRENT));
90 printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n",
91 sdhci_readw(host, SDHCI_HOST_CONTROL2));
87 92
88 if (host->flags & SDHCI_USE_ADMA) 93 if (host->flags & SDHCI_USE_ADMA)
89 printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 94 printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
@@ -157,6 +162,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
157 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 162 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
158 ier = sdhci_readl(host, SDHCI_INT_ENABLE); 163 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
159 164
165 if (host->ops->platform_reset_enter)
166 host->ops->platform_reset_enter(host, mask);
167
160 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 168 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
161 169
162 if (mask & SDHCI_RESET_ALL) 170 if (mask & SDHCI_RESET_ALL)
@@ -177,6 +185,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
177 mdelay(1); 185 mdelay(1);
178 } 186 }
179 187
188 if (host->ops->platform_reset_exit)
189 host->ops->platform_reset_exit(host, mask);
190
180 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 191 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
181 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); 192 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
182} 193}
@@ -591,9 +602,10 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
591 data->sg_len, direction); 602 data->sg_len, direction);
592} 603}
593 604
594static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) 605static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
595{ 606{
596 u8 count; 607 u8 count;
608 struct mmc_data *data = cmd->data;
597 unsigned target_timeout, current_timeout; 609 unsigned target_timeout, current_timeout;
598 610
599 /* 611 /*
@@ -605,9 +617,16 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
605 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 617 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
606 return 0xE; 618 return 0xE;
607 619
620 /* Unspecified timeout, assume max */
621 if (!data && !cmd->cmd_timeout_ms)
622 return 0xE;
623
608 /* timeout in us */ 624 /* timeout in us */
609 target_timeout = data->timeout_ns / 1000 + 625 if (!data)
610 data->timeout_clks / host->clock; 626 target_timeout = cmd->cmd_timeout_ms * 1000;
627 else
628 target_timeout = data->timeout_ns / 1000 +
629 data->timeout_clks / host->clock;
611 630
612 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 631 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
613 host->timeout_clk = host->clock / 1000; 632 host->timeout_clk = host->clock / 1000;
@@ -622,6 +641,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
622 * => 641 * =>
623 * (1) / (2) > 2^6 642 * (1) / (2) > 2^6
624 */ 643 */
644 BUG_ON(!host->timeout_clk);
625 count = 0; 645 count = 0;
626 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 646 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
627 while (current_timeout < target_timeout) { 647 while (current_timeout < target_timeout) {
@@ -632,8 +652,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
632 } 652 }
633 653
634 if (count >= 0xF) { 654 if (count >= 0xF) {
635 printk(KERN_WARNING "%s: Too large timeout requested!\n", 655 printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n",
636 mmc_hostname(host->mmc)); 656 mmc_hostname(host->mmc), cmd->opcode);
637 count = 0xE; 657 count = 0xE;
638 } 658 }
639 659
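The timeout count computed above is the smallest N for which 2^(13+N) cycles of timeout_clk covers the requested timeout, now taken from cmd->cmd_timeout_ms when there is no data phase. A worked standalone example of the doubling loop (the clock and timeout values are assumptions for illustration):

#include <stdio.h>

/* Reproduce the doubling loop of sdhci_calc_timeout(). */
int main(void)
{
    unsigned timeout_clk = 52000;        /* kHz (assumed) */
    unsigned target_timeout = 100000;    /* us: a 100 ms request */
    unsigned current_timeout = (1 << 13) * 1000 / timeout_clk;
    unsigned count = 0;

    while (current_timeout < target_timeout) {
        count++;
        current_timeout <<= 1;
        if (count >= 0xF) {              /* clamp to 0xE like the driver */
            count = 0xE;
            break;
        }
    }
    printf("count = %u, covered timeout ~= %u us\n", count, current_timeout);
    return 0;
}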
@@ -651,15 +671,21 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
651 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs); 671 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
652} 672}
653 673
654static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) 674static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
655{ 675{
656 u8 count; 676 u8 count;
657 u8 ctrl; 677 u8 ctrl;
678 struct mmc_data *data = cmd->data;
658 int ret; 679 int ret;
659 680
660 WARN_ON(host->data); 681 WARN_ON(host->data);
661 682
662 if (data == NULL) 683 if (data || (cmd->flags & MMC_RSP_BUSY)) {
684 count = sdhci_calc_timeout(host, cmd);
685 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
686 }
687
688 if (!data)
663 return; 689 return;
664 690
665 /* Sanity checks */ 691 /* Sanity checks */
@@ -669,9 +695,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
669 695
670 host->data = data; 696 host->data = data;
671 host->data_early = 0; 697 host->data_early = 0;
672 698 host->data->bytes_xfered = 0;
673 count = sdhci_calc_timeout(host, data);
674 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
675 699
676 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) 700 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
677 host->flags |= SDHCI_REQ_USE_DMA; 701 host->flags |= SDHCI_REQ_USE_DMA;
@@ -807,15 +831,17 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
807 831
808 sdhci_set_transfer_irqs(host); 832 sdhci_set_transfer_irqs(host);
809 833
810 /* We do not handle DMA boundaries, so set it to max (512 KiB) */ 834 /* Set the DMA boundary value and block size */
811 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE); 835 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
836 data->blksz), SDHCI_BLOCK_SIZE);
812 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 837 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
813} 838}
814 839
815static void sdhci_set_transfer_mode(struct sdhci_host *host, 840static void sdhci_set_transfer_mode(struct sdhci_host *host,
816 struct mmc_data *data) 841 struct mmc_command *cmd)
817{ 842{
818 u16 mode; 843 u16 mode;
844 struct mmc_data *data = cmd->data;
819 845
820 if (data == NULL) 846 if (data == NULL)
821 return; 847 return;
@@ -823,12 +849,20 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
823 WARN_ON(!host->data); 849 WARN_ON(!host->data);
824 850
825 mode = SDHCI_TRNS_BLK_CNT_EN; 851 mode = SDHCI_TRNS_BLK_CNT_EN;
826 if (data->blocks > 1) { 852 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
827 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 853 mode |= SDHCI_TRNS_MULTI;
828 mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12; 854 /*
829 else 855 * If we are sending CMD23, CMD12 never gets sent
830 mode |= SDHCI_TRNS_MULTI; 856 * on successful completion (so no Auto-CMD12).
857 */
858 if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
859 mode |= SDHCI_TRNS_AUTO_CMD12;
860 else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
861 mode |= SDHCI_TRNS_AUTO_CMD23;
862 sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
863 }
831 } 864 }
865
832 if (data->flags & MMC_DATA_READ) 866 if (data->flags & MMC_DATA_READ)
833 mode |= SDHCI_TRNS_READ; 867 mode |= SDHCI_TRNS_READ;
834 if (host->flags & SDHCI_REQ_USE_DMA) 868 if (host->flags & SDHCI_REQ_USE_DMA)
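The transfer-mode logic above prefers Auto-CMD23 when the request carries a CMD23 (mrq->sbc) and the host advertises SDHCI_AUTO_CMD23 (writing the block count argument to SDHCI_ARGUMENT2), and falls back to Auto-CMD12 for open-ended multi-block transfers. A minimal sketch of that decision with made-up flag values:

#include <stdio.h>

/* Illustrative flag values -- not the SDHCI_TRNS_* register definitions. */
#define TRNS_MULTI       0x02
#define TRNS_AUTO_CMD12  0x04
#define TRNS_AUTO_CMD23  0x08

/* Mirror the decision in sdhci_set_transfer_mode(). */
static unsigned transfer_mode(int multi, int have_sbc, int auto12, int auto23)
{
    unsigned mode = 0;

    if (!multi)
        return mode;
    mode |= TRNS_MULTI;
    if (!have_sbc && auto12)
        mode |= TRNS_AUTO_CMD12;
    else if (have_sbc && auto23)
        mode |= TRNS_AUTO_CMD23;
    return mode;
}

int main(void)
{
    printf("0x%x\n", transfer_mode(1, 0, 1, 1)); /* 0x6: multi + Auto-CMD12 */
    printf("0x%x\n", transfer_mode(1, 1, 1, 1)); /* 0xa: multi + Auto-CMD23 */
    return 0;
}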
@@ -868,7 +902,15 @@ static void sdhci_finish_data(struct sdhci_host *host)
868 else 902 else
869 data->bytes_xfered = data->blksz * data->blocks; 903 data->bytes_xfered = data->blksz * data->blocks;
870 904
871 if (data->stop) { 905 /*
906 * Need to send CMD12 if -
907 * a) open-ended multiblock transfer (no CMD23)
908 * b) error in multiblock transfer
909 */
910 if (data->stop &&
911 (data->error ||
912 !host->mrq->sbc)) {
913
872 /* 914 /*
873 * The controller needs a reset of internal state machines 915 * The controller needs a reset of internal state machines
874 * upon error conditions. 916 * upon error conditions.
@@ -920,11 +962,11 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
920 962
921 host->cmd = cmd; 963 host->cmd = cmd;
922 964
923 sdhci_prepare_data(host, cmd->data); 965 sdhci_prepare_data(host, cmd);
924 966
925 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); 967 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
926 968
927 sdhci_set_transfer_mode(host, cmd->data); 969 sdhci_set_transfer_mode(host, cmd);
928 970
929 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 971 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
930 printk(KERN_ERR "%s: Unsupported response type!\n", 972 printk(KERN_ERR "%s: Unsupported response type!\n",
@@ -947,7 +989,9 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
947 flags |= SDHCI_CMD_CRC; 989 flags |= SDHCI_CMD_CRC;
948 if (cmd->flags & MMC_RSP_OPCODE) 990 if (cmd->flags & MMC_RSP_OPCODE)
949 flags |= SDHCI_CMD_INDEX; 991 flags |= SDHCI_CMD_INDEX;
950 if (cmd->data) 992
993 /* CMD19 is special in that the Data Present Select should be set */
994 if (cmd->data || (cmd->opcode == MMC_SEND_TUNING_BLOCK))
951 flags |= SDHCI_CMD_DATA; 995 flags |= SDHCI_CMD_DATA;
952 996
953 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); 997 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
@@ -977,19 +1021,27 @@ static void sdhci_finish_command(struct sdhci_host *host)
977 1021
978 host->cmd->error = 0; 1022 host->cmd->error = 0;
979 1023
980 if (host->data && host->data_early) 1024 /* Finished CMD23, now send actual command. */
981 sdhci_finish_data(host); 1025 if (host->cmd == host->mrq->sbc) {
1026 host->cmd = NULL;
1027 sdhci_send_command(host, host->mrq->cmd);
1028 } else {
982 1029
983 if (!host->cmd->data) 1030 /* Processed actual command. */
984 tasklet_schedule(&host->finish_tasklet); 1031 if (host->data && host->data_early)
1032 sdhci_finish_data(host);
985 1033
986 host->cmd = NULL; 1034 if (!host->cmd->data)
1035 tasklet_schedule(&host->finish_tasklet);
1036
1037 host->cmd = NULL;
1038 }
987} 1039}
988 1040
989static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 1041static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
990{ 1042{
991 int div; 1043 int div = 0; /* Initialized for compiler warning */
992 u16 clk; 1044 u16 clk = 0;
993 unsigned long timeout; 1045 unsigned long timeout;
994 1046
995 if (clock == host->clock) 1047 if (clock == host->clock)
@@ -1007,14 +1059,45 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1007 goto out; 1059 goto out;
1008 1060
1009 if (host->version >= SDHCI_SPEC_300) { 1061 if (host->version >= SDHCI_SPEC_300) {
1010 /* Version 3.00 divisors must be a multiple of 2. */ 1062 /*
1011 if (host->max_clk <= clock) 1063 * Check if the Host Controller supports Programmable Clock
1012 div = 1; 1064 * Mode.
1013 else { 1065 */
1014 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) { 1066 if (host->clk_mul) {
1015 if ((host->max_clk / div) <= clock) 1067 u16 ctrl;
1016 break; 1068
1069 /*
1070 * We need to figure out whether the Host Driver needs
1071 * to select Programmable Clock Mode, or the value can
1072 * be set automatically by the Host Controller based on
1073 * the Preset Value registers.
1074 */
1075 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1076 if (!(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1077 for (div = 1; div <= 1024; div++) {
1078 if (((host->max_clk * host->clk_mul) /
1079 div) <= clock)
1080 break;
1081 }
1082 /*
1083 * Set Programmable Clock Mode in the Clock
1084 * Control register.
1085 */
1086 clk = SDHCI_PROG_CLOCK_MODE;
1087 div--;
1017 } 1088 }
1089 } else {
1090 /* Version 3.00 divisors must be a multiple of 2. */
1091 if (host->max_clk <= clock)
1092 div = 1;
1093 else {
1094 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1095 div += 2) {
1096 if ((host->max_clk / div) <= clock)
1097 break;
1098 }
1099 }
1100 div >>= 1;
1018 } 1101 }
1019 } else { 1102 } else {
1020 /* Version 2.00 divisors must be a power of 2. */ 1103 /* Version 2.00 divisors must be a power of 2. */
@@ -1022,10 +1105,10 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1022 if ((host->max_clk / div) <= clock) 1105 if ((host->max_clk / div) <= clock)
1023 break; 1106 break;
1024 } 1107 }
1108 div >>= 1;
1025 } 1109 }
1026 div >>= 1;
1027 1110
1028 clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 1111 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1029 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 1112 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1030 << SDHCI_DIVIDER_HI_SHIFT; 1113 << SDHCI_DIVIDER_HI_SHIFT;
1031 clk |= SDHCI_CLOCK_INT_EN; 1114 clk |= SDHCI_CLOCK_INT_EN;
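With an SDHCI 3.00 host that reports a clock multiplier and has preset values disabled, the new path searches div = 1..1024 for the first divisor where (max_clk * clk_mul) / div does not exceed the requested clock, sets the programmable-clock mode bit, and programs div - 1. A worked example of that search (base clock and multiplier are assumed values):

#include <stdio.h>

/* Reproduce the programmable clock mode divisor search from sdhci_set_clock(). */
int main(void)
{
    unsigned max_clk = 100000000;   /* 100 MHz base clock (assumed) */
    unsigned clk_mul = 10;          /* programmable clock multiplier (assumed) */
    unsigned target  = 48000000;    /* 48 MHz requested */
    unsigned div;

    for (div = 1; div <= 1024; div++)
        if ((max_clk * clk_mul) / div <= target)
            break;

    /* In programmable clock mode the register takes div - 1. */
    printf("div = %u, programmed value = %u, resulting clock = %u Hz\n",
           div, div - 1, (max_clk * clk_mul) / div);
    return 0;
}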
@@ -1131,7 +1214,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1131#ifndef SDHCI_USE_LEDS_CLASS 1214#ifndef SDHCI_USE_LEDS_CLASS
1132 sdhci_activate_led(host); 1215 sdhci_activate_led(host);
1133#endif 1216#endif
1134 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) { 1217
1218 /*
1219 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1220 * requests if Auto-CMD12 is enabled.
1221 */
1222 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
1135 if (mrq->stop) { 1223 if (mrq->stop) {
1136 mrq->data->stop = NULL; 1224 mrq->data->stop = NULL;
1137 mrq->stop = NULL; 1225 mrq->stop = NULL;
@@ -1150,8 +1238,30 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1150 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1238 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1151 host->mrq->cmd->error = -ENOMEDIUM; 1239 host->mrq->cmd->error = -ENOMEDIUM;
1152 tasklet_schedule(&host->finish_tasklet); 1240 tasklet_schedule(&host->finish_tasklet);
1153 } else 1241 } else {
1154 sdhci_send_command(host, mrq->cmd); 1242 u32 present_state;
1243
1244 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1245 /*
1246 * Check if the re-tuning timer has already expired and there
1247 * is no on-going data transfer. If so, we need to execute
1248 * tuning procedure before sending command.
1249 */
1250 if ((host->flags & SDHCI_NEEDS_RETUNING) &&
1251 !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
1252 spin_unlock_irqrestore(&host->lock, flags);
1253 sdhci_execute_tuning(mmc);
1254 spin_lock_irqsave(&host->lock, flags);
1255
1256 /* Restore original mmc_request structure */
1257 host->mrq = mrq;
1258 }
1259
1260 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1261 sdhci_send_command(host, mrq->sbc);
1262 else
1263 sdhci_send_command(host, mrq->cmd);
1264 }
1155 1265
1156 mmiowb(); 1266 mmiowb();
1157 spin_unlock_irqrestore(&host->lock, flags); 1267 spin_unlock_irqrestore(&host->lock, flags);
@@ -1222,7 +1332,84 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1222 else 1332 else
1223 ctrl &= ~SDHCI_CTRL_HISPD; 1333 ctrl &= ~SDHCI_CTRL_HISPD;
1224 1334
1225 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1335 if (host->version >= SDHCI_SPEC_300) {
1336 u16 clk, ctrl_2;
1337 unsigned int clock;
1338
1339 /* In case of UHS-I modes, set High Speed Enable */
1340 if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
1341 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1342 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1343 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1344 (ios->timing == MMC_TIMING_UHS_SDR12))
1345 ctrl |= SDHCI_CTRL_HISPD;
1346
1347 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1348 if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1349 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1350 /*
1351 * We only need to set Driver Strength if the
1352 * preset value enable is not set.
1353 */
1354 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1355 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1356 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1357 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1358 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1359
1360 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1361 } else {
1362 /*
1363 * According to SDHC Spec v3.00, if the Preset Value
1364 * Enable in the Host Control 2 register is set, we
1365 * need to reset SD Clock Enable before changing High
 1366	 * Speed Enable to avoid generating clock glitches.
1367 */
1368
1369 /* Reset SD Clock Enable */
1370 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1371 clk &= ~SDHCI_CLOCK_CARD_EN;
1372 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1373
1374 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1375
1376 /* Re-enable SD Clock */
1377 clock = host->clock;
1378 host->clock = 0;
1379 sdhci_set_clock(host, clock);
1380 }
1381
1382
1383 /* Reset SD Clock Enable */
1384 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1385 clk &= ~SDHCI_CLOCK_CARD_EN;
1386 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1387
1388 if (host->ops->set_uhs_signaling)
1389 host->ops->set_uhs_signaling(host, ios->timing);
1390 else {
1391 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1392 /* Select Bus Speed Mode for host */
1393 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1394 if (ios->timing == MMC_TIMING_UHS_SDR12)
1395 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1396 else if (ios->timing == MMC_TIMING_UHS_SDR25)
1397 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1398 else if (ios->timing == MMC_TIMING_UHS_SDR50)
1399 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1400 else if (ios->timing == MMC_TIMING_UHS_SDR104)
1401 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1402 else if (ios->timing == MMC_TIMING_UHS_DDR50)
1403 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1404 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1405 }
1406
1407 /* Re-enable SD Clock */
1408 clock = host->clock;
1409 host->clock = 0;
1410 sdhci_set_clock(host, clock);
1411 } else
1412 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1226 1413
1227 /* 1414 /*
1228 * Some (ENE) controllers go apeshit on some ios operation, 1415 * Some (ENE) controllers go apeshit on some ios operation,
@@ -1237,14 +1424,11 @@ out:
1237 spin_unlock_irqrestore(&host->lock, flags); 1424 spin_unlock_irqrestore(&host->lock, flags);
1238} 1425}
1239 1426
1240static int sdhci_get_ro(struct mmc_host *mmc) 1427static int check_ro(struct sdhci_host *host)
1241{ 1428{
1242 struct sdhci_host *host;
1243 unsigned long flags; 1429 unsigned long flags;
1244 int is_readonly; 1430 int is_readonly;
1245 1431
1246 host = mmc_priv(mmc);
1247
1248 spin_lock_irqsave(&host->lock, flags); 1432 spin_lock_irqsave(&host->lock, flags);
1249 1433
1250 if (host->flags & SDHCI_DEVICE_DEAD) 1434 if (host->flags & SDHCI_DEVICE_DEAD)
@@ -1262,6 +1446,29 @@ static int sdhci_get_ro(struct mmc_host *mmc)
1262 !is_readonly : is_readonly; 1446 !is_readonly : is_readonly;
1263} 1447}
1264 1448
1449#define SAMPLE_COUNT 5
1450
1451static int sdhci_get_ro(struct mmc_host *mmc)
1452{
1453 struct sdhci_host *host;
1454 int i, ro_count;
1455
1456 host = mmc_priv(mmc);
1457
1458 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1459 return check_ro(host);
1460
1461 ro_count = 0;
1462 for (i = 0; i < SAMPLE_COUNT; i++) {
1463 if (check_ro(host)) {
1464 if (++ro_count > SAMPLE_COUNT / 2)
1465 return 1;
1466 }
1467 msleep(30);
1468 }
1469 return 0;
1470}
1471
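The SDHCI_QUIRK_UNSTABLE_RO_DETECT path above is a simple majority vote over five reads of the write-protect bit. A stand-alone sketch of the same idea, with the register read stubbed out (the sample pattern below is invented for illustration):

#include <stdio.h>

#define SAMPLE_COUNT 5

/* Stub standing in for check_ro(); alternates to mimic a bouncing WP line. */
static int read_wp_sample(int i)
{
    return (i % 2) == 0;  /* invented samples: 1, 0, 1, 0, 1 */
}

int main(void)
{
    int i, ro_count = 0;

    for (i = 0; i < SAMPLE_COUNT; i++) {
        if (read_wp_sample(i) && ++ro_count > SAMPLE_COUNT / 2) {
            printf("read-only\n");
            return 0;
        }
        /* the driver sleeps 30 ms between samples; omitted here */
    }
    printf("writable\n");
    return 0;
}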
1265static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1472static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1266{ 1473{
1267 struct sdhci_host *host; 1474 struct sdhci_host *host;
@@ -1284,11 +1491,322 @@ out:
1284 spin_unlock_irqrestore(&host->lock, flags); 1491 spin_unlock_irqrestore(&host->lock, flags);
1285} 1492}
1286 1493
1494static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1495 struct mmc_ios *ios)
1496{
1497 struct sdhci_host *host;
1498 u8 pwr;
1499 u16 clk, ctrl;
1500 u32 present_state;
1501
1502 host = mmc_priv(mmc);
1503
1504 /*
1505 * Signal Voltage Switching is only applicable for Host Controllers
1506 * v3.00 and above.
1507 */
1508 if (host->version < SDHCI_SPEC_300)
1509 return 0;
1510
1511 /*
1512 * We first check whether the request is to set signalling voltage
1513 * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
1514 */
1515 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1516 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1517 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1518 ctrl &= ~SDHCI_CTRL_VDD_180;
1519 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1520
1521 /* Wait for 5ms */
1522 usleep_range(5000, 5500);
1523
1524 /* 3.3V regulator output should be stable within 5 ms */
1525 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1526 if (!(ctrl & SDHCI_CTRL_VDD_180))
1527 return 0;
1528 else {
1529 printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V "
1530 "signalling voltage failed\n");
1531 return -EIO;
1532 }
1533 } else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
1534 (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
1535 /* Stop SDCLK */
1536 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1537 clk &= ~SDHCI_CLOCK_CARD_EN;
1538 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1539
1540 /* Check whether DAT[3:0] is 0000 */
1541 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1542 if (!((present_state & SDHCI_DATA_LVL_MASK) >>
1543 SDHCI_DATA_LVL_SHIFT)) {
1544 /*
1545 * Enable 1.8V Signal Enable in the Host Control2
1546 * register
1547 */
1548 ctrl |= SDHCI_CTRL_VDD_180;
1549 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1550
1551 /* Wait for 5ms */
1552 usleep_range(5000, 5500);
1553
1554 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1555 if (ctrl & SDHCI_CTRL_VDD_180) {
1556 /* Provide SDCLK again and wait for 1ms*/
1557 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1558 clk |= SDHCI_CLOCK_CARD_EN;
1559 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1560 usleep_range(1000, 1500);
1561
1562 /*
1563 * If DAT[3:0] level is 1111b, then the card
1564 * was successfully switched to 1.8V signaling.
1565 */
1566 present_state = sdhci_readl(host,
1567 SDHCI_PRESENT_STATE);
1568 if ((present_state & SDHCI_DATA_LVL_MASK) ==
1569 SDHCI_DATA_LVL_MASK)
1570 return 0;
1571 }
1572 }
1573
1574 /*
1575 * If we are here, that means the switch to 1.8V signaling
1576 * failed. We power cycle the card, and retry initialization
1577 * sequence by setting S18R to 0.
1578 */
1579 pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
1580 pwr &= ~SDHCI_POWER_ON;
1581 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1582
1583 /* Wait for 1ms as per the spec */
1584 usleep_range(1000, 1500);
1585 pwr |= SDHCI_POWER_ON;
1586 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1587
1588 printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling "
1589 "voltage failed, retrying with S18R set to 0\n");
1590 return -EAGAIN;
1591 } else
1592 /* No signal voltage switch required */
1593 return 0;
1594}
1595
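The switch above is gated on the DAT[3:0] line levels read back from SDHCI_PRESENT_STATE: all-zero before 1.8V signalling is enabled, all-one once the card has switched. A minimal sketch of that mask arithmetic, using made-up register snapshots:

#include <stdio.h>

#define SDHCI_DATA_LVL_MASK  0x00F00000
#define SDHCI_DATA_LVL_SHIFT 20

int main(void)
{
    /* Hypothetical SDHCI_PRESENT_STATE snapshots around the 1.8V switch. */
    unsigned int before = 0x01070000;                    /* DAT[3:0] = 0000 */
    unsigned int after  = before | SDHCI_DATA_LVL_MASK;  /* DAT[3:0] = 1111 */

    printf("before switch: DAT = %x\n",
           (before & SDHCI_DATA_LVL_MASK) >> SDHCI_DATA_LVL_SHIFT);
    printf("after switch:  DAT = %x\n",
           (after & SDHCI_DATA_LVL_MASK) >> SDHCI_DATA_LVL_SHIFT);
    return 0;
}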
1596static int sdhci_execute_tuning(struct mmc_host *mmc)
1597{
1598 struct sdhci_host *host;
1599 u16 ctrl;
1600 u32 ier;
1601 int tuning_loop_counter = MAX_TUNING_LOOP;
1602 unsigned long timeout;
1603 int err = 0;
1604
1605 host = mmc_priv(mmc);
1606
1607 disable_irq(host->irq);
1608 spin_lock(&host->lock);
1609
1610 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1611
1612 /*
1613 * Host Controller needs tuning only in case of SDR104 mode
1614 * and for SDR50 mode when Use Tuning for SDR50 is set in
1615 * Capabilities register.
1616 */
1617 if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
1618 (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
1619 (host->flags & SDHCI_SDR50_NEEDS_TUNING)))
1620 ctrl |= SDHCI_CTRL_EXEC_TUNING;
1621 else {
1622 spin_unlock(&host->lock);
1623 enable_irq(host->irq);
1624 return 0;
1625 }
1626
1627 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1628
1629 /*
1630 * As per the Host Controller spec v3.00, tuning command
1631 * generates Buffer Read Ready interrupt, so enable that.
1632 *
1633 * Note: The spec clearly says that when tuning sequence
1634 * is being performed, the controller does not generate
1635 * interrupts other than Buffer Read Ready interrupt. But
1636 * to make sure we don't hit a controller bug, we _only_
1637 * enable Buffer Read Ready interrupt here.
1638 */
1639 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
1640 sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
1641
1642 /*
 1643 * Issue CMD19 repeatedly until Execute Tuning is cleared, the number
 1644 * of loops reaches 40, or a timeout of 150 ms occurs.
1645 */
1646 timeout = 150;
1647 do {
1648 struct mmc_command cmd = {0};
1649 struct mmc_request mrq = {0};
1650
1651 if (!tuning_loop_counter && !timeout)
1652 break;
1653
1654 cmd.opcode = MMC_SEND_TUNING_BLOCK;
1655 cmd.arg = 0;
1656 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1657 cmd.retries = 0;
1658 cmd.data = NULL;
1659 cmd.error = 0;
1660
1661 mrq.cmd = &cmd;
1662 host->mrq = &mrq;
1663
1664 /*
1665 * In response to CMD19, the card sends 64 bytes of tuning
1666 * block to the Host Controller. So we set the block size
1667 * to 64 here.
1668 */
1669 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
1670
1671 /*
1672 * The tuning block is sent by the card to the host controller.
1673 * So we set the TRNS_READ bit in the Transfer Mode register.
1674 * This also takes care of setting DMA Enable and Multi Block
1675 * Select in the same register to 0.
1676 */
1677 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
1678
1679 sdhci_send_command(host, &cmd);
1680
1681 host->cmd = NULL;
1682 host->mrq = NULL;
1683
1684 spin_unlock(&host->lock);
1685 enable_irq(host->irq);
1686
1687 /* Wait for Buffer Read Ready interrupt */
1688 wait_event_interruptible_timeout(host->buf_ready_int,
1689 (host->tuning_done == 1),
1690 msecs_to_jiffies(50));
1691 disable_irq(host->irq);
1692 spin_lock(&host->lock);
1693
1694 if (!host->tuning_done) {
1695 printk(KERN_INFO DRIVER_NAME ": Timeout waiting for "
1696 "Buffer Read Ready interrupt during tuning "
1697 "procedure, falling back to fixed sampling "
1698 "clock\n");
1699 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1700 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1701 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
1702 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1703
1704 err = -EIO;
1705 goto out;
1706 }
1707
1708 host->tuning_done = 0;
1709
1710 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1711 tuning_loop_counter--;
1712 timeout--;
1713 mdelay(1);
1714 } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
1715
1716 /*
1717 * The Host Driver has exhausted the maximum number of loops allowed,
1718 * so use fixed sampling frequency.
1719 */
1720 if (!tuning_loop_counter || !timeout) {
1721 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1722 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1723 } else {
1724 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
1725 printk(KERN_INFO DRIVER_NAME ": Tuning procedure"
1726 " failed, falling back to fixed sampling"
1727 " clock\n");
1728 err = -EIO;
1729 }
1730 }
1731
1732out:
1733 /*
 1734 * Start the re-tuning timer the first time we get here: only on
 1735 * that first pass is the SDHCI_NEEDS_RETUNING flag still clear,
 1736 * which is why we check it before actually starting
 1737 * the timer.
1738 */
1739 if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
1740 (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
1741 mod_timer(&host->tuning_timer, jiffies +
1742 host->tuning_count * HZ);
1743 /* Tuning mode 1 limits the maximum data length to 4MB */
1744 mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
1745 } else {
1746 host->flags &= ~SDHCI_NEEDS_RETUNING;
1747 /* Reload the new initial value for timer */
1748 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
1749 mod_timer(&host->tuning_timer, jiffies +
1750 host->tuning_count * HZ);
1751 }
1752
1753 /*
1754 * In case tuning fails, host controllers which support re-tuning can
1755 * try tuning again at a later time, when the re-tuning timer expires.
1756 * So for these controllers, we return 0. Since there might be other
 1757 * controllers that do not have this capability, we return an error for
 1758 * them.
1759 */
1760 if (err && host->tuning_count &&
1761 host->tuning_mode == SDHCI_TUNING_MODE_1)
1762 err = 0;
1763
1764 sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
1765 spin_unlock(&host->lock);
1766 enable_irq(host->irq);
1767
1768 return err;
1769}
1770
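With re-tuning mode 1 the data length per request is capped at 4 MiB, which the code above converts into a block-count limit. A worked example, assuming a typical 512-byte block size:

#include <stdio.h>

int main(void)
{
    unsigned int max_blk_size = 512;  /* assumed host block size */

    /* Re-tuning mode 1 limits a request to 4 MiB of data. */
    printf("max_blk_count = %u\n", (4 * 1024 * 1024) / max_blk_size);  /* 8192 */
    return 0;
}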
1771static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
1772{
1773 struct sdhci_host *host;
1774 u16 ctrl;
1775 unsigned long flags;
1776
1777 host = mmc_priv(mmc);
1778
1779 /* Host Controller v3.00 defines preset value registers */
1780 if (host->version < SDHCI_SPEC_300)
1781 return;
1782
1783 spin_lock_irqsave(&host->lock, flags);
1784
1785 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1786
1787 /*
1788 * We only enable or disable Preset Value if they are not already
1789 * enabled or disabled respectively. Otherwise, we bail out.
1790 */
1791 if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1792 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
1793 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1794 } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1795 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
1796 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1797 }
1798
1799 spin_unlock_irqrestore(&host->lock, flags);
1800}
1801
1287static const struct mmc_host_ops sdhci_ops = { 1802static const struct mmc_host_ops sdhci_ops = {
1288 .request = sdhci_request, 1803 .request = sdhci_request,
1289 .set_ios = sdhci_set_ios, 1804 .set_ios = sdhci_set_ios,
1290 .get_ro = sdhci_get_ro, 1805 .get_ro = sdhci_get_ro,
1291 .enable_sdio_irq = sdhci_enable_sdio_irq, 1806 .enable_sdio_irq = sdhci_enable_sdio_irq,
1807 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
1808 .execute_tuning = sdhci_execute_tuning,
1809 .enable_preset_value = sdhci_enable_preset_value,
1292}; 1810};
1293 1811
1294/*****************************************************************************\ 1812/*****************************************************************************\
@@ -1345,6 +1863,9 @@ static void sdhci_tasklet_finish(unsigned long param)
1345 1863
1346 del_timer(&host->timer); 1864 del_timer(&host->timer);
1347 1865
1866 if (host->version >= SDHCI_SPEC_300)
1867 del_timer(&host->tuning_timer);
1868
1348 mrq = host->mrq; 1869 mrq = host->mrq;
1349 1870
1350 /* 1871 /*
@@ -1418,6 +1939,20 @@ static void sdhci_timeout_timer(unsigned long data)
1418 spin_unlock_irqrestore(&host->lock, flags); 1939 spin_unlock_irqrestore(&host->lock, flags);
1419} 1940}
1420 1941
1942static void sdhci_tuning_timer(unsigned long data)
1943{
1944 struct sdhci_host *host;
1945 unsigned long flags;
1946
1947 host = (struct sdhci_host *)data;
1948
1949 spin_lock_irqsave(&host->lock, flags);
1950
1951 host->flags |= SDHCI_NEEDS_RETUNING;
1952
1953 spin_unlock_irqrestore(&host->lock, flags);
1954}
1955
1421/*****************************************************************************\ 1956/*****************************************************************************\
1422 * * 1957 * *
1423 * Interrupt handling * 1958 * Interrupt handling *
@@ -1506,6 +2041,16 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1506{ 2041{
1507 BUG_ON(intmask == 0); 2042 BUG_ON(intmask == 0);
1508 2043
2044 /* CMD19 generates _only_ Buffer Read Ready interrupt */
2045 if (intmask & SDHCI_INT_DATA_AVAIL) {
2046 if (SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) ==
2047 MMC_SEND_TUNING_BLOCK) {
2048 host->tuning_done = 1;
2049 wake_up(&host->buf_ready_int);
2050 return;
2051 }
2052 }
2053
1509 if (!host->data) { 2054 if (!host->data) {
1510 /* 2055 /*
1511 * The "data complete" interrupt is also used to 2056 * The "data complete" interrupt is also used to
@@ -1551,10 +2096,28 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1551 * We currently don't do anything fancy with DMA 2096 * We currently don't do anything fancy with DMA
1552 * boundaries, but as we can't disable the feature 2097 * boundaries, but as we can't disable the feature
1553 * we need to at least restart the transfer. 2098 * we need to at least restart the transfer.
2099 *
2100 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2101 * should return a valid address to continue from, but as
2102 * some controllers are faulty, don't trust them.
1554 */ 2103 */
1555 if (intmask & SDHCI_INT_DMA_END) 2104 if (intmask & SDHCI_INT_DMA_END) {
1556 sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS), 2105 u32 dmastart, dmanow;
1557 SDHCI_DMA_ADDRESS); 2106 dmastart = sg_dma_address(host->data->sg);
2107 dmanow = dmastart + host->data->bytes_xfered;
2108 /*
2109 * Force update to the next DMA block boundary.
2110 */
2111 dmanow = (dmanow &
2112 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2113 SDHCI_DEFAULT_BOUNDARY_SIZE;
2114 host->data->bytes_xfered = dmanow - dmastart;
2115 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
2116 " next 0x%08x\n",
2117 mmc_hostname(host->mmc), dmastart,
2118 host->data->bytes_xfered, dmanow);
2119 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2120 }
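The restart address computed above simply rounds the current position up to the next SDMA boundary (512 KiB by default in this driver). A self-contained sketch of that arithmetic, with made-up addresses:

#include <stdio.h>
#include <stdint.h>

#define BOUNDARY_SIZE (512 * 1024)  /* matches SDHCI_DEFAULT_BOUNDARY_SIZE */

int main(void)
{
    uint32_t dmastart = 0x10070000;  /* made-up sg DMA address */
    uint32_t done     = 0x26000;     /* bytes transferred so far */
    uint32_t dmanow   = dmastart + done;

    /* Round up to the next boundary, as the DMA-end interrupt handler does. */
    dmanow = (dmanow & ~(uint32_t)(BOUNDARY_SIZE - 1)) + BOUNDARY_SIZE;

    printf("restart DMA at 0x%08x (0x%x bytes counted as transferred)\n",
           dmanow, dmanow - dmastart);
    return 0;
}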
1558 2121
1559 if (intmask & SDHCI_INT_DATA_END) { 2122 if (intmask & SDHCI_INT_DATA_END) {
1560 if (host->cmd) { 2123 if (host->cmd) {
@@ -1664,6 +2227,14 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1664 2227
1665 sdhci_disable_card_detection(host); 2228 sdhci_disable_card_detection(host);
1666 2229
2230 /* Disable tuning since we are suspending */
2231 if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
2232 host->tuning_mode == SDHCI_TUNING_MODE_1) {
2233 host->flags &= ~SDHCI_NEEDS_RETUNING;
2234 mod_timer(&host->tuning_timer, jiffies +
2235 host->tuning_count * HZ);
2236 }
2237
1667 ret = mmc_suspend_host(host->mmc); 2238 ret = mmc_suspend_host(host->mmc);
1668 if (ret) 2239 if (ret)
1669 return ret; 2240 return ret;
@@ -1705,6 +2276,11 @@ int sdhci_resume_host(struct sdhci_host *host)
1705 ret = mmc_resume_host(host->mmc); 2276 ret = mmc_resume_host(host->mmc);
1706 sdhci_enable_card_detection(host); 2277 sdhci_enable_card_detection(host);
1707 2278
2279 /* Set the re-tuning expiration flag */
2280 if ((host->version >= SDHCI_SPEC_300) && host->tuning_count &&
2281 (host->tuning_mode == SDHCI_TUNING_MODE_1))
2282 host->flags |= SDHCI_NEEDS_RETUNING;
2283
1708 return ret; 2284 return ret;
1709} 2285}
1710 2286
@@ -1751,7 +2327,9 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1751int sdhci_add_host(struct sdhci_host *host) 2327int sdhci_add_host(struct sdhci_host *host)
1752{ 2328{
1753 struct mmc_host *mmc; 2329 struct mmc_host *mmc;
1754 unsigned int caps, ocr_avail; 2330 u32 caps[2];
2331 u32 max_current_caps;
2332 unsigned int ocr_avail;
1755 int ret; 2333 int ret;
1756 2334
1757 WARN_ON(host == NULL); 2335 WARN_ON(host == NULL);
@@ -1774,12 +2352,15 @@ int sdhci_add_host(struct sdhci_host *host)
1774 host->version); 2352 host->version);
1775 } 2353 }
1776 2354
1777 caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : 2355 caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
1778 sdhci_readl(host, SDHCI_CAPABILITIES); 2356 sdhci_readl(host, SDHCI_CAPABILITIES);
1779 2357
2358 caps[1] = (host->version >= SDHCI_SPEC_300) ?
2359 sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0;
2360
1780 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 2361 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
1781 host->flags |= SDHCI_USE_SDMA; 2362 host->flags |= SDHCI_USE_SDMA;
1782 else if (!(caps & SDHCI_CAN_DO_SDMA)) 2363 else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
1783 DBG("Controller doesn't have SDMA capability\n"); 2364 DBG("Controller doesn't have SDMA capability\n");
1784 else 2365 else
1785 host->flags |= SDHCI_USE_SDMA; 2366 host->flags |= SDHCI_USE_SDMA;
@@ -1790,7 +2371,8 @@ int sdhci_add_host(struct sdhci_host *host)
1790 host->flags &= ~SDHCI_USE_SDMA; 2371 host->flags &= ~SDHCI_USE_SDMA;
1791 } 2372 }
1792 2373
1793 if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2)) 2374 if ((host->version >= SDHCI_SPEC_200) &&
2375 (caps[0] & SDHCI_CAN_DO_ADMA2))
1794 host->flags |= SDHCI_USE_ADMA; 2376 host->flags |= SDHCI_USE_ADMA;
1795 2377
1796 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 2378 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
@@ -1840,10 +2422,10 @@ int sdhci_add_host(struct sdhci_host *host)
1840 } 2422 }
1841 2423
1842 if (host->version >= SDHCI_SPEC_300) 2424 if (host->version >= SDHCI_SPEC_300)
1843 host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) 2425 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
1844 >> SDHCI_CLOCK_BASE_SHIFT; 2426 >> SDHCI_CLOCK_BASE_SHIFT;
1845 else 2427 else
1846 host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) 2428 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
1847 >> SDHCI_CLOCK_BASE_SHIFT; 2429 >> SDHCI_CLOCK_BASE_SHIFT;
1848 2430
1849 host->max_clk *= 1000000; 2431 host->max_clk *= 1000000;
@@ -1859,7 +2441,7 @@ int sdhci_add_host(struct sdhci_host *host)
1859 } 2441 }
1860 2442
1861 host->timeout_clk = 2443 host->timeout_clk =
1862 (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; 2444 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
1863 if (host->timeout_clk == 0) { 2445 if (host->timeout_clk == 0) {
1864 if (host->ops->get_timeout_clock) { 2446 if (host->ops->get_timeout_clock) {
1865 host->timeout_clk = host->ops->get_timeout_clock(host); 2447 host->timeout_clk = host->ops->get_timeout_clock(host);
@@ -1871,22 +2453,55 @@ int sdhci_add_host(struct sdhci_host *host)
1871 return -ENODEV; 2453 return -ENODEV;
1872 } 2454 }
1873 } 2455 }
1874 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 2456 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
1875 host->timeout_clk *= 1000; 2457 host->timeout_clk *= 1000;
1876 2458
1877 /* 2459 /*
2460 * In case of Host Controller v3.00, find out whether clock
2461 * multiplier is supported.
2462 */
2463 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
2464 SDHCI_CLOCK_MUL_SHIFT;
2465
2466 /*
2467 * In case the value in Clock Multiplier is 0, then programmable
2468 * clock mode is not supported, otherwise the actual clock
2469 * multiplier is one more than the value of Clock Multiplier
2470 * in the Capabilities Register.
2471 */
2472 if (host->clk_mul)
2473 host->clk_mul += 1;
2474
2475 /*
1878 * Set host parameters. 2476 * Set host parameters.
1879 */ 2477 */
1880 mmc->ops = &sdhci_ops; 2478 mmc->ops = &sdhci_ops;
2479 mmc->f_max = host->max_clk;
1881 if (host->ops->get_min_clock) 2480 if (host->ops->get_min_clock)
1882 mmc->f_min = host->ops->get_min_clock(host); 2481 mmc->f_min = host->ops->get_min_clock(host);
1883 else if (host->version >= SDHCI_SPEC_300) 2482 else if (host->version >= SDHCI_SPEC_300) {
1884 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 2483 if (host->clk_mul) {
1885 else 2484 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
2485 mmc->f_max = host->max_clk * host->clk_mul;
2486 } else
2487 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
2488 } else
1886 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 2489 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
1887 2490
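When the Clock Multiplier field is non-zero, programmable clock mode is available and the divisor runs from 1 to 1024, which is where the f_min/f_max values above come from. A worked example with an assumed 100 MHz base clock and a hypothetical multiplier field of 9:

#include <stdio.h>

int main(void)
{
    unsigned int max_clk = 100000000;  /* assumed 100 MHz base clock */
    unsigned int clk_mul_field = 9;    /* hypothetical Clock Multiplier field */
    unsigned int clk_mul = clk_mul_field ? clk_mul_field + 1 : 0;

    if (clk_mul) {
        /* Programmable clock mode: divisor runs from 1 to 1024. */
        printf("f_max = %u Hz\n", max_clk * clk_mul);           /* 1000000000 */
        printf("f_min = %u Hz\n", (max_clk * clk_mul) / 1024);  /* 976562 */
    } else {
        printf("f_min = %u Hz\n", max_clk / 2046);  /* SDHCI_MAX_DIV_SPEC_300 */
    }
    return 0;
}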
1888 mmc->f_max = host->max_clk; 2491 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
1889 mmc->caps |= MMC_CAP_SDIO_IRQ; 2492
2493 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
2494 host->flags |= SDHCI_AUTO_CMD12;
2495
2496 /* Auto-CMD23 stuff only works in ADMA or PIO. */
2497 if ((host->version >= SDHCI_SPEC_300) &&
2498 ((host->flags & SDHCI_USE_ADMA) ||
2499 !(host->flags & SDHCI_USE_SDMA))) {
2500 host->flags |= SDHCI_AUTO_CMD23;
2501 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
2502 } else {
2503 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
2504 }
1890 2505
1891 /* 2506 /*
1892 * A controller may support 8-bit width, but the board itself 2507 * A controller may support 8-bit width, but the board itself
@@ -1898,21 +2513,113 @@ int sdhci_add_host(struct sdhci_host *host)
1898 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 2513 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
1899 mmc->caps |= MMC_CAP_4_BIT_DATA; 2514 mmc->caps |= MMC_CAP_4_BIT_DATA;
1900 2515
1901 if (caps & SDHCI_CAN_DO_HISPD) 2516 if (caps[0] & SDHCI_CAN_DO_HISPD)
1902 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 2517 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1903 2518
1904 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 2519 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
1905 mmc_card_is_removable(mmc)) 2520 mmc_card_is_removable(mmc))
1906 mmc->caps |= MMC_CAP_NEEDS_POLL; 2521 mmc->caps |= MMC_CAP_NEEDS_POLL;
1907 2522
2523 /* UHS-I mode(s) supported by the host controller. */
2524 if (host->version >= SDHCI_SPEC_300)
2525 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
2526
 2527 /* SDR104 support also implies SDR50 support */
2528 if (caps[1] & SDHCI_SUPPORT_SDR104)
2529 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
2530 else if (caps[1] & SDHCI_SUPPORT_SDR50)
2531 mmc->caps |= MMC_CAP_UHS_SDR50;
2532
2533 if (caps[1] & SDHCI_SUPPORT_DDR50)
2534 mmc->caps |= MMC_CAP_UHS_DDR50;
2535
 2536 /* Does the host need tuning for SDR50? */
2537 if (caps[1] & SDHCI_USE_SDR50_TUNING)
2538 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
2539
2540 /* Driver Type(s) (A, C, D) supported by the host */
2541 if (caps[1] & SDHCI_DRIVER_TYPE_A)
2542 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
2543 if (caps[1] & SDHCI_DRIVER_TYPE_C)
2544 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
2545 if (caps[1] & SDHCI_DRIVER_TYPE_D)
2546 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
2547
2548 /* Initial value for re-tuning timer count */
2549 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
2550 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
2551
2552 /*
2553 * In case Re-tuning Timer is not disabled, the actual value of
2554 * re-tuning timer will be 2 ^ (n - 1).
2555 */
2556 if (host->tuning_count)
2557 host->tuning_count = 1 << (host->tuning_count - 1);
2558
2559 /* Re-tuning mode supported by the Host Controller */
2560 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
2561 SDHCI_RETUNING_MODE_SHIFT;
2562
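The re-tuning interval is encoded as an exponent: a non-zero Re-tuning Timer Count field n means 2^(n-1) seconds between re-tuning runs. A small worked example with a hypothetical Capabilities_1 value:

#include <stdio.h>

#define SDHCI_RETUNING_TIMER_COUNT_MASK  0x00000F00
#define SDHCI_RETUNING_TIMER_COUNT_SHIFT 8

int main(void)
{
    unsigned int caps1 = 0x00000B00;  /* hypothetical Capabilities_1 value */
    unsigned int count = (caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
                         SDHCI_RETUNING_TIMER_COUNT_SHIFT;

    if (count)
        count = 1u << (count - 1);  /* 2 ^ (n - 1) seconds between re-tunes */

    printf("re-tune every %u seconds\n", count);  /* field 0xB -> 1024 s */
    return 0;
}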
1908 ocr_avail = 0; 2563 ocr_avail = 0;
1909 if (caps & SDHCI_CAN_VDD_330) 2564 /*
2565 * According to SD Host Controller spec v3.00, if the Host System
2566 * can afford more than 150mA, Host Driver should set XPC to 1. Also
2567 * the value is meaningful only if Voltage Support in the Capabilities
2568 * register is set. The actual current value is 4 times the register
2569 * value.
2570 */
2571 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
2572
2573 if (caps[0] & SDHCI_CAN_VDD_330) {
2574 int max_current_330;
2575
1910 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 2576 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
1911 if (caps & SDHCI_CAN_VDD_300) 2577
2578 max_current_330 = ((max_current_caps &
2579 SDHCI_MAX_CURRENT_330_MASK) >>
2580 SDHCI_MAX_CURRENT_330_SHIFT) *
2581 SDHCI_MAX_CURRENT_MULTIPLIER;
2582
2583 if (max_current_330 > 150)
2584 mmc->caps |= MMC_CAP_SET_XPC_330;
2585 }
2586 if (caps[0] & SDHCI_CAN_VDD_300) {
2587 int max_current_300;
2588
1912 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 2589 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
1913 if (caps & SDHCI_CAN_VDD_180) 2590
2591 max_current_300 = ((max_current_caps &
2592 SDHCI_MAX_CURRENT_300_MASK) >>
2593 SDHCI_MAX_CURRENT_300_SHIFT) *
2594 SDHCI_MAX_CURRENT_MULTIPLIER;
2595
2596 if (max_current_300 > 150)
2597 mmc->caps |= MMC_CAP_SET_XPC_300;
2598 }
2599 if (caps[0] & SDHCI_CAN_VDD_180) {
2600 int max_current_180;
2601
1914 ocr_avail |= MMC_VDD_165_195; 2602 ocr_avail |= MMC_VDD_165_195;
1915 2603
2604 max_current_180 = ((max_current_caps &
2605 SDHCI_MAX_CURRENT_180_MASK) >>
2606 SDHCI_MAX_CURRENT_180_SHIFT) *
2607 SDHCI_MAX_CURRENT_MULTIPLIER;
2608
2609 if (max_current_180 > 150)
2610 mmc->caps |= MMC_CAP_SET_XPC_180;
2611
2612 /* Maximum current capabilities of the host at 1.8V */
2613 if (max_current_180 >= 800)
2614 mmc->caps |= MMC_CAP_MAX_CURRENT_800;
2615 else if (max_current_180 >= 600)
2616 mmc->caps |= MMC_CAP_MAX_CURRENT_600;
2617 else if (max_current_180 >= 400)
2618 mmc->caps |= MMC_CAP_MAX_CURRENT_400;
2619 else
2620 mmc->caps |= MMC_CAP_MAX_CURRENT_200;
2621 }
2622
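Each byte of SDHCI_MAX_CURRENT encodes the supply capability in 4 mA units, and anything above 150 mA allows the corresponding XPC capability to be set. A sketch of the decoding with a made-up register value:

#include <stdio.h>

int main(void)
{
    unsigned int max_current_caps = 0x00C82032;  /* made-up register value */

    /* Each 8-bit field is in units of 4 mA. */
    unsigned int ma_330 = ((max_current_caps & 0x0000FF) >> 0)  * 4;  /* 200 mA */
    unsigned int ma_300 = ((max_current_caps & 0x00FF00) >> 8)  * 4;  /* 128 mA */
    unsigned int ma_180 = ((max_current_caps & 0xFF0000) >> 16) * 4;  /* 800 mA */

    printf("3.3V: %3u mA, XPC %s\n", ma_330, ma_330 > 150 ? "yes" : "no");
    printf("3.0V: %3u mA, XPC %s\n", ma_300, ma_300 > 150 ? "yes" : "no");
    printf("1.8V: %3u mA, XPC %s\n", ma_180, ma_180 > 150 ? "yes" : "no");
    return 0;
}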
1916 mmc->ocr_avail = ocr_avail; 2623 mmc->ocr_avail = ocr_avail;
1917 mmc->ocr_avail_sdio = ocr_avail; 2624 mmc->ocr_avail_sdio = ocr_avail;
1918 if (host->ocr_avail_sdio) 2625 if (host->ocr_avail_sdio)
@@ -1972,7 +2679,7 @@ int sdhci_add_host(struct sdhci_host *host)
1972 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 2679 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
1973 mmc->max_blk_size = 2; 2680 mmc->max_blk_size = 2;
1974 } else { 2681 } else {
1975 mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> 2682 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
1976 SDHCI_MAX_BLOCK_SHIFT; 2683 SDHCI_MAX_BLOCK_SHIFT;
1977 if (mmc->max_blk_size >= 3) { 2684 if (mmc->max_blk_size >= 3) {
1978 printk(KERN_WARNING "%s: Invalid maximum block size, " 2685 printk(KERN_WARNING "%s: Invalid maximum block size, "
@@ -1998,6 +2705,15 @@ int sdhci_add_host(struct sdhci_host *host)
1998 2705
1999 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); 2706 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
2000 2707
2708 if (host->version >= SDHCI_SPEC_300) {
2709 init_waitqueue_head(&host->buf_ready_int);
2710
2711 /* Initialize re-tuning timer */
2712 init_timer(&host->tuning_timer);
2713 host->tuning_timer.data = (unsigned long)host;
2714 host->tuning_timer.function = sdhci_tuning_timer;
2715 }
2716
2001 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, 2717 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
2002 mmc_hostname(mmc), host); 2718 mmc_hostname(mmc), host);
2003 if (ret) 2719 if (ret)
@@ -2091,6 +2807,8 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
2091 free_irq(host->irq, host); 2807 free_irq(host->irq, host);
2092 2808
2093 del_timer_sync(&host->timer); 2809 del_timer_sync(&host->timer);
2810 if (host->version >= SDHCI_SPEC_300)
2811 del_timer_sync(&host->tuning_timer);
2094 2812
2095 tasklet_kill(&host->card_tasklet); 2813 tasklet_kill(&host->card_tasklet);
2096 tasklet_kill(&host->finish_tasklet); 2814 tasklet_kill(&host->finish_tasklet);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 25e8bde600d1..745c42fa41ed 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -25,6 +25,7 @@
25 */ 25 */
26 26
27#define SDHCI_DMA_ADDRESS 0x00 27#define SDHCI_DMA_ADDRESS 0x00
28#define SDHCI_ARGUMENT2 SDHCI_DMA_ADDRESS
28 29
29#define SDHCI_BLOCK_SIZE 0x04 30#define SDHCI_BLOCK_SIZE 0x04
30#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF)) 31#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
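SDHCI_MAKE_BLKSZ packs the SDMA boundary argument into bits 12-14 and the block size into bits 0-11. For instance, the value the tuning code programs for the 64-byte CMD19 block with the largest boundary argument (7):

#include <stdio.h>

#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))

int main(void)
{
    /* 64-byte CMD19 tuning block, largest (512 KiB) SDMA boundary argument. */
    printf("0x%04x\n", SDHCI_MAKE_BLKSZ(7, 64));  /* prints 0x7040 */
    return 0;
}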
@@ -36,7 +37,8 @@
36#define SDHCI_TRANSFER_MODE 0x0C 37#define SDHCI_TRANSFER_MODE 0x0C
37#define SDHCI_TRNS_DMA 0x01 38#define SDHCI_TRNS_DMA 0x01
38#define SDHCI_TRNS_BLK_CNT_EN 0x02 39#define SDHCI_TRNS_BLK_CNT_EN 0x02
39#define SDHCI_TRNS_ACMD12 0x04 40#define SDHCI_TRNS_AUTO_CMD12 0x04
41#define SDHCI_TRNS_AUTO_CMD23 0x08
40#define SDHCI_TRNS_READ 0x10 42#define SDHCI_TRNS_READ 0x10
41#define SDHCI_TRNS_MULTI 0x20 43#define SDHCI_TRNS_MULTI 0x20
42 44
@@ -68,8 +70,10 @@
68#define SDHCI_DATA_AVAILABLE 0x00000800 70#define SDHCI_DATA_AVAILABLE 0x00000800
69#define SDHCI_CARD_PRESENT 0x00010000 71#define SDHCI_CARD_PRESENT 0x00010000
70#define SDHCI_WRITE_PROTECT 0x00080000 72#define SDHCI_WRITE_PROTECT 0x00080000
73#define SDHCI_DATA_LVL_MASK 0x00F00000
74#define SDHCI_DATA_LVL_SHIFT 20
71 75
72#define SDHCI_HOST_CONTROL 0x28 76#define SDHCI_HOST_CONTROL 0x28
73#define SDHCI_CTRL_LED 0x01 77#define SDHCI_CTRL_LED 0x01
74#define SDHCI_CTRL_4BITBUS 0x02 78#define SDHCI_CTRL_4BITBUS 0x02
75#define SDHCI_CTRL_HISPD 0x04 79#define SDHCI_CTRL_HISPD 0x04
@@ -99,6 +103,7 @@
99#define SDHCI_DIV_MASK 0xFF 103#define SDHCI_DIV_MASK 0xFF
100#define SDHCI_DIV_MASK_LEN 8 104#define SDHCI_DIV_MASK_LEN 8
101#define SDHCI_DIV_HI_MASK 0x300 105#define SDHCI_DIV_HI_MASK 0x300
106#define SDHCI_PROG_CLOCK_MODE 0x0020
102#define SDHCI_CLOCK_CARD_EN 0x0004 107#define SDHCI_CLOCK_CARD_EN 0x0004
103#define SDHCI_CLOCK_INT_STABLE 0x0002 108#define SDHCI_CLOCK_INT_STABLE 0x0002
104#define SDHCI_CLOCK_INT_EN 0x0001 109#define SDHCI_CLOCK_INT_EN 0x0001
@@ -146,7 +151,22 @@
146 151
147#define SDHCI_ACMD12_ERR 0x3C 152#define SDHCI_ACMD12_ERR 0x3C
148 153
149/* 3E-3F reserved */ 154#define SDHCI_HOST_CONTROL2 0x3E
155#define SDHCI_CTRL_UHS_MASK 0x0007
156#define SDHCI_CTRL_UHS_SDR12 0x0000
157#define SDHCI_CTRL_UHS_SDR25 0x0001
158#define SDHCI_CTRL_UHS_SDR50 0x0002
159#define SDHCI_CTRL_UHS_SDR104 0x0003
160#define SDHCI_CTRL_UHS_DDR50 0x0004
161#define SDHCI_CTRL_VDD_180 0x0008
162#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030
163#define SDHCI_CTRL_DRV_TYPE_B 0x0000
164#define SDHCI_CTRL_DRV_TYPE_A 0x0010
165#define SDHCI_CTRL_DRV_TYPE_C 0x0020
166#define SDHCI_CTRL_DRV_TYPE_D 0x0030
167#define SDHCI_CTRL_EXEC_TUNING 0x0040
168#define SDHCI_CTRL_TUNED_CLK 0x0080
169#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
150 170
151#define SDHCI_CAPABILITIES 0x40 171#define SDHCI_CAPABILITIES 0x40
152#define SDHCI_TIMEOUT_CLK_MASK 0x0000003F 172#define SDHCI_TIMEOUT_CLK_MASK 0x0000003F
@@ -167,9 +187,30 @@
167#define SDHCI_CAN_VDD_180 0x04000000 187#define SDHCI_CAN_VDD_180 0x04000000
168#define SDHCI_CAN_64BIT 0x10000000 188#define SDHCI_CAN_64BIT 0x10000000
169 189
190#define SDHCI_SUPPORT_SDR50 0x00000001
191#define SDHCI_SUPPORT_SDR104 0x00000002
192#define SDHCI_SUPPORT_DDR50 0x00000004
193#define SDHCI_DRIVER_TYPE_A 0x00000010
194#define SDHCI_DRIVER_TYPE_C 0x00000020
195#define SDHCI_DRIVER_TYPE_D 0x00000040
196#define SDHCI_RETUNING_TIMER_COUNT_MASK 0x00000F00
197#define SDHCI_RETUNING_TIMER_COUNT_SHIFT 8
198#define SDHCI_USE_SDR50_TUNING 0x00002000
199#define SDHCI_RETUNING_MODE_MASK 0x0000C000
200#define SDHCI_RETUNING_MODE_SHIFT 14
201#define SDHCI_CLOCK_MUL_MASK 0x00FF0000
202#define SDHCI_CLOCK_MUL_SHIFT 16
203
170#define SDHCI_CAPABILITIES_1 0x44 204#define SDHCI_CAPABILITIES_1 0x44
171 205
172#define SDHCI_MAX_CURRENT 0x48 206#define SDHCI_MAX_CURRENT 0x48
207#define SDHCI_MAX_CURRENT_330_MASK 0x0000FF
208#define SDHCI_MAX_CURRENT_330_SHIFT 0
209#define SDHCI_MAX_CURRENT_300_MASK 0x00FF00
210#define SDHCI_MAX_CURRENT_300_SHIFT 8
211#define SDHCI_MAX_CURRENT_180_MASK 0xFF0000
212#define SDHCI_MAX_CURRENT_180_SHIFT 16
213#define SDHCI_MAX_CURRENT_MULTIPLIER 4
173 214
174/* 4C-4F reserved for more max current */ 215/* 4C-4F reserved for more max current */
175 216
@@ -202,6 +243,12 @@
202#define SDHCI_MAX_DIV_SPEC_200 256 243#define SDHCI_MAX_DIV_SPEC_200 256
203#define SDHCI_MAX_DIV_SPEC_300 2046 244#define SDHCI_MAX_DIV_SPEC_300 2046
204 245
246/*
247 * Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2.
248 */
249#define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024)
250#define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12)
251
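The boundary argument is just log2(size) - 12, so the 512 KiB default maps to 7 (the register encodes 0 as 4 KiB up through 7 as 512 KiB). A user-space sketch with ilog2() replaced by a plain loop, since it is a kernel helper:

#include <stdio.h>

/* ilog2() is a kernel helper; for a power of two it is the set bit's index. */
static unsigned int ilog2_pow2(unsigned int v)
{
    unsigned int r = 0;

    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    unsigned int size = 512 * 1024;

    /* Register encoding: 0 -> 4 KiB, 1 -> 8 KiB, ..., 7 -> 512 KiB. */
    printf("boundary arg = %u\n", ilog2_pow2(size) - 12);  /* prints 7 */
    return 0;
}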
205struct sdhci_ops { 252struct sdhci_ops {
206#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 253#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
207 u32 (*read_l)(struct sdhci_host *host, int reg); 254 u32 (*read_l)(struct sdhci_host *host, int reg);
@@ -223,6 +270,10 @@ struct sdhci_ops {
223 void (*platform_send_init_74_clocks)(struct sdhci_host *host, 270 void (*platform_send_init_74_clocks)(struct sdhci_host *host,
224 u8 power_mode); 271 u8 power_mode);
225 unsigned int (*get_ro)(struct sdhci_host *host); 272 unsigned int (*get_ro)(struct sdhci_host *host);
273 void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
274 void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
275 int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
276
226}; 277};
227 278
228#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 279#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index af97015a2fc7..14f8edbaa195 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -29,6 +29,8 @@
29#include <linux/mmc/sh_mmcif.h> 29#include <linux/mmc/sh_mmcif.h>
30#include <linux/pagemap.h> 30#include <linux/pagemap.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/pm_runtime.h>
33#include <linux/spinlock.h>
32 34
33#define DRIVER_NAME "sh_mmcif" 35#define DRIVER_NAME "sh_mmcif"
34#define DRIVER_VERSION "2010-04-28" 36#define DRIVER_VERSION "2010-04-28"
@@ -153,6 +155,12 @@
153#define CLKDEV_MMC_DATA 20000000 /* 20MHz */ 155#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
154#define CLKDEV_INIT 400000 /* 400 KHz */ 156#define CLKDEV_INIT 400000 /* 400 KHz */
155 157
158enum mmcif_state {
159 STATE_IDLE,
160 STATE_REQUEST,
161 STATE_IOS,
162};
163
156struct sh_mmcif_host { 164struct sh_mmcif_host {
157 struct mmc_host *mmc; 165 struct mmc_host *mmc;
158 struct mmc_data *data; 166 struct mmc_data *data;
@@ -164,6 +172,9 @@ struct sh_mmcif_host {
164 long timeout; 172 long timeout;
165 void __iomem *addr; 173 void __iomem *addr;
166 struct completion intr_wait; 174 struct completion intr_wait;
175 enum mmcif_state state;
176 spinlock_t lock;
177 bool power;
167 178
168 /* DMA support */ 179 /* DMA support */
169 struct dma_chan *chan_rx; 180 struct dma_chan *chan_rx;
@@ -798,17 +809,31 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
798static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) 809static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
799{ 810{
800 struct sh_mmcif_host *host = mmc_priv(mmc); 811 struct sh_mmcif_host *host = mmc_priv(mmc);
812 unsigned long flags;
813
814 spin_lock_irqsave(&host->lock, flags);
815 if (host->state != STATE_IDLE) {
816 spin_unlock_irqrestore(&host->lock, flags);
817 mrq->cmd->error = -EAGAIN;
818 mmc_request_done(mmc, mrq);
819 return;
820 }
821
822 host->state = STATE_REQUEST;
823 spin_unlock_irqrestore(&host->lock, flags);
801 824
802 switch (mrq->cmd->opcode) { 825 switch (mrq->cmd->opcode) {
803 /* MMCIF does not support SD/SDIO command */ 826 /* MMCIF does not support SD/SDIO command */
804 case SD_IO_SEND_OP_COND: 827 case SD_IO_SEND_OP_COND:
805 case MMC_APP_CMD: 828 case MMC_APP_CMD:
829 host->state = STATE_IDLE;
806 mrq->cmd->error = -ETIMEDOUT; 830 mrq->cmd->error = -ETIMEDOUT;
807 mmc_request_done(mmc, mrq); 831 mmc_request_done(mmc, mrq);
808 return; 832 return;
809 case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ 833 case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
810 if (!mrq->data) { 834 if (!mrq->data) {
811 /* send_if_cond cmd (not support) */ 835 /* send_if_cond cmd (not support) */
836 host->state = STATE_IDLE;
812 mrq->cmd->error = -ETIMEDOUT; 837 mrq->cmd->error = -ETIMEDOUT;
813 mmc_request_done(mmc, mrq); 838 mmc_request_done(mmc, mrq);
814 return; 839 return;
@@ -830,12 +855,9 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
830 sh_mmcif_start_cmd(host, mrq, mrq->cmd); 855 sh_mmcif_start_cmd(host, mrq, mrq->cmd);
831 host->data = NULL; 856 host->data = NULL;
832 857
833 if (mrq->cmd->error != 0) { 858 if (!mrq->cmd->error && mrq->stop)
834 mmc_request_done(mmc, mrq);
835 return;
836 }
837 if (mrq->stop)
838 sh_mmcif_stop_cmd(host, mrq, mrq->stop); 859 sh_mmcif_stop_cmd(host, mrq, mrq->stop);
860 host->state = STATE_IDLE;
839 mmc_request_done(mmc, mrq); 861 mmc_request_done(mmc, mrq);
840} 862}
841 863
@@ -843,15 +865,39 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
843{ 865{
844 struct sh_mmcif_host *host = mmc_priv(mmc); 866 struct sh_mmcif_host *host = mmc_priv(mmc);
845 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; 867 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
868 unsigned long flags;
869
870 spin_lock_irqsave(&host->lock, flags);
871 if (host->state != STATE_IDLE) {
872 spin_unlock_irqrestore(&host->lock, flags);
873 return;
874 }
875
876 host->state = STATE_IOS;
877 spin_unlock_irqrestore(&host->lock, flags);
846 878
847 if (ios->power_mode == MMC_POWER_UP) { 879 if (ios->power_mode == MMC_POWER_UP) {
848 if (p->set_pwr) 880 if (p->set_pwr)
849 p->set_pwr(host->pd, ios->power_mode); 881 p->set_pwr(host->pd, ios->power_mode);
882 if (!host->power) {
883 /* See if we also get DMA */
884 sh_mmcif_request_dma(host, host->pd->dev.platform_data);
885 pm_runtime_get_sync(&host->pd->dev);
886 host->power = true;
887 }
850 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { 888 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
851 /* clock stop */ 889 /* clock stop */
852 sh_mmcif_clock_control(host, 0); 890 sh_mmcif_clock_control(host, 0);
853 if (ios->power_mode == MMC_POWER_OFF && p->down_pwr) 891 if (ios->power_mode == MMC_POWER_OFF) {
854 p->down_pwr(host->pd); 892 if (host->power) {
893 pm_runtime_put(&host->pd->dev);
894 sh_mmcif_release_dma(host);
895 host->power = false;
896 }
897 if (p->down_pwr)
898 p->down_pwr(host->pd);
899 }
900 host->state = STATE_IDLE;
855 return; 901 return;
856 } 902 }
857 903
@@ -859,6 +905,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
859 sh_mmcif_clock_control(host, ios->clock); 905 sh_mmcif_clock_control(host, ios->clock);
860 906
861 host->bus_width = ios->bus_width; 907 host->bus_width = ios->bus_width;
908 host->state = STATE_IDLE;
862} 909}
863 910
864static int sh_mmcif_get_cd(struct mmc_host *mmc) 911static int sh_mmcif_get_cd(struct mmc_host *mmc)
@@ -925,7 +972,7 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
925 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); 972 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
926 err = 1; 973 err = 1;
927 } else { 974 } else {
928 dev_dbg(&host->pd->dev, "Not support int\n"); 975 dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
929 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); 976 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
930 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); 977 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
931 err = 1; 978 err = 1;
@@ -996,6 +1043,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
996 host->pd = pdev; 1043 host->pd = pdev;
997 1044
998 init_completion(&host->intr_wait); 1045 init_completion(&host->intr_wait);
1046 spin_lock_init(&host->lock);
999 1047
1000 mmc->ops = &sh_mmcif_ops; 1048 mmc->ops = &sh_mmcif_ops;
1001 mmc->f_max = host->clk; 1049 mmc->f_max = host->clk;
@@ -1020,24 +1068,29 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
1020 sh_mmcif_sync_reset(host); 1068 sh_mmcif_sync_reset(host);
1021 platform_set_drvdata(pdev, host); 1069 platform_set_drvdata(pdev, host);
1022 1070
1023 /* See if we also get DMA */ 1071 pm_runtime_enable(&pdev->dev);
1024 sh_mmcif_request_dma(host, pd); 1072 host->power = false;
1073
1074 ret = pm_runtime_resume(&pdev->dev);
1075 if (ret < 0)
1076 goto clean_up2;
1025 1077
1026 mmc_add_host(mmc); 1078 mmc_add_host(mmc);
1027 1079
1080 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1081
1028 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host); 1082 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
1029 if (ret) { 1083 if (ret) {
1030 dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n"); 1084 dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
1031 goto clean_up2; 1085 goto clean_up3;
1032 } 1086 }
1033 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host); 1087 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
1034 if (ret) { 1088 if (ret) {
1035 free_irq(irq[0], host); 1089 free_irq(irq[0], host);
1036 dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); 1090 dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
1037 goto clean_up2; 1091 goto clean_up3;
1038 } 1092 }
1039 1093
1040 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1041 sh_mmcif_detect(host->mmc); 1094 sh_mmcif_detect(host->mmc);
1042 1095
1043 dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); 1096 dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
@@ -1045,7 +1098,11 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
1045 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); 1098 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
1046 return ret; 1099 return ret;
1047 1100
1101clean_up3:
1102 mmc_remove_host(mmc);
1103 pm_runtime_suspend(&pdev->dev);
1048clean_up2: 1104clean_up2:
1105 pm_runtime_disable(&pdev->dev);
1049 clk_disable(host->hclk); 1106 clk_disable(host->hclk);
1050clean_up1: 1107clean_up1:
1051 mmc_free_host(mmc); 1108 mmc_free_host(mmc);
@@ -1060,14 +1117,14 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
1060 struct sh_mmcif_host *host = platform_get_drvdata(pdev); 1117 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1061 int irq[2]; 1118 int irq[2];
1062 1119
1120 pm_runtime_get_sync(&pdev->dev);
1121
1063 mmc_remove_host(host->mmc); 1122 mmc_remove_host(host->mmc);
1064 sh_mmcif_release_dma(host); 1123 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1065 1124
1066 if (host->addr) 1125 if (host->addr)
1067 iounmap(host->addr); 1126 iounmap(host->addr);
1068 1127
1069 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1070
1071 irq[0] = platform_get_irq(pdev, 0); 1128 irq[0] = platform_get_irq(pdev, 0);
1072 irq[1] = platform_get_irq(pdev, 1); 1129 irq[1] = platform_get_irq(pdev, 1);
1073 1130
@@ -1078,15 +1135,52 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
1078 1135
1079 clk_disable(host->hclk); 1136 clk_disable(host->hclk);
1080 mmc_free_host(host->mmc); 1137 mmc_free_host(host->mmc);
1138 pm_runtime_put_sync(&pdev->dev);
1139 pm_runtime_disable(&pdev->dev);
1081 1140
1082 return 0; 1141 return 0;
1083} 1142}
1084 1143
1144#ifdef CONFIG_PM
1145static int sh_mmcif_suspend(struct device *dev)
1146{
1147 struct platform_device *pdev = to_platform_device(dev);
1148 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1149 int ret = mmc_suspend_host(host->mmc);
1150
1151 if (!ret) {
1152 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1153 clk_disable(host->hclk);
1154 }
1155
1156 return ret;
1157}
1158
1159static int sh_mmcif_resume(struct device *dev)
1160{
1161 struct platform_device *pdev = to_platform_device(dev);
1162 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1163
1164 clk_enable(host->hclk);
1165
1166 return mmc_resume_host(host->mmc);
1167}
1168#else
1169#define sh_mmcif_suspend NULL
1170#define sh_mmcif_resume NULL
1171#endif /* CONFIG_PM */
1172
1173static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
1174 .suspend = sh_mmcif_suspend,
1175 .resume = sh_mmcif_resume,
1176};
1177
1085static struct platform_driver sh_mmcif_driver = { 1178static struct platform_driver sh_mmcif_driver = {
1086 .probe = sh_mmcif_probe, 1179 .probe = sh_mmcif_probe,
1087 .remove = sh_mmcif_remove, 1180 .remove = sh_mmcif_remove,
1088 .driver = { 1181 .driver = {
1089 .name = DRIVER_NAME, 1182 .name = DRIVER_NAME,
1183 .pm = &sh_mmcif_dev_pm_ops,
1090 }, 1184 },
1091}; 1185};
1092 1186
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index cc701236d16f..b3654293017b 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -62,7 +62,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
62 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; 62 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
63 struct tmio_mmc_host *host; 63 struct tmio_mmc_host *host;
64 char clk_name[8]; 64 char clk_name[8];
65 int ret; 65 int i, irq, ret;
66 66
67 priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); 67 priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
68 if (priv == NULL) { 68 if (priv == NULL) {
@@ -71,6 +71,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
71 } 71 }
72 72
73 mmc_data = &priv->mmc_data; 73 mmc_data = &priv->mmc_data;
74 p->pdata = mmc_data;
74 75
75 snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); 76 snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
76 priv->clk = clk_get(&pdev->dev, clk_name); 77 priv->clk = clk_get(&pdev->dev, clk_name);
@@ -116,11 +117,36 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
116 if (ret < 0) 117 if (ret < 0)
117 goto eprobe; 118 goto eprobe;
118 119
119 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), 120 for (i = 0; i < 3; i++) {
120 (unsigned long)host->ctl, host->irq); 121 irq = platform_get_irq(pdev, i);
122 if (irq < 0) {
123 if (i) {
124 continue;
125 } else {
126 ret = irq;
127 goto eirq;
128 }
129 }
130 ret = request_irq(irq, tmio_mmc_irq, 0,
131 dev_name(&pdev->dev), host);
132 if (ret) {
133 while (i--) {
134 irq = platform_get_irq(pdev, i);
135 if (irq >= 0)
136 free_irq(irq, host);
137 }
138 goto eirq;
139 }
140 }
141 dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n",
142 mmc_hostname(host->mmc), (unsigned long)
143 (platform_get_resource(pdev,IORESOURCE_MEM, 0)->start),
144 mmc_data->hclk / 1000000);
121 145
122 return ret; 146 return ret;
123 147
148eirq:
149 tmio_mmc_host_remove(host);
124eprobe: 150eprobe:
125 clk_disable(priv->clk); 151 clk_disable(priv->clk);
126 clk_put(priv->clk); 152 clk_put(priv->clk);
@@ -134,6 +160,16 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
134 struct mmc_host *mmc = platform_get_drvdata(pdev); 160 struct mmc_host *mmc = platform_get_drvdata(pdev);
135 struct tmio_mmc_host *host = mmc_priv(mmc); 161 struct tmio_mmc_host *host = mmc_priv(mmc);
136 struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); 162 struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
163 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
164 int i, irq;
165
166 p->pdata = NULL;
167
168 for (i = 0; i < 3; i++) {
169 irq = platform_get_irq(pdev, i);
170 if (irq >= 0)
171 free_irq(irq, host);
172 }
137 173
138 tmio_mmc_host_remove(host); 174 tmio_mmc_host_remove(host);
139 clk_disable(priv->clk); 175 clk_disable(priv->clk);
@@ -143,10 +179,18 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
143 return 0; 179 return 0;
144} 180}
145 181
182static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
183 .suspend = tmio_mmc_host_suspend,
184 .resume = tmio_mmc_host_resume,
185 .runtime_suspend = tmio_mmc_host_runtime_suspend,
186 .runtime_resume = tmio_mmc_host_runtime_resume,
187};
188
146static struct platform_driver sh_mobile_sdhi_driver = { 189static struct platform_driver sh_mobile_sdhi_driver = {
147 .driver = { 190 .driver = {
148 .name = "sh_mobile_sdhi", 191 .name = "sh_mobile_sdhi",
149 .owner = THIS_MODULE, 192 .owner = THIS_MODULE,
193 .pm = &tmio_mmc_dev_pm_ops,
150 }, 194 },
151 .probe = sh_mobile_sdhi_probe, 195 .probe = sh_mobile_sdhi_probe,
152 .remove = __devexit_p(sh_mobile_sdhi_remove), 196 .remove = __devexit_p(sh_mobile_sdhi_remove),
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 79c568461d59..14479f9ef53f 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -30,7 +30,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
30 struct mmc_host *mmc = platform_get_drvdata(dev); 30 struct mmc_host *mmc = platform_get_drvdata(dev);
31 int ret; 31 int ret;
32 32
33 ret = mmc_suspend_host(mmc); 33 ret = tmio_mmc_host_suspend(&dev->dev);
34 34
35 /* Tell MFD core it can disable us now.*/ 35 /* Tell MFD core it can disable us now.*/
36 if (!ret && cell->disable) 36 if (!ret && cell->disable)
@@ -46,15 +46,12 @@ static int tmio_mmc_resume(struct platform_device *dev)
46 int ret = 0; 46 int ret = 0;
47 47
48 /* Tell the MFD core we are ready to be enabled */ 48 /* Tell the MFD core we are ready to be enabled */
49 if (cell->resume) { 49 if (cell->resume)
50 ret = cell->resume(dev); 50 ret = cell->resume(dev);
51 if (ret)
52 goto out;
53 }
54 51
55 mmc_resume_host(mmc); 52 if (!ret)
53 ret = tmio_mmc_host_resume(&dev->dev);
56 54
57out:
58 return ret; 55 return ret;
59} 56}
60#else 57#else
@@ -67,7 +64,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev)
67 const struct mfd_cell *cell = mfd_get_cell(pdev); 64 const struct mfd_cell *cell = mfd_get_cell(pdev);
68 struct tmio_mmc_data *pdata; 65 struct tmio_mmc_data *pdata;
69 struct tmio_mmc_host *host; 66 struct tmio_mmc_host *host;
70 int ret = -EINVAL; 67 int ret = -EINVAL, irq;
71 68
72 if (pdev->num_resources != 2) 69 if (pdev->num_resources != 2)
73 goto out; 70 goto out;
@@ -76,6 +73,12 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev)
76 if (!pdata || !pdata->hclk) 73 if (!pdata || !pdata->hclk)
77 goto out; 74 goto out;
78 75
76 irq = platform_get_irq(pdev, 0);
77 if (irq < 0) {
78 ret = irq;
79 goto out;
80 }
81
79 /* Tell the MFD core we are ready to be enabled */ 82 /* Tell the MFD core we are ready to be enabled */
80 if (cell->enable) { 83 if (cell->enable) {
81 ret = cell->enable(pdev); 84 ret = cell->enable(pdev);
@@ -87,11 +90,18 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev)
87 if (ret) 90 if (ret)
88 goto cell_disable; 91 goto cell_disable;
89 92
93 ret = request_irq(irq, tmio_mmc_irq, IRQF_DISABLED |
94 IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), host);
95 if (ret)
96 goto host_remove;
97
90 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), 98 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
91 (unsigned long)host->ctl, host->irq); 99 (unsigned long)host->ctl, irq);
92 100
93 return 0; 101 return 0;
94 102
103host_remove:
104 tmio_mmc_host_remove(host);
95cell_disable: 105cell_disable:
96 if (cell->disable) 106 if (cell->disable)
97 cell->disable(pdev); 107 cell->disable(pdev);
@@ -107,7 +117,9 @@ static int __devexit tmio_mmc_remove(struct platform_device *pdev)
107 platform_set_drvdata(pdev, NULL); 117 platform_set_drvdata(pdev, NULL);
108 118
109 if (mmc) { 119 if (mmc) {
110 tmio_mmc_host_remove(mmc_priv(mmc)); 120 struct tmio_mmc_host *host = mmc_priv(mmc);
121 free_irq(platform_get_irq(pdev, 0), host);
122 tmio_mmc_host_remove(host);
111 if (cell->disable) 123 if (cell->disable)
112 cell->disable(pdev); 124 cell->disable(pdev);
113 } 125 }
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 099ed49a259b..8260bc2c34e3 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -19,6 +19,7 @@
19#include <linux/highmem.h> 19#include <linux/highmem.h>
20#include <linux/mmc/tmio.h> 20#include <linux/mmc/tmio.h>
21#include <linux/pagemap.h> 21#include <linux/pagemap.h>
22#include <linux/spinlock.h>
22 23
23/* Definitions for values the CTRL_SDIO_STATUS register can take. */ 24/* Definitions for values the CTRL_SDIO_STATUS register can take. */
24#define TMIO_SDIO_STAT_IOIRQ 0x0001 25#define TMIO_SDIO_STAT_IOIRQ 0x0001
@@ -44,13 +45,14 @@ struct tmio_mmc_host {
44 struct mmc_request *mrq; 45 struct mmc_request *mrq;
45 struct mmc_data *data; 46 struct mmc_data *data;
46 struct mmc_host *mmc; 47 struct mmc_host *mmc;
47 int irq;
48 unsigned int sdio_irq_enabled; 48 unsigned int sdio_irq_enabled;
49 49
50 /* Callbacks for clock / power control */ 50 /* Callbacks for clock / power control */
51 void (*set_pwr)(struct platform_device *host, int state); 51 void (*set_pwr)(struct platform_device *host, int state);
52 void (*set_clk_div)(struct platform_device *host, int state); 52 void (*set_clk_div)(struct platform_device *host, int state);
53 53
54 int pm_error;
55
54 /* pio related stuff */ 56 /* pio related stuff */
55 struct scatterlist *sg_ptr; 57 struct scatterlist *sg_ptr;
56 struct scatterlist *sg_orig; 58 struct scatterlist *sg_orig;
@@ -83,6 +85,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
83 85
84void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); 86void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
85void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); 87void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
88irqreturn_t tmio_mmc_irq(int irq, void *devid);
86 89
87static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, 90static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
88 unsigned long *flags) 91 unsigned long *flags)
@@ -120,4 +123,15 @@ static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
120} 123}
121#endif 124#endif
122 125
126#ifdef CONFIG_PM
127int tmio_mmc_host_suspend(struct device *dev);
128int tmio_mmc_host_resume(struct device *dev);
129#else
130#define tmio_mmc_host_suspend NULL
131#define tmio_mmc_host_resume NULL
132#endif
133
134int tmio_mmc_host_runtime_suspend(struct device *dev);
135int tmio_mmc_host_runtime_resume(struct device *dev);
136
123#endif 137#endif
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index d3de74ab633e..25f1ad6cbe09 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -256,7 +256,10 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
256void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) 256void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
257{ 257{
258 /* We can only either use DMA for both Tx and Rx or not use it at all */ 258 /* We can only either use DMA for both Tx and Rx or not use it at all */
259 if (pdata->dma) { 259 if (!pdata->dma)
260 return;
261
262 if (!host->chan_tx && !host->chan_rx) {
260 dma_cap_mask_t mask; 263 dma_cap_mask_t mask;
261 264
262 dma_cap_zero(mask); 265 dma_cap_zero(mask);
@@ -284,18 +287,18 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
284 287
285 tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); 288 tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
286 tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); 289 tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
290 }
287 291
288 tmio_mmc_enable_dma(host, true); 292 tmio_mmc_enable_dma(host, true);
293
294 return;
289 295
290 return;
291ebouncebuf: 296ebouncebuf:
292 dma_release_channel(host->chan_rx); 297 dma_release_channel(host->chan_rx);
293 host->chan_rx = NULL; 298 host->chan_rx = NULL;
294ereqrx: 299ereqrx:
295 dma_release_channel(host->chan_tx); 300 dma_release_channel(host->chan_tx);
296 host->chan_tx = NULL; 301 host->chan_tx = NULL;
297 return;
298 }
299} 302}
300 303
301void tmio_mmc_release_dma(struct tmio_mmc_host *host) 304void tmio_mmc_release_dma(struct tmio_mmc_host *host)
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 710339a85c84..ad6347bb02dd 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -39,6 +39,7 @@
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/pagemap.h> 40#include <linux/pagemap.h>
41#include <linux/platform_device.h> 41#include <linux/platform_device.h>
42#include <linux/pm_runtime.h>
42#include <linux/scatterlist.h> 43#include <linux/scatterlist.h>
43#include <linux/workqueue.h> 44#include <linux/workqueue.h>
44#include <linux/spinlock.h> 45#include <linux/spinlock.h>
@@ -243,8 +244,12 @@ static void tmio_mmc_reset_work(struct work_struct *work)
243 spin_lock_irqsave(&host->lock, flags); 244 spin_lock_irqsave(&host->lock, flags);
244 mrq = host->mrq; 245 mrq = host->mrq;
245 246
246 /* request already finished */ 247 /*
247 if (!mrq 248 * Is the request already finished? Since we use a non-blocking
249 * cancel_delayed_work(), a .set_ios() call can preempt us, so we
250 * have to check for IS_ERR(host->mrq).
251 */
252 if (IS_ERR_OR_NULL(mrq)
248 || time_is_after_jiffies(host->last_req_ts + 253 || time_is_after_jiffies(host->last_req_ts +
249 msecs_to_jiffies(2000))) { 254 msecs_to_jiffies(2000))) {
250 spin_unlock_irqrestore(&host->lock, flags); 255 spin_unlock_irqrestore(&host->lock, flags);
@@ -264,16 +269,19 @@ static void tmio_mmc_reset_work(struct work_struct *work)
264 269
265 host->cmd = NULL; 270 host->cmd = NULL;
266 host->data = NULL; 271 host->data = NULL;
267 host->mrq = NULL;
268 host->force_pio = false; 272 host->force_pio = false;
269 273
270 spin_unlock_irqrestore(&host->lock, flags); 274 spin_unlock_irqrestore(&host->lock, flags);
271 275
272 tmio_mmc_reset(host); 276 tmio_mmc_reset(host);
273 277
278 /* Ready for new calls */
279 host->mrq = NULL;
280
274 mmc_request_done(host->mmc, mrq); 281 mmc_request_done(host->mmc, mrq);
275} 282}
276 283
284/* called with host->lock held, interrupts disabled */
277static void tmio_mmc_finish_request(struct tmio_mmc_host *host) 285static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
278{ 286{
279 struct mmc_request *mrq = host->mrq; 287 struct mmc_request *mrq = host->mrq;
@@ -281,13 +289,15 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
281 if (!mrq) 289 if (!mrq)
282 return; 290 return;
283 291
284 host->mrq = NULL;
285 host->cmd = NULL; 292 host->cmd = NULL;
286 host->data = NULL; 293 host->data = NULL;
287 host->force_pio = false; 294 host->force_pio = false;
288 295
289 cancel_delayed_work(&host->delayed_reset_work); 296 cancel_delayed_work(&host->delayed_reset_work);
290 297
298 host->mrq = NULL;
299
300 /* FIXME: mmc_request_done() can schedule! */
291 mmc_request_done(host->mmc, mrq); 301 mmc_request_done(host->mmc, mrq);
292} 302}
293 303
@@ -554,7 +564,7 @@ out:
554 spin_unlock(&host->lock); 564 spin_unlock(&host->lock);
555} 565}
556 566
557static irqreturn_t tmio_mmc_irq(int irq, void *devid) 567irqreturn_t tmio_mmc_irq(int irq, void *devid)
558{ 568{
559 struct tmio_mmc_host *host = devid; 569 struct tmio_mmc_host *host = devid;
560 struct tmio_mmc_data *pdata = host->pdata; 570 struct tmio_mmc_data *pdata = host->pdata;
@@ -649,6 +659,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
649out: 659out:
650 return IRQ_HANDLED; 660 return IRQ_HANDLED;
651} 661}
662EXPORT_SYMBOL(tmio_mmc_irq);
652 663
653static int tmio_mmc_start_data(struct tmio_mmc_host *host, 664static int tmio_mmc_start_data(struct tmio_mmc_host *host,
654 struct mmc_data *data) 665 struct mmc_data *data)
@@ -685,15 +696,27 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
685static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 696static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
686{ 697{
687 struct tmio_mmc_host *host = mmc_priv(mmc); 698 struct tmio_mmc_host *host = mmc_priv(mmc);
699 unsigned long flags;
688 int ret; 700 int ret;
689 701
690 if (host->mrq) 702 spin_lock_irqsave(&host->lock, flags);
703
704 if (host->mrq) {
691 pr_debug("request not null\n"); 705 pr_debug("request not null\n");
706 if (IS_ERR(host->mrq)) {
707 spin_unlock_irqrestore(&host->lock, flags);
708 mrq->cmd->error = -EAGAIN;
709 mmc_request_done(mmc, mrq);
710 return;
711 }
712 }
692 713
693 host->last_req_ts = jiffies; 714 host->last_req_ts = jiffies;
694 wmb(); 715 wmb();
695 host->mrq = mrq; 716 host->mrq = mrq;
696 717
718 spin_unlock_irqrestore(&host->lock, flags);
719
697 if (mrq->data) { 720 if (mrq->data) {
698 ret = tmio_mmc_start_data(host, mrq->data); 721 ret = tmio_mmc_start_data(host, mrq->data);
699 if (ret) 722 if (ret)
@@ -708,8 +731,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
708 } 731 }
709 732
710fail: 733fail:
711 host->mrq = NULL;
712 host->force_pio = false; 734 host->force_pio = false;
735 host->mrq = NULL;
713 mrq->cmd->error = ret; 736 mrq->cmd->error = ret;
714 mmc_request_done(mmc, mrq); 737 mmc_request_done(mmc, mrq);
715} 738}
@@ -723,19 +746,54 @@ fail:
723static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 746static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
724{ 747{
725 struct tmio_mmc_host *host = mmc_priv(mmc); 748 struct tmio_mmc_host *host = mmc_priv(mmc);
749 struct tmio_mmc_data *pdata = host->pdata;
750 unsigned long flags;
751
752 spin_lock_irqsave(&host->lock, flags);
753 if (host->mrq) {
754 if (IS_ERR(host->mrq)) {
755 dev_dbg(&host->pdev->dev,
756 "%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
757 current->comm, task_pid_nr(current),
758 ios->clock, ios->power_mode);
759 host->mrq = ERR_PTR(-EINTR);
760 } else {
761 dev_dbg(&host->pdev->dev,
762 "%s.%d: CMD%u active since %lu, now %lu!\n",
763 current->comm, task_pid_nr(current),
764 host->mrq->cmd->opcode, host->last_req_ts, jiffies);
765 }
766 spin_unlock_irqrestore(&host->lock, flags);
767 return;
768 }
769
770 host->mrq = ERR_PTR(-EBUSY);
771
772 spin_unlock_irqrestore(&host->lock, flags);
726 773
727 if (ios->clock) 774 if (ios->clock)
728 tmio_mmc_set_clock(host, ios->clock); 775 tmio_mmc_set_clock(host, ios->clock);
729 776
730 /* Power sequence - OFF -> UP -> ON */ 777 /* Power sequence - OFF -> UP -> ON */
731 if (ios->power_mode == MMC_POWER_UP) { 778 if (ios->power_mode == MMC_POWER_UP) {
779 if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && !pdata->power) {
780 pm_runtime_get_sync(&host->pdev->dev);
781 pdata->power = true;
782 }
732 /* power up SD bus */ 783 /* power up SD bus */
733 if (host->set_pwr) 784 if (host->set_pwr)
734 host->set_pwr(host->pdev, 1); 785 host->set_pwr(host->pdev, 1);
735 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { 786 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
736 /* power down SD bus */ 787 /* power down SD bus */
737 if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) 788 if (ios->power_mode == MMC_POWER_OFF) {
738 host->set_pwr(host->pdev, 0); 789 if (host->set_pwr)
790 host->set_pwr(host->pdev, 0);
791 if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
792 pdata->power) {
793 pdata->power = false;
794 pm_runtime_put(&host->pdev->dev);
795 }
796 }
739 tmio_mmc_clk_stop(host); 797 tmio_mmc_clk_stop(host);
740 } else { 798 } else {
741 /* start bus clock */ 799 /* start bus clock */
@@ -753,6 +811,12 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
753 811
754 /* Let things settle. delay taken from winCE driver */ 812 /* Let things settle. delay taken from winCE driver */
755 udelay(140); 813 udelay(140);
814 if (PTR_ERR(host->mrq) == -EINTR)
815 dev_dbg(&host->pdev->dev,
816 "%s.%d: IOS interrupted: clk %u, mode %u",
817 current->comm, task_pid_nr(current),
818 ios->clock, ios->power_mode);
819 host->mrq = NULL;
756} 820}
757 821
758static int tmio_mmc_get_ro(struct mmc_host *mmc) 822static int tmio_mmc_get_ro(struct mmc_host *mmc)
@@ -801,6 +865,7 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
801 if (!mmc) 865 if (!mmc)
802 return -ENOMEM; 866 return -ENOMEM;
803 867
868 pdata->dev = &pdev->dev;
804 _host = mmc_priv(mmc); 869 _host = mmc_priv(mmc);
805 _host->pdata = pdata; 870 _host->pdata = pdata;
806 _host->mmc = mmc; 871 _host->mmc = mmc;
@@ -834,24 +899,19 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
834 else 899 else
835 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 900 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
836 901
837 tmio_mmc_clk_stop(_host); 902 pdata->power = false;
838 tmio_mmc_reset(_host); 903 pm_runtime_enable(&pdev->dev);
839 904 ret = pm_runtime_resume(&pdev->dev);
840 ret = platform_get_irq(pdev, 0);
841 if (ret < 0) 905 if (ret < 0)
842 goto unmap_ctl; 906 goto pm_disable;
843 907
844 _host->irq = ret; 908 tmio_mmc_clk_stop(_host);
909 tmio_mmc_reset(_host);
845 910
846 tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); 911 tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
847 if (pdata->flags & TMIO_MMC_SDIO_IRQ) 912 if (pdata->flags & TMIO_MMC_SDIO_IRQ)
848 tmio_mmc_enable_sdio_irq(mmc, 0); 913 tmio_mmc_enable_sdio_irq(mmc, 0);
849 914
850 ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED |
851 IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host);
852 if (ret)
853 goto unmap_ctl;
854
855 spin_lock_init(&_host->lock); 915 spin_lock_init(&_host->lock);
856 916
857 /* Init delayed work for request timeouts */ 917 /* Init delayed work for request timeouts */
@@ -860,6 +920,10 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
860 /* See if we also get DMA */ 920 /* See if we also get DMA */
861 tmio_mmc_request_dma(_host, pdata); 921 tmio_mmc_request_dma(_host, pdata);
862 922
923 /* We have to keep the device powered for its card detection to work */
924 if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD))
925 pm_runtime_get_noresume(&pdev->dev);
926
863 mmc_add_host(mmc); 927 mmc_add_host(mmc);
864 928
865 /* Unmask the IRQs we want to know about */ 929 /* Unmask the IRQs we want to know about */
@@ -874,7 +938,8 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
874 938
875 return 0; 939 return 0;
876 940
877unmap_ctl: 941pm_disable:
942 pm_runtime_disable(&pdev->dev);
878 iounmap(_host->ctl); 943 iounmap(_host->ctl);
879host_free: 944host_free:
880 mmc_free_host(mmc); 945 mmc_free_host(mmc);
@@ -885,13 +950,88 @@ EXPORT_SYMBOL(tmio_mmc_host_probe);
885 950
886void tmio_mmc_host_remove(struct tmio_mmc_host *host) 951void tmio_mmc_host_remove(struct tmio_mmc_host *host)
887{ 952{
953 struct platform_device *pdev = host->pdev;
954
955 /*
956 * We don't have to manipulate pdata->power here: if there is a card in
957 * the slot, the runtime PM is active and our .runtime_resume() will not
958 * be run. If there is no card in the slot and the platform can suspend
959 * the controller, the runtime PM is suspended and pdata->power == false,
960 * so our .runtime_resume() will not try to detect a card in the slot.
961 */
962 if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD)
963 pm_runtime_get_sync(&pdev->dev);
964
888 mmc_remove_host(host->mmc); 965 mmc_remove_host(host->mmc);
889 cancel_delayed_work_sync(&host->delayed_reset_work); 966 cancel_delayed_work_sync(&host->delayed_reset_work);
890 tmio_mmc_release_dma(host); 967 tmio_mmc_release_dma(host);
891 free_irq(host->irq, host); 968
969 pm_runtime_put_sync(&pdev->dev);
970 pm_runtime_disable(&pdev->dev);
971
892 iounmap(host->ctl); 972 iounmap(host->ctl);
893 mmc_free_host(host->mmc); 973 mmc_free_host(host->mmc);
894} 974}
895EXPORT_SYMBOL(tmio_mmc_host_remove); 975EXPORT_SYMBOL(tmio_mmc_host_remove);
896 976
977#ifdef CONFIG_PM
978int tmio_mmc_host_suspend(struct device *dev)
979{
980 struct mmc_host *mmc = dev_get_drvdata(dev);
981 struct tmio_mmc_host *host = mmc_priv(mmc);
982 int ret = mmc_suspend_host(mmc);
983
984 if (!ret)
985 tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
986
987 host->pm_error = pm_runtime_put_sync(dev);
988
989 return ret;
990}
991EXPORT_SYMBOL(tmio_mmc_host_suspend);
992
993int tmio_mmc_host_resume(struct device *dev)
994{
995 struct mmc_host *mmc = dev_get_drvdata(dev);
996 struct tmio_mmc_host *host = mmc_priv(mmc);
997
998 /* The MMC core will perform the complete setup */
999 host->pdata->power = false;
1000
1001 if (!host->pm_error)
1002 pm_runtime_get_sync(dev);
1003
1004 tmio_mmc_reset(mmc_priv(mmc));
1005 tmio_mmc_request_dma(host, host->pdata);
1006
1007 return mmc_resume_host(mmc);
1008}
1009EXPORT_SYMBOL(tmio_mmc_host_resume);
1010
1011#endif /* CONFIG_PM */
1012
1013int tmio_mmc_host_runtime_suspend(struct device *dev)
1014{
1015 return 0;
1016}
1017EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
1018
1019int tmio_mmc_host_runtime_resume(struct device *dev)
1020{
1021 struct mmc_host *mmc = dev_get_drvdata(dev);
1022 struct tmio_mmc_host *host = mmc_priv(mmc);
1023 struct tmio_mmc_data *pdata = host->pdata;
1024
1025 tmio_mmc_reset(host);
1026
1027 if (pdata->power) {
1028 /* Only entered after a card-insert interrupt */
1029 tmio_mmc_set_ios(mmc, &mmc->ios);
1030 mmc_detect_change(mmc, msecs_to_jiffies(100));
1031 }
1032
1033 return 0;
1034}
1035EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
1036
897MODULE_LICENSE("GPL v2"); 1037MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
new file mode 100644
index 000000000000..cbb03305b77b
--- /dev/null
+++ b/drivers/mmc/host/vub300.c
@@ -0,0 +1,2506 @@
1/*
2 * Remote VUB300 SDIO/SDmem Host Controller Driver
3 *
4 * Copyright (C) 2010 Elan Digital Systems Limited
5 *
6 * based on USB Skeleton driver - 2.2
7 *
8 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation, version 2
13 *
14 * VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot
15 * Any SDIO/SDmem/MMC device plugged into the VUB300 will appear,
16 * by virtue of this driver, to have been plugged into a local
17 * SDIO host controller, similar to, say, a PCI Ricoh controller
18 * This is because this kernel device driver is both a USB 2.0
19 * client device driver AND an MMC host controller driver. Thus
20 * if there is an existing driver for the inserted SDIO/SDmem/MMC
21 * device then that driver will be used by the kernel to manage
22 * the device in exactly the same fashion as if it had been
23 * directly plugged into, say, a local pci bus Ricoh controller
24 *
25 * RANT: this driver was written using a 128x48 display - converting it
26 * to a line width of 80 makes it very difficult to support. In
27 * particular functions have been broken down into sub functions
28 * and the original meaningful names have been shortened into
29 * cryptic ones.
30 * The problem is that executing a fragment of code subject to
31 * two conditions means an indentation of 24, thus leaving only
32 * 56 characters for a C statement. And that is quite ridiculous!
33 *
34 * Data types: data passed to/from the VUB300 is fixed to a number of
35 * bits and driver data fields reflect that limit by using
36 * u8, u16, u32
37 */
38#include <linux/kernel.h>
39#include <linux/errno.h>
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/kref.h>
44#include <linux/uaccess.h>
45#include <linux/usb.h>
46#include <linux/mutex.h>
47#include <linux/mmc/host.h>
48#include <linux/mmc/card.h>
49#include <linux/mmc/sdio_func.h>
50#include <linux/mmc/sdio_ids.h>
51#include <linux/workqueue.h>
52#include <linux/ctype.h>
53#include <linux/firmware.h>
54#include <linux/scatterlist.h>
55
56struct host_controller_info {
57 u8 info_size;
58 u16 firmware_version;
59 u8 number_of_ports;
60} __packed;
61
62#define FIRMWARE_BLOCK_BOUNDARY 1024
63struct sd_command_header {
64 u8 header_size;
65 u8 header_type;
66 u8 port_number;
67 u8 command_type; /* Bit7 - Rd/Wr */
68 u8 command_index;
69 u8 transfer_size[4]; /* ReadSize + ReadSize */
70 u8 response_type;
71 u8 arguments[4];
72 u8 block_count[2];
73 u8 block_size[2];
74 u8 block_boundary[2];
75 u8 reserved[44]; /* to pad out to 64 bytes */
76} __packed;
77
78struct sd_irqpoll_header {
79 u8 header_size;
80 u8 header_type;
81 u8 port_number;
82 u8 command_type; /* Bit7 - Rd/Wr */
83 u8 padding[16]; /* don't ask why !! */
84 u8 poll_timeout_msb;
85 u8 poll_timeout_lsb;
86 u8 reserved[42]; /* to pad out to 64 bytes */
87} __packed;
88
89struct sd_common_header {
90 u8 header_size;
91 u8 header_type;
92 u8 port_number;
93} __packed;
94
95struct sd_response_header {
96 u8 header_size;
97 u8 header_type;
98 u8 port_number;
99 u8 command_type;
100 u8 command_index;
101 u8 command_response[0];
102} __packed;
103
104struct sd_status_header {
105 u8 header_size;
106 u8 header_type;
107 u8 port_number;
108 u16 port_flags;
109 u32 sdio_clock;
110 u16 host_header_size;
111 u16 func_header_size;
112 u16 ctrl_header_size;
113} __packed;
114
115struct sd_error_header {
116 u8 header_size;
117 u8 header_type;
118 u8 port_number;
119 u8 error_code;
120} __packed;
121
122struct sd_interrupt_header {
123 u8 header_size;
124 u8 header_type;
125 u8 port_number;
126} __packed;
127
128struct offload_registers_access {
129 u8 command_byte[4];
130 u8 Respond_Byte[4];
131} __packed;
132
133#define INTERRUPT_REGISTER_ACCESSES 15
134struct sd_offloaded_interrupt {
135 u8 header_size;
136 u8 header_type;
137 u8 port_number;
138 struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES];
139} __packed;
140
141struct sd_register_header {
142 u8 header_size;
143 u8 header_type;
144 u8 port_number;
145 u8 command_type;
146 u8 command_index;
147 u8 command_response[6];
148} __packed;
149
150#define PIGGYBACK_REGISTER_ACCESSES 14
151struct sd_offloaded_piggyback {
152 struct sd_register_header sdio;
153 struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES];
154} __packed;
155
156union sd_response {
157 struct sd_common_header common;
158 struct sd_status_header status;
159 struct sd_error_header error;
160 struct sd_interrupt_header interrupt;
161 struct sd_response_header response;
162 struct sd_offloaded_interrupt irq;
163 struct sd_offloaded_piggyback pig;
164} __packed;
165
166union sd_command {
167 struct sd_command_header head;
168 struct sd_irqpoll_header poll;
169} __packed;
170
171enum SD_RESPONSE_TYPE {
172 SDRT_UNSPECIFIED = 0,
173 SDRT_NONE,
174 SDRT_1,
175 SDRT_1B,
176 SDRT_2,
177 SDRT_3,
178 SDRT_4,
179 SDRT_5,
180 SDRT_5B,
181 SDRT_6,
182 SDRT_7,
183};
184
185#define RESPONSE_INTERRUPT 0x01
186#define RESPONSE_ERROR 0x02
187#define RESPONSE_STATUS 0x03
188#define RESPONSE_IRQ_DISABLED 0x05
189#define RESPONSE_IRQ_ENABLED 0x06
190#define RESPONSE_PIGGYBACKED 0x07
191#define RESPONSE_NO_INTERRUPT 0x08
192#define RESPONSE_PIG_DISABLED 0x09
193#define RESPONSE_PIG_ENABLED 0x0A
194#define SD_ERROR_1BIT_TIMEOUT 0x01
195#define SD_ERROR_4BIT_TIMEOUT 0x02
196#define SD_ERROR_1BIT_CRC_WRONG 0x03
197#define SD_ERROR_4BIT_CRC_WRONG 0x04
198#define SD_ERROR_1BIT_CRC_ERROR 0x05
199#define SD_ERROR_4BIT_CRC_ERROR 0x06
200#define SD_ERROR_NO_CMD_ENDBIT 0x07
201#define SD_ERROR_NO_1BIT_DATEND 0x08
202#define SD_ERROR_NO_4BIT_DATEND 0x09
203#define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT 0x0A
204#define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT 0x0B
205#define SD_ERROR_ILLEGAL_COMMAND 0x0C
206#define SD_ERROR_NO_DEVICE 0x0D
207#define SD_ERROR_TRANSFER_LENGTH 0x0E
208#define SD_ERROR_1BIT_DATA_TIMEOUT 0x0F
209#define SD_ERROR_4BIT_DATA_TIMEOUT 0x10
210#define SD_ERROR_ILLEGAL_STATE 0x11
211#define SD_ERROR_UNKNOWN_ERROR 0x12
212#define SD_ERROR_RESERVED_ERROR 0x13
213#define SD_ERROR_INVALID_FUNCTION 0x14
214#define SD_ERROR_OUT_OF_RANGE 0x15
215#define SD_ERROR_STAT_CMD 0x16
216#define SD_ERROR_STAT_DATA 0x17
217#define SD_ERROR_STAT_CMD_TIMEOUT 0x18
218#define SD_ERROR_SDCRDY_STUCK 0x19
219#define SD_ERROR_UNHANDLED 0x1A
220#define SD_ERROR_OVERRUN 0x1B
221#define SD_ERROR_PIO_TIMEOUT 0x1C
222
223#define FUN(c) (0x000007 & (c->arg>>28))
224#define REG(c) (0x01FFFF & (c->arg>>9))
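/*
 * FUN() and REG() extract the SDIO function number (argument bits 30:28)
 * and the register address (argument bits 25:9) from a CMD52/CMD53 argument.
 */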
225
226static int limit_speed_to_24_MHz;
227module_param(limit_speed_to_24_MHz, bool, 0644);
228MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz");
229
230static int pad_input_to_usb_pkt;
231module_param(pad_input_to_usb_pkt, bool, 0644);
232MODULE_PARM_DESC(pad_input_to_usb_pkt,
233 "Pad USB data input transfers to whole USB Packet");
234
235static int disable_offload_processing;
236module_param(disable_offload_processing, bool, 0644);
237MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing");
238
239static int force_1_bit_data_xfers;
240module_param(force_1_bit_data_xfers, bool, 0644);
241MODULE_PARM_DESC(force_1_bit_data_xfers,
242 "Force SDIO Data Transfers to 1-bit Mode");
243
244static int force_polling_for_irqs;
245module_param(force_polling_for_irqs, bool, 0644);
246MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts");
247
248static int firmware_irqpoll_timeout = 1024;
249module_param(firmware_irqpoll_timeout, int, 0644);
250MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout");
251
252static int force_max_req_size = 128;
253module_param(force_max_req_size, int, 0644);
254MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes");
255
256#ifdef SMSC_DEVELOPMENT_BOARD
257static int firmware_rom_wait_states = 0x04;
258#else
259static int firmware_rom_wait_states = 0x1C;
260#endif
261
262module_param(firmware_rom_wait_states, int, 0644);
263MODULE_PARM_DESC(firmware_rom_wait_states,
264 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
265
266#define ELAN_VENDOR_ID 0x2201
267#define VUB300_VENDOR_ID 0x0424
268#define VUB300_PRODUCT_ID 0x012C
269static struct usb_device_id vub300_table[] = {
270 {USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)},
271 {USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)},
272 {} /* Terminating entry */
273};
274MODULE_DEVICE_TABLE(usb, vub300_table);
275
276static struct workqueue_struct *cmndworkqueue;
277static struct workqueue_struct *pollworkqueue;
278static struct workqueue_struct *deadworkqueue;
279
280static inline int interface_to_InterfaceNumber(struct usb_interface *interface)
281{
282 if (!interface)
283 return -1;
284 if (!interface->cur_altsetting)
285 return -1;
286 return interface->cur_altsetting->desc.bInterfaceNumber;
287}
288
289struct sdio_register {
290 unsigned func_num:3;
291 unsigned sdio_reg:17;
292 unsigned activate:1;
293 unsigned prepared:1;
294 unsigned regvalue:8;
295 unsigned response:8;
296 unsigned sparebit:26;
297};
298
299struct vub300_mmc_host {
300 struct usb_device *udev;
301 struct usb_interface *interface;
302 struct kref kref;
303 struct mutex cmd_mutex;
304 struct mutex irq_mutex;
305 char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */
306 u8 cmnd_out_ep; /* EndPoint for commands */
307 u8 cmnd_res_ep; /* EndPoint for responses */
308 u8 data_out_ep; /* EndPoint for out data */
309 u8 data_inp_ep; /* EndPoint for inp data */
310 bool card_powered;
311 bool card_present;
312 bool read_only;
313 bool large_usb_packets;
314 bool app_spec; /* ApplicationSpecific */
315 bool irq_enabled; /* by the MMC CORE */
316 bool irq_disabled; /* in the firmware */
317 unsigned bus_width:4;
318 u8 total_offload_count;
319 u8 dynamic_register_count;
320 u8 resp_len;
321 u32 datasize;
322 int errors;
323 int usb_transport_fail;
324 int usb_timed_out;
325 int irqs_queued;
326 struct sdio_register sdio_register[16];
327 struct offload_interrupt_function_register {
328#define MAXREGBITS 4
329#define MAXREGS (1<<MAXREGBITS)
330#define MAXREGMASK (MAXREGS-1)
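/*
 * per-function ring buffer of offloaded register accesses: reg[] is
 * indexed with (offload_point + offload_count) & MAXREGMASK when new
 * entries are queued by __add_offloaded_reg_to_fifo()
 */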
331 u8 offload_count;
332 u32 offload_point;
333 struct offload_registers_access reg[MAXREGS];
334 } fn[8];
335 u16 fbs[8]; /* Function Block Size */
336 struct mmc_command *cmd;
337 struct mmc_request *req;
338 struct mmc_data *data;
339 struct mmc_host *mmc;
340 struct urb *urb;
341 struct urb *command_out_urb;
342 struct urb *command_res_urb;
343 struct completion command_complete;
344 struct completion irqpoll_complete;
345 union sd_command cmnd;
346 union sd_response resp;
347 struct timer_list sg_transfer_timer;
348 struct usb_sg_request sg_request;
349 struct timer_list inactivity_timer;
350 struct work_struct deadwork;
351 struct work_struct cmndwork;
352 struct delayed_work pollwork;
353 struct host_controller_info hc_info;
354 struct sd_status_header system_port_status;
355 u8 padded_buffer[64];
356};
357
358#define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref)
359#define SET_TRANSFER_PSEUDOCODE 21
360#define SET_INTERRUPT_PSEUDOCODE 20
361#define SET_FAILURE_MODE 18
362#define SET_ROM_WAIT_STATES 16
363#define SET_IRQ_ENABLE 13
364#define SET_CLOCK_SPEED 11
365#define SET_FUNCTION_BLOCK_SIZE 9
366#define SET_SD_DATA_MODE 6
367#define SET_SD_POWER 4
368#define ENTER_DFU_MODE 3
369#define GET_HC_INF0 1
370#define GET_SYSTEM_PORT_STATUS 0
371
372static void vub300_delete(struct kref *kref)
373{ /* kref callback - softirq */
374 struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref);
375 struct mmc_host *mmc = vub300->mmc;
376 usb_free_urb(vub300->command_out_urb);
377 vub300->command_out_urb = NULL;
378 usb_free_urb(vub300->command_res_urb);
379 vub300->command_res_urb = NULL;
380 usb_put_dev(vub300->udev);
381 mmc_free_host(mmc);
382 /*
383 * and hence also frees vub300
384 * which is contained at the end of struct mmc
385 */
386}
387
388static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300)
389{
390 kref_get(&vub300->kref);
391 if (queue_work(cmndworkqueue, &vub300->cmndwork)) {
392 /*
393 * then the cmndworkqueue was not previously
394 * running and the above get ref is obvious
395 * required and will be put when the thread
396 * terminates by a specific call
397 */
398 } else {
399 /*
400 * the cmndworkqueue was already running from
401 * a previous invocation and thus to keep the
402 * kref counts correct we must undo the get
403 */
404 kref_put(&vub300->kref, vub300_delete);
405 }
406}
407
408static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay)
409{
410 kref_get(&vub300->kref);
411 if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) {
412 /*
413 * then the pollworkqueue was not previously
414 * running and the above get ref is obvious
415 * required and will be put when the thread
416 * terminates by a specific call
417 */
418 } else {
419 /*
420 * the pollworkqueue was already running from
421 * a previous invocation and thus to keep the
422 * kref counts correct we must undo the get
423 */
424 kref_put(&vub300->kref, vub300_delete);
425 }
426}
427
428static void vub300_queue_dead_work(struct vub300_mmc_host *vub300)
429{
430 kref_get(&vub300->kref);
431 if (queue_work(deadworkqueue, &vub300->deadwork)) {
432 /*
433 * then the deadworkqueue was not previously
434 * running and the above get ref is obvious
435 * required and will be put when the thread
436 * terminates by a specific call
437 */
438 } else {
439 /*
440 * the deadworkqueue was already running from
441 * a previous invocation and thus to keep the
442 * kref counts correct we must undo the get
443 */
444 kref_put(&vub300->kref, vub300_delete);
445 }
446}
447
448static void irqpoll_res_completed(struct urb *urb)
449{ /* urb completion handler - hardirq */
450 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
451 if (urb->status)
452 vub300->usb_transport_fail = urb->status;
453 complete(&vub300->irqpoll_complete);
454}
455
456static void irqpoll_out_completed(struct urb *urb)
457{ /* urb completion handler - hardirq */
458 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
459 if (urb->status) {
460 vub300->usb_transport_fail = urb->status;
461 complete(&vub300->irqpoll_complete);
462 return;
463 } else {
464 int ret;
465 unsigned int pipe =
466 usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
467 usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
468 &vub300->resp, sizeof(vub300->resp),
469 irqpoll_res_completed, vub300);
470 vub300->command_res_urb->actual_length = 0;
471 ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
472 if (ret) {
473 vub300->usb_transport_fail = ret;
474 complete(&vub300->irqpoll_complete);
475 }
476 return;
477 }
478}
479
480static void send_irqpoll(struct vub300_mmc_host *vub300)
481{
482 /* cmd_mutex is held by vub300_pollwork_thread */
483 int retval;
484 int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout);
485 vub300->cmnd.poll.header_size = 22;
486 vub300->cmnd.poll.header_type = 1;
487 vub300->cmnd.poll.port_number = 0;
488 vub300->cmnd.poll.command_type = 2;
489 vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout;
490 vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8);
491 usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
492 usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep)
493 , &vub300->cmnd, sizeof(vub300->cmnd)
494 , irqpoll_out_completed, vub300);
495 retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
496 if (0 > retval) {
497 vub300->usb_transport_fail = retval;
498 vub300_queue_poll_work(vub300, 1);
499 complete(&vub300->irqpoll_complete);
500 return;
501 } else {
502 return;
503 }
504}
505
506static void new_system_port_status(struct vub300_mmc_host *vub300)
507{
508 int old_card_present = vub300->card_present;
509 int new_card_present =
510 (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
511 vub300->read_only =
512 (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
513 if (new_card_present && !old_card_present) {
514 dev_info(&vub300->udev->dev, "card just inserted\n");
515 vub300->card_present = 1;
516 vub300->bus_width = 0;
517 if (disable_offload_processing)
518 strncpy(vub300->vub_name, "EMPTY Processing Disabled",
519 sizeof(vub300->vub_name));
520 else
521 vub300->vub_name[0] = 0;
522 mmc_detect_change(vub300->mmc, 1);
523 } else if (!new_card_present && old_card_present) {
524 dev_info(&vub300->udev->dev, "card just ejected\n");
525 vub300->card_present = 0;
526 mmc_detect_change(vub300->mmc, 0);
527 } else {
528 /* no change */
529 }
530}
531
532static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300,
533 struct offload_registers_access
534 *register_access, u8 func)
535{
536 u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count;
537 memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access,
538 sizeof(struct offload_registers_access));
539 vub300->fn[func].offload_count += 1;
540 vub300->total_offload_count += 1;
541}
542
543static void add_offloaded_reg(struct vub300_mmc_host *vub300,
544 struct offload_registers_access *register_access)
545{
546 u32 Register = ((0x03 & register_access->command_byte[0]) << 15)
547 | ((0xFF & register_access->command_byte[1]) << 7)
548 | ((0xFE & register_access->command_byte[2]) >> 1);
549 u8 func = ((0x70 & register_access->command_byte[0]) >> 4);
550 u8 regs = vub300->dynamic_register_count;
551 u8 i = 0;
552 while (0 < regs-- && 1 == vub300->sdio_register[i].activate) {
553 if (vub300->sdio_register[i].func_num == func &&
554 vub300->sdio_register[i].sdio_reg == Register) {
555 if (vub300->sdio_register[i].prepared == 0)
556 vub300->sdio_register[i].prepared = 1;
557 vub300->sdio_register[i].response =
558 register_access->Respond_Byte[2];
559 vub300->sdio_register[i].regvalue =
560 register_access->Respond_Byte[3];
561 return;
562 } else {
563 i += 1;
564 continue;
565 }
566 };
567 __add_offloaded_reg_to_fifo(vub300, register_access, func);
568}
569
570static void check_vub300_port_status(struct vub300_mmc_host *vub300)
571{
572 /*
573 * cmd_mutex is held by vub300_pollwork_thread,
574 * vub300_deadwork_thread or vub300_cmndwork_thread
575 */
576 int retval;
577 retval =
578 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
579 GET_SYSTEM_PORT_STATUS,
580 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
581 0x0000, 0x0000, &vub300->system_port_status,
582 sizeof(vub300->system_port_status), HZ);
583 if (sizeof(vub300->system_port_status) == retval)
584 new_system_port_status(vub300);
585}
586
587static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300)
588{
589 /* cmd_mutex is held by vub300_pollwork_thread */
590 if (vub300->command_res_urb->actual_length == 0)
591 return;
592
593 switch (vub300->resp.common.header_type) {
594 case RESPONSE_INTERRUPT:
595 mutex_lock(&vub300->irq_mutex);
596 if (vub300->irq_enabled)
597 mmc_signal_sdio_irq(vub300->mmc);
598 else
599 vub300->irqs_queued += 1;
600 vub300->irq_disabled = 1;
601 mutex_unlock(&vub300->irq_mutex);
602 break;
603 case RESPONSE_ERROR:
604 if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE)
605 check_vub300_port_status(vub300);
606 break;
607 case RESPONSE_STATUS:
608 vub300->system_port_status = vub300->resp.status;
609 new_system_port_status(vub300);
610 if (!vub300->card_present)
611 vub300_queue_poll_work(vub300, HZ / 5);
612 break;
613 case RESPONSE_IRQ_DISABLED:
614 {
615 int offloaded_data_length = vub300->resp.common.header_size - 3;
616 int register_count = offloaded_data_length >> 3;
617 int ri = 0;
618 while (register_count--) {
619 add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
620 ri += 1;
621 }
622 mutex_lock(&vub300->irq_mutex);
623 if (vub300->irq_enabled)
624 mmc_signal_sdio_irq(vub300->mmc);
625 else
626 vub300->irqs_queued += 1;
627 vub300->irq_disabled = 1;
628 mutex_unlock(&vub300->irq_mutex);
629 break;
630 }
631 case RESPONSE_IRQ_ENABLED:
632 {
633 int offloaded_data_length = vub300->resp.common.header_size - 3;
634 int register_count = offloaded_data_length >> 3;
635 int ri = 0;
636 while (register_count--) {
637 add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
638 ri += 1;
639 }
640 mutex_lock(&vub300->irq_mutex);
641 if (vub300->irq_enabled)
642 mmc_signal_sdio_irq(vub300->mmc);
643 else
644 vub300->irqs_queued += 1;
647 vub300->irq_disabled = 0;
648 mutex_unlock(&vub300->irq_mutex);
649 break;
650 }
651 case RESPONSE_NO_INTERRUPT:
652 vub300_queue_poll_work(vub300, 1);
653 break;
654 default:
655 break;
656 }
657}
658
659static void __do_poll(struct vub300_mmc_host *vub300)
660{
661 /* cmd_mutex is held by vub300_pollwork_thread */
662 long commretval;
663 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
664 init_completion(&vub300->irqpoll_complete);
665 send_irqpoll(vub300);
666 commretval = wait_for_completion_timeout(&vub300->irqpoll_complete,
667 msecs_to_jiffies(500));
668 if (vub300->usb_transport_fail) {
669 /* no need to do anything */
670 } else if (commretval == 0) {
671 vub300->usb_timed_out = 1;
672 usb_kill_urb(vub300->command_out_urb);
673 usb_kill_urb(vub300->command_res_urb);
674 } else if (commretval < 0) {
675 vub300_queue_poll_work(vub300, 1);
676 } else { /* commretval > 0 */
677 __vub300_irqpoll_response(vub300);
678 }
679}
680
681/* this thread runs only when the driver
682 * is trying to poll the device for an IRQ
683 */
684static void vub300_pollwork_thread(struct work_struct *work)
685{ /* NOT irq */
686 struct vub300_mmc_host *vub300 = container_of(work,
687 struct vub300_mmc_host, pollwork.work);
688 if (!vub300->interface) {
689 kref_put(&vub300->kref, vub300_delete);
690 return;
691 }
692 mutex_lock(&vub300->cmd_mutex);
693 if (vub300->cmd) {
694 vub300_queue_poll_work(vub300, 1);
695 } else if (!vub300->card_present) {
696 /* no need to do anything */
697 } else { /* vub300->card_present */
698 mutex_lock(&vub300->irq_mutex);
699 if (!vub300->irq_enabled) {
700 mutex_unlock(&vub300->irq_mutex);
701 } else if (vub300->irqs_queued) {
702 vub300->irqs_queued -= 1;
703 mmc_signal_sdio_irq(vub300->mmc);
704 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
705 mutex_unlock(&vub300->irq_mutex);
706 } else { /* NOT vub300->irqs_queued */
707 mutex_unlock(&vub300->irq_mutex);
708 __do_poll(vub300);
709 }
710 }
711 mutex_unlock(&vub300->cmd_mutex);
712 kref_put(&vub300->kref, vub300_delete);
713}
714
715static void vub300_deadwork_thread(struct work_struct *work)
716{ /* NOT irq */
717 struct vub300_mmc_host *vub300 =
718 container_of(work, struct vub300_mmc_host, deadwork);
719 if (!vub300->interface) {
720 kref_put(&vub300->kref, vub300_delete);
721 return;
722 }
723 mutex_lock(&vub300->cmd_mutex);
724 if (vub300->cmd) {
725 /*
726 * a command got in as the inactivity
727 * timer expired - so we just let the
728 * processing of the command show if
729 * the device is dead
730 */
731 } else if (vub300->card_present) {
732 check_vub300_port_status(vub300);
733 } else if (vub300->mmc && vub300->mmc->card &&
734 mmc_card_present(vub300->mmc->card)) {
735 /*
736 * the MMC core must not have responded
737 * to the previous indication - lets
738 * hope that it eventually does so we
739 * will just ignore this for now
740 */
741 } else {
742 check_vub300_port_status(vub300);
743 }
744 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
745 mutex_unlock(&vub300->cmd_mutex);
746 kref_put(&vub300->kref, vub300_delete);
747}
748
749static void vub300_inactivity_timer_expired(unsigned long data)
750{ /* softirq */
751 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
752 if (!vub300->interface) {
753 kref_put(&vub300->kref, vub300_delete);
754 } else if (vub300->cmd) {
755 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
756 } else {
757 vub300_queue_dead_work(vub300);
758 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
759 }
760}
761
762static int vub300_response_error(u8 error_code)
763{
764 switch (error_code) {
765 case SD_ERROR_PIO_TIMEOUT:
766 case SD_ERROR_1BIT_TIMEOUT:
767 case SD_ERROR_4BIT_TIMEOUT:
768 return -ETIMEDOUT;
769 case SD_ERROR_STAT_DATA:
770 case SD_ERROR_OVERRUN:
771 case SD_ERROR_STAT_CMD:
772 case SD_ERROR_STAT_CMD_TIMEOUT:
773 case SD_ERROR_SDCRDY_STUCK:
774 case SD_ERROR_UNHANDLED:
775 case SD_ERROR_1BIT_CRC_WRONG:
776 case SD_ERROR_4BIT_CRC_WRONG:
777 case SD_ERROR_1BIT_CRC_ERROR:
778 case SD_ERROR_4BIT_CRC_ERROR:
779 case SD_ERROR_NO_CMD_ENDBIT:
780 case SD_ERROR_NO_1BIT_DATEND:
781 case SD_ERROR_NO_4BIT_DATEND:
782 case SD_ERROR_1BIT_DATA_TIMEOUT:
783 case SD_ERROR_4BIT_DATA_TIMEOUT:
784 case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT:
785 case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT:
786 return -EILSEQ;
787 case 33:
788 return -EILSEQ;
789 case SD_ERROR_ILLEGAL_COMMAND:
790 return -EINVAL;
791 case SD_ERROR_NO_DEVICE:
792 return -ENOMEDIUM;
793 default:
794 return -ENODEV;
795 }
796}
797
798static void command_res_completed(struct urb *urb)
799{ /* urb completion handler - hardirq */
800 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
801 if (urb->status) {
802 /* we have to let the initiator handle the error */
803 } else if (vub300->command_res_urb->actual_length == 0) {
804 /*
805 * we have seen this happen once or twice and
806 * we suspect a buggy USB host controller
807 */
808 } else if (!vub300->data) {
809 /* this means that the command (typically CMD52) succeeded */
810 } else if (vub300->resp.common.header_type != 0x02) {
811 /*
812 * this is an error response from the VUB300 chip
813 * and we let the initiator handle it
814 */
815 } else if (vub300->urb) {
816 vub300->cmd->error =
817 vub300_response_error(vub300->resp.error.error_code);
818 usb_unlink_urb(vub300->urb);
819 } else {
820 vub300->cmd->error =
821 vub300_response_error(vub300->resp.error.error_code);
822 usb_sg_cancel(&vub300->sg_request);
823 }
824 complete(&vub300->command_complete); /* got_response_in */
825}
826
827static void command_out_completed(struct urb *urb)
828{ /* urb completion handler - hardirq */
829 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
830 if (urb->status) {
831 complete(&vub300->command_complete);
832 } else {
833 int ret;
834 unsigned int pipe =
835 usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
836 usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
837 &vub300->resp, sizeof(vub300->resp),
838 command_res_completed, vub300);
839 vub300->command_res_urb->actual_length = 0;
840 ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
841 if (ret == 0) {
842 /*
843 * the urb completion handler will call
844 * our completion handler
845 */
846 } else {
847 /*
848 * and thus we only call it directly
849 * when it will not be called
850 */
851 complete(&vub300->command_complete);
852 }
853 }
854}
855
856/*
857 * the STUFF bits are masked out for the comparisons
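 * (what is snooped: CMD52 writes to the per-function I/O block size
 * registers in the FBR, offsets 0xn10/0xn11, update fbs[n]; CMD52 writes
 * to CCCR 0x07, the bus interface control register, update bus_width)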
858 */
859static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300,
860 u32 cmd_arg)
861{
862 if ((0xFBFFFE00 & cmd_arg) == 0x80022200)
863 vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]);
864 else if ((0xFBFFFE00 & cmd_arg) == 0x80022000)
865 vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]);
866 else if ((0xFBFFFE00 & cmd_arg) == 0x80042200)
867 vub300->fbs[2] = (cmd_arg << 8) | (0x00FF & vub300->fbs[2]);
868 else if ((0xFBFFFE00 & cmd_arg) == 0x80042000)
869 vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]);
870 else if ((0xFBFFFE00 & cmd_arg) == 0x80062200)
871 vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]);
872 else if ((0xFBFFFE00 & cmd_arg) == 0x80062000)
873 vub300->fbs[3] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]);
874 else if ((0xFBFFFE00 & cmd_arg) == 0x80082200)
875 vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]);
876 else if ((0xFBFFFE00 & cmd_arg) == 0x80082000)
877 vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]);
878 else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200)
879 vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]);
880 else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000)
881 vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]);
882 else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200)
883 vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]);
884 else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000)
885 vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]);
886 else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200)
887 vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]);
888 else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000)
889 vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]);
890 else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00)
891 vub300->bus_width = 1;
892 else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02)
893 vub300->bus_width = 4;
894}
895
896static void send_command(struct vub300_mmc_host *vub300)
897{
898 /* cmd_mutex is held by vub300_cmndwork_thread */
899 struct mmc_command *cmd = vub300->cmd;
900 struct mmc_data *data = vub300->data;
901 int retval;
902 int i;
903 u8 response_type;
904 if (vub300->app_spec) {
905 switch (cmd->opcode) {
906 case 6:
907 response_type = SDRT_1;
908 vub300->resp_len = 6;
909 if (0x00000000 == (0x00000003 & cmd->arg))
910 vub300->bus_width = 1;
911 else if (0x00000002 == (0x00000003 & cmd->arg))
912 vub300->bus_width = 4;
913 else
914 dev_err(&vub300->udev->dev,
915 "unexpected ACMD6 bus_width=%d\n",
916 0x00000003 & cmd->arg);
917 break;
918 case 13:
919 response_type = SDRT_1;
920 vub300->resp_len = 6;
921 break;
922 case 22:
923 response_type = SDRT_1;
924 vub300->resp_len = 6;
925 break;
926 case 23:
927 response_type = SDRT_1;
928 vub300->resp_len = 6;
929 break;
930 case 41:
931 response_type = SDRT_3;
932 vub300->resp_len = 6;
933 break;
934 case 42:
935 response_type = SDRT_1;
936 vub300->resp_len = 6;
937 break;
938 case 51:
939 response_type = SDRT_1;
940 vub300->resp_len = 6;
941 break;
942 case 55:
943 response_type = SDRT_1;
944 vub300->resp_len = 6;
945 break;
946 default:
947 vub300->resp_len = 0;
948 cmd->error = -EINVAL;
949 complete(&vub300->command_complete);
950 return;
951 }
952 vub300->app_spec = 0;
953 } else {
954 switch (cmd->opcode) {
955 case 0:
956 response_type = SDRT_NONE;
957 vub300->resp_len = 0;
958 break;
959 case 1:
960 response_type = SDRT_3;
961 vub300->resp_len = 6;
962 break;
963 case 2:
964 response_type = SDRT_2;
965 vub300->resp_len = 17;
966 break;
967 case 3:
968 response_type = SDRT_6;
969 vub300->resp_len = 6;
970 break;
971 case 4:
972 response_type = SDRT_NONE;
973 vub300->resp_len = 0;
974 break;
975 case 5:
976 response_type = SDRT_4;
977 vub300->resp_len = 6;
978 break;
979 case 6:
980 response_type = SDRT_1;
981 vub300->resp_len = 6;
982 break;
983 case 7:
984 response_type = SDRT_1B;
985 vub300->resp_len = 6;
986 break;
987 case 8:
988 response_type = SDRT_7;
989 vub300->resp_len = 6;
990 break;
991 case 9:
992 response_type = SDRT_2;
993 vub300->resp_len = 17;
994 break;
995 case 10:
996 response_type = SDRT_2;
997 vub300->resp_len = 17;
998 break;
999 case 12:
1000 response_type = SDRT_1B;
1001 vub300->resp_len = 6;
1002 break;
1003 case 13:
1004 response_type = SDRT_1;
1005 vub300->resp_len = 6;
1006 break;
1007 case 15:
1008 response_type = SDRT_NONE;
1009 vub300->resp_len = 0;
1010 break;
1011 case 16:
1012 for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
1013 vub300->fbs[i] = 0xFFFF & cmd->arg;
1014 response_type = SDRT_1;
1015 vub300->resp_len = 6;
1016 break;
1017 case 17:
1018 case 18:
1019 case 24:
1020 case 25:
1021 case 27:
1022 response_type = SDRT_1;
1023 vub300->resp_len = 6;
1024 break;
1025 case 28:
1026 case 29:
1027 response_type = SDRT_1B;
1028 vub300->resp_len = 6;
1029 break;
1030 case 30:
1031 case 32:
1032 case 33:
1033 response_type = SDRT_1;
1034 vub300->resp_len = 6;
1035 break;
1036 case 38:
1037 response_type = SDRT_1B;
1038 vub300->resp_len = 6;
1039 break;
1040 case 42:
1041 response_type = SDRT_1;
1042 vub300->resp_len = 6;
1043 break;
1044 case 52:
1045 response_type = SDRT_5;
1046 vub300->resp_len = 6;
1047 snoop_block_size_and_bus_width(vub300, cmd->arg);
1048 break;
1049 case 53:
1050 response_type = SDRT_5;
1051 vub300->resp_len = 6;
1052 break;
1053 case 55:
1054 response_type = SDRT_1;
1055 vub300->resp_len = 6;
1056 vub300->app_spec = 1;
1057 break;
1058 case 56:
1059 response_type = SDRT_1;
1060 vub300->resp_len = 6;
1061 break;
1062 default:
1063 vub300->resp_len = 0;
1064 cmd->error = -EINVAL;
1065 complete(&vub300->command_complete);
1066 return;
1067 }
1068 }
1069 /*
1070 * it is a shame that we cannot use "sizeof(struct sd_command_header)",
1071 * because the packet _must_ be padded to 64 bytes
1072 */
1073 vub300->cmnd.head.header_size = 20;
1074 vub300->cmnd.head.header_type = 0x00;
1075 vub300->cmnd.head.port_number = 0; /* "0" means port 1 */
1076 vub300->cmnd.head.command_type = 0x00; /* standard read command */
1077 vub300->cmnd.head.response_type = response_type;
1078 vub300->cmnd.head.command_index = cmd->opcode;
1079 vub300->cmnd.head.arguments[0] = cmd->arg >> 24;
1080 vub300->cmnd.head.arguments[1] = cmd->arg >> 16;
1081 vub300->cmnd.head.arguments[2] = cmd->arg >> 8;
1082 vub300->cmnd.head.arguments[3] = cmd->arg >> 0;
1083 if (cmd->opcode == 52) {
1084 int fn = 0x7 & (cmd->arg >> 28);
1085 vub300->cmnd.head.block_count[0] = 0;
1086 vub300->cmnd.head.block_count[1] = 0;
1087 vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF;
1088 vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF;
1089 vub300->cmnd.head.command_type = 0x00;
1090 vub300->cmnd.head.transfer_size[0] = 0;
1091 vub300->cmnd.head.transfer_size[1] = 0;
1092 vub300->cmnd.head.transfer_size[2] = 0;
1093 vub300->cmnd.head.transfer_size[3] = 0;
1094 } else if (!data) {
1095 vub300->cmnd.head.block_count[0] = 0;
1096 vub300->cmnd.head.block_count[1] = 0;
1097 vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF;
1098 vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF;
1099 vub300->cmnd.head.command_type = 0x00;
1100 vub300->cmnd.head.transfer_size[0] = 0;
1101 vub300->cmnd.head.transfer_size[1] = 0;
1102 vub300->cmnd.head.transfer_size[2] = 0;
1103 vub300->cmnd.head.transfer_size[3] = 0;
1104 } else if (cmd->opcode == 53) {
1105 int fn = 0x7 & (cmd->arg >> 28);
1106 if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK MODE */
1107 vub300->cmnd.head.block_count[0] =
1108 (data->blocks >> 8) & 0xFF;
1109 vub300->cmnd.head.block_count[1] =
1110 (data->blocks >> 0) & 0xFF;
1111 vub300->cmnd.head.block_size[0] =
1112 (data->blksz >> 8) & 0xFF;
1113 vub300->cmnd.head.block_size[1] =
1114 (data->blksz >> 0) & 0xFF;
1115 } else { /* BYTE MODE */
1116 vub300->cmnd.head.block_count[0] = 0;
1117 vub300->cmnd.head.block_count[1] = 0;
1118 vub300->cmnd.head.block_size[0] =
1119 (vub300->datasize >> 8) & 0xFF;
1120 vub300->cmnd.head.block_size[1] =
1121 (vub300->datasize >> 0) & 0xFF;
1122 }
1123 vub300->cmnd.head.command_type =
1124 (MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
1125 vub300->cmnd.head.transfer_size[0] =
1126 (vub300->datasize >> 24) & 0xFF;
1127 vub300->cmnd.head.transfer_size[1] =
1128 (vub300->datasize >> 16) & 0xFF;
1129 vub300->cmnd.head.transfer_size[2] =
1130 (vub300->datasize >> 8) & 0xFF;
1131 vub300->cmnd.head.transfer_size[3] =
1132 (vub300->datasize >> 0) & 0xFF;
1133 if (vub300->datasize < vub300->fbs[fn]) {
1134 vub300->cmnd.head.block_count[0] = 0;
1135 vub300->cmnd.head.block_count[1] = 0;
1136 }
1137 } else {
1138 vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF;
1139 vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF;
1140 vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF;
1141 vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF;
1142 vub300->cmnd.head.command_type =
1143 (MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
1144 vub300->cmnd.head.transfer_size[0] =
1145 (vub300->datasize >> 24) & 0xFF;
1146 vub300->cmnd.head.transfer_size[1] =
1147 (vub300->datasize >> 16) & 0xFF;
1148 vub300->cmnd.head.transfer_size[2] =
1149 (vub300->datasize >> 8) & 0xFF;
1150 vub300->cmnd.head.transfer_size[3] =
1151 (vub300->datasize >> 0) & 0xFF;
1152 if (vub300->datasize < vub300->fbs[0]) {
1153 vub300->cmnd.head.block_count[0] = 0;
1154 vub300->cmnd.head.block_count[1] = 0;
1155 }
1156 }
1157 if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) {
1158 u16 block_size = vub300->cmnd.head.block_size[1] |
1159 (vub300->cmnd.head.block_size[0] << 8);
1160 u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY -
1161 (FIRMWARE_BLOCK_BOUNDARY % block_size);
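/* i.e. the largest multiple of block_size not exceeding FIRMWARE_BLOCK_BOUNDARY */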
1162 vub300->cmnd.head.block_boundary[0] =
1163 (block_boundary >> 8) & 0xFF;
1164 vub300->cmnd.head.block_boundary[1] =
1165 (block_boundary >> 0) & 0xFF;
1166 } else {
1167 vub300->cmnd.head.block_boundary[0] = 0;
1168 vub300->cmnd.head.block_boundary[1] = 0;
1169 }
1170 usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
1171 usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep),
1172 &vub300->cmnd, sizeof(vub300->cmnd),
1173 command_out_completed, vub300);
1174 retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
1175 if (retval < 0) {
1176 cmd->error = retval;
1177 complete(&vub300->command_complete);
1178 return;
1179 } else {
1180 return;
1181 }
1182}
1183
1184/*
1185 * timer callback runs in atomic mode
1186 * so it cannot call usb_kill_urb()
1187 */
1188static void vub300_sg_timed_out(unsigned long data)
1189{
1190 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
1191 vub300->usb_timed_out = 1;
1192 usb_sg_cancel(&vub300->sg_request);
1193 usb_unlink_urb(vub300->command_out_urb);
1194 usb_unlink_urb(vub300->command_res_urb);
1195}
1196
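/* round up to the next multiple of 64: add 63, then clear the low six bits */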
1197static u16 roundup_to_multiple_of_64(u16 number)
1198{
1199 return 0xFFC0 & (0x3F + number);
1200}
1201
1202/*
1203 * this is a separate function to solve the 80 column width restriction
1204 */
1205static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
1206 const struct firmware *fw)
1207{
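	/*
	 * Layout of the offload firmware blob, as parsed below: a
	 * NUL-terminated comment string, a 16-bit big-endian length followed
	 * by the interrupt pseudocode, a 16-bit big-endian length followed by
	 * the transfer pseudocode, then a register count byte followed by
	 * four bytes (function number + 24-bit register address) per dynamic
	 * register.
	 */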
1208 u8 register_count = 0;
1209 u16 ts = 0;
1210 u16 interrupt_size = 0;
1211 const u8 *data = fw->data;
1212 int size = fw->size;
1213 u8 c;
1214 dev_info(&vub300->udev->dev, "using %s for SDIO offload processing\n",
1215 vub300->vub_name);
1216 do {
1217 c = *data++;
1218 } while (size-- && c); /* skip comment */
1219 dev_info(&vub300->udev->dev, "using offload firmware %s %s\n", fw->data,
1220 vub300->vub_name);
1221 if (size < 4) {
1222 dev_err(&vub300->udev->dev,
1223 "corrupt offload pseudocode in firmware %s\n",
1224 vub300->vub_name);
1225 strncpy(vub300->vub_name, "corrupt offload pseudocode",
1226 sizeof(vub300->vub_name));
1227 return;
1228 }
1229 interrupt_size += *data++;
1230 size -= 1;
1231 interrupt_size <<= 8;
1232 interrupt_size += *data++;
1233 size -= 1;
1234 if (interrupt_size < size) {
1235 u16 xfer_length = roundup_to_multiple_of_64(interrupt_size);
1236 u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
1237 if (xfer_buffer) {
1238 int retval;
1239 memcpy(xfer_buffer, data, interrupt_size);
1240 memset(xfer_buffer + interrupt_size, 0,
1241 xfer_length - interrupt_size);
1242 size -= interrupt_size;
1243 data += interrupt_size;
1244 retval =
1245 usb_control_msg(vub300->udev,
1246 usb_sndctrlpipe(vub300->udev, 0),
1247 SET_INTERRUPT_PSEUDOCODE,
1248 USB_DIR_OUT | USB_TYPE_VENDOR |
1249 USB_RECIP_DEVICE, 0x0000, 0x0000,
1250 xfer_buffer, xfer_length, HZ);
1251 kfree(xfer_buffer);
1252 if (retval < 0) {
1253 strncpy(vub300->vub_name,
1254 "SDIO pseudocode download failed",
1255 sizeof(vub300->vub_name));
1256 return;
1257 }
1258 } else {
1259 dev_err(&vub300->udev->dev,
1260 "not enough memory for xfer buffer to send"
1261 " INTERRUPT_PSEUDOCODE for %s %s\n", fw->data,
1262 vub300->vub_name);
1263 strncpy(vub300->vub_name,
1264 "SDIO interrupt pseudocode download failed",
1265 sizeof(vub300->vub_name));
1266 return;
1267 }
1268 } else {
1269 dev_err(&vub300->udev->dev,
1270 "corrupt interrupt pseudocode in firmware %s %s\n",
1271 fw->data, vub300->vub_name);
1272 strncpy(vub300->vub_name, "corrupt interrupt pseudocode",
1273 sizeof(vub300->vub_name));
1274 return;
1275 }
1276 ts += *data++;
1277 size -= 1;
1278 ts <<= 8;
1279 ts += *data++;
1280 size -= 1;
1281 if (ts < size) {
1282 u16 xfer_length = roundup_to_multiple_of_64(ts);
1283 u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
1284 if (xfer_buffer) {
1285 int retval;
1286 memcpy(xfer_buffer, data, ts);
1287 memset(xfer_buffer + ts, 0,
1288 xfer_length - ts);
1289 size -= ts;
1290 data += ts;
1291 retval =
1292 usb_control_msg(vub300->udev,
1293 usb_sndctrlpipe(vub300->udev, 0),
1294 SET_TRANSFER_PSEUDOCODE,
1295 USB_DIR_OUT | USB_TYPE_VENDOR |
1296 USB_RECIP_DEVICE, 0x0000, 0x0000,
1297 xfer_buffer, xfer_length, HZ);
1298 kfree(xfer_buffer);
1299 if (retval < 0) {
1300 strncpy(vub300->vub_name,
1301 "SDIO pseudocode download failed",
1302 sizeof(vub300->vub_name));
1303 return;
1304 }
1305 } else {
1306 dev_err(&vub300->udev->dev,
1307 "not enough memory for xfer buffer to send"
1308 " TRANSFER_PSEUDOCODE for %s %s\n", fw->data,
1309 vub300->vub_name);
1310 strncpy(vub300->vub_name,
1311 "SDIO transfer pseudocode download failed",
1312 sizeof(vub300->vub_name));
1313 return;
1314 }
1315 } else {
1316 dev_err(&vub300->udev->dev,
1317 "corrupt transfer pseudocode in firmware %s %s\n",
1318 fw->data, vub300->vub_name);
1319 strncpy(vub300->vub_name, "corrupt transfer pseudocode",
1320 sizeof(vub300->vub_name));
1321 return;
1322 }
1323 register_count += *data++;
1324 size -= 1;
1325 if (register_count * 4 == size) {
1326 int I = vub300->dynamic_register_count = register_count;
1327 int i = 0;
1328 while (I--) {
1329 unsigned int func_num = 0;
1330 vub300->sdio_register[i].func_num = *data++;
1331 size -= 1;
1332 func_num += *data++;
1333 size -= 1;
1334 func_num <<= 8;
1335 func_num += *data++;
1336 size -= 1;
1337 func_num <<= 8;
1338 func_num += *data++;
1339 size -= 1;
1340 vub300->sdio_register[i].sdio_reg = func_num;
1341 vub300->sdio_register[i].activate = 1;
1342 vub300->sdio_register[i].prepared = 0;
1343 i += 1;
1344 }
1345 dev_info(&vub300->udev->dev,
1346 "initialized %d dynamic pseudocode registers\n",
1347 vub300->dynamic_register_count);
1348 return;
1349 } else {
1350 dev_err(&vub300->udev->dev,
1351 "corrupt dynamic registers in firmware %s\n",
1352 vub300->vub_name);
1353 strncpy(vub300->vub_name, "corrupt dynamic registers",
1354 sizeof(vub300->vub_name));
1355 return;
1356 }
1357}
1358
1359/*
1360 * if the binary containing the EMPTY PseudoCode can not be found
1361 * vub300->vub_name is set anyway in order to prevent an automatic retry
1362 */
1363static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
1364{
1365 struct mmc_card *card = vub300->mmc->card;
1366 int sdio_funcs = card->sdio_funcs;
1367 const struct firmware *fw = NULL;
1368 int l = snprintf(vub300->vub_name, sizeof(vub300->vub_name),
1369 "vub_%04X%04X", card->cis.vendor, card->cis.device);
1370 int n = 0;
1371 int retval;
1372 for (n = 0; n < sdio_funcs; n++) {
1373 struct sdio_func *sf = card->sdio_func[n];
1374 l += snprintf(vub300->vub_name + l,
1375 sizeof(vub300->vub_name) - l, "_%04X%04X",
1376 sf->vendor, sf->device);
1377 };
1378 snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin");
1379 dev_info(&vub300->udev->dev, "requesting offload firmware %s\n",
1380 vub300->vub_name);
1381 retval = request_firmware(&fw, vub300->vub_name, &card->dev);
1382 if (retval < 0) {
1383 strncpy(vub300->vub_name, "vub_default.bin",
1384 sizeof(vub300->vub_name));
1385 retval = request_firmware(&fw, vub300->vub_name, &card->dev);
1386 if (retval < 0) {
1387 strncpy(vub300->vub_name,
1388 "no SDIO offload firmware found",
1389 sizeof(vub300->vub_name));
1390 } else {
1391 __download_offload_pseudocode(vub300, fw);
1392 release_firmware(fw);
1393 }
1394 } else {
1395 __download_offload_pseudocode(vub300, fw);
1396 release_firmware(fw);
1397 }
1398}
1399
1400static void vub300_usb_bulk_msg_completion(struct urb *urb)
1401{ /* urb completion handler - hardirq */
1402 complete((struct completion *)urb->context);
1403}
1404
1405static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300,
1406 unsigned int pipe, void *data, int len,
1407 int *actual_length, int timeout_msecs)
1408{
1409 /* cmd_mutex is held by vub300_cmndwork_thread */
1410 struct usb_device *usb_dev = vub300->udev;
1411 struct completion done;
1412 int retval;
1413 vub300->urb = usb_alloc_urb(0, GFP_KERNEL);
1414 if (!vub300->urb)
1415 return -ENOMEM;
1416 usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len,
1417 vub300_usb_bulk_msg_completion, NULL);
1418 init_completion(&done);
1419 vub300->urb->context = &done;
1420 vub300->urb->actual_length = 0;
1421 retval = usb_submit_urb(vub300->urb, GFP_KERNEL);
1422 if (unlikely(retval))
1423 goto out;
1424 if (!wait_for_completion_timeout
1425 (&done, msecs_to_jiffies(timeout_msecs))) {
1426 retval = -ETIMEDOUT;
1427 usb_kill_urb(vub300->urb);
1428 } else {
1429 retval = vub300->urb->status;
1430 }
1431out:
1432 *actual_length = vub300->urb->actual_length;
1433 usb_free_urb(vub300->urb);
1434 vub300->urb = NULL;
1435 return retval;
1436}
1437
1438static int __command_read_data(struct vub300_mmc_host *vub300,
1439 struct mmc_command *cmd, struct mmc_data *data)
1440{
1441 /* cmd_mutex is held by vub300_cmndwork_thread */
1442 int linear_length = vub300->datasize;
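/* round the read up to the USB bulk packet size: 512 bytes when large packets are in use, 64 bytes otherwise */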
1443 int padded_length = vub300->large_usb_packets ?
1444 ((511 + linear_length) >> 9) << 9 :
1445 ((63 + linear_length) >> 6) << 6;
1446 if ((padded_length == linear_length) || !pad_input_to_usb_pkt) {
1447 int result;
1448 unsigned pipe;
1449 pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep);
1450 result = usb_sg_init(&vub300->sg_request, vub300->udev,
1451 pipe, 0, data->sg,
1452 data->sg_len, 0, GFP_KERNEL);
1453 if (result < 0) {
1454 usb_unlink_urb(vub300->command_out_urb);
1455 usb_unlink_urb(vub300->command_res_urb);
1456 cmd->error = result;
1457 data->bytes_xfered = 0;
1458 return 0;
1459 } else {
1460 vub300->sg_transfer_timer.expires =
1461 jiffies + msecs_to_jiffies(2000 +
1462 (linear_length / 16384));
1463 add_timer(&vub300->sg_transfer_timer);
1464 usb_sg_wait(&vub300->sg_request);
1465 del_timer(&vub300->sg_transfer_timer);
1466 if (vub300->sg_request.status < 0) {
1467 cmd->error = vub300->sg_request.status;
1468 data->bytes_xfered = 0;
1469 return 0;
1470 } else {
1471 data->bytes_xfered = vub300->datasize;
1472 return linear_length;
1473 }
1474 }
1475 } else {
1476 u8 *buf = kmalloc(padded_length, GFP_KERNEL);
1477 if (buf) {
1478 int result;
1479 unsigned pipe = usb_rcvbulkpipe(vub300->udev,
1480 vub300->data_inp_ep);
1481 int actual_length = 0;
1482 result = vub300_usb_bulk_msg(vub300, pipe, buf,
1483 padded_length, &actual_length,
1484 2000 + (padded_length / 16384));
1485 if (result < 0) {
1486 cmd->error = result;
1487 data->bytes_xfered = 0;
1488 kfree(buf);
1489 return 0;
1490 } else if (actual_length < linear_length) {
1491 cmd->error = -EREMOTEIO;
1492 data->bytes_xfered = 0;
1493 kfree(buf);
1494 return 0;
1495 } else {
1496 sg_copy_from_buffer(data->sg, data->sg_len, buf,
1497 linear_length);
1498 kfree(buf);
1499 data->bytes_xfered = vub300->datasize;
1500 return linear_length;
1501 }
1502 } else {
1503 cmd->error = -ENOMEM;
1504 data->bytes_xfered = 0;
1505 return 0;
1506 }
1507 }
1508}
1509
1510static int __command_write_data(struct vub300_mmc_host *vub300,
1511 struct mmc_command *cmd, struct mmc_data *data)
1512{
1513 /* cmd_mutex is held by vub300_cmndwork_thread */
1514 unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep);
1515 int linear_length = vub300->datasize;
1516 int modulo_64_length = linear_length & 0x003F;
1517 int modulo_512_length = linear_length & 0x01FF;
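/* three cases follow: tiny writes use the fixed padded buffer, writes that would end in an awkwardly short USB packet are padded via a temporary buffer, and aligned writes go straight from the scatterlist */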
1518 if (linear_length < 64) {
1519 int result;
1520 int actual_length;
1521 sg_copy_to_buffer(data->sg, data->sg_len,
1522 vub300->padded_buffer,
1523 sizeof(vub300->padded_buffer));
1524 memset(vub300->padded_buffer + linear_length, 0,
1525 sizeof(vub300->padded_buffer) - linear_length);
1526 result = vub300_usb_bulk_msg(vub300, pipe, vub300->padded_buffer,
1527 sizeof(vub300->padded_buffer),
1528 &actual_length, 2000 +
1529 (sizeof(vub300->padded_buffer) /
1530 16384));
1531 if (result < 0) {
1532 cmd->error = result;
1533 data->bytes_xfered = 0;
1534 } else {
1535 data->bytes_xfered = vub300->datasize;
1536 }
1537 } else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) ||
1538 (vub300->large_usb_packets && (64 > modulo_512_length))
1539 ) { /* don't you just love these workarounds */
1540 int padded_length = ((63 + linear_length) >> 6) << 6;
1541 u8 *buf = kmalloc(padded_length, GFP_KERNEL);
1542 if (buf) {
1543 int result;
1544 int actual_length;
1545 sg_copy_to_buffer(data->sg, data->sg_len, buf,
1546 padded_length);
1547 memset(buf + linear_length, 0,
1548 padded_length - linear_length);
1549 result =
1550 vub300_usb_bulk_msg(vub300, pipe, buf,
1551 padded_length, &actual_length,
1552 2000 + padded_length / 16384);
1553 kfree(buf);
1554 if (result < 0) {
1555 cmd->error = result;
1556 data->bytes_xfered = 0;
1557 } else {
1558 data->bytes_xfered = vub300->datasize;
1559 }
1560 } else {
1561 cmd->error = -ENOMEM;
1562 data->bytes_xfered = 0;
1563 }
1564 } else { /* no data padding required */
1565 int result;
1566 unsigned char buf[64 * 4];
1567 sg_copy_to_buffer(data->sg, data->sg_len, buf, sizeof(buf));
1568 result = usb_sg_init(&vub300->sg_request, vub300->udev,
1569 pipe, 0, data->sg,
1570 data->sg_len, 0, GFP_KERNEL);
1571 if (result < 0) {
1572 usb_unlink_urb(vub300->command_out_urb);
1573 usb_unlink_urb(vub300->command_res_urb);
1574 cmd->error = result;
1575 data->bytes_xfered = 0;
1576 } else {
1577 vub300->sg_transfer_timer.expires =
1578 jiffies + msecs_to_jiffies(2000 +
1579 linear_length / 16384);
1580 add_timer(&vub300->sg_transfer_timer);
1581 usb_sg_wait(&vub300->sg_request);
1582 if (cmd->error) {
1583 data->bytes_xfered = 0;
1584 } else {
1585 del_timer(&vub300->sg_transfer_timer);
1586 if (vub300->sg_request.status < 0) {
1587 cmd->error = vub300->sg_request.status;
1588 data->bytes_xfered = 0;
1589 } else {
1590 data->bytes_xfered = vub300->datasize;
1591 }
1592 }
1593 }
1594 }
1595 return linear_length;
1596}
1597
1598static void __vub300_command_response(struct vub300_mmc_host *vub300,
1599 struct mmc_command *cmd,
1600 struct mmc_data *data, int data_length)
1601{
1602 /* cmd_mutex is held by vub300_cmndwork_thread */
1603 long respretval;
1604 int msec_timeout = 1000 + data_length / 4;
1605 respretval =
1606 wait_for_completion_timeout(&vub300->command_complete,
1607 msecs_to_jiffies(msec_timeout));
1608 if (respretval == 0) { /* TIMED OUT */
1609 /* we don't know which of "out" and "res", if any, failed */
1610 int result;
1611 vub300->usb_timed_out = 1;
1612 usb_kill_urb(vub300->command_out_urb);
1613 usb_kill_urb(vub300->command_res_urb);
1614 cmd->error = -ETIMEDOUT;
1615 result = usb_lock_device_for_reset(vub300->udev,
1616 vub300->interface);
1617 if (result == 0) {
1618 result = usb_reset_device(vub300->udev);
1619 usb_unlock_device(vub300->udev);
1620 }
1621 } else if (respretval < 0) {
1622 /* we don't know which of "out" and "res", if any, failed */
1623 usb_kill_urb(vub300->command_out_urb);
1624 usb_kill_urb(vub300->command_res_urb);
1625 cmd->error = respretval;
1626 } else if (cmd->error) {
1627 /*
1628 * the error occurred sending the command
1629 * or receiving the response
1630 */
1631 } else if (vub300->command_out_urb->status) {
1632 vub300->usb_transport_fail = vub300->command_out_urb->status;
1633 cmd->error = -EPROTO == vub300->command_out_urb->status ?
1634 -ESHUTDOWN : vub300->command_out_urb->status;
1635 } else if (vub300->command_res_urb->status) {
1636 vub300->usb_transport_fail = vub300->command_res_urb->status;
1637 cmd->error = -EPROTO == vub300->command_res_urb->status ?
1638 -ESHUTDOWN : vub300->command_res_urb->status;
1639 } else if (vub300->resp.common.header_type == 0x00) {
1640 /*
1641 * the command completed successfully
1642 * and there was no piggybacked data
1643 */
1644 } else if (vub300->resp.common.header_type == RESPONSE_ERROR) {
1645 cmd->error =
1646 vub300_response_error(vub300->resp.error.error_code);
1647 if (vub300->data)
1648 usb_sg_cancel(&vub300->sg_request);
1649 } else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) {
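/* the response piggybacks offloaded register reads, 8 bytes per entry, appended after the basic header */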
1650 int offloaded_data_length =
1651 vub300->resp.common.header_size -
1652 sizeof(struct sd_register_header);
1653 int register_count = offloaded_data_length >> 3;
1654 int ri = 0;
1655 while (register_count--) {
1656 add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
1657 ri += 1;
1658 }
1659 vub300->resp.common.header_size =
1660 sizeof(struct sd_register_header);
1661 vub300->resp.common.header_type = 0x00;
1662 cmd->error = 0;
1663 } else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) {
1664 int offloaded_data_length =
1665 vub300->resp.common.header_size -
1666 sizeof(struct sd_register_header);
1667 int register_count = offloaded_data_length >> 3;
1668 int ri = 0;
1669 while (register_count--) {
1670 add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
1671 ri += 1;
1672 }
1673 mutex_lock(&vub300->irq_mutex);
1674 if (vub300->irqs_queued) {
1675 vub300->irqs_queued += 1;
1676 } else if (vub300->irq_enabled) {
1677 vub300->irqs_queued += 1;
1678 vub300_queue_poll_work(vub300, 0);
1679 } else {
1680 vub300->irqs_queued += 1;
1681 }
1682 vub300->irq_disabled = 1;
1683 mutex_unlock(&vub300->irq_mutex);
1684 vub300->resp.common.header_size =
1685 sizeof(struct sd_register_header);
1686 vub300->resp.common.header_type = 0x00;
1687 cmd->error = 0;
1688 } else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) {
1689 int offloaded_data_length =
1690 vub300->resp.common.header_size -
1691 sizeof(struct sd_register_header);
1692 int register_count = offloaded_data_length >> 3;
1693 int ri = 0;
1694 while (register_count--) {
1695 add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
1696 ri += 1;
1697 }
1698 mutex_lock(&vub300->irq_mutex);
1699 if (vub300->irqs_queued) {
1700 vub300->irqs_queued += 1;
1701 } else if (vub300->irq_enabled) {
1702 vub300->irqs_queued += 1;
1703 vub300_queue_poll_work(vub300, 0);
1704 } else {
1705 vub300->irqs_queued += 1;
1706 }
1707 vub300->irq_disabled = 0;
1708 mutex_unlock(&vub300->irq_mutex);
1709 vub300->resp.common.header_size =
1710 sizeof(struct sd_register_header);
1711 vub300->resp.common.header_type = 0x00;
1712 cmd->error = 0;
1713 } else {
1714 cmd->error = -EINVAL;
1715 }
1716}
1717
1718static void construct_request_response(struct vub300_mmc_host *vub300,
1719 struct mmc_command *cmd)
1720{
1721 int resp_len = vub300->resp_len;
1722 int less_cmd = (17 == resp_len) ? resp_len : resp_len - 1;
1723 int bytes = 3 & less_cmd;
1724 int words = less_cmd >> 2;
1725 u8 *r = vub300->resp.response.command_response;
1726 if (bytes == 3) {
1727 cmd->resp[words] = (r[1 + (words << 2)] << 24)
1728 | (r[2 + (words << 2)] << 16)
1729 | (r[3 + (words << 2)] << 8);
1730 } else if (bytes == 2) {
1731 cmd->resp[words] = (r[1 + (words << 2)] << 24)
1732 | (r[2 + (words << 2)] << 16);
1733 } else if (bytes == 1) {
1734 cmd->resp[words] = (r[1 + (words << 2)] << 24);
1735 }
1736 while (words-- > 0) {
1737 cmd->resp[words] = (r[1 + (words << 2)] << 24)
1738 | (r[2 + (words << 2)] << 16)
1739 | (r[3 + (words << 2)] << 8)
1740 | (r[4 + (words << 2)] << 0);
1741 }
1742 if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0]))
1743 cmd->resp[0] &= 0xFFFFFF00;
1744}
1745
1746/* this thread runs only when there is an upper level command req outstanding */
1747static void vub300_cmndwork_thread(struct work_struct *work)
1748{
1749 struct vub300_mmc_host *vub300 =
1750 container_of(work, struct vub300_mmc_host, cmndwork);
1751 if (!vub300->interface) {
1752 kref_put(&vub300->kref, vub300_delete);
1753 return;
1754 } else {
1755 struct mmc_request *req = vub300->req;
1756 struct mmc_command *cmd = vub300->cmd;
1757 struct mmc_data *data = vub300->data;
1758 int data_length;
1759 mutex_lock(&vub300->cmd_mutex);
1760 init_completion(&vub300->command_complete);
1761 if (likely(vub300->vub_name[0]) || !vub300->mmc->card ||
1762 !mmc_card_present(vub300->mmc->card)) {
1763 /*
1764 * the name of the EMPTY Pseudo firmware file
1765 * is used as a flag to indicate that the file
1766 * has already been downloaded to the VUB300 chip
1767 */
1768 } else if (0 == vub300->mmc->card->sdio_funcs) {
1769 strncpy(vub300->vub_name, "SD memory device",
1770 sizeof(vub300->vub_name));
1771 } else {
1772 download_offload_pseudocode(vub300);
1773 }
1774 send_command(vub300);
1775 if (!data)
1776 data_length = 0;
1777 else if (MMC_DATA_READ & data->flags)
1778 data_length = __command_read_data(vub300, cmd, data);
1779 else
1780 data_length = __command_write_data(vub300, cmd, data);
1781 __vub300_command_response(vub300, cmd, data, data_length);
1782 vub300->req = NULL;
1783 vub300->cmd = NULL;
1784 vub300->data = NULL;
1785 if (cmd->error) {
1786 if (cmd->error == -ENOMEDIUM)
1787 check_vub300_port_status(vub300);
1788 mutex_unlock(&vub300->cmd_mutex);
1789 mmc_request_done(vub300->mmc, req);
1790 kref_put(&vub300->kref, vub300_delete);
1791 return;
1792 } else {
1793 construct_request_response(vub300, cmd);
1794 vub300->resp_len = 0;
1795 mutex_unlock(&vub300->cmd_mutex);
1796 kref_put(&vub300->kref, vub300_delete);
1797 mmc_request_done(vub300->mmc, req);
1798 return;
1799 }
1800 }
1801}
1802
1803static int examine_cyclic_buffer(struct vub300_mmc_host *vub300,
1804 struct mmc_command *cmd, u8 Function)
1805{
1806 /* cmd_mutex is held by vub300_mmc_request */
1807 u8 cmd0 = 0xFF & (cmd->arg >> 24);
1808 u8 cmd1 = 0xFF & (cmd->arg >> 16);
1809 u8 cmd2 = 0xFF & (cmd->arg >> 8);
1810 u8 cmd3 = 0xFF & (cmd->arg >> 0);
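/* offloaded register reads live in a per-function circular buffer; try the entry at the current offload point first, then scan forward */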
1811 int first = MAXREGMASK & vub300->fn[Function].offload_point;
1812 struct offload_registers_access *rf = &vub300->fn[Function].reg[first];
1813 if (cmd0 == rf->command_byte[0] &&
1814 cmd1 == rf->command_byte[1] &&
1815 cmd2 == rf->command_byte[2] &&
1816 cmd3 == rf->command_byte[3]) {
1817 u8 checksum = 0x00;
1818 cmd->resp[1] = checksum << 24;
1819 cmd->resp[0] = (rf->Respond_Byte[0] << 24)
1820 | (rf->Respond_Byte[1] << 16)
1821 | (rf->Respond_Byte[2] << 8)
1822 | (rf->Respond_Byte[3] << 0);
1823 vub300->fn[Function].offload_point += 1;
1824 vub300->fn[Function].offload_count -= 1;
1825 vub300->total_offload_count -= 1;
1826 return 1;
1827 } else {
1828 int delta = 1; /* because it does not match the first one */
1829 u8 register_count = vub300->fn[Function].offload_count - 1;
1830 u32 register_point = vub300->fn[Function].offload_point + 1;
1831 while (0 < register_count) {
1832 int point = MAXREGMASK & register_point;
1833 struct offload_registers_access *r =
1834 &vub300->fn[Function].reg[point];
1835 if (cmd0 == r->command_byte[0] &&
1836 cmd1 == r->command_byte[1] &&
1837 cmd2 == r->command_byte[2] &&
1838 cmd3 == r->command_byte[3]) {
1839 u8 checksum = 0x00;
1840 cmd->resp[1] = checksum << 24;
1841 cmd->resp[0] = (r->Respond_Byte[0] << 24)
1842 | (r->Respond_Byte[1] << 16)
1843 | (r->Respond_Byte[2] << 8)
1844 | (r->Respond_Byte[3] << 0);
1845 vub300->fn[Function].offload_point += delta;
1846 vub300->fn[Function].offload_count -= delta;
1847 vub300->total_offload_count -= delta;
1848 return 1;
1849 } else {
1850 register_point += 1;
1851 register_count -= 1;
1852 delta += 1;
1853 continue;
1854 }
1855 }
1856 return 0;
1857 }
1858}
1859
1860static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300,
1861 struct mmc_command *cmd)
1862{
1863 /* cmd_mutex is held by vub300_mmc_request */
1864 u8 regs = vub300->dynamic_register_count;
1865 u8 i = 0;
1866 u8 func = FUN(cmd);
1867 u32 reg = REG(cmd);
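/* scan the dynamic registers first: a read of a prepared register is answered from the cached value, while a write invalidates it */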
1868 while (0 < regs--) {
1869 if ((vub300->sdio_register[i].func_num == func) &&
1870 (vub300->sdio_register[i].sdio_reg == reg)) {
1871 if (!vub300->sdio_register[i].prepared) {
1872 return 0;
1873 } else if ((0x80000000 & cmd->arg) == 0x80000000) {
1874 /*
1875 * a write to a dynamic register
1876 * nullifies our offloaded value
1877 */
1878 vub300->sdio_register[i].prepared = 0;
1879 return 0;
1880 } else {
1881 u8 checksum = 0x00;
1882 u8 rsp0 = 0x00;
1883 u8 rsp1 = 0x00;
1884 u8 rsp2 = vub300->sdio_register[i].response;
1885 u8 rsp3 = vub300->sdio_register[i].regvalue;
1886 vub300->sdio_register[i].prepared = 0;
1887 cmd->resp[1] = checksum << 24;
1888 cmd->resp[0] = (rsp0 << 24)
1889 | (rsp1 << 16)
1890 | (rsp2 << 8)
1891 | (rsp3 << 0);
1892 return 1;
1893 }
1894 } else {
1895 i += 1;
1896 continue;
1897 }
1898 }
1899 if (vub300->total_offload_count == 0)
1900 return 0;
1901 else if (vub300->fn[func].offload_count == 0)
1902 return 0;
1903 else
1904 return examine_cyclic_buffer(vub300, cmd, func);
1905}
1906
1907static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
1908{ /* NOT irq */
1909 struct mmc_command *cmd = req->cmd;
1910 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
1911 if (!vub300->interface) {
1912 cmd->error = -ESHUTDOWN;
1913 mmc_request_done(mmc, req);
1914 return;
1915 } else {
1916 struct mmc_data *data = req->data;
1917 if (!vub300->card_powered) {
1918 cmd->error = -ENOMEDIUM;
1919 mmc_request_done(mmc, req);
1920 return;
1921 }
1922 if (!vub300->card_present) {
1923 cmd->error = -ENOMEDIUM;
1924 mmc_request_done(mmc, req);
1925 return;
1926 }
1927 if (vub300->usb_transport_fail) {
1928 cmd->error = vub300->usb_transport_fail;
1929 mmc_request_done(mmc, req);
1930 return;
1931 }
1932 if (!vub300->interface) {
1933 cmd->error = -ENODEV;
1934 mmc_request_done(mmc, req);
1935 return;
1936 }
1937 kref_get(&vub300->kref);
1938 mutex_lock(&vub300->cmd_mutex);
1939 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
1940 /*
1941 * for performance we have to return immediately
1942 * if the requested data has been offloaded
1943 */
1944 if (cmd->opcode == 52 &&
1945 satisfy_request_from_offloaded_data(vub300, cmd)) {
1946 cmd->error = 0;
1947 mutex_unlock(&vub300->cmd_mutex);
1948 kref_put(&vub300->kref, vub300_delete);
1949 mmc_request_done(mmc, req);
1950 return;
1951 } else {
1952 vub300->cmd = cmd;
1953 vub300->req = req;
1954 vub300->data = data;
1955 if (data)
1956 vub300->datasize = data->blksz * data->blocks;
1957 else
1958 vub300->datasize = 0;
1959 vub300_queue_cmnd_work(vub300);
1960 mutex_unlock(&vub300->cmd_mutex);
1961 kref_put(&vub300->kref, vub300_delete);
1962 /*
1963 * the kernel lock diagnostics complain
1964 * if the cmd_mutex is "passed on"
1965 * to the cmndwork thread,
1966 * so we must release it now
1967 * and re-acquire it in the cmndwork thread
1968 */
1969 }
1970 }
1971}
1972
1973static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
1974 struct mmc_ios *ios)
1975{
1976 int buf_array_size = 8; /* ARRAY_SIZE(buf) does not work: buf decays to a pointer here */
1977 int retval;
1978 u32 kHzClock;
1979 if (ios->clock >= 48000000)
1980 kHzClock = 48000;
1981 else if (ios->clock >= 24000000)
1982 kHzClock = 24000;
1983 else if (ios->clock >= 20000000)
1984 kHzClock = 20000;
1985 else if (ios->clock >= 15000000)
1986 kHzClock = 15000;
1987 else if (ios->clock >= 200000)
1988 kHzClock = 200;
1989 else
1990 kHzClock = 0;
1991 {
1992 int i;
1993 u64 c = kHzClock;
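/* serialize the kHz clock rate as a little-endian value filling the 8-byte control buffer */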
1994 for (i = 0; i < buf_array_size; i++) {
1995 buf[i] = c;
1996 c >>= 8;
1997 }
1998 }
1999 retval =
2000 usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
2001 SET_CLOCK_SPEED,
2002 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2003 0x00, 0x00, buf, buf_array_size, HZ);
2004 if (retval != 8) {
2005 dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
2006 " %dkHz failed with retval=%d\n", kHzClock, retval);
2007 } else {
2008 dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED"
2009 " %dkHz\n", kHzClock);
2010 }
2011}
2012
2013static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2014{ /* NOT irq */
2015 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
2016 if (!vub300->interface)
2017 return;
2018 kref_get(&vub300->kref);
2019 mutex_lock(&vub300->cmd_mutex);
2020 if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) {
2021 vub300->card_powered = 0;
2022 usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
2023 SET_SD_POWER,
2024 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2025 0x0000, 0x0000, NULL, 0, HZ);
2026 /* must wait for the VUB300 u-proc to boot up */
2027 msleep(600);
2028 } else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
2029 usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
2030 SET_SD_POWER,
2031 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2032 0x0001, 0x0000, NULL, 0, HZ);
2033 msleep(600);
2034 vub300->card_powered = 1;
2035 } else if (ios->power_mode == MMC_POWER_ON) {
2036 u8 *buf = kmalloc(8, GFP_KERNEL);
2037 if (buf) {
2038 __set_clock_speed(vub300, buf, ios);
2039 kfree(buf);
2040 }
2041 } else {
2042 /* this should mean no change of state */
2043 }
2044 mutex_unlock(&vub300->cmd_mutex);
2045 kref_put(&vub300->kref, vub300_delete);
2046}
2047
2048static int vub300_mmc_get_ro(struct mmc_host *mmc)
2049{
2050 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
2051 return vub300->read_only;
2052}
2053
2054static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
2055{ /* NOT irq */
2056 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
2057 if (!vub300->interface)
2058 return;
2059 kref_get(&vub300->kref);
2060 if (enable) {
2061 mutex_lock(&vub300->irq_mutex);
2062 if (vub300->irqs_queued) {
2063 vub300->irqs_queued -= 1;
2064 mmc_signal_sdio_irq(vub300->mmc);
2065 } else if (vub300->irq_disabled) {
2066 vub300->irq_disabled = 0;
2067 vub300->irq_enabled = 1;
2068 vub300_queue_poll_work(vub300, 0);
2069 } else if (vub300->irq_enabled) {
2070 /* this should not happen, so we will just ignore it */
2071 } else {
2072 vub300->irq_enabled = 1;
2073 vub300_queue_poll_work(vub300, 0);
2074 }
2075 mutex_unlock(&vub300->irq_mutex);
2076 } else {
2077 vub300->irq_enabled = 0;
2078 }
2079 kref_put(&vub300->kref, vub300_delete);
2080}
2081
2082void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
2083{ /* NOT irq */
2084 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
2085 dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
2086}
2087
2088static struct mmc_host_ops vub300_mmc_ops = {
2089 .request = vub300_mmc_request,
2090 .set_ios = vub300_mmc_set_ios,
2091 .get_ro = vub300_mmc_get_ro,
2092 .enable_sdio_irq = vub300_enable_sdio_irq,
2093 .init_card = vub300_init_card,
2094};
2095
2096static int vub300_probe(struct usb_interface *interface,
2097 const struct usb_device_id *id)
2098{ /* NOT irq */
2099 struct vub300_mmc_host *vub300 = NULL;
2100 struct usb_host_interface *iface_desc;
2101 struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
2102 int i;
2103 int retval = -ENOMEM;
2104 struct urb *command_out_urb;
2105 struct urb *command_res_urb;
2106 struct mmc_host *mmc;
2107 char manufacturer[48];
2108 char product[32];
2109 char serial_number[32];
2110 usb_string(udev, udev->descriptor.iManufacturer, manufacturer,
2111 sizeof(manufacturer));
2112 usb_string(udev, udev->descriptor.iProduct, product, sizeof(product));
2113 usb_string(udev, udev->descriptor.iSerialNumber, serial_number,
2114 sizeof(serial_number));
2115 dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n",
2116 udev->descriptor.idVendor, udev->descriptor.idProduct,
2117 manufacturer, product, serial_number);
2118 command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
2119 if (!command_out_urb) {
2120 retval = -ENOMEM;
2121 dev_err(&udev->dev,
2122 "not enough memory for the command_out_urb\n");
2123 goto error0;
2124 }
2125 command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
2126 if (!command_res_urb) {
2127 retval = -ENOMEM;
2128 dev_err(&udev->dev,
2129 "not enough memory for the command_res_urb\n");
2130 goto error1;
2131 }
2132 /* this also allocates memory for our VUB300 mmc host device */
2133 mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
2134 if (!mmc) {
2135 retval = -ENOMEM;
2136 dev_err(&udev->dev,
2137 "not enough memory for the mmc_host\n");
2138 goto error4;
2139 }
2140 /* MMC core transfer sizes tunable parameters */
2141 mmc->caps = 0;
2142 if (!force_1_bit_data_xfers)
2143 mmc->caps |= MMC_CAP_4_BIT_DATA;
2144 if (!force_polling_for_irqs)
2145 mmc->caps |= MMC_CAP_SDIO_IRQ;
2146 mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2147 /*
2148 * MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll
2149 * for devices which results in spurious CMD7's being
2150 * issued which stops some SDIO cards from working
2151 */
2152 if (limit_speed_to_24_MHz) {
2153 mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
2154 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2155 mmc->f_max = 24000000;
2156 dev_info(&udev->dev, "limiting SDIO speed to 24_MHz\n");
2157 } else {
2158 mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
2159 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2160 mmc->f_max = 48000000;
2161 }
2162 mmc->f_min = 200000;
2163 mmc->max_blk_count = 511;
2164 mmc->max_blk_size = 512;
2165 mmc->max_segs = 128;
2166 if (force_max_req_size)
2167 mmc->max_req_size = force_max_req_size * 1024;
2168 else
2169 mmc->max_req_size = 64 * 1024;
2170 mmc->max_seg_size = mmc->max_req_size;
2171 mmc->ocr_avail = 0;
2172 mmc->ocr_avail |= MMC_VDD_165_195;
2173 mmc->ocr_avail |= MMC_VDD_20_21;
2174 mmc->ocr_avail |= MMC_VDD_21_22;
2175 mmc->ocr_avail |= MMC_VDD_22_23;
2176 mmc->ocr_avail |= MMC_VDD_23_24;
2177 mmc->ocr_avail |= MMC_VDD_24_25;
2178 mmc->ocr_avail |= MMC_VDD_25_26;
2179 mmc->ocr_avail |= MMC_VDD_26_27;
2180 mmc->ocr_avail |= MMC_VDD_27_28;
2181 mmc->ocr_avail |= MMC_VDD_28_29;
2182 mmc->ocr_avail |= MMC_VDD_29_30;
2183 mmc->ocr_avail |= MMC_VDD_30_31;
2184 mmc->ocr_avail |= MMC_VDD_31_32;
2185 mmc->ocr_avail |= MMC_VDD_32_33;
2186 mmc->ocr_avail |= MMC_VDD_33_34;
2187 mmc->ocr_avail |= MMC_VDD_34_35;
2188 mmc->ocr_avail |= MMC_VDD_35_36;
2189 mmc->ops = &vub300_mmc_ops;
2190 vub300 = mmc_priv(mmc);
2191 vub300->mmc = mmc;
2192 vub300->card_powered = 0;
2193 vub300->bus_width = 0;
2194 vub300->cmnd.head.block_size[0] = 0x00;
2195 vub300->cmnd.head.block_size[1] = 0x00;
2196 vub300->app_spec = 0;
2197 mutex_init(&vub300->cmd_mutex);
2198 mutex_init(&vub300->irq_mutex);
2199 vub300->command_out_urb = command_out_urb;
2200 vub300->command_res_urb = command_res_urb;
2201 vub300->usb_timed_out = 0;
2202 vub300->dynamic_register_count = 0;
2203
2204 for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) {
2205 vub300->fn[i].offload_point = 0;
2206 vub300->fn[i].offload_count = 0;
2207 }
2208
2209 vub300->total_offload_count = 0;
2210 vub300->irq_enabled = 0;
2211 vub300->irq_disabled = 0;
2212 vub300->irqs_queued = 0;
2213
2214 for (i = 0; i < ARRAY_SIZE(vub300->sdio_register); i++)
2215 vub300->sdio_register[i].activate = 0;
2216
2217 vub300->udev = udev;
2218 vub300->interface = interface;
2219 vub300->cmnd_res_ep = 0;
2220 vub300->cmnd_out_ep = 0;
2221 vub300->data_inp_ep = 0;
2222 vub300->data_out_ep = 0;
2223
2224 for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
2225 vub300->fbs[i] = 512;
2226
2227 /*
2228 * set up the endpoint information
2229 *
2230 * use the first pair of bulk-in and bulk-out
2231 * endpoints for Command/Response+Interrupt
2232 *
2233 * use the second pair of bulk-in and bulk-out
2234 * endpoints for Data In/Out
2235 */
2236 vub300->large_usb_packets = 0;
2237 iface_desc = interface->cur_altsetting;
2238 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
2239 struct usb_endpoint_descriptor *endpoint =
2240 &iface_desc->endpoint[i].desc;
2241 dev_info(&vub300->udev->dev,
2242 "vub300 testing %s EndPoint(%d) %02X\n",
2243 usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" :
2244 usb_endpoint_is_bulk_out(endpoint) ? "BULK OUT" :
2245 "UNKNOWN", i, endpoint->bEndpointAddress);
2246 if (endpoint->wMaxPacketSize > 64)
2247 vub300->large_usb_packets = 1;
2248 if (usb_endpoint_is_bulk_in(endpoint)) {
2249 if (!vub300->cmnd_res_ep) {
2250 vub300->cmnd_res_ep =
2251 endpoint->bEndpointAddress;
2252 } else if (!vub300->data_inp_ep) {
2253 vub300->data_inp_ep =
2254 endpoint->bEndpointAddress;
2255 } else {
2256 dev_warn(&vub300->udev->dev,
2257 "ignoring"
2258 " unexpected bulk_in endpoint");
2259 }
2260 } else if (usb_endpoint_is_bulk_out(endpoint)) {
2261 if (!vub300->cmnd_out_ep) {
2262 vub300->cmnd_out_ep =
2263 endpoint->bEndpointAddress;
2264 } else if (!vub300->data_out_ep) {
2265 vub300->data_out_ep =
2266 endpoint->bEndpointAddress;
2267 } else {
2268 dev_warn(&vub300->udev->dev,
2269 "ignoring"
2270 " unexpected bulk_out endpoint");
2271 }
2272 } else {
2273 dev_warn(&vub300->udev->dev,
2274 "vub300 ignoring EndPoint(%d) %02X", i,
2275 endpoint->bEndpointAddress);
2276 }
2277 }
2278 if (vub300->cmnd_res_ep && vub300->cmnd_out_ep &&
2279 vub300->data_inp_ep && vub300->data_out_ep) {
2280 dev_info(&vub300->udev->dev,
2281 "vub300 %s packets"
2282 " using EndPoints %02X %02X %02X %02X\n",
2283 vub300->large_usb_packets ? "LARGE" : "SMALL",
2284 vub300->cmnd_out_ep, vub300->cmnd_res_ep,
2285 vub300->data_out_ep, vub300->data_inp_ep);
2286 /* we have the expected EndPoints */
2287 } else {
2288 dev_err(&vub300->udev->dev,
2289 "Could not find two sets of bulk-in/out endpoint pairs\n");
2290 retval = -EINVAL;
2291 goto error5;
2292 }
2293 retval =
2294 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
2295 GET_HC_INF0,
2296 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2297 0x0000, 0x0000, &vub300->hc_info,
2298 sizeof(vub300->hc_info), HZ);
2299 if (retval < 0)
2300 goto error5;
2301 retval =
2302 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
2303 SET_ROM_WAIT_STATES,
2304 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2305 firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
2306 if (retval < 0)
2307 goto error5;
2308 dev_info(&vub300->udev->dev,
2309 "operating_mode = %s %s %d MHz %s %d byte USB packets\n",
2310 (mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL",
2311 (mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit",
2312 mmc->f_max / 1000000,
2313 pad_input_to_usb_pkt ? "padding input data to" : "with",
2314 vub300->large_usb_packets ? 512 : 64);
2315 retval =
2316 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
2317 GET_SYSTEM_PORT_STATUS,
2318 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2319 0x0000, 0x0000, &vub300->system_port_status,
2320 sizeof(vub300->system_port_status), HZ);
2321 if (retval < 0) {
2322 goto error5;
2323 } else if (sizeof(vub300->system_port_status) == retval) {
2324 vub300->card_present =
2325 (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
2326 vub300->read_only =
2327 (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
2328 } else {
2329 goto error5;
2330 }
2331 usb_set_intfdata(interface, vub300);
2332 INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread);
2333 INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread);
2334 INIT_WORK(&vub300->deadwork, vub300_deadwork_thread);
2335 kref_init(&vub300->kref);
2336 init_timer(&vub300->sg_transfer_timer);
2337 vub300->sg_transfer_timer.data = (unsigned long)vub300;
2338 vub300->sg_transfer_timer.function = vub300_sg_timed_out;
2339 kref_get(&vub300->kref);
2340 init_timer(&vub300->inactivity_timer);
2341 vub300->inactivity_timer.data = (unsigned long)vub300;
2342 vub300->inactivity_timer.function = vub300_inactivity_timer_expired;
2343 vub300->inactivity_timer.expires = jiffies + HZ;
2344 add_timer(&vub300->inactivity_timer);
2345 if (vub300->card_present)
2346 dev_info(&vub300->udev->dev,
2347 "USB vub300 remote SDIO host controller[%d]"
2348 "connected with SD/SDIO card inserted\n",
2349 interface_to_InterfaceNumber(interface));
2350 else
2351 dev_info(&vub300->udev->dev,
2352 "USB vub300 remote SDIO host controller[%d]"
2353 "connected with no SD/SDIO card inserted\n",
2354 interface_to_InterfaceNumber(interface));
2355 mmc_add_host(mmc);
2356 return 0;
2357error5:
2358 mmc_free_host(mmc);
2359 /*
2360 * and hence also frees vub300
2361 * which is contained at the end of struct mmc
2362 */
2363error4:
2364 usb_free_urb(command_out_urb);
2365error1:
2366 usb_free_urb(command_res_urb);
2367error0:
2368 return retval;
2369}
2370
2371static void vub300_disconnect(struct usb_interface *interface)
2372{ /* NOT irq */
2373 struct vub300_mmc_host *vub300 = usb_get_intfdata(interface);
2374 if (!vub300 || !vub300->mmc) {
2375 return;
2376 } else {
2377 struct mmc_host *mmc = vub300->mmc;
2378 if (!vub300->mmc) {
2379 return;
2380 } else {
2381 int ifnum = interface_to_InterfaceNumber(interface);
2382 usb_set_intfdata(interface, NULL);
2383 /* prevent more I/O from starting */
2384 vub300->interface = NULL;
2385 kref_put(&vub300->kref, vub300_delete);
2386 mmc_remove_host(mmc);
2387 pr_info("USB vub300 remote SDIO host controller[%d]"
2388 " now disconnected", ifnum);
2389 return;
2390 }
2391 }
2392}
2393
2394#ifdef CONFIG_PM
2395static int vub300_suspend(struct usb_interface *intf, pm_message_t message)
2396{
2397 struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
2398 if (!vub300 || !vub300->mmc) {
2399 return 0;
2400 } else {
2401 struct mmc_host *mmc = vub300->mmc;
2402 mmc_suspend_host(mmc);
2403 return 0;
2404 }
2405}
2406
2407static int vub300_resume(struct usb_interface *intf)
2408{
2409 struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
2410 if (!vub300 || !vub300->mmc) {
2411 return 0;
2412 } else {
2413 struct mmc_host *mmc = vub300->mmc;
2414 mmc_resume_host(mmc);
2415 return 0;
2416 }
2417}
2418#else
2419#define vub300_suspend NULL
2420#define vub300_resume NULL
2421#endif
2422static int vub300_pre_reset(struct usb_interface *intf)
2423{ /* NOT irq */
2424 struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
2425 mutex_lock(&vub300->cmd_mutex);
2426 return 0;
2427}
2428
2429static int vub300_post_reset(struct usb_interface *intf)
2430{ /* NOT irq */
2431 struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
2432 /* we are sure no URBs are active - no locking needed */
2433 vub300->errors = -EPIPE;
2434 mutex_unlock(&vub300->cmd_mutex);
2435 return 0;
2436}
2437
2438static struct usb_driver vub300_driver = {
2439 .name = "vub300",
2440 .probe = vub300_probe,
2441 .disconnect = vub300_disconnect,
2442 .suspend = vub300_suspend,
2443 .resume = vub300_resume,
2444 .pre_reset = vub300_pre_reset,
2445 .post_reset = vub300_post_reset,
2446 .id_table = vub300_table,
2447 .supports_autosuspend = 1,
2448};
2449
2450static int __init vub300_init(void)
2451{ /* NOT irq */
2452 int result;
2453
2454 pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X",
2455 firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout);
2456 cmndworkqueue = create_singlethread_workqueue("kvub300c");
2457 if (!cmndworkqueue) {
2458 pr_err("not enough memory for the REQUEST workqueue");
2459 result = -ENOMEM;
2460 goto out1;
2461 }
2462 pollworkqueue = create_singlethread_workqueue("kvub300p");
2463 if (!pollworkqueue) {
2464 pr_err("not enough memory for the IRQPOLL workqueue");
2465 result = -ENOMEM;
2466 goto out2;
2467 }
2468 deadworkqueue = create_singlethread_workqueue("kvub300d");
2469 if (!deadworkqueue) {
2470 pr_err("not enough memory for the EXPIRED workqueue");
2471 result = -ENOMEM;
2472 goto out3;
2473 }
2474 result = usb_register(&vub300_driver);
2475 if (result) {
2476 pr_err("usb_register failed. Error number %d", result);
2477 goto out4;
2478 }
2479 return 0;
2480out4:
2481 destroy_workqueue(deadworkqueue);
2482out3:
2483 destroy_workqueue(pollworkqueue);
2484out2:
2485 destroy_workqueue(cmndworkqueue);
2486out1:
2487 return result;
2488}
2489
2490static void __exit vub300_exit(void)
2491{
2492 usb_deregister(&vub300_driver);
2493 flush_workqueue(cmndworkqueue);
2494 flush_workqueue(pollworkqueue);
2495 flush_workqueue(deadworkqueue);
2496 destroy_workqueue(cmndworkqueue);
2497 destroy_workqueue(pollworkqueue);
2498 destroy_workqueue(deadworkqueue);
2499}
2500
2501module_init(vub300_init);
2502module_exit(vub300_exit);
2503
2504MODULE_AUTHOR("Tony Olech <tony.olech@elandigitalsystems.com>");
2505MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver");
2506MODULE_LICENSE("GPL");
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index ccfedb4f3eb0..01f636275057 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -4,6 +4,7 @@ header-y += caif/
 header-y += dvb/
 header-y += hdlc/
 header-y += isdn/
+header-y += mmc/
 header-y += nfsd/
 header-y += raid/
 header-y += spi/
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 8e70310ee945..5a90266c3a5a 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -4,6 +4,7 @@
 #include <linux/fb.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 
 #define tmio_ioread8(addr) readb(addr)
 #define tmio_ioread16(addr) readw(addr)
@@ -61,6 +62,12 @@
  * Some controllers can support SDIO IRQ signalling.
  */
 #define TMIO_MMC_SDIO_IRQ (1 << 2)
+/*
+ * Some platforms can detect card insertion events with controller powered
+ * down, in which case they have to call tmio_mmc_cd_wakeup() to power up the
+ * controller and report the event to the driver.
+ */
+#define TMIO_MMC_HAS_COLD_CD (1 << 3)
 
 int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
 int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
@@ -82,11 +89,21 @@ struct tmio_mmc_data {
 	unsigned long flags;
 	u32 ocr_mask; /* available voltages */
 	struct tmio_mmc_dma *dma;
+	struct device *dev;
+	bool power;
 	void (*set_pwr)(struct platform_device *host, int state);
 	void (*set_clk_div)(struct platform_device *host, int state);
 	int (*get_cd)(struct platform_device *host);
 };
 
+static inline void tmio_mmc_cd_wakeup(struct tmio_mmc_data *pdata)
+{
+	if (pdata && !pdata->power) {
+		pdata->power = true;
+		pm_runtime_get(pdata->dev);
+	}
+}
+
 /*
  * data for the NAND controller
  */
diff --git a/include/linux/mmc/Kbuild b/include/linux/mmc/Kbuild
new file mode 100644
index 000000000000..1fb26448faa9
--- /dev/null
+++ b/include/linux/mmc/Kbuild
@@ -0,0 +1 @@
+header-y += ioctl.h
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index adb4888248be..c6927a4d157f 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -11,6 +11,7 @@
 #define LINUX_MMC_CARD_H
 
 #include <linux/mmc/core.h>
+#include <linux/mod_devicetable.h>
 
 struct mmc_cid {
 	unsigned int manfid;
@@ -29,6 +30,7 @@ struct mmc_csd {
 	unsigned short cmdclass;
 	unsigned short tacc_clks;
 	unsigned int tacc_ns;
+	unsigned int c_size;
 	unsigned int r2w_factor;
 	unsigned int max_dtr;
 	unsigned int erase_size; /* In sectors */
@@ -45,6 +47,10 @@ struct mmc_ext_csd {
 	u8 rev;
 	u8 erase_group_def;
 	u8 sec_feature_support;
+	u8 rel_sectors;
+	u8 rel_param;
+	u8 part_config;
+	unsigned int part_time; /* Units: ms */
 	unsigned int sa_timeout; /* Units: 100ns */
 	unsigned int hs_max_dtr;
 	unsigned int sectors;
@@ -57,13 +63,18 @@ struct mmc_ext_csd {
 	bool enhanced_area_en; /* enable bit */
 	unsigned long long enhanced_area_offset; /* Units: Byte */
 	unsigned int enhanced_area_size; /* Units: KB */
+	unsigned int boot_size; /* in bytes */
 };
 
 struct sd_scr {
 	unsigned char sda_vsn;
+	unsigned char sda_spec3;
 	unsigned char bus_widths;
 #define SD_SCR_BUS_WIDTH_1 (1<<0)
 #define SD_SCR_BUS_WIDTH_4 (1<<2)
+	unsigned char cmds;
+#define SD_SCR_CMD20_SUPPORT (1<<0)
+#define SD_SCR_CMD23_SUPPORT (1<<1)
 };
 
 struct sd_ssr {
@@ -74,6 +85,39 @@ struct sd_ssr {
 
 struct sd_switch_caps {
 	unsigned int hs_max_dtr;
+	unsigned int uhs_max_dtr;
+#define UHS_SDR104_MAX_DTR 208000000
+#define UHS_SDR50_MAX_DTR 100000000
+#define UHS_DDR50_MAX_DTR 50000000
+#define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR
+#define UHS_SDR12_MAX_DTR 25000000
+	unsigned int sd3_bus_mode;
+#define UHS_SDR12_BUS_SPEED 0
+#define UHS_SDR25_BUS_SPEED 1
+#define UHS_SDR50_BUS_SPEED 2
+#define UHS_SDR104_BUS_SPEED 3
+#define UHS_DDR50_BUS_SPEED 4
+
+#define SD_MODE_UHS_SDR12 (1 << UHS_SDR12_BUS_SPEED)
+#define SD_MODE_UHS_SDR25 (1 << UHS_SDR25_BUS_SPEED)
+#define SD_MODE_UHS_SDR50 (1 << UHS_SDR50_BUS_SPEED)
+#define SD_MODE_UHS_SDR104 (1 << UHS_SDR104_BUS_SPEED)
+#define SD_MODE_UHS_DDR50 (1 << UHS_DDR50_BUS_SPEED)
+	unsigned int sd3_drv_type;
+#define SD_DRIVER_TYPE_B 0x01
+#define SD_DRIVER_TYPE_A 0x02
+#define SD_DRIVER_TYPE_C 0x04
+#define SD_DRIVER_TYPE_D 0x08
+	unsigned int sd3_curr_limit;
+#define SD_SET_CURRENT_LIMIT_200 0
+#define SD_SET_CURRENT_LIMIT_400 1
+#define SD_SET_CURRENT_LIMIT_600 2
+#define SD_SET_CURRENT_LIMIT_800 3
+
+#define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200)
+#define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400)
+#define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600)
+#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800)
 };
 
 struct sdio_cccr {
@@ -118,6 +162,8 @@ struct mmc_card {
 #define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */
 #define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */
 #define MMC_STATE_HIGHSPEED_DDR (1<<4) /* card is in high speed mode */
+#define MMC_STATE_ULTRAHIGHSPEED (1<<5) /* card is in ultra high speed mode */
+#define MMC_CARD_SDXC (1<<6) /* card is SDXC */
 	unsigned int quirks; /* card quirks */
 #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
 #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
@@ -125,6 +171,10 @@ struct mmc_card {
 #define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */
 	/* (missing CIA registers) */
 #define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */
+#define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */
+#define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */
+#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */
+#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
 
 	unsigned int erase_size; /* erase size in sectors */
 	unsigned int erase_shift; /* if erase unit is power 2 */
@@ -145,14 +195,100 @@ struct mmc_card {
 	struct sdio_cccr cccr; /* common card info */
 	struct sdio_cis cis; /* common tuple info */
 	struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */
+	struct sdio_func *sdio_single_irq; /* SDIO function when only one IRQ active */
 	unsigned num_info; /* number of info strings */
 	const char **info; /* info strings */
 	struct sdio_func_tuple *tuples; /* unknown common tuples */
 
+	unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */
+
 	struct dentry *debugfs_root;
 };
 
-void mmc_fixup_device(struct mmc_card *dev);
+/*
+ * The world is not perfect and supplies us with broken mmc/sdio devices.
+ * For at least some of these bugs we need a work-around.
+ */
+
+struct mmc_fixup {
+	/* CID-specific fields. */
+	const char *name;
+
+	/* Valid revision range */
+	u64 rev_start, rev_end;
+
+	unsigned int manfid;
+	unsigned short oemid;
+
+	/* SDIO-specfic fields. You can use SDIO_ANY_ID here of course */
+	u16 cis_vendor, cis_device;
+
+	void (*vendor_fixup)(struct mmc_card *card, int data);
+	int data;
+};
+
+#define CID_MANFID_ANY (-1u)
+#define CID_OEMID_ANY ((unsigned short) -1)
+#define CID_NAME_ANY (NULL)
+
+#define END_FIXUP { 0 }
+
+#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
+		_cis_vendor, _cis_device, \
+		_fixup, _data) \
+	{ \
+		.name = (_name), \
+		.manfid = (_manfid), \
+		.oemid = (_oemid), \
+		.rev_start = (_rev_start), \
+		.rev_end = (_rev_end), \
+		.cis_vendor = (_cis_vendor), \
+		.cis_device = (_cis_device), \
+		.vendor_fixup = (_fixup), \
+		.data = (_data), \
+	}
+
+#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
+		_fixup, _data) \
+	_FIXUP_EXT(_name, _manfid, \
+		_oemid, _rev_start, _rev_end, \
+		SDIO_ANY_ID, SDIO_ANY_ID, \
+		_fixup, _data) \
+
+#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
+	MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data)
+
+#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
+	_FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
+		CID_OEMID_ANY, 0, -1ull, \
+		_vendor, _device, \
+		_fixup, _data) \
+
+#define cid_rev(hwrev, fwrev, year, month) \
+	(((u64) hwrev) << 40 | \
+	 ((u64) fwrev) << 32 | \
+	 ((u64) year) << 16 | \
+	 ((u64) month))
+
+#define cid_rev_card(card) \
+	cid_rev(card->cid.hwrev, \
+		card->cid.fwrev, \
+		card->cid.year, \
+		card->cid.month)
+
+/*
+ * Unconditionally quirk add/remove.
+ */
+
+static inline void __maybe_unused add_quirk(struct mmc_card *card, int data)
+{
+	card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
+{
+	card->quirks &= ~data;
+}
 
 #define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
 #define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
@@ -163,12 +299,50 @@ void mmc_fixup_device(struct mmc_card *dev);
 #define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED)
 #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
 #define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR)
+#define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
+#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
 
 #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
 #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
 #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
 #define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR)
+#define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
+#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
+
+/*
+ * Quirk add/remove for MMC products.
+ */
+
+static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data)
+{
+	if (mmc_card_mmc(card))
+		card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card,
+		int data)
+{
+	if (mmc_card_mmc(card))
+		card->quirks &= ~data;
+}
+
+/*
+ * Quirk add/remove for SD products.
+ */
+
+static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data)
+{
+	if (mmc_card_sd(card))
+		card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card,
+		int data)
+{
+	if (mmc_card_sd(card))
+		card->quirks &= ~data;
+}
 
 static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
 {
@@ -180,6 +354,16 @@ static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
 	return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 }
 
+static inline int mmc_card_disable_cd(const struct mmc_card *c)
+{
+	return c->quirks & MMC_QUIRK_DISABLE_CD;
+}
+
+static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
+{
+	return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
+}
+
 #define mmc_card_name(c) ((c)->cid.prod_name)
 #define mmc_card_id(c) (dev_name(&(c)->dev))
 
@@ -203,4 +387,7 @@ struct mmc_driver {
 extern int mmc_register_driver(struct mmc_driver *);
 extern void mmc_unregister_driver(struct mmc_driver *);
 
+extern void mmc_fixup_device(struct mmc_card *card,
+		const struct mmc_fixup *table);
+
 #endif
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 07f27af4dba5..b6718e549a51 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -92,7 +92,7 @@ struct mmc_command {
  * actively failing requests
  */
 
-	unsigned int erase_timeout; /* in milliseconds */
+	unsigned int cmd_timeout_ms; /* in milliseconds */
 
 	struct mmc_data *data; /* data segment associated with cmd */
 	struct mmc_request *mrq; /* associated request */
@@ -120,6 +120,7 @@ struct mmc_data {
 };
 
 struct mmc_request {
+	struct mmc_command *sbc; /* SET_BLOCK_COUNT for multiblock */
 	struct mmc_command *cmd;
 	struct mmc_data *data;
 	struct mmc_command *stop;
@@ -133,8 +134,10 @@ struct mmc_card;
 
 extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
 extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
+extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
 extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
 	struct mmc_command *, int);
+extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
 
 #define MMC_ERASE_ARG 0x00000000
 #define MMC_SECURE_ERASE_ARG 0x80000000
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index bcb793ec7374..1ee4424462eb 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -50,12 +50,30 @@ struct mmc_ios {
 #define MMC_TIMING_LEGACY 0
 #define MMC_TIMING_MMC_HS 1
 #define MMC_TIMING_SD_HS 2
+#define MMC_TIMING_UHS_SDR12 MMC_TIMING_LEGACY
+#define MMC_TIMING_UHS_SDR25 MMC_TIMING_SD_HS
+#define MMC_TIMING_UHS_SDR50 3
+#define MMC_TIMING_UHS_SDR104 4
+#define MMC_TIMING_UHS_DDR50 5
 
 	unsigned char ddr; /* dual data rate used */
 
 #define MMC_SDR_MODE 0
 #define MMC_1_2V_DDR_MODE 1
 #define MMC_1_8V_DDR_MODE 2
+
+	unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */
+
+#define MMC_SIGNAL_VOLTAGE_330 0
+#define MMC_SIGNAL_VOLTAGE_180 1
+#define MMC_SIGNAL_VOLTAGE_120 2
+
+	unsigned char drv_type; /* driver type (A, B, C, D) */
+
+#define MMC_SET_DRIVER_TYPE_B 0
+#define MMC_SET_DRIVER_TYPE_A 1
+#define MMC_SET_DRIVER_TYPE_C 2
+#define MMC_SET_DRIVER_TYPE_D 3
 };
 
 struct mmc_host_ops {
@@ -117,6 +135,10 @@ struct mmc_host_ops {
 
 	/* optional callback for HC quirks */
 	void (*init_card)(struct mmc_host *host, struct mmc_card *card);
+
+	int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
+	int (*execute_tuning)(struct mmc_host *host);
+	void (*enable_preset_value)(struct mmc_host *host, bool enable);
 };
 
 struct mmc_card;
@@ -173,6 +195,22 @@ struct mmc_host {
 	/* DDR mode at 1.2V */
 #define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */
 #define MMC_CAP_BUS_WIDTH_TEST (1 << 14) /* CMD14/CMD19 bus width ok */
+#define MMC_CAP_UHS_SDR12 (1 << 15) /* Host supports UHS SDR12 mode */
+#define MMC_CAP_UHS_SDR25 (1 << 16) /* Host supports UHS SDR25 mode */
+#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */
+#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */
+#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */
+#define MMC_CAP_SET_XPC_330 (1 << 20) /* Host supports >150mA current at 3.3V */
+#define MMC_CAP_SET_XPC_300 (1 << 21) /* Host supports >150mA current at 3.0V */
+#define MMC_CAP_SET_XPC_180 (1 << 22) /* Host supports >150mA current at 1.8V */
+#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
+#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
+#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
+#define MMC_CAP_MAX_CURRENT_200 (1 << 26) /* Host max current limit is 200mA */
+#define MMC_CAP_MAX_CURRENT_400 (1 << 27) /* Host max current limit is 400mA */
+#define MMC_CAP_MAX_CURRENT_600 (1 << 28) /* Host max current limit is 600mA */
+#define MMC_CAP_MAX_CURRENT_800 (1 << 29) /* Host max current limit is 800mA */
+#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
 
 	mmc_pm_flag_t pm_caps; /* supported pm features */
 
@@ -321,10 +359,19 @@ static inline int mmc_card_is_removable(struct mmc_host *host)
 	return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable;
 }
 
-static inline int mmc_card_is_powered_resumed(struct mmc_host *host)
+static inline int mmc_card_keep_power(struct mmc_host *host)
 {
 	return host->pm_flags & MMC_PM_KEEP_POWER;
 }
 
+static inline int mmc_card_wake_sdio_irq(struct mmc_host *host)
+{
+	return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ;
+}
+
+static inline int mmc_host_cmd23(struct mmc_host *host)
+{
+	return host->caps & MMC_CAP_CMD23;
+}
 #endif
 
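For orientation only, here is a minimal sketch (not taken from this series) of how a host driver might use the additions above: it advertises a subset of the new MMC_CAP_UHS_* capabilities and fills in the start_signal_voltage_switch hook. The setup helper and my_host_set_1v8_signalling() are assumptions for illustration; only the capability bits, the MMC_SIGNAL_VOLTAGE_* values and the mmc_host_ops signature come from the diff.

/* Hypothetical driver glue; my_host_set_1v8_signalling() is an assumed helper. */
static int my_host_start_signal_voltage_switch(struct mmc_host *mmc,
					       struct mmc_ios *ios)
{
	/* ios->signal_voltage is one of MMC_SIGNAL_VOLTAGE_{330,180,120}. */
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
		return my_host_set_1v8_signalling(mmc);
	return 0;
}

static const struct mmc_host_ops my_host_ops = {
	.start_signal_voltage_switch	= my_host_start_signal_voltage_switch,
};

static void my_host_setup_caps(struct mmc_host *mmc)
{
	/* Only advertise the UHS-I modes the controller can actually drive. */
	mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
		     MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50;
}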
diff --git a/include/linux/mmc/ioctl.h b/include/linux/mmc/ioctl.h
new file mode 100644
index 000000000000..5baf2983a12f
--- /dev/null
+++ b/include/linux/mmc/ioctl.h
@@ -0,0 +1,54 @@
+#ifndef LINUX_MMC_IOCTL_H
+#define LINUX_MMC_IOCTL_H
+struct mmc_ioc_cmd {
+	/* Implies direction of data.  true = write, false = read */
+	int write_flag;
+
+	/* Application-specific command.  true = precede with CMD55 */
+	int is_acmd;
+
+	__u32 opcode;
+	__u32 arg;
+	__u32 response[4];  /* CMD response */
+	unsigned int flags;
+	unsigned int blksz;
+	unsigned int blocks;
+
+	/*
+	 * Sleep at least postsleep_min_us useconds, and at most
+	 * postsleep_max_us useconds *after* issuing command.  Needed for
+	 * some read commands for which cards have no other way of indicating
+	 * they're ready for the next command (i.e. there is no equivalent of
+	 * a "busy" indicator for read operations).
+	 */
+	unsigned int postsleep_min_us;
+	unsigned int postsleep_max_us;
+
+	/*
+	 * Override driver-computed timeouts.  Note the difference in units!
+	 */
+	unsigned int data_timeout_ns;
+	unsigned int cmd_timeout_ms;
+
+	/*
+	 * For 64-bit machines, the next member, ``__u64 data_ptr``, wants to
+	 * be 8-byte aligned.  Make sure this struct is the same size when
+	 * built for 32-bit.
+	 */
+	__u32 __pad;
+
+	/* DAT buffer */
+	__u64 data_ptr;
+};
+#define mmc_ioc_cmd_set_data(ic, ptr) ic.data_ptr = (__u64)(unsigned long) ptr
+
+#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
+
+/*
+ * Since this ioctl is only meant to enhance (and not replace) normal access
+ * to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES
+ * is enforced per ioctl call.  For larger data transfers, use the normal
+ * block device operations.
+ */
+#define MMC_IOC_MAX_BYTES  (512L * 256)
+#endif /* LINUX_MMC_IOCTL_H */
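As a rough userspace usage sketch of the new ioctl (assumptions: the header is exported to userspace by the Kbuild change in this series, the device node is /dev/mmcblk0, and the flags value 0x35 mirrors the kernel's MMC_RSP_R1 | MMC_CMD_ADTC encoding for a single-block read):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/major.h>	/* MMC_BLOCK_MAJOR, used by MMC_IOC_CMD */
#include <linux/mmc/ioctl.h>

int main(void)
{
	__u8 buf[512];
	struct mmc_ioc_cmd ic;
	int fd = open("/dev/mmcblk0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(&ic, 0, sizeof(ic));
	ic.write_flag = 0;	/* read */
	ic.opcode = 17;		/* MMC_READ_SINGLE_BLOCK */
	ic.arg = 0;		/* first sector / byte address */
	ic.flags = 0x35;	/* assumed R1 + data-transfer encoding */
	ic.blksz = 512;
	ic.blocks = 1;		/* well under MMC_IOC_MAX_BYTES */
	mmc_ioc_cmd_set_data(ic, buf);

	if (ioctl(fd, MMC_IOC_CMD, &ic) < 0)
		perror("MMC_IOC_CMD");
	else
		printf("card status 0x%08x\n", ic.response[0]);

	close(fd);
	return 0;
}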
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 264ba5451e3b..ac26a685cca8 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -50,6 +50,7 @@
 #define MMC_SET_BLOCKLEN         16   /* ac   [31:0] block len   R1  */
 #define MMC_READ_SINGLE_BLOCK    17   /* adtc [31:0] data addr   R1  */
 #define MMC_READ_MULTIPLE_BLOCK  18   /* adtc [31:0] data addr   R1  */
+#define MMC_SEND_TUNING_BLOCK    19   /* adtc                    R1  */
 
   /* class 3 */
 #define MMC_WRITE_DAT_UNTIL_STOP 20   /* adtc [31:0] data addr   R1  */
@@ -82,6 +83,12 @@
 #define MMC_APP_CMD              55   /* ac   [31:16] RCA        R1  */
 #define MMC_GEN_CMD              56   /* adtc [0] RD/WR          R1  */
 
+static inline bool mmc_op_multi(u32 opcode)
+{
+	return opcode == MMC_WRITE_MULTIPLE_BLOCK ||
+	       opcode == MMC_READ_MULTIPLE_BLOCK;
+}
+
 /*
  * MMC_SWITCH argument format:
  *
@@ -255,18 +262,23 @@ struct _mmc_csd {
 
 #define EXT_CSD_PARTITION_ATTRIBUTE	156	/* R/W */
 #define EXT_CSD_PARTITION_SUPPORT	160	/* RO */
+#define EXT_CSD_WR_REL_PARAM		166	/* RO */
 #define EXT_CSD_ERASE_GROUP_DEF		175	/* R/W */
+#define EXT_CSD_PART_CONFIG		179	/* R/W */
 #define EXT_CSD_ERASED_MEM_CONT		181	/* RO */
 #define EXT_CSD_BUS_WIDTH		183	/* R/W */
 #define EXT_CSD_HS_TIMING		185	/* R/W */
 #define EXT_CSD_REV			192	/* RO */
 #define EXT_CSD_STRUCTURE		194	/* RO */
 #define EXT_CSD_CARD_TYPE		196	/* RO */
+#define EXT_CSD_PART_SWITCH_TIME	199	/* RO */
 #define EXT_CSD_SEC_CNT			212	/* RO, 4 bytes */
 #define EXT_CSD_S_A_TIMEOUT		217	/* RO */
+#define EXT_CSD_REL_WR_SEC_C		222	/* RO */
 #define EXT_CSD_HC_WP_GRP_SIZE		221	/* RO */
 #define EXT_CSD_ERASE_TIMEOUT_MULT	223	/* RO */
 #define EXT_CSD_HC_ERASE_GRP_SIZE	224	/* RO */
+#define EXT_CSD_BOOT_MULT		226	/* RO */
 #define EXT_CSD_SEC_TRIM_MULT		229	/* RO */
 #define EXT_CSD_SEC_ERASE_MULT		230	/* RO */
 #define EXT_CSD_SEC_FEATURE_SUPPORT	231	/* RO */
@@ -276,6 +288,12 @@ struct _mmc_csd {
  * EXT_CSD field definitions
  */
 
+#define EXT_CSD_WR_REL_PARAM_EN		(1<<2)
+
+#define EXT_CSD_PART_CONFIG_ACC_MASK	(0x7)
+#define EXT_CSD_PART_CONFIG_ACC_BOOT0	(0x1)
+#define EXT_CSD_PART_CONFIG_ACC_BOOT1	(0x2)
+
 #define EXT_CSD_CMD_SET_NORMAL		(1<<0)
 #define EXT_CSD_CMD_SET_SECURE		(1<<1)
 #define EXT_CSD_CMD_SET_CPSECURE	(1<<2)
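Sketch of how the new PART_CONFIG fields fit together (illustrative only: the helper below is not code from this series, and mmc_switch() is an in-kernel function from drivers/mmc/core/mmc_ops.c, assumed here to keep its (card, set, index, value, timeout_ms) signature):

/* Illustrative: make BOOT0 the active access partition. */
static int example_switch_to_boot0(struct mmc_card *card, u8 part_config,
				   unsigned int switch_timeout_ms)
{
	part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
	part_config |= EXT_CSD_PART_CONFIG_ACC_BOOT0;

	/* switch_timeout_ms would come from EXT_CSD_PART_SWITCH_TIME (10 ms units). */
	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			  EXT_CSD_PART_CONFIG, part_config,
			  switch_timeout_ms);
}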
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
index 3fd85e088cc3..7d35d52c3df3 100644
--- a/include/linux/mmc/sd.h
+++ b/include/linux/mmc/sd.h
@@ -17,6 +17,7 @@
 /* This is basically the same command as for MMC with some quirks. */
 #define SD_SEND_RELATIVE_ADDR     3   /* bcr                     R6  */
 #define SD_SEND_IF_COND           8   /* bcr  [11:0] See below   R7  */
+#define SD_SWITCH_VOLTAGE         11  /* ac                      R1  */
 
   /* class 10 */
 #define SD_SWITCH                 6   /* adtc [31:0] See below   R1  */
@@ -32,6 +33,12 @@
 #define SD_APP_OP_COND           41   /* bcr  [31:0] OCR         R3  */
 #define SD_APP_SEND_SCR          51   /* adtc                    R1  */
 
+/* OCR bit definitions */
+#define SD_OCR_S18R		(1 << 24)    /* 1.8V switching request */
+#define SD_ROCR_S18A		SD_OCR_S18R  /* 1.8V switching accepted by card */
+#define SD_OCR_XPC		(1 << 28)    /* SDXC power control */
+#define SD_OCR_CCS		(1 << 30)    /* Card Capacity Status */
+
 /*
  * SD_SWITCH argument format:
  *
@@ -59,7 +66,7 @@
 
 #define SCR_SPEC_VER_0		0	/* Implements system specification 1.0 - 1.01 */
 #define SCR_SPEC_VER_1		1	/* Implements system specification 1.10 */
-#define SCR_SPEC_VER_2		2	/* Implements system specification 2.00 */
+#define SCR_SPEC_VER_2		2	/* Implements system specification 2.00-3.0X */
 
 /*
  * SD bus widths
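As a hedged sketch of where the OCR bits above are used during SD initialization (the helpers below are illustrative, not core code): the host ORs SD_OCR_S18R and, for SDXC, SD_OCR_XPC into the ACMD41 argument, and only if the card echoes S18A back does it issue CMD11 (SD_SWITCH_VOLTAGE) to move signalling to 1.8 V.

/* Illustrative ACMD41 argument construction and response check. */
static void example_build_acmd41_arg(struct mmc_host *host, u32 *ocr)
{
	/* Request 1.8V signalling only if the host supports a UHS-I mode. */
	if (host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50))
		*ocr |= SD_OCR_S18R;

	/* Request >150mA (XPC) operation where the host can supply it. */
	if (host->caps & MMC_CAP_SET_XPC_330)
		*ocr |= SD_OCR_XPC;
}

static bool example_card_accepted_1v8(u32 rocr)
{
	/* S18A occupies the same bit position as S18R in the response. */
	return rocr & SD_ROCR_S18A;
}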
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 83bd9f76709a..6a68c4eb4e44 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -85,6 +85,8 @@ struct sdhci_host {
 #define SDHCI_QUIRK_NO_HISPD_BIT			(1<<29)
 /* Controller treats ADMA descriptors with length 0000h incorrectly */
 #define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC		(1<<30)
+/* The read-only detection via SDHCI_PRESENT_STATE register is unstable */
+#define SDHCI_QUIRK_UNSTABLE_RO_DETECT			(1<<31)
 
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
@@ -109,11 +111,16 @@ struct sdhci_host {
 #define SDHCI_USE_ADMA		(1<<1)	/* Host is ADMA capable */
 #define SDHCI_REQ_USE_DMA	(1<<2)	/* Use DMA for this req. */
 #define SDHCI_DEVICE_DEAD	(1<<3)	/* Device unresponsive */
+#define SDHCI_SDR50_NEEDS_TUNING (1<<4)	/* SDR50 needs tuning */
+#define SDHCI_NEEDS_RETUNING	(1<<5)	/* Host needs retuning */
+#define SDHCI_AUTO_CMD12	(1<<6)	/* Auto CMD12 support */
+#define SDHCI_AUTO_CMD23	(1<<7)	/* Auto CMD23 support */
 
 	unsigned int version;	/* SDHCI spec. version */
 
 	unsigned int max_clk;	/* Max possible freq (MHz) */
 	unsigned int timeout_clk;	/* Timeout freq (KHz) */
+	unsigned int clk_mul;	/* Clock Muliplier value */
 
 	unsigned int clock;	/* Current clock (MHz) */
 	u8 pwr;			/* Current voltage */
@@ -145,6 +152,14 @@ struct sdhci_host {
 	unsigned int ocr_avail_sd;
 	unsigned int ocr_avail_mmc;
 
+	wait_queue_head_t buf_ready_int;	/* Waitqueue for Buffer Read Ready interrupt */
+	unsigned int tuning_done;	/* Condition flag set when CMD19 succeeds */
+
+	unsigned int tuning_count;	/* Timer count for re-tuning */
+	unsigned int tuning_mode;	/* Re-tuning mode supported by host */
+#define SDHCI_TUNING_MODE_1	0
+	struct timer_list tuning_timer;	/* Timer for tuning */
+
 	unsigned long private[0] ____cacheline_aligned;
 };
 #endif /* __SDHCI_H */
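For orientation, a minimal sketch (not the sdhci.c implementation) of how the new re-tuning fields could be wired up: the driver arms tuning_timer after a successful CMD19 tuning cycle and sets SDHCI_NEEDS_RETUNING when it expires, so the next request re-runs the tuning procedure. tuning_count is assumed here to already hold the re-tune period in seconds.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/mmc/sdhci.h>

/* Sketch: mark the host as needing re-tuning when the timer fires. */
static void example_tuning_timer_fn(unsigned long data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;

	host->flags |= SDHCI_NEEDS_RETUNING;
}

static void example_arm_tuning_timer(struct sdhci_host *host)
{
	if (!host->tuning_count)
		return;	/* re-tuning not required by the controller */

	setup_timer(&host->tuning_timer, example_tuning_timer_fn,
		    (unsigned long)host);
	mod_timer(&host->tuning_timer, jiffies + host->tuning_count * HZ);
}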
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index c981b959760f..faf32b6ec185 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -3,12 +3,16 @@
 
 #include <linux/types.h>
 
+struct platform_device;
+struct tmio_mmc_data;
+
 struct sh_mobile_sdhi_info {
 	int dma_slave_tx;
 	int dma_slave_rx;
 	unsigned long tmio_flags;
 	unsigned long tmio_caps;
 	u32 tmio_ocr_mask;	/* available MMC voltages */
+	struct tmio_mmc_data *pdata;
 	void (*set_pwr)(struct platform_device *pdev, int state);
 	int (*get_cd)(struct platform_device *pdev);
 };
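The two forward declarations above let this header stand alone without pulling in platform_device.h or mfd/tmio.h first; a hypothetical board file might still fill the platform data roughly as before (slave IDs, caps and OCR mask below are placeholders, not values from this series):

#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>

/* Hypothetical board glue; all values are placeholders. */
static struct sh_mobile_sdhi_info example_sdhi0_info = {
	.dma_slave_tx	= 0,
	.dma_slave_rx	= 1,
	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
	.tmio_ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
};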